package ru.dbotthepony.kstarbound.io

import it.unimi.dsi.fastutil.objects.Object2ObjectFunction
import it.unimi.dsi.fastutil.objects.Object2ObjectLinkedOpenHashMap
import ru.dbotthepony.kommons.gson.get
import ru.dbotthepony.kommons.io.readBinaryString
import ru.dbotthepony.kommons.io.readVarInt
import ru.dbotthepony.kommons.io.readVarLong
import ru.dbotthepony.kstarbound.IStarboundFile
import ru.dbotthepony.kstarbound.Starbound
import ru.dbotthepony.kstarbound.getValue
import ru.dbotthepony.kstarbound.json.readJsonObject
import ru.dbotthepony.kstarbound.util.CarriedExecutor
import ru.dbotthepony.kstarbound.util.sbIntern
import ru.dbotthepony.kstarbound.util.supplyAsync
import java.io.BufferedInputStream
import java.io.Closeable
import java.io.DataInputStream
import java.io.File
import java.io.IOException
import java.io.InputStream
import java.io.RandomAccessFile
import java.nio.channels.Channels
import java.util.*
import java.util.concurrent.CompletableFuture
import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.locks.ReentrantLock
import java.util.function.Supplier
import kotlin.concurrent.withLock

private fun readHeader(reader: RandomAccessFile, required: Int) {
	val read = reader.read()
	require(read == required) { "Bad Starbound Pak header, expected $required, got $read" }
}

class StarboundPak(val path: File, callback: (finished: Boolean, status: String) -> Unit = { _, _ -> }) : Closeable {
	internal inner class SBDirectory(
		override val name: String,
		override val parent: IStarboundFile?,
	) : IStarboundFile {
		override val exists: Boolean
			get() = true

		override val isDirectory: Boolean
			get() = true

		override val isFile: Boolean
			get() = false

		private var frozen = false
		private val innerChildren = Object2ObjectLinkedOpenHashMap<String, IStarboundFile>()

		fun put(value: IStarboundFile) {
			check(!frozen) { "Can't put, already frozen!" }
			innerChildren[value.name] = value
		}

		fun subdir(name: String): SBDirectory {
			check(!frozen) { "Can't subdir, already frozen!" }
			require(name != "") { "Empty directory name provided" }
			return innerChildren.computeIfAbsent(name, Object2ObjectFunction { SBDirectory(it as String, this) }) as? SBDirectory ?: throw IllegalStateException("$name already exists (in ${computeFullPath()})")
		}

		override val children: Map<String, IStarboundFile> = Collections.unmodifiableMap(innerChildren)

		fun freeze() {
			check(!frozen) { "Already frozen" }
			frozen = true

			for (children in innerChildren.values) {
				if (children is SBDirectory) {
					children.freeze()
				}
			}
		}

		override fun equals(other: Any?): Boolean {
			return other is IStarboundFile && computeFullPath() == other.computeFullPath()
		}

		private val hash = computeFullPath().hashCode()

		override fun hashCode(): Int {
			return hash
		}

		override fun open(): InputStream {
			throw IllegalStateException("${computeFullPath()} is a directory")
		}

		override fun asyncRead(): CompletableFuture<ByteArray> {
			throw IllegalStateException("${computeFullPath()} is a directory")
		}

		override fun toString(): String {
			return "SBDirectory[${computeFullPath()} @ ${metadata.get("friendlyName", "")} $path]"
		}
	}

	internal inner class SBFile(
		override val name: String,
		override val parent: IStarboundFile?,
		val offset: Long,
		val length: Long
	) : IStarboundFile {
		override val exists: Boolean
			get() = true

		override val isDirectory: Boolean
			get() = false

		override val isFile: Boolean
			get() = true

		override val children: Map<String, IStarboundFile>?
			get() = null

		override fun equals(other: Any?): Boolean {
			return other is IStarboundFile && computeFullPath() == other.computeFullPath()
		}

		private val hash = computeFullPath().hashCode()

		override fun hashCode(): Int {
			return hash
		}

		override fun asyncRead(): CompletableFuture<ByteArray> {
			if (length > Int.MAX_VALUE.toLong())
				throw RuntimeException("File is too big to be read asynchronously: $length bytes to read!")

			return scheduler.schedule(offset, Supplier {
				reader.seek(offset)
				val bytes = ByteArray(length.toInt())
				reader.readFully(bytes)
				bytes
			})
		}

		override fun open(): InputStream {
			return object : InputStream() {
				private var innerOffset = 0L

				override fun skip(n: Long): Long {
					if (innerOffset >= length) {
						return 0
					} else if (innerOffset + n < length) {
						innerOffset += n
						return n
					} else {
						val diff = length - innerOffset
						innerOffset = length
						return diff
					}
				}

				override fun read(): Int {
					if (innerOffset >= length) {
						return -1
					}

					reader.seek(innerOffset + offset)
					innerOffset++
					return reader.read()
				}

				override fun readNBytes(len: Int): ByteArray {
					require(len >= 0) { "Negative length to read: $len" }

					val readMax = len.coerceAtMost((length - innerOffset).toInt())

					if (readMax == 0)
						return ByteArray(0)

					val b = ByteArray(readMax)
					reader.seek(innerOffset + offset)
					reader.readFully(b)
					innerOffset += readMax
					return b
				}

				override fun read(b: ByteArray, off: Int, len: Int): Int {
					Objects.checkFromIndexSize(off, len, b.size)

					// per the InputStream contract, a zero-length read always returns 0
					if (len == 0)
						return 0

					val readMax = len.coerceAtMost((length - innerOffset).toInt())

					if (readMax <= 0)
						return -1

					reader.seek(innerOffset + offset)
					val readBytes = reader.read(b, off, readMax)

					if (readBytes == -1)
						throw RuntimeException("Unexpected EOF while reading $readMax bytes starting at $offset in $path")

					innerOffset += readBytes
					return readBytes
				}
			}
		}

		override fun toString(): String {
			return "SBFile[${computeFullPath()} @ ${metadata.get("friendlyName", "")} $path]"
		}
	}

	private fun interface ReadScheduler {
		fun schedule(offset: Long, action: Supplier<ByteArray>): CompletableFuture<ByteArray>
	}

	// SSDs
	private object DirectScheduler : ReadScheduler {
		override fun schedule(offset: Long, action: Supplier<ByteArray>): CompletableFuture<ByteArray> {
			return Starbound.IO_EXECUTOR.supplyAsync(action)
		}
	}

	// HDDs
	private class PriorityScheduler : ReadScheduler {
		private val counter = AtomicInteger()

		private data class Action(val offset: Long, val action: Supplier<ByteArray>, val id: Int, val future: CompletableFuture<ByteArray>) : Runnable, Comparable<Action> {
			override fun compareTo(other: Action): Int {
				var cmp = offset.compareTo(other.offset) // read files closer to the beginning of the pak first
				if (cmp == 0) cmp = id.compareTo(other.id) // otherwise fulfil requests in FIFO order
				return cmp
			}

			override fun run() {
				future.complete(action.get())
			}
		}

		private val lock = ReentrantLock()
		private val queue = PriorityQueue<Action>()
		private val carrier = CarriedExecutor(Starbound.IO_EXECUTOR)

		override fun schedule(offset: Long, action: Supplier<ByteArray>): CompletableFuture<ByteArray> {
			val future = CompletableFuture<ByteArray>()
			val task = Action(offset, action, counter.getAndIncrement(), future)

			lock.withLock {
				queue.add(task)
			}

			carrier.execute {
				lock.withLock { queue.remove() }.run()
			}

			return future
		}
	}

	// TODO: we need to determine whether we are on an SSD or an HDD.
	// If we are on an SSD, assuming it is an HDD won't hurt performance much.
	private val scheduler: ReadScheduler = PriorityScheduler()

	private val reader by object : ThreadLocal<RandomAccessFile>() {
		override fun initialValue(): RandomAccessFile {
			return RandomAccessFile(path, "r")
		}
	}

	init {
		// pak files begin with the magic string "SBAsset6"
		readHeader(reader, 0x53) // S
		readHeader(reader, 0x42) // B
		readHeader(reader, 0x41) // A
		readHeader(reader, 0x73) // s
		readHeader(reader, 0x73) // s
		readHeader(reader, 0x65) // e
		readHeader(reader, 0x74) // t
		readHeader(reader, 0x36) // 6
	}

	// next comes an 8-byte big-endian long holding the offset of INDEX,
	// i.e. seek(indexOffset) lands exactly on INDEX
	private val indexOffset = reader.readLong()

	init {
		reader.seek(indexOffset)

		readHeader(reader, 0x49) // I
		readHeader(reader, 0x4E) // N
		readHeader(reader, 0x44) // D
		readHeader(reader, 0x45) // E
		readHeader(reader, 0x58) // X

		callback(false, "Reading metadata")
	}

	// immediately after INDEX comes the archive metadata, encoded as Binary Json
	val metadata = DataInputStream(object : InputStream() {
		override fun read(): Int {
			return reader.read()
		}

		override fun read(b: ByteArray, off: Int, len: Int): Int {
			return reader.read(b, off, len)
		}
	}).readJsonObject()

	// right after the metadata comes the number of files inside this pak, as a big-endian variable-length integer
	val indexNodeCount = reader.readVarLong()

	val root: IStarboundFile = SBDirectory("", null)

	init {
		// right after the file count come the index nodes themselves, each encoded as:
		// VarInt (file name length)
		// byte[] (utf-8 file name)
		// long (offset from the beginning of the pak file)
		// long (file length)
		val stream = DataInputStream(BufferedInputStream(Channels.newInputStream(reader.channel), 2 shl 22))
		val flength = reader.length()

		for (i in 0 until indexNodeCount) {
			var name: String? = null

			try {
				callback(false, "Reading index node $i")

				name = stream.readInternedString()
				require(name.startsWith("/")) { "index node at $i with name '$name' does not appear to be an absolute path" }
				val offset = stream.readLong()
				val length = stream.readLong()

				if (offset > flength) {
					throw IndexOutOfBoundsException("Garbage offset at index $i: $offset")
				}

				if (length + offset > flength) {
					throw IndexOutOfBoundsException("Garbage offset + length at index $i: ${length + offset}")
				}

				// Starbound ignores letter case when looking up paths, even inside pak archives
				val split = name.lowercase().split("/")
				var parent = root as SBDirectory

				for (splitIndex in 1 until split.size - 1) {
					parent = parent.subdir(split[splitIndex].sbIntern())
				}

				parent.put(SBFile(split.last().sbIntern(), parent, offset, length))
			} catch (err: Throwable) {
				if (name == null) {
					throw IOException("Reading index node at $i", err)
				} else {
					throw IOException("Reading index node at $i ($name)", err)
				}
			}
		}

		callback(false, "Freezing virtual file system")
		(root as SBDirectory).freeze()
		callback(true, "Reading indexes finished")
	}

	override fun close() {
		reader.close()
	}
}
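
// Minimal usage sketch of StarboundPak. This is illustrative only: the pak path and the asset name
// below are hypothetical examples, not values required by the format. It relies solely on members
// declared above (metadata, root, children, isFile, asyncRead, close via Closeable).
@Suppress("unused")
private fun exampleReadPak() {
	// StarboundPak implements Closeable, so `use` closes the reader when the block exits
	StarboundPak(File("assets/packed.pak")) { finished, status -> println("$status (finished=$finished)") }.use { pak ->
		// metadata was parsed from the Binary Json block right after INDEX
		println(pak.metadata.get("friendlyName", ""))

		// index node names are lowercased on load, so lookups should use lowercase paths;
		// "player.config" sits directly under the root directory in a stock assets pak
		val node = pak.root.children?.get("player.config")

		if (node != null && node.isFile) {
			// small files can be read in one go through the read scheduler
			val bytes = node.asyncRead().join()
			println("player.config is ${bytes.size} bytes")
		}
	}
}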