Initial commit

This commit is contained in:
lordwelch 2020-11-12 06:41:45 -08:00
commit 2e9e89a247
10 changed files with 1953 additions and 0 deletions

21
LICENSE Normal file
View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015 Norbyte
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

481
NodeAttribute.go Normal file
View File

@ -0,0 +1,481 @@
package lslib
import (
"encoding/base64"
"encoding/xml"
"errors"
"fmt"
"strconv"
"strings"
"github.com/google/uuid"
)
// XMLMarshaler is implemented by attribute values that contribute their
// own XML attributes to an element's start tag, instead of being
// rendered through NodeAttribute.String.
type XMLMarshaler interface {
	MarshalXML2(e *xml.Encoder, start *xml.StartElement) error
}

// TranslatedString is a localised string referenced by a handle.
type TranslatedString struct {
	Version uint16 // revision number of the translated text
	Value   string // inline text (may be empty when only the handle is stored)
	Handle  string // presumably a lookup key into the game's localisation tables — TODO confirm
}
// MarshalXML2 appends the "handle" and "version" attributes of ts to
// start. The element token itself is emitted by the caller.
func (ts TranslatedString) MarshalXML2(e *xml.Encoder, start *xml.StartElement) error {
	handleAttr := xml.Attr{
		Name:  xml.Name{Local: "handle"},
		Value: ts.Handle,
	}
	versionAttr := xml.Attr{
		Name:  xml.Name{Local: "version"},
		Value: strconv.Itoa(int(ts.Version)),
	}
	start.Attr = append(start.Attr, handleAttr, versionAttr)
	return nil
}
// TranslatedFSStringArgument is a key/value substitution attached to a
// TranslatedFSString.
type TranslatedFSStringArgument struct {
	String TranslatedFSString // nested string the argument expands to
	Key    string
	Value  string
}

// TranslatedFSString is a TranslatedString that may carry substitution
// arguments.
type TranslatedFSString struct {
	TranslatedString
	Arguments []TranslatedFSStringArgument
}
// func (tfs TranslatedFSString) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
// start.Attr = append(start.Attr,
// xml.Attr{
// Name: xml.Name{Local: "version"},
// Value: strconv.Itoa(int(tfs.Version)),
// },
// xml.Attr{
// Name: xml.Name{Local: "handle"},
// Value: tfs.Handle,
// },
// xml.Attr{
// Name: xml.Name{Local: "value"},
// Value: ts.Value,
// },
// )
// return nil
// }
// Ivec is an integer vector (ivec2/ivec3/ivec4) attribute value.
type Ivec []int

// String renders the vector as space-separated decimal integers, e.g.
// "1 2 3". An empty vector yields "".
//
// Fix: the previous implementation stripped a leading space with
// b.String()[1:], which panicked (slice out of range) on an empty vector.
func (i Ivec) String() string {
	parts := make([]string, len(i))
	for n, v := range i {
		parts[n] = strconv.Itoa(v)
	}
	return strings.Join(parts, " ")
}
type Vec []float32
func (v Vec) String() string {
b := &strings.Builder{}
for _, x := range v {
b.WriteString(" ")
if x == 0 {
x = 0
}
b.WriteString(strconv.FormatFloat(float64(x), 'G', 7, 32))
}
return b.String()[1:]
}
// DataType identifies the serialised type of a NodeAttribute value.
// The numeric values match the on-disk type IDs in LS files.
type DataType int

const (
	DT_None DataType = iota
	DT_Byte
	DT_Short
	DT_UShort
	DT_Int
	DT_UInt
	DT_Float
	DT_Double
	DT_IVec2
	DT_IVec3
	DT_IVec4
	DT_Vec2
	DT_Vec3
	DT_Vec4
	DT_Mat2
	DT_Mat3
	DT_Mat3x4
	DT_Mat4x3
	DT_Mat4
	DT_Bool
	DT_String
	DT_Path
	DT_FixedString
	DT_LSString
	DT_ULongLong
	DT_ScratchBuffer
	// Seems to be unused?
	DT_Long
	DT_Int8
	DT_TranslatedString
	DT_WString
	DT_LSWString
	DT_UUID
	DT_Int64
	DT_TranslatedFSString
	// Last supported datatype, always keep this one at the end
	DT_Max = iota - 1
)
// MarshalXMLAttr encodes the data type as an XML attribute whose value
// is the type's canonical LSX name (see DataType.String). It never
// fails.
func (dt *DataType) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
	attr := xml.Attr{Name: name, Value: dt.String()}
	return attr, nil
}
// String returns the canonical LSX type name for dt (e.g. "int32",
// "fvec3", "guid"), or "" for a value outside the known range.
func (dt DataType) String() string {
	switch dt {
	case DT_None:
		return "None"
	case DT_Byte:
		return "uint8"
	case DT_Short:
		return "int16"
	case DT_UShort:
		return "uint16"
	case DT_Int:
		return "int32"
	case DT_UInt:
		return "uint32"
	case DT_Float:
		return "float"
	case DT_Double:
		return "double"
	case DT_IVec2:
		return "ivec2"
	case DT_IVec3:
		return "ivec3"
	case DT_IVec4:
		return "ivec4"
	case DT_Vec2:
		return "fvec2"
	case DT_Vec3:
		return "fvec3"
	case DT_Vec4:
		return "fvec4"
	case DT_Mat2:
		return "mat2x2"
	case DT_Mat3:
		return "mat3x3"
	case DT_Mat3x4:
		return "mat3x4"
	case DT_Mat4x3:
		return "mat4x3"
	case DT_Mat4:
		return "mat4x4"
	case DT_Bool:
		return "bool"
	case DT_String:
		return "string"
	case DT_Path:
		return "path"
	case DT_FixedString:
		return "FixedString"
	case DT_LSString:
		return "LSString"
	case DT_ULongLong:
		return "uint64"
	case DT_ScratchBuffer:
		return "ScratchBuffer"
	case DT_Long:
		return "old_int64"
	case DT_Int8:
		return "int8"
	case DT_TranslatedString:
		return "TranslatedString"
	case DT_WString:
		return "WString"
	case DT_LSWString:
		return "LSWString"
	case DT_UUID:
		return "guid"
	case DT_Int64:
		return "int64"
	case DT_TranslatedFSString:
		return "TranslatedFSString"
	}
	return ""
}
// NodeAttribute is a single named, typed value attached to a node.
// Value holds the Go representation appropriate for Type (see
// ReadAttribute and FromString for the concrete types used).
type NodeAttribute struct {
	Name  string      `xml:"id,attr"`
	Type  DataType    `xml:"type,attr"`
	Value interface{} `xml:"value,attr"`
}
// MarshalXML encodes the attribute as a single element carrying "id",
// "type" and — for plain values — a "value" attribute. Values that
// implement XMLMarshaler append their own attributes instead.
func (na NodeAttribute) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	t, _ := na.Type.MarshalXMLAttr(xml.Name{Local: "type"}) // MarshalXMLAttr never fails
	start.Attr = append(start.Attr,
		xml.Attr{
			Name:  xml.Name{Local: "id"},
			Value: na.Name,
		},
		t,
	)
	if v, ok := na.Value.(XMLMarshaler); ok {
		// Fix: this error was previously discarded.
		if err := v.MarshalXML2(e, &start); err != nil {
			return err
		}
	} else {
		start.Attr = append(start.Attr,
			xml.Attr{
				Name:  xml.Name{Local: "value"},
				Value: na.String(),
			},
		)
	}
	// Fix: EncodeToken errors were previously ignored.
	if err := e.EncodeToken(start); err != nil {
		return err
	}
	return e.EncodeToken(xml.EndElement{
		Name: start.Name,
	})
}
// String renders the attribute value in its LSX textual form:
// base64 for scratch buffers, %.7G for floats (with negative zero
// normalised), and the default fmt representation otherwise.
func (na NodeAttribute) String() string {
	switch na.Type {
	case DT_ScratchBuffer:
		// ScratchBuffer is a special case, as its stored as byte[] and ToString() doesn't really do what we want
		if value, ok := na.Value.([]byte); ok {
			return base64.StdEncoding.EncodeToString(value)
		}
		return fmt.Sprint(na.Value)
	case DT_Float, DT_Double:
		// Normalise negative zero so "-0" is never printed.
		// Fix: the old code compared the interface value against the
		// untyped int 0, which can never equal a float32/float64, so the
		// normalisation was dead code.
		switch v := na.Value.(type) {
		case float32:
			if v == 0 {
				na.Value = float32(0)
			}
		case float64:
			if v == 0 {
				na.Value = float64(0)
			}
		}
		return fmt.Sprintf("%.7G", na.Value)
	default:
		return fmt.Sprint(na.Value)
	}
}
// GetRows returns the number of rows for vector and matrix data types.
// Vectors count as a single row; any other type yields an error.
func (na NodeAttribute) GetRows() (int, error) {
	rows := map[DataType]int{
		DT_IVec2: 1, DT_IVec3: 1, DT_IVec4: 1,
		DT_Vec2: 1, DT_Vec3: 1, DT_Vec4: 1,
		DT_Mat2: 2,
		DT_Mat3: 3, DT_Mat3x4: 3,
		DT_Mat4x3: 4, DT_Mat4: 4,
	}
	if n, ok := rows[na.Type]; ok {
		return n, nil
	}
	return 0, errors.New("Data type does not have rows")
}
// GetColumns returns the number of columns for vector and matrix data
// types; any other type yields an error.
func (na NodeAttribute) GetColumns() (int, error) {
	cols := map[DataType]int{
		DT_IVec2: 2, DT_Vec2: 2, DT_Mat2: 2,
		DT_IVec3: 3, DT_Vec3: 3, DT_Mat3: 3, DT_Mat4x3: 3,
		DT_IVec4: 4, DT_Vec4: 4, DT_Mat3x4: 4, DT_Mat4: 4,
	}
	if n, ok := cols[na.Type]; ok {
		return n, nil
	}
	return 0, errors.New("Data type does not have columns")
}
// IsNumeric reports whether the attribute's type is a scalar numeric
// type (used by FromString to substitute "0" for empty input).
//
// NOTE(review): DT_UShort and DT_Int64 are absent from this list even
// though FromString parses them numerically — confirm whether that is
// intentional before relying on this predicate.
func (na NodeAttribute) IsNumeric() bool {
	switch na.Type {
	case DT_Byte, DT_Short, DT_Int, DT_UInt, DT_Float, DT_Double, DT_ULongLong, DT_Long, DT_Int8:
		return true
	default:
		return false
	}
}
// FromString parses str — the LSX textual representation — into
// na.Value according to na.Type. Integer types accept base prefixes
// ("0x...") where base 0 is used. It returns an error when the text is
// not valid for the declared type, and leaves na.Value untouched for
// the not-yet-implemented matrix and translated-string types.
func (na *NodeAttribute) FromString(str string) error {
	if na.IsNumeric() {
		// Workaround: Some XML files use empty strings, instead of "0" for zero values.
		if str == "" {
			str = "0"
		}
	}
	var err error
	switch na.Type {
	case DT_None:
		// This is a null type, cannot have a value
	case DT_Byte:
		na.Value = []byte(str)
	case DT_Short:
		// Base 0 also handles hexadecimal integers in XML files.
		na.Value, err = strconv.ParseInt(str, 0, 16)
		if err != nil {
			return err
		}
	case DT_UShort:
		na.Value, err = strconv.ParseUint(str, 0, 16)
		if err != nil {
			return err
		}
	case DT_Int:
		na.Value, err = strconv.ParseInt(str, 0, 32)
		if err != nil {
			return err
		}
	case DT_UInt:
		// Fix: the bit size was 16, silently rejecting any uint32 above 65535.
		na.Value, err = strconv.ParseUint(str, 0, 32)
		if err != nil {
			return err
		}
	case DT_Float:
		na.Value, err = strconv.ParseFloat(str, 32)
		if err != nil {
			return err
		}
	case DT_Double:
		na.Value, err = strconv.ParseFloat(str, 64)
		if err != nil {
			return err
		}
	case DT_IVec2, DT_IVec3, DT_IVec4:
		// Fix: components are whitespace-separated (see Ivec.String);
		// this previously split on "." and could never round-trip.
		nums := strings.Fields(str)
		length, err := na.GetColumns()
		if err != nil {
			return err
		}
		if length != len(nums) {
			return fmt.Errorf("A vector of length %d was expected, got %d", length, len(nums))
		}
		vec := make([]int, length)
		for i, v := range nums {
			n, err := strconv.ParseInt(v, 0, 64)
			if err != nil {
				return err
			}
			vec[i] = int(n)
		}
		na.Value = vec
	case DT_Vec2, DT_Vec3, DT_Vec4:
		// Fix: whitespace-separated, as produced by Vec.String (was ".").
		nums := strings.Fields(str)
		length, err := na.GetColumns()
		if err != nil {
			return err
		}
		if length != len(nums) {
			return fmt.Errorf("A vector of length %d was expected, got %d", length, len(nums))
		}
		vec := make([]float64, length)
		for i, v := range nums {
			vec[i], err = strconv.ParseFloat(v, 64)
			if err != nil {
				return err
			}
		}
		na.Value = vec
	case DT_Mat2, DT_Mat3, DT_Mat3x4, DT_Mat4x3, DT_Mat4:
		// Matrix parsing has not been ported from LSLib yet.
		return errors.New("not implemented")
	case DT_Bool:
		na.Value, err = strconv.ParseBool(str)
		if err != nil {
			return err
		}
	case DT_String, DT_Path, DT_FixedString, DT_LSString, DT_WString, DT_LSWString:
		na.Value = str
	case DT_TranslatedString:
		// Not implemented: only the value part of the translated string
		// would be set here, not the handle.
	case DT_TranslatedFSString:
		// Not implemented: see DT_TranslatedString above.
	case DT_ULongLong:
		na.Value, err = strconv.ParseUint(str, 10, 64)
		// Fix: this parse error was previously ignored.
		if err != nil {
			return err
		}
	case DT_ScratchBuffer:
		na.Value, err = base64.StdEncoding.DecodeString(str)
		if err != nil {
			return err
		}
	case DT_Long, DT_Int64:
		na.Value, err = strconv.ParseInt(str, 10, 64)
		if err != nil {
			return err
		}
	case DT_Int8:
		na.Value, err = strconv.ParseInt(str, 10, 8)
		if err != nil {
			return err
		}
	case DT_UUID:
		na.Value, err = uuid.Parse(str)
		if err != nil {
			return err
		}
	default:
		// This should not happen!
		return fmt.Errorf("FromString() not implemented for type %v", na.Type)
	}
	return nil
}

1
README Normal file
View File

@ -0,0 +1 @@
TODO

327
binutils.go Normal file
View File

@ -0,0 +1,327 @@
package lslib
import (
"bytes"
"compress/zlib"
"encoding/binary"
"errors"
"fmt"
"io"
"io/ioutil"
"github.com/google/uuid"
"github.com/pierrec/lz4"
)
// reverse flips nums in place.
func reverse(nums []byte) {
	for l, r := 0, len(nums)-1; l < r; l, r = l+1, r-1 {
		nums[l], nums[r] = nums[r], nums[l]
	}
}
// clen returns the C-string length of n: the index of the first NUL
// byte, or len(n) when no terminator is present.
func clen(n []byte) int {
	if i := bytes.IndexByte(n, 0); i >= 0 {
		return i
	}
	return len(n)
}
// CompressionFlagsToMethod extracts the compression method from the low
// nibble of a flags byte; unrecognised values map to CMInvalid.
func CompressionFlagsToMethod(flags byte) CompressionMethod {
	m := CompressionMethod(flags & 0x0f)
	if m == CMNone || m == CMZlib || m == CMLZ4 {
		return m
	}
	return CMInvalid
}
// CompressionFlagsToLevel extracts the compression level from the high
// nibble of a flags byte. It panics on an unrecognised level, matching
// the previous behaviour.
//
// Fix: the trailing "return 0" after the switch was unreachable (every
// case returns and the default panics) and has been removed.
func CompressionFlagsToLevel(flags byte) CompressionLevel {
	switch CompressionLevel(flags & 0xf0) {
	case FastCompression:
		return FastCompression
	case DefaultCompression:
		return DefaultCompression
	case MaxCompression:
		return MaxCompression
	default:
		panic(errors.New("Invalid compression flags"))
	}
}
// MakeCompressionFlags packs a compression method (low nibble) and
// level (high nibble) into a single flags value. CMNone and CMInvalid
// always yield 0.
func MakeCompressionFlags(method CompressionMethod, level CompressionLevel) int {
	var flags int
	switch method {
	case CMNone, CMInvalid:
		return 0
	case CMZlib:
		flags = 0x1
	case CMLZ4:
		flags = 0x2
	}
	return flags | int(level)
}
// Decompress returns a ReadSeeker over the decompressed contents of
// compressed, selecting the algorithm from the low nibble of
// compressionFlags. It panics on unsupported formats or failed
// decompression (keeping the function's existing panic-based contract).
func Decompress(compressed io.Reader, compressionFlags byte, chunked bool) io.ReadSeeker {
	switch CompressionMethod(compressionFlags & 0x0f) {
	case CMNone:
		if v, ok := compressed.(io.ReadSeeker); ok {
			return v
		}
		panic(errors.New("compressed must be an io.ReadSeeker if there is no compression"))
	case CMZlib:
		// Fix: these errors were previously discarded, causing a nil
		// pointer panic further on when the stream was corrupt.
		zr, err := zlib.NewReader(compressed)
		if err != nil {
			panic(err)
		}
		v, err := ioutil.ReadAll(zr)
		if err != nil {
			panic(err)
		}
		return bytes.NewReader(v)
	case CMLZ4:
		if chunked {
			zr := lz4.NewReader(compressed)
			v, err := ioutil.ReadAll(zr)
			if err != nil {
				panic(err)
			}
			return bytes.NewReader(v)
		}
		// Block (non-chunked) LZ4 needs the decompressed size up front.
		panic(errors.New("not implemented"))
		// src, _ := ioutil.ReadAll(compressed)
		// dst := make([]byte, decompressedSize)
		// lz4.UncompressBlock(src, dst)
		// return bytes.NewReader(dst)
	default:
		panic(fmt.Errorf("No decompressor found for this format: %v", compressionFlags))
	}
}
// ReadCString reads exactly length bytes from r and returns the string
// up to (but excluding) the first NUL terminator. An error is returned
// when the read fails, when length is zero, or when the final byte is
// not NUL; the best-effort string is returned alongside the error.
func ReadCString(r io.Reader, length int) (string, error) {
	// Fix: a zero-length buffer made the terminator check below panic.
	if length <= 0 {
		return "", errors.New("string is not null-terminated")
	}
	buf := make([]byte, length)
	// Fix: use io.ReadFull — a bare r.Read may legally return fewer than
	// len(buf) bytes without reporting an error.
	if _, err := io.ReadFull(r, buf); err != nil {
		return string(buf[:clen(buf)]), err
	}
	if buf[len(buf)-1] != 0 {
		return string(buf[:clen(buf)]), errors.New("string is not null-terminated")
	}
	return string(buf[:clen(buf)]), nil
}
// ReadAttribute reads the little-endian binary value of a single
// attribute of type DT from r and returns it wrapped in a
// NodeAttribute. String-like types are not handled here: their
// encoding is format-specific and belongs to the per-format readers.
func ReadAttribute(r io.Reader, name string, DT DataType) (NodeAttribute, error) {
	var (
		attr = NodeAttribute{
			Type: DT,
			Name: name,
		}
		err error
	)
	switch DT {
	case DT_None:
		return attr, nil
	case DT_Byte:
		p := make([]byte, 1)
		// Fix: io.ReadFull instead of a bare Read, which may return 0
		// bytes without an error.
		_, err = io.ReadFull(r, p)
		attr.Value = p[0]
		return attr, err
	case DT_Short:
		var v int16
		err = binary.Read(r, binary.LittleEndian, &v)
		attr.Value = v
		return attr, err
	case DT_UShort:
		var v uint16
		err = binary.Read(r, binary.LittleEndian, &v)
		attr.Value = v
		return attr, err
	case DT_Int:
		var v int32
		err = binary.Read(r, binary.LittleEndian, &v)
		attr.Value = v
		return attr, err
	case DT_UInt:
		var v uint32
		err = binary.Read(r, binary.LittleEndian, &v)
		attr.Value = v
		return attr, err
	case DT_Float:
		var v float32
		err = binary.Read(r, binary.LittleEndian, &v)
		attr.Value = v
		return attr, err
	case DT_Double:
		var v float64
		err = binary.Read(r, binary.LittleEndian, &v)
		attr.Value = v
		return attr, err
	case DT_IVec2, DT_IVec3, DT_IVec4:
		// Integer vectors: one int32 per column.
		var col int
		col, err = attr.GetColumns()
		if err != nil {
			return attr, err
		}
		vec := make(Ivec, col)
		for i := range vec {
			var v int32
			err = binary.Read(r, binary.LittleEndian, &v)
			if err != nil {
				return attr, err
			}
			vec[i] = int(v)
		}
		attr.Value = vec
		return attr, nil
	case DT_Vec2, DT_Vec3, DT_Vec4:
		// Float vectors: one float32 per column.
		var col int
		col, err = attr.GetColumns()
		if err != nil {
			return attr, err
		}
		vec := make(Vec, col)
		for i := range vec {
			var v float32
			err = binary.Read(r, binary.LittleEndian, &v)
			if err != nil {
				return attr, err
			}
			vec[i] = v
		}
		attr.Value = vec
		return attr, nil
	case DT_Mat2, DT_Mat3, DT_Mat3x4, DT_Mat4x3, DT_Mat4:
		// Matrix reading has not been ported from LSLib yet.
		return attr, errors.New("not implemented")
	case DT_Bool:
		var v bool
		err = binary.Read(r, binary.LittleEndian, &v)
		attr.Value = v
		return attr, err
	case DT_ULongLong:
		var v uint64
		err = binary.Read(r, binary.LittleEndian, &v)
		attr.Value = v
		return attr, err
	case DT_Long, DT_Int64:
		var v int64
		err = binary.Read(r, binary.LittleEndian, &v)
		attr.Value = v
		return attr, err
	case DT_Int8:
		var v int8
		err = binary.Read(r, binary.LittleEndian, &v)
		attr.Value = v
		return attr, err
	case DT_UUID:
		// The first three GUID groups are stored little-endian on disk;
		// byte-swap them before handing the buffer to the uuid package.
		var v uuid.UUID
		p := make([]byte, 16)
		// Fix: the read's error and byte count were previously ignored.
		if _, err = io.ReadFull(r, p); err != nil {
			return attr, err
		}
		reverse(p[:4])
		reverse(p[4:6])
		reverse(p[6:8])
		v, err = uuid.FromBytes(p)
		attr.Value = v
		return attr, err
	default:
		// Strings are serialized differently for each file format and should be
		// handled by the format-specific ReadAttribute()
		return attr, fmt.Errorf("ReadAttribute() not implemented for type %v", DT)
	}
}
// LimitReadSeeker returns a ReadSeeker that reads from r
// but stops with EOF after n bytes.
// The underlying implementation is a *LimitedReadSeeker.
// (The comment previously described the stdlib's io.LimitReader.)
func LimitReadSeeker(r io.ReadSeeker, n int64) io.ReadSeeker { return &LimitedReadSeeker{r, n} }

// A LimitedReadSeeker reads from R but limits the amount of
// data returned to just N bytes. Each call to Read
// updates N to reflect the new amount remaining.
// Read returns EOF when N <= 0 or when the underlying R returns EOF.
type LimitedReadSeeker struct {
	R io.ReadSeeker // underlying reader
	N int64         // max bytes remaining
}
// Read fills p from the wrapped reader without exceeding the N
// remaining bytes, decrementing N by the amount actually read. Once the
// budget is exhausted it reports io.EOF.
func (l *LimitedReadSeeker) Read(p []byte) (int, error) {
	if l.N <= 0 {
		return 0, io.EOF
	}
	if remaining := l.N; int64(len(p)) > remaining {
		p = p[:remaining]
	}
	n, err := l.R.Read(p)
	l.N -= int64(n)
	return n, err
}
// Seek repositions the underlying reader and adjusts the remaining
// byte budget N to match.
//
// NOTE(review): the arithmetic assumes the limit window ends exactly N
// bytes past the current position; io.SeekEnd is interpreted as the end
// of that window, not the end of the underlying stream. Confirm callers
// rely on this before changing it.
func (l *LimitedReadSeeker) Seek(offset int64, whence int) (int64, error) {
	switch whence {
	case io.SeekStart:
		// Find the current absolute position to compute how far we move.
		n, err := l.R.Seek(0, io.SeekCurrent)
		if err != nil {
			return n, err
		}
		// Moving backwards grows the budget; forwards shrinks it.
		l.N += n - offset
		return l.R.Seek(offset, whence)
	case io.SeekEnd:
		// Jump to the end of the limited window (N bytes ahead), then
		// apply offset relative to it; offset is expected to be <= 0.
		n, err := l.R.Seek(l.N, io.SeekCurrent)
		if err != nil {
			return n, err
		}
		l.N = 0 - offset
		return l.R.Seek(offset, io.SeekCurrent)
	case io.SeekCurrent:
		l.N -= offset
		return l.R.Seek(offset, whence)
	default:
		// NOTE(review): io.ErrNoProgress is an unusual sentinel for an
		// invalid whence; callers should not match on it.
		return -1, io.ErrNoProgress
	}
}

32
cmd/lsconvert/main.go Normal file
View File

@ -0,0 +1,32 @@
package main
import (
"encoding/xml"
"fmt"
"os"
"strings"
"github.com/kr/pretty"
lslib "github.com/lordwelch/golslib"
)
// main converts the LSF file named by the first argument to LSX-style
// XML on stdout, logging parse diagnostics to stderr.
func main() {
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage: lsconvert <file.lsf>")
		os.Exit(1)
	}
	f, err := os.Open(os.Args[1])
	if err != nil {
		// Fix: the open error was previously ignored and f.Close() was
		// deferred before the error was even inspectable.
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()
	l, err := lslib.ReadLSF(f)
	pretty.Log(err, l)
	v, err := xml.MarshalIndent(struct {
		lslib.Resource
		XMLName string `xml:"save"`
	}{l, ""}, "", "\t")
	if err != nil {
		// Fix: previously the error was printed but the (empty) output
		// was still emitted.
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Post-process the generated XML to match LSX conventions
	// (self-closing tags, capitalised booleans, literal apostrophes).
	n := string(v)
	n = strings.ReplaceAll(n, "></version>", " />")
	n = strings.ReplaceAll(n, "></attribute>", " />")
	n = strings.ReplaceAll(n, "></node>", " />")
	n = strings.ReplaceAll(n, "false", "False")
	n = strings.ReplaceAll(n, "true", "True")
	n = strings.ReplaceAll(n, "&#39;", "'")
	fmt.Printf("%s%s", strings.ToLower(xml.Header), n)
}

47
const.go Normal file
View File

@ -0,0 +1,47 @@
package lslib
// FileVersion is the on-disk format version of an LSF file.
type FileVersion uint32

const (
	// VerInitial is the initial version of the LSF format.
	VerInitial FileVersion = iota + 1
	// VerChunkedCompress is the LSF version that added chunked
	// compression for substreams.
	VerChunkedCompress
	// VerExtendedNodes is the LSF version that extended the node
	// descriptors.
	VerExtendedNodes
	// VerBG3 is the BG3 version; no changes found so far apart from
	// version numbering.
	VerBG3
	// MaxVersion is the latest version supported by this library.
	MaxVersion = iota
)

// CompressionMethod identifies the algorithm used for a file's
// compressed sections (low nibble of the flags byte).
type CompressionMethod int

const (
	CMInvalid CompressionMethod = iota - 1
	CMNone
	CMZlib
	CMLZ4
)

// CompressionLevel encodes the speed/ratio trade-off; it occupies the
// high nibble of the flags byte (see MakeCompressionFlags).
type CompressionLevel int

const (
	FastCompression    CompressionLevel = 0x10
	DefaultCompression CompressionLevel = 0x20
	MaxCompression     CompressionLevel = 0x40
)

10
go.mod Normal file
View File

@ -0,0 +1,10 @@
module github.com/lordwelch/golslib
go 1.15
require (
github.com/google/uuid v1.1.2
github.com/kr/pretty v0.2.1
github.com/pierrec/lz4 v2.6.0+incompatible
github.com/satori/go.uuid v1.2.0
)

21
go.sum Normal file
View File

@ -0,0 +1,21 @@
github.com/frankban/quicktest v1.11.2 h1:mjwHjStlXWibxOohM7HYieIViKyh56mmt3+6viyhDDI=
github.com/frankban/quicktest v1.11.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s=
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A=
github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U=
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

907
lsf.go Normal file
View File

@ -0,0 +1,907 @@
package lslib
import (
"bufio"
"encoding/binary"
"fmt"
"io"
"log"
"os"
)
var (
	// LSFSignature is the magic number at the start of every LSF file ("LSOF").
	LSFSignature = [4]byte{0x4C, 0x53, 0x4F, 0x46}
	// logger writes diagnostics to stderr with source locations.
	logger = log.New(os.Stderr, "lslib:", log.LstdFlags|log.Lshortfile)
)
// LSFHeader is the fixed-size header at the start of an LSF file.
type LSFHeader struct {
	// LSOF file signature
	Signature [4]byte
	// Version of the LSOF file; D:OS EE is version 1/2, D:OS 2 is version 3
	Version FileVersion
	// Possibly version number? (major, minor, rev, build)
	EngineVersion uint32
	// Total uncompressed size of the string hash table
	StringsUncompressedSize uint32
	// Compressed size of the string hash table
	StringsSizeOnDisk uint32
	// Total uncompressed size of the node list
	NodesUncompressedSize uint32
	// Compressed size of the node list
	NodesSizeOnDisk uint32
	// Total uncompressed size of the attribute list
	AttributesUncompressedSize uint32
	// Compressed size of the attribute list
	AttributesSizeOnDisk uint32
	// Total uncompressed size of the raw value buffer
	ValuesUncompressedSize uint32
	// Compressed size of the raw value buffer
	ValuesSizeOnDisk uint32
	// Compression method and level used for the string, node, attribute
	// and value buffers. Uses the same format as packages
	// (see MakeCompressionFlags).
	CompressionFlags byte
	// Possibly unused, always 0
	Unknown2 byte
	Unknown3 uint16
	// Extended node/attribute format indicator, 0 for V2, 0/1 for V3
	Extended uint32
}
// Read populates lsfh from r, reading the fields in their on-disk
// (little-endian) order. When the file is not compressed, the
// *SizeOnDisk fields are copied from the uncompressed sizes so callers
// can treat both layouts uniformly.
func (lsfh *LSFHeader) Read(r io.Reader) error {
	// Fix: a bare r.Read may legally return fewer than 4 bytes without
	// an error; io.ReadFull turns a short read into an error.
	if _, err := io.ReadFull(r, lsfh.Signature[:]); err != nil {
		return err
	}
	// The remaining fields are fixed-width little-endian values, read in
	// exactly the order they appear on disk. This replaces fourteen
	// copy-pasted binary.Read/err-check stanzas.
	fields := []interface{}{
		&lsfh.Version,
		&lsfh.EngineVersion,
		&lsfh.StringsUncompressedSize,
		&lsfh.StringsSizeOnDisk,
		&lsfh.NodesUncompressedSize,
		&lsfh.NodesSizeOnDisk,
		&lsfh.AttributesUncompressedSize,
		&lsfh.AttributesSizeOnDisk,
		&lsfh.ValuesUncompressedSize,
		&lsfh.ValuesSizeOnDisk,
		&lsfh.CompressionFlags,
		&lsfh.Unknown2,
		&lsfh.Unknown3,
		&lsfh.Extended,
	}
	for _, field := range fields {
		if err := binary.Read(r, binary.LittleEndian, field); err != nil {
			return err
		}
	}
	if !lsfh.IsCompressed() {
		// Uncompressed files only store the raw sizes.
		lsfh.NodesSizeOnDisk = lsfh.NodesUncompressedSize
		lsfh.AttributesSizeOnDisk = lsfh.AttributesUncompressedSize
		lsfh.StringsSizeOnDisk = lsfh.StringsUncompressedSize
		lsfh.ValuesSizeOnDisk = lsfh.ValuesUncompressedSize
	}
	return nil
}
// IsCompressed reports whether the header's flags specify a known,
// non-CMNone compression method.
func (lsfh LSFHeader) IsCompressed() bool {
	switch CompressionFlagsToMethod(lsfh.CompressionFlags) {
	case CMNone, CMInvalid:
		return false
	}
	return true
}
// NodeEntry is the raw on-disk node record of an LSF file.
type NodeEntry struct {
	// Long selects the extended (V3) layout with sibling links.
	Long bool
	// Name of this node
	// (16-bit MSB: index into name hash table, 16-bit LSB: offset in hash chain)
	NameHashTableIndex uint32
	// Index of the first attribute of this node
	// (-1: node has no attributes)
	FirstAttributeIndex int32
	// Index of the parent node
	// (-1: this node is a root region)
	ParentIndex int32
	// Index of the next sibling of this node
	// (-1: this is the last node)
	NextSiblingIndex int32
}
// Read parses a node entry from r, choosing the long (extended V3) or
// short layout based on ne.Long.
func (ne *NodeEntry) Read(r io.Reader) error {
	if !ne.Long {
		return ne.readShort(r)
	}
	return ne.readLong(r)
}
// readShort parses the compact (V2) node layout: name hash, first
// attribute index, then parent index, all little-endian.
func (ne *NodeEntry) readShort(r io.Reader) error {
	fields := []interface{}{
		&ne.NameHashTableIndex,
		&ne.FirstAttributeIndex,
		&ne.ParentIndex,
	}
	for _, field := range fields {
		if err := binary.Read(r, binary.LittleEndian, field); err != nil {
			return err
		}
	}
	return nil
}
// readLong parses the extended (V3) node layout: name hash, parent
// index, next sibling index, then first attribute index, little-endian.
func (ne *NodeEntry) readLong(r io.Reader) error {
	fields := []interface{}{
		&ne.NameHashTableIndex,
		&ne.ParentIndex,
		&ne.NextSiblingIndex,
		&ne.FirstAttributeIndex,
	}
	for _, field := range fields {
		if err := binary.Read(r, binary.LittleEndian, field); err != nil {
			return err
		}
	}
	return nil
}
// NameIndex is the index into the name hash table (upper 16 bits of
// NameHashTableIndex).
func (ne NodeEntry) NameIndex() int {
	return int(ne.NameHashTableIndex >> 16)
}

// NameOffset is the offset within the hash chain (lower 16 bits of
// NameHashTableIndex).
func (ne NodeEntry) NameOffset() int {
	return int(ne.NameHashTableIndex & 0xffff)
}
// NodeInfo is the processed node information for a node in the LSF file.
type NodeInfo struct {
	// Index of the parent node
	// (-1: this node is a root region)
	ParentIndex int
	// Index into name hash table
	NameIndex int
	// Offset in hash chain
	NameOffset int
	// Index of the first attribute of this node
	// (-1: node has no attributes)
	FirstAttributeIndex int
}
// AttributeEntry is the raw on-disk attribute record of an LSF file.
type AttributeEntry struct {
	// Long selects the extended (V3) layout with explicit next-attribute
	// links and value offsets.
	Long bool
	// Name of this attribute
	// (16-bit MSB: index into name hash table, 16-bit LSB: offset in hash chain)
	NameHashTableIndex uint32
	// 6-bit LSB: Type of this attribute (see NodeAttribute.DataType)
	// 26-bit MSB: Length of this attribute
	TypeAndLength uint32
	// Index of the node that this attribute belongs to
	// Note: These indexes are assigned seemingly arbitrarily, and are not
	// neccessarily indices into the node list
	NodeIndex int32
	// Index of the next attribute (long layout only; see
	// readAttributeInfo). The original comment here was a copy-paste of
	// NodeIndex's.
	NextAttributeIndex int32
	// Absolute position of attribute value in the value stream
	Offset uint32
}
// Read parses an attribute entry from r, choosing the long (extended
// V3) or short layout based on ae.Long.
func (ae *AttributeEntry) Read(r io.Reader) error {
	if !ae.Long {
		return ae.readShort(r)
	}
	return ae.readLong(r)
}
// readShort parses the compact (V2) attribute layout: name hash,
// type/length word, then owning node index, all little-endian.
func (ae *AttributeEntry) readShort(r io.Reader) error {
	fields := []interface{}{
		&ae.NameHashTableIndex,
		&ae.TypeAndLength,
		&ae.NodeIndex,
	}
	for _, field := range fields {
		if err := binary.Read(r, binary.LittleEndian, field); err != nil {
			return err
		}
	}
	return nil
}
// readLong parses the extended (V3) attribute layout: name hash,
// type/length word, next attribute index, then value offset.
func (ae *AttributeEntry) readLong(r io.Reader) error {
	fields := []interface{}{
		&ae.NameHashTableIndex,
		&ae.TypeAndLength,
		&ae.NextAttributeIndex,
		&ae.Offset,
	}
	for _, field := range fields {
		if err := binary.Read(r, binary.LittleEndian, field); err != nil {
			return err
		}
	}
	return nil
}
// NameIndex is the index into the name hash table (upper 16 bits of
// NameHashTableIndex).
func (ae AttributeEntry) NameIndex() int {
	return int(ae.NameHashTableIndex >> 16)
}

// NameOffset is the offset within the hash chain (lower 16 bits of
// NameHashTableIndex).
func (ae AttributeEntry) NameOffset() int {
	return int(ae.NameHashTableIndex & 0xffff)
}

// TypeID is the type of this attribute (see NodeAttribute.DataType),
// packed into the 6 least significant bits of TypeAndLength.
func (ae AttributeEntry) TypeID() DataType {
	return DataType(ae.TypeAndLength & 0x3f)
}

// Len is the byte length of this attribute, packed into the upper 26
// bits of TypeAndLength.
func (ae AttributeEntry) Len() int {
	return int(ae.TypeAndLength >> 6)
}
// AttributeInfo is the processed form of an AttributeEntry.
type AttributeInfo struct {
	V2 bool
	// Index into name hash table
	NameIndex int
	// Offset in hash chain
	NameOffset int
	// Type of this attribute (see NodeAttribute.DataType)
	TypeId DataType
	// Length of this attribute
	Length uint
	// Absolute position of attribute data in the values section
	DataOffset uint
	// Index of the next attribute in this node
	// (-1: this is the last attribute)
	NextAttributeIndex int
}

// LSFReader wraps the buffered input stream of an LSF file.
type LSFReader struct {
	data *bufio.Reader
}
// extract to lsf package
func ReadNames(r io.Reader) ([][]string, error) {
var (
numHashEntries uint32
err error
names [][]string
)
// n, _ := r.Seek(0, io.SeekCurrent)
// logger.Print("current location: ", n)
err = binary.Read(r, binary.LittleEndian, &numHashEntries)
// logger.Print("names size: ", numHashEntries)
if err != nil {
return nil, err
}
names = make([][]string, int(numHashEntries))
for i, _ := range names {
var numStrings uint16
err = binary.Read(r, binary.LittleEndian, &numStrings)
// n, _ = r.Seek(0, io.SeekCurrent)
// logger.Print("current location: ", n, " name count: ", numStrings)
var hash = make([]string, int(numStrings))
for x, _ := range hash {
var (
nameLen uint16
name []byte
)
err = binary.Read(r, binary.LittleEndian, &nameLen)
if err != nil {
return nil, err
}
name = make([]byte, nameLen)
_, err = r.Read(name)
if err != nil {
return nil, err
}
hash[x] = string(name)
}
names[i] = hash
}
return names, nil
}
// readNodeInfo reads node entries from r until the stream is exhausted,
// resolving each raw NodeEntry into a NodeInfo.
//
// NOTE(review): entries are read until Read returns an error, after
// which the final (partially read) element is dropped and the
// terminating error — normally io.EOF — is returned alongside the
// nodes. Callers appear to be expected to treat EOF as success; confirm
// before changing this contract.
func readNodeInfo(r io.Reader, longNodes bool) ([]NodeInfo, error) {
	// Console.WriteLine(" ----- DUMP OF NODE TABLE -----");
	var (
		nodes []NodeInfo
		err   error
	)
	index := 0
	for err == nil {
		var node NodeInfo
		// var pos = lsfr.Position;
		item := &NodeEntry{Long: longNodes}
		err = item.Read(r)
		// Unpack the raw entry; the name word holds both table index and
		// chain offset.
		node.FirstAttributeIndex = int(item.FirstAttributeIndex)
		node.NameIndex = item.NameIndex()
		node.NameOffset = item.NameOffset()
		node.ParentIndex = int(item.ParentIndex)
		// Console.WriteLine(String.Format(
		// "{0}: {1} @ {2:X} (parent {3}, firstAttribute {4})",
		// index, Names[node.NameIndex][node.NameOffset], pos, node.ParentIndex,
		// node.FirstAttributeIndex
		// ));
		nodes = append(nodes, node)
		index++
	}
	return nodes[:len(nodes)-1], err
}
// readAttributeInfo reads attribute entries from r until the stream is
// exhausted, resolving each raw AttributeEntry into an AttributeInfo.
//
// In the long (V3) layout, value offsets and next-attribute links are
// stored explicitly. In the short (V2) layout they are reconstructed
// here: data offsets accumulate as attributes are read, and per-node
// next-attribute chains are rebuilt via prevAttributeRefs (indexed by
// NodeIndex+1).
//
// NOTE(review): like readNodeInfo, this loops until Read fails and then
// drops the final, partially read element — but here the terminating
// error is swallowed entirely. Confirm that is acceptable to callers.
func readAttributeInfo(r io.Reader, long bool) []AttributeInfo {
	// var rawAttributes = new List<AttributeEntryV2>();
	var (
		prevAttributeRefs []int
		dataOffset        uint = 0
		index                  = 0
		nextAttrIndex     int  = -1
		attributes        []AttributeInfo
		err               error
	)
	for err == nil {
		attribute := &AttributeEntry{Long: long}
		err = attribute.Read(r)
		// pretty.Log(err, attribute)
		if long {
			// Explicit offsets/links are only present in the long layout.
			dataOffset = uint(attribute.Offset)
			nextAttrIndex = int(attribute.NextAttributeIndex)
		}
		resolved := AttributeInfo{
			NameIndex:          attribute.NameIndex(),
			NameOffset:         attribute.NameOffset(),
			TypeId:             attribute.TypeID(),
			Length:             uint(attribute.Len()),
			DataOffset:         dataOffset,
			NextAttributeIndex: nextAttrIndex,
		}
		if !long {
			// Rebuild the per-node attribute chain: link the node's
			// previous attribute (if any) to this one.
			nodeIndex := int(attribute.NodeIndex + 1)
			if len(prevAttributeRefs) > int(nodeIndex) {
				if prevAttributeRefs[nodeIndex] != -1 {
					attributes[prevAttributeRefs[nodeIndex]].NextAttributeIndex = index
				}
				prevAttributeRefs[nodeIndex] = index
			} else {
				// Grow the ref table with -1 (no previous attribute) up
				// to nodeIndex, then record this attribute.
				for len(prevAttributeRefs) < nodeIndex {
					prevAttributeRefs = append(prevAttributeRefs, -1)
				}
				prevAttributeRefs = append(prevAttributeRefs, index)
			}
			// rawAttributes.Add(attribute);
			// Short layout stores values back to back; accumulate offsets.
			dataOffset += uint(resolved.Length)
		}
		attributes = append(attributes, resolved)
		index++
	}
	return attributes[:len(attributes)-1]
	// }
	// Console.WriteLine(" ----- DUMP OF ATTRIBUTE REFERENCES -----");
	// for (int i = 0; i < prevAttributeRefs.Count; i++)
	// {
	// Console.WriteLine(String.Format("Node {0}: last attribute {1}", i, prevAttributeRefs[i]));
	// }
	// Console.WriteLine(" ----- DUMP OF V2 ATTRIBUTE TABLE -----");
	// for (int i = 0; i < lsfr.Attributes.Count; i++)
	// {
	// var resolved = lsfr.Attributes[i];
	// var attribute = rawAttributes[i];
	// var debug = String.Format(
	// "{0}: {1} (offset {2:X}, typeId {3}, nextAttribute {4}, node {5})",
	// i, Names[resolved.NameIndex][resolved.NameOffset], resolved.DataOffset,
	// resolved.TypeId, resolved.NextAttributeIndex, attribute.NodeIndex
	// );
	// Console.WriteLine(debug);
	// }
}
// ReadLSF parses a binary LSF resource from r and returns the decoded
// Resource tree. The stream consists of a header followed by four
// (optionally compressed) sections: names, node headers, attribute
// headers and attribute values.
func ReadLSF(r io.Reader) (Resource, error) {
	var (
		err error
		// names is the static string hash map: names[index][offset].
		names [][]string
		// nodeInfo is the preprocessed list of node (structure) headers.
		nodeInfo []NodeInfo
		// attributeInfo is the preprocessed list of node attribute headers.
		attributeInfo []AttributeInfo
		// nodeInstances holds every decoded node; parentless ones become regions.
		nodeInstances []*Node
	)
	hdr := &LSFHeader{}
	// Report a read failure as such instead of masking it behind a
	// signature-mismatch message.
	if err = hdr.Read(r); err != nil {
		return Resource{}, err
	}
	if hdr.Signature != LSFSignature {
		return Resource{}, fmt.Errorf("invalid LSF signature; expected %v, got %v", LSFSignature, hdr.Signature)
	}
	if hdr.Version < VerInitial || hdr.Version > MaxVersion {
		return Resource{}, fmt.Errorf("LSF version %v is not supported", hdr.Version)
	}
	method := CompressionFlagsToMethod(hdr.CompressionFlags)
	isCompressed := method != CMNone && method != CMInvalid
	if hdr.StringsSizeOnDisk > 0 || hdr.StringsUncompressedSize > 0 {
		uncompressed := io.LimitReader(r, int64(hdr.StringsSizeOnDisk))
		if isCompressed {
			// The name table is never chunk-compressed.
			uncompressed = Decompress(uncompressed, hdr.CompressionFlags, false)
		}
		names, err = ReadNames(uncompressed)
		if err != nil && err != io.EOF {
			return Resource{}, err
		}
	}
	if hdr.NodesSizeOnDisk > 0 || hdr.NodesUncompressedSize > 0 {
		uncompressed := io.LimitReader(r, int64(hdr.NodesSizeOnDisk))
		if isCompressed {
			uncompressed = Decompress(uncompressed, hdr.CompressionFlags, hdr.Version >= VerChunkedCompress)
		}
		// Extended (long) node entries are used from VerExtendedNodes on,
		// when the header's Extended flag is set.
		longNodes := hdr.Version >= VerExtendedNodes && hdr.Extended == 1
		nodeInfo, err = readNodeInfo(uncompressed, longNodes)
		if err != nil && err != io.EOF {
			return Resource{}, err
		}
	}
	if hdr.AttributesSizeOnDisk > 0 || hdr.AttributesUncompressedSize > 0 {
		var uncompressed io.Reader = io.LimitReader(r, int64(hdr.AttributesSizeOnDisk))
		if isCompressed {
			uncompressed = Decompress(uncompressed, hdr.CompressionFlags, hdr.Version >= VerChunkedCompress)
		}
		longAttributes := hdr.Version >= VerExtendedNodes && hdr.Extended == 1
		attributeInfo = readAttributeInfo(uncompressed, longAttributes)
	}
	var uncompressed io.Reader = io.LimitReader(r, int64(hdr.ValuesSizeOnDisk))
	if hdr.ValuesSizeOnDisk > 0 || hdr.ValuesUncompressedSize > 0 {
		if isCompressed {
			// Decompress from the size-limited reader, like every other
			// section (previously this read from the raw stream r,
			// bypassing the ValuesSizeOnDisk limit).
			uncompressed = Decompress(uncompressed, hdr.CompressionFlags, hdr.Version >= VerChunkedCompress)
		}
	}
	res := Resource{}
	nodeInstances, err = ReadRegions(uncompressed, names, nodeInfo, attributeInfo, hdr.Version)
	if err != nil {
		return res, err
	}
	// Top-level nodes (no parent) are the resource's regions.
	for _, v := range nodeInstances {
		if v.Parent == nil {
			res.Regions = append(res.Regions, v)
		}
	}
	// EngineVersion packs major.minor.revision.build into one uint32.
	res.Metadata.MajorVersion = (hdr.EngineVersion & 0xf0000000) >> 28
	res.Metadata.MinorVersion = (hdr.EngineVersion & 0xf000000) >> 24
	res.Metadata.Revision = (hdr.EngineVersion & 0xff0000) >> 16
	res.Metadata.BuildNumber = (hdr.EngineVersion & 0xffff)
	return res, nil
}
// ReadRegions decodes every node described by nodeInfo from r, in order,
// and wires up parent/child links. Nodes with ParentIndex == -1 are
// region roots (their RegionName is set); all others are attached to the
// previously decoded node at their ParentIndex.
func ReadRegions(r io.Reader, names [][]string, nodeInfo []NodeInfo, attributeInfo []AttributeInfo, Version FileVersion) ([]*Node, error) {
	NodeInstances := make([]*Node, 0, len(nodeInfo))
	// Use a distinct loop variable name; shadowing the nodeInfo slice
	// parameter made the original hard to read.
	for _, ni := range nodeInfo {
		node, err := ReadNode(r, ni, names, attributeInfo, Version)
		if ni.ParentIndex == -1 {
			node.RegionName = node.Name
			NodeInstances = append(NodeInstances, &node)
			if err != nil {
				return NodeInstances, err
			}
			continue
		}
		// A corrupt file could reference a parent that has not been
		// decoded yet; report it instead of panicking on the index.
		if ni.ParentIndex < 0 || ni.ParentIndex >= len(NodeInstances) {
			return NodeInstances, fmt.Errorf("node %q references invalid parent index %d", node.Name, ni.ParentIndex)
		}
		node.Parent = NodeInstances[ni.ParentIndex]
		NodeInstances = append(NodeInstances, &node)
		NodeInstances[ni.ParentIndex].AppendChild(&node)
		if err != nil {
			return NodeInstances, err
		}
	}
	return NodeInstances, nil
}
// ReadNode decodes a single node and all of its attributes from r.
// Attributes are visited by following the NextAttributeIndex chain
// starting at ni.FirstAttributeIndex; the chain is terminated by -1.
func ReadNode(r io.Reader, ni NodeInfo, names [][]string, attributeInfo []AttributeInfo, Version FileVersion) (Node, error) {
	node := Node{Name: names[ni.NameIndex][ni.NameOffset]}
	logger.Printf("reading node %s", names[ni.NameIndex][ni.NameOffset])
	for idx := ni.FirstAttributeIndex; idx != -1; {
		info := attributeInfo[idx]
		attrName := names[info.NameIndex][info.NameOffset]
		attr, err := ReadLSFAttribute(r, attrName, info.TypeId, info.Length, Version)
		// Keep the (possibly partial) attribute even on error, matching
		// the behavior callers expect.
		node.Attributes = append(node.Attributes, attr)
		if err != nil {
			return node, err
		}
		idx = info.NextAttributeIndex
	}
	return node, nil
}
// ReadLSFAttribute reads a single attribute value of type DT and on-disk
// length from r. LSF and LSB serialize the buffer/string types
// differently, so those are handled here; every other type is delegated
// to the shared ReadAttribute.
func ReadLSFAttribute(r io.Reader, name string, DT DataType, length uint, Version FileVersion) (NodeAttribute, error) {
	var (
		attr = NodeAttribute{
			Type: DT,
			Name: name,
		}
		err error
	)
	logger.Printf("reading attribute '%v' type %v of length %v", name, DT, length)
	switch DT {
	case DT_String, DT_Path, DT_FixedString, DT_LSString, DT_WString, DT_LSWString:
		var v string
		v, err = ReadCString(r, int(length))
		attr.Value = v
		return attr, err
	case DT_TranslatedString:
		var str TranslatedString
		if Version >= VerBG3 {
			// BG3 files store a 16-bit version in place of the value text.
			var version uint16
			if err = binary.Read(r, binary.LittleEndian, &version); err != nil {
				return attr, err
			}
			str.Version = version
		} else {
			str.Version = 0
			// Pre-BG3: a 32-bit value length followed by the value bytes.
			var vlength int32
			if err = binary.Read(r, binary.LittleEndian, &vlength); err != nil {
				return attr, err
			}
			if vlength < 0 {
				return attr, fmt.Errorf("invalid TranslatedString value length %d", vlength)
			}
			// Size the buffer from vlength, not the attribute's total
			// length: the original read vlength and then ignored it, which
			// over-read into the handle that follows the value.
			v := make([]byte, vlength)
			// io.ReadFull instead of r.Read: a short read must be an error.
			if _, err = io.ReadFull(r, v); err != nil {
				return attr, err
			}
			str.Value = string(v)
		}
		var handleLength int32
		if err = binary.Read(r, binary.LittleEndian, &handleLength); err != nil {
			return attr, err
		}
		str.Handle, err = ReadCString(r, int(handleLength))
		attr.Value = str
		return attr, err
	case DT_TranslatedFSString:
		var v TranslatedFSString
		v, err = ReadTranslatedFSString(r, Version)
		attr.Value = v
		return attr, err
	case DT_ScratchBuffer:
		// Opaque blob of exactly `length` bytes.
		v := make([]byte, length)
		_, err = io.ReadFull(r, v)
		attr.Value = v
		return attr, err
	default:
		// Types serialized identically in LSF and LSB.
		return ReadAttribute(r, name, DT)
	}
}
// ReadTranslatedFSString reads a TranslatedFSString from r: a version or
// value, a handle, and a list of substitution arguments, each of which
// carries a key, a nested TranslatedFSString and a value.
func ReadTranslatedFSString(r io.Reader, Version FileVersion) (TranslatedFSString, error) {
	var (
		str = TranslatedFSString{}
		err error
	)
	if Version >= VerBG3 {
		// BG3 files store a 16-bit version in place of the value text.
		var version uint16
		if err = binary.Read(r, binary.LittleEndian, &version); err != nil {
			return str, err
		}
		str.Version = version
	} else {
		str.Version = 0
		// Pre-BG3: a 32-bit length followed by the value string.
		var length int32
		if err = binary.Read(r, binary.LittleEndian, &length); err != nil {
			return str, err
		}
		str.Value, err = ReadCString(r, int(length))
		if err != nil {
			return str, err
		}
	}
	var handleLength int32
	if err = binary.Read(r, binary.LittleEndian, &handleLength); err != nil {
		return str, err
	}
	str.Handle, err = ReadCString(r, int(handleLength))
	if err != nil {
		return str, err
	}
	var arguments int32
	if err = binary.Read(r, binary.LittleEndian, &arguments); err != nil {
		return str, err
	}
	// Guard against a corrupt negative count, which would panic in make.
	if arguments < 0 {
		return str, fmt.Errorf("invalid TranslatedFSString argument count %d", arguments)
	}
	str.Arguments = make([]TranslatedFSStringArgument, 0, arguments)
	for i := 0; i < int(arguments); i++ {
		arg := TranslatedFSStringArgument{}
		var argKeyLength int32
		if err = binary.Read(r, binary.LittleEndian, &argKeyLength); err != nil {
			return str, err
		}
		arg.Key, err = ReadCString(r, int(argKeyLength))
		if err != nil {
			return str, err
		}
		arg.String, err = ReadTranslatedFSString(r, Version)
		if err != nil {
			return str, err
		}
		var argValueLength int32
		if err = binary.Read(r, binary.LittleEndian, &argValueLength); err != nil {
			return str, err
		}
		arg.Value, err = ReadCString(r, int(argValueLength))
		if err != nil {
			return str, err
		}
		str.Arguments = append(str.Arguments, arg)
	}
	return str, nil
}

106
resource.go Normal file
View File

@ -0,0 +1,106 @@
package lslib
import (
"encoding/xml"
"io"
)
// LSMetadata is the version metadata of a Resource; it is serialized as
// the attributes of the <version> XML element.
type LSMetadata struct {
	//public const uint CurrentMajorVersion = 33;
	// Timestamp is excluded from XML output.
	Timestamp uint64 `xml:"-"`
	MajorVersion uint32 `xml:"major,attr"`
	MinorVersion uint32 `xml:"minor,attr"`
	Revision uint32 `xml:"revision,attr"`
	BuildNumber uint32 `xml:"build,attr"`
}
// format pairs a resource format's name and magic signature with the
// function that reads a Resource in that format.
type format struct {
	name, magic string
	read func(io.Reader) (Resource, error)
}
// Resource is a decoded resource document: version metadata plus its
// top-level region nodes.
type Resource struct {
	Metadata LSMetadata `xml:"version"`
	Regions []*Node `xml:"region"`
}
// Read is currently a stub: the supplied reader is ignored and the
// Resource is left unchanged.
func (r *Resource) Read(io.Reader) {
}
// public Resource()
// {
// Metadata.MajorVersion = 3;
// }
// Node is one element of the resource tree. Parent is nil for region
// roots; RegionName is set only for region roots (see ReadRegions) and
// is excluded from generic XML marshalling — MarshalXML emits the
// <region> wrapper itself.
type Node struct {
	Name string `xml:"id,attr"`
	Parent *Node `xml:"-"`
	Attributes []NodeAttribute `xml:"attribute"`
	Children []*Node `xml:"children>node,omitempty"`
	RegionName string `xml:"-"`
}
// MarshalXML implements xml.Marshaler. A node is emitted as
// <node id="...">, wrapped in <region id="..."> when it is a region
// root, with its attributes first and any children nested inside a
// <children> element. Unlike the original, every encoder error is now
// propagated instead of being discarded.
func (n Node) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	region := xml.Name{Local: "region"}
	nodeElem := xml.Name{Local: "node"}
	id := xml.Name{Local: "id"}
	children := xml.Name{Local: "children"}
	if n.RegionName != "" {
		err := e.EncodeToken(xml.StartElement{
			Name: region,
			Attr: []xml.Attr{{Name: id, Value: n.Name}},
		})
		if err != nil {
			return err
		}
	}
	err := e.EncodeToken(xml.StartElement{
		Name: nodeElem,
		Attr: []xml.Attr{{Name: id, Value: n.Name}},
	})
	if err != nil {
		return err
	}
	if err = e.EncodeElement(n.Attributes, xml.StartElement{Name: xml.Name{Local: "attribute"}}); err != nil {
		return err
	}
	if len(n.Children) > 0 {
		if err = e.EncodeToken(xml.StartElement{Name: children}); err != nil {
			return err
		}
		if err = e.Encode(n.Children); err != nil {
			return err
		}
		if err = e.EncodeToken(xml.EndElement{Name: children}); err != nil {
			return err
		}
	}
	if err = e.EncodeToken(xml.EndElement{Name: nodeElem}); err != nil {
		return err
	}
	if n.RegionName != "" {
		if err = e.EncodeToken(xml.EndElement{Name: region}); err != nil {
			return err
		}
	}
	return nil
}
// ChildCount returns the number of direct children of n.
// (The unused named return value `sum` and dead commented-out code from
// an earlier multi-map design have been removed.)
func (n Node) ChildCount() int {
	return len(n.Children)
}
// AppendChild adds child to n's list of direct children. The child's
// Parent pointer is not set here; callers (see ReadRegions) assign it
// themselves.
func (n *Node) AppendChild(child *Node) {
	n.Children = append(n.Children, child)
}
// int TotalChildCount()
// {
// int count = 0;
// foreach (var key in Children)
// {
// foreach (var child in key.Value)
// {
// count += 1 + child.TotalChildCount();
// }
// }
// return count;
// }