Compare commits

...

2 commits

4 changed files with 29 additions and 92 deletions

.gitmodules vendored

@@ -4,3 +4,6 @@
[submodule "libs/pixie"]
path = libs/pixie
url = https://git.myou.dev/MyouProject/pixie
[submodule "libs/nim_zstd"]
path = libs/nim_zstd
url = https://github.com/DiThi/nim_zstd


@@ -131,78 +131,9 @@ when defined(isNimSkull):
if a.len != 0:
copyMem(result[0].addr, a.arr.addr, result.len * sizeof(T))
# The following is just copied straight from hashes.nim, just replacing openArray by ArrRef...
when defined(js):
proc imul(a, b: uint32): uint32 =
# https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/imul
let mask = 0xffff'u32
var
aHi = (a shr 16) and mask
aLo = a and mask
bHi = (b shr 16) and mask
bLo = b and mask
result = (aLo * bLo) + (aHi * bLo + aLo * bHi) shl 16
else:
template imul(a, b: uint32): untyped = a * b
proc rotl32(x: uint32, r: int): uint32 {.inline.} =
(x shl r) or (x shr (32 - r))
proc hash*[T](arr: ArrRef[T]): Hash =
# https://github.com/PeterScott/murmur3/blob/master/murmur3.c
const
c1 = 0xcc9e2d51'u32
c2 = 0x1b873593'u32
n1 = 0xe6546b64'u32
m1 = 0x85ebca6b'u32
m2 = 0xc2b2ae35'u32
let
x = arr.to byte
size = len(x)
stepSize = 4 # 32-bit
n = size div stepSize
var
h1: uint32
i = 0
# body
while i < n * stepSize:
var k1: uint32
when defined(js) or defined(sparc) or defined(sparc64):
var j = stepSize
while j > 0:
dec j
k1 = (k1 shl 8) or (ord(x[i+j])).uint32
else:
k1 = cast[ptr uint32](unsafeAddr x[i])[]
inc i, stepSize
k1 = imul(k1, c1)
k1 = rotl32(k1, 15)
k1 = imul(k1, c2)
h1 = h1 xor k1
h1 = rotl32(h1, 13)
h1 = h1*5 + n1
# tail
var k1: uint32
var rem = size mod stepSize
while rem > 0:
dec rem
k1 = (k1 shl 8) or (ord(x[i+rem])).uint32
k1 = imul(k1, c1)
k1 = rotl32(k1, 15)
k1 = imul(k1, c2)
h1 = h1 xor k1
# finalization
h1 = h1 xor size.uint32
h1 = h1 xor (h1 shr 16)
h1 = imul(h1, m1)
h1 = h1 xor (h1 shr 13)
h1 = imul(h1, m2)
h1 = h1 xor (h1 shr 16)
return cast[Hash](h1)
# Make use of stdlib's murmur3
# NOTE: We use the size of elements in bytes instead of the actual size
# just in case the actual size is bigger than that.
hash(cast[ptr UncheckedArray[byte]](arr.arr.addr).toOpenArray(0, arr.len * sizeof(T) - 1))
# TODO: for bigger elements, would a different algorithm be faster?
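
For reference, a minimal standalone sketch of the same approach outside ArrRef: view the element storage as a flat byte buffer and hand it to std/hashes, which uses MurmurHash3 under the hood. The hashBytes helper below is illustrative only, not part of this change.

import std/hashes

proc hashBytes[T](data: openArray[T]): Hash =
  # hash the contiguous element storage as raw bytes, mirroring hash*(arr: ArrRef[T]) above
  if data.len == 0:
    return Hash(0)
  result = hash(cast[ptr UncheckedArray[byte]](data[0].unsafeAddr).toOpenArray(0, data.len * sizeof(T) - 1))

# same bytes give the same hash, regardless of the container
assert hashBytes([1'i32, 2, 3]) == hashBytes(@[1'i32, 2, 3])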

libs/nim_zstd Submodule

@@ -0,0 +1 @@
Subproject commit 75ee00aa3da80903dc509f1ff1605304cc77c173


@@ -32,10 +32,11 @@
{.experimental: "dotOperators".}
import strutils, tables, algorithm, sequtils, strformat, bitops
import memfiles
import std/[strutils, tables, algorithm, sequtils, strformat, bitops]
import std/memfiles
# import sugar
import hashes
import std/hashes
import zstd/decompress
# TODO!! ADD BOUND CHECKS TO ALL FNODES AND DNA1 PARSING!
# TODO: implement big endian, test 32 bit
@@ -56,6 +57,8 @@ type
old_mem_ptr: pointer
# end pointer for bounds checking
endp: pointer
# byte seq in case we used zstd
byte_seq: seq[byte]
BlendFile* = ref BlendFileVal
@@ -142,13 +145,16 @@ template `-`(p, q: pointer): int = cast[int](cast[int](p) -% cast[int](q))
template `+=`(p: pointer, i: Natural) = p = cast[pointer](cast[int](p) +% cast[int](i))
# this is for getting a string that may not be null terminated
# otherwise you can just do $cast[cstring](x.addr)
# otherwise you could just do $cast[cstring](x.addr)
proc getString(data: pointer, max_size: Natural): string =
let u8 = cast[ptr UncheckedArray[char]](data)
for i in 0 ..< max_size:
var i = 0
while i < max_size:
if u8[i] == '\0':
break
result &= u8[i]
inc(i)
result.setLen i
copyMem(result.cstring, data, i)
proc build_dna_table(dna_table: ptr FBlock64): (TypeAttrsRef, StructToTypeMap, seq[cstring], TypeLengths) =
var type_attrs = new TypeAttrsRef
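
Pulled out of the diff, the rewritten string helper as a self-contained sketch: it first scans for a terminating NUL, sizes the result once, then copies the prefix with a single copyMem instead of appending character by character. The getStringDemo name, the zero-length guard, and the sample buffer are additions for illustration only.

proc getStringDemo(data: pointer, maxSize: Natural): string =
  # scan up to maxSize bytes for a NUL, then copy the prefix in one go
  let u8 = cast[ptr UncheckedArray[char]](data)
  var i = 0
  while i < maxSize and u8[i] != '\0':
    inc i
  result.setLen i
  if i > 0:
    copyMem(result.cstring, data, i)

var field = ['C', 'u', 'b', 'e', '\0', '\0', '\0', '\0']   # fixed-size, NUL-padded name field
assert getStringDemo(field[0].addr, field.len) == "Cube"
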
@@ -230,25 +236,21 @@ proc build_dna_table(dna_table: ptr FBlock64): (TypeAttrsRef, StructToTypeMap, s
)
aoffset += asize
offset += 4
# if true:
# # if lengths[type_id] != aoffset:
# echo &"{types[type_id]}: {lengths[type_id]}, {aoffset}"
# var offsets = toSeq(type_attrs[type_id].pairs)
# for _,(k,v) in offsets.sortedByIt(it[1].offset):
# echo &"{types[type_id]}.{k}:\t\t{v.offset}"
assert lengths[type_id] == aoffset
# type_attrs[type_id]["(size)"] = AttrOffset(offset: lengths[type_id])
return (type_attrs, struct_to_type, types, lengths)
proc openBlendFile*(path: string, data: pointer, len: int): BlendFile =
result = new BlendFile
# result.mem_file = memfiles.open(path, mode=fmRead)
result.file_path = path
# let mem = result.mem_file.mem
# result.mem = mem
# let file_length = result.mem_file.size
let mem = data
let file_length = len
let (mem, file_length) = if cast[ptr uint32](data)[] == 0xFD2FB528'u32:
# Zstandard compressed file
# TODO: store offsets of each zstd frame so we can semi-random access
# big files later
result.byte_seq = decompress cast[ptr UncheckedArray[byte]](data).toOpenArray(0, len-1)
(result.byte_seq[0].addr.pointer, result.byte_seq.len)
else:
(data, len)
assert file_length > 32, "Invalid file size"
result.endp = mem + file_length
let u8 = cast[ptr UncheckedArray[uint8]](mem)
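
The new branch in openBlendFile keys off the Zstandard frame magic number, 0xFD2FB528 when the first four bytes are read as a little-endian uint32 (28 B5 2F FD on disk), and decompresses into byte_seq before parsing. Below is a standalone sketch of the same detection against a file path; looksLikeZstd is a hypothetical helper, not part of this change.

import std/streams

proc looksLikeZstd(path: string): bool =
  # read the first 4 bytes and compare them against the zstd frame magic
  let s = newFileStream(path, fmRead)
  if s == nil: return false
  defer: s.close()
  var magic: uint32
  if s.readData(magic.addr, 4) != 4: return false
  when cpuEndian == littleEndian:
    result = magic == 0xFD2FB528'u32
  else:
    result = magic == 0x28B52FFD'u32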