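## Fixed-size, heap-allocated arrays behind a single `ref`.
##
## `ArrRef[T]` stores a small header (`byte_len`, plus a debug-only pointer)
## followed by the elements themselves in one `unsafeNew` allocation, so the
## whole array is a single GC-managed block that can be reinterpreted as a
## different element type with `to`.
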
import std/strformat
import std/hashes
import ./slice_mem
export slice_mem

const header_size =
  when not defined(release):
    sizeof(pointer)*2 + sizeof(Natural)
  else:
    sizeof(pointer)

type ArrRef*[T] = ref object
  byte_len: int
  when not defined(release):
    # to see in the debugger
    arr_ptr: ptr T
  arr: UncheckedArray[T]

proc newArrRef*[T](size: Natural): ArrRef[T] =
  when defined(TCC):
    var r: ref pointer # workaround for tcc bug
    unsafeNew(r, size * sizeof(T) + header_size)
    result = cast[ArrRef[T]](r)
  else:
    unsafeNew(result, size * sizeof(T) + header_size)
  result.byte_len = size * sizeof(T)
  when not defined(release):
    result.arr_ptr = result.arr[0].addr

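# A minimal usage sketch (not part of the module): allocate a fixed-size array
# and use the accessors defined below. The element values are illustrative.
#
#   let a = newArrRef[float32](4)
#   a.fill(0'f32)
#   a[0] = 1.5
#   echo a.len      # 4
#   echo a.byteLen  # 16
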
template len*[T](a: ArrRef[T]): Natural =
  a.byte_len div sizeof(T)

template byteLen*[T](a: ArrRef[T]): Natural =
  a.byte_len

template low*[T](a: ArrRef[T]): Natural = 0

template high*[T](a: ArrRef[T]): int = a.len - 1

proc `[]`*[T](a: ArrRef[T], i: Natural): var T =
  when compileOption("rangechecks"):
    if i >= a.len:
      raise newException(RangeDefect, &"index out of range: {i} >= {a.len}")
  a.arr[i]

proc `[]=`*[T](a: ArrRef[T], i: Natural, v: T) =
  when compileOption("rangechecks"):
    if i >= a.len:
      raise newException(RangeDefect, &"index out of range: {i} >= {a.len}")
  a.arr[i] = v

template `[]`*[T](a: ArrRef[T], i: BackwardsIndex): T =
  a[a.len - i.int]

template `[]=`*[T](a: ArrRef[T], i: BackwardsIndex, v: T) =
  a[a.len - i.int] = v

template toPointer*[T](a: ArrRef[T]): pointer = a.arr[0].addr

iterator items*[T](a: ArrRef[T]): T =
  for i in 0 ..< a.len:
    yield a[i]

iterator pairs*[T](a: ArrRef[T]): tuple[key: int, val: T] =
  for i in 0 ..< a.len:
    yield (i, a[i])

iterator mitems*[T](a: ArrRef[T]): var T =
  for i in 0 ..< a.len:
    yield a[i]

iterator mpairs*[T](a: ArrRef[T]): tuple[key: int, val: var T] =
  for i in 0 ..< a.len:
    yield (i, a[i])

proc `$`*[T](a: ArrRef[T]): string =
  result = "ArrRef(["
  if a.byte_len >= sizeof(T):
    let hi = a.high
    for i in 0 ..< hi:
      result &= $a[i] & ", "
    result &= $a[hi]
  result &= "])"

template to*[T](a: ArrRef[T], U: untyped): untyped =
  cast[ArrRef[U]](a)

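# Reinterpretation sketch (illustrative, not part of the module): `to` casts
# the same allocation to a different element type, so `len` changes because it
# is derived from `byte_len`. A 4-element ArrRef[float32] viewed as bytes has
# length 16:
#
#   let floats = newArrRef[float32](4)
#   let bytes = floats.to(byte)
#   assert bytes.len == 16
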
proc fill*[T](a: ArrRef[T], v: T) =
  for i in a.low .. a.high: a[i] = v

proc newArrRefWith*[T](size: Natural, v: T): ArrRef[T] =
  when defined(TCC):
    var r: ref pointer # workaround for tcc bug
    unsafeNew(r, size * sizeof(T) + header_size)
    result = cast[ArrRef[T]](r)
  else:
    unsafeNew(result, size * sizeof(T) + header_size)
  result.byte_len = size * sizeof(T)
  when not defined(release):
    result.arr_ptr = result.arr[0].addr
  for i in result.low .. result.high: result[i] = v

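# Construction-with-a-value sketch (illustrative): newArrRefWith combines
# allocation and fill in one call.
#
#   let ones = newArrRefWith[int](3, 1)
#   assert $ones == "ArrRef([1, 1, 1])"
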
proc newArrRef*[T](s: seq[T] | ArrRef[T] | openArray[T]): ArrRef[T] =
  result = newArrRef[T](s.len)
  if s.len != 0:
    copyMem(result.arr.addr, s[0].addr, s.len * sizeof(T))

template newArrRef*[T](s: string): ArrRef[T] =
  newArrRef[byte](s.toOpenArrayByte(0, s.high)).to(T)

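# String-conversion sketch (illustrative): the string's bytes are copied and
# the result is reinterpreted as the requested element type.
#
#   let raw = newArrRef[byte]("abc")
#   assert raw.len == 3
#   assert raw[0] == byte('a')
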
proc newArrRef*[T; U: not T](s: seq[U]): ArrRef[T] =
  result = newArrRef[T](s.len)
  for i, v in s: result[i] = v.T

when defined(isNimSkull):
  # mainline nim is WAY too buggy to handle this
  proc newSeq*[T: not ref](a: ArrRef[T]): var seq[T] =
    when defined(isNimSkull):
      result = newSeqUninitialized[T](a.len)
    # elif compiles(newSeqUninit[T](a.len)):
    #   result = newSeqUninit[T](a.len)
    # else:
    #   result = newSeqOfCap[T](a.len)
    if a.len != 0:
      copyMem(result[0].addr, a.arr.addr, result.len * sizeof(T))

proc hash*[T](arr: ArrRef[T]): Hash =
  # Make use of stdlib's murmur3.
  # NOTE: We hash `len * sizeof(T)` bytes rather than `byte_len`, in case the
  # allocation is larger than the space actually occupied by the elements.
  hash(cast[ptr UncheckedArray[byte]](arr.arr.addr).toOpenArray(0, arr.len * sizeof(T) - 1))
  # TODO: for bigger elements, would a different algorithm be faster?

template `[]`*[T](a: ArrRef[T], s: Slice[int]): var SliceMem[T] =
  toSliceMem(a, s)

template concatImpl(arrs: untyped) =
  var total = 0
  for a in arrs:
    total += a.len
  result = newArrRef[T](total)
  var offset = 0
  for a in arrs:
    # skip empty inputs so result[offset] is never evaluated out of range
    if a.len != 0:
      copyMem(result[offset].addr, a.toPointer, a.len * sizeof(T))
    offset += a.len

proc concat*[T](arrs: varargs[ArrRef[T]]): ArrRef[T] =
  concatImpl(arrs)

proc concat*[T](arrs: seq[ArrRef[T]]): ArrRef[T] =
  concatImpl(arrs)

template `&`*[T, U](a: ArrRef[T], b: ArrRef[U]): ArrRef[T] =
  concat(a, b.to(T))

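# A small self-check sketch: exercises the public API when the module is run
# directly (e.g. `nim r` on this file). The values are illustrative only.
when isMainModule:
  let a = newArrRef[int32](3)
  a.fill(7)
  a[0] = 1
  assert a[^1] == 7
  assert a.len == 3 and a.byteLen == 12
  assert $a == "ArrRef([1, 7, 7])"

  let b = newArrRef[int32](@[2'i32, 3, 4])
  let c = a & b
  assert c.len == 6
  assert c.to(byte).len == 24
  assert hash(c) == hash(newArrRef[int32](c))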
|