# myou-engine/libs/arr_ref/arr_ref.nim

import std/strformat
import std/hashes

# Size of the bookkeeping header stored in front of the elements.
const header_size =
  when not defined(release):
    sizeof(pointer)*2 + sizeof(Natural)
  else:
    sizeof(pointer)

type ArrRef*[T] = ref object
  endp: pointer # points one past the last element
  when not defined(release):
    # to see in the debugger
    size_bytes: Natural
    arr_ptr: ptr T
  arr: UncheckedArray[T]
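
# A single allocation holds the header followed by the payload:
#
#   [ endp (, size_bytes, arr_ptr in debug builds) ][ T, T, T, ... ]
#   '------------------ header_size ---------------''-- elements --'
#
# Since endp points one past the last element, len and byteLen can be
# recovered from pointer arithmetic alone.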

proc newArrRef*[T](size: Natural): ArrRef[T] =
  ## Allocates room for `size` elements plus the header in a single block.
  when defined(TCC):
    var r: ref pointer # workaround for tcc bug
    unsafeNew(r, size * sizeof(T) + header_size)
    result = cast[ArrRef[T]](r)
  else:
    unsafeNew(result, size * sizeof(T) + header_size)
  result.endp = addr(result.arr[size])
  when not defined(release):
    result.size_bytes = size * sizeof(T)
    result.arr_ptr = result.arr[0].addr
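
# Usage sketch (len and indexing are defined further below):
#   let a = newArrRef[int32](4)
#   for i in 0 ..< a.len: a[i] = int32(i)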

template len*[T](a: ArrRef[T]): Natural =
  ((cast[int](a.endp) -% cast[int](a)) -% header_size) div sizeof(T)
template byteLen*[T](a: ArrRef[T]): Natural =
  ((cast[int](a.endp) -% cast[int](a)) -% header_size)
template low*[T](a: ArrRef[T]): Natural = 0
template high*[T](a: ArrRef[T]): int = a.len - 1
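
# E.g. for newArrRef[float32](8): len == 8, byteLen == 32, low == 0 and
# high == 7. Both lengths are computed from the distance between the base
# pointer and endp, so no separate length field is needed.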

template rangeError[T](a: ArrRef[T], i: Natural) =
  raise newException(RangeDefect, &"index out of range: {i} >= {a.len}")

proc `[]`*[T](a: ArrRef[T], i: Natural): var T =
  let p = cast[int](a) +% header_size +% sizeof(T) * i
  when compileOption("rangechecks"):
    if p +% sizeof(T) > cast[int](a.endp): rangeError(a, i)
  cast[ptr T](p)[]

proc `[]=`*[T](a: ArrRef[T], i: Natural, v: T) =
  let p = cast[int](a) +% header_size +% sizeof(T) * i
  when compileOption("rangechecks"):
    if p +% sizeof(T) > cast[int](a.endp): rangeError(a, i)
  cast[ptr T](p)[] = v
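
# Note: bounds are only enforced when compiled with --rangeChecks (on by
# default in debug builds), and the check compares the element's end
# address against endp instead of recomputing len.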
template toPointer*[T](a: ArrRef[T]): pointer = a.arr[0].addr

iterator items*[T](a: ArrRef[T]): T =
  for i in 0 ..< a.len:
    yield a[i]
iterator pairs*[T](a: ArrRef[T]): tuple[key: int, val: T] =
  for i in 0 ..< a.len:
    yield (i, a[i])
iterator mitems*[T](a: ArrRef[T]): var T =
  for i in 0 ..< a.len:
    yield a[i]
iterator mpairs*[T](a: ArrRef[T]): tuple[key: int, val: var T] =
  for i in 0 ..< a.len:
    yield (i, a[i])
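
# Iteration sketch:
#   for v in a: echo v               # items
#   for i, v in a: echo i, ": ", v   # pairs
#   for v in a.mitems: v += 1        # mutate in place (numeric T)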

proc `$`*[T](a: ArrRef[T]): string =
  result = "["
  for i in 0 ..< a.len:
    if i != 0: result &= ", "
    result &= $a[i]
  result &= "]"

template to*[T](a: ArrRef[T], U: untyped): untyped =
  cast[ArrRef[U]](a)
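
# Reinterprets the same buffer as another element type; len scales with
# the element size because it is derived from the byte length. Sketch:
#   let floats = newArrRef[float32](4)
#   assert floats.to(uint8).len == 16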

# proc setCap*[T](a: var ArrRef[T], size: Natural) =
#   let n = newArrRef[T](size)
#   copyMem(n[0].addr, a[0].addr, min(size, a.len) * sizeof(T))
#   a = n

# proc setGrow*[T](a: var ArrRef[T], i: Natural, v: T) =
#   let p = cast[int](a) +% header_size +% sizeof(T) * i
#   if p +% sizeof(T) > cast[int](a.endp):
#     var cap = max(1, a.len)
#     while i >= cap:
#       if cap < 65535: cap *= 2
#       else: cap = (cap * 3) div 2
#     a.setCap cap
#   cast[ptr T](p)[] = v

proc fill*[T](a: ArrRef[T], v: T) =
  for i in a.low .. a.high: a[i] = v

proc newArrRefWith*[T](size: Natural, v: T): ArrRef[T] =
  result = newArrRef[T](size)
  result.fill v
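
# Sketch:
#   let ones = newArrRefWith[float32](3, 1'f32)
#   assert $ones == "[1.0, 1.0, 1.0]"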

proc newArrRef*[T](s: seq[T] | ArrRef[T]): ArrRef[T] =
  result = newArrRef[T](s.len)
  if s.len != 0:
    copyMem(result.arr.addr, s[0].addr, s.len * sizeof(T))

proc newArrRef*[T; U: not T](s: seq[U]): ArrRef[T] =
  result = newArrRef[T](s.len)
  for i, v in s: result[i] = v.T
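
# Sketch: a same-type seq is copied with a single copyMem; a seq of a
# different element type is converted element by element:
#   let a = newArrRef(@[1'i32, 2, 3])       # ArrRef[int32]
#   let b = newArrRef[float32](@[1, 2, 3])  # each int converted to float32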

when defined(isNimSkull):
  # mainline nim is WAY too buggy to handle this
  proc newSeq*[T: not ref](a: ArrRef[T]): seq[T] =
    when defined(isNimSkull):
      result = newSeqUninitialized[T](a.len)
    # elif compiles(newSeqUninit[T](a.len)):
    #   result = newSeqUninit[T](a.len)
    # else:
    #   result = newSeqOfCap[T](a.len)
    if a.len != 0:
      copyMem(result[0].addr, a.arr.addr, result.len * sizeof(T))
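
  # Sketch (NimSkull only): copy the contents back out into a seq:
  #   let s = newSeq(a)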

proc hash*[T](arr: ArrRef[T]): Hash =
  # Make use of stdlib's murmur3.
  # NOTE: we hash len * sizeof(T) bytes rather than the allocated size,
  # in case the allocation is larger than the actual contents.
  hash(cast[ptr UncheckedArray[byte]](arr.arr.addr).toOpenArray(0, arr.len * sizeof(T) - 1))
  # TODO: for bigger elements, would a different algorithm be faster?
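
# Minimal self-test sketch exercising the pieces above.
when isMainModule:
  let a = newArrRef(@[1'i32, 2, 3])
  let b = newArrRef(@[1'i32, 2, 3])
  assert a.len == 3 and a.byteLen == 12
  assert $a == "[1, 2, 3]"
  assert a.to(uint8).len == 12
  # equal contents hash alike, since only the payload bytes are hashed
  assert hash(a) == hash(b)
  var c = newArrRefWith[int32](3, 0'i32)
  for i, v in a: c[i] = v
  assert hash(c) == hash(a)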