orc: fix overflow checking regression (#25089)

Raising exceptions halfway through a memory allocation is undefined
behavior since exceptions themselves require multiple allocations and
the allocator functions are not reentrant.

It is of course also expensive performance-wise to introduce lots of
exception-raising code everywhere since it breaks many optimisations and
bloats the code.

Finally, performing pointer arithmetic with signed integers is incorrect,
for example on 32-bit systems that allow up to 3 GB of address
space for applications (large address extensions), and unnecessary
elsewhere - broadly, stuff inside the memory allocator is generated by
the compiler or controlled by the standard library meaning that
applications should not be forced to pay this price.

If we wanted to check for overflow, the right way would be at the
initial allocation site where both the size and count of objects are
known.

The code is updated to use the same arithmetic operator style as for
refc with unchecked operations rather than disabling overflow checking
wholesale in the allocator module - there are reasons for both, but
going with the existing flow seems like an easier place to start.

(cherry picked from commit 8b9972c8b6)
This commit is contained in:
Jacek Sieka
2025-09-15 15:08:21 +02:00
committed by narimiran
parent f0b22a7620
commit 2123969cc4
14 changed files with 93 additions and 123 deletions

View File

@@ -134,19 +134,17 @@ else:
proc zeroNewElements(len: int; p: pointer; addlen, elemSize, elemAlign: int) {.
importCompilerProc.}
template `+!!`(a, b): untyped = cast[pointer](cast[int](a) + b)
include system/ptrarith
proc getDiscriminant(aa: pointer, n: ptr TNimNode): int =
assert(n.kind == nkCase)
var d: int
let a = cast[int](aa)
let a = aa +! n.offset
case n.typ.size
of 1: d = int(cast[ptr uint8](a +% n.offset)[])
of 2: d = int(cast[ptr uint16](a +% n.offset)[])
of 4: d = int(cast[ptr uint32](a +% n.offset)[])
of 8: d = int(cast[ptr uint64](a +% n.offset)[])
of 1: int(cast[ptr uint8](a)[])
of 2: int(cast[ptr uint16](a)[])
of 4: int(cast[ptr uint32](a)[])
of 8: cast[int](cast[ptr uint64](a)[])
else: raiseAssert "unreachable"
return d
proc selectBranch(aa: pointer, n: ptr TNimNode): ptr TNimNode =
let discr = getDiscriminant(aa, n)
@@ -240,9 +238,6 @@ proc skipRange(x: PNimType): PNimType {.inline.} =
result = x
if result.kind == tyRange: result = result.base
proc align(address, alignment: int): int =
result = (address + (alignment - 1)) and not (alignment - 1)
proc `[]`*(x: Any, i: int): Any =
## Accessor for an any `x` that represents an array or a sequence.
case x.rawType.kind
@@ -250,7 +245,7 @@ proc `[]`*(x: Any, i: int): Any =
let bs = x.rawType.base.size
if i >=% x.rawType.size div bs:
raise newException(IndexDefect, formatErrorIndexBound(i, x.rawType.size div bs))
return newAny(x.value +!! i*bs, x.rawType.base)
return newAny(x.value +! i*bs, x.rawType.base)
of tySequence:
when defined(gcDestructors):
var s = cast[ptr NimSeqV2Reimpl](x.value)
@@ -259,14 +254,14 @@ proc `[]`*(x: Any, i: int): Any =
let bs = x.rawType.base.size
let ba = x.rawType.base.align
let headerSize = align(sizeof(int), ba)
return newAny(s.p +!! (headerSize+i*bs), x.rawType.base)
return newAny(s.p +! (headerSize+i*bs), x.rawType.base)
else:
var s = cast[ppointer](x.value)[]
if s == nil: raise newException(ValueError, "sequence is nil")
let bs = x.rawType.base.size
if i >=% cast[PGenSeq](s).len:
raise newException(IndexDefect, formatErrorIndexBound(i, cast[PGenSeq](s).len-1))
return newAny(s +!! (align(GenericSeqSize, x.rawType.base.align)+i*bs), x.rawType.base)
return newAny(s +! (align(GenericSeqSize, x.rawType.base.align)+i*bs), x.rawType.base)
else: raiseAssert "unreachable"
proc `[]=`*(x: Any, i: int, y: Any) =
@@ -277,7 +272,7 @@ proc `[]=`*(x: Any, i: int, y: Any) =
if i >=% x.rawType.size div bs:
raise newException(IndexDefect, formatErrorIndexBound(i, x.rawType.size div bs))
assert y.rawType == x.rawType.base
genericAssign(x.value +!! i*bs, y.value, y.rawType)
genericAssign(x.value +! i*bs, y.value, y.rawType)
of tySequence:
when defined(gcDestructors):
var s = cast[ptr NimSeqV2Reimpl](x.value)
@@ -287,7 +282,7 @@ proc `[]=`*(x: Any, i: int, y: Any) =
let ba = x.rawType.base.align
let headerSize = align(sizeof(int), ba)
assert y.rawType == x.rawType.base
genericAssign(s.p +!! (headerSize+i*bs), y.value, y.rawType)
genericAssign(s.p +! (headerSize+i*bs), y.value, y.rawType)
else:
var s = cast[ppointer](x.value)[]
if s == nil: raise newException(ValueError, "sequence is nil")
@@ -295,7 +290,7 @@ proc `[]=`*(x: Any, i: int, y: Any) =
if i >=% cast[PGenSeq](s).len:
raise newException(IndexDefect, formatErrorIndexBound(i, cast[PGenSeq](s).len-1))
assert y.rawType == x.rawType.base
genericAssign(s +!! (align(GenericSeqSize, x.rawType.base.align)+i*bs), y.value, y.rawType)
genericAssign(s +! (align(GenericSeqSize, x.rawType.base.align)+i*bs), y.value, y.rawType)
else: raiseAssert "unreachable"
proc len*(x: Any): int =
@@ -352,13 +347,13 @@ proc fieldsAux(p: pointer, n: ptr TNimNode,
case n.kind
of nkNone: assert(false)
of nkSlot:
ret.add((n.name, newAny(p +!! n.offset, n.typ)))
ret.add((n.name, newAny(p +! n.offset, n.typ)))
assert ret[ret.len()-1][0] != nil
of nkList:
for i in 0..n.len-1: fieldsAux(p, n.sons[i], ret)
of nkCase:
var m = selectBranch(p, n)
ret.add((n.name, newAny(p +!! n.offset, n.typ)))
ret.add((n.name, newAny(p +! n.offset, n.typ)))
if m != nil: fieldsAux(p, m, ret)
iterator fields*(x: Any): tuple[name: string, any: Any] =
@@ -409,7 +404,7 @@ proc `[]=`*(x: Any, fieldName: string, value: Any) =
let n = getFieldNode(x.value, t.node, fieldName)
if n != nil:
assert n.typ == value.rawType
genericAssign(x.value +!! n.offset, value.value, value.rawType)
genericAssign(x.value +! n.offset, value.value, value.rawType)
else:
raise newException(ValueError, "invalid field name: " & fieldName)
@@ -422,7 +417,7 @@ proc `[]`*(x: Any, fieldName: string): Any =
assert x.rawType.kind in {tyTuple, tyObject}
let n = getFieldNode(x.value, t.node, fieldName)
if n != nil:
result = Any(value: x.value +!! n.offset)
result = Any(value: x.value +! n.offset)
result.rawType = n.typ
elif x.rawType.kind == tyObject and x.rawType.base != nil:
return `[]`(newAny(x.value, x.rawType.base), fieldName)

View File

@@ -1043,15 +1043,6 @@ proc toDecimal64*(ieeeSignificand: uint64; ieeeExponent: uint64): FloatingDecima
# ToChars
# ==================================================================================================
when false:
template `+!`(x: cstring; offset: int): cstring = cast[cstring](cast[uint](x) + uint(offset))
template dec(x: cstring; offset=1) = x = cast[cstring](cast[uint](x) - uint(offset))
template inc(x: cstring; offset=1) = x = cast[cstring](cast[uint](x) + uint(offset))
proc memset(x: cstring; ch: char; L: int) {.importc, nodecl.}
proc memmove(a, b: cstring; L: int) {.importc, nodecl.}
proc utoa8DigitsSkipTrailingZeros*(buf: var openArray[char]; pos: int; digits: uint32): int {.inline.} =
dragonbox_Assert(digits >= 1)
dragonbox_Assert(digits <= 99999999'u32)

View File

@@ -1116,12 +1116,7 @@ import std/private/since
import system/ctypes
export ctypes
proc align(address, alignment: int): int =
if alignment == 0: # Actually, this is illegal. This branch exists to actively
# hide problems.
result = address
else:
result = (address + (alignment - 1)) and not (alignment - 1)
include system/ptrarith
include system/rawquits
when defined(genode):

View File

@@ -83,9 +83,9 @@ when defined(gcAtomicArc) and hasThreadSupport:
atomicLoadN(x.rc.addr, ATOMIC_ACQUIRE) shr rcShift
else:
template decrement(cell: Cell): untyped =
dec(cell.rc, rcIncrement)
cell.rc = cell.rc -% rcIncrement
template increment(cell: Cell): untyped =
inc(cell.rc, rcIncrement)
cell.rc = cell.rc +% rcIncrement
template count(x: Cell): untyped =
x.rc shr rcShift
@@ -94,7 +94,7 @@ when not defined(nimHasQuirky):
proc nimNewObj(size, alignment: int): pointer {.compilerRtl.} =
let hdrSize = align(sizeof(RefHeader), alignment)
let s = size + hdrSize
let s = size +% hdrSize
when defined(nimscript):
discard
else:

View File

@@ -22,9 +22,9 @@ proc contains(s: CellSeq, c: PCell): bool {.inline.} =
return false
proc resize(s: var CellSeq) =
s.cap = s.cap div 2 + s.cap
let d = cast[PCellArray](alloc(s.cap * sizeof(PCell)))
copyMem(d, s.d, s.len * sizeof(PCell))
s.cap = s.cap div 2 +% s.cap
let d = cast[PCellArray](alloc(cast[Natural](s.cap *% sizeof(PCell))))
copyMem(d, s.d, s.len *% sizeof(PCell))
dealloc(s.d)
s.d = d
@@ -37,7 +37,7 @@ proc add(s: var CellSeq, c: PCell) {.inline.} =
proc init(s: var CellSeq, cap: int = 1024) =
s.len = 0
s.cap = cap
s.d = cast[PCellArray](alloc0(cap * sizeof(PCell)))
s.d = cast[PCellArray](alloc0(cast[Natural](cap *% sizeof(PCell))))
proc deinit(s: var CellSeq) =
dealloc(s.d)

View File

@@ -17,26 +17,26 @@ type
d: CellArray[T]
proc resize[T](s: var CellSeq[T]) =
s.cap = s.cap div 2 + s.cap
var newSize = s.cap * sizeof(CellTuple[T])
s.cap = s.cap div 2 +% s.cap
let newSize = s.cap *% sizeof(CellTuple[T])
when compileOption("threads"):
s.d = cast[CellArray[T]](reallocShared(s.d, newSize))
s.d = cast[CellArray[T]](reallocShared(s.d, cast[Natural](newSize)))
else:
s.d = cast[CellArray[T]](realloc(s.d, newSize))
s.d = cast[CellArray[T]](realloc(s.d, cast[Natural](newSize)))
proc add[T](s: var CellSeq[T], c: T, t: PNimTypeV2) {.inline.} =
if s.len >= s.cap:
s.resize()
s.d[s.len] = (c, t)
inc(s.len)
s.len = s.len +% 1
proc init[T](s: var CellSeq[T], cap: int = 1024) =
s.len = 0
s.cap = cap
when compileOption("threads"):
s.d = cast[CellArray[T]](allocShared(uint(s.cap * sizeof(CellTuple[T]))))
s.d = cast[CellArray[T]](allocShared(cast[Natural](s.cap *% sizeof(CellTuple[T]))))
else:
s.d = cast[CellArray[T]](alloc(s.cap * sizeof(CellTuple[T])))
s.d = cast[CellArray[T]](alloc(cast[Natural](s.cap *% sizeof(CellTuple[T]))))
proc deinit[T](s: var CellSeq[T]) =
if s.d != nil:
@@ -49,5 +49,6 @@ proc deinit[T](s: var CellSeq[T]) =
s.cap = 0
proc pop[T](s: var CellSeq[T]): (T, PNimTypeV2) =
result = s.d[s.len-1]
dec s.len
let last = s.len -% 1
s.len = last
s.d[last]

View File

@@ -180,7 +180,6 @@ proc deinitRawChannel(p: pointer) =
deinitSysCond(c.cond)
when not usesDestructors:
proc storeAux(dest, src: pointer, mt: PNimType, t: PRawChannel,
mode: LoadStoreMode) {.benign.}
@@ -203,9 +202,6 @@ when not usesDestructors:
proc storeAux(dest, src: pointer, mt: PNimType, t: PRawChannel,
mode: LoadStoreMode) =
template `+!`(p: pointer; x: int): pointer =
cast[pointer](cast[int](p) +% x)
var
d = cast[int](dest)
s = cast[int](src)

View File

@@ -173,11 +173,11 @@ proc addZCT(s: var CellSeq, c: PCell) {.noinline.} =
proc cellToUsr(cell: PCell): pointer {.inline.} =
# convert object (=pointer to refcount) to pointer to userdata
result = cast[pointer](cast[int](cell)+%ByteAddress(sizeof(Cell)))
cell +! sizeof(Cell)
proc usrToCell(usr: pointer): PCell {.inline.} =
# convert pointer to userdata to object (=pointer to refcount)
result = cast[PCell](cast[int](usr)-%ByteAddress(sizeof(Cell)))
cast[PCell](usr -! sizeof(Cell))
proc extGetCellType(c: pointer): PNimType {.compilerproc.} =
# used for code generation concerning debugging

View File

@@ -94,11 +94,11 @@ template gcAssert(cond: bool, msg: string) =
proc cellToUsr(cell: PCell): pointer {.inline.} =
# convert object (=pointer to refcount) to pointer to userdata
result = cast[pointer](cast[int](cell)+%ByteAddress(sizeof(Cell)))
cell +! sizeof(Cell)
proc usrToCell(usr: pointer): PCell {.inline.} =
# convert pointer to userdata to object (=pointer to refcount)
result = cast[PCell](cast[int](usr)-%ByteAddress(sizeof(Cell)))
cast[PCell](usr -! sizeof(Cell))
proc extGetCellType(c: pointer): PNimType {.compilerproc.} =
# used for code generation concerning debugging

View File

@@ -101,16 +101,10 @@ template withRegion*(r: var MemRegion; body: untyped) =
tlRegion = oldRegion
template inc(p: pointer, s: int) =
p = cast[pointer](cast[int](p) +% s)
p = p +! s
template dec(p: pointer, s: int) =
p = cast[pointer](cast[int](p) -% s)
template `+!`(p: pointer, s: int): pointer =
cast[pointer](cast[int](p) +% s)
template `-!`(p: pointer, s: int): pointer =
cast[pointer](cast[int](p) -% s)
p = p -! s
const nimMinHeapPages {.intdefine.} = 4

View File

@@ -319,12 +319,6 @@ when hasAlloc and not defined(js):
include bitmasks
template `+!`(p: pointer, s: SomeInteger): pointer =
cast[pointer](cast[int](p) +% int(s))
template `-!`(p: pointer, s: SomeInteger): pointer =
cast[pointer](cast[int](p) -% int(s))
proc alignedAlloc(size, align: Natural): pointer =
if align <= MemAlign:
when compileOption("threads"):
@@ -334,32 +328,21 @@ when hasAlloc and not defined(js):
else:
# allocate (size + align - 1) necessary for alignment,
# plus 2 bytes to store offset
when compileOption("threads"):
let base = allocShared(size + align - 1 + sizeof(uint16))
else:
let base = alloc(size + align - 1 + sizeof(uint16))
let base =
when compileOption("threads"):
allocShared(cast[Natural](size +% align -% 1 +% sizeof(uint16)))
else:
alloc(cast[Natural](size +% align -% 1 +% sizeof(uint16)))
# memory layout: padding + offset (2 bytes) + user_data
# in order to deallocate: read offset at user_data - 2 bytes,
# then deallocate user_data - offset
let offset = align - (cast[int](base) and (align - 1))
cast[ptr uint16](base +! (offset - sizeof(uint16)))[] = uint16(offset)
let offset = align -% cast[int](cast[uint](base) and uint(align -% 1))
result = base +! offset
cast[ptr uint16](result -! sizeof(uint16))[] = uint16(offset)
proc alignedAlloc0(size, align: Natural): pointer =
if align <= MemAlign:
when compileOption("threads"):
result = allocShared0(size)
else:
result = alloc0(size)
else:
# see comments for alignedAlloc
when compileOption("threads"):
let base = allocShared0(size + align - 1 + sizeof(uint16))
else:
let base = alloc0(size + align - 1 + sizeof(uint16))
let offset = align - (cast[int](base) and (align - 1))
cast[ptr uint16](base +! (offset - sizeof(uint16)))[] = uint16(offset)
result = base +! offset
result = alignedAlloc(size, align)
zeroMem(result, size)
proc alignedDealloc(p: pointer, align: int) {.compilerproc.} =
if align <= MemAlign:
@@ -395,7 +378,7 @@ when hasAlloc and not defined(js):
else:
result = alignedAlloc(newSize, align)
copyMem(result, p, oldSize)
zeroMem(result +! oldSize, newSize - oldSize)
zeroMem(result +! oldSize, newSize -% oldSize)
alignedDealloc(p, align)
{.pop.}

View File

@@ -45,7 +45,7 @@ const
proc nimIncRefCyclic(p: pointer; cyclic: bool) {.compilerRtl, inl.} =
let h = head(p)
inc h.rc, rcIncrement
h.rc = h.rc +% rcIncrement
when optimizedOrc:
if cyclic:
h.rc = h.rc or maybeCycle
@@ -145,14 +145,17 @@ var
proc unregisterCycle(s: Cell) =
# swap with the last element. O(1)
let idx = s.rootIdx-1
let
rootIdx = s.rootIdx
idx = rootIdx -% 1
last = roots.len -% 1
when false:
if idx >= roots.len or idx < 0:
cprintf("[Bug!] %ld %ld\n", idx, roots.len)
rawQuit 1
roots.d[idx] = roots.d[roots.len-1]
roots.d[idx][0].rootIdx = idx+1
dec roots.len
roots.d[idx] = roots.d[last]
roots.d[idx][0].rootIdx = rootIdx
roots.len = last
s.rootIdx = 0
proc scanBlack(s: Cell; desc: PNimTypeV2; j: var GcEnv) =
@@ -171,7 +174,7 @@ proc scanBlack(s: Cell; desc: PNimTypeV2; j: var GcEnv) =
while j.traceStack.len > until:
let (entry, desc) = j.traceStack.pop()
let t = head entry[]
inc t.rc, rcIncrement
t.rc = t.rc +% rcIncrement
if t.color != colBlack:
t.setColor colBlack
trace(t, desc, j)
@@ -189,16 +192,16 @@ proc markGray(s: Cell; desc: PNimTypeV2; j: var GcEnv) =
]#
if s.color != colGray:
s.setColor colGray
inc j.touched
j.touched = j.touched +% 1
# keep in mind that refcounts are zero based so add 1 here:
inc j.rcSum, (s.rc shr rcShift) + 1
j.rcSum = j.rcSum +% (s.rc shr rcShift) +% 1
orcAssert(j.traceStack.len == 0, "markGray: trace stack not empty")
trace(s, desc, j)
while j.traceStack.len > 0:
let (entry, desc) = j.traceStack.pop()
let t = head entry[]
dec t.rc, rcIncrement
inc j.edges
t.rc = t.rc -% rcIncrement
j.edges = j.edges +% 1
when useJumpStack:
if (t.rc shr rcShift) >= 0 and (t.rc and jumpStackFlag) == 0:
t.rc = t.rc or jumpStackFlag
@@ -207,9 +210,9 @@ proc markGray(s: Cell; desc: PNimTypeV2; j: var GcEnv) =
j.jumpStack.add(entry, desc)
if t.color != colGray:
t.setColor colGray
inc j.touched
j.touched = j.touched +% 1
# we already decremented its refcount so account for that:
inc j.rcSum, (t.rc shr rcShift) + 2
j.rcSum = j.rcSum +% (t.rc shr rcShift) +% 2
trace(t, desc, j)
proc scan(s: Cell; desc: PNimTypeV2; j: var GcEnv) =
@@ -327,7 +330,8 @@ proc collectCyclesBacon(j: var GcEnv; lowMark: int) =
s.buffered = false
collectWhite(s)
]#
let last = roots.len - 1
let last = roots.len -% 1
when logOrc:
for i in countdown(last, lowMark):
writeCell("root", roots.d[i][0], roots.d[i][1])
@@ -368,7 +372,7 @@ proc collectCyclesBacon(j: var GcEnv; lowMark: int) =
when not defined(nimStressOrc):
rootsThreshold = oldThreshold
inc j.freed, j.toFree.len
j.freed = j.freed +% j.toFree.len
deinit j.toFree
when defined(nimOrcStats):
@@ -419,15 +423,15 @@ proc collectCycles() =
# we touched. If we're effective, we can reset the threshold:
if j.keepThreshold:
discard
elif j.freed * 2 >= j.touched:
elif j.freed *% 2 >= j.touched:
when not defined(nimFixedOrc):
rootsThreshold = max(rootsThreshold div 3 * 2, 16)
rootsThreshold = max(rootsThreshold div 3 *% 2, 16)
else:
rootsThreshold = 0
#cfprintf(cstderr, "[collectCycles] freed %ld, touched %ld new threshold %ld\n", j.freed, j.touched, rootsThreshold)
elif rootsThreshold < high(int) div 4:
rootsThreshold = (if rootsThreshold <= 0: defaultThreshold else: rootsThreshold)
rootsThreshold = rootsThreshold div 2 + rootsThreshold
rootsThreshold = rootsThreshold div 2 +% rootsThreshold
when logOrc:
cfprintf(cstderr, "[collectCycles] end; freed %ld new threshold %ld touched: %ld mem: %ld rcSum: %ld edges: %ld\n", j.freed, rootsThreshold, j.touched,
getOccupiedMem(), j.rcSum, j.edges)
@@ -443,11 +447,11 @@ when defined(nimOrcStats):
result = OrcStats(freedCyclicObjects: freedCyclicObjects)
proc registerCycle(s: Cell; desc: PNimTypeV2) =
s.rootIdx = roots.len+1
s.rootIdx = roots.len +% 1
if roots.d == nil: init(roots)
add(roots, s, desc)
if roots.len - defaultThreshold >= rootsThreshold:
if roots.len -% defaultThreshold >= rootsThreshold:
collectCycles()
when logOrc:
writeCell("[added root]", s, desc)
@@ -518,7 +522,7 @@ proc nimDecRefIsLastCyclicDyn(p: pointer): bool {.compilerRtl, inl.} =
result = true
#cprintf("[DESTROY] %p\n", p)
else:
dec cell.rc, rcIncrement
cell.rc = cell.rc -% rcIncrement
#if cell.color == colPurple:
rememberCycle(result, cell, cast[ptr PNimTypeV2](p)[])
@@ -530,7 +534,7 @@ proc nimDecRefIsLastDyn(p: pointer): bool {.compilerRtl, inl.} =
result = true
#cprintf("[DESTROY] %p\n", p)
else:
dec cell.rc, rcIncrement
cell.rc = cell.rc -% rcIncrement
#if cell.color == colPurple:
if result:
if cell.rootIdx > 0:
@@ -544,7 +548,7 @@ proc nimDecRefIsLastCyclicStatic(p: pointer; desc: PNimTypeV2): bool {.compilerR
result = true
#cprintf("[DESTROY] %p %s\n", p, desc.name)
else:
dec cell.rc, rcIncrement
cell.rc = cell.rc -% rcIncrement
#if cell.color == colPurple:
rememberCycle(result, cell, desc)

17
lib/system/ptrarith.nim Normal file
View File

@@ -0,0 +1,17 @@
# Wrapping and non-defect-raising arithmetic operators for pointers
proc align(address, alignment: int): int {.used.} =
  ## Rounds `address` up to the next multiple of `alignment` using wrapping
  ## unsigned arithmetic so that no overflow defect can be raised inside the
  ## allocator. `alignment` is expected to be a power of two.
  if alignment == 0: # Actually, this is illegal. This branch exists to
                     # actively hide problems.
    result = address
  else:
    let mask = cast[uint](alignment) - 1
    result = cast[int]((cast[uint](address) + mask) and not mask)
# Pointer-plus-offset: adds `s` bytes to `p` via unsigned wrapping arithmetic,
# so no overflow defect can be raised (the commit's goal: allocator code must
# not raise mid-allocation). Note the `cast[uint](s)` (not `uint(s)`): a cast
# wraps negative offsets two's-complement style instead of raising RangeDefect.
template `+!`(p: pointer, s: SomeInteger): pointer {.used.} =
  cast[pointer](cast[uint](p) + cast[uint](s))
# Pointer-minus-offset: subtracts `s` bytes from `p`, same wrapping semantics
# as `+!` above.
template `-!`(p: pointer, s: SomeInteger): pointer {.used.} =
  cast[pointer](cast[uint](p) - cast[uint](s))

View File

@@ -57,12 +57,6 @@ proc newSeqPayloadUninit(cap, elemSize, elemAlign: int): pointer {.compilerRtl,
else:
result = nil
template `+!`(p: pointer, s: int): pointer =
cast[pointer](cast[int](p) +% s)
template `-!`(p: pointer, s: int): pointer =
cast[pointer](cast[int](p) -% s)
proc prepareSeqAdd(len: int; p: pointer; addlen, elemSize, elemAlign: int): pointer {.
noSideEffect, tags: [], raises: [], compilerRtl.} =
{.noSideEffect.}: