Commit a9bd78d579 (parent 8a3b76b287)
Bung, 2022-12-16 15:01:15 +08:00, committed by GitHub
23 changed files with 140 additions and 140 deletions
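
Every hunk below makes the same substitution: `cast[ByteAddress](p)` becomes `cast[int](p)`. `ByteAddress` is a plain alias for `int` (see the system.nim hunk further down, which also deprecates it), so the change is cosmetic at runtime. A minimal sketch of the underlying idiom, with illustrative names not taken from the diff:

var buf = [10'i32, 20, 30]
# treat the address as a plain integer, step one element, cast back
let base = cast[int](addr buf[0])
let second = cast[ptr int32](base + sizeof(int32))
assert second[] == 20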

View File

@@ -333,7 +333,7 @@ proc typeToYamlAux(conf: ConfigRef; n: PType, marker: var IntSet, indent: int,
sonsRope = rope("null")
elif containsOrIncl(marker, n.id):
sonsRope = "\"$1 @$2\"" % [rope($n.kind), rope(
-strutils.toHex(cast[ByteAddress](n), sizeof(n) * 2))]
+strutils.toHex(cast[int](n), sizeof(n) * 2))]
else:
if n.len > 0:
sonsRope = rope("[")
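
The hunk above renders a node's address as fixed-width hex for the YAML dump. A sketch of the same formatting, with a local variable standing in for the `PType` node:

import std/strutils
var x = 123
# two hex digits per byte of a pointer-sized value
echo toHex(cast[int](addr x), sizeof(pointer) * 2)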

View File

@@ -77,7 +77,7 @@ proc importcSymbol*(conf: ConfigRef, sym: PSym): PNode =
theAddr = dllhandle.symAddr(name.cstring)
if theAddr.isNil: globalError(conf, sym.info,
"cannot import symbol: " & name & " from " & libPathMsg)
-result.intVal = cast[ByteAddress](theAddr)
+result.intVal = cast[int](theAddr)
proc mapType(conf: ConfigRef, t: ast.PType): ptr libffi.Type =
if t == nil: return addr libffi.type_void
@@ -113,7 +113,7 @@ proc mapCallConv(conf: ConfigRef, cc: TCallingConvention, info: TLineInfo): TABI
template rd(typ, p: untyped): untyped = (cast[ptr typ](p))[]
template wr(typ, p, v: untyped): untyped = (cast[ptr typ](p))[] = v
template `+!`(x, y: untyped): untyped =
-cast[pointer](cast[ByteAddress](x) + y)
+cast[pointer](cast[int](x) + y)
proc packSize(conf: ConfigRef, v: PNode, typ: PType): int =
## computes the size of the blob
@@ -369,13 +369,13 @@ proc unpack(conf: ConfigRef, x: pointer, typ: PType, n: PNode): PNode =
# in their unboxed representation so nothing is to be unpacked:
result = n
else:
-awi(nkPtrLit, cast[ByteAddress](p))
+awi(nkPtrLit, cast[int](p))
of tyPtr, tyRef, tyVar, tyLent:
let p = rd(pointer, x)
if p.isNil:
setNil()
elif n == nil or n.kind == nkPtrLit:
-awi(nkPtrLit, cast[ByteAddress](p))
+awi(nkPtrLit, cast[int](p))
elif n != nil and n.len == 1:
internalAssert(conf, n.kind == nkRefTy)
n[0] = unpack(conf, p, typ.lastSon, n[0])
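
The `+!` template above is the whole pointer-bumping idiom in one line: integer-add an offset to an address, then cast back to `pointer`. A self-contained sketch (the array and names are illustrative):

template `+!`(x, y: untyped): untyped =
  cast[pointer](cast[int](x) + y)

var arr = [1'u8, 2, 3]
let second = cast[ptr uint8](addr(arr[0]) +! 1)
assert second[] == 2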

View File

@@ -132,12 +132,12 @@ else:
proc prepareSeqAdd(len: int; p: pointer; addlen, elemSize, elemAlign: int): pointer {.
importCompilerProc.}
-template `+!!`(a, b): untyped = cast[pointer](cast[ByteAddress](a) + b)
+template `+!!`(a, b): untyped = cast[pointer](cast[int](a) + b)
proc getDiscriminant(aa: pointer, n: ptr TNimNode): int =
assert(n.kind == nkCase)
var d: int
-let a = cast[ByteAddress](aa)
+let a = cast[int](aa)
case n.typ.size
of 1: d = int(cast[ptr uint8](a +% n.offset)[])
of 2: d = int(cast[ptr uint16](a +% n.offset)[])
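
`getDiscriminant` above reads an integer of the recorded size at `base + offset`. The same access pattern on a hypothetical object, using `offsetOf` to obtain the field offset:

type Obj = object
  a: int32
  b: uint8
var o = Obj(a: 1, b: 7)
let base = cast[int](addr o)
# read the byte-sized field through its raw address, as getDiscriminant does
assert int(cast[ptr uint8](base +% offsetOf(Obj, b))[]) == 7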

View File

@@ -281,8 +281,8 @@ proc start*(c: proc(), stacksize: int = defaultStackSize): CoroutineRef {.discar
(proc(p: pointer) {.stdcall.} = runCurrentTask()), nil)
else:
coro = cast[CoroutinePtr](alloc0(sizeof(Coroutine) + stacksize))
-coro.stack.top = cast[pointer](cast[ByteAddress](coro) + sizeof(Coroutine))
-coro.stack.bottom = cast[pointer](cast[ByteAddress](coro.stack.top) + stacksize)
+coro.stack.top = cast[pointer](cast[int](coro) + sizeof(Coroutine))
+coro.stack.bottom = cast[pointer](cast[int](coro.stack.top) + stacksize)
when coroBackend == CORO_BACKEND_UCONTEXT:
discard getcontext(coro.execContext)
coro.execContext.uc_stack.ss_sp = coro.stack.top
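
The coroutine code above carves the stack out of the same allocation as the `Coroutine` header: the stack top sits immediately past the header, the bottom a further `stacksize` bytes along. A stand-alone sketch of that layout with a toy header type:

type Header = object
  len: int
const payloadSize = 64
let p = alloc0(sizeof(Header) + payloadSize)
# the payload region starts right after the header
let payload = cast[pointer](cast[int](p) + sizeof(Header))
assert cast[int](payload) - cast[int](p) == sizeof(Header)
dealloc(p)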

View File

@@ -495,8 +495,8 @@ proc mmsSetPosition(s: Stream, pos: int) =
proc mmsGetPosition(s: Stream): int = MemMapFileStream(s).pos
proc mmsPeekData(s: Stream, buffer: pointer, bufLen: int): int =
-let startAddress = cast[ByteAddress](MemMapFileStream(s).mf.mem)
-let p = cast[ByteAddress](MemMapFileStream(s).pos)
+let startAddress = cast[int](MemMapFileStream(s).mf.mem)
+let p = cast[int](MemMapFileStream(s).pos)
let l = min(bufLen, MemMapFileStream(s).mf.size - p)
moveMem(buffer, cast[pointer](startAddress + p), l)
result = l
@@ -511,8 +511,8 @@ proc mmsWriteData(s: Stream, buffer: pointer, bufLen: int) =
let size = MemMapFileStream(s).mf.size
if MemMapFileStream(s).pos + bufLen > size:
raise newEIO("cannot write to stream")
-let p = cast[ByteAddress](MemMapFileStream(s).mf.mem) +
-cast[ByteAddress](MemMapFileStream(s).pos)
+let p = cast[int](MemMapFileStream(s).mf.mem) +
+cast[int](MemMapFileStream(s).pos)
moveMem(cast[pointer](p), buffer, bufLen)
inc(MemMapFileStream(s).pos, bufLen)
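
Both stream procs above address the mapped file as `base + pos` before calling `moveMem`. The same base-plus-offset arithmetic on an ordinary buffer (sizes are illustrative):

var buf: array[16, byte]
buf[0] = 0xAB
let base = cast[int](addr buf[0])
let pos = 4
# copy one byte from offset 0 to offset 4 via raw addresses
moveMem(cast[pointer](base + pos), cast[pointer](base), 1)
assert buf[4] == 0xAB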

View File

@@ -44,7 +44,7 @@ proc hexbyte*(hex: char): int {.inline.} =
proc parseOid*(str: cstring): Oid =
## Parses an OID.
-var bytes = cast[cstring](cast[pointer](cast[ByteAddress](addr(result.time)) + 4))
+var bytes = cast[cstring](cast[pointer](cast[int](addr(result.time)) + 4))
var i = 0
while i < 12:
bytes[i] = chr((hexbyte(str[2 * i]) shl 4) or hexbyte(str[2 * i + 1]))
@@ -57,7 +57,7 @@ proc `$`*(oid: Oid): string =
result.setLen 24
var o = oid
-var bytes = cast[cstring](cast[pointer](cast[ByteAddress](addr(o)) + 4))
+var bytes = cast[cstring](cast[pointer](cast[int](addr(o)) + 4))
var i = 0
while i < 12:
let b = bytes[i].ord
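
`parseOid` and `$` above treat the 12 bytes that follow the leading 4-byte `time` field as a writable `cstring`. A sketch of that aliasing with a stand-in type:

type OidLike = object
  time: int32
  rest: array[12, char]
var o: OidLike
let bytes = cast[cstring](cast[pointer](cast[int](addr o.time) + 4))
bytes[0] = 'A'   # lands in o.rest[0]
assert o.rest[0] == 'A'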

View File

@@ -1914,7 +1914,7 @@ func find*(s: string, sub: char, start: Natural = 0, last = -1): int {.rtl,
if length > 0:
let found = c_memchr(s[start].unsafeAddr, sub, cast[csize_t](length))
if not found.isNil:
-return cast[ByteAddress](found) -% cast[ByteAddress](s.cstring)
+return cast[int](found) -% cast[int](s.cstring)
else:
findImpl()
@@ -1970,7 +1970,7 @@ func find*(s, sub: string, start: Natural = 0, last = -1): int {.rtl,
if last < 0 and start < s.len and subLen != 0:
let found = memmem(s[start].unsafeAddr, csize_t(s.len - start), sub.cstring, csize_t(subLen))
result = if not found.isNil:
-cast[ByteAddress](found) -% cast[ByteAddress](s.cstring)
+cast[int](found) -% cast[int](s.cstring)
else:
-1
else:
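
Both `find` overloads above recover the index by subtracting the string's base address from the pointer returned by `c_memchr`/`memmem`. The core of that computation, with the found pointer simulated locally:

let s = "hello"
# pretend `found` came back from c_memchr pointing at s[3]
let found = cast[int](unsafeAddr s[3])
let idx = found -% cast[int](s.cstring)
assert idx == 3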

View File

@@ -183,7 +183,7 @@ when not defined(nimscript):
let kv = $e
let p = find(kv, '=')
yield (substr(kv, 0, p-1), substr(kv, p+1))
-e = cast[typ](cast[ByteAddress](eend)+size)
+e = cast[typ](cast[int](eend)+size)
if typeof(zero)(eend[1]) == zero: break
discard free_fun(env)
impl(getEnvironmentStringsW, WideCString, 2, 0, freeEnvironmentStringsW)

View File

@@ -93,7 +93,7 @@ func find*(s: cstring, sub: char, start: Natural = 0, last = 0): int =
if L > 0:
let found = c_memchr(s[start].unsafeAddr, sub, cast[csize_t](L))
if not found.isNil:
-return cast[ByteAddress](found) -% cast[ByteAddress](s)
+return cast[int](found) -% cast[int](s)
return -1
func find*(s, sub: cstring, start: Natural = 0, last = 0): int =
@@ -108,6 +108,6 @@ func find*(s, sub: cstring, start: Natural = 0, last = 0): int =
if last == 0 and s.len > start:
let found = c_strstr(cast[cstring](s[start].unsafeAddr), sub)
if not found.isNil:
-result = cast[ByteAddress](found) -% cast[ByteAddress](s)
+result = cast[int](found) -% cast[int](s)
else:
result = -1

View File

@@ -480,7 +480,7 @@ proc readLine*(f: File, line: var string): bool {.tags: [ReadIOEffect],
let m = c_memchr(addr line[pos], '\L'.ord, cast[csize_t](sp))
if m != nil:
# \l found: Could be our own or the one by fgets, in any case, we're done
-var last = cast[ByteAddress](m) - cast[ByteAddress](addr line[0])
+var last = cast[int](m) - cast[int](addr line[0])
if last > 0 and line[last-1] == '\c':
line.setLen(last-1)
return last > 1 or fgetsSuccess

View File

@@ -290,7 +290,7 @@ proc llAlloc(a: var MemRegion, size: int): pointer =
a.llmem.size = PageSize - sizeof(LLChunk)
a.llmem.acc = sizeof(LLChunk)
a.llmem.next = old
-result = cast[pointer](cast[ByteAddress](a.llmem) + a.llmem.acc)
+result = cast[pointer](cast[int](a.llmem) + a.llmem.acc)
dec(a.llmem.size, size)
inc(a.llmem.acc, size)
zeroMem(result, size)
@@ -422,7 +422,7 @@ iterator allObjects(m: var MemRegion): pointer {.inline.} =
var c = cast[PSmallChunk](c)
let size = c.size
-var a = cast[ByteAddress](addr(c.data))
+var a = cast[int](addr(c.data))
let limit = a + c.acc
while a <% limit:
yield cast[pointer](a)
@@ -441,13 +441,13 @@ when not defined(gcDestructors):
# ------------- chunk management ----------------------------------------------
proc pageIndex(c: PChunk): int {.inline.} =
-result = cast[ByteAddress](c) shr PageShift
+result = cast[int](c) shr PageShift
proc pageIndex(p: pointer): int {.inline.} =
-result = cast[ByteAddress](p) shr PageShift
+result = cast[int](p) shr PageShift
proc pageAddr(p: pointer): PChunk {.inline.} =
-result = cast[PChunk](cast[ByteAddress](p) and not PageMask)
+result = cast[PChunk](cast[int](p) and not PageMask)
#sysAssert(Contains(allocator.chunkStarts, pageIndex(result)))
when false:
@@ -495,13 +495,13 @@ proc requestOsChunks(a: var MemRegion, size: int): PBigChunk =
when defined(memtracker):
trackLocation(addr result.size, sizeof(int))
-sysAssert((cast[ByteAddress](result) and PageMask) == 0, "requestOsChunks 1")
+sysAssert((cast[int](result) and PageMask) == 0, "requestOsChunks 1")
#zeroMem(result, size)
result.next = nil
result.prev = nil
result.size = size
# update next.prevSize:
-var nxt = cast[ByteAddress](result) +% size
+var nxt = cast[int](result) +% size
sysAssert((nxt and PageMask) == 0, "requestOsChunks 2")
var next = cast[PChunk](nxt)
if pageIndex(next) in a.chunkStarts:
@@ -509,7 +509,7 @@ proc requestOsChunks(a: var MemRegion, size: int): PBigChunk =
next.prevSize = size or (next.prevSize and 1)
# set result.prevSize:
var lastSize = if a.lastSize != 0: a.lastSize else: PageSize
-var prv = cast[ByteAddress](result) -% lastSize
+var prv = cast[int](result) -% lastSize
sysAssert((nxt and PageMask) == 0, "requestOsChunks 3")
var prev = cast[PChunk](prv)
if pageIndex(prev) in a.chunkStarts and prev.size == lastSize:
@@ -555,13 +555,13 @@ proc listRemove[T](head: var T, c: T) {.inline.} =
proc updatePrevSize(a: var MemRegion, c: PBigChunk,
prevSize: int) {.inline.} =
-var ri = cast[PChunk](cast[ByteAddress](c) +% c.size)
-sysAssert((cast[ByteAddress](ri) and PageMask) == 0, "updatePrevSize")
+var ri = cast[PChunk](cast[int](c) +% c.size)
+sysAssert((cast[int](ri) and PageMask) == 0, "updatePrevSize")
if isAccessible(a, ri):
ri.prevSize = prevSize or (ri.prevSize and 1)
proc splitChunk2(a: var MemRegion, c: PBigChunk, size: int): PBigChunk =
-result = cast[PBigChunk](cast[ByteAddress](c) +% size)
+result = cast[PBigChunk](cast[int](c) +% size)
result.size = c.size - size
track("result.size", addr result.size, sizeof(int))
when not defined(nimOptimizedSplitChunk):
@@ -590,8 +590,8 @@ proc freeBigChunk(a: var MemRegion, c: PBigChunk) =
when coalescLeft:
let prevSize = c.prevSize
if prevSize != 0:
-var le = cast[PChunk](cast[ByteAddress](c) -% prevSize)
-sysAssert((cast[ByteAddress](le) and PageMask) == 0, "freeBigChunk 4")
+var le = cast[PChunk](cast[int](c) -% prevSize)
+sysAssert((cast[int](le) and PageMask) == 0, "freeBigChunk 4")
if isAccessible(a, le) and chunkUnused(le):
sysAssert(not isSmallChunk(le), "freeBigChunk 5")
if not isSmallChunk(le) and le.size < MaxBigChunkSize:
@@ -607,8 +607,8 @@ proc freeBigChunk(a: var MemRegion, c: PBigChunk) =
addChunkToMatrix(a, c)
c = rest
when coalescRight:
-var ri = cast[PChunk](cast[ByteAddress](c) +% c.size)
-sysAssert((cast[ByteAddress](ri) and PageMask) == 0, "freeBigChunk 2")
+var ri = cast[PChunk](cast[int](c) +% c.size)
+sysAssert((cast[int](ri) and PageMask) == 0, "freeBigChunk 2")
if isAccessible(a, ri) and chunkUnused(ri):
sysAssert(not isSmallChunk(ri), "freeBigChunk 3")
if not isSmallChunk(ri) and c.size < MaxBigChunkSize:
@@ -669,7 +669,7 @@ proc getHugeChunk(a: var MemRegion; size: int): PBigChunk =
incCurrMem(a, size)
# XXX add this to the heap links. But also remove it from it later.
when false: a.addHeapLink(result, size)
-sysAssert((cast[ByteAddress](result) and PageMask) == 0, "getHugeChunk")
+sysAssert((cast[int](result) and PageMask) == 0, "getHugeChunk")
result.next = nil
result.prev = nil
result.size = size
@@ -838,7 +838,7 @@ proc rawAlloc(a: var MemRegion, requestedSize: int): pointer =
c.prev = nil
listAdd(a.freeSmallChunks[s], c)
result = addr(c.data)
-sysAssert((cast[ByteAddress](result) and (MemAlign-1)) == 0, "rawAlloc 4")
+sysAssert((cast[int](result) and (MemAlign-1)) == 0, "rawAlloc 4")
else:
sysAssert(allocInv(a), "rawAlloc: begin c != nil")
sysAssert c.next != c, "rawAlloc 5"
@@ -856,7 +856,7 @@ proc rawAlloc(a: var MemRegion, requestedSize: int): pointer =
if c.freeList == nil:
sysAssert(c.acc + smallChunkOverhead() + size <= SmallChunkSize,
"rawAlloc 7")
-result = cast[pointer](cast[ByteAddress](addr(c.data)) +% c.acc)
+result = cast[pointer](cast[int](addr(c.data)) +% c.acc)
inc(c.acc, size)
else:
result = c.freeList
@@ -864,14 +864,14 @@ proc rawAlloc(a: var MemRegion, requestedSize: int): pointer =
sysAssert(c.freeList.zeroField == 0, "rawAlloc 8")
c.freeList = c.freeList.next
dec(c.free, size)
-sysAssert((cast[ByteAddress](result) and (MemAlign-1)) == 0, "rawAlloc 9")
+sysAssert((cast[int](result) and (MemAlign-1)) == 0, "rawAlloc 9")
sysAssert(allocInv(a), "rawAlloc: end c != nil")
sysAssert(allocInv(a), "rawAlloc: before c.free < size")
if c.free < size:
sysAssert(allocInv(a), "rawAlloc: before listRemove test")
listRemove(a.freeSmallChunks[s], c)
sysAssert(allocInv(a), "rawAlloc: end listRemove test")
-sysAssert(((cast[ByteAddress](result) and PageMask) - smallChunkOverhead()) %%
+sysAssert(((cast[int](result) and PageMask) - smallChunkOverhead()) %%
size == 0, "rawAlloc 21")
sysAssert(allocInv(a), "rawAlloc: end small size")
inc a.occ, size
@@ -893,11 +893,11 @@ proc rawAlloc(a: var MemRegion, requestedSize: int): pointer =
sysAssert c.prev == nil, "rawAlloc 10"
sysAssert c.next == nil, "rawAlloc 11"
result = addr(c.data)
-sysAssert((cast[ByteAddress](c) and (MemAlign-1)) == 0, "rawAlloc 13")
-sysAssert((cast[ByteAddress](c) and PageMask) == 0, "rawAlloc: Not aligned on a page boundary")
+sysAssert((cast[int](c) and (MemAlign-1)) == 0, "rawAlloc 13")
+sysAssert((cast[int](c) and PageMask) == 0, "rawAlloc: Not aligned on a page boundary")
when not defined(gcDestructors):
if a.root == nil: a.root = getBottom(a)
-add(a, a.root, cast[ByteAddress](result), cast[ByteAddress](result)+%size)
+add(a, a.root, cast[int](result), cast[int](result)+%size)
inc a.occ, c.size
trackSize(c.size)
sysAssert(isAccessible(a, result), "rawAlloc 14")
@@ -927,10 +927,10 @@ proc rawDealloc(a: var MemRegion, p: pointer) =
dec a.occ, s
untrackSize(s)
sysAssert a.occ >= 0, "rawDealloc: negative occupied memory (case A)"
-sysAssert(((cast[ByteAddress](p) and PageMask) - smallChunkOverhead()) %%
+sysAssert(((cast[int](p) and PageMask) - smallChunkOverhead()) %%
s == 0, "rawDealloc 3")
when not defined(gcDestructors):
-#echo("setting to nil: ", $cast[ByteAddress](addr(f.zeroField)))
+#echo("setting to nil: ", $cast[int](addr(f.zeroField)))
sysAssert(f.zeroField != 0, "rawDealloc 1")
f.zeroField = 0
f.next = c.freeList
@@ -953,7 +953,7 @@ proc rawDealloc(a: var MemRegion, p: pointer) =
else:
when defined(gcDestructors):
addToSharedFreeList(c, f)
-sysAssert(((cast[ByteAddress](p) and PageMask) - smallChunkOverhead()) %%
+sysAssert(((cast[int](p) and PageMask) - smallChunkOverhead()) %%
s == 0, "rawDealloc 2")
else:
# set to 0xff to check for usage after free bugs:
@@ -975,7 +975,7 @@ when not defined(gcDestructors):
if not chunkUnused(c):
if isSmallChunk(c):
var c = cast[PSmallChunk](c)
-var offset = (cast[ByteAddress](p) and (PageSize-1)) -%
+var offset = (cast[int](p) and (PageSize-1)) -%
smallChunkOverhead()
result = (c.acc >% offset) and (offset %% c.size == 0) and
(cast[ptr FreeCell](p).zeroField >% 1)
@@ -993,12 +993,12 @@ when not defined(gcDestructors):
if not chunkUnused(c):
if isSmallChunk(c):
var c = cast[PSmallChunk](c)
-var offset = (cast[ByteAddress](p) and (PageSize-1)) -%
+var offset = (cast[int](p) and (PageSize-1)) -%
smallChunkOverhead()
if c.acc >% offset:
-sysAssert(cast[ByteAddress](addr(c.data)) +% offset ==
-cast[ByteAddress](p), "offset is not what you think it is")
-var d = cast[ptr FreeCell](cast[ByteAddress](addr(c.data)) +%
+sysAssert(cast[int](addr(c.data)) +% offset ==
+cast[int](p), "offset is not what you think it is")
+var d = cast[ptr FreeCell](cast[int](addr(c.data)) +%
offset -% (offset %% c.size))
if d.zeroField >% 1:
result = d
@@ -1025,7 +1025,7 @@ when not defined(gcDestructors):
proc ptrSize(p: pointer): int =
when not defined(gcDestructors):
-var x = cast[pointer](cast[ByteAddress](p) -% sizeof(FreeCell))
+var x = cast[pointer](cast[int](p) -% sizeof(FreeCell))
var c = pageAddr(p)
sysAssert(not chunkUnused(c), "ptrSize")
result = c.size -% sizeof(FreeCell)
@@ -1043,7 +1043,7 @@ proc alloc(allocator: var MemRegion, size: Natural): pointer {.gcsafe.} =
result = rawAlloc(allocator, size+sizeof(FreeCell))
cast[ptr FreeCell](result).zeroField = 1 # mark it as used
sysAssert(not isAllocatedPtr(allocator, result), "alloc")
-result = cast[pointer](cast[ByteAddress](result) +% sizeof(FreeCell))
+result = cast[pointer](cast[int](result) +% sizeof(FreeCell))
track("alloc", result, size)
else:
result = rawAlloc(allocator, size)
@@ -1055,7 +1055,7 @@ proc alloc0(allocator: var MemRegion, size: Natural): pointer =
proc dealloc(allocator: var MemRegion, p: pointer) =
when not defined(gcDestructors):
sysAssert(p != nil, "dealloc: p is nil")
-var x = cast[pointer](cast[ByteAddress](p) -% sizeof(FreeCell))
+var x = cast[pointer](cast[int](p) -% sizeof(FreeCell))
sysAssert(x != nil, "dealloc: x is nil")
sysAssert(isAccessible(allocator, x), "is not accessible")
sysAssert(cast[ptr FreeCell](x).zeroField == 1, "dealloc: object header corrupted")
@@ -1116,7 +1116,7 @@ template instantiateForRegion(allocator: untyped) {.dirty.} =
result = interiorAllocatedPtr(allocator, p)
proc isAllocatedPtr*(p: pointer): bool =
-let p = cast[pointer](cast[ByteAddress](p)-%ByteAddress(sizeof(Cell)))
+let p = cast[pointer](cast[int](p)-%ByteAddress(sizeof(Cell)))
result = isAllocatedPtr(allocator, p)
proc deallocOsPages = deallocOsPages(allocator)
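
Nearly every hunk in this allocator file is the same move: an address becomes `int` before being masked, bumped, or compared. The page arithmetic everything hinges on, with assumed constants (the allocator's real values may differ):

const PageShift = 12
const PageSize = 1 shl PageShift
const PageMask = PageSize - 1
var x = 0
let a = cast[int](addr x)
let pageStart = a and not PageMask   # round down to the enclosing page
assert (pageStart and PageMask) == 0
assert a - pageStart < PageSize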

View File

@@ -15,8 +15,8 @@ proc genericAssignAux(dest, src: pointer, mt: PNimType, shallow: bool) {.benign.
proc genericAssignAux(dest, src: pointer, n: ptr TNimNode,
shallow: bool) {.benign.} =
var
-d = cast[ByteAddress](dest)
-s = cast[ByteAddress](src)
+d = cast[int](dest)
+s = cast[int](src)
case n.kind
of nkSlot:
genericAssignAux(cast[pointer](d +% n.offset),
@@ -56,8 +56,8 @@ template deepSeqAssignImpl(operation, additionalArg) {.dirty.} =
proc genericAssignAux(dest, src: pointer, mt: PNimType, shallow: bool) =
var
-d = cast[ByteAddress](dest)
-s = cast[ByteAddress](src)
+d = cast[int](dest)
+s = cast[int](src)
sysAssert(mt != nil, "genericAssignAux 2")
case mt.kind
of tyString:
@@ -89,17 +89,17 @@ proc genericAssignAux(dest, src: pointer, mt: PNimType, shallow: bool) =
var ss = nimNewSeqOfCap(mt, seq.len)
cast[PGenericSeq](ss).len = seq.len
unsureAsgnRef(x, ss)
-var dst = cast[ByteAddress](cast[PPointer](dest)[])
+var dst = cast[int](cast[PPointer](dest)[])
copyMem(cast[pointer](dst +% align(GenericSeqSize, mt.base.align)),
-cast[pointer](cast[ByteAddress](s2) +% align(GenericSeqSize, mt.base.align)),
+cast[pointer](cast[int](s2) +% align(GenericSeqSize, mt.base.align)),
seq.len *% mt.base.size)
else:
unsureAsgnRef(x, newSeq(mt, seq.len))
-var dst = cast[ByteAddress](cast[PPointer](dest)[])
+var dst = cast[int](cast[PPointer](dest)[])
for i in 0..seq.len-1:
genericAssignAux(
cast[pointer](dst +% align(GenericSeqSize, mt.base.align) +% i *% mt.base.size ),
-cast[pointer](cast[ByteAddress](s2) +% align(GenericSeqSize, mt.base.align) +% i *% mt.base.size ),
+cast[pointer](cast[int](s2) +% align(GenericSeqSize, mt.base.align) +% i *% mt.base.size ),
mt.base, shallow)
of tyObject:
var it = mt.base
@@ -181,15 +181,15 @@ proc genericSeqAssign(dest, src: pointer, mt: PNimType) {.compilerproc.} =
proc genericAssignOpenArray(dest, src: pointer, len: int,
mt: PNimType) {.compilerproc.} =
var
-d = cast[ByteAddress](dest)
-s = cast[ByteAddress](src)
+d = cast[int](dest)
+s = cast[int](src)
for i in 0..len-1:
genericAssign(cast[pointer](d +% i *% mt.base.size),
cast[pointer](s +% i *% mt.base.size), mt.base)
proc objectInit(dest: pointer, typ: PNimType) {.compilerproc, benign.}
proc objectInitAux(dest: pointer, n: ptr TNimNode) {.benign.} =
-var d = cast[ByteAddress](dest)
+var d = cast[int](dest)
case n.kind
of nkNone: sysAssert(false, "objectInitAux")
of nkSlot: objectInit(cast[pointer](d +% n.offset), n.typ)
@@ -203,7 +203,7 @@ proc objectInitAux(dest: pointer, n: ptr TNimNode) {.benign.} =
proc objectInit(dest: pointer, typ: PNimType) =
# the generic init proc that takes care of initialization of complex
# objects on the stack or heap
-var d = cast[ByteAddress](dest)
+var d = cast[int](dest)
case typ.kind
of tyObject:
# iterate over any structural type
@@ -226,7 +226,7 @@ proc objectInit(dest: pointer, typ: PNimType) =
proc genericReset(dest: pointer, mt: PNimType) {.compilerproc, benign.}
proc genericResetAux(dest: pointer, n: ptr TNimNode) =
-var d = cast[ByteAddress](dest)
+var d = cast[int](dest)
case n.kind
of nkNone: sysAssert(false, "genericResetAux")
of nkSlot: genericReset(cast[pointer](d +% n.offset), n.typ)
@@ -238,7 +238,7 @@ proc genericResetAux(dest: pointer, n: ptr TNimNode) =
zeroMem(cast[pointer](d +% n.offset), n.typ.size)
proc genericReset(dest: pointer, mt: PNimType) =
-var d = cast[ByteAddress](dest)
+var d = cast[int](dest)
sysAssert(mt != nil, "genericReset 2")
case mt.kind
of tyRef:

View File

@@ -185,8 +185,8 @@ when not usesDestructors:
proc storeAux(dest, src: pointer, n: ptr TNimNode, t: PRawChannel,
mode: LoadStoreMode) {.benign.} =
var
-d = cast[ByteAddress](dest)
-s = cast[ByteAddress](src)
+d = cast[int](dest)
+s = cast[int](src)
case n.kind
of nkSlot: storeAux(cast[pointer](d +% n.offset),
cast[pointer](s +% n.offset), n.typ, t, mode)
@@ -205,8 +205,8 @@ when not usesDestructors:
cast[pointer](cast[int](p) +% x)
var
-d = cast[ByteAddress](dest)
-s = cast[ByteAddress](src)
+d = cast[int](dest)
+s = cast[int](src)
sysAssert(mt != nil, "mt == nil")
case mt.kind
of tyString:
@@ -245,14 +245,14 @@ when not usesDestructors:
x[] = alloc0(t.region, align(GenericSeqSize, mt.base.align) +% seq.len *% mt.base.size)
else:
unsureAsgnRef(x, newSeq(mt, seq.len))
-var dst = cast[ByteAddress](cast[PPointer](dest)[])
+var dst = cast[int](cast[PPointer](dest)[])
var dstseq = cast[PGenericSeq](dst)
dstseq.len = seq.len
dstseq.reserved = seq.len
for i in 0..seq.len-1:
storeAux(
cast[pointer](dst +% align(GenericSeqSize, mt.base.align) +% i *% mt.base.size),
-cast[pointer](cast[ByteAddress](s2) +% align(GenericSeqSize, mt.base.align) +%
+cast[pointer](cast[int](s2) +% align(GenericSeqSize, mt.base.align) +%
i *% mt.base.size),
mt.base, t, mode)
if mode != mStore: dealloc(t.region, s2)

View File

@@ -67,7 +67,7 @@ type # these work for most platforms:
## This is the same as the type `unsigned long long` in *C*.
type
-ByteAddress* = int
+ByteAddress* {.deprecated: "use `uint`".} = int
## is the signed integer type that should be used for converting
## pointers to integer addresses for readability.
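
Note the wrinkle: the deprecation message recommends `uint`, yet every call site in this commit switches to `int`, which is exactly what the alias expands to. What user code sees after the change, sketched:

var x = 42
let p = addr x
let before = cast[ByteAddress](p)   # still compiles, now warns: use `uint`
let after = cast[int](p)            # the replacement used throughout this commit
assert before == after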

View File

@@ -61,8 +61,8 @@ proc genericDeepCopyAux(dest, src: pointer, mt: PNimType;
proc genericDeepCopyAux(dest, src: pointer, n: ptr TNimNode;
tab: var PtrTable) {.benign.} =
var
-d = cast[ByteAddress](dest)
-s = cast[ByteAddress](src)
+d = cast[int](dest)
+s = cast[int](src)
case n.kind
of nkSlot:
genericDeepCopyAux(cast[pointer](d +% n.offset),
@@ -85,8 +85,8 @@ proc genericDeepCopyAux(dest, src: pointer, n: ptr TNimNode;
proc genericDeepCopyAux(dest, src: pointer, mt: PNimType; tab: var PtrTable) =
var
-d = cast[ByteAddress](dest)
-s = cast[ByteAddress](src)
+d = cast[int](dest)
+s = cast[int](src)
sysAssert(mt != nil, "genericDeepCopyAux 2")
case mt.kind
of tyString:
@@ -113,11 +113,11 @@ proc genericDeepCopyAux(dest, src: pointer, mt: PNimType; tab: var PtrTable) =
return
sysAssert(dest != nil, "genericDeepCopyAux 3")
unsureAsgnRef(x, newSeq(mt, seq.len))
-var dst = cast[ByteAddress](cast[PPointer](dest)[])
+var dst = cast[int](cast[PPointer](dest)[])
for i in 0..seq.len-1:
genericDeepCopyAux(
cast[pointer](dst +% align(GenericSeqSize, mt.base.align) +% i *% mt.base.size),
-cast[pointer](cast[ByteAddress](s2) +% align(GenericSeqSize, mt.base.align) +% i *% mt.base.size),
+cast[pointer](cast[int](s2) +% align(GenericSeqSize, mt.base.align) +% i *% mt.base.size),
mt.base, tab)
of tyObject:
# we need to copy m_type field for tyObject, as it could be empty for
@@ -199,8 +199,8 @@ proc genericSeqDeepCopy(dest, src: pointer, mt: PNimType) {.compilerproc.} =
proc genericDeepCopyOpenArray(dest, src: pointer, len: int,
mt: PNimType) {.compilerproc.} =
var
-d = cast[ByteAddress](dest)
-s = cast[ByteAddress](src)
+d = cast[int](dest)
+s = cast[int](src)
for i in 0..len-1:
genericDeepCopy(cast[pointer](d +% i *% mt.base.size),
cast[pointer](s +% i *% mt.base.size), mt.base)

View File

@@ -170,11 +170,11 @@ proc addZCT(s: var CellSeq, c: PCell) {.noinline.} =
proc cellToUsr(cell: PCell): pointer {.inline.} =
# convert object (=pointer to refcount) to pointer to userdata
-result = cast[pointer](cast[ByteAddress](cell)+%ByteAddress(sizeof(Cell)))
+result = cast[pointer](cast[int](cell)+%ByteAddress(sizeof(Cell)))
proc usrToCell(usr: pointer): PCell {.inline.} =
# convert pointer to userdata to object (=pointer to refcount)
-result = cast[PCell](cast[ByteAddress](usr)-%ByteAddress(sizeof(Cell)))
+result = cast[PCell](cast[int](usr)-%ByteAddress(sizeof(Cell)))
proc extGetCellType(c: pointer): PNimType {.compilerproc.} =
# used for code generation concerning debugging
@@ -336,7 +336,7 @@ proc cellsetReset(s: var CellSet) =
{.push stacktrace:off.}
proc forAllSlotsAux(dest: pointer, n: ptr TNimNode, op: WalkOp) {.benign.} =
-var d = cast[ByteAddress](dest)
+var d = cast[int](dest)
case n.kind
of nkSlot: forAllChildrenAux(cast[pointer](d +% n.offset), n.typ, op)
of nkList:
@@ -356,7 +356,7 @@ proc forAllSlotsAux(dest: pointer, n: ptr TNimNode, op: WalkOp) {.benign.} =
of nkNone: sysAssert(false, "forAllSlotsAux")
proc forAllChildrenAux(dest: pointer, mt: PNimType, op: WalkOp) =
-var d = cast[ByteAddress](dest)
+var d = cast[int](dest)
if dest == nil: return # nothing to do
if ntfNoRefs notin mt.flags:
case mt.kind
@@ -382,7 +382,7 @@ proc forAllChildren(cell: PCell, op: WalkOp) =
of tyRef: # common case
forAllChildrenAux(cellToUsr(cell), cell.typ.base, op)
of tySequence:
-var d = cast[ByteAddress](cellToUsr(cell))
+var d = cast[int](cellToUsr(cell))
var s = cast[PGenericSeq](d)
if s != nil:
for i in 0..s.len-1:
@@ -457,7 +457,7 @@ proc rawNewObj(typ: PNimType, size: int, gch: var GcHeap): pointer =
collectCT(gch)
var res = cast[PCell](rawAlloc(gch.region, size + sizeof(Cell)))
#gcAssert typ.kind in {tyString, tySequence} or size >= typ.base.size, "size too small"
-gcAssert((cast[ByteAddress](res) and (MemAlign-1)) == 0, "newObj: 2")
+gcAssert((cast[int](res) and (MemAlign-1)) == 0, "newObj: 2")
# now it is buffered in the ZCT
res.typ = typ
setFrameInfo(res)
@@ -507,7 +507,7 @@ proc newObjRC1(typ: PNimType, size: int): pointer {.compilerRtl, noinline.} =
var res = cast[PCell](rawAlloc(gch.region, size + sizeof(Cell)))
sysAssert(allocInv(gch.region), "newObjRC1 after rawAlloc")
-sysAssert((cast[ByteAddress](res) and (MemAlign-1)) == 0, "newObj: 2")
+sysAssert((cast[int](res) and (MemAlign-1)) == 0, "newObj: 2")
# now it is buffered in the ZCT
res.typ = typ
setFrameInfo(res)
@@ -549,9 +549,9 @@ proc growObj(old: pointer, newsize: int, gch: var GcHeap): pointer =
var oldsize = align(GenericSeqSize, elemAlign) + cast[PGenericSeq](old).len * elemSize
copyMem(res, ol, oldsize + sizeof(Cell))
-zeroMem(cast[pointer](cast[ByteAddress](res) +% oldsize +% sizeof(Cell)),
+zeroMem(cast[pointer](cast[int](res) +% oldsize +% sizeof(Cell)),
newsize-oldsize)
-sysAssert((cast[ByteAddress](res) and (MemAlign-1)) == 0, "growObj: 3")
+sysAssert((cast[int](res) and (MemAlign-1)) == 0, "growObj: 3")
# This can be wrong for intermediate temps that are nevertheless on the
# heap because of lambda lifting:
#gcAssert(res.refcount shr rcShift <=% 1, "growObj: 4")
@@ -683,7 +683,7 @@ proc collectCycles(gch: var GcHeap) {.raises: [].} =
proc gcMark(gch: var GcHeap, p: pointer) {.inline.} =
# the addresses are not as cells on the stack, so turn them to cells:
sysAssert(allocInv(gch.region), "gcMark begin")
-var c = cast[ByteAddress](p)
+var c = cast[int](p)
if c >% PageSize:
# fast check: does it look like a cell?
var objStart = cast[PCell](interiorAllocatedPtr(gch.region, p))
@@ -848,10 +848,10 @@ when withRealTime:
stack.bottomSaved = stack.bottom
when stackIncreases:
stack.bottom = cast[pointer](
-cast[ByteAddress](stack.pos) - sizeof(pointer) * 6 - stackSize)
+cast[int](stack.pos) - sizeof(pointer) * 6 - stackSize)
else:
stack.bottom = cast[pointer](
-cast[ByteAddress](stack.pos) + sizeof(pointer) * 6 + stackSize)
+cast[int](stack.pos) + sizeof(pointer) * 6 + stackSize)
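
`cellToUsr` and `usrToCell` above are inverse shifts across the hidden `Cell` header that precedes every GC-managed object. A sketch of the round trip with a stand-in header type:

type Cell = object
  refcount: int
let raw = alloc0(sizeof(Cell) + 32)
let usr = cast[pointer](cast[int](raw) +% sizeof(Cell))   # cellToUsr
let back = cast[pointer](cast[int](usr) -% sizeof(Cell))  # usrToCell
assert back == raw
dealloc(raw)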
GC_step(gch, us, strongAdvice)

View File

@@ -133,11 +133,11 @@ template gcAssert(cond: bool, msg: string) =
proc cellToUsr(cell: PCell): pointer {.inline.} =
# convert object (=pointer to refcount) to pointer to userdata
-result = cast[pointer](cast[ByteAddress](cell)+%ByteAddress(sizeof(Cell)))
+result = cast[pointer](cast[int](cell)+%ByteAddress(sizeof(Cell)))
proc usrToCell(usr: pointer): PCell {.inline.} =
# convert pointer to userdata to object (=pointer to refcount)
-result = cast[PCell](cast[ByteAddress](usr)-%ByteAddress(sizeof(Cell)))
+result = cast[PCell](cast[int](usr)-%ByteAddress(sizeof(Cell)))
proc extGetCellType(c: pointer): PNimType {.compilerproc.} =
# used for code generation concerning debugging
@@ -252,7 +252,7 @@ proc unsureAsgnRef(dest: PPointer, src: pointer) {.compilerproc.} =
dest[] = src
proc forAllSlotsAux(dest: pointer, n: ptr TNimNode, op: WalkOp) {.benign.} =
-var d = cast[ByteAddress](dest)
+var d = cast[int](dest)
case n.kind
of nkSlot: forAllChildrenAux(cast[pointer](d +% n.offset), n.typ, op)
of nkList:
@@ -264,7 +264,7 @@ proc forAllSlotsAux(dest: pointer, n: ptr TNimNode, op: WalkOp) {.benign.} =
of nkNone: sysAssert(false, "forAllSlotsAux")
proc forAllChildrenAux(dest: pointer, mt: PNimType, op: WalkOp) =
-var d = cast[ByteAddress](dest)
+var d = cast[int](dest)
if dest == nil: return # nothing to do
if ntfNoRefs notin mt.flags:
case mt.kind
@@ -290,7 +290,7 @@ proc forAllChildren(cell: PCell, op: WalkOp) =
of tyRef: # common case
forAllChildrenAux(cellToUsr(cell), cell.typ.base, op)
of tySequence:
-var d = cast[ByteAddress](cellToUsr(cell))
+var d = cast[int](cellToUsr(cell))
var s = cast[PGenericSeq](d)
if s != nil:
for i in 0..s.len-1:
@@ -330,7 +330,7 @@ proc rawNewObj(typ: PNimType, size: int, gch: var GcHeap): pointer =
gcAssert(typ.kind in {tyRef, tyString, tySequence}, "newObj: 1")
collectCT(gch)
var res = cast[PCell](rawAlloc(gch.region, size + sizeof(Cell)))
-gcAssert((cast[ByteAddress](res) and (MemAlign-1)) == 0, "newObj: 2")
+gcAssert((cast[int](res) and (MemAlign-1)) == 0, "newObj: 2")
# now it is buffered in the ZCT
res.typ = typ
when leakDetector and not hasThreadSupport:
@@ -388,9 +388,9 @@ proc growObj(old: pointer, newsize: int, gch: var GcHeap): pointer =
var oldsize = align(GenericSeqSize, elemAlign) + cast[PGenericSeq](old).len*elemSize
copyMem(res, ol, oldsize + sizeof(Cell))
-zeroMem(cast[pointer](cast[ByteAddress](res)+% oldsize +% sizeof(Cell)),
+zeroMem(cast[pointer](cast[int](res)+% oldsize +% sizeof(Cell)),
newsize-oldsize)
-sysAssert((cast[ByteAddress](res) and (MemAlign-1)) == 0, "growObj: 3")
+sysAssert((cast[int](res) and (MemAlign-1)) == 0, "growObj: 3")
when false:
# this is wrong since seqs can be shared via 'shallow':
when reallyDealloc: rawDealloc(gch.region, ol)
@@ -593,7 +593,7 @@ proc gcMark(gch: var GcHeap, p: pointer) {.inline.} =
# the addresses are not as cells on the stack, so turn them to cells:
sysAssert(allocInv(gch.region), "gcMark begin")
var cell = usrToCell(p)
-var c = cast[ByteAddress](cell)
+var c = cast[int](cell)
if c >% PageSize:
# fast check: does it look like a cell?
var objStart = cast[PCell](interiorAllocatedPtr(gch.region, cell))
@@ -697,10 +697,10 @@ when withRealTime:
stack.bottomSaved = stack.bottom
when stackIncreases:
stack.bottom = cast[pointer](
-cast[ByteAddress](stack.pos) - sizeof(pointer) * 6 - stackSize)
+cast[int](stack.pos) - sizeof(pointer) * 6 - stackSize)
else:
stack.bottom = cast[pointer](
-cast[ByteAddress](stack.pos) + sizeof(pointer) * 6 + stackSize)
+cast[int](stack.pos) + sizeof(pointer) * 6 + stackSize)
GC_step(gch, us, strongAdvice)

View File

@@ -222,9 +222,9 @@ proc stackSize(stack: ptr GcStack): int {.noinline.} =
if pos != nil:
when stackIncreases:
-result = cast[ByteAddress](pos) -% cast[ByteAddress](stack.bottom)
+result = cast[int](pos) -% cast[int](stack.bottom)
else:
-result = cast[ByteAddress](stack.bottom) -% cast[ByteAddress](pos)
+result = cast[int](stack.bottom) -% cast[int](pos)
else:
result = 0
@@ -295,8 +295,8 @@ when not defined(useNimRtl):
# the first init must be the one that defines the stack bottom:
gch.stack.bottom = theStackBottom
elif theStackBottom != gch.stack.bottom:
-var a = cast[ByteAddress](theStackBottom) # and not PageMask - PageSize*2
-var b = cast[ByteAddress](gch.stack.bottom)
+var a = cast[int](theStackBottom) # and not PageMask - PageSize*2
+var b = cast[int](gch.stack.bottom)
#c_fprintf(stdout, "old: %p new: %p;\n",gch.stack.bottom,theStackBottom)
when stackIncreases:
gch.stack.bottom = cast[pointer](min(a, b))
@@ -312,11 +312,11 @@ when not defined(useNimRtl):
proc isOnStack(p: pointer): bool =
var stackTop {.volatile, noinit.}: pointer
stackTop = addr(stackTop)
-var a = cast[ByteAddress](gch.getActiveStack().bottom)
-var b = cast[ByteAddress](stackTop)
+var a = cast[int](gch.getActiveStack().bottom)
+var b = cast[int](stackTop)
when not stackIncreases:
swap(a, b)
-var x = cast[ByteAddress](p)
+var x = cast[int](p)
result = a <=% x and x <=% b
when defined(sparc): # For SPARC architecture.
@@ -337,7 +337,7 @@ when defined(sparc): # For SPARC architecture.
# Addresses decrease as the stack grows.
while sp <= max:
gcMark(gch, sp[])
-sp = cast[PPointer](cast[ByteAddress](sp) +% sizeof(pointer))
+sp = cast[PPointer](cast[int](sp) +% sizeof(pointer))
elif defined(ELATE):
{.error: "stack marking code is to be written for this architecture".}
@@ -354,8 +354,8 @@ elif stackIncreases:
template forEachStackSlotAux(gch, gcMark: untyped) {.dirty.} =
for stack in gch.stack.items():
-var max = cast[ByteAddress](gch.stack.bottom)
-var sp = cast[ByteAddress](addr(registers)) -% sizeof(pointer)
+var max = cast[int](gch.stack.bottom)
+var sp = cast[int](addr(registers)) -% sizeof(pointer)
while sp >=% max:
gcMark(gch, cast[PPointer](sp)[])
sp = sp -% sizeof(pointer)
@@ -383,8 +383,8 @@ else:
gch.getActiveStack().setPosition(addr(registers))
if c_setjmp(registers) == 0'i32: # To fill the C stack with registers.
for stack in gch.stack.items():
-var max = cast[ByteAddress](stack.bottom)
-var sp = cast[ByteAddress](addr(registers))
+var max = cast[int](stack.bottom)
+var sp = cast[int](addr(registers))
when defined(amd64):
if stack.isActiveStack():
# words within the jmp_buf structure may not be properly aligned.
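
`isOnStack` and the marking loops above reduce to one range test: normalize the two stack ends so the lower address comes first, then compare with the unsigned operators. A sketch of just that test:

proc between(lo, hi, x: int): bool =
  var a = lo
  var b = hi
  if a > b: swap(a, b)
  a <=% x and x <=% b

var v = 0
let x = cast[int](addr v)
assert between(x - 8, x + 8, x)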

View File

@@ -94,11 +94,11 @@ template gcAssert(cond: bool, msg: string) =
proc cellToUsr(cell: PCell): pointer {.inline.} =
# convert object (=pointer to refcount) to pointer to userdata
-result = cast[pointer](cast[ByteAddress](cell)+%ByteAddress(sizeof(Cell)))
+result = cast[pointer](cast[int](cell)+%ByteAddress(sizeof(Cell)))
proc usrToCell(usr: pointer): PCell {.inline.} =
# convert pointer to userdata to object (=pointer to refcount)
-result = cast[PCell](cast[ByteAddress](usr)-%ByteAddress(sizeof(Cell)))
+result = cast[PCell](cast[int](usr)-%ByteAddress(sizeof(Cell)))
proc extGetCellType(c: pointer): PNimType {.compilerproc.} =
# used for code generation concerning debugging
@@ -217,7 +217,7 @@ proc initGC() =
gcAssert(gch.gcThreadId >= 0, "invalid computed thread ID")
proc forAllSlotsAux(dest: pointer, n: ptr TNimNode, op: WalkOp) {.benign.} =
-var d = cast[ByteAddress](dest)
+var d = cast[int](dest)
case n.kind
of nkSlot: forAllChildrenAux(cast[pointer](d +% n.offset), n.typ, op)
of nkList:
@@ -229,7 +229,7 @@ proc forAllSlotsAux(dest: pointer, n: ptr TNimNode, op: WalkOp) {.benign.} =
of nkNone: sysAssert(false, "forAllSlotsAux")
proc forAllChildrenAux(dest: pointer, mt: PNimType, op: WalkOp) =
-var d = cast[ByteAddress](dest)
+var d = cast[int](dest)
if dest == nil: return # nothing to do
if ntfNoRefs notin mt.flags:
case mt.kind
@@ -255,7 +255,7 @@ proc forAllChildren(cell: PCell, op: WalkOp) =
forAllChildrenAux(cellToUsr(cell), cell.typ.base, op)
of tySequence:
when not defined(nimSeqsV2):
-var d = cast[ByteAddress](cellToUsr(cell))
+var d = cast[int](cellToUsr(cell))
var s = cast[PGenericSeq](d)
if s != nil:
for i in 0..s.len-1:
@@ -268,7 +268,7 @@ proc rawNewObj(typ: PNimType, size: int, gch: var GcHeap): pointer =
gcAssert(typ.kind in {tyRef, tyString, tySequence}, "newObj: 1")
collectCT(gch, size + sizeof(Cell))
var res = cast[PCell](rawAlloc(gch.region, size + sizeof(Cell)))
-gcAssert((cast[ByteAddress](res) and (MemAlign-1)) == 0, "newObj: 2")
+gcAssert((cast[int](res) and (MemAlign-1)) == 0, "newObj: 2")
# now it is buffered in the ZCT
res.typ = typ
when leakDetector and not hasThreadSupport:
@@ -336,9 +336,9 @@ when not defined(nimSeqsV2):
var oldsize = align(GenericSeqSize, elemAlign) + cast[PGenericSeq](old).len*elemSize
copyMem(res, ol, oldsize + sizeof(Cell))
-zeroMem(cast[pointer](cast[ByteAddress](res)+% oldsize +% sizeof(Cell)),
+zeroMem(cast[pointer](cast[int](res)+% oldsize +% sizeof(Cell)),
newsize-oldsize)
-sysAssert((cast[ByteAddress](res) and (MemAlign-1)) == 0, "growObj: 3")
+sysAssert((cast[int](res) and (MemAlign-1)) == 0, "growObj: 3")
when withBitvectors: incl(gch.allocated, res)
when useCellIds:
inc gch.idGenerator
@@ -446,7 +446,7 @@ proc markGlobals(gch: var GcHeap) =
proc gcMark(gch: var GcHeap, p: pointer) {.inline.} =
# the addresses are not as cells on the stack, so turn them to cells:
-var c = cast[ByteAddress](p)
+var c = cast[int](p)
if c >% PageSize:
# fast check: does it look like a cell?
var objStart = cast[PCell](interiorAllocatedPtr(gch.region, p))

View File

@@ -80,12 +80,12 @@ elif defined(emscripten) and not defined(StandaloneHeapSize):
let pos = cast[int](result)
# Convert pointer to PageSize correct one.
-var new_pos = cast[ByteAddress](pos) +% (PageSize - (pos %% PageSize))
+var new_pos = cast[int](pos) +% (PageSize - (pos %% PageSize))
if (new_pos-pos) < sizeof(EmscriptenMMapBlock):
new_pos = new_pos +% PageSize
result = cast[pointer](new_pos)
-var mmapDescrPos = cast[ByteAddress](result) -% sizeof(EmscriptenMMapBlock)
+var mmapDescrPos = cast[int](result) -% sizeof(EmscriptenMMapBlock)
var mmapDescr = cast[EmscriptenMMapBlock](mmapDescrPos)
mmapDescr.realSize = realSize
@@ -96,7 +96,7 @@ elif defined(emscripten) and not defined(StandaloneHeapSize):
proc osTryAllocPages(size: int): pointer = osAllocPages(size)
proc osDeallocPages(p: pointer, size: int) {.inline.} =
-var mmapDescrPos = cast[ByteAddress](p) -% sizeof(EmscriptenMMapBlock)
+var mmapDescrPos = cast[int](p) -% sizeof(EmscriptenMMapBlock)
var mmapDescr = cast[EmscriptenMMapBlock](mmapDescrPos)
munmap(mmapDescr.realPointer, mmapDescr.realSize)
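
The Emscripten path above rounds the raw `mmap` result up to the next page boundary and stashes a descriptor just below the pointer it returns. The rounding step on its own, with `PageSize` assumed to be 4096:

const PageSize = 4096
let pos = 12345
let newPos = pos +% (PageSize - (pos %% PageSize))
assert newPos %% PageSize == 0
assert newPos > pos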

View File

@@ -155,7 +155,7 @@ when not defined(useNimRtl):
var bs = typ.base.size
for i in 0..typ.size div bs - 1:
if i > 0: add result, ", "
-reprAux(result, cast[pointer](cast[ByteAddress](p) + i*bs), typ.base, cl)
+reprAux(result, cast[pointer](cast[int](p) + i*bs), typ.base, cl)
add result, "]"
when defined(nimSeqsV2):
@@ -183,7 +183,7 @@ when not defined(useNimRtl):
var bs = typ.base.size
for i in 0..cast[PGenericSeq](p).len-1:
if i > 0: add result, ", "
-reprAux(result, cast[pointer](cast[ByteAddress](payloadPtr(p)) + align(payloadOffset, typ.align) + i*bs),
+reprAux(result, cast[pointer](cast[int](payloadPtr(p)) + align(payloadOffset, typ.align) + i*bs),
typ.base, cl)
add result, "]"
@@ -194,14 +194,14 @@ when not defined(useNimRtl):
of nkSlot:
add result, $n.name
add result, " = "
-reprAux(result, cast[pointer](cast[ByteAddress](p) + n.offset), n.typ, cl)
+reprAux(result, cast[pointer](cast[int](p) + n.offset), n.typ, cl)
of nkList:
for i in 0..n.len-1:
if i > 0: add result, ",\n"
reprRecordAux(result, p, n.sons[i], cl)
of nkCase:
var m = selectBranch(p, n)
-reprAux(result, cast[pointer](cast[ByteAddress](p) + n.offset), n.typ, cl)
+reprAux(result, cast[pointer](cast[int](p) + n.offset), n.typ, cl)
if m != nil: reprRecordAux(result, p, m, cl)
proc reprRecord(result: var string, p: pointer, typ: PNimType,
@@ -307,7 +307,7 @@ when not defined(useNimRtl):
var bs = elemtyp.size
for i in 0..length - 1:
if i > 0: add result, ", "
-reprAux(result, cast[pointer](cast[ByteAddress](p) + i*bs), elemtyp, cl)
+reprAux(result, cast[pointer](cast[int](p) + i*bs), elemtyp, cl)
add result, "]"
deinitReprClosure(cl)

View File

@@ -17,10 +17,10 @@
proc dataPointer(a: PGenericSeq, elemAlign: int): pointer =
-cast[pointer](cast[ByteAddress](a) +% align(GenericSeqSize, elemAlign))
+cast[pointer](cast[int](a) +% align(GenericSeqSize, elemAlign))
proc dataPointer(a: PGenericSeq, elemAlign, elemSize, index: int): pointer =
-cast[pointer](cast[ByteAddress](a) +% align(GenericSeqSize, elemAlign) +% (index*%elemSize))
+cast[pointer](cast[int](a) +% align(GenericSeqSize, elemAlign) +% (index*%elemSize))
proc resize(old: int): int {.inline.} =
if old <= 0: result = 4
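
`dataPointer` above skips the seq header, padded up to the element alignment. The `align` helper it relies on is round-up-to-a-multiple; a self-contained version with an assumed header size:

proc align(address, alignment: int): int =
  (address + (alignment - 1)) and not (alignment - 1)

const GenericSeqSize = 16   # assumed header size, for illustration only
assert align(GenericSeqSize, 8) == 16
assert align(GenericSeqSize, 32) == 32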

View File

@@ -5,7 +5,7 @@ output: "90"
when false:
template lock(a, b: ptr Lock; body: stmt) =
-if cast[ByteAddress](a) < cast[ByteAddress](b):
+if cast[int](a) < cast[int](b):
pthread_mutex_lock(a)
pthread_mutex_lock(b)
else:
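
The address comparison in this test is the classic lock-ordering trick: if every thread acquires the two locks in a single global order (here, by address), no acquire cycle and hence no deadlock can form. A runnable sketch with std/locks standing in for the raw pthread calls:

import std/locks

template lockPair(a, b: ptr Lock; body: untyped) =
  # always acquire the lower address first, release in reverse order
  let (lo, hi) = if cast[int](a) < cast[int](b): (a, b) else: (b, a)
  acquire(lo[])
  acquire(hi[])
  try:
    body
  finally:
    release(hi[])
    release(lo[])

var l1, l2: Lock
initLock(l1)
initLock(l2)
lockPair(addr l1, addr l2):
  echo "both locks held"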