Mirror of https://github.com/nim-lang/Nim.git

Commit: bugfixes for .rod files and visual C++
@@ -73,7 +73,7 @@ proc CompileModule(filename: string, flags: TSymFlags): PSym =
   var f = addFileExt(filename, nimExt)
   result = newModule(filename)
   result.flags = result.flags + flags
-  if gCmd in {cmdCompileToC, cmdCompileToCpp}:
+  if gCmd in {cmdCompileToC, cmdCompileToCpp, cmdCheck, cmdIdeTools}:
     rd = handleSymbolFile(result, f)
     if result.id < 0:
       InternalError("handleSymbolFile should have set the module's ID")
@@ -643,6 +643,10 @@ proc startsWith(buf: cstring, token: string, pos = 0): bool =
 proc newRodReader(modfilename: string, crc: TCrc32,
                   readerIndex: int): PRodReader =
   new(result)
+  try:
+    result.memFile = memfiles.open(modfilename)
+  except EOS:
+    return nil
   result.files = @[]
   result.modDeps = @[]
   result.methods = @[]

@@ -653,10 +657,6 @@ proc newRodReader(modfilename: string, crc: TCrc32,
   r.readerIndex = readerIndex
   r.filename = modfilename
   InitIdTable(r.syms)
-  try:
-    r.memFile = memfiles.open(modfilename)
-  except EOS:
-    return nil
   # we terminate the file explicitely with ``\0``, so the cast to `cstring`
   # is safe:
   r.s = cast[cstring](r.memFile.mem)
@@ -793,6 +793,7 @@ proc getModuleIdx(filename: string): int =
     setlen(gMods, result + 1)
 
 proc checkDep(filename: string): TReasonForRecompile =
+  assert(not isNil(filename))
   var idx = getModuleIdx(filename)
   if gMods[idx].reason != rrEmpty:
     # reason has already been computed for this module:

@@ -804,30 +805,28 @@ proc checkDep(filename: string): TReasonForRecompile =
   result = rrNone
   var r: PRodReader = nil
   var rodfile = toGeneratedFile(filename, RodExt)
-  if ExistsFile(rodfile):
-    r = newRodReader(rodfile, crc, idx)
-    if r == nil:
-      result = rrRodInvalid
-    else:
-      result = r.reason
-      if result == rrNone:
-        # check modules it depends on
-        # NOTE: we need to process the entire module graph so that no ID will
-        # be used twice! However, compilation speed does not suffer much from
-        # this, since results are cached.
-        var res = checkDep(options.libpath / addFileExt("system", nimExt))
-        if res != rrNone: result = rrModDeps
-        for i in countup(0, high(r.modDeps)):
-          res = checkDep(r.modDeps[i])
-          if res != rrNone:
-            result = rrModDeps
-            # we cannot break here, because of side-effects of `checkDep`
-  else:
-    result = rrRodDoesNotExist
+  r = newRodReader(rodfile, crc, idx)
+  if r == nil:
+    result = (if ExistsFile(rodfile): rrRodInvalid else: rrRodDoesNotExist)
+  else:
+    result = r.reason
+    if result == rrNone:
+      # check modules it depends on
+      # NOTE: we need to process the entire module graph so that no ID will
+      # be used twice! However, compilation speed does not suffer much from
+      # this, since results are cached.
+      var res = checkDep(options.libpath / addFileExt("system", nimExt))
+      if res != rrNone: result = rrModDeps
+      for i in countup(0, high(r.modDeps)):
+        res = checkDep(r.modDeps[i])
+        if res != rrNone:
+          result = rrModDeps
+          # we cannot break here, because of side-effects of `checkDep`
   if result != rrNone and gVerbosity > 0:
     rawMessage(hintProcessing, reasonToFrmt[result] % filename)
-  if result != rrNone or optForceFullMake in gGlobalOptions:
+  if result != rrNone or optForceFullMake in gGlobalOptions:
     # recompilation is necessary:
+    if r != nil: memfiles.close(r.memFile)
     r = nil
   gMods[idx].rd = r
   gMods[idx].reason = result # now we know better
@@ -853,8 +852,12 @@ proc handleSymbolFile(module: PSym, filename: string): PRodReader =
     module.id = getID()
 
 proc GetCRC*(filename: string): TCrc32 =
-  var idx = getModuleIdx(filename)
-  result = gMods[idx].crc
+  for i in countup(0, high(gMods)):
+    if sameFile(gMods[i].filename, filename): return gMods[i].crc
+
+  result = crcFromFile(filename)
+  #var idx = getModuleIdx(filename)
+  #result = gMods[idx].crc
 
 proc rawLoadStub(s: PSym) =
   if s.kind != skStub: InternalError("loadStub")
@@ -423,6 +423,7 @@ proc writeRod(w: PRodWriter) =
   var f: TFile
   if not open(f, completeGeneratedFilePath(changeFileExt(w.filename, "rod")),
               fmWrite):
+    #echo "couldn't write rod file for: ", w.filename
     return
   # write header:
   f.write("NIM:")
@@ -72,7 +72,7 @@ icc.options.linker = "-cxxlib"
 
 # Configuration for the GNU C/C++ compiler:
 @if windows:
-  gcc.path = r"$nimrod\dist\mingw\bin"
+  #gcc.path = r"$nimrod\dist\mingw\bin"
 @end
 gcc.options.debug = "-g3 -O0"
 
@@ -79,10 +79,10 @@ __TINYC__
 
 /* --------------- how int64 constants should be declared: ----------- */
 #if defined(__GNUC__) || defined(__LCC__) || \
-    defined(__POCC__) || defined(__DMC__)
+    defined(__POCC__) || defined(__DMC__) || defined(_MSC_VER)
 #  define IL64(x) x##LL
 #else /* works only without LL */
-#  define IL64(x) x
+#  define IL64(x) ((NI64)x)
 #endif
 
 /* ---------------- casting without correct aliasing rules ----------- */
@@ -47,9 +47,9 @@ elif defined(windows):
 
   when defined(vcc):
     # newest version of Visual C++ defines time_t to be of 64 bits
-    type TTimeImpl {.importc: "time_t", header: "<sys/time.h>".} = int64
+    type TTimeImpl {.importc: "time_t", header: "<time.h>".} = int64
   else:
-    type TTimeImpl {.importc: "time_t", header: "<sys/time.h>".} = int32
+    type TTimeImpl {.importc: "time_t", header: "<time.h>".} = int32
 
   type
     TTime* = distinct TTimeImpl
@@ -808,8 +808,9 @@ when hasThreadSupport:
 else:
   {.pragma: rtlThreadVar.}
 
-template sysAssert(cond: expr) =
+template sysAssert(cond, msg: expr) =
   # change this to activate system asserts
+  #if not cond: echo msg
   nil
 
 include "system/inclrtl"
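The two-parameter sysAssert above is what lets every call site changed later in this commit pass a diagnostic string; as committed, the template still expands to nothing. A minimal sketch of what an activated variant could look like, assuming a hypothetical useSysAssert define that is not part of this commit:

template sysAssert(cond, msg: expr) =
  # hypothetical activated form; `useSysAssert` is an assumed switch,
  # not something introduced by this commit
  when defined(useSysAssert):
    if not cond:
      echo "[SYSASSERT] ", msg
      quit 1
  else:
    nil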
@@ -1821,14 +1822,14 @@ when not defined(EcmaScript) and not defined(NimrodVM):
   proc reprAny(p: pointer, typ: PNimType): string {.compilerRtl.}
 
   proc getDiscriminant(aa: Pointer, n: ptr TNimNode): int =
-    sysAssert(n.kind == nkCase)
+    sysAssert(n.kind == nkCase, "getDiscriminant: node != nkCase")
     var d: int
     var a = cast[TAddress](aa)
     case n.typ.size
     of 1: d = ze(cast[ptr int8](a +% n.offset)[])
     of 2: d = ze(cast[ptr int16](a +% n.offset)[])
     of 4: d = int(cast[ptr int32](a +% n.offset)[])
-    else: sysAssert(false)
+    else: sysAssert(false, "getDiscriminant: invalid n.typ.size")
     return d
 
   proc selectBranch(aa: Pointer, n: ptr TNimNode): ptr TNimNode =
@@ -131,12 +131,12 @@ template bigChunkOverhead(): expr = sizeof(TBigChunk)-sizeof(TAlignType)
 
 proc roundup(x, v: int): int {.inline.} =
   result = (x + (v-1)) and not (v-1)
-  sysAssert(result >= x)
+  sysAssert(result >= x, "roundup: result < x")
   #return ((-x) and (v-1)) +% x
 
-sysAssert(roundup(14, PageSize) == PageSize)
-sysAssert(roundup(15, 8) == 16)
-sysAssert(roundup(65, 8) == 72)
+sysAssert(roundup(14, PageSize) == PageSize, "invalid PageSize")
+sysAssert(roundup(15, 8) == 16, "roundup broken")
+sysAssert(roundup(65, 8) == 72, "roundup broken 2")
 
 # ------------- chunk table ---------------------------------------------------
 # We use a PtrSet of chunk starts and a table[Page, chunksize] for chunk
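For context on the checks above: the roundup trick only works when v is a power of two; adding v-1 carries x past the next alignment boundary and masking with not (v-1) clears the low bits. A small self-contained sketch (not part of the commit) mirroring those checks:

proc roundup(x, v: int): int =
  # assumes v is a power of two, as PageSize, MemAlign and 8 all are
  result = (x + (v-1)) and not (v-1)

when isMainModule:
  assert roundup(15, 8) == 16   # rounded up to the next multiple of 8
  assert roundup(65, 8) == 72
  assert roundup(64, 8) == 64   # already-aligned values are unchanged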
@@ -180,7 +180,7 @@ proc llAlloc(a: var TMemRegion, size: int): pointer =
     # the requested size is ``roundup(size+sizeof(TLLChunk), PageSize)``, but
     # since we know ``size`` is a (small) constant, we know the requested size
     # is one page:
-    sysAssert roundup(size+sizeof(TLLChunk), PageSize) == PageSize
+    sysAssert roundup(size+sizeof(TLLChunk), PageSize) == PageSize, "roundup 6"
     var old = a.llmem # can be nil and is correct with nil
     a.llmem = cast[PLLChunk](osAllocPages(PageSize))
     incCurrMem(a, PageSize)

@@ -268,7 +268,7 @@ proc requestOsChunks(a: var TMemRegion, size: int): PBigChunk =
   incCurrMem(a, size)
   inc(a.freeMem, size)
   result = cast[PBigChunk](osAllocPages(size))
-  sysAssert((cast[TAddress](result) and PageMask) == 0)
+  sysAssert((cast[TAddress](result) and PageMask) == 0, "requestOsChunks 1")
   #zeroMem(result, size)
   result.next = nil
   result.prev = nil

@@ -276,7 +276,7 @@ proc requestOsChunks(a: var TMemRegion, size: int): PBigChunk =
   result.size = size
   # update next.prevSize:
   var nxt = cast[TAddress](result) +% size
-  sysAssert((nxt and PageMask) == 0)
+  sysAssert((nxt and PageMask) == 0, "requestOsChunks 2")
   var next = cast[PChunk](nxt)
   if pageIndex(next) in a.chunkStarts:
     #echo("Next already allocated!")

@@ -284,7 +284,7 @@ proc requestOsChunks(a: var TMemRegion, size: int): PBigChunk =
   # set result.prevSize:
   var lastSize = if a.lastSize != 0: a.lastSize else: PageSize
   var prv = cast[TAddress](result) -% lastSize
-  sysAssert((nxt and PageMask) == 0)
+  sysAssert((nxt and PageMask) == 0, "requestOsChunks 3")
   var prev = cast[PChunk](prv)
   if pageIndex(prev) in a.chunkStarts and prev.size == lastSize:
     #echo("Prev already allocated!")

@@ -297,7 +297,7 @@ proc freeOsChunks(a: var TMemRegion, p: pointer, size: int) =
   # update next.prevSize:
   var c = cast[PChunk](p)
   var nxt = cast[TAddress](p) +% c.size
-  sysAssert((nxt and PageMask) == 0)
+  sysAssert((nxt and PageMask) == 0, "freeOsChunks")
   var next = cast[PChunk](nxt)
   if pageIndex(next) in a.chunkStarts:
     next.prevSize = 0 # XXX used
@@ -325,23 +325,23 @@ proc writeFreeList(a: TMemRegion) =
     it = it.next
 
 proc ListAdd[T](head: var T, c: T) {.inline.} =
-  sysAssert(c notin head)
-  sysAssert c.prev == nil
-  sysAssert c.next == nil
+  sysAssert(c notin head, "listAdd 1")
+  sysAssert c.prev == nil, "listAdd 2"
+  sysAssert c.next == nil, "listAdd 3"
   c.next = head
   if head != nil:
-    sysAssert head.prev == nil
+    sysAssert head.prev == nil, "listAdd 4"
     head.prev = c
   head = c
 
 proc ListRemove[T](head: var T, c: T) {.inline.} =
-  sysAssert(c in head)
+  sysAssert(c in head, "listRemove")
   if c == head:
     head = c.next
-    sysAssert c.prev == nil
+    sysAssert c.prev == nil, "listRemove 2"
     if head != nil: head.prev = nil
   else:
-    sysAssert c.prev != nil
+    sysAssert c.prev != nil, "listRemove 3"
     c.prev.next = c.next
     if c.next != nil: c.next.prev = c.prev
   c.next = nil
@@ -356,19 +356,19 @@ proc chunkUnused(c: PChunk): bool {.inline.} =
 proc updatePrevSize(a: var TMemRegion, c: PBigChunk,
                     prevSize: int) {.inline.} =
   var ri = cast[PChunk](cast[TAddress](c) +% c.size)
-  sysAssert((cast[TAddress](ri) and PageMask) == 0)
+  sysAssert((cast[TAddress](ri) and PageMask) == 0, "updatePrevSize")
   if isAccessible(a, ri):
     ri.prevSize = prevSize
 
 proc freeBigChunk(a: var TMemRegion, c: PBigChunk) =
   var c = c
-  sysAssert(c.size >= PageSize)
+  sysAssert(c.size >= PageSize, "freeBigChunk")
   inc(a.freeMem, c.size)
   when coalescRight:
     var ri = cast[PChunk](cast[TAddress](c) +% c.size)
-    sysAssert((cast[TAddress](ri) and PageMask) == 0)
+    sysAssert((cast[TAddress](ri) and PageMask) == 0, "freeBigChunk 2")
     if isAccessible(a, ri) and chunkUnused(ri):
-      sysAssert(not isSmallChunk(ri))
+      sysAssert(not isSmallChunk(ri), "freeBigChunk 3")
       if not isSmallChunk(ri):
         ListRemove(a.freeChunksList, cast[PBigChunk](ri))
         inc(c.size, ri.size)

@@ -376,9 +376,9 @@ proc freeBigChunk(a: var TMemRegion, c: PBigChunk) =
   when coalescLeft:
     if c.prevSize != 0:
       var le = cast[PChunk](cast[TAddress](c) -% c.prevSize)
-      sysAssert((cast[TAddress](le) and PageMask) == 0)
+      sysAssert((cast[TAddress](le) and PageMask) == 0, "freeBigChunk 4")
       if isAccessible(a, le) and chunkUnused(le):
-        sysAssert(not isSmallChunk(le))
+        sysAssert(not isSmallChunk(le), "freeBigChunk 5")
        if not isSmallChunk(le):
          ListRemove(a.freeChunksList, cast[PBigChunk](le))
          inc(le.size, c.size)

@@ -395,7 +395,7 @@ proc freeBigChunk(a: var TMemRegion, c: PBigChunk) =
 
 proc splitChunk(a: var TMemRegion, c: PBigChunk, size: int) =
   var rest = cast[PBigChunk](cast[TAddress](c) +% size)
-  sysAssert(rest notin a.freeChunksList)
+  sysAssert(rest notin a.freeChunksList, "splitChunk")
   rest.size = c.size - size
   rest.used = false
   rest.next = nil
@@ -408,12 +408,12 @@ proc splitChunk(a: var TMemRegion, c: PBigChunk, size: int) =
 
 proc getBigChunk(a: var TMemRegion, size: int): PBigChunk =
   # use first fit for now:
-  sysAssert((size and PageMask) == 0)
-  sysAssert(size > 0)
+  sysAssert((size and PageMask) == 0, "getBigChunk 1")
+  sysAssert(size > 0, "getBigChunk 2")
   result = a.freeChunksList
   block search:
     while result != nil:
-      sysAssert chunkUnused(result)
+      sysAssert chunkUnused(result), "getBigChunk 3"
       if result.size == size:
         ListRemove(a.freeChunksList, result)
         break search

@@ -422,7 +422,7 @@ proc getBigChunk(a: var TMemRegion, size: int): PBigChunk =
         splitChunk(a, result, size)
         break search
       result = result.next
-      sysAssert result != a.freeChunksList
+      sysAssert result != a.freeChunksList, "getBigChunk 4"
     if size < InitialMemoryRequest:
       result = requestOsChunks(a, InitialMemoryRequest)
       splitChunk(a, result, size)

@@ -435,8 +435,8 @@ proc getBigChunk(a: var TMemRegion, size: int): PBigChunk =
 
 proc getSmallChunk(a: var TMemRegion): PSmallChunk =
   var res = getBigChunk(a, PageSize)
-  sysAssert res.prev == nil
-  sysAssert res.next == nil
+  sysAssert res.prev == nil, "getSmallChunk 1"
+  sysAssert res.next == nil, "getSmallChunk 2"
   result = cast[PSmallChunk](res)
 
 # -----------------------------------------------------------------------------
@@ -450,8 +450,8 @@ proc memSize(a: TMemRegion, p: pointer): int {.inline.} =
   result = c.size
 
 proc rawAlloc(a: var TMemRegion, requestedSize: int): pointer =
-  sysAssert(roundup(65, 8) == 72)
-  sysAssert requestedSize >= sizeof(TFreeCell)
+  sysAssert(roundup(65, 8) == 72, "rawAlloc 1")
+  sysAssert requestedSize >= sizeof(TFreeCell), "rawAlloc 2"
   var size = roundup(requestedSize, MemAlign)
   #c_fprintf(c_stdout, "alloc; size: %ld; %ld\n", requestedSize, size)
   if size <= SmallChunkSize-smallChunkOverhead():

@@ -461,7 +461,7 @@ proc rawAlloc(a: var TMemRegion, requestedSize: int): pointer =
     if c == nil:
       c = getSmallChunk(a)
       c.freeList = nil
-      sysAssert c.size == PageSize
+      sysAssert c.size == PageSize, "rawAlloc 3"
       c.size = size
       c.acc = size
       c.free = SmallChunkSize - smallChunkOverhead() - size

@@ -469,34 +469,35 @@ proc rawAlloc(a: var TMemRegion, requestedSize: int): pointer =
       c.prev = nil
       ListAdd(a.freeSmallChunks[s], c)
       result = addr(c.data)
-      sysAssert((cast[TAddress](result) and (MemAlign-1)) == 0)
+      sysAssert((cast[TAddress](result) and (MemAlign-1)) == 0, "rawAlloc 4")
     else:
-      sysAssert c.next != c
+      sysAssert c.next != c, "rawAlloc 5"
       #if c.size != size:
       #  c_fprintf(c_stdout, "csize: %lld; size %lld\n", c.size, size)
-      sysAssert c.size == size
+      sysAssert c.size == size, "rawAlloc 6"
       if c.freeList == nil:
-        sysAssert(c.acc + smallChunkOverhead() + size <= SmallChunkSize)
+        sysAssert(c.acc + smallChunkOverhead() + size <= SmallChunkSize,
+                  "rawAlloc 7")
         result = cast[pointer](cast[TAddress](addr(c.data)) +% c.acc)
-        inc(c.acc, size)
+        inc(c.acc, size)
       else:
         result = c.freeList
-        sysAssert(c.freeList.zeroField == 0)
+        sysAssert(c.freeList.zeroField == 0, "rawAlloc 8")
        c.freeList = c.freeList.next
       dec(c.free, size)
-      sysAssert((cast[TAddress](result) and (MemAlign-1)) == 0)
-      if c.free < size:
+      sysAssert((cast[TAddress](result) and (MemAlign-1)) == 0, "rawAlloc 9")
+      if c.free < size:
         ListRemove(a.freeSmallChunks[s], c)
   else:
     size = roundup(requestedSize+bigChunkOverhead(), PageSize)
     # allocate a large block
     var c = getBigChunk(a, size)
-    sysAssert c.prev == nil
-    sysAssert c.next == nil
-    sysAssert c.size == size
+    sysAssert c.prev == nil, "rawAlloc 10"
+    sysAssert c.next == nil, "rawAlloc 11"
+    sysAssert c.size == size, "rawAlloc 12"
     result = addr(c.data)
-    sysAssert((cast[TAddress](result) and (MemAlign-1)) == 0)
-    sysAssert(isAccessible(a, result))
+    sysAssert((cast[TAddress](result) and (MemAlign-1)) == 0, "rawAlloc 13")
+    sysAssert(isAccessible(a, result), "rawAlloc 14")
 
 proc rawAlloc0(a: var TMemRegion, requestedSize: int): pointer =
   result = rawAlloc(a, requestedSize)
@@ -510,7 +511,7 @@ proc rawDealloc(a: var TMemRegion, p: pointer) =
     var s = c.size
     var f = cast[ptr TFreeCell](p)
     #echo("setting to nil: ", $cast[TAddress](addr(f.zeroField)))
-    sysAssert(f.zeroField != 0)
+    sysAssert(f.zeroField != 0, "rawDealloc 1")
     f.zeroField = 0
     f.next = c.freeList
     c.freeList = f

@@ -520,7 +521,6 @@ proc rawDealloc(a: var TMemRegion, p: pointer) =
                s -% sizeof(TFreeCell))
     # check if it is not in the freeSmallChunks[s] list:
     if c.free < s:
-      sysAssert c notin a.freeSmallChunks[s div memAlign]
       # add it to the freeSmallChunks[s] array:
       ListAdd(a.freeSmallChunks[s div memAlign], c)
       inc(c.free, s)
@@ -557,7 +557,7 @@ proc ptrSize(p: pointer): int =
 proc alloc(allocator: var TMemRegion, size: int): pointer =
   result = rawAlloc(allocator, size+sizeof(TFreeCell))
   cast[ptr TFreeCell](result).zeroField = 1 # mark it as used
-  sysAssert(not isAllocatedPtr(allocator, result))
+  sysAssert(not isAllocatedPtr(allocator, result), "alloc")
   result = cast[pointer](cast[TAddress](result) +% sizeof(TFreeCell))
 
 proc alloc0(allocator: var TMemRegion, size: int): pointer =

@@ -566,9 +566,9 @@ proc alloc0(allocator: var TMemRegion, size: int): pointer =
 
 proc dealloc(allocator: var TMemRegion, p: pointer) =
   var x = cast[pointer](cast[TAddress](p) -% sizeof(TFreeCell))
-  sysAssert(cast[ptr TFreeCell](x).zeroField == 1)
+  sysAssert(cast[ptr TFreeCell](x).zeroField == 1, "dealloc 1")
   rawDealloc(allocator, x)
-  sysAssert(not isAllocatedPtr(allocator, x))
+  sysAssert(not isAllocatedPtr(allocator, x), "dealloc 2")
 
 proc realloc(allocator: var TMemRegion, p: pointer, newsize: int): pointer =
   if newsize > 0:
@@ -24,7 +24,7 @@ proc genericAssignAux(dest, src: Pointer, n: ptr TNimNode, shallow: bool) =
             n.typ.size)
     var m = selectBranch(src, n)
     if m != nil: genericAssignAux(dest, src, m, shallow)
-  of nkNone: sysAssert(false)
+  of nkNone: sysAssert(false, "genericAssignAux")
   #else:
   #  echo "ugh memory corruption! ", n.kind
   #  quit 1

@@ -33,7 +33,7 @@ proc genericAssignAux(dest, src: Pointer, mt: PNimType, shallow: bool) =
   var
     d = cast[TAddress](dest)
     s = cast[TAddress](src)
-  sysAssert(mt != nil)
+  sysAssert(mt != nil, "genericAssignAux 2")
   case mt.Kind
   of tyString:
     var x = cast[ppointer](dest)

@@ -50,7 +50,7 @@ proc genericAssignAux(dest, src: Pointer, mt: PNimType, shallow: bool) =
       # this can happen! nil sequences are allowed
       unsureAsgnRef(x, s2)
       return
-    sysAssert(dest != nil)
+    sysAssert(dest != nil, "genericAssignAux 3")
     unsureAsgnRef(x, newObj(mt, seq.len * mt.base.size + GenericSeqSize))
     var dst = cast[taddress](cast[ppointer](dest)[])
     for i in 0..seq.len-1:
@@ -101,7 +101,7 @@ proc objectInit(dest: Pointer, typ: PNimType) {.compilerProc.}
 proc objectInitAux(dest: Pointer, n: ptr TNimNode) =
   var d = cast[TAddress](dest)
   case n.kind
-  of nkNone: sysAssert(false)
+  of nkNone: sysAssert(false, "objectInitAux")
   of nkSLot: objectInit(cast[pointer](d +% n.offset), n.typ)
   of nkList:
     for i in 0..n.len-1:

@@ -134,7 +134,7 @@ proc genericReset(dest: Pointer, mt: PNimType) {.compilerProc.}
 proc genericResetAux(dest: Pointer, n: ptr TNimNode) =
   var d = cast[TAddress](dest)
   case n.kind
-  of nkNone: sysAssert(false)
+  of nkNone: sysAssert(false, "genericResetAux")
   of nkSlot: genericReset(cast[pointer](d +% n.offset), n.typ)
   of nkList:
     for i in 0..n.len-1: genericResetAux(dest, n.sons[i])

@@ -145,7 +145,7 @@ proc genericResetAux(dest: Pointer, n: ptr TNimNode) =
 
 proc genericReset(dest: Pointer, mt: PNimType) =
   var d = cast[TAddress](dest)
-  sysAssert(mt != nil)
+  sysAssert(mt != nil, "genericReset 2")
   case mt.Kind
   of tyString, tyRef, tySequence:
     unsureAsgnRef(cast[ppointer](dest), nil)
@@ -102,9 +102,9 @@ proc CellSetGet(t: TCellSet, key: TAddress): PPageDesc =
 proc CellSetRawInsert(t: TCellSet, data: PPageDescArray, desc: PPageDesc) =
   var h = cast[int](desc.key) and t.max
   while data[h] != nil:
-    sysAssert(data[h] != desc)
+    sysAssert(data[h] != desc, "CellSetRawInsert 1")
     h = nextTry(h, t.max)
-  sysAssert(data[h] == nil)
+  sysAssert(data[h] == nil, "CellSetRawInsert 2")
   data[h] = desc
 
 proc CellSetEnlarge(t: var TCellSet) =

@@ -130,7 +130,7 @@ proc CellSetPut(t: var TCellSet, key: TAddress): PPageDesc =
   inc(t.counter)
   h = cast[int](key) and t.max
   while t.data[h] != nil: h = nextTry(h, t.max)
-  sysAssert(t.data[h] == nil)
+  sysAssert(t.data[h] == nil, "CellSetPut")
   # the new page descriptor goes into result
   result = cast[PPageDesc](Alloc0(sizeof(TPageDesc)))
   result.next = t.head
@@ -133,30 +133,30 @@ when traceGC:
     of csAllocated:
       if c in states[csAllocated]:
         writeCell("attempt to alloc an already allocated cell", c)
-        sysAssert(false)
+        sysAssert(false, "traceCell 1")
       excl(states[csCycFreed], c)
       excl(states[csZctFreed], c)
     of csZctFreed:
       if c in states[csZctFreed]:
         writeCell("attempt to free zct cell twice", c)
-        sysAssert(false)
+        sysAssert(false, "traceCell 2")
       if c in states[csCycFreed]:
         writeCell("attempt to free with zct, but already freed with cyc", c)
-        sysAssert(false)
+        sysAssert(false, "traceCell 3")
       if c notin states[csAllocated]:
         writeCell("attempt to free not an allocated cell", c)
-        sysAssert(false)
+        sysAssert(false, "traceCell 4")
       excl(states[csAllocated], c)
     of csCycFreed:
       if c notin states[csAllocated]:
         writeCell("attempt to free a not allocated cell", c)
-        sysAssert(false)
+        sysAssert(false, "traceCell 5")
       if c in states[csCycFreed]:
         writeCell("attempt to free cyc cell twice", c)
-        sysAssert(false)
+        sysAssert(false, "traceCell 6")
       if c in states[csZctFreed]:
         writeCell("attempt to free with cyc, but already freed with zct", c)
-        sysAssert(false)
+        sysAssert(false, "traceCell 7")
       excl(states[csAllocated], c)
     incl(states[state], c)
 
@@ -225,7 +225,7 @@ proc decRef(c: PCell) {.inline.} =
   when stressGC:
     if c.refcount <% rcIncrement:
       writeCell("broken cell", c)
-  sysAssert(c.refcount >=% rcIncrement)
+  sysAssert(c.refcount >=% rcIncrement, "decRef")
   #if c.refcount <% rcIncrement: quit("leck mich")
   if --c.refcount:
     rtlAddZCT(c)

@@ -242,7 +242,7 @@ proc nimGCunref(p: pointer) {.compilerProc, inline.} = decRef(usrToCell(p))
 
 proc asgnRef(dest: ppointer, src: pointer) {.compilerProc, inline.} =
   # the code generator calls this proc!
-  sysAssert(not isOnStack(dest))
+  sysAssert(not isOnStack(dest), "asgnRef")
   # BUGFIX: first incRef then decRef!
   if src != nil: incRef(usrToCell(src))
   if dest[] != nil: decRef(usrToCell(dest[]))

@@ -298,7 +298,7 @@ proc forAllSlotsAux(dest: pointer, n: ptr TNimNode, op: TWalkOp) =
   of nkCase:
     var m = selectBranch(dest, n)
     if m != nil: forAllSlotsAux(dest, m, op)
-  of nkNone: sysAssert(false)
+  of nkNone: sysAssert(false, "forAllSlotsAux")
 
 proc forAllChildrenAux(dest: Pointer, mt: PNimType, op: TWalkOp) =
   var d = cast[TAddress](dest)
@@ -315,9 +315,9 @@ proc forAllChildrenAux(dest: Pointer, mt: PNimType, op: TWalkOp) =
     else: nil
 
 proc forAllChildren(cell: PCell, op: TWalkOp) =
-  sysAssert(cell != nil)
-  sysAssert(cell.typ != nil)
-  sysAssert cell.typ.kind in {tyRef, tySequence, tyString}
+  sysAssert(cell != nil, "forAllChildren: 1")
+  sysAssert(cell.typ != nil, "forAllChildren: 2")
+  sysAssert cell.typ.kind in {tyRef, tySequence, tyString}, "forAllChildren: 3"
   case cell.typ.Kind
   of tyRef: # common case
     forAllChildrenAux(cellToUsr(cell), cell.typ.base, op)

@@ -377,11 +377,11 @@ proc addNewObjToZCT(res: PCell, gch: var TGcHeap) {.inline.} =
 proc newObj(typ: PNimType, size: int, gch: var TGcHeap): pointer =
   # generates a new object and sets its reference counter to 0
   acquire(gch)
-  sysAssert(typ.kind in {tyRef, tyString, tySequence})
+  sysAssert(typ.kind in {tyRef, tyString, tySequence}, "newObj: 1")
   collectCT(gch)
   var res = cast[PCell](rawAlloc(gch.region, size + sizeof(TCell)))
   zeroMem(res, size+sizeof(TCell))
-  sysAssert((cast[TAddress](res) and (MemAlign-1)) == 0)
+  sysAssert((cast[TAddress](res) and (MemAlign-1)) == 0, "newObj: 2")
   # now it is buffered in the ZCT
   res.typ = typ
   when debugGC and not hasThreadSupport:
@@ -389,13 +389,13 @@ proc newObj(typ: PNimType, size: int, gch: var TGcHeap): pointer =
     res.filename = framePtr.prev.filename
     res.line = framePtr.prev.line
   res.refcount = rcZct # refcount is zero, but mark it to be in the ZCT
-  sysAssert(isAllocatedPtr(gch.region, res))
+  sysAssert(isAllocatedPtr(gch.region, res), "newObj: 3")
   # its refcount is zero, so add it to the ZCT:
   addNewObjToZCT(res, gch)
   when logGC: writeCell("new cell", res)
-  gcTrace(res, csAllocated)
+  gcTrace(res, csAllocated)
   release(gch)
-  result = cellToUsr(res)
+  result = cellToUsr(res)
 
 proc newObj(typ: PNimType, size: int): pointer {.compilerRtl.} =
   result = newObj(typ, size, gch)
@@ -410,8 +410,8 @@ proc growObj(old: pointer, newsize: int, gch: var TGcHeap): pointer =
   acquire(gch)
   collectCT(gch)
   var ol = usrToCell(old)
-  sysAssert(ol.typ != nil)
-  sysAssert(ol.typ.kind in {tyString, tySequence})
+  sysAssert(ol.typ != nil, "growObj: 1")
+  sysAssert(ol.typ.kind in {tyString, tySequence}, "growObj: 2")
   var res = cast[PCell](rawAlloc(gch.region, newsize + sizeof(TCell)))
   var elemSize = 1
   if ol.typ.kind != tyString: elemSize = ol.typ.base.size

@@ -420,8 +420,8 @@ proc growObj(old: pointer, newsize: int, gch: var TGcHeap): pointer =
   copyMem(res, ol, oldsize + sizeof(TCell))
   zeroMem(cast[pointer](cast[TAddress](res)+% oldsize +% sizeof(TCell)),
           newsize-oldsize)
-  sysAssert((cast[TAddress](res) and (MemAlign-1)) == 0)
-  sysAssert(res.refcount shr rcShift <=% 1)
+  sysAssert((cast[TAddress](res) and (MemAlign-1)) == 0, "growObj: 3")
+  sysAssert(res.refcount shr rcShift <=% 1, "growObj: 4")
   #if res.refcount <% rcIncrement:
   #  add(gch.zct, res)
   #else: # XXX: what to do here?

@@ -442,7 +442,7 @@ proc growObj(old: pointer, newsize: int, gch: var TGcHeap): pointer =
   gcTrace(res, csAllocated)
   when reallyDealloc: rawDealloc(gch.region, ol)
   else:
-    sysAssert(ol.typ != nil)
+    sysAssert(ol.typ != nil, "growObj: 5")
     zeroMem(ol, sizeof(TCell))
   release(gch)
   result = cellToUsr(res)
@@ -455,17 +455,17 @@ proc growObj(old: pointer, newsize: int): pointer {.rtl.} =
 proc doOperation(p: pointer, op: TWalkOp) =
   if p == nil: return
   var c: PCell = usrToCell(p)
-  sysAssert(c != nil)
+  sysAssert(c != nil, "doOperation: 1")
   case op # faster than function pointers because of easy prediction
   of waZctDecRef:
-    sysAssert(c.refcount >=% rcIncrement)
+    sysAssert(c.refcount >=% rcIncrement, "doOperation 2")
     c.refcount = c.refcount -% rcIncrement
     when logGC: writeCell("decref (from doOperation)", c)
     if c.refcount <% rcIncrement: addZCT(gch.zct, c)
   of waPush:
     add(gch.tempStack, c)
   of waCycleDecRef:
-    sysAssert(c.refcount >=% rcIncrement)
+    sysAssert(c.refcount >=% rcIncrement, "doOperation 3")
     c.refcount = c.refcount -% rcIncrement
 
 # we now use a much simpler and non-recursive algorithm for cycle removal
@@ -507,7 +507,7 @@ proc collectCycles(gch: var TGcHeap) =
       when logGC: writeCell("cycle collector dealloc cell", c)
       when reallyDealloc: rawDealloc(gch.region, c)
       else:
-        sysAssert(c.typ != nil)
+        sysAssert(c.typ != nil, "collectCycles")
         zeroMem(c, sizeof(TCell))
   Deinit(gch.cycleRoots)
   Init(gch.cycleRoots)

@@ -686,7 +686,7 @@ proc CollectZCT(gch: var TGcHeap) =
   while L[] > 0:
     var c = gch.zct.d[0]
     # remove from ZCT:
-    sysAssert((c.refcount and colorMask) == rcZct)
+    sysAssert((c.refcount and rcZct) == rcZct, "collectZCT")
     c.refcount = c.refcount and not colorMask
     gch.zct.d[0] = gch.zct.d[L[] - 1]
     dec(L[])
@@ -707,19 +707,19 @@ proc CollectZCT(gch: var TGcHeap) =
       forAllChildren(c, waZctDecRef)
       when reallyDealloc: rawDealloc(gch.region, c)
       else:
-        sysAssert(c.typ != nil)
+        sysAssert(c.typ != nil, "collectZCT 2")
         zeroMem(c, sizeof(TCell))
 
 proc unmarkStackAndRegisters(gch: var TGcHeap) =
   var d = gch.decStack.d
   for i in 0..gch.decStack.len-1:
-    sysAssert isAllocatedPtr(allocator, d[i])
+    sysAssert isAllocatedPtr(gch.region, d[i]), "unmarkStackAndRegisters"
     # decRef(d[i]) inlined: cannot create a cycle and must not acquire lock
     var c = d[i]
     # XXX no need for an atomic dec here:
     if --c.refcount:
       addZCT(gch.zct, c)
-    sysAssert c.typ != nil
+    sysAssert c.typ != nil, "unmarkStackAndRegisters 2"
   gch.decStack.len = 0
 
 proc collectCT(gch: var TGcHeap) =
@@ -727,7 +727,7 @@ proc collectCT(gch: var TGcHeap) =
       getOccupiedMem(gch.region) >= gch.cycleThreshold) or stressGC) and
       gch.recGcLock == 0:
     gch.stat.maxStackSize = max(gch.stat.maxStackSize, stackSize())
-    sysAssert(gch.decStack.len == 0)
+    sysAssert(gch.decStack.len == 0, "collectCT")
     markStackAndRegisters(gch)
     markThreadStacks(gch)
     gch.stat.maxStackCells = max(gch.stat.maxStackCells, gch.decStack.len)
@@ -207,7 +207,7 @@ else:
   include "system/alloc"
 
   include "system/cellsets"
-  sysAssert(sizeof(TCell) == sizeof(TFreeCell))
+  sysAssert(sizeof(TCell) == sizeof(TFreeCell), "sizeof TFreeCell")
   include "system/gc"
 
 {.pop.}
@@ -158,7 +158,7 @@ when not defined(useNimRtl):
   proc reprRecordAux(result: var string, p: pointer, n: ptr TNimNode,
                      cl: var TReprClosure) =
     case n.kind
-    of nkNone: sysAssert(false)
+    of nkNone: sysAssert(false, "reprRecordAux")
     of nkSlot:
       add result, $n.name
       add result, " = "

@@ -206,7 +206,7 @@ when not defined(useNimRtl):
       var t = cast[ptr PNimType](p)[]
       reprRecord(result, p, t, cl)
     of tyRef, tyPtr:
-      sysAssert(p != nil)
+      sysAssert(p != nil, "reprAux")
      if cast[ppointer](p)[] == nil: add result, "nil"
      else: reprRef(result, cast[ppointer](p)[], typ, cl)
    of tySequence: