Mirror of https://github.com/nim-lang/Nim.git

Commit: GC with primitive MS
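In short: the default refcounting GC's cycle handling is switched from the trial-deletion collector to a primitive backup mark&sweep (useBackupGc = true); the cycleRoots set, the markGray/scan/scanBlack/collectWhite machinery and the corresponding walk ops are deleted; the incRef/decRef write barriers lose their cycle-root bookkeeping; and the compiler now registers GC roots for gcRefc as well.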
@@ -16,7 +16,7 @@ const
     # above X strings a hash-switch for strings is generated
 
 proc registerGcRoot(p: BProc, v: PSym) =
-  if gSelectedGC in {gcMarkAndSweep, gcGenerational, gcV2} and
+  if gSelectedGC in {gcMarkAndSweep, gcGenerational, gcV2, gcRefc} and
       containsGarbageCollectedRef(v.loc.t):
     # we register a specialized marked proc here; this has the advantage
     # that it works out of the box for thread local storage then :-)
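For readers outside the compiler: registering a GC root means telling the runtime about a global variable whose contents must be traced during collection; the hunk above extends this to the default gcRefc because the new backup mark&sweep needs globals as roots. A minimal sketch of the mechanism, where every name (Root, registerGlobalRoot, markGlobals) is a hypothetical illustration, not the runtime's actual API:

```nim
# Toy illustration of global-root registration; every name here is
# hypothetical, not the runtime's actual API.
type
  MarkerProc = proc (p: pointer) {.nimcall.}
  Root = object
    address: pointer     # address of the global variable
    marker: MarkerProc   # type-specialized tracing proc

var globalRoots: seq[Root] = @[]

proc registerGlobalRoot(address: pointer; marker: MarkerProc) =
  # called once per global that contains GC'ed refs
  globalRoots.add Root(address: address, marker: marker)

proc markGlobals() =
  # during collection, every registered root is traced via its marker
  for r in globalRoots:
    r.marker(r.address)
```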
@@ -1,7 +1,7 @@
 #
 #
 #            Nim's Runtime Library
-#        (c) Copyright 2015 Andreas Rumpf
+#        (c) Copyright 2016 Andreas Rumpf
 #
 #    See the file "copying.txt", included in this
 #    distribution, for details about the copyright.
@@ -9,13 +9,8 @@
 
 # Garbage Collector
 #
-# The basic algorithm is *Deferred Reference Counting* with cycle detection.
-# This is achieved by combining a Deutsch-Bobrow garbage collector
-# together with Christoper's partial mark-sweep garbage collector.
-#
-# Special care has been taken to avoid recursion as far as possible to avoid
-# stack overflows when traversing deep datastructures. It is well-suited
-# for soft real time applications (like games).
+# Refcounting + Mark&Sweep. Complex algorithms avoided.
+# Been there, done that, didn't work.
 
 when defined(nimCoroutines):
   import arch
@@ -30,7 +25,7 @@ const
   # this seems to be a good value
   withRealTime = defined(useRealtimeGC)
   useMarkForDebug = defined(gcGenerational)
-  useBackupGc = false                    # use a simple M&S GC to collect
+  useBackupGc = true                     # use a simple M&S GC to collect
                                          # cycles instead of the complex
                                          # algorithm
 
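Flipping useBackupGc to true is the heart of the commit: cycles are no longer broken up by trial deletion but reclaimed by a whole-heap mark&sweep pass. A toy model of the resulting control flow, with hypothetical proc names standing in for the real mark and sweep phases:

```nim
# Toy model of the two cycle-collection strategies this constant selects.
const useBackupGc = true

proc markReachable() = discard   # trace from stacks, registers and globals
proc sweepUnmarked() = discard   # free everything the mark phase missed
proc trialDeletion() = discard   # stand-in for the removed purple-root scan

proc collectCyclesToy() =
  when useBackupGc:
    # primitive M&S: mark all reachable cells, then sweep the rest
    markReachable()
    sweepUnmarked()
  else:
    trialDeletion()

collectCyclesToy()
```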
@@ -55,8 +50,7 @@ type
   WalkOp = enum
     waMarkGlobal,    # part of the backup/debug mark&sweep
     waMarkPrecise,   # part of the backup/debug mark&sweep
-    waZctDecRef, waPush, waCycleDecRef, waMarkGray, waScan, waScanBlack,
-    waCollectWhite #, waDebug
+    waZctDecRef, waPush
 
   Finalizer {.compilerproc.} = proc (self: pointer) {.nimcall, benign.}
     # A ref type can have a finalizer that is called before the object's
@@ -87,7 +81,6 @@ type
     idGenerator: int
     zct: CellSeq             # the zero count table
     decStack: CellSeq        # cells in the stack that are to decref again
-    cycleRoots: CellSet
     tempStack: CellSeq       # temporary stack for recursion elimination
     recGcLock: int           # prevent recursion via finalizers; no thread lock
     when withRealTime:
@@ -136,9 +129,6 @@ proc usrToCell(usr: pointer): PCell {.inline.} =
   # convert pointer to userdata to object (=pointer to refcount)
   result = cast[PCell](cast[ByteAddress](usr)-%ByteAddress(sizeof(Cell)))
 
-proc canBeCycleRoot(c: PCell): bool {.inline.} =
-  result = ntfAcyclic notin c.typ.flags
-
 proc extGetCellType(c: pointer): PNimType {.compilerproc.} =
   # used for code generation concerning debugging
   result = usrToCell(c).typ
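usrToCell (shown as context above) works because the cell header is laid out immediately before the user data, so the conversion is one fixed subtraction. A self-contained sketch of that layout trick using a toy cell type:

```nim
# Toy demonstration of the header-before-data layout behind usrToCell:
# the refcount header sits immediately below the user pointer, so the
# conversion is a single subtraction.
type
  ToyCell = object
    refcount: int   # header word preceding the user data

proc usrToCellToy(usr: pointer): ptr ToyCell =
  cast[ptr ToyCell](cast[uint](usr) - uint(sizeof(ToyCell)))

proc cellToUsrToy(c: ptr ToyCell): pointer =
  cast[pointer](cast[uint](c) + uint(sizeof(ToyCell)))

when isMainModule:
  let c = create(ToyCell)      # allocate just a header for the demo
  let usr = cellToUsrToy(c)    # the address user code would see
  assert usrToCellToy(usr) == c
  dealloc(c)
```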
@@ -204,10 +194,6 @@ proc rtlAddCycleRoot(c: PCell) {.rtl, inl.} =
   # we MUST access gch as a global here, because this crosses DLL boundaries!
   when hasThreadSupport and hasSharedHeap:
     acquireSys(HeapLock)
-  when cycleGC:
-    if c.color != rcPurple:
-      c.setColor(rcPurple)
-      incl(gch.cycleRoots, c)
   when hasThreadSupport and hasSharedHeap:
     releaseSys(HeapLock)
 
@@ -224,19 +210,12 @@ proc decRef(c: PCell) {.inline.} =
   gcAssert(c.refcount >=% rcIncrement, "decRef")
   if --c.refcount:
     rtlAddZCT(c)
-  elif canbeCycleRoot(c):
-    # unfortunately this is necessary here too, because a cycle might just
-    # have been broken up and we could recycle it.
-    rtlAddCycleRoot(c)
-    #writeCell("decRef", c)
 
 proc incRef(c: PCell) {.inline.} =
   gcAssert(isAllocatedPtr(gch.region, c), "incRef: interiorPtr")
   c.refcount = c.refcount +% rcIncrement
   # and not colorMask
   #writeCell("incRef", c)
-  if canbeCycleRoot(c):
-    rtlAddCycleRoot(c)
 
 proc nimGCref(p: pointer) {.compilerProc, inline.} = incRef(usrToCell(p))
 proc nimGCunref(p: pointer) {.compilerProc, inline.} = decRef(usrToCell(p))
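After this hunk the write barriers are plain deferred reference counting: incRef only bumps the count, and decRef queues cells that reach zero in the ZCT (zero count table) instead of also tracking possible cycle roots; cycles are left for the backup mark&sweep. A toy sketch of ZCT-style deferral, where RcCell and the procs are hypothetical stand-ins:

```nim
# Toy sketch of deferred refcounting with a zero count table (ZCT):
# a cell that drops to refcount 0 is queued rather than freed on the
# spot, because an untracked stack reference may still resurrect it.
type
  RcCell = ref object
    refcount: int

var zct: seq[RcCell] = @[]

proc incRefToy(c: RcCell) =
  inc c.refcount

proc decRefToy(c: RcCell) =
  assert c.refcount > 0, "decRef underflow"
  dec c.refcount
  if c.refcount == 0:
    zct.add c            # defer: the stack might still point at it

proc collectZctToy(isOnStack: proc (c: RcCell): bool) =
  # at collection time, only cells that are still at zero and not
  # referenced from the stack are actually released
  for c in zct:
    if c.refcount == 0 and not isOnStack(c):
      discard            # a real GC would dealloc the cell here
  zct.setLen(0)
```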
@@ -306,7 +285,6 @@ proc initGC() =
     # init the rt
     init(gch.zct)
     init(gch.tempStack)
-    init(gch.cycleRoots)
     init(gch.decStack)
     when useMarkForDebug or useBackupGc:
       init(gch.marked)
@@ -563,7 +541,6 @@ proc growObj(old: pointer, newsize: int, gch: var GcHeap): pointer =
           d[j] = res
           break
         dec(j)
-    if canbeCycleRoot(ol): excl(gch.cycleRoots, ol)
     rawDealloc(gch.region, ol)
   else:
     # we split the old refcount in 2 parts. XXX This is still not entirely
@@ -602,49 +579,6 @@ proc freeCyclicCell(gch: var GcHeap, c: PCell) =
   gcAssert(c.typ != nil, "freeCyclicCell")
   zeroMem(c, sizeof(Cell))
 
-proc markGray(s: PCell) =
-  if s.color != rcGray:
-    setColor(s, rcGray)
-    forAllChildren(s, waMarkGray)
-
-proc scanBlack(s: PCell) =
-  s.setColor(rcBlack)
-  forAllChildren(s, waScanBlack)
-
-proc scan(s: PCell) =
-  if s.color == rcGray:
-    if s.refcount >=% rcIncrement:
-      scanBlack(s)
-    else:
-      s.setColor(rcWhite)
-      forAllChildren(s, waScan)
-
-proc collectWhite(s: PCell) =
-  # This is a hacky way to deal with the following problem (bug #1796)
-  # Consider this content in cycleRoots:
-  #   x -> a; y -> a  where 'a' is an acyclic object so not included in
-  # cycleRoots itself. Then 'collectWhite' used to free 'a' twice. The
-  # 'isAllocatedPtr' check prevents this. This also means we do not need
-  # to query 's notin gch.cycleRoots' at all.
-  if isAllocatedPtr(gch.region, s) and s.color == rcWhite:
-    s.setColor(rcBlack)
-    forAllChildren(s, waCollectWhite)
-    freeCyclicCell(gch, s)
-
-proc markRoots(gch: var GcHeap) =
-  var tabSize = 0
-  for s in elements(gch.cycleRoots):
-    #writeCell("markRoot", s)
-    inc tabSize
-    if s.color == rcPurple and s.refcount >=% rcIncrement:
-      markGray(s)
-    else:
-      excl(gch.cycleRoots, s)
-      # (s.color == rcBlack and rc == 0) as 1 condition:
-      if s.refcount == 0:
-        freeCyclicCell(gch, s)
-  gch.stat.cycleTableSize = max(gch.stat.cycleTableSize, tabSize)
-
 when useBackupGc:
   proc sweep(gch: var GcHeap) =
     for x in allObjects(gch.region):
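The surviving sweep (context lines above) iterates allObjects(gch.region) and frees whatever the mark phase did not reach, which is why all of the colored trial-deletion code could go: no purple roots, no gray/white scanning. A minimal mark&sweep over a toy heap to illustrate the shape of the algorithm, not the runtime's actual data structures:

```nim
# Minimal mark&sweep over a toy heap, mirroring the backup GC's shape:
# mark everything reachable from the roots, then sweep the unmarked rest.
type
  Obj = ref object
    marked: bool
    children: seq[Obj]

proc mark(o: Obj) =
  # plain recursion for clarity; the real GC avoids deep recursion
  if o != nil and not o.marked:
    o.marked = true
    for c in o.children:
      mark(c)

proc markAndSweep(heap: var seq[Obj]; roots: openArray[Obj]) =
  for r in roots: mark(r)
  var live: seq[Obj] = @[]
  for o in heap:
    if o.marked:
      o.marked = false   # reset the mark bit for the next collection
      live.add o
    # unmarked objects are simply dropped; a real GC deallocates them
  heap = live
```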
@@ -717,19 +651,6 @@ proc doOperation(p: pointer, op: WalkOp) =
     #if c.refcount <% rcIncrement: addZCT(gch.zct, c)
   of waPush:
     add(gch.tempStack, c)
-  of waCycleDecRef:
-    gcAssert(c.refcount >=% rcIncrement, "doOperation 3")
-    c.refcount = c.refcount -% rcIncrement
-  of waMarkGray:
-    gcAssert(c.refcount >=% rcIncrement, "waMarkGray")
-    c.refcount = c.refcount -% rcIncrement
-    markGray(c)
-  of waScan: scan(c)
-  of waScanBlack:
-    c.refcount = c.refcount +% rcIncrement
-    if c.color != rcBlack:
-      scanBlack(c)
-  of waCollectWhite: collectWhite(c)
   of waMarkGlobal:
     when useMarkForDebug or useBackupGc:
       when hasThreadSupport:
@@ -752,10 +673,6 @@ when useMarkForDebug or useBackupGc:
   proc markStackAndRegistersForSweep(gch: var GcHeap) {.noinline, cdecl,
                                                         benign.}
 
-proc collectRoots(gch: var GcHeap) =
-  for s in elements(gch.cycleRoots):
-    collectWhite(s)
-
 proc collectCycles(gch: var GcHeap) =
   when hasThreadSupport:
     for c in gch.toDispose:
@@ -767,30 +684,6 @@ proc collectCycles(gch: var GcHeap) =
     markStackAndRegistersForSweep(gch)
     markGlobals(gch)
     sweep(gch)
-  else:
-    markRoots(gch)
-    # scanRoots:
-    for s in elements(gch.cycleRoots): scan(s)
-    collectRoots(gch)
-
-    cellsetReset(gch.cycleRoots)
-  # alive cycles need to be kept in 'cycleRoots' if they are referenced
-  # from the stack; otherwise the write barrier will add the cycle root again
-  # anyway:
-  when false:
-    var d = gch.decStack.d
-    var cycleRootsLen = 0
-    for i in 0..gch.decStack.len-1:
-      var c = d[i]
-      gcAssert isAllocatedPtr(gch.region, c), "addBackStackRoots"
-      gcAssert c.refcount >=% rcIncrement, "addBackStackRoots: dead cell"
-      if canBeCycleRoot(c):
-        #if c notin gch.cycleRoots:
-        inc cycleRootsLen
-        incl(gch.cycleRoots, c)
-      gcAssert c.typ != nil, "addBackStackRoots 2"
-    if cycleRootsLen != 0:
-      cfprintf(cstdout, "cycle roots: %ld\n", cycleRootsLen)
 
 proc gcMark(gch: var GcHeap, p: pointer) {.inline.} =
   # the addresses are not as cells on the stack, so turn them to cells:
@@ -812,22 +705,6 @@ proc gcMark(gch: var GcHeap, p: pointer) {.inline.} =
       add(gch.decStack, cell)
   sysAssert(allocInv(gch.region), "gcMark end")
 
-proc markThreadStacks(gch: var GcHeap) =
-  when hasThreadSupport and hasSharedHeap:
-    {.error: "not fully implemented".}
-    var it = threadList
-    while it != nil:
-      # mark registers:
-      for i in 0 .. high(it.registers): gcMark(gch, it.registers[i])
-      var sp = cast[ByteAddress](it.stackBottom)
-      var max = cast[ByteAddress](it.stackTop)
-      # XXX stack direction?
-      # XXX unroll this loop:
-      while sp <=% max:
-        gcMark(gch, cast[ppointer](sp)[])
-        sp = sp +% sizeof(pointer)
-      it = it.next
-
 include gc_common
 
 proc markStackAndRegisters(gch: var GcHeap) {.noinline, cdecl.} =
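Both the kept markStackAndRegisters and the removed markThreadStacks scan a word-aligned address range and treat every word as a potential pointer, i.e. conservative stack scanning, as in the `while sp <=% max` loop above. A sketch of that loop over an ordinary buffer, where gcMarkToy and scanRange are hypothetical stand-ins:

```nim
# Toy conservative scan: walk a word-aligned memory range and hand every
# word to the marker as a *potential* pointer, like a stack scan does.
proc gcMarkToy(p: pointer) =
  discard   # a real GC would check p against the heap and mark its cell

proc scanRange(bottom, top: pointer) =
  var sp = cast[uint](bottom)
  let maxAddr = cast[uint](top)
  while sp <= maxAddr:
    gcMarkToy(cast[ptr pointer](sp)[])   # read the word at address sp
    sp += uint(sizeof(pointer))

when isMainModule:
  var buf: array[4, pointer]             # stand-in for a stack slice
  scanRange(addr buf[0], addr buf[3])
```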
@@ -866,8 +743,6 @@ proc collectZCT(gch: var GcHeap): bool =
       # as this might be too slow.
       # In any case, it should be removed from the ZCT. But not
       # freed. **KEEP THIS IN MIND WHEN MAKING THIS INCREMENTAL!**
-      when cycleGC:
-        if canbeCycleRoot(c): excl(gch.cycleRoots, c)
       when logGC: writeCell("zct dealloc cell", c)
       gcTrace(c, csZctFreed)
       # We are about to free the object, call the finalizer BEFORE its
@@ -915,7 +790,6 @@ proc collectCTBody(gch: var GcHeap) =
   sysAssert(gch.decStack.len == 0, "collectCT")
   prepareForInteriorPointerChecking(gch.region)
   markStackAndRegisters(gch)
-  markThreadStacks(gch)
   gch.stat.maxStackCells = max(gch.stat.maxStackCells, gch.decStack.len)
   inc(gch.stat.stackScans)
   if collectZCT(gch):