* fixes #15076

* heapqueue: optimized for ARC

* added another test case [backport:1.4]

* code cleanup
Author: Andreas Rumpf
Date: 2020-11-26 10:24:52 +01:00
Committed by: GitHub
Parent: 3e7077ac7d
Commit: da753c6a2e
4 changed files with 105 additions and 8 deletions


@@ -37,7 +37,7 @@ type
     g: ControlFlowGraph
     graph: ModuleGraph
     otherRead: PNode
-    inLoop, inSpawn: int
+    inLoop, inSpawn, inLoopCond: int
     uninit: IntSet # set of uninit'ed vars
     uninitComputed: bool
     idgen: IdGenerator
@@ -296,8 +296,8 @@ proc isNoInit(dest: PNode): bool {.inline.} =
   result = dest.kind == nkSym and sfNoInit in dest.sym.flags

 proc genSink(c: var Con; dest, ri: PNode, isDecl = false): PNode =
-  if isUnpackedTuple(dest) or isDecl or
-      (isAnalysableFieldAccess(dest, c.owner) and isFirstWrite(dest, c)) or
+  if (c.inLoopCond == 0 and (isUnpackedTuple(dest) or isDecl or
+      (isAnalysableFieldAccess(dest, c.owner) and isFirstWrite(dest, c)))) or
       isNoInit(dest):
     # optimize sink call into a bitwise memcopy
     result = newTree(nkFastAsgn, dest, ri)
@@ -629,8 +629,10 @@ template handleNestedTempl(n, processCall: untyped, willProduceStmt = false) =
   of nkWhileStmt:
     inc c.inLoop
+    inc c.inLoopCond
     result = copyNode(n)
     result.add p(n[0], c, s, normal)
+    dec c.inLoopCond
     var bodyScope = nestedScope(s)
     let bodyResult = p(n[1], c, bodyScope, normal)
     result.add processScope(c, bodyScope, bodyResult)
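
The two hunks above belong together: `inLoopCond` counts how deeply the destructor-injection pass currently sits inside a while-loop condition (`p(n[0], ...)`), and `genSink` now refuses to lower a sink into a raw `nkFastAsgn` memcopy while that counter is non-zero, because a "first write" inside a loop condition actually happens once per iteration. A minimal sketch of the code shape this guards, assuming a seq of ref-carrying tuples much like the regression test further down (names are mine, not from the commit):

type Task = ref object
  name: string

var q: seq[tuple[finishAt: int64, t: Task]]

proc drain(now: int64) =
  var count = q.len
  # The condition is re-evaluated every iteration; any owned temporary the
  # compiler materializes for `q[0]` here is written repeatedly, so treating
  # that sink as a one-time bitwise copy would drop the previously held
  # reference without destroying it under ARC/ORC.
  while count > 0 and now >= q[0].finishAt:
    discard q.pop()
    dec count

q.add (finishAt: 1'i64, t: Task(name: "t1"))
drain(high(int64))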


@@ -65,7 +65,7 @@ proc len*[T](heap: HeapQueue[T]): int {.inline.} =
   ## Returns the number of elements of `heap`.
   heap.data.len

-proc `[]`*[T](heap: HeapQueue[T], i: Natural): T {.inline.} =
+proc `[]`*[T](heap: HeapQueue[T], i: Natural): lent T {.inline.} =
   ## Accesses the i-th element of `heap`.
   heap.data[i]
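
A hedged aside on the hunk above: returning `lent T` from `[]` lets a peek such as `heap[0]` borrow the stored element instead of copying it, which is the ARC-oriented win for element types with owned payloads (string, seq, ref). Usage does not change; a small sketch against the public API:

import std/heapqueue

var h = initHeapQueue[string]()
h.push "zebra"
h.push "apple"
# `h[0]` now yields a borrowed view of the smallest element; no string
# payload is copied just to look at it.
assert h[0] == "apple"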
@@ -111,7 +111,7 @@ proc siftup[T](heap: var HeapQueue[T], p: int) =
   heap.data[pos] = newitem
   siftdown(heap, startpos, pos)

-proc push*[T](heap: var HeapQueue[T], item: T) =
+proc push*[T](heap: var HeapQueue[T], item: sink T) =
   ## Pushes `item` onto heap, maintaining the heap invariant.
   heap.data.add(item)
   siftdown(heap, 0, len(heap)-1)
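
Likewise for `push`: with `item: sink T` the argument can be moved into the heap's backing seq when the call is the argument's last use, so under ARC/ORC no copy of the payload is made. A short sketch (the `enqueue` helper is mine, only for illustration):

import std/heapqueue

proc enqueue(h: var HeapQueue[string]) =
  var s = "a reasonably long payload"
  # Last use of `s`: the sink parameter lets the compiler move it into the
  # heap instead of copying it and then destroying the original.
  h.push s

var h = initHeapQueue[string]()
h.enqueue()
assert h.len == 1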
@@ -168,7 +168,7 @@ proc del*[T](heap: var HeapQueue[T], index: Natural) =
   if index < newLen:
     heap.siftup(index)

-proc replace*[T](heap: var HeapQueue[T], item: T): T =
+proc replace*[T](heap: var HeapQueue[T], item: sink T): T =
   ## Pops and returns the current smallest value, and add the new item.
   ## This is more efficient than pop() followed by push(), and can be
   ## more appropriate when using a fixed-size heap. Note that the value
@@ -186,7 +186,7 @@ proc replace*[T](heap: var HeapQueue[T], item: T): T =
   heap.data[0] = item
   siftup(heap, 0)

-proc pushpop*[T](heap: var HeapQueue[T], item: T): T =
+proc pushpop*[T](heap: var HeapQueue[T], item: sink T): T =
   ## Fast version of a push followed by a pop.
   runnableExamples:
     var heap = initHeapQueue[int]()
@@ -197,7 +197,7 @@ proc pushpop*[T](heap: var HeapQueue[T], item: T): T =
     assert heap[0] == 6
     assert heap.pushpop(4) == 4

   result = item
-  if heap.len > 0 and heapCmp(heap.data[0], item):
+  if heap.len > 0 and heapCmp(heap.data[0], result):
     swap(result, heap.data[0])
     siftup(heap, 0)
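
The last hunk is the subtle one (my reading, not commit text): once `item` is a `sink` parameter, the preceding `result = item` may move `item` away, so the comparison afterwards has to read `result`, which is guaranteed to still hold the pushed value; the observable behaviour is unchanged. For reference, what `pushpop` does from the caller's side:

import std/heapqueue

var h = initHeapQueue[int]()
for x in [5, 12, 2]: h.push x
# Push 4, then pop the smallest: 2 is smaller than 4, so 2 comes back
# and 4 stays in the heap as the new minimum.
assert h.pushpop(4) == 2
assert h[0] == 4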

tests/arc/tasyncleak4.nim (new file)

@@ -0,0 +1,21 @@
discard """
cmd: "nim c --gc:orc -d:useMalloc $file"
output: '''ok'''
valgrind: "leaks"
"""
# bug #15076
import asyncdispatch
var futures: seq[Future[void]]
for i in 1..20:
futures.add sleepAsync 1
futures.add sleepAsync 1
futures.all.waitFor()
futures.setLen 0
setGlobalDispatcher nil
GC_fullCollect()
echo "ok"


@@ -0,0 +1,74 @@
discard """
  output: '''400 true'''
  cmd: "nim c --gc:orc $file"
"""

type HeapQueue*[T] = object
  data: seq[T]

proc len*[T](heap: HeapQueue[T]): int {.inline.} =
  heap.data.len

proc `[]`*[T](heap: HeapQueue[T], i: Natural): T {.inline.} =
  heap.data[i]

proc push*[T](heap: var HeapQueue[T], item: T) =
  heap.data.add(item)

proc pop*[T](heap: var HeapQueue[T]): T =
  result = heap.data.pop

proc clear*[T](heap: var HeapQueue[T]) = heap.data.setLen 0

type
  Future = ref object of RootObj
    s: string
    callme: proc()

var called = 0

proc consume(f: Future) =
  inc called

proc newFuture(s: string): Future =
  var r: Future
  r = Future(s: s, callme: proc() =
    consume r)
  result = r

var q: HeapQueue[tuple[finishAt: int64, fut: Future]]

proc sleep(f: int64): Future =
  q.push (finishAt: f, fut: newFuture("async-sleep"))

proc processTimers =
  # Pop the timers in the order in which they will expire (smaller `finishAt`).
  var count = q.len
  let t = high(int64)
  while count > 0 and t >= q[0].finishAt:
    q.pop().fut.callme()
    dec count

var futures: seq[Future]

proc main =
  for i in 1..200:
    futures.add sleep(56020904056300)
    futures.add sleep(56020804337500)
    #futures.add sleep(2.0)
    #futures.add sleep(4.0)
    processTimers()
    #q.pop()[1].callme()
    #q.pop()[1].callme()
    futures.setLen 0
    q.clear()
main()
GC_fullCollect()
echo called, " ", getOccupiedMem() < 160
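
A closing note on what this second test exercises (my summary, not part of the commit): every `Future` created by `newFuture` is captured by its own `callme` closure, so each queue entry is a small reference cycle that plain ARC cannot reclaim; the final `GC_fullCollect()` plus the `getOccupiedMem() < 160` check rely on ORC's cycle collector freeing them. The cycle in isolation looks like this (sketch, not from the test):

type SelfRef = ref object
  callme: proc()

proc makeCycle(): SelfRef =
  var r = SelfRef()
  # The closure environment captures `r`, and `r.callme` keeps the closure
  # alive: SelfRef -> closure env -> SelfRef, a cycle ARC alone would leak.
  r.callme = proc () = discard r
  result = r

discard makeCycle()
GC_fullCollect()   # under --gc:orc this reclaims the cycle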