mirror of
https://github.com/nim-lang/Nim.git
fixes #5301
@@ -100,7 +100,7 @@ path="$lib/pure"
 @if windows:
   #gcc.path = r"$nim\dist\mingw\bin"
   @if gcc:
-    tlsEmulation:on
+    #tlsEmulation:on
   @end
 @end

@@ -110,7 +110,13 @@ path="$lib/pure"
   gcc.options.always = "-w"
   gcc.cpp.options.always = "-w -fpermissive"
 @else:
-  gcc.options.always = "-w"
+  @if cpu32:
+    gcc.options.always = "-w -m32"
+    gcc.options.linker = "-m32"
+  @else:
+    gcc.options.always = "-w -m64"
+    gcc.options.linker = "-m64"
+  @end
   gcc.cpp.options.always = "-w -fpermissive"
 @end

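The @else: branch previously relied on gcc's default word size; after this change it passes a matching -m32 or -m64 to both the compile and link steps, selected by the cpu32 define. For example (standard Nim CLI switches; the module name is illustrative):

    nim c --cpu:i386 myprog.nim    # defines cpu32, so gcc and the linker get -m32
    nim c --cpu:amd64 myprog.nim   # otherwise gcc and the linker get -m64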
@@ -54,7 +54,7 @@ type
   BaseChunk {.pure, inheritable.} = object
     prevSize: int        # size of previous chunk; for coalescing
     size: int            # if < PageSize it is a small chunk
-    used: bool           # later will be optimized into prevSize...
+    origSize: int        # 0th bit == 1 if 'used'

   SmallChunk = object of BaseChunk
     next, prev: PSmallChunk # chunks of the same size
@@ -65,11 +65,11 @@ type

   BigChunk = object of BaseChunk # not necessarily > PageSize!
     next, prev: PBigChunk # chunks of the same (or bigger) size
-    align: int
+    heapLink: PBigChunk  # linked list of all chunks for bulk 'deallocPages'
     data: AlignType      # start of usable memory

-template smallChunkOverhead(): expr = sizeof(SmallChunk)-sizeof(AlignType)
-template bigChunkOverhead(): expr = sizeof(BigChunk)-sizeof(AlignType)
+template smallChunkOverhead(): untyped = sizeof(SmallChunk)-sizeof(AlignType)
+template bigChunkOverhead(): untyped = sizeof(BigChunk)-sizeof(AlignType)

 # ------------- chunk table ---------------------------------------------------
 # We use a PtrSet of chunk starts and a table[Page, chunksize] for chunk
@@ -98,6 +98,7 @@ type
     currMem, maxMem, freeMem: int # memory sizes (allocated from OS)
     lastSize: int # needed for the case that OS gives us pages linearly
     freeChunksList: PBigChunk # XXX make this a datastructure with O(1) access
+    heapLink: PBigChunk # used to link every chunk for bulk 'deallocPages'
     chunkStarts: IntSet
     root, deleted, last, freeAvlNodes: PAvlNode
     locked, blockChunkSizeIncrease: bool # if locked, we cannot free pages.
@@ -241,7 +242,7 @@ proc isSmallChunk(c: PChunk): bool {.inline.} =
   return c.size <= SmallChunkSize-smallChunkOverhead()

 proc chunkUnused(c: PChunk): bool {.inline.} =
-  result = not c.used
+  result = (c.origSize and 1) == 0

 iterator allObjects(m: var MemRegion): pointer {.inline.} =
   m.locked = true
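Replacing the 'used' bool works because chunk sizes are page-granular and therefore always even, so bit 0 of origSize is free to carry the flag. A minimal, self-contained Nim sketch of this flag-packing idea (the Chunk type and proc names here are illustrative, not the allocator's real API):

    type
      Chunk = object
        origSize: int                 # 0th bit == 1 if 'used'; rest is the size

    proc markUsed(c: var Chunk) = c.origSize = c.origSize or 1        # set bit 0
    proc markUnused(c: var Chunk) = c.origSize = c.origSize and not 1 # clear bit 0
    proc unused(c: Chunk): bool = (c.origSize and 1) == 0
    proc realSize(c: Chunk): int = c.origSize and not 1               # mask the flag

    when isMainModule:
      var c = Chunk(origSize: 8192)   # fresh chunk: even size, bit 0 clear
      assert c.unused
      c.markUsed()
      assert not c.unused and c.realSize == 8192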
@@ -310,12 +311,14 @@ proc requestOsChunks(a: var MemRegion, size: int): PBigChunk =

   incCurrMem(a, size)
   inc(a.freeMem, size)
+  result.heapLink = a.heapLink
+  result.origSize = size
+  a.heapLink = result

   sysAssert((cast[ByteAddress](result) and PageMask) == 0, "requestOsChunks 1")
   #zeroMem(result, size)
   result.next = nil
   result.prev = nil
-  result.used = false
   result.size = size
   # update next.prevSize:
   var nxt = cast[ByteAddress](result) +% size
@@ -336,19 +339,21 @@ proc requestOsChunks(a: var MemRegion, size: int): PBigChunk =
   result.prevSize = 0 # unknown
   a.lastSize = size # for next request

-proc freeOsChunks(a: var MemRegion, p: pointer, size: int) =
-  # update next.prevSize:
-  var c = cast[PChunk](p)
-  var nxt = cast[ByteAddress](p) +% c.size
-  sysAssert((nxt and PageMask) == 0, "freeOsChunks")
-  var next = cast[PChunk](nxt)
-  if pageIndex(next) in a.chunkStarts:
-    next.prevSize = 0 # XXX used
-  excl(a.chunkStarts, pageIndex(p))
-  osDeallocPages(p, size)
-  decCurrMem(a, size)
-  dec(a.freeMem, size)
-  #c_fprintf(stdout, "[Alloc] back to OS: %ld\n", size)
+when false:
+  # with the new linked list design this is not possible anymore:
+  proc freeOsChunks(a: var MemRegion, p: pointer, size: int) =
+    # update next.prevSize:
+    var c = cast[PChunk](p)
+    var nxt = cast[ByteAddress](p) +% c.size
+    sysAssert((nxt and PageMask) == 0, "freeOsChunks")
+    var next = cast[PChunk](nxt)
+    if pageIndex(next) in a.chunkStarts:
+      next.prevSize = 0 # XXX used
+    excl(a.chunkStarts, pageIndex(p))
+    osDeallocPages(p, size)
+    decCurrMem(a, size)
+    dec(a.freeMem, size)
+    #c_fprintf(stdout, "[Alloc] back to OS: %ld\n", size)

 proc isAccessible(a: MemRegion, p: pointer): bool {.inline.} =
   result = contains(a.chunkStarts, pageIndex(p))
@@ -414,19 +419,20 @@ proc freeBigChunk(a: var MemRegion, c: PBigChunk) =
       excl(a.chunkStarts, pageIndex(c))
       c = cast[PBigChunk](le)

-  if c.size < ChunkOsReturn or doNotUnmap or a.locked:
-    incl(a, a.chunkStarts, pageIndex(c))
-    updatePrevSize(a, c, c.size)
-    listAdd(a.freeChunksList, c)
-    c.used = false
-  else:
-    freeOsChunks(a, c, c.size)
+  #if c.size < ChunkOsReturn or doNotUnmap or a.locked:
+  incl(a, a.chunkStarts, pageIndex(c))
+  updatePrevSize(a, c, c.size)
+  listAdd(a.freeChunksList, c)
+  # set 'used' to false:
+  c.origSize = c.origSize and not 1
+  #else:
+  #  freeOsChunks(a, c, c.size)

 proc splitChunk(a: var MemRegion, c: PBigChunk, size: int) =
   var rest = cast[PBigChunk](cast[ByteAddress](c) +% size)
   sysAssert(rest notin a.freeChunksList, "splitChunk")
   rest.size = c.size - size
-  rest.used = false
+  rest.origSize = 0 # not used and size irrelevant
   rest.next = nil
   rest.prev = nil
   rest.prevSize = size
@@ -461,7 +467,8 @@ proc getBigChunk(a: var MemRegion, size: int): PBigChunk =
   if result.size > size:
     splitChunk(a, result, size)
   result.prevSize = 0 # XXX why is this needed?
-  result.used = true
+  # set 'used' to true:
+  result.origSize = result.origSize or 1
   incl(a, a.chunkStarts, pageIndex(result))
   dec(a.freeMem, size)
@@ -704,18 +711,26 @@ proc realloc(allocator: var MemRegion, p: pointer, newsize: Natural): pointer =

 proc deallocOsPages(a: var MemRegion) =
   # we free every 'ordinarily' allocated page by iterating over the page bits:
-  for p in elements(a.chunkStarts):
-    var page = cast[PChunk](p shl PageShift)
-    when not doNotUnmap:
-      var size = if page.size < PageSize: PageSize else: page.size
-      osDeallocPages(page, size)
-    else:
-      # Linux on PowerPC for example frees MORE than asked if 'munmap'
-      # receives the start of an originally mmap'ed memory block. This is not
-      # too bad, but we must not access 'page.size' then as that could trigger
-      # a segfault. But we don't need to access 'page.size' here anyway,
-      # because calling munmap with PageSize suffices:
-      osDeallocPages(page, PageSize)
+  var it = a.heapLink
+  while it != nil:
+    let next = it.heapLink
+    sysAssert it.origSize >= PageSize, "origSize too small"
+    # note:
+    osDeallocPages(it, it.origSize and not 1)
+    it = next
+  when false:
+    for p in elements(a.chunkStarts):
+      var page = cast[PChunk](p shl PageShift)
+      when not doNotUnmap:
+        var size = if page.size < PageSize: PageSize else: page.size
+        osDeallocPages(page, size)
+      else:
+        # Linux on PowerPC for example frees MORE than asked if 'munmap'
+        # receives the start of an originally mmap'ed memory block. This is not
+        # too bad, but we must not access 'page.size' then as that could trigger
+        # a segfault. But we don't need to access 'page.size' here anyway,
+        # because calling munmap with PageSize suffices:
+        osDeallocPages(page, PageSize)
   # And then we free the pages that are in use for the page bits:
   llDeallocAll(a)
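With every big chunk threaded onto heapLink at allocation time, deallocOsPages can hand all memory back to the OS in one linear walk instead of decoding the page-bit set. Two details matter: the successor link must be read before the chunk is freed (the chunk header lives inside the memory being released), and bit 0 of origSize (the 'used' flag) must be masked off to recover the real allocation size. A self-contained sketch of the pattern, with hypothetical names (ChunkObj, deallocAll) and alloc0/dealloc standing in for the real page-level OS calls:

    type
      Chunk = ptr ChunkObj
      ChunkObj = object
        heapLink: Chunk    # next chunk in the all-chunks list
        origSize: int      # bit 0 stores the 'used' flag

    proc osDeallocPages(p: pointer, size: int) =
      dealloc(p)           # stand-in for the real munmap/VirtualFree wrapper

    proc deallocAll(head: var Chunk) =
      var it = head
      while it != nil:
        let next = it.heapLink                     # read the link BEFORE freeing 'it'
        osDeallocPages(it, it.origSize and not 1)  # mask off the 'used' bit
        it = next
      head = nil

    when isMainModule:
      var head: Chunk = nil
      for i in 1..3:
        let c = cast[Chunk](alloc0(sizeof(ChunkObj)))
        c.origSize = 4096 or 1   # pretend page-sized, marked 'used'
        c.heapLink = head        # O(1) intrusive prepend, as in requestOsChunks
        head = c
      deallocAll(head)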
@@ -132,7 +132,7 @@ elif defined(windows):
                     header: "<windows.h>", stdcall, importc: "VirtualAlloc".}

   proc virtualFree(lpAddress: pointer, dwSize: int,
-                   dwFreeType: int32) {.header: "<windows.h>", stdcall,
+                   dwFreeType: int32): cint {.header: "<windows.h>", stdcall,
                    importc: "VirtualFree".}

   proc osAllocPages(size: int): pointer {.inline.} =
@@ -151,7 +151,10 @@ elif defined(windows):
     # Windows :-(. We have to live with MEM_DECOMMIT instead.
     # Well that used to be the case but MEM_DECOMMIT fragments the address
     # space heavily, so we now treat Windows as a strange unmap target.
-    when reallyOsDealloc: virtualFree(p, 0, MEM_RELEASE)
+    when reallyOsDealloc:
+      if virtualFree(p, 0, MEM_RELEASE) == 0:
+        cprintf "yes, failing!"
+        quit 1
     #VirtualFree(p, size, MEM_DECOMMIT)

 elif hostOS == "standalone":
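The binding change above is what makes this failure check possible at all: without a declared return type, VirtualFree's status was silently discarded. Per the Win32 API, VirtualFree returns nonzero on success and zero on failure, and a MEM_RELEASE call must pass dwSize == 0 so the entire original reservation is freed. A hedged sketch of the same pattern in isolation (MEM_RELEASE's value comes from the Windows headers; releasePages and the error message are illustrative):

    when defined(windows):
      const MEM_RELEASE = 0x8000'i32

      proc virtualFree(lpAddress: pointer, dwSize: int, dwFreeType: int32): cint
        {.header: "<windows.h>", stdcall, importc: "VirtualFree".}

      proc releasePages(p: pointer) =
        # dwSize must be 0 with MEM_RELEASE: the whole reservation goes away
        if virtualFree(p, 0, MEM_RELEASE) == 0:
          quit("VirtualFree failed", 1)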