mirror of
https://github.com/nim-lang/Nim.git
synced 2026-01-08 22:13:29 +00:00
Merge branch 'devel' of github.com:nim-lang/Nim into devel
@@ -576,7 +576,7 @@ proc getTypeDescAux(m: BModule, typ: PType, check: var IntSet): Rope =
result = getTypeDescAux(m, et, check) & star
idTablePut(m.typeCache, t, result)
of tyOpenArray, tyVarargs:
result = getTypeDescAux(m, t.sons[0], check) & "*"
result = getTypeDescWeak(m, t.sons[0], check) & "*"
idTablePut(m.typeCache, t, result)
of tyProc:
result = getTypeName(t)
@@ -654,7 +654,7 @@ proc getTypeDescAux(m: BModule, typ: PType, check: var IntSet): Rope =
else:
result = cppName & "<"
for i in 1 .. typ.len-2:
if i > 1: result.add(", ")
if i > 1: result.add(" COMMA ")
result.add(getTypeDescAux(m, typ.sons[i], check))
result.add("> ")
# always call for sideeffects:

@@ -1243,13 +1243,16 @@ proc genPatternCall(p: PProc; n: PNode; pat: string; typ: PType;

proc genInfixCall(p: PProc, n: PNode, r: var TCompRes) =
# don't call '$' here for efficiency:
let pat = n.sons[0].sym.loc.r.data
internalAssert pat != nil
if pat.contains({'#', '(', '@'}):
var typ = skipTypes(n.sons[0].typ, abstractInst)
assert(typ.kind == tyProc)
genPatternCall(p, n, pat, typ, r)
return
let f = n[0].sym
if f.loc.r == nil: f.loc.r = mangleName(f, p.target)
if sfInfixCall in f.flags:
let pat = n.sons[0].sym.loc.r.data
internalAssert pat != nil
if pat.contains({'#', '(', '@'}):
var typ = skipTypes(n.sons[0].typ, abstractInst)
assert(typ.kind == tyProc)
genPatternCall(p, n, pat, typ, r)
return
gen(p, n.sons[1], r)
if r.typ == etyBaseIndex:
if r.address == nil:
@@ -1266,7 +1269,7 @@ proc genInfixCall(p: PProc, n: PNode, r: var TCompRes) =
genArgs(p, n, r, 2)

proc genCall(p: PProc, n: PNode, r: var TCompRes) =
if thisParam(p, n.sons[0].typ) != nil:
if n.sons[0].kind == nkSym and thisParam(p, n.sons[0].typ) != nil:
genInfixCall(p, n, r)
return
if p.target == targetPHP:
@@ -1645,8 +1648,11 @@ proc genMagic(p: PProc, n: PNode, r: var TCompRes) =
of mChr, mArrToSeq: gen(p, n.sons[1], r) # nothing to do
of mOrd: genOrd(p, n, r)
of mLengthStr:
unaryExpr(p, n, r, "", "($1 != null ? $1.length-1 : 0)" |
"strlen($1)")
if p.target == targetJS and n.sons[1].typ.skipTypes(abstractInst).kind == tyCString:
unaryExpr(p, n, r, "", "($1 != null ? $1.length : 0)")
else:
unaryExpr(p, n, r, "", "($1 != null ? $1.length-1 : 0)" |
"strlen($1)")
of mXLenStr: unaryExpr(p, n, r, "", "$1.length-1" | "strlen($1)")
of mLengthSeq, mLengthOpenArray, mLengthArray:
unaryExpr(p, n, r, "", "($1 != null ? $1.length : 0)" |
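Editor's note: a minimal sketch (not part of the patch) of what the mLengthStr change means at the Nim level when compiled with the JS backend: len of a cstring now maps to JavaScript's .length directly, while Nim strings keep the internal trailing-zero adjustment.

  # sketch, JS backend assumed
  var js: cstring = "abc"
  var ns: string = "abc"
  echo js.len   # emitted as (js != null ? js.length : 0)    -> 3
  echo ns.len   # emitted as (ns != null ? ns.length-1 : 0)  -> 3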
@@ -802,7 +802,7 @@ proc doParamsAux(g: var TSrcGen, params: PNode) =
gsemicolon(g, params, 1)
put(g, tkParRi, ")")

if params.sons[0].kind != nkEmpty:
if params.len > 0 and params.sons[0].kind != nkEmpty:
putWithSpace(g, tkOpr, "->")
gsub(g, params.sons[0])

@@ -65,15 +65,6 @@ template semIdeForTemplateOrGeneric(c: PContext; n: PNode;
# echo "passing to safeSemExpr: ", renderTree(n)
discard safeSemExpr(c, n)

proc typeMismatch(n: PNode, formal, actual: PType) =
if formal.kind != tyError and actual.kind != tyError:
let named = typeToString(formal)
let desc = typeToString(formal, preferDesc)
let x = if named == desc: named else: named & " = " & desc
localError(n.info, errGenerated, msgKindToString(errTypeMismatch) &
typeToString(actual) & ") " &
`%`(msgKindToString(errButExpectedX), [x]))

proc fitNode(c: PContext, formal: PType, arg: PNode): PNode =
if arg.typ.isNil:
localError(arg.info, errExprXHasNoType,

@@ -361,7 +361,19 @@ proc explicitGenericInstError(n: PNode): PNode =

proc explicitGenericSym(c: PContext, n: PNode, s: PSym): PNode =
var m: TCandidate
initCandidate(c, m, s, n)
# binding has to stay 'nil' for this to work!
initCandidate(c, m, s, nil)

for i in 1..sonsLen(n)-1:
let formal = s.ast.sons[genericParamsPos].sons[i-1].typ
let arg = n[i].typ
let tm = typeRel(m, formal, arg, true)
if tm in {isNone, isConvertible}:
if formal.sonsLen > 0 and formal.sons[0].kind != tyNone:
typeMismatch(n, formal.sons[0], arg)
else:
typeMismatch(n, formal, arg)
break
var newInst = generateInstance(c, s, m.bindings, n.info)
markUsed(n.info, s)
styleCheckUse(n.info, s)

@@ -74,8 +74,12 @@ proc semSymGenericInstantiation(c: PContext, n: PNode, s: PSym): PNode =

proc inlineConst(n: PNode, s: PSym): PNode {.inline.} =
result = copyTree(s.ast)
result.typ = s.typ
result.info = n.info
if result.isNil:
localError(n.info, "constant of type '" & typeToString(s.typ) & "' has no value")
result = newSymNode(s)
else:
result.typ = s.typ
result.info = n.info

type
TConvStatus = enum
@@ -83,8 +87,9 @@ type
convNotNeedeed,
convNotLegal

proc checkConversionBetweenObjects(castDest, src: PType): TConvStatus =
return if inheritanceDiff(castDest, src) == high(int):
proc checkConversionBetweenObjects(castDest, src: PType; pointers: int): TConvStatus =
let diff = inheritanceDiff(castDest, src)
return if diff == high(int) or (pointers > 1 and diff != 0):
convNotLegal
else:
convOK
@@ -101,13 +106,15 @@ proc checkConvertible(c: PContext, castDest, src: PType): TConvStatus =
return
var d = skipTypes(castDest, abstractVar)
var s = skipTypes(src, abstractVar-{tyTypeDesc})
var pointers = 0
while (d != nil) and (d.kind in {tyPtr, tyRef}) and (d.kind == s.kind):
d = d.lastSon
s = s.lastSon
inc pointers
if d == nil:
result = convNotLegal
elif d.kind == tyObject and s.kind == tyObject:
result = checkConversionBetweenObjects(d, s)
result = checkConversionBetweenObjects(d, s, pointers)
elif (skipTypes(castDest, abstractVarRange).kind in IntegralTypes) and
(skipTypes(src, abstractVarRange-{tyTypeDesc}).kind in IntegralTypes):
# accept conversion between integral types
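Editor's note: a minimal sketch (not from the patch, hypothetical type names) of the kind of conversion the new pointers counter is meant to reject: once more than one ptr/ref level is involved, an object conversion is only accepted when the inheritance distance is zero.

  type
    Base = object of RootObj
    Sub = object of Base
    PSub = ptr Sub
    PBase = ptr Base
    PPSub = ptr PSub
    PPBase = ptr PBase

  var p: PSub
  var pp: PPSub
  discard PBase(p)      # still legal: one pointer level, Sub inherits from Base
  # discard PPBase(pp)  # rejected after this change: pointers > 1 and diff != 0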
@@ -111,9 +111,9 @@ proc removeDefaultParamValues(n: PNode) =
# not possible... XXX We don't solve this issue here.
a.sons[L-1] = ast.emptyNode

proc freshGenSyms(n: PNode, owner: PSym, symMap: var TIdTable) =
proc freshGenSyms(n: PNode, owner, orig: PSym, symMap: var TIdTable) =
# we need to create a fresh set of gensym'ed symbols:
if n.kind == nkSym and sfGenSym in n.sym.flags:
if n.kind == nkSym and sfGenSym in n.sym.flags and n.sym.owner == orig:
let s = n.sym
var x = PSym(idTableGet(symMap, s))
if x == nil:
@@ -122,7 +122,7 @@ proc freshGenSyms(n: PNode, owner: PSym, symMap: var TIdTable) =
idTablePut(symMap, s, x)
n.sym = x
else:
for i in 0 .. <safeLen(n): freshGenSyms(n.sons[i], owner, symMap)
for i in 0 .. <safeLen(n): freshGenSyms(n.sons[i], owner, orig, symMap)

proc addParamOrResult(c: PContext, param: PSym, kind: TSymKind)

@@ -137,7 +137,7 @@ proc addProcDecls(c: PContext, fn: PSym) =

maybeAddResult(c, fn, fn.ast)

proc instantiateBody(c: PContext, n, params: PNode, result: PSym) =
proc instantiateBody(c: PContext, n, params: PNode, result, orig: PSym) =
if n.sons[bodyPos].kind != nkEmpty:
inc c.inGenericInst
# add it here, so that recursive generic procs are possible:
@@ -149,7 +149,7 @@ proc instantiateBody(c: PContext, n, params: PNode, result: PSym) =
let param = params[i].sym
if sfGenSym in param.flags:
idTablePut(symMap, params[i].sym, result.typ.n[param.position+1].sym)
freshGenSyms(b, result, symMap)
freshGenSyms(b, result, orig, symMap)
b = semProcBody(c, b)
b = hloBody(c, b)
n.sons[bodyPos] = transformBody(c.module, b, result)
@@ -165,7 +165,7 @@ proc fixupInstantiatedSymbols(c: PContext, s: PSym) =
openScope(c)
var n = oldPrc.ast
n.sons[bodyPos] = copyTree(s.getBody)
instantiateBody(c, n, nil, oldPrc)
instantiateBody(c, n, nil, oldPrc, s)
closeScope(c)
popInfoContext()

@@ -312,7 +312,7 @@ proc generateInstance(c: PContext, fn: PSym, pt: TIdTable,
pragma(c, result, n.sons[pragmasPos], allRoutinePragmas)
if isNil(n.sons[bodyPos]):
n.sons[bodyPos] = copyTree(fn.getBody)
instantiateBody(c, n, fn.typ.n, result)
instantiateBody(c, n, fn.typ.n, result, fn)
sideEffectsCheck(c, result)
paramsTypeCheck(c, result.typ)
else:

@@ -778,11 +778,16 @@ proc typeSectionFinalPass(c: PContext, n: PNode) =
var s = a.sons[0].sym
# compute the type's size and check for illegal recursions:
if a.sons[1].kind == nkEmpty:
if a.sons[2].kind in {nkSym, nkIdent, nkAccQuoted}:
var x = a[2]
while x.kind in {nkStmtList, nkStmtListExpr} and x.len > 0:
x = x.lastSon
if x.kind notin {nkObjectTy, nkDistinctTy, nkEnumTy, nkEmpty}:
# type aliases are hard:
#MessageOut('for type ' + typeToString(s.typ));
var t = semTypeNode(c, a.sons[2], nil)
if t.kind in {tyObject, tyEnum}:
var t = semTypeNode(c, x, nil)
assert t != nil
if t.kind in {tyObject, tyEnum, tyDistinct}:
assert s.typ != nil
assignType(s.typ, t)
s.typ.id = t.id # same id
checkConstructedType(s.info, s.typ)

@@ -513,7 +513,7 @@ proc typeRangeRel(f, a: PType): TTypeRelation {.noinline.} =
proc matchUserTypeClass*(c: PContext, m: var TCandidate,
ff, a: PType): TTypeRelation =
var body = ff.skipTypes({tyUserTypeClassInst})
if c.inTypeClass > 20:
if c.inTypeClass > 4:
localError(body.n[3].info, $body.n[3] & " too nested for type matching")
return isNone

@@ -598,6 +598,10 @@ proc tryResolvingStaticExpr(c: var TCandidate, n: PNode): PNode =
let instantiated = replaceTypesInBody(c.c, c.bindings, n, nil)
result = c.c.semExpr(c.c, instantiated)

template subtypeCheck() =
if result <= isSubrange and f.lastSon.skipTypes(abstractInst).kind in {tyRef, tyPtr, tyVar}:
result = isNone

proc typeRel(c: var TCandidate, f, aOrig: PType, doBind = true): TTypeRelation =
# typeRel can be used to establish various relationships between types:
#
@@ -737,6 +741,7 @@ proc typeRel(c: var TCandidate, f, aOrig: PType, doBind = true): TTypeRelation =
of tyVar:
if aOrig.kind == tyVar: result = typeRel(c, f.base, aOrig.base)
else: result = typeRel(c, f.base, aOrig)
subtypeCheck()
of tyArray, tyArrayConstr:
# tyArrayConstr cannot happen really, but
# we wanna be safe here
@@ -867,6 +872,7 @@ proc typeRel(c: var TCandidate, f, aOrig: PType, doBind = true): TTypeRelation =
for i in 0..f.len-2:
if typeRel(c, f.sons[i], a.sons[i]) == isNone: return isNone
result = typeRel(c, f.lastSon, a.lastSon)
subtypeCheck()
if result <= isConvertible: result = isNone
elif tfNotNil in f.flags and tfNotNil notin a.flags:
result = isNilConversion

@@ -414,8 +414,8 @@ proc transformConv(c: PTransf, n: PNode): PTransNode =
result = newTransNode(nkChckRange, n, 3)
dest = skipTypes(n.typ, abstractVar)
result[0] = transform(c, n.sons[1])
result[1] = newIntTypeNode(nkIntLit, firstOrd(dest), source).PTransNode
result[2] = newIntTypeNode(nkIntLit, lastOrd(dest), source).PTransNode
result[1] = newIntTypeNode(nkIntLit, firstOrd(dest), dest).PTransNode
result[2] = newIntTypeNode(nkIntLit, lastOrd(dest), dest).PTransNode
of tyFloat..tyFloat128:
# XXX int64 -> float conversion?
if skipTypes(n.typ, abstractVar).kind == tyRange:

@@ -1505,3 +1505,12 @@ proc skipHiddenSubConv*(n: PNode): PNode =
result.typ = dest
else:
result = n

proc typeMismatch*(n: PNode, formal, actual: PType) =
if formal.kind != tyError and actual.kind != tyError:
let named = typeToString(formal)
let desc = typeToString(formal, preferDesc)
let x = if named == desc: named else: named & " = " & desc
localError(n.info, errGenerated, msgKindToString(errTypeMismatch) &
typeToString(actual) & ") " &
`%`(msgKindToString(errButExpectedX), [x]))

@@ -243,7 +243,9 @@ Define Effect
``useFork`` Makes ``osproc`` use ``fork`` instead of ``posix_spawn``.
``useNimRtl`` Compile and link against ``nimrtl.dll``.
``useMalloc`` Makes Nim use C's `malloc`:idx: instead of Nim's
own memory manager. This only works with ``gc:none``.
own memory manager, ableit prefixing each allocation with
its size to support clearing memory on reallocation.
This only works with ``gc:none``.
``useRealtimeGC`` Enables support of Nim's GC for *soft* realtime
systems. See the documentation of the `gc <gc.html>`_
for further information.

@@ -222,6 +222,8 @@ __clang__

/* ----------------------------------------------------------------------- */

#define COMMA ,

#include <limits.h>
#include <stddef.h>

@@ -2627,3 +2627,17 @@ proc utimes*(path: cstring, times: ptr array [2, Timeval]): int {.
## Returns zero on success.
##
## For more information read http://www.unix.com/man-page/posix/3/utimes/.

proc handle_signal(sig: cint, handler: proc (a: cint) {.noconv.}) {.importc: "signal", header: "<signal.h>".}

template onSignal*(signals: varargs[cint], body: untyped): stmt =
## Setup code to be executed when Unix signals are received. Example:
## from posix import SIGINT, SIGTERM
## onSignal(SIGINT, SIGTERM):
## echo "bye"

for s in signals:
handle_signal(s,
proc (sig: cint) {.noconv.} =
body
)

@@ -9,7 +9,7 @@

include "system/inclrtl"

import os, oids, tables, strutils, macros, times
import os, oids, tables, strutils, macros, times, heapqueue

import nativesockets, net

@@ -354,21 +354,27 @@ proc `or`*[T, Y](fut1: Future[T], fut2: Future[Y]): Future[void] =

type
PDispatcherBase = ref object of RootRef
timers: seq[tuple[finishAt: float, fut: Future[void]]]
timers: HeapQueue[tuple[finishAt: float, fut: Future[void]]]

proc processTimers(p: PDispatcherBase) =
var oldTimers = p.timers
p.timers = @[]
for t in oldTimers:
if epochTime() >= t.finishAt:
t.fut.complete()
else:
p.timers.add(t)
proc processTimers(p: PDispatcherBase) {.inline.} =
while p.timers.len > 0 and epochTime() >= p.timers[0].finishAt:
p.timers.pop().fut.complete()

proc adjustedTimeout(p: PDispatcherBase, timeout: int): int {.inline.} =
# If dispatcher has active timers this proc returns the timeout
# of the nearest timer. Returns `timeout` otherwise.
result = timeout
if p.timers.len > 0:
let timerTimeout = p.timers[0].finishAt
let curTime = epochTime()
if timeout == -1 or (curTime + (timeout / 1000)) > timerTimeout:
result = int((timerTimeout - curTime) * 1000)
if result < 0: result = 0
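Editor's note: a small worked example of the adjustment above (a sketch, not part of the patch): with a 500 ms poll timeout and the nearest timer due in 200 ms, the dispatcher only waits ~200 ms.

  import times
  # mirrors the arithmetic in adjustedTimeout
  let timeout = 500                    # requested poll timeout in ms
  let curTime = epochTime()
  let timerTimeout = curTime + 0.2     # nearest timer fires in 200 ms
  var adjusted = timeout
  if timeout == -1 or (curTime + (timeout / 1000)) > timerTimeout:
    adjusted = int((timerTimeout - curTime) * 1000)
    if adjusted < 0: adjusted = 0
  assert adjusted <= 200               # waits ~200 ms instead of 500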
when defined(windows) or defined(nimdoc):
import winlean, sets, hashes
type
CompletionKey = Dword
CompletionKey = ULONG_PTR

CompletionData* = object
fd*: AsyncFD # TODO: Rename this.
@@ -396,7 +402,7 @@ when defined(windows) or defined(nimdoc):
new result
result.ioPort = createIoCompletionPort(INVALID_HANDLE_VALUE, 0, 0, 1)
result.handles = initSet[AsyncFD]()
result.timers = @[]
result.timers.newHeapQueue()

var gDisp{.threadvar.}: PDispatcher ## Global dispatcher
proc getGlobalDispatcher*(): PDispatcher =
@@ -427,11 +433,13 @@ when defined(windows) or defined(nimdoc):
raise newException(ValueError,
"No handles or timers registered in dispatcher.")

let llTimeout =
if timeout == -1: winlean.INFINITE
else: timeout.int32
let at = p.adjustedTimeout(timeout)
var llTimeout =
if at == -1: winlean.INFINITE
else: at.int32

var lpNumberOfBytesTransferred: Dword
var lpCompletionKey: ULONG
var lpCompletionKey: ULONG_PTR
var customOverlapped: PCustomOverlapped
let res = getQueuedCompletionStatus(p.ioPort,
addr lpNumberOfBytesTransferred, addr lpCompletionKey,
@@ -956,7 +964,7 @@ else:
proc newDispatcher*(): PDispatcher =
new result
result.selector = newSelector()
result.timers = @[]
result.timers.newHeapQueue()

var gDisp{.threadvar.}: PDispatcher ## Global dispatcher
proc getGlobalDispatcher*(): PDispatcher =
@@ -1014,7 +1022,7 @@ else:

proc poll*(timeout = 500) =
let p = getGlobalDispatcher()
for info in p.selector.select(timeout):
for info in p.selector.select(p.adjustedTimeout(timeout)):
let data = PData(info.key.data)
assert data.fd == info.key.fd.AsyncFD
#echo("In poll ", data.fd.cint)
@@ -1215,7 +1223,7 @@ proc sleepAsync*(ms: int): Future[void] =
## ``ms`` milliseconds.
var retFuture = newFuture[void]("sleepAsync")
let p = getGlobalDispatcher()
p.timers.add((epochTime() + (ms / 1000), retFuture))
p.timers.push((epochTime() + (ms / 1000), retFuture))
return retFuture

proc accept*(socket: AsyncFD,
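Editor's note: a hedged usage sketch of the heap-backed timers (relies only on the public sleepAsync, `and` and waitFor APIs): each call pushes a (finishAt, future) pair onto the dispatcher's HeapQueue, and poll completes the earliest deadline first.

  import asyncdispatch

  proc demo() {.async.} =
    let slow = sleepAsync(50)   # pushed onto p.timers
    let fast = sleepAsync(10)   # smaller finishAt, ends up at the heap's root
    await fast and slow         # 'fast' completes first, then 'slow'

  waitFor demo()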
@@ -162,7 +162,7 @@ proc read*(f: AsyncFile, size: int): Future[string] =
# Request completed immediately.
var bytesRead: DWord
let overlappedRes = getOverlappedResult(f.fd.Handle,
cast[POverlapped](ol)[], bytesRead, false.WinBool)
cast[POverlapped](ol), bytesRead, false.WinBool)
if not overlappedRes.bool:
let err = osLastError()
if err.int32 == ERROR_HANDLE_EOF:
@@ -282,7 +282,7 @@ proc write*(f: AsyncFile, data: string): Future[void] =
# Request completed immediately.
var bytesWritten: DWord
let overlappedRes = getOverlappedResult(f.fd.Handle,
cast[POverlapped](ol)[], bytesWritten, false.WinBool)
cast[POverlapped](ol), bytesWritten, false.WinBool)
if not overlappedRes.bool:
retFuture.fail(newException(OSError, osErrorMsg(osLastError())))
else:

107 lib/pure/collections/heapqueue.nim Normal file
@@ -0,0 +1,107 @@
##[ Heap queue algorithm (a.k.a. priority queue). Ported from Python heapq.

Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.

]##

type HeapQueue*[T] = distinct seq[T]

proc newHeapQueue*[T](): HeapQueue[T] {.inline.} = HeapQueue[T](newSeq[T]())
proc newHeapQueue*[T](h: var HeapQueue[T]) {.inline.} = h = HeapQueue[T](newSeq[T]())

proc len*[T](h: HeapQueue[T]): int {.inline.} = seq[T](h).len
proc `[]`*[T](h: HeapQueue[T], i: int): T {.inline.} = seq[T](h)[i]
proc `[]=`[T](h: var HeapQueue[T], i: int, v: T) {.inline.} = seq[T](h)[i] = v
proc add[T](h: var HeapQueue[T], v: T) {.inline.} = seq[T](h).add(v)

proc heapCmp[T](x, y: T): bool {.inline.} =
return (x < y)

# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
# is the index of a leaf with a possibly out-of-order value. Restore the
# heap invariant.
proc siftdown[T](heap: var HeapQueue[T], startpos, p: int) =
var pos = p
var newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
let parentpos = (pos - 1) shr 1
let parent = heap[parentpos]
if heapCmp(newitem, parent):
heap[pos] = parent
pos = parentpos
else:
break
heap[pos] = newitem

proc siftup[T](heap: var HeapQueue[T], p: int) =
let endpos = len(heap)
var pos = p
let startpos = pos
let newitem = heap[pos]
# Bubble up the smaller child until hitting a leaf.
var childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of smaller child.
let rightpos = childpos + 1
if rightpos < endpos and not heapCmp(heap[childpos], heap[rightpos]):
childpos = rightpos
# Move the smaller child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
siftdown(heap, startpos, pos)

proc push*[T](heap: var HeapQueue[T], item: T) =
## Push item onto heap, maintaining the heap invariant.
(seq[T](heap)).add(item)
siftdown(heap, 0, len(heap)-1)

proc pop*[T](heap: var HeapQueue[T]): T =
## Pop the smallest item off the heap, maintaining the heap invariant.
let lastelt = seq[T](heap).pop()
if heap.len > 0:
result = heap[0]
heap[0] = lastelt
siftup(heap, 0)
else:
result = lastelt

proc replace*[T](heap: var HeapQueue[T], item: T): T =
## Pop and return the current smallest value, and add the new item.
## This is more efficient than pop() followed by push(), and can be
## more appropriate when using a fixed-size heap. Note that the value
## returned may be larger than item! That constrains reasonable uses of
## this routine unless written as part of a conditional replacement:

## if item > heap[0]:
## item = replace(heap, item)
result = heap[0]
heap[0] = item
siftup(heap, 0)

proc pushpop*[T](heap: var HeapQueue[T], item: T): T =
## Fast version of a push followed by a pop.
if heap.len > 0 and heapCmp(heap[0], item):
swap(item, heap[0])
siftup(heap, 0)
return item

when isMainModule:
# Simple sanity test
var heap = newHeapQueue[int]()
let data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]
for item in data:
push(heap, item)
doAssert(heap[0] == 0)
var sort = newSeq[int]()
while heap.len > 0:
sort.add(pop(heap))
doAssert(sort == @[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
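Editor's note: a small usage sketch for a part of the new module that the sanity test above does not touch (replace); it assumes only the API declared in this file and that the module is importable as heapqueue.

  import heapqueue

  var h = newHeapQueue[int]()
  for x in [5, 1, 9]: h.push(x)
  doAssert h.pop() == 1        # smallest element comes out first
  doAssert h.replace(7) == 5   # pop the current minimum and push 7 in one step
  doAssert h.pop() == 7
  doAssert h.pop() == 9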
@@ -663,6 +663,27 @@ proc sort*[A, B](t: OrderedTableRef[A, B],
## contrast to the `sort` for count tables).
t[].sort(cmp)

proc del*[A, B](t: var OrderedTable[A, B], key: A) =
## deletes `key` from ordered hash table `t`. O(n) comlexity.
var prev = -1
let hc = hash(key)
forAllOrderedPairs:
if t.data[h].hcode == hc:
if t.first == h:
t.first = t.data[h].next
else:
t.data[prev].next = t.data[h].next
var zeroValue : type(t.data[h])
t.data[h] = zeroValue
dec t.counter
break
else:
prev = h

proc del*[A, B](t: var OrderedTableRef[A, B], key: A) =
## deletes `key` from ordered hash table `t`. O(n) comlexity.
t[].del(key)

# ------------------------------ count tables -------------------------------

type
@@ -984,6 +1005,26 @@ when isMainModule:
s3[p1] = 30_000
s3[p2] = 45_000

block: # Ordered table should preserve order after deletion
var
s4 = initOrderedTable[int, int]()
s4[1] = 1
s4[2] = 2
s4[3] = 3

var prev = 0
for i in s4.values:
doAssert(prev < i)
prev = i

s4.del(2)
doAssert(2 notin s4)
doAssert(s4.len == 2)
prev = 0
for i in s4.values:
doAssert(prev < i)
prev = i

var
t1 = initCountTable[string]()
t2 = initCountTable[string]()

@@ -54,6 +54,7 @@ type
lvlAll, ## all levels active
lvlDebug, ## debug level (and any above) active
lvlInfo, ## info level (and any above) active
lvlNotice, ## info notice (and any above) active
lvlWarn, ## warn level (and any above) active
lvlError, ## error level (and any above) active
lvlFatal, ## fatal level (and any above) active
@@ -61,7 +62,7 @@ type

const
LevelNames*: array [Level, string] = [
"DEBUG", "DEBUG", "INFO", "WARN", "ERROR", "FATAL", "NONE"
"DEBUG", "DEBUG", "INFO", "NOTICE", "WARN", "ERROR", "FATAL", "NONE"
]

defaultFmtStr* = "$levelname " ## default format string
@@ -258,22 +259,47 @@ template log*(level: Level, args: varargs[string, `$`]) =

template debug*(args: varargs[string, `$`]) =
## Logs a debug message to all registered handlers.
##
## Messages that are useful to the application developer only and are usually
## turned off in release.
log(lvlDebug, args)

template info*(args: varargs[string, `$`]) =
## Logs an info message to all registered handlers.
##
## Messages that are generated during the normal operation of an application
## and are of no particular importance. Useful to aggregate for potential
## later analysis.
log(lvlInfo, args)

template notice*(args: varargs[string, `$`]) =
## Logs an notice message to all registered handlers.
##
## Semantically very similar to `info`, but meant to be messages you want to
## be actively notified about (depending on your application).
## These could be, for example, grouped by hour and mailed out.
log(lvlNotice, args)

template warn*(args: varargs[string, `$`]) =
## Logs a warning message to all registered handlers.
##
## A non-error message that may indicate a potential problem rising or
## impacted performance.
log(lvlWarn, args)

template error*(args: varargs[string, `$`]) =
## Logs an error message to all registered handlers.
##
## A application-level error condition. For example, some user input generated
## an exception. The application will continue to run, but functionality or
## data was impacted, possibly visible to users.
log(lvlError, args)

template fatal*(args: varargs[string, `$`]) =
## Logs a fatal error message to all registered handlers.
##
## A application-level fatal condition. FATAL usually means that the application
## cannot go on and will exit (but this logging event will not do that for you).
log(lvlFatal, args)

proc addHandler*(handler: Logger) =
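Editor's note: a brief usage sketch of the new level (assumes the module's existing newConsoleLogger, addHandler and setLogFilter API).

  import logging

  addHandler(newConsoleLogger(fmtStr = "$levelname "))
  setLogFilter(lvlNotice)             # lvlNotice now sits between lvlInfo and lvlWarn
  info("routine detail")              # filtered out
  notice("nightly batch finished")    # printed as "NOTICE nightly batch finished"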
@@ -20,9 +20,9 @@
## var msg = createMessage("Hello from Nim's SMTP",
## "Hello!.\n Is this awesome or what?",
## @["foo@gmail.com"])
## var smtp = connect("smtp.gmail.com", 465, true, true)
## smtp.auth("username", "password")
## smtp.sendmail("username@gmail.com", @["foo@gmail.com"], $msg)
## var smtpConn = connect("smtp.gmail.com", Port 465, true, true)
## smtpConn.auth("username", "password")
## smtpConn.sendmail("username@gmail.com", @["foo@gmail.com"], $msg)
##
##
## For SSL support this module relies on OpenSSL. If you want to
@@ -31,6 +31,8 @@
import net, strutils, strtabs, base64, os
import asyncnet, asyncdispatch

export Port

type
Smtp* = object
sock: Socket
@@ -258,8 +260,8 @@ when not defined(testing) and isMainModule:
# "Hello, my name is dom96.\n What\'s yours?", @["dominik@localhost"])
#echo(msg)

#var smtp = connect("localhost", 25, False, True)
#smtp.sendmail("root@localhost", @["dominik@localhost"], $msg)
#var smtpConn = connect("localhost", Port 25, false, true)
#smtpConn.sendmail("root@localhost", @["dominik@localhost"], $msg)

#echo(decode("a17sm3701420wbe.12"))
proc main() {.async.} =

@@ -1291,7 +1291,7 @@ const
when hasThreadSupport and defined(tcc) and not compileOption("tlsEmulation"):
# tcc doesn't support TLS
{.error: "``--tlsEmulation:on`` must be used when using threads with tcc backend".}

when defined(boehmgc):
when defined(windows):
const boehmLib = "boehmgc.dll"
@@ -2565,8 +2565,9 @@ when not defined(JS): #and not defined(nimscript):
{.push stack_trace: off, profiler:off.}

when not defined(nimscript) and not defined(nogc):
proc initGC()
when not defined(boehmgc) and not defined(useMalloc) and not defined(gogc):
when not defined(gcStack):
proc initGC()
when not defined(boehmgc) and not defined(useMalloc) and not defined(gogc) and not defined(gcStack):
proc initAllocator() {.inline.}

proc initStackBottom() {.inline, compilerproc.} =
@@ -36,33 +36,34 @@ type
BaseChunk = object
next: Chunk
size: int
head, last: ptr ObjHeader # first and last object in chunk that
head, tail: ptr ObjHeader # first and last object in chunk that
# has a finalizer attached to it

type
StackPtr = object
chunk: pointer
bump: pointer
remaining: int
current: Chunk

MemRegion* = object
remaining: int
chunk: pointer
head, last: Chunk
bump: pointer
head, tail: Chunk
nextChunkSize, totalSize: int
hole: ptr Hole # we support individual freeing
lock: SysLock
when hasThreadSupport:
lock: SysLock

var
region {.threadVar.}: MemRegion
tlRegion {.threadVar.}: MemRegion

template withRegion*(r: MemRegion; body: untyped) =
let oldRegion = region
region = r
let oldRegion = tlRegion
tlRegion = r
try:
body
finally:
region = oldRegion
tlRegion = oldRegion

template inc(p: pointer, s: int) =
p = cast[pointer](cast[int](p) +% s)
@@ -71,7 +72,7 @@ template `+!`(p: pointer, s: int): pointer =
cast[pointer](cast[int](p) +% s)

template `-!`(p: pointer, s: int): pointer =
cast[pointer](cast[int](p) +% s)
cast[pointer](cast[int](p) -% s)

proc allocSlowPath(r: var MemRegion; size: int) =
# we need to ensure that the underlying linked list
@@ -84,7 +85,7 @@ proc allocSlowPath(r: var MemRegion; size: int) =
r.nextChunkSize =
if r.totalSize < 64 * 1024: PageSize*4
else: r.nextChunkSize*2
var s = align(size+sizeof(BaseChunk), PageSize)
var s = roundup(size+sizeof(BaseChunk), PageSize)
var fresh: Chunk
if s > r.nextChunkSize:
fresh = cast[Chunk](osAllocPages(s))
@@ -97,22 +98,25 @@ proc allocSlowPath(r: var MemRegion; size: int) =
else:
s = r.nextChunkSize
fresh.size = s
fresh.final = nil
r.totalSize += s
let old = r.last
fresh.head = nil
fresh.tail = nil
inc r.totalSize, s
let old = r.tail
if old == nil:
r.head = fresh
else:
r.last.next = fresh
r.chunk = fresh +! sizeof(BaseChunk)
r.last = fresh
r.tail.next = fresh
r.bump = fresh +! sizeof(BaseChunk)
r.tail = fresh
r.remaining = s - sizeof(BaseChunk)

proc alloc(r: var MemRegion; size: int): pointer {.inline.} =
if unlikely(r.remaining < size): allocSlowPath(r, size)
if size > r.remaining:
allocSlowPath(r, size)
sysAssert(size <= r.remaining, "size <= r.remaining")
dec(r.remaining, size)
result = r.chunk
inc r.chunk, size
result = r.bump
inc r.bump, size

proc runFinalizers(c: Chunk) =
var it = c.head
@@ -120,228 +124,241 @@ proc runFinalizers(c: Chunk) =
# indivually freed objects with finalizer stay in the list, but
# their typ is nil then:
if it.typ != nil and it.typ.finalizer != nil:
(cast[Finalizer](cell.typ.finalizer))(cell+!sizeof(ObjHeader))
it = it.next
(cast[Finalizer](it.typ.finalizer))(it+!sizeof(ObjHeader))
it = it.nextFinal

proc dealloc(r: var MemRegion; p: pointer) =
let it = p-!sizeof(ObjHeader)
let it = cast[ptr ObjHeader](p-!sizeof(ObjHeader))
if it.typ != nil and it.typ.finalizer != nil:
(cast[Finalizer](cell.typ.finalizer))(p)
(cast[Finalizer](it.typ.finalizer))(p)
it.typ = nil

proc deallocAll(head: Chunk) =
proc deallocAll(r: var MemRegion; head: Chunk) =
var it = head
while it != nil:
let nxt = it.next
runFinalizers(it)
dec r.totalSize, it.size
osDeallocPages(it, it.size)
it = it.next
it = nxt

proc deallocAll*(r: var MemRegion) =
deallocAll(r.head)
deallocAll(r, r.head)
zeroMem(addr r, sizeof r)

proc obstackPtr*(r: MemRegion): StackPtr =
result.chunk = r.chunk
result.bump = r.bump
result.remaining = r.remaining
result.current = r.last
result.current = r.tail

proc setObstackPtr*(r: MemRegion; sp: StackPtr) =
template computeRemaining(r): untyped =
r.tail.size -% (cast[int](r.bump) -% cast[int](r.tail))

proc setObstackPtr*(r: var MemRegion; sp: StackPtr) =
# free everything after 'sp':
if sp.current != nil:
deallocAll(sp.current.next)
r.chunk = sp.chunk
deallocAll(r, sp.current.next)
sp.current.next = nil
else:
deallocAll(r, r.head)
r.head = nil
r.bump = sp.bump
r.tail = sp.current
r.remaining = sp.remaining
r.last = sp.current

proc obstackPtr*(): StackPtr = tlRegion.obstackPtr()
proc setObstackPtr*(sp: StackPtr) = tlRegion.setObstackPtr(sp)

proc joinRegion*(dest: var MemRegion; src: MemRegion) =
# merging is not hard.
if dest.head.isNil:
dest.head = src.head
else:
dest.last.next = src.head
dest.last = src.last
dest.chunk = src.chunk
dest.tail.next = src.head
dest.tail = src.tail
dest.bump = src.bump
dest.remaining = src.remaining
dest.nextChunkSize = max(dest.nextChunkSize, src.nextChunkSize)
dest.totalSize += src.totalSize
if dest.hole.size < src.hole.size:
dest.hole = src.hole
inc dest.totalSize, src.totalSize

proc isOnHeap*(r: MemRegion; p: pointer): bool =
# the last chunk is the largest, so check it first. It's also special
# the tail chunk is the largest, so check it first. It's also special
# in that contains the current bump pointer:
if r.last >= p and p < r.chunk:
if r.tail >= p and p < r.bump:
return true
var it = r.head
while it != r.last:
while it != r.tail:
if it >= p and p <= it+!it.size: return true
it = it.next

proc isInteriorPointer(r: MemRegion; p: pointer): pointer =
discard " we cannot patch stack pointers anyway!"
when false:
# essential feature for later: copy data over from one region to another

type
PointerStackChunk = object
next, prev: ptr PointerStackChunk
len: int
data: array[128, pointer]
proc isInteriorPointer(r: MemRegion; p: pointer): pointer =
discard " we cannot patch stack pointers anyway!"

template head(s: PointerStackChunk): untyped = s.prev
template tail(s: PointerStackChunk): untyped = s.next
type
PointerStackChunk = object
next, prev: ptr PointerStackChunk
len: int
data: array[128, pointer]

include chains
template head(s: PointerStackChunk): untyped = s.prev
template tail(s: PointerStackChunk): untyped = s.next

proc push(r: var MemRegion; s: var PointerStackChunk; x: pointer) =
if s.len < high(s.data):
s.data[s.len] = x
inc s.len
else:
let fresh = cast[ptr PointerStackChunk](alloc(r, sizeof(PointerStackChunk)))
fresh.len = 1
fresh.data[0] = x
fresh.next = nil
fresh.prev = nil
append(s, fresh)
include chains

proc genericDeepCopyAux(dr: var MemRegion; stack: var PointerStackChunk;
dest, src: pointer, mt: PNimType) {.benign.}
proc genericDeepCopyAux(dr: var MemRegion; stack: var PointerStackChunk;
dest, src: pointer, n: ptr TNimNode) {.benign.} =
var
d = cast[ByteAddress](dest)
s = cast[ByteAddress](src)
case n.kind
of nkSlot:
genericDeepCopyAux(cast[pointer](d +% n.offset),
cast[pointer](s +% n.offset), n.typ)
of nkList:
for i in 0..n.len-1:
genericDeepCopyAux(dest, src, n.sons[i])
of nkCase:
var dd = selectBranch(dest, n)
var m = selectBranch(src, n)
# reset if different branches are in use; note different branches also
# imply that's not self-assignment (``x = x``)!
if m != dd and dd != nil:
genericResetAux(dest, dd)
copyMem(cast[pointer](d +% n.offset), cast[pointer](s +% n.offset),
n.typ.size)
if m != nil:
genericDeepCopyAux(dest, src, m)
of nkNone: sysAssert(false, "genericDeepCopyAux")

proc copyDeepString(dr: var MemRegion; stack: var PointerStackChunk; src: NimString): NimString {.inline.} =
result = rawNewStringNoInit(dr, src.len)
result.len = src.len
c_memcpy(result.data, src.data, src.len + 1)

proc genericDeepCopyAux(dr: var MemRegion; stack: var PointerStackChunk;
dest, src: pointer, mt: PNimType) =
var
d = cast[ByteAddress](dest)
s = cast[ByteAddress](src)
sysAssert(mt != nil, "genericDeepCopyAux 2")
case mt.kind
of tyString:
var x = cast[PPointer](dest)
var s2 = cast[PPointer](s)[]
if s2 == nil:
x[] = nil
proc push(r: var MemRegion; s: var PointerStackChunk; x: pointer) =
if s.len < high(s.data):
s.data[s.len] = x
inc s.len
else:
x[] = copyDeepString(cast[NimString](s2))
of tySequence:
var s2 = cast[PPointer](src)[]
var seq = cast[PGenericSeq](s2)
var x = cast[PPointer](dest)
if s2 == nil:
x[] = nil
return
sysAssert(dest != nil, "genericDeepCopyAux 3")
x[] = newSeq(mt, seq.len)
var dst = cast[ByteAddress](cast[PPointer](dest)[])
for i in 0..seq.len-1:
genericDeepCopyAux(dr, stack,
cast[pointer](dst +% i*% mt.base.size +% GenericSeqSize),
cast[pointer](cast[ByteAddress](s2) +% i *% mt.base.size +%
GenericSeqSize),
mt.base)
of tyObject:
# we need to copy m_type field for tyObject, as it could be empty for
# sequence reallocations:
var pint = cast[ptr PNimType](dest)
pint[] = cast[ptr PNimType](src)[]
if mt.base != nil:
genericDeepCopyAux(dr, stack, dest, src, mt.base)
genericDeepCopyAux(dr, stack, dest, src, mt.node)
of tyTuple:
genericDeepCopyAux(dr, stack, dest, src, mt.node)
of tyArray, tyArrayConstr:
for i in 0..(mt.size div mt.base.size)-1:
genericDeepCopyAux(dr, stack,
cast[pointer](d +% i*% mt.base.size),
cast[pointer](s +% i*% mt.base.size), mt.base)
of tyRef:
let s2 = cast[PPointer](src)[]
if s2 == nil:
cast[PPointer](dest)[] = nil
else:
# we modify the header of the cell temporarily; instead of the type
# field we store a forwarding pointer. XXX This is bad when the cloning
# fails due to OOM etc.
let x = usrToCell(s2)
let forw = cast[int](x.typ)
if (forw and 1) == 1:
# we stored a forwarding pointer, so let's use that:
let z = cast[pointer](forw and not 1)
unsureAsgnRef(cast[PPointer](dest), z)
let fresh = cast[ptr PointerStackChunk](alloc(r, sizeof(PointerStackChunk)))
fresh.len = 1
fresh.data[0] = x
fresh.next = nil
fresh.prev = nil
append(s, fresh)

proc genericDeepCopyAux(dr: var MemRegion; stack: var PointerStackChunk;
dest, src: pointer, mt: PNimType) {.benign.}
proc genericDeepCopyAux(dr: var MemRegion; stack: var PointerStackChunk;
dest, src: pointer, n: ptr TNimNode) {.benign.} =
var
d = cast[ByteAddress](dest)
s = cast[ByteAddress](src)
case n.kind
of nkSlot:
genericDeepCopyAux(cast[pointer](d +% n.offset),
cast[pointer](s +% n.offset), n.typ)
of nkList:
for i in 0..n.len-1:
genericDeepCopyAux(dest, src, n.sons[i])
of nkCase:
var dd = selectBranch(dest, n)
var m = selectBranch(src, n)
# reset if different branches are in use; note different branches also
# imply that's not self-assignment (``x = x``)!
if m != dd and dd != nil:
genericResetAux(dest, dd)
copyMem(cast[pointer](d +% n.offset), cast[pointer](s +% n.offset),
n.typ.size)
if m != nil:
genericDeepCopyAux(dest, src, m)
of nkNone: sysAssert(false, "genericDeepCopyAux")

proc copyDeepString(dr: var MemRegion; stack: var PointerStackChunk; src: NimString): NimString {.inline.} =
result = rawNewStringNoInit(dr, src.len)
result.len = src.len
c_memcpy(result.data, src.data, src.len + 1)

proc genericDeepCopyAux(dr: var MemRegion; stack: var PointerStackChunk;
dest, src: pointer, mt: PNimType) =
var
d = cast[ByteAddress](dest)
s = cast[ByteAddress](src)
sysAssert(mt != nil, "genericDeepCopyAux 2")
case mt.kind
of tyString:
var x = cast[PPointer](dest)
var s2 = cast[PPointer](s)[]
if s2 == nil:
x[] = nil
else:
let realType = x.typ
let z = newObj(realType, realType.base.size)
x[] = copyDeepString(cast[NimString](s2))
of tySequence:
var s2 = cast[PPointer](src)[]
var seq = cast[PGenericSeq](s2)
var x = cast[PPointer](dest)
if s2 == nil:
x[] = nil
return
sysAssert(dest != nil, "genericDeepCopyAux 3")
x[] = newSeq(mt, seq.len)
var dst = cast[ByteAddress](cast[PPointer](dest)[])
for i in 0..seq.len-1:
genericDeepCopyAux(dr, stack,
cast[pointer](dst +% i*% mt.base.size +% GenericSeqSize),
cast[pointer](cast[ByteAddress](s2) +% i *% mt.base.size +%
GenericSeqSize),
mt.base)
of tyObject:
# we need to copy m_type field for tyObject, as it could be empty for
# sequence reallocations:
var pint = cast[ptr PNimType](dest)
pint[] = cast[ptr PNimType](src)[]
if mt.base != nil:
genericDeepCopyAux(dr, stack, dest, src, mt.base)
genericDeepCopyAux(dr, stack, dest, src, mt.node)
of tyTuple:
genericDeepCopyAux(dr, stack, dest, src, mt.node)
of tyArray, tyArrayConstr:
for i in 0..(mt.size div mt.base.size)-1:
genericDeepCopyAux(dr, stack,
cast[pointer](d +% i*% mt.base.size),
cast[pointer](s +% i*% mt.base.size), mt.base)
of tyRef:
let s2 = cast[PPointer](src)[]
if s2 == nil:
cast[PPointer](dest)[] = nil
else:
# we modify the header of the cell temporarily; instead of the type
# field we store a forwarding pointer. XXX This is bad when the cloning
# fails due to OOM etc.
let x = usrToCell(s2)
let forw = cast[int](x.typ)
if (forw and 1) == 1:
# we stored a forwarding pointer, so let's use that:
let z = cast[pointer](forw and not 1)
unsureAsgnRef(cast[PPointer](dest), z)
else:
let realType = x.typ
let z = newObj(realType, realType.base.size)

unsureAsgnRef(cast[PPointer](dest), z)
x.typ = cast[PNimType](cast[int](z) or 1)
genericDeepCopyAux(dr, stack, z, s2, realType.base)
x.typ = realType
else:
copyMem(dest, src, mt.size)
proc joinAliveDataFromRegion*(dest: var MemRegion; src: var MemRegion;
root: pointer): pointer =
# we mark the alive data and copy only alive data over to 'dest'.
# This is O(liveset) but it nicely compacts memory, so it's fine.
# We use the 'typ' field as a forwarding pointer. The forwarding
# pointers have bit 0 set, so we can disambiguate them.
# We allocate a temporary stack in 'src' that we later free:
var s: PointerStackChunk
s.len = 1
s.data[0] = root
while s.len > 0:
var p: pointer
if s.tail == nil:
p = s.data[s.len-1]
dec s.len
unsureAsgnRef(cast[PPointer](dest), z)
x.typ = cast[PNimType](cast[int](z) or 1)
genericDeepCopyAux(dr, stack, z, s2, realType.base)
x.typ = realType
else:
p = s.tail.data[s.tail.len-1]
dec s.tail.len
if s.tail.len == 0:
unlink(s, s.tail)
copyMem(dest, src, mt.size)

proc joinAliveDataFromRegion*(dest: var MemRegion; src: var MemRegion;
root: pointer): pointer =
# we mark the alive data and copy only alive data over to 'dest'.
# This is O(liveset) but it nicely compacts memory, so it's fine.
# We use the 'typ' field as a forwarding pointer. The forwarding
# pointers have bit 0 set, so we can disambiguate them.
# We allocate a temporary stack in 'src' that we later free:
var s: PointerStackChunk
s.len = 1
s.data[0] = root
while s.len > 0:
var p: pointer
if s.tail == nil:
p = s.data[s.len-1]
dec s.len
else:
p = s.tail.data[s.tail.len-1]
dec s.tail.len
if s.tail.len == 0:
unlink(s, s.tail)

proc rawNewObj(r: var MemRegion, typ: PNimType, size: int): pointer =
var res = cast[ptr ObjHeader](alloc(r, size + sizeof(ObjHeader)))
res.typ = typ
if typ.finalizer != nil:
res.nextFinal = r.chunk.head
r.chunk.head = res
res.nextFinal = r.head.head
r.head.head = res
result = res +! sizeof(ObjHeader)

proc newObj(typ: PNimType, size: int): pointer {.compilerRtl.} =
result = rawNewObj(typ, size, region)
result = rawNewObj(tlRegion, typ, size)
zeroMem(result, size)
when defined(memProfiler): nimProfile(size)

proc newObjNoInit(typ: PNimType, size: int): pointer {.compilerRtl.} =
result = rawNewObj(typ, size, region)
result = rawNewObj(tlRegion, typ, size)
when defined(memProfiler): nimProfile(size)

proc newSeq(typ: PNimType, len: int): pointer {.compilerRtl.} =
@@ -351,7 +368,7 @@ proc newSeq(typ: PNimType, len: int): pointer {.compilerRtl.} =
cast[PGenericSeq](result).reserved = len

proc newObjRC1(typ: PNimType, size: int): pointer {.compilerRtl.} =
result = rawNewObj(typ, size, gch)
result = rawNewObj(tlRegion, typ, size)
zeroMem(result, size)

proc newSeqRC1(typ: PNimType, len: int): pointer {.compilerRtl.} =
@@ -360,23 +377,63 @@ proc newSeqRC1(typ: PNimType, len: int): pointer {.compilerRtl.} =
cast[PGenericSeq](result).len = len
cast[PGenericSeq](result).reserved = len

proc growObj(old: pointer, newsize: int, gch: var GcHeap): pointer =
collectCT(gch)
var ol = usrToCell(old)
sysAssert(ol.typ != nil, "growObj: 1")
gcAssert(ol.typ.kind in {tyString, tySequence}, "growObj: 2")

var res = cast[PCell](rawAlloc(gch.region, newsize + sizeof(Cell)))
var elemSize = 1
if ol.typ.kind != tyString: elemSize = ol.typ.base.size

var oldsize = cast[PGenericSeq](old).len*elemSize + GenericSeqSize
copyMem(res, ol, oldsize + sizeof(Cell))
zeroMem(cast[pointer](cast[ByteAddress](res)+% oldsize +% sizeof(Cell)),
newsize-oldsize)
sysAssert((cast[ByteAddress](res) and (MemAlign-1)) == 0, "growObj: 3")
result = cellToUsr(res)
proc growObj(region: var MemRegion; old: pointer, newsize: int): pointer =
let typ = cast[ptr ObjHeader](old -! sizeof(ObjHeader)).typ
result = rawNewObj(region, typ, newsize)
let elemSize = if typ.kind == tyString: 1 else: typ.base.size
let oldsize = cast[PGenericSeq](old).len*elemSize + GenericSeqSize
copyMem(result, old, oldsize)
zeroMem(result +! oldsize, newsize-oldsize)

proc growObj(old: pointer, newsize: int): pointer {.rtl.} =
result = growObj(old, newsize, region)
result = growObj(tlRegion, old, newsize)

proc unsureAsgnRef(dest: PPointer, src: pointer) {.compilerproc, inline.} =
dest[] = src
proc asgnRef(dest: PPointer, src: pointer) {.compilerproc, inline.} =
dest[] = src
proc asgnRefNoCycle(dest: PPointer, src: pointer) {.compilerproc, inline.} =
dest[] = src

proc alloc(size: Natural): pointer =
result = cmalloc(size)
if result == nil: raiseOutOfMem()
proc alloc0(size: Natural): pointer =
result = alloc(size)
zeroMem(result, size)
proc realloc(p: pointer, newsize: Natural): pointer =
result = crealloc(p, newsize)
if result == nil: raiseOutOfMem()
proc dealloc(p: pointer) = cfree(p)

proc allocShared(size: Natural): pointer =
result = cmalloc(size)
if result == nil: raiseOutOfMem()
proc allocShared0(size: Natural): pointer =
result = alloc(size)
zeroMem(result, size)
proc reallocShared(p: pointer, newsize: Natural): pointer =
result = crealloc(p, newsize)
if result == nil: raiseOutOfMem()
proc deallocShared(p: pointer) = cfree(p)

when hasThreadSupport:
proc getFreeSharedMem(): int = 0
proc getTotalSharedMem(): int = 0
proc getOccupiedSharedMem(): int = 0

proc GC_disable() = discard
proc GC_enable() = discard
proc GC_fullCollect() = discard
proc GC_setStrategy(strategy: GC_Strategy) = discard
proc GC_enableMarkAndSweep() = discard
proc GC_disableMarkAndSweep() = discard
proc GC_getStatistics(): string = return ""

proc getOccupiedMem(): int =
result = tlRegion.totalSize - tlRegion.remaining
proc getFreeMem(): int = tlRegion.remaining
proc getTotalMem(): int =
result = tlRegion.totalSize

proc setStackBottom(theStackBottom: pointer) = discard
@@ -97,6 +97,8 @@ proc rawWriteStackTrace(): string =
else:
result = "No stack traceback available\n"

proc getStackTrace*(): string = rawWriteStackTrace()

proc unhandledException(e: ref Exception) {.
compilerproc, asmNoStackFrame.} =
when NimStackTrace:

@@ -389,15 +389,30 @@ elif defined(nogc) and defined(useMalloc):

when not defined(useNimRtl):
proc alloc(size: Natural): pointer =
result = cmalloc(size)
if result == nil: raiseOutOfMem()
var x = cmalloc(size + sizeof(size))
if x == nil: raiseOutOfMem()

cast[ptr int](x)[] = size
result = cast[pointer](cast[int](x) + sizeof(size))

proc alloc0(size: Natural): pointer =
result = alloc(size)
zeroMem(result, size)
proc realloc(p: pointer, newsize: Natural): pointer =
result = crealloc(p, newsize)
if result == nil: raiseOutOfMem()
proc dealloc(p: pointer) = cfree(p)
var x = cast[pointer](cast[int](p) - sizeof(newsize))
let oldsize = cast[ptr int](x)[]

x = crealloc(x, newsize + sizeof(newsize))

if x == nil: raiseOutOfMem()

cast[ptr int](x)[] = newsize
result = cast[pointer](cast[int](x) + sizeof(newsize))

if newsize > oldsize:
zeroMem(cast[pointer](cast[int](result) + oldsize), newsize - oldsize)

proc dealloc(p: pointer) = cfree(cast[pointer](cast[int](p) - sizeof(int)))

proc allocShared(size: Natural): pointer =
result = cmalloc(size)
@@ -511,11 +526,12 @@ elif defined(nogc):
include "system/cellsets"

else:
include "system/alloc"
when not defined(gcStack):
include "system/alloc"

include "system/cellsets"
when not leakDetector and not useCellIds:
sysAssert(sizeof(Cell) == sizeof(FreeCell), "sizeof FreeCell")
include "system/cellsets"
when not leakDetector and not useCellIds:
sysAssert(sizeof(Cell) == sizeof(FreeCell), "sizeof FreeCell")
when compileOption("gc", "v2"):
include "system/gc2"
elif defined(gcStack):

@@ -121,3 +121,5 @@ else:
importc: "pthread_cond_signal", header: "<pthread.h>", noSideEffect.}
proc deinitSysCond(cond: var SysCond) {.noSideEffect,
importc: "pthread_cond_destroy", header: "<pthread.h>".}

{.pop.}

@@ -228,7 +228,8 @@ proc setLengthSeq(seq: PGenericSeq, elemSize, newLen: int): PGenericSeq {.
elif newLen < result.len:
# we need to decref here, otherwise the GC leaks!
when not defined(boehmGC) and not defined(nogc) and
not defined(gcMarkAndSweep) and not defined(gogc):
not defined(gcMarkAndSweep) and not defined(gogc) and
not defined(gcStack):
when false: # compileOption("gc", "v2"):
for i in newLen..result.len-1:
let len0 = gch.tempStack.len

@@ -36,6 +36,8 @@ type
DWORD* = int32
PDWORD* = ptr DWORD
LPINT* = ptr int32
ULONG_PTR* = uint
PULONG_PTR* = ptr uint
HDC* = Handle
HGLRC* = Handle

@@ -759,17 +761,17 @@ const
ERROR_NETNAME_DELETED* = 64

proc createIoCompletionPort*(FileHandle: Handle, ExistingCompletionPort: Handle,
CompletionKey: DWORD,
CompletionKey: ULONG_PTR,
NumberOfConcurrentThreads: DWORD): Handle{.stdcall,
dynlib: "kernel32", importc: "CreateIoCompletionPort".}

proc getQueuedCompletionStatus*(CompletionPort: Handle,
lpNumberOfBytesTransferred: PDWORD, lpCompletionKey: PULONG,
lpNumberOfBytesTransferred: PDWORD, lpCompletionKey: PULONG_PTR,
lpOverlapped: ptr POVERLAPPED,
dwMilliseconds: DWORD): WINBOOL{.stdcall,
dynlib: "kernel32", importc: "GetQueuedCompletionStatus".}

proc getOverlappedResult*(hFile: Handle, lpOverlapped: OVERLAPPED,
proc getOverlappedResult*(hFile: Handle, lpOverlapped: POVERLAPPED,
lpNumberOfBytesTransferred: var DWORD, bWait: WINBOOL): WINBOOL{.
stdcall, dynlib: "kernel32", importc: "GetOverlappedResult".}
12 tests/ccgbugs/tweakopenarray.nim Normal file
@@ -0,0 +1,12 @@
# bug #4089

type
Proc = proc(args: openArray[Bar]): Bar

Foo = object
p: Proc

Bar = object
f: Foo

proc bar(val: Foo): Bar = Bar()

9 tests/cpp/ttemplatetype.nim Normal file
@@ -0,0 +1,9 @@
type
Map {.importcpp: "std::map", header: "<map>".} [T,U] = object

proc cInitMap(T: typedesc, U: typedesc): Map[T,U] {.importcpp: "std::map<'*1,'*2>()", nodecl.}

proc initMap[T, U](): Map[T, U] =
result = cInitMap(T, U)

var x: Map[cstring, cint] = initMap[cstring, cint]()

16 tests/generics/twrong_explicit_typeargs.nim Normal file
@@ -0,0 +1,16 @@
discard """
errormsg: "type mismatch: got (string) but expected 'int32 or int64'"
line: 16
"""

# bug #4084
type
Image[T] = object
data: seq[T]

proc newImage[T: int32|int64](w, h: int): ref Image[T] =
new(result)
result.data = newSeq[T](w * h)

var correct = newImage[int32](320, 200)
var wrong = newImage[string](320, 200)
@@ -49,7 +49,7 @@ proc makeDesktop(): PDesktop = new(TDesktop)

proc makeWindow(): PWindow = new(TWindow)

proc thisCausesError(a: var PView, b: PView) =
proc thisCausesError(a: PView, b: PView) =
discard

var dd = makeDesktop()

17 tests/template/typedescids.nim Normal file
@@ -0,0 +1,17 @@
discard """
output: '''2 3'''
"""

# bug #4097

var i {.compileTime.} = 2

template defineId*(t: typedesc): stmt =
const id {.genSym.} = i
static: inc(i)
proc idFor*(T: typedesc[t]): int {.inline, raises: [].} = id

defineId(int8)
defineId(int16)

echo idFor(int8), " ", idFor(int16)

25 tests/types/typeof_produces_alias.nim Normal file
@@ -0,0 +1,25 @@

# bug #4124

import sequtils

type
Foo = distinct string

var
foo: Foo

type
Alias = (type(foo))
var
a: Alias

a = foo

when true:
var xs = @[1,2,3]

proc asFoo(i: string): Foo =
Foo(i)

var xx = xs.mapIt(asFoo($(it + 5)))
@@ -32,6 +32,11 @@ Changes affecting backwards compatibility
raises a ``KeyError`` exception. You can compile with the ``-d:nimJsonGet``
flag to get a list of usages of ``[]``, as well as to restore the operator's
previous behaviour.
- When using ``useMalloc``, an additional header containing the size of the
allocation will be allocated, to support zeroing memory on realloc as expected
by the language. With this change, ``alloc`` and ``dealloc`` are no longer
aliases for ``malloc`` and ``free`` - use ``c_malloc`` and ``c_free`` if
you need that.

Library Additions