Merge branch 'devel' into OpenBSDFix

Charlie Barto committed on 2014-05-09 14:08:16 -04:00
26 changed files with 587 additions and 62 deletions


@@ -549,7 +549,7 @@ type
mFields, mFieldPairs, mOmpParFor,
mAppendStrCh, mAppendStrStr, mAppendSeqElem,
mInRange, mInSet, mRepr, mExit, mSetLengthStr, mSetLengthSeq,
mIsPartOf, mAstToStr, mRand,
mIsPartOf, mAstToStr, mParallel,
mSwap, mIsNil, mArrToSeq, mCopyStr, mCopyStrLast,
mNewString, mNewStringOfCap,
mReset,
@@ -597,10 +597,9 @@ const
mIntToStr, mInt64ToStr, mFloatToStr, mCStrToStr, mStrToStr, mEnumToStr,
mAnd, mOr, mEqStr, mLeStr, mLtStr, mEqSet, mLeSet, mLtSet, mMulSet,
mPlusSet, mMinusSet, mSymDiffSet, mConStrStr, mConArrArr, mConArrT,
mConTArr, mConTT, mSlice,
mConTArr, mConTT,
mAppendStrCh, mAppendStrStr, mAppendSeqElem,
mInRange, mInSet, mRepr,
mRand,
mCopyStr, mCopyStrLast}
# magics that require special semantic checking and
# thus cannot be overloaded (also documented in the spec!):
@@ -873,7 +872,7 @@ const
skMacro, skTemplate, skConverter, skEnumField, skLet, skStub}
PersistentNodeFlags*: TNodeFlags = {nfBase2, nfBase8, nfBase16,
nfDotSetter, nfDotField,
nfAllConst,nfIsRef}
nfIsRef}
namePos* = 0
patternPos* = 1 # empty except for term rewriting macros
genericParamsPos* = 2


@@ -77,18 +77,38 @@ proc isInCurrentFrame(p: BProc, n: PNode): bool =
proc openArrayLoc(p: BProc, n: PNode): PRope =
var a: TLoc
initLocExpr(p, n, a)
case skipTypes(a.t, abstractVar).kind
of tyOpenArray, tyVarargs:
result = ropef("$1, $1Len0", [rdLoc(a)])
of tyString, tySequence:
if skipTypes(n.typ, abstractInst).kind == tyVar:
result = ropef("(*$1)->data, (*$1)->$2", [a.rdLoc, lenField()])
else:
result = ropef("$1->data, $1->$2", [a.rdLoc, lenField()])
of tyArray, tyArrayConstr:
result = ropef("$1, $2", [rdLoc(a), toRope(lengthOrd(a.t))])
else: internalError("openArrayLoc: " & typeToString(a.t))
let q = skipConv(n)
if getMagic(q) == mSlice:
# magic: pass slice to openArray:
var b, c: TLoc
initLocExpr(p, q[1], a)
initLocExpr(p, q[2], b)
initLocExpr(p, q[3], c)
let fmt =
case skipTypes(a.t, abstractVar).kind
of tyOpenArray, tyVarargs, tyArray, tyArrayConstr:
"($1)+($2), ($3)-($2)+1"
of tyString, tySequence:
if skipTypes(n.typ, abstractInst).kind == tyVar:
"(*$1)->data+($2), ($3)-($2)+1"
else:
"$1->data+($2), ($3)-($2)+1"
else: (internalError("openArrayLoc: " & typeToString(a.t)); "")
result = ropef(fmt, [rdLoc(a), rdLoc(b), rdLoc(c)])
else:
initLocExpr(p, n, a)
case skipTypes(a.t, abstractVar).kind
of tyOpenArray, tyVarargs:
result = ropef("$1, $1Len0", [rdLoc(a)])
of tyString, tySequence:
if skipTypes(n.typ, abstractInst).kind == tyVar:
result = ropef("(*$1)->data, (*$1)->$2", [a.rdLoc, lenField()])
else:
result = ropef("$1->data, $1->$2", [a.rdLoc, lenField()])
of tyArray, tyArrayConstr:
result = ropef("$1, $2", [rdLoc(a), toRope(lengthOrd(a.t))])
else: internalError("openArrayLoc: " & typeToString(a.t))
proc genArgStringToCString(p: BProc,
n: PNode): PRope {.inline.} =
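For illustration only (not part of the commit): the new mSlice branch in openArrayLoc above lets a slice expression be passed to an openArray parameter as a pointer/length pair instead of a copied sequence. A minimal sketch, assuming the slice expression resolves to the Slice magic:

proc sum(xs: openArray[int]): int =
  for x in xs: result += x

var a = [10, 20, 30, 40, 50]
# hypothetical lowering: when `a[1..3]` carries the mSlice magic, the C
# backend emits "(a)+(1), (3)-(1)+1" rather than materializing a new seq
echo sum(a[1..3])   # --> 90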


@@ -1623,7 +1623,7 @@ proc genMagicExpr(p: BProc, e: PNode, d: var TLoc, op: TMagic) =
of mIncl, mExcl, mCard, mLtSet, mLeSet, mEqSet, mMulSet, mPlusSet, mMinusSet,
mInSet:
genSetOp(p, e, d, op)
of mNewString, mNewStringOfCap, mCopyStr, mCopyStrLast, mExit, mRand:
of mNewString, mNewStringOfCap, mCopyStr, mCopyStrLast, mExit:
var opr = e.sons[0].sym
if lfNoDecl notin opr.loc.flags:
discard cgsym(p.module, opr.loc.r.ropeToStr)


@@ -284,11 +284,11 @@ proc resetMemory =
echo GC_getStatistics()
const
SimiluateCaasMemReset = false
SimulateCaasMemReset = false
PrintRopeCacheStats = false
proc mainCommand* =
when SimiluateCaasMemReset:
when SimulateCaasMemReset:
gGlobalOptions.incl(optCaasEnabled)
# In "nimrod serve" scenario, each command must reset the registered passes
@@ -454,6 +454,6 @@ proc mainCommand* =
echo " efficiency: ", formatFloat(1-(gCacheMisses.float/gCacheTries.float),
ffDecimal, 3)
when SimiluateCaasMemReset:
when SimulateCaasMemReset:
resetMemory()


@@ -10,7 +10,7 @@
## This module implements the pattern matching features for term rewriting
## macro support.
import strutils, ast, astalgo, types, msgs, idents, renderer, wordrecg
import strutils, ast, astalgo, types, msgs, idents, renderer, wordrecg, trees
# we precompile the pattern here for efficiency into some internal
# stack based VM :-) Why? Because it's fun; I did no benchmarks to see if that
@@ -215,6 +215,9 @@ proc isAssignable*(owner: PSym, n: PNode): TAssignableResult =
result = arLValue
of nkObjUpConv, nkObjDownConv, nkCheckedFieldExpr:
result = isAssignable(owner, n.sons[0])
of nkCallKinds:
# builtin slice keeps lvalue-ness:
if getMagic(n) == mSlice: result = isAssignable(owner, n.sons[1])
else:
discard


@@ -405,10 +405,8 @@ proc evalOp(m: TMagic, n, a, b, c: PNode): PNode =
mExit, mInc, ast.mDec, mEcho, mSwap, mAppendStrCh,
mAppendStrStr, mAppendSeqElem, mSetLengthStr, mSetLengthSeq,
mParseExprToAst, mParseStmtToAst, mExpandToAst, mTypeTrait,
mNLen..mNError, mEqRef, mSlurp, mStaticExec, mNGenSym, mSpawn:
mNLen..mNError, mEqRef, mSlurp, mStaticExec, mNGenSym, mSpawn, mParallel:
discard
of mRand:
result = newIntNodeT(math.random(a.getInt.int), n)
else: internalError(a.info, "evalOp(" & $m & ')')
proc getConstIfExpr(c: PSym, n: PNode): PNode =


@@ -190,7 +190,7 @@ proc semCase(c: PContext, n: PNode): PNode =
var typ = commonTypeBegin
var hasElse = false
case skipTypes(n.sons[0].typ, abstractVarRange-{tyTypeDesc}).kind
of tyInt..tyInt64, tyChar, tyEnum, tyUInt..tyUInt32:
of tyInt..tyInt64, tyChar, tyEnum, tyUInt..tyUInt32, tyBool:
chckCovered = true
of tyFloat..tyFloat128, tyString, tyError:
discard
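Adding tyBool here means case statements over bool are now checked for exhaustiveness, just like enum and integer cases. A small illustrative example (not from the commit):

proc describe(b: bool): string =
  # both values must be covered (or an else branch given), otherwise
  # the compiler reports the case statement as not exhaustive
  case b
  of true: result = "yes"
  of false: result = "no"

echo describe(true)   # --> yes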


@@ -127,7 +127,8 @@ proc createStrKeepNode(x: var TFullReg) =
elif x.node.kind == nkNilLit:
system.reset(x.node[])
x.node.kind = nkStrLit
elif x.node.kind notin {nkStrLit..nkTripleStrLit}:
elif x.node.kind notin {nkStrLit..nkTripleStrLit} or
nfAllConst in x.node.flags:
# XXX this is hacky; tests/txmlgen triggers it:
x.node = newNode(nkStrLit)
# debug x.node


@@ -207,7 +207,7 @@ const
largeInstrs* = { # instructions which use 2 int32s instead of 1:
opcSubStr, opcConv, opcCast, opcNewSeq, opcOf}
slotSomeTemp* = slotTempUnknown
relativeJumps* = {opcTJmp, opcFJmp, opcJmp}
relativeJumps* = {opcTJmp, opcFJmp, opcJmp, opcJmpBack}
template opcode*(x: TInstr): TOpcode {.immediate.} = TOpcode(x.uint32 and 0xff'u32)
template regA*(x: TInstr): TRegister {.immediate.} = TRegister(x.uint32 shr 8'u32 and 0xff'u32)


@@ -331,6 +331,7 @@ proc canonValue*(n: PNode): PNode =
proc rawGenLiteral(c: PCtx; n: PNode): int =
result = c.constants.len
assert(n.kind != nkCall)
n.flags.incl nfAllConst
c.constants.add n.canonValue
internalAssert result < 0x7fff
@@ -1622,7 +1623,7 @@ proc genProc(c: PCtx; s: PSym): int =
c.gABC(body, opcEof, eofInstr.regA)
c.optimizeJumps(result)
s.offset = c.prc.maxSlots
#if s.name.s == "addStuff":
#if s.name.s == "parse_until_symbol":
# echo renderTree(body)
# c.echoCode(result)
c.prc = oldPrc


@@ -528,7 +528,7 @@ suite is not integrated with the main test suite and you have to
run it manually. First you have to compile the tester::
$ cd my/nimrod/checkout/tests
$ nimrod c caasdriver.nim
$ nimrod c testament/caasdriver.nim
Running the ``caasdriver`` without parameters will attempt to process
all the test cases in all three operation modes. If a test succeeds


@@ -535,7 +535,7 @@ Database support
* `odbcsql <odbcsql.html>`_
interface to the ODBC driver.
* `sphinx <sphinx.html>`_
Nimrod wrapper for ``shpinx``.
Nimrod wrapper for ``sphinx``.
XML Processing


@@ -123,7 +123,7 @@ This means that all the control structures are recognized by indentation.
Indentation consists only of spaces; tabulators are not allowed.
The indentation handling is implemented as follows: The lexer annotates the
following token with the preceeding number of spaces; indentation is not
following token with the preceding number of spaces; indentation is not
a separate token. This trick allows parsing of Nimrod with only 1 token of
lookahead.
@@ -617,7 +617,7 @@ Ordinal types
Integers, bool, characters and enumeration types (and subranges of these
types) belong to ordinal types. For reasons of simplicity of implementation
the types ``uint`` and ``uint64`` are no ordinal types.
the types ``uint`` and ``uint64`` are not ordinal types.
Pre-defined integer types
@@ -686,7 +686,7 @@ kinds of integer types are used: the smaller type is converted to the larger.
A `narrowing type conversion`:idx: converts a larger to a smaller type (for
example ``int32 -> int16``. A `widening type conversion`:idx: converts a
smaller type to a larger type (for example ``int16 -> int32``). In Nimrod only
widening type conversion are *implicit*:
widening type conversions are *implicit*:
.. code-block:: nimrod
var myInt16 = 5i16
@@ -1519,7 +1519,7 @@ Most calling conventions exist only for the Windows 32-bit platform.
Assigning/passing a procedure to a procedural variable is only allowed if one
of the following conditions hold:
1) The procedure that is accessed resists in the current module.
1) The procedure that is accessed resides in the current module.
2) The procedure is marked with the ``procvar`` pragma (see `procvar pragma`_).
3) The procedure has a calling convention that differs from ``nimcall``.
4) The procedure is anonymous.
@@ -1527,8 +1527,8 @@ of the following conditions hold:
The rules' purpose is to prevent the case that extending a non-``procvar``
procedure with default parameters breaks client code.
The default calling convention is ``nimcall``, unless it is an inner proc (
a proc inside of a proc). For an inner proc an analysis is performed whether it
The default calling convention is ``nimcall``, unless it is an inner proc (a
proc inside of a proc). For an inner proc an analysis is performed whether it
accesses its environment. If it does so, it has the calling convention
``closure``, otherwise it has the calling convention ``nimcall``.
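An illustrative snippet for rule 2, added here for clarity (not part of the manual diff):

.. code-block:: nimrod
  proc printItem(x: int) {.procvar.} =
    # ``procvar`` allows this proc to be assigned to a procedural
    # variable even from another module (rule 2 above)
    echo x

  var p: proc (x: int) {.nimcall.} = printItem
  p(3)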


@@ -167,7 +167,7 @@ might contain some cruft even when dead code elimination is turned on. So
the final release build should be done with ``--symbolFiles:off``.
Due to the aggregation of C code it is also recommended that each project
resists in its own directory so that the generated ``nimcache`` directory
resides in its own directory so that the generated ``nimcache`` directory
is not shared between different projects.


@@ -167,7 +167,7 @@ const
cleanExt = [
".ppu", ".o", ".obj", ".dcu", ".~pas", ".~inc", ".~dsk", ".~dpr",
".map", ".tds", ".err", ".bak", ".pyc", ".exe", ".rod", ".pdb", ".idb",
".idx"
".idx", ".ilk"
]
ignore = [
".bzrignore", "nimrod", "nimrod.exe", "koch", "koch.exe", ".gitignore"


@@ -811,7 +811,7 @@ template createVar(futSymName: string, asyncProc: PNimrodNode,
result.add generateExceptionCheck(futSym, exceptBranch, rootReceiver)
proc processBody(node, retFutureSym: PNimrodNode,
subtypeName: string,
subTypeIsVoid: bool,
exceptBranch: PNimrodNode): PNimrodNode {.compileTime.} =
#echo(node.treeRepr)
result = node
@@ -819,14 +819,14 @@ proc processBody(node, retFutureSym: PNimrodNode,
of nnkReturnStmt:
result = newNimNode(nnkStmtList)
if node[0].kind == nnkEmpty:
if subtypeName != "void":
if not subtypeIsVoid:
result.add newCall(newIdentNode("complete"), retFutureSym,
newIdentNode("result"))
else:
result.add newCall(newIdentNode("complete"), retFutureSym)
else:
result.add newCall(newIdentNode("complete"), retFutureSym,
node[0].processBody(retFutureSym, subtypeName, exceptBranch))
node[0].processBody(retFutureSym, subtypeIsVoid, exceptBranch))
result.add newNimNode(nnkReturnStmt).add(newNilLit())
return # Don't process the children of this return stmt
@@ -869,7 +869,8 @@ proc processBody(node, retFutureSym: PNimrodNode,
else: discard
of nnkDiscardStmt:
# discard await x
if node[0][0].kind == nnkIdent and node[0][0].ident == !"await":
if node[0].kind != nnkEmpty and node[0][0].kind == nnkIdent and
node[0][0].ident == !"await":
var newDiscard = node
createVar("futureDiscard_" & $toStrLit(node[0][1]), node[0][1],
newDiscard[0], newDiscard)
@@ -880,7 +881,7 @@ proc processBody(node, retFutureSym: PNimrodNode,
res: PNimrodNode): bool {.compileTime.} =
result = false
while i < n[0].len:
var processed = processBody(n[0][i], retFutureSym, subtypeName, n[1])
var processed = processBody(n[0][i], retFutureSym, subtypeIsVoid, n[1])
if processed.kind != n[0][i].kind or processed.len != n[0][i].len:
expectKind(processed, nnkStmtList)
expectKind(processed[2][1], nnkElse)
@@ -900,7 +901,7 @@ proc processBody(node, retFutureSym: PNimrodNode,
else: discard
for i in 0 .. <result.len:
result[i] = processBody(result[i], retFutureSym, subtypeName, exceptBranch)
result[i] = processBody(result[i], retFutureSym, subtypeIsVoid, exceptBranch)
proc getName(node: PNimrodNode): string {.compileTime.} =
case node.kind
@@ -920,35 +921,36 @@ macro async*(prc: stmt): stmt {.immediate.} =
hint("Processing " & prc[0].getName & " as an async proc.")
let returnType = prc[3][0]
var subtypeName = ""
# Verify that the return type is a PFuture[T]
if returnType.kind == nnkIdent:
error("Expected return type of 'PFuture' got '" & $returnType & "'")
elif returnType.kind == nnkBracketExpr:
if $returnType[0] != "PFuture":
error("Expected return type of 'PFuture' got '" & $returnType[0] & "'")
subtypeName = $returnType[1].ident
elif returnType.kind == nnkEmpty:
subtypeName = "void"
let subtypeIsVoid = returnType.kind == nnkEmpty
var outerProcBody = newNimNode(nnkStmtList)
# -> var retFuture = newFuture[T]()
var retFutureSym = genSym(nskVar, "retFuture")
var subRetType =
if returnType.kind == nnkEmpty: newIdentNode("void")
else: returnType[1]
outerProcBody.add(
newVarStmt(retFutureSym,
newCall(
newNimNode(nnkBracketExpr).add(
newIdentNode(!"newFuture"), # TODO: Strange bug here? Remove the `!`.
newIdentNode(subtypeName))))) # Get type from return type of this proc
subRetType)))) # Get type from return type of this proc
# -> iterator nameIter(): PFutureBase {.closure.} =
# -> var result: T
# -> <proc_body>
# -> complete(retFuture, result)
var iteratorNameSym = genSym(nskIterator, $prc[0].getName & "Iter")
var procBody = prc[6].processBody(retFutureSym, subtypeName, nil)
if subtypeName != "void":
var procBody = prc[6].processBody(retFutureSym, subtypeIsVoid, nil)
if not subtypeIsVoid:
procBody.insert(0, newNimNode(nnkVarSection).add(
newIdentDefs(newIdentNode("result"), returnType[1]))) # -> var result: T
procBody.add(
@@ -977,7 +979,7 @@ macro async*(prc: stmt): stmt {.immediate.} =
for i in 0 .. <result[4].len:
if result[4][i].ident == !"async":
result[4].del(i)
if subtypeName == "void":
if subtypeIsVoid:
# Add discardable pragma.
result[4].add(newIdentNode("discardable"))
if returnType.kind == nnkEmpty:
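Illustrative only: with subtypeIsVoid, an async proc that has no return type is treated as returning PFuture[void]; newFuture[void] is generated and the proc is marked discardable. A minimal sketch:

import asyncdispatch

proc tick() {.async.} =
  # empty return type: the macro now treats the future's subtype as void
  echo "tick"

tick()   # the generated future may be ignored thanks to the
         # ``discardable`` pragma added for the void case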


@@ -66,6 +66,7 @@ type
TTable* {.final, myShallow.}[A, B] = object ## generic hash table
data: TKeyValuePairSeq[A, B]
counter: int
PTable*[A,B] = ref TTable[A, B]
when not defined(nimhygiene):
{.pragma: dirty.}
@@ -231,7 +232,7 @@ proc `$`*[A, B](t: TTable[A, B]): string =
## The `$` operator for hash tables.
dollarImpl()
proc `==`*[A, B](s, t: TTable[A, B]): bool =
template equalsImpl() =
if s.counter == t.counter:
# different insertion orders mean different 'data' seqs, so we have
# to use the slow route here:
@@ -240,6 +241,9 @@ proc `==`*[A, B](s, t: TTable[A, B]): bool =
if t[key] != val: return false
return true
proc `==`*[A, B](s, t: TTable[A, B]): bool =
equalsImpl()
proc indexBy*[A, B, C](collection: A, index: proc(x: B): C): TTable[C, B] =
## Index the collection with the proc provided.
# TODO: As soon as supported, change collection: A to collection: A[B]
@@ -247,6 +251,88 @@ proc indexBy*[A, B, C](collection: A, index: proc(x: B): C): TTable[C, B] =
for item in collection:
result[index(item)] = item
proc len*[A, B](t: PTable[A, B]): int =
## returns the number of keys in `t`.
result = t.counter
iterator pairs*[A, B](t: PTable[A, B]): tuple[key: A, val: B] =
## iterates over any (key, value) pair in the table `t`.
for h in 0..high(t.data):
if t.data[h].slot == seFilled: yield (t.data[h].key, t.data[h].val)
iterator mpairs*[A, B](t: PTable[A, B]): tuple[key: A, val: var B] =
## iterates over any (key, value) pair in the table `t`. The values
## can be modified.
for h in 0..high(t.data):
if t.data[h].slot == seFilled: yield (t.data[h].key, t.data[h].val)
iterator keys*[A, B](t: PTable[A, B]): A =
## iterates over any key in the table `t`.
for h in 0..high(t.data):
if t.data[h].slot == seFilled: yield t.data[h].key
iterator values*[A, B](t: PTable[A, B]): B =
## iterates over any value in the table `t`.
for h in 0..high(t.data):
if t.data[h].slot == seFilled: yield t.data[h].val
iterator mvalues*[A, B](t: PTable[A, B]): var B =
## iterates over any value in the table `t`. The values can be modified.
for h in 0..high(t.data):
if t.data[h].slot == seFilled: yield t.data[h].val
proc `[]`*[A, B](t: PTable[A, B], key: A): B =
## retrieves the value at ``t[key]``. If `key` is not in `t`,
## the default empty value for the type `B` is returned
## and no exception is raised. One can check with ``hasKey`` whether the key
## exists.
result = t[][key]
proc mget*[A, B](t: PTable[A, B], key: A): var B =
## retrieves the value at ``t[key]``. The value can be modified.
## If `key` is not in `t`, the ``EInvalidKey`` exception is raised.
t[].mget(key)
proc hasKey*[A, B](t: PTable[A, B], key: A): bool =
## returns true iff `key` is in the table `t`.
result = t[].hasKey(key)
proc `[]=`*[A, B](t: PTable[A, B], key: A, val: B) =
## puts a (key, value)-pair into `t`.
t[][key] = val
proc add*[A, B](t: PTable[A, B], key: A, val: B) =
## puts a new (key, value)-pair into `t` even if ``t[key]`` already exists.
t[].add(key, val)
proc del*[A, B](t: PTable[A, B], key: A) =
## deletes `key` from hash table `t`.
t[].del(key)
proc newTable*[A, B](initialSize=64): PTable[A, B] =
new(result)
result[] = initTable[A, B](initialSize)
proc newTable*[A, B](pairs: openArray[tuple[key: A,
val: B]]): PTable[A, B] =
## creates a new hash table that contains the given `pairs`.
new(result)
result[] = toTable[A, B](pairs)
proc `$`*[A, B](t: PTable[A, B]): string =
## The `$` operator for hash tables.
dollarImpl()
proc `==`*[A, B](s, t: PTable[A, B]): bool =
equalsImpl()
proc newTableFrom*[A, B, C](collection: A, index: proc(x: B): C): PTable[C, B] =
## Index the collection with the proc provided.
# TODO: As soon as supported, change collection: A to collection: A[B]
result = newTable[C, B]()
for item in collection:
result[index(item)] = item
# ------------------------------ ordered table ------------------------------
type
@@ -257,6 +343,7 @@ type
final, myShallow.}[A, B] = object ## table that remembers insertion order
data: TOrderedKeyValuePairSeq[A, B]
counter, first, last: int
POrderedTable*[A, B] = ref TOrderedTable[A, B]
proc len*[A, B](t: TOrderedTable[A, B]): int {.inline.} =
## returns the number of keys in `t`.
@@ -417,6 +504,96 @@ proc sort*[A, B](t: var TOrderedTable[A, B],
t.first = list
t.last = tail
proc len*[A, B](t: POrderedTable[A, B]): int {.inline.} =
## returns the number of keys in `t`.
result = t.counter
template forAllOrderedPairs(yieldStmt: stmt) {.dirty, immediate.} =
var h = t.first
while h >= 0:
var nxt = t.data[h].next
if t.data[h].slot == seFilled: yieldStmt
h = nxt
iterator pairs*[A, B](t: POrderedTable[A, B]): tuple[key: A, val: B] =
## iterates over any (key, value) pair in the table `t` in insertion
## order.
forAllOrderedPairs:
yield (t.data[h].key, t.data[h].val)
iterator mpairs*[A, B](t: POrderedTable[A, B]): tuple[key: A, val: var B] =
## iterates over any (key, value) pair in the table `t` in insertion
## order. The values can be modified.
forAllOrderedPairs:
yield (t.data[h].key, t.data[h].val)
iterator keys*[A, B](t: POrderedTable[A, B]): A =
## iterates over any key in the table `t` in insertion order.
forAllOrderedPairs:
yield t.data[h].key
iterator values*[A, B](t: POrderedTable[A, B]): B =
## iterates over any value in the table `t` in insertion order.
forAllOrderedPairs:
yield t.data[h].val
iterator mvalues*[A, B](t: POrderedTable[A, B]): var B =
## iterates over any value in the table `t` in insertion order. The values
## can be modified.
forAllOrderedPairs:
yield t.data[h].val
proc `[]`*[A, B](t: POrderedTable[A, B], key: A): B =
## retrieves the value at ``t[key]``. If `key` is not in `t`,
## the default empty value for the type `B` is returned
## and no exception is raised. One can check with ``hasKey`` whether the key
## exists.
result = t[][key]
proc mget*[A, B](t: POrderedTable[A, B], key: A): var B =
## retrieves the value at ``t[key]``. The value can be modified.
## If `key` is not in `t`, the ``EInvalidKey`` exception is raised.
result = t[].mget(key)
proc hasKey*[A, B](t: POrderedTable[A, B], key: A): bool =
## returns true iff `key` is in the table `t`.
result = t[].hasKey(key)
proc `[]=`*[A, B](t: POrderedTable[A, B], key: A, val: B) =
## puts a (key, value)-pair into `t`.
t[][key] = val
proc add*[A, B](t: POrderedTable[A, B], key: A, val: B) =
## puts a new (key, value)-pair into `t` even if ``t[key]`` already exists.
t[].add(key, val)
proc newOrderedTable*[A, B](initialSize=64): POrderedTable[A, B] =
## creates a new ordered hash table that is empty.
##
## `initialSize` needs to be a power of two. If you need to accept runtime
## values for this you could use the ``nextPowerOfTwo`` proc from the
## `math <math.html>`_ module.
new(result)
result[] = initOrderedTable[A, B]()
proc newOrderedTable*[A, B](pairs: openArray[tuple[key: A,
val: B]]): POrderedTable[A, B] =
## creates a new ordered hash table that contains the given `pairs`.
result = newOrderedTable[A, B](nextPowerOfTwo(pairs.len+10))
for key, val in items(pairs): result[key] = val
proc `$`*[A, B](t: POrderedTable[A, B]): string =
## The `$` operator for ordered hash tables.
dollarImpl()
proc sort*[A, B](t: POrderedTable[A, B],
cmp: proc (x,y: tuple[key: A, val: B]): int) =
## sorts `t` according to `cmp`. This modifies the internal list
## that kept the insertion order, so insertion order is lost after this
## call but key lookup and insertions remain possible after `sort` (in
## contrast to the `sort` for count tables).
t[].sort(cmp)
# ------------------------------ count tables -------------------------------
type
@@ -424,6 +601,7 @@ type
A] = object ## table that counts the number of each key
data: seq[tuple[key: A, val: int]]
counter: int
PCountTable*[A] = ref TCountTable[A]
proc len*[A](t: TCountTable[A]): int =
## returns the number of keys in `t`.
@@ -567,6 +745,93 @@ proc sort*[A](t: var TCountTable[A]) =
if j < h: break
if h == 1: break
proc len*[A](t: PCountTable[A]): int =
## returns the number of keys in `t`.
result = t.counter
iterator pairs*[A](t: PCountTable[A]): tuple[key: A, val: int] =
## iterates over any (key, value) pair in the table `t`.
for h in 0..high(t.data):
if t.data[h].val != 0: yield (t.data[h].key, t.data[h].val)
iterator mpairs*[A](t: PCountTable[A]): tuple[key: A, val: var int] =
## iterates over any (key, value) pair in the table `t`. The values can
## be modified.
for h in 0..high(t.data):
if t.data[h].val != 0: yield (t.data[h].key, t.data[h].val)
iterator keys*[A](t: PCountTable[A]): A =
## iterates over any key in the table `t`.
for h in 0..high(t.data):
if t.data[h].val != 0: yield t.data[h].key
iterator values*[A](t: PCountTable[A]): int =
## iterates over any value in the table `t`.
for h in 0..high(t.data):
if t.data[h].val != 0: yield t.data[h].val
iterator mvalues*[A](t: PCountTable[A]): var int =
## iterates over any value in the table `t`. The values can be modified.
for h in 0..high(t.data):
if t.data[h].val != 0: yield t.data[h].val
proc `[]`*[A](t: PCountTable[A], key: A): int =
## retrieves the value at ``t[key]``. If `key` is not in `t`,
## 0 is returned. One can check with ``hasKey`` whether the key
## exists.
result = t[][key]
proc mget*[A](t: PCountTable[A], key: A): var int =
## retrieves the value at ``t[key]``. The value can be modified.
## If `key` is not in `t`, the ``EInvalidKey`` exception is raised.
result = t[].mget(key)
proc hasKey*[A](t: PCountTable[A], key: A): bool =
## returns true iff `key` is in the table `t`.
result = t[].hasKey(key)
proc `[]=`*[A](t: PCountTable[A], key: A, val: int) =
## puts a (key, value)-pair into `t`. `val` has to be positive.
assert val > 0
t[][key] = val
proc newCountTable*[A](initialSize=64): PCountTable[A] =
## creates a new count table that is empty.
##
## `initialSize` needs to be a power of two. If you need to accept runtime
## values for this you could use the ``nextPowerOfTwo`` proc from the
## `math <math.html>`_ module.
new(result)
result[] = initCountTable[A](initialSize)
proc newCountTable*[A](keys: openArray[A]): PCountTable[A] =
## creates a new count table with every key in `keys` having a count of 1.
result = newCountTable[A](nextPowerOfTwo(keys.len+10))
for key in items(keys): result[key] = 1
proc `$`*[A](t: PCountTable[A]): string =
## The `$` operator for count tables.
dollarImpl()
proc inc*[A](t: PCountTable[A], key: A, val = 1) =
## increments `t[key]` by `val`.
t[].inc(key, val)
proc smallest*[A](t: PCountTable[A]): tuple[key: A, val: int] =
## returns the (key,val)-pair with the smallest `val`. Efficiency: O(n)
t[].smallest
proc largest*[A](t: PCountTable[A]): tuple[key: A, val: int] =
## returns the (key,val)-pair with the largest `val`. Efficiency: O(n)
t[].largest
proc sort*[A](t: PCountTable[A]) =
## sorts the count table so that the entry with the highest counter comes
## first. This is destructive! You must not modify `t` afterwards!
## You can use the iterators `pairs`, `keys`, and `values` to iterate over
## `t` in the sorted order.
t[].sort
when isMainModule:
type
Person = object
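A short usage sketch of the reference-based table API introduced above (illustrative, not part of the commit):

import tables

var t = newTable[string, int]()   # PTable[string, int]
t["a"] = 1
t["b"] = 2
echo t.len                        # --> 2
echo t["a"]                       # --> 1

var c = newCountTable[string]()
c.inc("x")
c.inc("x", 2)
echo c["x"]                       # --> 3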


@@ -2620,7 +2620,7 @@ proc `[]=`*[Idx, T](a: var array[Idx, T], x: TSlice[int], b: openArray[T]) =
if L == b.len:
for i in 0 .. <L: a[i+x.a] = b[i]
else:
sysFatal(EOutOfRange, "differing lengths for slice assignment")
sysFatal(EOutOfRange, "different lengths for slice assignment")
proc `[]`*[Idx, T](a: array[Idx, T], x: TSlice[Idx]): seq[T] =
## slice operation for arrays. Negative indexes are **not** supported
@@ -2642,7 +2642,7 @@ proc `[]=`*[Idx, T](a: var array[Idx, T], x: TSlice[Idx], b: openArray[T]) =
a[j] = b[i]
inc(j)
else:
sysFatal(EOutOfRange, "differing lengths for slice assignment")
sysFatal(EOutOfRange, "different lengths for slice assignment")
proc `[]`*[T](s: seq[T], x: TSlice[int]): seq[T] =
## slice operation for sequences. Negative indexes are supported.
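For clarity, a tiny example of the array slice assignment whose error message is adjusted above (illustrative only):

var a = [1, 2, 3, 4, 5]
a[1..3] = [9, 8, 7]      # lengths match, so the assignment succeeds
# a[1..3] = [9, 8]       # would raise EOutOfRange:
#                        # "different lengths for slice assignment"
echo a[1], " ", a[2], " ", a[3]   # --> 9 8 7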
@@ -2719,9 +2719,6 @@ proc `/=`*[T: float|float32|float64] (x: var T, y: T) {.inline, noSideEffect.} =
proc `&=`* (x: var string, y: string) {.magic: "AppendStrStr", noSideEffect.}
proc rand*(max: int): int {.magic: "Rand", sideEffect.}
## compile-time `random` function. Useful for debugging.
proc astToStr*[T](x: T): string {.magic: "AstToStr", noSideEffect.}
## converts the AST of `x` into a string representation. This is very useful
## for debugging.


@@ -313,6 +313,7 @@ proc mark(gch: var TGcHeap, c: PCell) =
if not containsOrIncl(gch.marked, d):
forAllChildren(d, waMarkPrecise)
else:
# XXX no 'if c.refCount != rcBlack' here?
c.refCount = rcBlack
gcAssert gch.tempStack.len == 0, "stack not empty!"
forAllChildren(c, waMarkPrecise)


@@ -640,8 +640,8 @@ proc unmapViewOfFile*(lpBaseAddress: pointer): WINBOOL {.stdcall,
type
TOVERLAPPED* {.pure, inheritable.} = object
Internal*: DWORD
InternalHigh*: DWORD
Internal*: PULONG
InternalHigh*: PULONG
Offset*: DWORD
OffsetHigh*: DWORD
hEvent*: THANDLE
@@ -718,4 +718,12 @@ proc WSASend*(s: TSocketHandle, buf: ptr TWSABuf, bufCount: DWORD,
stdcall, importc: "WSASend", dynlib: "Ws2_32.dll".}
proc get_osfhandle*(fd:TFileHandle): THandle {.
importc:"_get_osfhandle", header:"<io.h>".}
importc: "_get_osfhandle", header:"<io.h>".}
proc getSystemTimes*(lpIdleTime, lpKernelTime,
lpUserTime: var TFILETIME): WINBOOL {.stdcall,
dynlib: "kernel32", importc: "GetSystemTimes".}
proc getProcessTimes*(hProcess: THandle; lpCreationTime, lpExitTime,
lpKernelTime, lpUserTime: var TFILETIME): WINBOOL {.stdcall,
dynlib: "kernel32", importc: "GetProcessTimes".}
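A hedged sketch of calling the newly wrapped GetSystemTimes (Windows only; the dwLowDateTime/dwHighDateTime field names of TFILETIME are assumed from winlean's existing declarations):

when defined(windows):
  import winlean

  var idle, kernel, user: TFILETIME
  # WINBOOL: a non-zero result signals success
  if getSystemTimes(idle, kernel, user) != 0:
    echo "idle:   low=", idle.dwLowDateTime, " high=", idle.dwHighDateTime
    echo "kernel: low=", kernel.dwLowDateTime, " high=", kernel.dwHighDateTime
    echo "user:   low=", user.dwLowDateTime, " high=", user.dwHighDateTime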


@@ -0,0 +1,39 @@
discard """
output: '''
1
2
3
4
1
2
1
6
'''
"""
import asyncio, asyncdispatch, asyncnet
proc main {.async.} =
proc f: PFuture[int] {.async.} =
discard
echo 1
discard
result = 2
discard
let x = await f()
echo x
echo 3
proc g: PFuture[int] {.async.} =
discard
echo 4
discard
result = 6
discard
echo await f()
discard await f()
discard await g()
echo 6
main()


@@ -0,0 +1,8 @@
import asyncdispatch, asyncnet
proc main {.async.} =
proc f: PFuture[seq[int]] {.async.} =
await newAsyncSocket().connect("www.google.com", TPort(80))
let x = await f()
main()


@@ -0,0 +1,35 @@
# bug #1140
import parseutils, macros
proc parse_until_symbol(node: PNimrodNode, value: string, index: var int): bool {.compiletime.} =
var splitValue: string
var read = value.parseUntil(splitValue, '$', index)
# when false:
if false:
var identifier: string
read = value.parseWhile(identifier, {}, index)
node.add newCall("add", ident("result"), newCall("$", ident(identifier)))
if splitValue.len > 0:
node.insert node.len, newCall("add", ident("result"), newStrLitNode(splitValue))
proc parse_template(node: PNimrodNode, value: string) {.compiletime.} =
var index = 0
while index < value.len and
parse_until_symbol(node, value, index): discard
macro tmpli*(body: expr): stmt =
result = newStmtList()
result.add parseExpr("result = \"\"")
result.parse_template body[1].strVal
proc actual: string = tmpli html"""
<p>Test!</p>
"""
proc another: string = tmpli html"""
<p>what</p>
"""

tests/table/ptables.nim (new file, 128 lines added)

@@ -0,0 +1,128 @@
discard """
output: '''true'''
"""
import hashes, tables
const
data = {
"34": 123456, "12": 789,
"90": 343, "0": 34404,
"1": 344004, "2": 344774,
"3": 342244, "4": 3412344,
"5": 341232144, "6": 34214544,
"7": 3434544, "8": 344544,
"9": 34435644, "---00": 346677844,
"10": 34484, "11": 34474, "19": 34464,
"20": 34454, "30": 34141244, "40": 344114,
"50": 344490, "60": 344491, "70": 344492,
"80": 344497}
sorteddata = {
"---00": 346677844,
"0": 34404,
"1": 344004,
"10": 34484,
"11": 34474,
"12": 789,
"19": 34464,
"2": 344774, "20": 34454,
"3": 342244, "30": 34141244,
"34": 123456,
"4": 3412344, "40": 344114,
"5": 341232144, "50": 344490,
"6": 34214544, "60": 344491,
"7": 3434544, "70": 344492,
"8": 344544, "80": 344497,
"9": 34435644,
"90": 343}
block tableTest1:
var t = newTable[tuple[x, y: int], string]()
t[(0,0)] = "00"
t[(1,0)] = "10"
t[(0,1)] = "01"
t[(1,1)] = "11"
for x in 0..1:
for y in 0..1:
assert t[(x,y)] == $x & $y
assert($t ==
"{(x: 0, y: 0): 00, (x: 0, y: 1): 01, (x: 1, y: 0): 10, (x: 1, y: 1): 11}")
block tableTest2:
var t = newTable[string, float]()
t["test"] = 1.2345
t["111"] = 1.000043
t["123"] = 1.23
t.del("111")
t["012"] = 67.9
t["123"] = 1.5 # test overwriting
assert t["123"] == 1.5
assert t["111"] == 0.0 # deleted
assert(not hasKey(t, "111"))
for key, val in items(data): t[key] = val.toFloat
for key, val in items(data): assert t[key] == val.toFloat
block orderedTableTest1:
var t = newOrderedTable[string, int](2)
for key, val in items(data): t[key] = val
for key, val in items(data): assert t[key] == val
var i = 0
# `pairs` needs to yield in insertion order:
for key, val in pairs(t):
assert key == data[i][0]
assert val == data[i][1]
inc(i)
for key, val in mpairs(t): val = 99
for val in mvalues(t): assert val == 99
block countTableTest1:
var s = data.toTable
var t = newCountTable[string]()
for k in s.Keys: t.inc(k)
for k in t.keys: assert t[k] == 1
t.inc("90", 3)
t.inc("12", 2)
t.inc("34", 1)
assert t.largest()[0] == "90"
t.sort()
var i = 0
for k, v in t.pairs:
case i
of 0: assert k == "90" and v == 4
of 1: assert k == "12" and v == 3
of 2: assert k == "34" and v == 2
else: break
inc i
block SyntaxTest:
var x = newTable[int, string]({:})
proc orderedTableSortTest() =
var t = newOrderedTable[string, int](2)
for key, val in items(data): t[key] = val
for key, val in items(data): assert t[key] == val
t.sort(proc (x, y: tuple[key: string, val: int]): int = cmp(x.key, y.key))
var i = 0
# `pairs` needs to yield in sorted order:
for key, val in pairs(t):
doAssert key == sorteddata[i][0]
doAssert val == sorteddata[i][1]
inc(i)
# check that lookup still works:
for key, val in pairs(t):
doAssert val == t[key]
# check that insert still works:
t["newKeyHere"] = 80
orderedTableSortTest()
echo "true"

tests/table/ptables2.nim (new file, 20 lines added)

@@ -0,0 +1,20 @@
discard """
output: '''true'''
"""
import tables
proc TestHashIntInt() =
var tab = newTable[int,int]()
for i in 1..1_000_000:
tab[i] = i
for i in 1..1_000_000:
var x = tab[i]
if x != i : echo "not found ", i
proc run1() = # occupied Memory stays constant, but
for i in 1 .. 50: # aborts at run: 44 on win32 with 3.2GB with out of memory
TestHashIntInt()
run1()
echo "true"


@@ -25,7 +25,7 @@ const
silentReplaceText = "--verbosity:0 --hints:off"
var
TesterDir = getAppDir()
TesterDir = getAppDir() / ".."
NimrodBin = TesterDir / "../bin/nimrod"
proc replaceVars(session: var TNimrodSession, text: string): string =