Merge remote-tracking branch 'upstream/devel' into devel

deansher committed 2019-01-28 08:12:24 -05:00
106 changed files with 3991 additions and 4893 deletions

.gitignore (vendored, 1 change)
View File

@@ -52,6 +52,7 @@ xcuserdata/
/run.json
# for `nim doc foo.nim`
/*.html
lib/**/*.html
#/testresults.html #covered by /*.html
/testresults.json

View File

@@ -120,6 +120,8 @@ proc enumToString*(enums: openArray[enum]): string =
- Added `os.getCurrentCompilerExe` (implemented as `getAppFilename` at compile time),
  which can be used to retrieve the path of the currently executing compiler.
- Added `xmltree.toXmlAttributes`.
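
A brief usage sketch for the new `os.getCurrentCompilerExe` entry above (illustrative only; the printed path depends on the local installation):

    import os

    static:
      # prints the path of the compiler that is currently compiling this module
      echo getCurrentCompilerExe()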
### Library changes

View File

@@ -321,7 +321,7 @@ proc resetLoc(p: BProc, loc: var TLoc) =
else:
if optNilCheck in p.options:
linefmt(p, cpsStmts, "#chckNil((void*)$1);$n", addrLoc(p.config, loc))
if loc.storage != OnStack:
if loc.storage != OnStack and containsGcRef:
linefmt(p, cpsStmts, "#genericReset((void*)$1, $2);$n",
addrLoc(p.config, loc), genTypeInfo(p.module, loc.t, loc.lode.info))
# XXX: generated reset procs should not touch the m_type
@@ -1056,9 +1056,22 @@ proc genVarPrototype(m: BModule, n: PNode) =
if sfVolatile in sym.flags: add(m.s[cfsVars], " volatile")
addf(m.s[cfsVars], " $1;$n", [sym.loc.r])
const
frameDefines = """
$1 define nimfr_(proc, file) \
TFrame FR_; \
FR_.procname = proc; FR_.filename = file; FR_.line = 0; FR_.len = 0; #nimFrame(&FR_);
$1 define nimfrs_(proc, file, slots, length) \
struct {TFrame* prev;NCSTRING procname;NI line;NCSTRING filename; NI len; VarSlot s[slots];} FR_; \
FR_.procname = proc; FR_.filename = file; FR_.line = 0; FR_.len = length; #nimFrame((TFrame*)&FR_);
$1 define nimln_(n, file) \
FR_.line = n; FR_.filename = file;
"""
proc addIntTypes(result: var Rope; conf: ConfigRef) {.inline.} =
addf(result, "#define NIM_NEW_MANGLING_RULES\L" &
"#define NIM_INTBITS $1\L", [
addf(result, "#define NIM_INTBITS $1\L", [
platform.CPU[conf.target.targetCPU].intSize.rope])
if conf.cppCustomNamespace.len > 0:
result.add("#define USE_NIM_NAMESPACE ")
@@ -1302,6 +1315,7 @@ proc genInitCode(m: BModule) =
## this function is called in cgenWriteModules after all modules are closed,
## it means raising a dependency on the symbols is too late as it will not propagate
## into other modules, only simple rope manipulations are allowed
appcg(m, m.s[cfsForwardTypes], frameDefines, [rope("#")])
var moduleInitRequired = false
let initname = getInitName(m.module)
@@ -1313,9 +1327,9 @@ proc genInitCode(m: BModule) =
appcg(m, m.s[cfsTypeInit1], "static #TNimType $1[$2];$n",
[m.nimTypesName, rope(m.nimTypes)])
# Give this small function its own scope
addf(prc, "{$N", [])
block:
if m.preInitProc.s(cpsInit).len > 0 or m.preInitProc.s(cpsStmts).len > 0:
# Give this small function its own scope
addf(prc, "{$N", [])
# Keep a bogus frame in case the code needs one
add(prc, ~"\tTFrame FR_; FR_.len = 0;$N")
@@ -1336,8 +1350,11 @@ proc genInitCode(m: BModule) =
add(prc, genSectionStart(cpsStmts, m.config))
add(prc, m.preInitProc.s(cpsStmts))
add(prc, genSectionEnd(cpsStmts, m.config))
addf(prc, "}$N", [])
addf(prc, "}$N", [])
# add a new scope for the following code, because the old vcc compiler needs
# variables to be defined at the top of the block
addf(prc, "{$N", [])
if m.initProc.gcFrameId > 0:
moduleInitRequired = true
add(prc, initGCFrame(m.initProc))
@@ -1374,6 +1391,7 @@ proc genInitCode(m: BModule) =
if m.initProc.gcFrameId > 0:
moduleInitRequired = true
add(prc, deinitGCFrame(m.initProc))
addf(prc, "}$N", [])
addf(prc, "}$N$N", [])

View File

@@ -746,6 +746,8 @@ proc processSwitch*(switch, arg: string, pass: TCmdLinePass, info: TLineInfo;
else:
conf.cppCustomNamespace = "Nim"
defineSymbol(conf.symbols, "cppCompileToNamespace", conf.cppCustomNamespace)
of "docinternal":
processOnOffSwitchG(conf, {optDocInternal}, arg, pass, info)
else:
if strutils.find(switch, '.') >= 0: options.setConfigVar(conf, switch, arg)
else: invalidCmdLineOption(conf, pass, switch, info)

View File

@@ -132,61 +132,36 @@ type
emptyNode: PNode
otherRead: PNode
proc isHarmlessVar*(s: PSym; c: Con): bool =
# 's' is harmless if it is used only once and its
# definition/usage are not split by any labels:
#
# let s = foo()
# while true:
# a[i] = s
#
# produces:
#
# def s
# L1:
# use s
# goto L1
#
# let s = foo()
# if cond:
# a[i] = s
# else:
# a[j] = s
#
# produces:
#
# def s
# fork L2
# use s
# goto L3
# L2:
# use s
# L3
#
# So this analysis is for now overly conservative, but correct.
var defsite = -1
var usages = 0
for i in 0..<c.g.len:
case c.g[i].kind
proc isLastRead(s: PSym; c: var Con; pc, comesFrom: int): int =
var pc = pc
while pc < c.g.len:
case c.g[pc].kind
of def:
if c.g[i].sym == s:
if defsite < 0: defsite = i
else: return false
if c.g[pc].sym == s:
# the path led to a redefinition of 's' --> abandon it.
return high(int)
inc pc
of use:
if c.g[i].sym == s:
if defsite < 0: return false
for j in defsite .. i:
# not within the same basic block?
if j in c.jumpTargets: return false
# if we want to die after the first 'use':
if usages > 1: return false
inc usages
#of useWithinCall:
# if c.g[i].sym == s: return false
of goto, fork:
discard "we do not perform an abstract interpretation yet"
result = usages <= 1
if c.g[pc].sym == s:
c.otherRead = c.g[pc].n
return -1
inc pc
of goto:
pc = pc + c.g[pc].dest
of fork:
# every branch must lead to the last read of the location:
var variantA = isLastRead(s, c, pc+1, pc)
if variantA < 0: return -1
let variantB = isLastRead(s, c, pc + c.g[pc].dest, pc)
if variantB < 0: return -1
elif variantA == high(int):
variantA = variantB
pc = variantA
of InstrKind.join:
let dest = pc + c.g[pc].dest
if dest == comesFrom: return pc + 1
inc pc
return pc
proc isLastRead(n: PNode; c: var Con): bool =
# first we need to search for the instruction that belongs to 'n':
@@ -195,59 +170,52 @@ proc isLastRead(n: PNode; c: var Con): bool =
var instr = -1
for i in 0..<c.g.len:
if c.g[i].n == n:
if instr < 0: instr = i
else:
# eh, we found two positions that belong to 'n'?
# better return 'false' then:
return false
if instr < 0:
instr = i
break
if instr < 0: return false
# we go through all paths beginning from 'instr+1' and need to
# ensure that we don't find another 'use X' instruction.
if instr+1 >= c.g.len: return true
let s = n.sym
var pcs: seq[int] = @[instr+1]
var takenGotos: IntSet
var takenForks = initIntSet()
while pcs.len > 0:
var pc = pcs.pop
when true:
result = isLastRead(n.sym, c, instr+1, -1) >= 0
else:
let s = n.sym
var pcs: seq[int] = @[instr+1]
var takenGotos: IntSet
var takenForks = initIntSet()
while pcs.len > 0:
var pc = pcs.pop
takenGotos = initIntSet()
while pc < c.g.len:
case c.g[pc].kind
of def:
if c.g[pc].sym == s:
# the path led to a redefinition of 's' --> abandon it.
when false:
# Too complex thinking ahead: In reality it is enough to find
# the 'def x' here on the current path to make the 'use x' valid.
# but for this the definition needs to dominate the usage:
var dominates = true
for j in pc+1 .. instr:
# not within the same basic block?
if c.g[j].kind in {goto, fork} and (j + c.g[j].dest) in (pc+1 .. instr):
#if j in c.jumpTargets:
dominates = false
if dominates: break
break
inc pc
of use:
if c.g[pc].sym == s:
c.otherRead = c.g[pc].n
return false
inc pc
of goto:
# we must leave endless loops eventually:
if not takenGotos.containsOrIncl(pc):
pc = pc + c.g[pc].dest
else:
takenGotos = initIntSet()
while pc < c.g.len:
case c.g[pc].kind
of def:
if c.g[pc].sym == s:
# the path led to a redefinition of 's' --> abandon it.
break
inc pc
of fork:
# we follow the next instruction but push the dest onto our "work" stack:
if not takenForks.containsOrIncl(pc):
pcs.add pc + c.g[pc].dest
inc pc
#echo c.graph.config $ n.info, " last read here!"
return true
of use:
if c.g[pc].sym == s:
c.otherRead = c.g[pc].n
return false
inc pc
of goto:
# we must leave endless loops eventually:
if not takenGotos.containsOrIncl(pc):
pc = pc + c.g[pc].dest
else:
inc pc
of fork:
# we follow the next instruction but push the dest onto our "work" stack:
if not takenForks.containsOrIncl(pc):
pcs.add pc + c.g[pc].dest
inc pc
of InstrKind.join:
inc pc
#echo c.graph.config $ n.info, " last read here!"
return true
template interestingSym(s: PSym): bool =
s.owner == c.owner and s.kind in InterestingSyms and hasDestructor(s.typ)

View File

@@ -34,12 +34,12 @@ import ast, astalgo, types, intsets, tables, msgs, options, lineinfos
type
InstrKind* = enum
goto, fork, def, use
goto, fork, join, def, use
Instr* = object
n*: PNode
case kind*: InstrKind
of def, use: sym*: PSym
of goto, fork: dest*: int
of goto, fork, join: dest*: int
ControlFlowGraph* = seq[Instr]
@@ -56,6 +56,7 @@ type
inCall, inTryStmt: int
blocks: seq[TBlock]
tryStmtFixups: seq[TPosition]
forks: seq[TPosition]
owner: PSym
proc debugInfo(info: TLineInfo): string =
@@ -67,18 +68,18 @@ proc codeListing(c: ControlFlowGraph, result: var string, start=0; last = -1) =
var jumpTargets = initIntSet()
let last = if last < 0: c.len-1 else: min(last, c.len-1)
for i in start..last:
if c[i].kind in {goto, fork}:
if c[i].kind in {goto, fork, join}:
jumpTargets.incl(i+c[i].dest)
var i = start
while i <= last:
if i in jumpTargets: result.add("L" & $i & ":\n")
result.add "\t"
result.add $c[i].kind
result.add ($i & " " & $c[i].kind)
result.add "\t"
case c[i].kind
of def, use:
result.add c[i].sym.name.s
of goto, fork:
of goto, fork, join:
result.add "L"
result.add c[i].dest+i
result.add("\t#")
@@ -98,11 +99,166 @@ proc echoCfg*(c: ControlFlowGraph; start=0; last = -1) {.deprecated.} =
proc forkI(c: var Con; n: PNode): TPosition =
result = TPosition(c.code.len)
c.code.add Instr(n: n, kind: fork, dest: 0)
c.forks.add result
proc gotoI(c: var Con; n: PNode): TPosition =
result = TPosition(c.code.len)
c.code.add Instr(n: n, kind: goto, dest: 0)
#[
Design of join
==============
block:
if cond: break
def(x)
use(x)
Generates:
L0: fork L1
join L0 # patched.
goto Louter
L1:
def x
join L0
Louter:
use x
block outer:
while a:
while b:
if foo:
if bar:
break outer # --> we need to 'join' every pushed 'fork' here
This works and then our abstract interpretation needs to deal with 'fork'
differently. It really causes a split in execution. Two threads are
"spawned" and both need to reach the 'join L' instruction. Afterwards
the abstract interpretations are joined and execution resumes single
threaded.
Abstract Interpretation
-----------------------
proc interpret(pc, state, comesFrom): state =
result = state
# we need an explicit 'create' instruction (an explicit heap), in order
# to deal with 'var x = create(); var y = x; var z = y; destroy(z)'
while true:
case pc
of fork:
let a = interpret(pc+1, result, pc)
let b = interpret(forkTarget, result, pc)
result = a ++ b # ++ is a union operation
inc pc
of join:
if joinTarget == comesFrom: return result
else: inc pc
of use X:
if not result.contains(x):
error "variable not initialized " & x
inc pc
of def X:
if not result.contains(x):
result.incl X
else:
error "overwrite of variable causes memory leak " & x
inc pc
of destroy X:
result.excl X
This is correct but still can lead to false positives:
proc p(cond: bool) =
if cond:
new(x)
otherThings()
if cond:
destroy x
This is not a leak. We should find a way to model *data* flow, not just
control flow. One solution is to rewrite the 'if' without a fork
instruction. The unstructured aspect can then be dealt with easily via
the 'goto' and 'join' instructions.
proc p(cond: bool) =
L0: fork Lend
new(x)
# do not 'join' here!
Lend:
otherThings()
join L0 # SKIP THIS FOR new(x) SOMEHOW
destroy x
join L0 # but here.
But if we follow 'goto Louter' we will never come to the join point.
If we restore the bindings after popping pc from the stack, then there is
"no" problem?!
while cond:
prelude()
if not condB: break
postlude()
--->
var setFlag = true
while cond and not setFlag:
prelude()
if not condB:
setFlag = true # BUT: Dependency
if not setFlag: # HERE
postlude()
--->
var setFlag = true
while cond and not setFlag:
prelude()
if not condB:
postlude()
setFlag = true
-------------------------------------------------
while cond:
prelude()
if more:
if not condB: break
stuffHere()
postlude()
-->
var setFlag = true
while cond and not setFlag:
prelude()
if more:
if not condB:
setFlag = false
else:
stuffHere()
postlude()
else:
postlude()
This is getting complicated. Instead we keep the whole 'join' idea but
duplicate the 'join' instructions on breaks and return exits!
]#
proc joinI(c: var Con; fromFork: TPosition; n: PNode) =
let dist = fromFork.int - c.code.len
c.code.add Instr(n: n, kind: join, dest: dist)
proc genLabel(c: Con): TPosition =
result = TPosition(c.code.len)
@@ -135,30 +291,97 @@ proc isTrue(n: PNode): bool =
proc gen(c: var Con; n: PNode) # {.noSideEffect.}
proc genWhile(c: var Con; n: PNode) =
# L1:
# cond, tmp
# fork tmp, L2
# body
# jmp L1
# L2:
let L1 = c.genLabel
withBlock(nil):
when true:
proc genWhile(c: var Con; n: PNode) =
# We unroll every loop 3 times. We emulate 0, 1, 2 iterations
# through the loop. We need to prove this is correct for our
# purposes. But Herb Sutter claims it is. (Proof by authority.)
#[
while cond:
body
Becomes:
if cond:
body
if cond:
body
if cond:
body
We still need to ensure 'break' resolves properly, so an AST to AST
translation is impossible.
So the code to generate is:
cond
fork L4 # F1
body
cond
fork L5 # F2
body
cond
fork L6 # F3
body
L6:
join F3
L5:
join F2
L4:
join F1
]#
if isTrue(n.sons[0]):
c.gen(n.sons[1])
c.jmpBack(n, L1)
# 'while true' is an idiom in Nim and so we produce
# better code for it:
for i in 0..2:
withBlock(nil):
c.gen(n.sons[1])
else:
c.gen(n.sons[0])
let L2 = c.forkI(n)
c.gen(n.sons[1])
c.jmpBack(n, L1)
c.patch(L2)
let oldForksLen = c.forks.len
var endings: array[3, TPosition]
for i in 0..2:
withBlock(nil):
c.gen(n.sons[0])
endings[i] = c.forkI(n)
c.gen(n.sons[1])
for i in countdown(endings.high, 0):
let endPos = endings[i]
c.patch(endPos)
c.joinI(c.forks.pop(), n)
doAssert(c.forks.len == oldForksLen)
else:
proc genWhile(c: var Con; n: PNode) =
# L1:
# cond, tmp
# fork tmp, L2
# body
# jmp L1
# L2:
let oldForksLen = c.forks.len
let L1 = c.genLabel
withBlock(nil):
if isTrue(n.sons[0]):
c.gen(n.sons[1])
c.jmpBack(n, L1)
else:
c.gen(n.sons[0])
let L2 = c.forkI(n)
c.gen(n.sons[1])
c.jmpBack(n, L1)
c.patch(L2)
setLen(c.forks, oldForksLen)
proc genBlock(c: var Con; n: PNode) =
withBlock(n.sons[0].sym):
c.gen(n.sons[1])
proc genJoins(c: var Con; n: PNode) =
for i in countdown(c.forks.high, 0): joinI(c, c.forks[i], n)
proc genBreak(c: var Con; n: PNode) =
genJoins(c, n)
let L1 = c.gotoI(n)
if n.sons[0].kind == nkSym:
#echo cast[int](n.sons[0].sym)
@@ -170,28 +393,76 @@ proc genBreak(c: var Con; n: PNode) =
else:
c.blocks[c.blocks.high].fixups.add L1
template forkT(n, body) =
let oldLen = c.forks.len
let L1 = c.forkI(n)
body
c.patch(L1)
c.joinI(L1, n)
setLen(c.forks, oldLen)
proc genIf(c: var Con, n: PNode) =
#[
if cond:
A
elif condB:
B
elif condC:
C
else:
D
cond
fork L1
A
goto Lend
L1:
condB
fork L2
B
goto Lend2
L2:
condC
fork L3
C
goto Lend3
L3:
D
goto Lend3 # not eliminated to simplify the join generation
Lend3:
join F3
Lend2:
join F2
Lend:
join F1
]#
let oldLen = c.forks.len
var endings: seq[TPosition] = @[]
for i in countup(0, len(n) - 1):
var it = n.sons[i]
c.gen(it.sons[0])
if it.len == 2:
let elsePos = c.forkI(it.sons[1])
let elsePos = forkI(c, it[1])
c.gen(it.sons[1])
if i < sonsLen(n)-1:
endings.add(c.gotoI(it.sons[1]))
endings.add(c.gotoI(it.sons[1]))
c.patch(elsePos)
for endPos in endings: c.patch(endPos)
for i in countdown(endings.high, 0):
let endPos = endings[i]
c.patch(endPos)
c.joinI(c.forks.pop(), n)
doAssert(c.forks.len == oldLen)
proc genAndOr(c: var Con; n: PNode) =
# asgn dest, a
# fork L1
# asgn dest, b
# L1:
# join F1
c.gen(n.sons[1])
let L1 = c.forkI(n)
c.gen(n.sons[2])
c.patch(L1)
forkT(n):
c.gen(n.sons[2])
proc genCase(c: var Con; n: PNode) =
# if (!expr1) goto L1;
@@ -204,72 +475,94 @@ proc genCase(c: var Con; n: PNode) =
# L2:
# elsePart
# Lend:
when false:
# XXX Exhaustiveness is not yet mapped to the control flow graph as
# it seems to offer no benefits for the 'last read of' question.
let isExhaustive = skipTypes(n.sons[0].typ,
abstractVarRange-{tyTypeDesc}).kind in {tyFloat..tyFloat128, tyString} or
lastSon(n).kind == nkElse
let isExhaustive = skipTypes(n.sons[0].typ,
abstractVarRange-{tyTypeDesc}).kind notin {tyFloat..tyFloat128, tyString}
var endings: seq[TPosition] = @[]
let oldLen = c.forks.len
c.gen(n.sons[0])
for i in 1 ..< n.len:
let it = n.sons[i]
if it.len == 1:
c.gen(it.sons[0])
elif i == n.len-1 and isExhaustive:
# treat the last branch as 'else' if this is an exhaustive case statement.
c.gen(it.lastSon)
else:
let elsePos = c.forkI(it.lastSon)
c.gen(it.lastSon)
if i < sonsLen(n)-1:
endings.add(c.gotoI(it.lastSon))
endings.add(c.gotoI(it.lastSon))
c.patch(elsePos)
for endPos in endings: c.patch(endPos)
for i in countdown(endings.high, 0):
let endPos = endings[i]
c.patch(endPos)
c.joinI(c.forks.pop(), n)
doAssert(c.forks.len == oldLen)
proc genTry(c: var Con; n: PNode) =
let oldLen = c.forks.len
var endings: seq[TPosition] = @[]
inc c.inTryStmt
var newFixups: seq[TPosition]
swap(newFixups, c.tryStmtFixups)
let oldFixups = c.tryStmtFixups.len
let elsePos = c.forkI(n)
#let elsePos = c.forkI(n)
c.gen(n.sons[0])
dec c.inTryStmt
for f in newFixups:
for i in oldFixups..c.tryStmtFixups.high:
let f = c.tryStmtFixups[i]
c.patch(f)
swap(newFixups, c.tryStmtFixups)
# we also need to produce join instructions
# for the 'fork' that might precede the goto instruction
if f.int-1 >= 0 and c.code[f.int-1].kind == fork:
c.joinI(TPosition(f.int-1), n)
c.patch(elsePos)
setLen(c.tryStmtFixups, oldFixups)
#c.patch(elsePos)
for i in 1 ..< n.len:
let it = n.sons[i]
if it.kind != nkFinally:
var blen = len(it)
let endExcept = c.forkI(it)
c.gen(it.lastSon)
if i < sonsLen(n)-1:
endings.add(c.gotoI(it))
endings.add(c.gotoI(it))
c.patch(endExcept)
for endPos in endings: c.patch(endPos)
for i in countdown(endings.high, 0):
let endPos = endings[i]
c.patch(endPos)
c.joinI(c.forks.pop(), n)
# join the 'elsePos' forkI instruction:
#c.joinI(c.forks.pop(), n)
let fin = lastSon(n)
if fin.kind == nkFinally:
c.gen(fin.sons[0])
doAssert(c.forks.len == oldLen)
template genNoReturn(c: var Con; n: PNode) =
# leave the graph
c.code.add Instr(n: n, kind: goto, dest: high(int) - c.code.len)
proc genRaise(c: var Con; n: PNode) =
genJoins(c, n)
gen(c, n.sons[0])
if c.inTryStmt > 0:
c.tryStmtFixups.add c.gotoI(n)
else:
c.code.add Instr(n: n, kind: goto, dest: high(int) - c.code.len)
genNoReturn(c, n)
proc genImplicitReturn(c: var Con) =
if c.owner.kind in {skProc, skFunc, skMethod, skIterator, skConverter} and resultPos < c.owner.ast.len:
gen(c, c.owner.ast.sons[resultPos])
proc genReturn(c: var Con; n: PNode) =
genJoins(c, n)
if n.sons[0].kind != nkEmpty:
gen(c, n.sons[0])
else:
genImplicitReturn(c)
c.code.add Instr(n: n, kind: goto, dest: high(int) - c.code.len)
genNoReturn(c, n)
const
InterestingSyms = {skVar, skResult, skLet, skParam}
@@ -287,6 +580,14 @@ proc genDef(c: var Con; n: PNode) =
if n.kind == nkSym and n.sym.kind in InterestingSyms:
c.code.add Instr(n: n, kind: def, sym: n.sym)
proc canRaise(fn: PNode): bool =
const magicsThatCanRaise = {
mNone, mSlurp, mStaticExec, mParseExprToAst, mParseStmtToAst}
if fn.kind == nkSym and fn.sym.magic notin magicsThatCanRaise:
result = false
else:
result = true
proc genCall(c: var Con; n: PNode) =
gen(c, n[0])
var t = n[0].typ
@@ -297,8 +598,16 @@ proc genCall(c: var Con; n: PNode) =
if t != nil and i < t.len and t.sons[i].kind == tyVar:
genDef(c, n[i])
# every call can potentially raise:
if c.inTryStmt > 0:
c.tryStmtFixups.add c.forkI(n)
if c.inTryStmt > 0 and canRaise(n[0]):
# we generate the instruction sequence:
# fork L1
# goto exceptionHandler (except or finally)
# L1:
# join F1
let endGoto = c.forkI(n)
c.tryStmtFixups.add c.gotoI(n)
c.patch(endGoto)
c.joinI(c.forks.pop(), n)
dec c.inCall
proc genMagic(c: var Con; n: PNode; m: TMagic) =
@@ -307,9 +616,6 @@ proc genMagic(c: var Con; n: PNode; m: TMagic) =
of mNew, mNewFinalize:
genDef(c, n[1])
for i in 2..<n.len: gen(c, n[i])
of mExit:
genCall(c, n)
c.code.add Instr(n: n, kind: goto, dest: high(int) - c.code.len)
else:
genCall(c, n)
@@ -334,6 +640,8 @@ proc gen(c: var Con; n: PNode) =
genMagic(c, n, s.magic)
else:
genCall(c, n)
if sfNoReturn in n.sons[0].sym.flags:
genNoReturn(c, n)
else:
genCall(c, n)
of nkCharLit..nkNilLit: discard
@@ -368,114 +676,48 @@ proc gen(c: var Con; n: PNode) =
doAssert false, "dfa construction pass requires the elimination of 'defer'"
else: discard
proc dfa(code: seq[Instr]; conf: ConfigRef) =
var u = newSeq[IntSet](code.len) # usages
var d = newSeq[IntSet](code.len) # defs
var c = newSeq[IntSet](code.len) # consumed
var backrefs = initTable[int, int]()
for i in 0..<code.len:
u[i] = initIntSet()
d[i] = initIntSet()
c[i] = initIntSet()
case code[i].kind
of use: u[i].incl(code[i].sym.id)
of def: d[i].incl(code[i].sym.id)
of fork, goto:
let d = i+code[i].dest
backrefs.add(d, i)
var w = @[0]
var maxIters = 50
var someChange = true
var takenGotos = initIntSet()
var consuming = -1
while w.len > 0 and maxIters > 0: # and someChange:
dec maxIters
var pc = w.pop() # w[^1]
var prevPc = -1
# this simulates a single linear control flow execution:
while pc < code.len:
if prevPc >= 0:
someChange = false
# merge step and test for changes (we compute the fixpoints here):
# 'u' needs to be the union of prevPc, pc
# 'd' needs to be the intersection of 'pc'
for id in u[prevPc]:
if not u[pc].containsOrIncl(id):
someChange = true
# in (a; b) if ``a`` sets ``v`` so does ``b``. The intersection
# is only interesting on merge points:
for id in d[prevPc]:
if not d[pc].containsOrIncl(id):
someChange = true
# if this is a merge point, we take the intersection of the 'd' sets:
if backrefs.hasKey(pc):
var intersect = initIntSet()
assign(intersect, d[pc])
var first = true
for prevPc in backrefs.allValues(pc):
for def in d[pc]:
if def notin d[prevPc]:
excl(intersect, def)
someChange = true
when defined(debugDfa):
echo "Excluding ", pc, " prev ", prevPc
assign d[pc], intersect
if consuming >= 0:
if not c[pc].containsOrIncl(consuming):
someChange = true
consuming = -1
# our interpretation ![I!]:
prevPc = pc
case code[pc].kind
of goto:
# we must leave endless loops eventually:
if not takenGotos.containsOrIncl(pc) or someChange:
pc = pc + code[pc].dest
else:
inc pc
of fork:
# we follow the next instruction but push the dest onto our "work" stack:
#if someChange:
w.add pc + code[pc].dest
inc pc
of use:
#if not d[prevPc].missingOrExcl():
# someChange = true
consuming = code[pc].sym.id
inc pc
of def:
if not d[pc].containsOrIncl(code[pc].sym.id):
someChange = true
inc pc
when defined(useDfa) and defined(debugDfa):
for i in 0..<code.len:
echo "PC ", i, ": defs: ", d[i], "; uses ", u[i], "; consumes ", c[i]
# now check the condition we're interested in:
for i in 0..<code.len:
case code[i].kind
of use:
let s = code[i].sym
if s.id notin d[i]:
localError(conf, code[i].n.info, "usage of uninitialized variable: " & s.name.s)
if s.id in c[i]:
localError(conf, code[i].n.info, "usage of an already consumed variable: " & s.name.s)
else: discard
proc dataflowAnalysis*(s: PSym; body: PNode; conf: ConfigRef) =
var c = Con(code: @[], blocks: @[])
gen(c, body)
genImplicitReturn(c)
when defined(useDfa) and defined(debugDfa): echoCfg(c.code)
dfa(c.code, conf)
proc constructCfg*(s: PSym; body: PNode): ControlFlowGraph =
## constructs a control flow graph for ``body``.
var c = Con(code: @[], blocks: @[], owner: s)
gen(c, body)
genImplicitReturn(c)
shallowCopy(result, c.code)
proc interpret(code: ControlFlowGraph; pc: int, state: seq[PSym], comesFrom: int; threadId: int): (seq[PSym], int) =
var res = state
var pc = pc
while pc < code.len:
#echo threadId, " ", code[pc].kind
case code[pc].kind
of goto:
pc = pc + code[pc].dest
of fork:
let target = pc + code[pc].dest
let (branchA, pcA) = interpret(code, pc+1, res, pc, threadId+1)
let (branchB, _) = interpret(code, target, res, pc, threadId+2)
# we add vars if they are in both branches:
for v in branchB:
if v in branchA:
if v notin res:
res.add v
pc = pcA+1
of join:
let target = pc + code[pc].dest
if comesFrom == target: return (res, pc)
inc pc
of use:
let v = code[pc].sym
if v notin res and v.kind != skParam:
echo "attempt to read uninitialized variable ", v.name.s
inc pc
of def:
let v = code[pc].sym
if v notin res:
res.add v
inc pc
return (res, pc)
proc dataflowAnalysis*(s: PSym; body: PNode) =
let c = constructCfg(s, body)
#echoCfg c
discard interpret(c, 0, @[], -1, 1)

View File

@@ -144,28 +144,28 @@ proc newDocumentor*(filename: AbsoluteFile; cache: IdentCache; conf: ConfigRef,
initStrTable result.types
result.onTestSnippet =
proc (gen: var RstGenerator; filename, cmd: string; status: int; content: string) =
var d = TDocumentor(gen)
var outp: AbsoluteFile
if filename.len == 0:
inc(d.id)
let nameOnly = splitFile(d.filename).name
let subdir = getNimcacheDir(conf) / RelativeDir(nameOnly)
createDir(subdir)
outp = subdir / RelativeFile(nameOnly & "_snippet_" & $d.id & ".nim")
elif isAbsolute(filename):
outp = AbsoluteFile filename
else:
# Nim's convention: every path is relative to the file it was written in:
outp = splitFile(d.filename).dir.AbsoluteDir / RelativeFile(filename)
# Include the current file if we're parsing a nim file
let importStmt = if d.isPureRst: "" else: "import \"$1\"\n" % [d.filename]
writeFile(outp, importStmt & content)
let c = if cmd.startsWith("nim "): os.getAppFilename() & cmd.substr(3)
else: cmd
let c2 = c % quoteShell(outp)
rawMessage(conf, hintExecuting, c2)
if execShellCmd(c2) != status:
rawMessage(conf, errGenerated, "executing of external program failed: " & c2)
var d = TDocumentor(gen)
var outp: AbsoluteFile
if filename.len == 0:
inc(d.id)
let nameOnly = splitFile(d.filename).name
let subdir = getNimcacheDir(conf) / RelativeDir(nameOnly)
createDir(subdir)
outp = subdir / RelativeFile(nameOnly & "_snippet_" & $d.id & ".nim")
elif isAbsolute(filename):
outp = AbsoluteFile filename
else:
# Nim's convention: every path is relative to the file it was written in:
outp = splitFile(d.filename).dir.AbsoluteDir / RelativeFile(filename)
# Include the current file if we're parsing a nim file
let importStmt = if d.isPureRst: "" else: "import \"$1\"\n" % [d.filename]
writeFile(outp, importStmt & content)
let c = if cmd.startsWith("nim "): os.getAppFilename() & cmd.substr(3)
else: cmd
let c2 = c % quoteShell(outp)
rawMessage(conf, hintExecuting, c2)
if execShellCmd(c2) != status:
rawMessage(conf, errGenerated, "executing of external program failed: " & c2)
result.emitted = initIntSet()
result.destFile = getOutFile2(conf, relativeTo(filename, conf.projectPath),
outExt, RelativeDir"htmldocs", false)
@@ -300,13 +300,17 @@ proc externalDep(d: PDoc; module: PSym): string =
else:
result = extractFilename toFullPath(d.conf, FileIndex module.position)
proc nodeToHighlightedHtml(d: PDoc; n: PNode; result: var Rope; renderFlags: TRenderFlags = {}) =
proc nodeToHighlightedHtml(d: PDoc; n: PNode; result: var Rope; renderFlags: TRenderFlags = {};
procLink: Rope) =
var r: TSrcGen
var literal = ""
initTokRender(r, n, renderFlags)
var kind = tkEof
var tokenPos = 0
var procTokenPos = 0
while true:
getNextTok(r, kind, literal)
inc tokenPos
case kind
of tkEof:
break
@@ -314,6 +318,8 @@ proc nodeToHighlightedHtml(d: PDoc; n: PNode; result: var Rope; renderFlags: TRe
dispA(d.conf, result, "<span class=\"Comment\">$1</span>", "\\spanComment{$1}",
[rope(esc(d.target, literal))])
of tokKeywordLow..tokKeywordHigh:
if kind in {tkProc, tkMethod, tkIterator, tkMacro, tkTemplate, tkFunc, tkConverter}:
procTokenPos = tokenPos
dispA(d.conf, result, "<span class=\"Keyword\">$1</span>", "\\spanKeyword{$1}",
[rope(literal)])
of tkOpr:
@@ -333,7 +339,11 @@ proc nodeToHighlightedHtml(d: PDoc; n: PNode; result: var Rope; renderFlags: TRe
"\\spanFloatNumber{$1}", [rope(esc(d.target, literal))])
of tkSymbol:
let s = getTokSym(r)
if s != nil and s.kind == skType and sfExported in s.flags and
# -2 because of the whitespace in between:
if procTokenPos == tokenPos-2 and procLink != nil:
dispA(d.conf, result, "<a href=\"#$2\"><span class=\"Identifier\">$1</span></a>",
"\\spanIdentifier{$1}", [rope(esc(d.target, literal)), procLink])
elif s != nil and s.kind == skType and sfExported in s.flags and
s.owner != nil and belongsToPackage(d.conf, s.owner) and
d.target == outHtml:
let external = externalDep(d, s.owner)
@@ -445,7 +455,7 @@ proc getAllRunnableExamplesRec(d: PDoc; n, orig: PNode; dest: var Rope) =
for b in body:
if i > 0: dest.add "\n"
inc i
nodeToHighlightedHtml(d, b, dest, {})
nodeToHighlightedHtml(d, b, dest, {}, nil)
dest.add(d.config.getOrDefault"doc.listing_end" % id)
else: discard
for i in 0 ..< n.safeLen:
@@ -464,7 +474,10 @@ proc isVisible(d: PDoc; n: PNode): bool =
# we cannot generate code for forwarded symbols here as we have no
# exception tracking information here. Instead we copy over the comment
# from the proc header.
result = {sfExported, sfFromGeneric, sfForward}*n.sym.flags == {sfExported}
if optDocInternal in d.conf.globalOptions:
result = {sfFromGeneric, sfForward}*n.sym.flags == {}
else:
result = {sfExported, sfFromGeneric, sfForward}*n.sym.flags == {sfExported}
if result and containsOrIncl(d.emitted, n.sym.id):
result = false
elif n.kind == nkPragmaExpr:
@@ -545,16 +558,19 @@ proc complexName(k: TSymKind, n: PNode, baseName: string): string =
## If you modify the output of this proc, please update the anchor generation
## section of ``doc/docgen.txt``.
result = baseName
case k:
of skProc, skFunc: result.add(defaultParamSeparator)
of skMacro: result.add(".m" & defaultParamSeparator)
of skMethod: result.add(".e" & defaultParamSeparator)
of skIterator: result.add(".i" & defaultParamSeparator)
of skTemplate: result.add(".t" & defaultParamSeparator)
of skConverter: result.add(".c" & defaultParamSeparator)
case k
of skProc, skFunc: discard
of skMacro: result.add(".m")
of skMethod: result.add(".e")
of skIterator: result.add(".i")
of skTemplate: result.add(".t")
of skConverter: result.add(".c")
else: discard
if len(n) > paramsPos and n[paramsPos].kind == nkFormalParams:
result.add(renderParamTypes(n[paramsPos]))
let params = renderParamTypes(n[paramsPos])
if params.len > 0:
result.add(defaultParamSeparator)
result.add(params)
proc isCallable(n: PNode): bool =
## Returns true if `n` contains a callable node.
@@ -612,9 +628,6 @@ proc genItem(d: PDoc, n, nameNode: PNode, k: TSymKind) =
break
plainName.add(literal)
nodeToHighlightedHtml(d, n, result, {renderNoBody, renderNoComments,
renderDocComments, renderSyms})
inc(d.id)
let
plainNameRope = rope(xmltree.escape(plainName.strip))
@@ -627,6 +640,9 @@ proc genItem(d: PDoc, n, nameNode: PNode, k: TSymKind) =
symbolOrIdRope = symbolOrId.rope
symbolOrIdEncRope = encodeUrl(symbolOrId).rope
nodeToHighlightedHtml(d, n, result, {renderNoBody, renderNoComments,
renderDocComments, renderSyms}, symbolOrIdEncRope)
var seeSrcRope: Rope = nil
let docItemSeeSrc = getConfigVar(d.conf, "doc.item.seesrc")
if docItemSeeSrc.len > 0:

View File

@@ -24,11 +24,11 @@ proc readOutput(p: Process): (string, int) =
proc opGorge*(cmd, input, cache: string, info: TLineInfo; conf: ConfigRef): (string, int) =
let workingDir = parentDir(toFullPath(conf, info))
if cache.len > 0:# and optForceFullMake notin gGlobalOptions:
if cache.len > 0:
let h = secureHash(cmd & "\t" & input & "\t" & cache)
let filename = toGeneratedFile(conf, AbsoluteFile("gorge_" & $h), "txt").string
var f: File
if open(f, filename):
if optForceFullMake notin conf.globalOptions and open(f, filename):
result = (f.readAll, 0)
f.close
return

View File

@@ -203,7 +203,7 @@ proc toMsgFilename*(conf: ConfigRef; info: TLineInfo): string =
result = absPath
else:
let relPath = conf.m.fileInfos[info.fileIndex.int32].projPath.string
result = if absPath.len < relPath.len: absPath else: relPath
result = if relPath.count("..") > 2: absPath else: relPath
proc toLinenumber*(info: TLineInfo): int {.inline.} =
result = int info.line
@@ -432,7 +432,7 @@ proc addSourceLine(conf: ConfigRef; fileIdx: FileIndex, line: string) =
proc sourceLine*(conf: ConfigRef; i: TLineInfo): string =
if i.fileIndex.int32 < 0: return ""
if not optPreserveOrigSource(conf) and conf.m.fileInfos[i.fileIndex.int32].lines.len == 0:
if conf.m.fileInfos[i.fileIndex.int32].lines.len == 0:
try:
for line in lines(toFullPath(conf, i)):
addSourceLine conf, i.fileIndex, line.string

View File

@@ -77,6 +77,7 @@ type # please make sure we have under 32 options
optExcessiveStackTrace # fully qualified module filenames
optShowAllMismatches # show all overloading resolution candidates
optWholeProject # for 'doc2': output any dependency
optDocInternal # generate documentation for non-exported symbols
optMixedMode # true if some module triggered C++ codegen
optListFullPaths # use full paths in toMsgFilename, toFilename
optNoNimblePath

View File

@@ -450,17 +450,18 @@ proc semMacroExpr(c: PContext, n, nOrig: PNode, sym: PSym,
flags: TExprFlags = {}): PNode =
pushInfoContext(c.config, nOrig.info, sym.detailedInfo)
markUsed(c.config, n.info, sym, c.graph.usageSym)
onUse(n.info, sym)
let info = getCallLineInfo(n)
markUsed(c.config, info, sym, c.graph.usageSym)
onUse(info, sym)
if sym == c.p.owner:
globalError(c.config, n.info, "recursive dependency: '$1'" % sym.name.s)
globalError(c.config, info, "recursive dependency: '$1'" % sym.name.s)
let genericParams = if sfImmediate in sym.flags: 0
else: sym.ast[genericParamsPos].len
let suppliedParams = max(n.safeLen - 1, 0)
if suppliedParams < genericParams:
globalError(c.config, n.info, errMissingGenericParamsForTemplate % n.renderTree)
globalError(c.config, info, errMissingGenericParamsForTemplate % n.renderTree)
#if c.evalContext == nil:
# c.evalContext = c.createEvalContext(emStatic)

View File

@@ -462,16 +462,23 @@ proc updateDefaultParams(call: PNode) =
if nfDefaultRefsParam in def.flags: call.flags.incl nfDefaultRefsParam
call[i] = def
proc getCallLineInfo(n: PNode): TLineInfo =
case n.kind
of nkAccQuoted, nkBracketExpr, nkCall, nkCommand: getCallLineInfo(n.sons[0])
of nkDotExpr: getCallLineInfo(n.sons[1])
else: n.info
proc semResolvedCall(c: PContext, x: TCandidate,
n: PNode, flags: TExprFlags): PNode =
assert x.state == csMatch
var finalCallee = x.calleeSym
markUsed(c.config, n.sons[0].info, finalCallee, c.graph.usageSym)
onUse(n.sons[0].info, finalCallee)
let info = getCallLineInfo(n)
markUsed(c.config, info, finalCallee, c.graph.usageSym)
onUse(info, finalCallee)
assert finalCallee.ast != nil
if x.hasFauxMatch:
result = x.call
result.sons[0] = newSymNode(finalCallee, result.sons[0].info)
result.sons[0] = newSymNode(finalCallee, getCallLineInfo(result.sons[0]))
if containsGenericType(result.typ) or x.fauxMatch == tyUnknown:
result.typ = newTypeS(x.fauxMatch, c)
return
@@ -496,7 +503,7 @@ proc semResolvedCall(c: PContext, x: TCandidate,
result = x.call
instGenericConvertersSons(c, result, x)
result[0] = newSymNode(finalCallee, result[0].info)
result[0] = newSymNode(finalCallee, getCallLineInfo(result[0]))
result.typ = finalCallee.typ.sons[0]
updateDefaultParams(result)
@@ -551,7 +558,7 @@ proc semOverloadedCall(c: PContext, n, nOrig: PNode,
notFoundError(c, n, errors)
proc explicitGenericInstError(c: PContext; n: PNode): PNode =
localError(c.config, n.info, errCannotInstantiateX % renderTree(n))
localError(c.config, getCallLineInfo(n), errCannotInstantiateX % renderTree(n))
result = n
proc explicitGenericSym(c: PContext, n: PNode, s: PSym): PNode =
@@ -574,9 +581,10 @@ proc explicitGenericSym(c: PContext, n: PNode, s: PSym): PNode =
if tm in {isNone, isConvertible}: return nil
var newInst = generateInstance(c, s, m.bindings, n.info)
newInst.typ.flags.excl tfUnresolved
markUsed(c.config, n.info, s, c.graph.usageSym)
onUse(n.info, s)
result = newSymNode(newInst, n.info)
let info = getCallLineInfo(n)
markUsed(c.config, info, s, c.graph.usageSym)
onUse(info, s)
result = newSymNode(newInst, info)
proc explicitGenericInstantiation(c: PContext, n: PNode, s: PSym): PNode =
assert n.kind == nkBracketExpr
@@ -593,7 +601,7 @@ proc explicitGenericInstantiation(c: PContext, n: PNode, s: PSym): PNode =
# number of generic type parameters:
if safeLen(s.ast.sons[genericParamsPos]) != n.len-1:
let expected = safeLen(s.ast.sons[genericParamsPos])
localError(c.config, n.info, errGenerated, "cannot instantiate: '" & renderTree(n) &
localError(c.config, getCallLineInfo(n), errGenerated, "cannot instantiate: '" & renderTree(n) &
"'; got " & $(n.len-1) & " type(s) but expected " & $expected)
return n
result = explicitGenericSym(c, n, s)
@@ -602,7 +610,7 @@ proc explicitGenericInstantiation(c: PContext, n: PNode, s: PSym): PNode =
# choose the generic proc with the proper number of type parameters.
# XXX I think this could be improved by reusing sigmatch.paramTypesMatch.
# It's good enough for now.
result = newNodeI(a.kind, n.info)
result = newNodeI(a.kind, getCallLineInfo(n))
for i in countup(0, len(a)-1):
var candidate = a.sons[i].sym
if candidate.kind in {skProc, skMethod, skConverter,

View File

@@ -24,15 +24,18 @@ const
proc semTemplateExpr(c: PContext, n: PNode, s: PSym,
flags: TExprFlags = {}): PNode =
markUsed(c.config, n.info, s, c.graph.usageSym)
onUse(n.info, s)
let info = getCallLineInfo(n)
markUsed(c.config, info, s, c.graph.usageSym)
onUse(info, s)
# Note: This is n.info on purpose. It prevents a template from creating an info
# context when called from another template
pushInfoContext(c.config, n.info, s.detailedInfo)
result = evalTemplate(n, s, getCurrOwner(c), c.config, efFromHlo in flags)
if efNoSemCheck notin flags: result = semAfterMacroCall(c, n, result, s, flags)
popInfoContext(c.config)
# XXX: A more elaborate line info rewrite might be needed
result.info = n.info
result.info = info
proc semFieldAccess(c: PContext, n: PNode, flags: TExprFlags = {}): PNode
@@ -1084,8 +1087,9 @@ proc semSym(c: PContext, n: PNode, sym: PSym, flags: TExprFlags): PNode =
if efNoEvaluateGeneric in flags and s.ast[genericParamsPos].len > 0 or
(n.kind notin nkCallKinds and s.requiredParams > 0) or
sfCustomPragma in sym.flags:
markUsed(c.config, n.info, s, c.graph.usageSym)
onUse(n.info, s)
let info = getCallLineInfo(n)
markUsed(c.config, info, s, c.graph.usageSym)
onUse(info, s)
result = symChoice(c, n, s, scClosed)
else:
result = semTemplateExpr(c, n, s, flags)
@@ -1171,9 +1175,10 @@ proc semSym(c: PContext, n: PNode, sym: PSym, flags: TExprFlags): PNode =
onUse(n.info, s)
result = newSymNode(s, n.info)
else:
markUsed(c.config, n.info, s, c.graph.usageSym)
onUse(n.info, s)
result = newSymNode(s, n.info)
let info = getCallLineInfo(n)
markUsed(c.config, info, s, c.graph.usageSym)
onUse(info, s)
result = newSymNode(s, info)
proc builtinFieldAccess(c: PContext, n: PNode, flags: TExprFlags): PNode =
## returns nil if it's not a built-in field access
@@ -1286,7 +1291,11 @@ proc builtinFieldAccess(c: PContext, n: PNode, flags: TExprFlags): PNode =
if ty.sons[0] == nil: break
ty = skipTypes(ty.sons[0], skipPtrs)
if f != nil:
if fieldVisible(c, f):
let visibilityCheckNeeded =
if n[1].kind == nkSym and n[1].sym == f:
false # field lookup was done already, likely by hygienic template or bindSym
else: true
if not visibilityCheckNeeded or fieldVisible(c, f):
# is the access to a public field or in the same module or in a friend?
markUsed(c.config, n.sons[1].info, f, c.graph.usageSym)
onUse(n.sons[1].info, f)

View File

@@ -410,4 +410,9 @@ proc magicsAfterOverloadResolution(c: PContext, n: PNode,
result = n
else:
result = plugin(c, n)
of mNewFinalize:
# Make sure the finalizer argument is a direct reference to a procedure
if n[^1].kind == nkSym and n[^1].sym.kind notin {skProc, skFunc}:
localError(c.config, n.info, "finalizer must be a direct reference to a procedure")
result = n
else: result = n
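
A hedged user-level sketch of what the new `mNewFinalize` check means (not part of this diff; the names `Obj`, `fin` and `finalizerVar` are illustrative):

    type Obj = ref object
    proc fin(o: Obj) = discard     # a plain proc: accepted as finalizer

    var a: Obj
    new(a, fin)                    # fine: direct reference to a procedure

    let finalizerVar = fin
    var b: Obj
    # new(b, finalizerVar)         # now rejected: "finalizer must be a direct reference to a procedure"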

View File

@@ -353,6 +353,8 @@ proc trackTryStmt(tracked: PEffects, n: PNode) =
var branches = 1
var hasFinally = false
# Collect the exceptions caught by the except branches
for i in 1 ..< n.len:
let b = n.sons[i]
let blen = sonsLen(b)
@@ -368,12 +370,18 @@ proc trackTryStmt(tracked: PEffects, n: PNode) =
else:
assert(b.sons[j].kind == nkType)
catches(tracked, b.sons[j].typ)
else:
assert b.kind == nkFinally
# Add any other exception raised in the except bodies
for i in 1 ..< n.len:
let b = n.sons[i]
let blen = sonsLen(b)
if b.kind == nkExceptBranch:
setLen(tracked.init, oldState)
track(tracked, b.sons[blen-1])
for i in oldState..<tracked.init.len:
addToIntersection(inter, tracked.init[i])
else:
assert b.kind == nkFinally
setLen(tracked.init, oldState)
track(tracked, b.sons[blen-1])
hasFinally = true
@@ -1013,7 +1021,7 @@ proc trackProc*(g: ModuleGraph; s: PSym, body: PNode) =
"declared lock level is $1, but real lock level is $2" %
[$s.typ.lockLevel, $t.maxLockLevel])
when defined(useDfa):
if s.kind == skFunc:
if s.name.s == "testp":
dataflowAnalysis(s, body)
when false: trackWrites(s, body)

View File

@@ -279,7 +279,8 @@ proc semTry(c: PContext, n: PNode; flags: TExprFlags): PNode =
for i in 1..last:
var it = n.sons[i]
let j = it.len-1
it.sons[j] = fitNode(c, typ, it.sons[j], it.sons[j].info)
if not endsInNoReturn(it.sons[j]):
it.sons[j] = fitNode(c, typ, it.sons[j], it.sons[j].info)
result.typ = typ
proc fitRemoveHiddenConv(c: PContext, typ: PType, n: PNode): PNode =
@@ -432,7 +433,6 @@ proc setVarType(c: PContext; v: PSym, typ: PType) =
proc semVarOrLet(c: PContext, n: PNode, symkind: TSymKind): PNode =
var b: PNode
result = copyNode(n)
var hasCompileTime = false
for i in countup(0, sonsLen(n)-1):
var a = n.sons[i]
if c.config.cmd == cmdIdeTools: suggestStmt(c, a)
@@ -440,11 +440,11 @@ proc semVarOrLet(c: PContext, n: PNode, symkind: TSymKind): PNode =
if a.kind notin {nkIdentDefs, nkVarTuple, nkConstDef}: illFormedAst(a, c.config)
checkMinSonsLen(a, 3, c.config)
var length = sonsLen(a)
var typ: PType
var typ: PType = nil
if a.sons[length-2].kind != nkEmpty:
typ = semTypeNode(c, a.sons[length-2], nil)
else:
typ = nil
var def: PNode = c.graph.emptyNode
if a.sons[length-1].kind != nkEmpty:
def = semExprWithType(c, a.sons[length-1], {efAllowDestructor})
@@ -556,13 +556,12 @@ proc semVarOrLet(c: PContext, n: PNode, symkind: TSymKind): PNode =
else: v.typ = tup
b.sons[j] = newSymNode(v)
checkNilable(c, v)
if sfCompileTime in v.flags: hasCompileTime = true
if sfCompileTime in v.flags:
var x = newNodeI(result.kind, v.info)
addSon(x, result[i])
vm.setupCompileTimeVar(c.module, c.graph, x)
if v.flags * {sfGlobal, sfThread} == {sfGlobal}:
message(c.config, v.info, hintGlobalVar)
if hasCompileTime:
vm.setupCompileTimeVar(c.module, c.graph, result)
# handled by the VM codegen:
#c.graph.recordStmt(c.graph, c.module, result)
proc semConst(c: PContext, n: PNode): PNode =
result = copyNode(n)
@@ -583,9 +582,19 @@ proc semConst(c: PContext, n: PNode): PNode =
if def == nil:
localError(c.config, a.sons[length-1].info, errConstExprExpected)
continue
if def.typ.kind == tyTypeDesc and c.p.owner.kind != skMacro:
# prevent the all too common 'const x = int' bug:
localError(c.config, def.info, "'typedesc' metatype is not valid here; typed '=' instead of ':'?")
def.typ = errorType(c)
# check type compatibility between def.typ and typ:
if typ != nil:
def = fitRemoveHiddenConv(c, typ, def)
if typ.isMetaType:
def = inferWithMetatype(c, typ, def)
typ = def.typ
else:
def = fitRemoveHiddenConv(c, typ, def)
else:
typ = def.typ
if typ == nil:

View File

@@ -58,25 +58,26 @@ proc symChoice(c: PContext, n: PNode, s: PSym, r: TSymChoiceRule): PNode =
inc(i)
if i > 1: break
a = nextOverloadIter(o, c, n)
let info = getCallLineInfo(n)
if i <= 1 and r != scForceOpen:
# XXX this makes more sense but breaks bootstrapping for now:
# (s.kind notin routineKinds or s.magic != mNone):
# for instance 'nextTry' is both in tables.nim and astalgo.nim ...
result = newSymNode(s, n.info)
markUsed(c.config, n.info, s, c.graph.usageSym)
onUse(n.info, s)
result = newSymNode(s, info)
markUsed(c.config, info, s, c.graph.usageSym)
onUse(info, s)
else:
# semantic checking requires a type; ``fitNode`` deals with it
# appropriately
let kind = if r == scClosed or n.kind == nkDotExpr: nkClosedSymChoice
else: nkOpenSymChoice
result = newNodeIT(kind, n.info, newTypeS(tyNone, c))
result = newNodeIT(kind, info, newTypeS(tyNone, c))
a = initOverloadIter(o, c, n)
while a != nil:
if a.kind != skModule:
incl(a.flags, sfUsed)
addSon(result, newSymNode(a, n.info))
onUse(n.info, a)
addSon(result, newSymNode(a, info))
onUse(info, a)
a = nextOverloadIter(o, c, n)
proc semBindStmt(c: PContext, n: PNode, toBind: var IntSet): PNode =

View File

@@ -1015,8 +1015,8 @@ proc liftParamType(c: PContext, procKind: TSymKind, genericParams: PNode,
result = addImplicitGeneric(copyType(paramType, getCurrOwner(c), false))
of tyGenericParam:
markUsed(c.config, info, paramType.sym, c.graph.usageSym)
onUse(info, paramType.sym)
markUsed(c.config, paramType.sym.info, paramType.sym, c.graph.usageSym)
onUse(paramType.sym.info, paramType.sym)
if tfWildcard in paramType.flags:
paramType.flags.excl tfWildcard
paramType.sym.kind = skType
@@ -1474,7 +1474,7 @@ proc semTypeNode(c: PContext, n: PNode, prev: PType): PType =
if c.config.cmd == cmdIdeTools: suggestExpr(c, n)
case n.kind
of nkEmpty: discard
of nkEmpty: result = n.typ
of nkTypeOfExpr:
# for ``type(countup(1,3))``, see ``tests/ttoseq``.
checkSonsLen(n, 1, c.config)

View File

@@ -490,7 +490,12 @@ proc replaceTypeVarsTAux(cl: var TReplTypeVars, t: PType): PType =
result.kind = tyUserTypeClassInst
of tyGenericBody:
localError(cl.c.config, cl.info, "cannot instantiate: '" & typeToString(t) & "'")
localError(
cl.c.config,
cl.info,
"cannot instantiate: '" &
typeToString(t, preferDesc) &
"'; Maybe generic arguments are missing?")
result = errorType(cl.c)
#result = replaceTypeVarsT(cl, lastSon(t))
@@ -555,6 +560,14 @@ proc replaceTypeVarsTAux(cl: var TReplTypeVars, t: PType): PType =
for i in countup(0, sonsLen(result) - 1):
if result.sons[i] != nil:
if result.sons[i].kind == tyGenericBody:
localError(
cl.c.config,
t.sym.info,
"cannot instantiate '" &
typeToString(result.sons[i], preferDesc) &
"' inside of type definition: '" &
t.owner.name.s & "'; Maybe generic arguments are missing?")
var r = replaceTypeVarsT(cl, result.sons[i])
if result.kind == tyObject:
# carefully coded to not skip the precious tyGenericInst:

View File

@@ -1316,7 +1316,7 @@ proc typeRelImpl(c: var TCandidate, f, aOrig: PType,
if typeRel(c, f.sons[i], a.sons[i]) == isNone: return isNone
result = typeRel(c, f.lastSon, a.lastSon, flags + {trNoCovariance})
subtypeCheck()
if result <= isConvertible: result = isNone
if result <= isIntConv: result = isNone
elif tfNotNil in f.flags and tfNotNil notin a.flags:
result = isNilConversion
elif a.kind == tyNil: result = f.allowsNil

View File

@@ -33,7 +33,7 @@
# included from sigmatch.nim
import algorithm, prefixmatches, lineinfos, pathutils
from wordrecg import wDeprecated, wError
from wordrecg import wDeprecated, wError, wAddr, wYield, specialWords
when defined(nimsuggest):
import passes, tables # importer
@@ -109,7 +109,11 @@ proc symToSuggest(conf: ConfigRef; s: PSym, isLocal: bool, section: IdeCmd, info
result.qualifiedPath.add(ow2.origModuleName)
if ow != nil:
result.qualifiedPath.add(ow.origModuleName)
result.qualifiedPath.add(s.name.s)
if s.name.s[0] in OpChars + {'[', '{', '('} or
s.name.id in ord(wAddr)..ord(wYield):
result.qualifiedPath.add('`' & s.name.s & '`')
else:
result.qualifiedPath.add(s.name.s)
if s.typ != nil:
result.forth = typeToString(s.typ)

View File

@@ -456,12 +456,18 @@ proc typeToString(typ: PType, prefer: TPreferedDesc = preferName): string =
result = $t.n.intVal
else:
result = "int literal(" & $t.n.intVal & ")"
of tyGenericBody, tyGenericInst, tyGenericInvocation:
of tyGenericInst, tyGenericInvocation:
result = typeToString(t.sons[0]) & '['
for i in countup(1, sonsLen(t)-1-ord(t.kind != tyGenericInvocation)):
if i > 1: add(result, ", ")
add(result, typeToString(t.sons[i], preferGenericArg))
add(result, ']')
of tyGenericBody:
result = typeToString(t.lastSon) & '['
for i in countup(0, sonsLen(t)-2):
if i > 0: add(result, ", ")
add(result, typeToString(t.sons[i], preferTypeName))
add(result, ']')
of tyTypeDesc:
if t.sons[0].kind == tyNone: result = "typedesc"
else: result = "type " & typeToString(t.sons[0])
@@ -612,7 +618,6 @@ proc typeToString(typ: PType, prefer: TPreferedDesc = preferName): string =
result = typeToStr[t.kind]
result.addTypeFlags(t)
proc firstOrd*(conf: ConfigRef; t: PType): BiggestInt =
case t.kind
of tyBool, tyChar, tySequence, tyOpenArray, tyString, tyVarargs, tyProxy:

View File

@@ -399,6 +399,11 @@ proc opConv(c: PCtx; dest: var TFullReg, src: TFullReg, desttyp, srctyp: PType):
dest.floatVal = toBiggestFloat(src.intVal)
else:
dest.floatVal = src.floatVal
of tyObject:
if srctyp.skipTypes(abstractRange).kind != tyObject:
internalError(c.config, "invalid object-to-object conversion")
# An object-to-object conversion is essentially a no-op
moveConst(dest, src)
else:
asgnComplex(dest, src)
@@ -929,7 +934,8 @@ proc rawExecute(c: PCtx, start: int, tos: PStackFrame): TFullReg =
stackTrace(c, tos, pc, errNilAccess)
of opcGetImpl:
decodeB(rkNode)
let a = regs[rb].node
var a = regs[rb].node
if a.kind == nkVarTy: a = a[0]
if a.kind == nkSym:
regs[ra].node = if a.sym.ast.isNil: newNode(nkNilLit)
else: copyTree(a.sym.ast)

View File

@@ -326,8 +326,14 @@ proc genWhile(c: PCtx; n: PNode) =
c.patch(L2)
proc genBlock(c: PCtx; n: PNode; dest: var TDest) =
let oldRegisterCount = c.prc.maxSlots
withBlock(n.sons[0].sym):
c.gen(n.sons[1], dest)
for i in oldRegisterCount ..< c.prc.maxSlots:
if c.prc.slots[i].kind in {slotFixedVar, slotFixedLet}:
c.prc.slots[i] = (inUse: false, kind: slotEmpty)
c.clearDest(n, dest)
proc genBreak(c: PCtx; n: PNode) =

View File

@@ -58,6 +58,7 @@ Advanced options:
--project document the whole project (doc2)
--docSeeSrcUrl:url activate 'see source' for doc and doc2 commands
(see doc.item.seesrc in config/nimdoc.cfg)
--docInternal also generate documentation for non-exported symbols
--lineDir:on|off generation of #line directive on|off
--embedsrc:on|off embeds the original source code as comments
in the generated output
@@ -97,8 +98,9 @@ Advanced options:
symbol matching is fuzzy so
that --dynlibOverride:lua matches
dynlib: "liblua.so.3"
--dynlibOverrideAll:on|off makes the dynlib pragma have no effect
--listCmd:on|off list the commands used to execute external programs
--dynlibOverrideAll
disables the effects of the dynlib pragma
--listCmd list the commands used to execute external programs
--parallelBuild:0|1|... perform a parallel build
value = number of processors (0 for auto-detect)
--incremental:on|off only recompile the changed modules (experimental!)

View File

@@ -35,7 +35,7 @@ Options:
--debugger:native|endb use native debugger (gdb) | ENDB (experimental)
--app:console|gui|lib|staticlib
generate a console app|GUI app|DLL|static library
-r, --run:on|off run the compiled program with given arguments
-r, --run run the compiled program with given arguments
--fullhelp show all command line switches
-h, --help show this help

View File

@@ -1,6 +1,10 @@
============
Contributing
============
.. contents::
Contributing happens via "Pull requests" (PRs) on GitHub. Every PR needs to be
reviewed before it can be merged, and the Continuous Integration should be green.
@@ -60,16 +64,17 @@ Compiler
--------
The tests for the compiler use a testing tool called ``testament``. They are all
located in ``tests/`` (eg: ``tests/destructor/tdestructor3.nim``).
located in ``tests/`` (e.g.: ``tests/destructor/tdestructor3.nim``).
Each test has its own file. All test files are prefixed with ``t``. If you want
to create a file for import into another test only, use the prefix ``m``.
At the beginning of every test is the expected behavior of the test.
Possible keys are:
- output: The expected output, most likely via ``echo``
- cmd: A compilation command template e.g. "nim $target --threads:on $options $file"
- output: The expected output (stdout + stderr), most likely via ``echo``
- exitcode: Exit code of the test (via ``exit(number)``)
- errormsg: The expected error message
- errormsg: The expected compiler error message
- file: The file the errormsg was produced at
- line: The line the errormsg was produced at
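For example, a minimal test file using these keys could look like this (the path ``tests/misc/thello.nim`` is only illustrative):

.. code-block:: nim

  discard """
    output: "hello"
    exitcode: 0
  """

  echo "hello"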
@@ -115,6 +120,13 @@ list of these, see ``testament/categories.nim``, at the bottom.
./koch tests c lib
To run a single test:
::
./koch tests c <category>/<name>
E.g. ``./koch test run stdlib/thttpclient_ssl``
For reproducible tests (to reproduce an environment more similar to the one
run by Continuous Integration on travis/appveyor), you may want to disable your
@@ -398,6 +410,26 @@ Code reviews
saves time explaining the change or applying it; see also
https://forum.nim-lang.org/t/4317
2. When reviewing large diffs that may involve code moving around, github's interface
doesn't help much as it doesn't highlight moves. Instead you can use something
like this, see visual results `here <https://github.com/nim-lang/Nim/pull/10431#issuecomment-456968196>`_:
.. code-block:: sh
git fetch origin pull/10431/head && git checkout FETCH_HEAD
git show --color-moved-ws=allow-indentation-change --color-moved=blocks HEAD^
3. In addition, you can view GitHub-like diffs locally to identify what was changed
within a code block using `diff-highlight` or `diff-so-fancy`, e.g.:
.. code-block:: sh
# put this in ~/.gitconfig:
[core]
pager = "diff-so-fancy | less -R" # or: use: `diff-highlight`
.. include:: docstyle.rst

View File

@@ -283,7 +283,7 @@ symbols in the `system module <system.html>`_.
* ``const NimVersion = "0.0.0"`` **=>**
`#NimVersion <system.html#NimVersion>`_
* ``proc getTotalMem(): int {.rtl, raises: [], tags: [].}`` **=>**
`#getTotalMem, <system.html#getTotalMem,>`_
`#getTotalMem, <system.html#getTotalMem>`_
* ``proc len[T](x: seq[T]): int {.magic: "LengthSeq", noSideEffect.}`` **=>**
`#len,seq[T] <system.html#len,seq[T]>`_
* ``iterator pairs[T](a: seq[T]): tuple[key: int, val: T] {.inline.}`` **=>**

View File

@@ -67,6 +67,8 @@ Core
* `lenientops <lenientops.html>`_
Provides binary operators for mixed integer/float expressions for convenience.
* `bitops <bitops.html>`_
Provides a series of low level methods for bit manipulation.
Collections and algorithms
@@ -74,26 +76,38 @@ Collections and algorithms
* `algorithm <algorithm.html>`_
Implements some common generic algorithms like sort or binary search.
* `tables <tables.html>`_
Nim hash table support. Contains tables, ordered tables and count tables.
* `sets <sets.html>`_
Nim hash and bit set support.
* `lists <lists.html>`_
Nim linked list support. Contains singly and doubly linked lists and
circular lists ("rings").
* `deques <deques.html>`_
Implementation of a double-ended queue.
The underlying implementation uses a ``seq``.
* `heapqueue <heapqueue.html>`_
Implementation of a heap data structure that can be used as a priority queue.
* `intsets <intsets.html>`_
Efficient implementation of a set of ints as a sparse bit set.
* `critbits <critbits.html>`_
This module implements a *crit bit tree* which is an efficient
container for a sorted set of strings, or for a sorted mapping of strings.
* `sequtils <sequtils.html>`_
This module implements operations for the built-in seq type
which were inspired by functional programming languages.
* `sharedtables <sharedtables.html>`_
Nim shared hash table support. Contains shared tables.
* `sharedlist <sharedlist.html>`_
Nim shared linked list support. Contains shared singly linked list.
@@ -129,6 +143,13 @@ String handling
* `unicode <unicode.html>`_
This module provides support to handle the Unicode UTF-8 encoding.
* `unidecode <unidecode.html>`_
It provides a single proc that does Unicode to ASCII transliterations.
Based on Python's Unidecode module.
* `punycode <punycode.html>`_
Implements a representation of Unicode with the limited ASCII character subset.
* `encodings <encodings.html>`_
Converts between different character encodings. On UNIX, this uses
the ``iconv`` library, on Windows the Windows API.
@@ -194,10 +215,16 @@ Generic Operating System Services
``asyncdispatch``.
* `distros <distros.html>`_
This module implements the basics for OS distribution ("distro") detection and the OS's native package manager.
Its primary purpose is to produce output for Nimble packages, but it also contains the widely used **Distribution** enum
This module implements the basics for OS distribution ("distro") detection
and the OS's native package manager.
Its primary purpose is to produce output for Nimble packages,
but it also contains the widely used **Distribution** enum
that is useful for writing platform specific code.
* `volatile <volatile.html>`_
This module contains code for generating volatile loads and stores,
which are useful in embedded and systems programming.
Math libraries
--------------
@@ -276,6 +303,7 @@ Internet Protocols and Support
This module implements a selector API with backends specific to each OS.
Currently epoll on Linux and select on other operating systems.
Parsers
-------
@@ -402,6 +430,7 @@ Miscellaneous
* `segfaults <segfaults.html>`_
Turns access violations or segfaults into a ``NilAccessError`` exception.
Modules for JS backend
----------------------
@@ -430,7 +459,6 @@ Regular expressions
expressions. The current implementation uses PCRE.
Database support
----------------
@@ -447,13 +475,13 @@ Database support
for other databases too.
Wrappers
========
The generated HTML for some of these wrappers is so huge that it is
not contained in the distribution. You can find it on the website instead.
Windows specific
----------------
@@ -509,5 +537,5 @@ Nimble is a package manager for the Nim programming language.
For instructions on how to install Nimble packages see
`its README <https://github.com/nim-lang/nimble#readme>`_.
To see a list of Nimble's packages, check out `https://nimble.directory/ <https://nimble.directory/>`_
To see a list of Nimble's packages, check out `<https://nimble.directory/>`_
or the `packages repo <https://github.com/nim-lang/packages>`_ on GitHub.

View File

@@ -6922,7 +6922,7 @@ statement as seen in stack backtraces:
template myassert*(cond: untyped, msg = "") =
if not cond:
# change run-time line information of the 'raise' statement:
{.line: InstantiationInfo().}:
{.line: instantiationInfo().}:
raise newException(EAssertionFailed, msg)
If the ``line`` pragma is used with a parameter, the parameter needs to be a

View File

@@ -362,6 +362,7 @@ Define Effect
``useRealtimeGC`` Enables support of Nim's GC for *soft* realtime
systems. See the documentation of the `gc <gc.html>`_
for further information.
``logGC`` Enable GC logging to stdout.
``nodejs`` The JS target is actually ``node.js``.
``ssl`` Enables OpenSSL support for the sockets module.
``memProfiler`` Enables memory profiling for the native GC.

69
doc/packaging.rst Normal file
View File

@@ -0,0 +1,69 @@
=============
Packaging Nim
=============
Supported architectures
-----------------------
Nim runs on a wide variety of platforms. Support on amd64 and i386 is tested regularly, while less popular platforms are tested by the community.
- amd64
- arm64 (aka aarch64)
- armel
- armhf
- i386
- m68k
- mips64el
- mipsel
- powerpc
- ppc64
- ppc64el (aka ppc64le)
- riscv64
The following platforms are seldom tested:
- alpha
- hppa
- ia64
- mips
- s390x
- sparc64
Packaging for Linux
-------------------
See https://github.com/nim-lang/Nim/labels/Installation for installation-related bugs.
Build Nim from the released tarball at https://nim-lang.org/install_unix.html
It differs from the GitHub sources in that it bundles Nimble, the C sources and other tools.
The Debian package ships bash and ksh completion and manpages that can be reused.
Hints on the build process:
::
# build from C sources and then using koch
./build.sh --os $os_type --cpu $cpu_arch
./bin/nim c koch
./koch boot -d:release
# optionally generate docs into doc/html
./koch docs
./koch tools -d:release
# extract files to be really installed
./install.sh <tempdir>
# also include the tools
for fn in nimble nimsuggest nimgrep; do cp ./bin/$fn <tempdir>/nim/bin/; done
What to install:
- The expected stdlib location is /usr/lib/nim
- Global configuration files under /etc/nim
- Optionally: manpages, documentation, shell completion
- When installing documentation, .idx files are not required
- The "compiler" directory contains compiler sources and should not be part of the compiler binary package

View File

@@ -80,6 +80,9 @@ let kochExe* = when isMainModule: os.getAppFilename() # always correct when koch
proc kochExec*(cmd: string) =
exec kochExe.quoteShell & " " & cmd
proc kochExecFold*(desc, cmd: string) =
execFold(desc, kochExe.quoteShell & " " & cmd)
template withDir(dir, body) =
let old = getCurrentDir()
try:
@@ -453,11 +456,11 @@ proc runCI(cmd: string) =
# note(@araq): Do not replace these commands with direct calls (eg boot())
# as that would weaken our testing efforts.
when defined(posix): # appveyor (on windows) didn't run this
kochExec "boot"
kochExec "boot -d:release"
kochExecFold("Boot", "boot")
kochExecFold("Boot in release mode", "boot -d:release")
## build nimble early on to enable remainder to depend on it if needed
kochExec "nimble"
kochExecFold("Build Nimble", "nimble")
when false:
for pkg in "zip opengl sdl1 jester@#head niminst".split:
@@ -466,23 +469,23 @@ proc runCI(cmd: string) =
buildTools() # alternatively, kochExec "tools --toolsNoNimble"
## run tests
exec "nim e tests/test_nimscript.nims"
execFold("Test nimscript", "nim e tests/test_nimscript.nims")
when defined(windows):
# note: will be over-written below
exec "nim c -d:nimCoroutines --os:genode -d:posix --compileOnly testament/tester"
execFold("Compile tester", "nim c -d:nimCoroutines --os:genode -d:posix --compileOnly testament/tester")
# main bottleneck here
exec "nim c -r -d:nimCoroutines testament/tester --pedantic all -d:nimCoroutines"
execFold("Run tester", "nim c -r -d:nimCoroutines testament/tester --pedantic all -d:nimCoroutines")
exec "nim c -r nimdoc/tester"
exec "nim c -r nimpretty/tester.nim"
execFold("Run nimdoc tests", "nim c -r nimdoc/tester")
execFold("Run nimpretty tests", "nim c -r nimpretty/tester.nim")
when defined(posix):
exec "nim c -r nimsuggest/tester"
execFold("Run nimsuggest tests", "nim c -r nimsuggest/tester")
## remaining actions
when defined(posix):
kochExec "docs --git.commit:devel"
kochExec "csource"
kochExecFold("Docs", "docs --git.commit:devel")
kochExecFold("C sources", "csource")
elif defined(windows):
when false:
kochExec "csource"

View File

@@ -1,711 +0,0 @@
#
#
# Nim's Runtime Library
# (c) Copyright 2012 Andreas Rumpf, Dominik Picheta
# See the file "copying.txt", included in this
# distribution, for details about the copyright.
#
include "system/inclrtl"
import sockets, os
##
## **Warning:** This module is deprecated since version 0.10.2.
## Use the brand new `asyncdispatch <asyncdispatch.html>`_ module together
## with the `asyncnet <asyncnet.html>`_ module.
## This module implements an asynchronous event loop together with asynchronous
## sockets which use this event loop.
## It is akin to Python's asyncore module. Many modules that use sockets
## have an implementation for this module; those modules should all have a
## ``register`` function which you should use to add the desired objects to a
## dispatcher which you created, so that you can receive the events
## associated with that module's object.
##
## Once everything is registered in a dispatcher, you need to call the ``poll``
## function in a while loop.
##
## **Note:** Most modules have tasks which need to be run regularly, which is
## why you should not call ``poll`` with an infinite timeout, or even a
## very long one. In most cases the default timeout is fine.
##
## **Note:** This module currently only supports select(); this is limited by
## FD_SETSIZE, which is usually 1024. So you may only be able to use 1024
## sockets at a time.
##
## Most (if not all) modules that use asyncio provide a userArg which is passed
## on with the events. The type that you set userArg to must inherit from
## ``RootObj``!
##
## **Note:** If you want to provide async ability to your module please do not
## use the ``Delegate`` object; use ``AsyncSocket`` instead. It is possible
## that in the future this type's fields will not be exported, thereby breaking
## your code.
##
## **Warning:** The API of this module is unstable, and therefore is subject
## to change.
##
## Asynchronous sockets
## ====================
##
## For most purposes you do not need to worry about the ``Delegate`` type. The
## ``AsyncSocket`` is what you are after. It's a reference to
## the ``AsyncSocketObj`` object. This object defines events which you should
## override with your own procedures.
##
## For server sockets the only event you need to worry about is the ``handleAccept``
## event. In your ``handleAccept`` proc you should call ``accept`` on the server
## socket, which will give you the client that is connecting. You should then
## set any events that you want to use on that client and add it to your dispatcher
## using the ``register`` procedure.
##
## An example ``handleAccept`` follows:
##
## .. code-block:: nim
##
## var disp = newDispatcher()
## ...
## proc handleAccept(s: AsyncSocket) =
## echo("Accepted client.")
## var client: AsyncSocket
## new(client)
## s.accept(client)
## client.handleRead = ...
## disp.register(client)
## ...
##
## For client sockets you should only be interested in the ``handleRead`` and
## ``handleConnect`` events. The former gets called whenever the socket has
## received messages and can be read from, and the latter gets called whenever
## the socket has established a connection to a server socket; from that point
## it can be safely written to.
##
## Getting a blocking client from an AsyncSocket
## =============================================
##
## If you need an asynchronous server socket but you wish to process the clients
## synchronously then you can use the ``getSocket`` converter to get
## a ``Socket`` from the ``AsyncSocket`` object; this can then be combined
## with ``accept`` like so:
##
## .. code-block:: nim
##
## proc handleAccept(s: AsyncSocket) =
## var client: Socket
## getSocket(s).accept(client)
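##
## Putting the pieces together, a minimal client sketch (the host name and
## port below are placeholders) looks roughly like this:
##
## .. code-block:: nim
##
##   var disp = newDispatcher()
##   var sock = asyncSocket()
##   sock.handleConnect =
##     proc (s: AsyncSocket) =
##       echo("Connected.")
##   sock.handleRead =
##     proc (s: AsyncSocket) =
##       var line = "".TaintedString
##       if s.readLine(line): echo(line)
##   sock.connect("example.org", Port(6667))
##   disp.register(sock)
##   while disp.poll(): discard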
{.deprecated.}
when defined(windows):
from winlean import TimeVal, SocketHandle, FD_SET, FD_ZERO, TFdSet,
FD_ISSET, select
else:
from posix import TimeVal, Time, Suseconds, SocketHandle, FD_SET, FD_ZERO,
TFdSet, FD_ISSET, select
type
DelegateObj* = object
fd*: SocketHandle
deleVal*: RootRef
handleRead*: proc (h: RootRef) {.nimcall, gcsafe.}
handleWrite*: proc (h: RootRef) {.nimcall, gcsafe.}
handleError*: proc (h: RootRef) {.nimcall, gcsafe.}
hasDataBuffered*: proc (h: RootRef): bool {.nimcall, gcsafe.}
open*: bool
task*: proc (h: RootRef) {.nimcall, gcsafe.}
mode*: FileMode
Delegate* = ref DelegateObj
Dispatcher* = ref DispatcherObj
DispatcherObj = object
delegates: seq[Delegate]
AsyncSocket* = ref AsyncSocketObj
AsyncSocketObj* = object of RootObj
socket: Socket
info: SocketStatus
handleRead*: proc (s: AsyncSocket) {.closure, gcsafe.}
handleWrite: proc (s: AsyncSocket) {.closure, gcsafe.}
handleConnect*: proc (s: AsyncSocket) {.closure, gcsafe.}
handleAccept*: proc (s: AsyncSocket) {.closure, gcsafe.}
handleTask*: proc (s: AsyncSocket) {.closure, gcsafe.}
lineBuffer: TaintedString ## Temporary storage for ``readLine``
sendBuffer: string ## Temporary storage for ``send``
sslNeedAccept: bool
proto: Protocol
deleg: Delegate
SocketStatus* = enum
SockIdle, SockConnecting, SockConnected, SockListening, SockClosed,
SockUDPBound
proc newDelegate*(): Delegate =
## Creates a new delegate.
new(result)
result.handleRead = (proc (h: RootRef) = discard)
result.handleWrite = (proc (h: RootRef) = discard)
result.handleError = (proc (h: RootRef) = discard)
result.hasDataBuffered = (proc (h: RootRef): bool = return false)
result.task = (proc (h: RootRef) = discard)
result.mode = fmRead
proc newAsyncSocket(): AsyncSocket =
new(result)
result.info = SockIdle
result.handleRead = (proc (s: AsyncSocket) = discard)
result.handleWrite = nil
result.handleConnect = (proc (s: AsyncSocket) = discard)
result.handleAccept = (proc (s: AsyncSocket) = discard)
result.handleTask = (proc (s: AsyncSocket) = discard)
result.lineBuffer = "".TaintedString
result.sendBuffer = ""
proc asyncSocket*(domain: Domain = AF_INET, typ: SockType = SOCK_STREAM,
protocol: Protocol = IPPROTO_TCP,
buffered = true): AsyncSocket =
## Initialises an AsyncSocket object. If a socket cannot be initialised
## OSError is raised.
result = newAsyncSocket()
result.socket = socket(domain, typ, protocol, buffered)
result.proto = protocol
if result.socket == invalidSocket: raiseOSError(osLastError())
result.socket.setBlocking(false)
proc toAsyncSocket*(sock: Socket, state: SocketStatus = SockConnected): AsyncSocket =
## Wraps an already initialized ``Socket`` into an AsyncSocket.
## This is useful if you want to use an already connected Socket as an
## asynchronous AsyncSocket in asyncio's event loop.
##
## ``state`` may be overridden, i.e. if ``sock`` is not connected it should be
## adjusted properly. By default it will be assumed that the socket is
## connected. Please note this is only applicable to TCP client sockets; if
## ``sock`` is a different type of socket ``state`` needs to be adjusted!!!
##
## ================ ================================================================
## Value Meaning
## ================ ================================================================
## SockIdle Socket has only just been initialised, not connected or closed.
## SockConnected Socket is connected to a server.
## SockConnecting Socket is in the process of connecting to a server.
## SockListening Socket is a server socket and is listening for connections.
## SockClosed Socket has been closed.
## SockUDPBound Socket is a UDP socket which is listening for data.
## ================ ================================================================
##
## **Warning**: If ``state`` is set incorrectly the resulting ``AsyncSocket``
## object may not work properly.
##
## **Note**: This will set ``sock`` to be non-blocking.
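##
## A minimal usage sketch (``disp`` is assumed to be a previously created
## dispatcher):
##
## .. code-block:: nim
##
##   var s = socket()
##   s.connect("example.org", Port(80))
##   var asock = s.toAsyncSocket()
##   disp.register(asock)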
result = newAsyncSocket()
result.socket = sock
result.proto = if state == SockUDPBound: IPPROTO_UDP else: IPPROTO_TCP
result.socket.setBlocking(false)
result.info = state
proc asyncSockHandleRead(h: RootRef) =
when defined(ssl):
if AsyncSocket(h).socket.isSSL and not
AsyncSocket(h).socket.gotHandshake:
return
if AsyncSocket(h).info != SockListening:
if AsyncSocket(h).info != SockConnecting:
AsyncSocket(h).handleRead(AsyncSocket(h))
else:
AsyncSocket(h).handleAccept(AsyncSocket(h))
proc close*(sock: AsyncSocket) {.gcsafe.}
proc asyncSockHandleWrite(h: RootRef) =
when defined(ssl):
if AsyncSocket(h).socket.isSSL and not
AsyncSocket(h).socket.gotHandshake:
return
if AsyncSocket(h).info == SockConnecting:
AsyncSocket(h).handleConnect(AsyncSocket(h))
AsyncSocket(h).info = SockConnected
# Stop receiving write events if there is no handleWrite event.
if AsyncSocket(h).handleWrite == nil:
AsyncSocket(h).deleg.mode = fmRead
else:
AsyncSocket(h).deleg.mode = fmReadWrite
else:
if AsyncSocket(h).sendBuffer != "":
let sock = AsyncSocket(h)
try:
let bytesSent = sock.socket.sendAsync(sock.sendBuffer)
if bytesSent == 0:
# Apparently the socket cannot be written to. Even though select
# just told us that it can be... This used to be an assert. Just
# do nothing instead.
discard
elif bytesSent != sock.sendBuffer.len:
sock.sendBuffer = sock.sendBuffer[bytesSent .. ^1]
elif bytesSent == sock.sendBuffer.len:
sock.sendBuffer = ""
if AsyncSocket(h).handleWrite != nil:
AsyncSocket(h).handleWrite(AsyncSocket(h))
except OSError:
# Most likely the socket closed before the full buffer could be sent to it.
sock.close() # TODO: Provide a handleError for users?
else:
if AsyncSocket(h).handleWrite != nil:
AsyncSocket(h).handleWrite(AsyncSocket(h))
else:
AsyncSocket(h).deleg.mode = fmRead
when defined(ssl):
proc asyncSockDoHandshake(h: RootRef) {.gcsafe.} =
if AsyncSocket(h).socket.isSSL and not
AsyncSocket(h).socket.gotHandshake:
if AsyncSocket(h).sslNeedAccept:
var d = ""
let ret = AsyncSocket(h).socket.acceptAddrSSL(AsyncSocket(h).socket, d)
assert ret != AcceptNoClient
if ret == AcceptSuccess:
AsyncSocket(h).info = SockConnected
else:
# handshake will set socket's ``sslNoHandshake`` field.
discard AsyncSocket(h).socket.handshake()
proc asyncSockTask(h: RootRef) =
when defined(ssl):
h.asyncSockDoHandshake()
AsyncSocket(h).handleTask(AsyncSocket(h))
proc toDelegate(sock: AsyncSocket): Delegate =
result = newDelegate()
result.deleVal = sock
result.fd = getFD(sock.socket)
# We need this to get write events, just to know when the socket connects.
result.mode = fmReadWrite
result.handleRead = asyncSockHandleRead
result.handleWrite = asyncSockHandleWrite
result.task = asyncSockTask
# TODO: Errors?
#result.handleError = (proc (h: PObject) = assert(false))
result.hasDataBuffered =
proc (h: RootRef): bool {.nimcall.} =
return AsyncSocket(h).socket.hasDataBuffered()
sock.deleg = result
if sock.info notin {SockIdle, SockClosed}:
sock.deleg.open = true
else:
sock.deleg.open = false
proc connect*(sock: AsyncSocket, name: string, port = Port(0),
af: Domain = AF_INET) =
## Begins connecting ``sock`` to ``name``:``port``.
sock.socket.connectAsync(name, port, af)
sock.info = SockConnecting
if sock.deleg != nil:
sock.deleg.open = true
proc close*(sock: AsyncSocket) =
## Closes ``sock``. Terminates any current connections.
sock.socket.close()
sock.info = SockClosed
if sock.deleg != nil:
sock.deleg.open = false
proc bindAddr*(sock: AsyncSocket, port = Port(0), address = "") =
## Equivalent to ``sockets.bindAddr``.
sock.socket.bindAddr(port, address)
if sock.proto == IPPROTO_UDP:
sock.info = SockUDPBound
if sock.deleg != nil:
sock.deleg.open = true
proc listen*(sock: AsyncSocket) =
## Equivalent to ``sockets.listen``.
sock.socket.listen()
sock.info = SockListening
if sock.deleg != nil:
sock.deleg.open = true
proc acceptAddr*(server: AsyncSocket, client: var AsyncSocket,
address: var string) =
## Equivalent to ``sockets.acceptAddr``. This procedure should be called in
## a ``handleAccept`` event handler **only** once.
##
## **Note**: ``client`` needs to be initialised.
assert(client != nil)
client = newAsyncSocket()
var c: Socket
new(c)
when defined(ssl):
if server.socket.isSSL:
var ret = server.socket.acceptAddrSSL(c, address)
# The following shouldn't happen because when this function is called
# it is guaranteed that there is a client waiting.
# (This should be called in handleAccept)
assert(ret != AcceptNoClient)
if ret == AcceptNoHandshake:
client.sslNeedAccept = true
else:
client.sslNeedAccept = false
client.info = SockConnected
else:
server.socket.acceptAddr(c, address)
client.sslNeedAccept = false
client.info = SockConnected
else:
server.socket.acceptAddr(c, address)
client.sslNeedAccept = false
client.info = SockConnected
if c == invalidSocket: raiseSocketError(server.socket)
c.setBlocking(false) # TODO: Needs to be tested.
# deleg.open is set in ``toDelegate``.
client.socket = c
client.lineBuffer = "".TaintedString
client.sendBuffer = ""
client.info = SockConnected
proc accept*(server: AsyncSocket, client: var AsyncSocket) =
## Equivalent to ``sockets.accept``.
var dummyAddr = ""
server.acceptAddr(client, dummyAddr)
proc acceptAddr*(server: AsyncSocket): tuple[sock: AsyncSocket,
address: string] {.deprecated.} =
## Equivalent to ``sockets.acceptAddr``.
##
## **Deprecated since version 0.9.0:** Please use the function above.
var client = newAsyncSocket()
var address: string = ""
acceptAddr(server, client, address)
return (client, address)
proc accept*(server: AsyncSocket): AsyncSocket {.deprecated.} =
## Equivalent to ``sockets.accept``.
##
## **Deprecated since version 0.9.0:** Please use the function above.
new(result)
var address = ""
server.acceptAddr(result, address)
proc newDispatcher*(): Dispatcher =
new(result)
result.delegates = @[]
proc register*(d: Dispatcher, deleg: Delegate) =
## Registers delegate ``deleg`` with dispatcher ``d``.
d.delegates.add(deleg)
proc register*(d: Dispatcher, sock: AsyncSocket): Delegate {.discardable.} =
## Registers async socket ``sock`` with dispatcher ``d``.
result = sock.toDelegate()
d.register(result)
proc unregister*(d: Dispatcher, deleg: Delegate) =
## Unregisters deleg ``deleg`` from dispatcher ``d``.
for i in 0..len(d.delegates)-1:
if d.delegates[i] == deleg:
d.delegates.del(i)
return
raise newException(IndexError, "Could not find delegate.")
proc isWriteable*(s: AsyncSocket): bool =
## Determines whether socket ``s`` is ready to be written to.
var writeSock = @[s.socket]
return selectWrite(writeSock, 1) != 0 and s.socket notin writeSock
converter getSocket*(s: AsyncSocket): Socket =
return s.socket
proc isConnected*(s: AsyncSocket): bool =
## Determines whether ``s`` is connected.
return s.info == SockConnected
proc isListening*(s: AsyncSocket): bool =
## Determines whether ``s`` is listening for incoming connections.
return s.info == SockListening
proc isConnecting*(s: AsyncSocket): bool =
## Determines whether ``s`` is connecting.
return s.info == SockConnecting
proc isClosed*(s: AsyncSocket): bool =
## Determines whether ``s`` has been closed.
return s.info == SockClosed
proc isSendDataBuffered*(s: AsyncSocket): bool =
## Determines whether ``s`` has data waiting to be sent, i.e. whether this
## socket's sendBuffer contains data.
return s.sendBuffer.len != 0
proc setHandleWrite*(s: AsyncSocket,
handleWrite: proc (s: AsyncSocket) {.closure, gcsafe.}) =
## Setter for the ``handleWrite`` event.
##
## To remove this event you should use the ``delHandleWrite`` function.
## It is advised to use that function instead of just setting the event to
## ``proc (s: AsyncSocket) = nil`` as that would mean that that function
## would be called constantly.
s.deleg.mode = fmReadWrite
s.handleWrite = handleWrite
proc delHandleWrite*(s: AsyncSocket) =
## Removes the ``handleWrite`` event handler on ``s``.
s.handleWrite = nil
{.push warning[deprecated]: off.}
proc recvLine*(s: AsyncSocket, line: var TaintedString): bool {.deprecated.} =
## Behaves similarly to ``sockets.recvLine``; however, it handles non-blocking
## sockets properly. This function guarantees that ``line`` is a full line;
## if it can only retrieve some data, it will save this data and
## add it to the result when a full line is retrieved.
##
## Unlike ``sockets.recvLine`` this function will raise an OSError or SslError
## exception if an error occurs.
##
## **Deprecated since version 0.9.2**: This function has been deprecated in
## favour of readLine.
setLen(line.string, 0)
var dataReceived = "".TaintedString
var ret = s.socket.recvLineAsync(dataReceived)
case ret
of RecvFullLine:
if s.lineBuffer.len > 0:
string(line).add(s.lineBuffer.string)
setLen(s.lineBuffer.string, 0)
string(line).add(dataReceived.string)
if string(line) == "":
line = "\c\L".TaintedString
result = true
of RecvPartialLine:
string(s.lineBuffer).add(dataReceived.string)
result = false
of RecvDisconnected:
result = true
of RecvFail:
s.raiseSocketError(async = true)
result = false
{.pop.}
proc readLine*(s: AsyncSocket, line: var TaintedString): bool =
## Behaves similarly to ``sockets.readLine``; however, it handles non-blocking
## sockets properly. This function guarantees that ``line`` is a full line;
## if it can only retrieve some data, it will save this data and
## add it to the result when a full line is retrieved, in which case
## ``false`` will be returned. ``true`` will only be returned if a full line has
## been retrieved or the socket has been disconnected, in which case ``line``
## will be set to "".
##
## This function will raise an OSError exception when a socket error occurs.
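##
## A typical ``handleRead`` handler built on top of this proc is sketched
## below (``client`` being an already registered ``AsyncSocket``); an empty
## ``line`` after a ``true`` result signals a disconnect:
##
## .. code-block:: nim
##
##   client.handleRead =
##     proc (s: AsyncSocket) =
##       var line = "".TaintedString
##       if s.readLine(line):
##         if line.string == "":
##           s.close()
##         else:
##           echo(line)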
setLen(line.string, 0)
var dataReceived = "".TaintedString
var ret = s.socket.readLineAsync(dataReceived)
case ret
of ReadFullLine:
if s.lineBuffer.len > 0:
string(line).add(s.lineBuffer.string)
setLen(s.lineBuffer.string, 0)
string(line).add(dataReceived.string)
if string(line) == "":
line = "\c\L".TaintedString
result = true
of ReadPartialLine:
string(s.lineBuffer).add(dataReceived.string)
result = false
of ReadNone:
result = false
of ReadDisconnected:
result = true
proc send*(sock: AsyncSocket, data: string) =
## Sends ``data`` to socket ``sock``. This is basically a nicer implementation
## of ``sockets.sendAsync``.
##
## If ``data`` cannot be sent immediately it will be buffered and sent
## when ``sock`` becomes writeable (during the ``handleWrite`` event).
## It's possible that only a part of ``data`` will be sent immediately, while
## the rest of it will be buffered and sent later.
if sock.sendBuffer.len != 0:
sock.sendBuffer.add(data)
return
let bytesSent = sock.socket.sendAsync(data)
assert bytesSent >= 0
if bytesSent == 0:
sock.sendBuffer.add(data)
sock.deleg.mode = fmReadWrite
elif bytesSent != data.len:
sock.sendBuffer.add(data[bytesSent .. ^1])
sock.deleg.mode = fmReadWrite
proc timeValFromMilliseconds(timeout = 500): Timeval =
if timeout != -1:
var seconds = timeout div 1000
when defined(posix):
result.tv_sec = seconds.Time
result.tv_usec = ((timeout - seconds * 1000) * 1000).Suseconds
else:
result.tv_sec = seconds.int32
result.tv_usec = ((timeout - seconds * 1000) * 1000).int32
proc createFdSet(fd: var TFdSet, s: seq[Delegate], m: var int) =
FD_ZERO(fd)
for i in items(s):
m = max(m, int(i.fd))
FD_SET(i.fd, fd)
proc pruneSocketSet(s: var seq[Delegate], fd: var TFdSet) =
var i = 0
var L = s.len
while i < L:
if FD_ISSET(s[i].fd, fd) != 0'i32:
s[i] = s[L-1]
dec(L)
else:
inc(i)
setLen(s, L)
proc select(readfds, writefds, exceptfds: var seq[Delegate],
timeout = 500): int =
var tv {.noInit.}: Timeval = timeValFromMilliseconds(timeout)
var rd, wr, ex: TFdSet
var m = 0
createFdSet(rd, readfds, m)
createFdSet(wr, writefds, m)
createFdSet(ex, exceptfds, m)
if timeout != -1:
result = int(select(cint(m+1), addr(rd), addr(wr), addr(ex), addr(tv)))
else:
result = int(select(cint(m+1), addr(rd), addr(wr), addr(ex), nil))
pruneSocketSet(readfds, (rd))
pruneSocketSet(writefds, (wr))
pruneSocketSet(exceptfds, (ex))
proc poll*(d: Dispatcher, timeout: int = 500): bool =
## This function checks for events on all the delegates in the ``Dispatcher``.
## It then proceeds to call the correct event handler.
##
## This function returns ``true`` if there are file descriptors that are still
## open, otherwise ``false``. File descriptors that have been
## closed are immediately removed from the dispatcher automatically.
##
## **Note:** Each delegate has a task associated with it. This gets called
## after each select() call, if you set timeout to ``-1`` the tasks will
## only be executed after one or more file descriptors becomes readable or
## writeable.
result = true
var readDg, writeDg, errorDg: seq[Delegate] = @[]
var len = d.delegates.len
var dc = 0
while dc < len:
let deleg = d.delegates[dc]
if (deleg.mode != fmWrite or deleg.mode != fmAppend) and deleg.open:
readDg.add(deleg)
if (deleg.mode != fmRead) and deleg.open:
writeDg.add(deleg)
if deleg.open:
errorDg.add(deleg)
inc dc
else:
# File/socket has been closed. Remove it from dispatcher.
d.delegates[dc] = d.delegates[len-1]
dec len
d.delegates.setLen(len)
var hasDataBufferedCount = 0
for d in d.delegates:
if d.hasDataBuffered(d.deleVal):
hasDataBufferedCount.inc()
d.handleRead(d.deleVal)
if hasDataBufferedCount > 0: return true
if readDg.len() == 0 and writeDg.len() == 0:
## TODO: Perhaps this shouldn't return if errorDg has something?
return false
if select(readDg, writeDg, errorDg, timeout) != 0:
for i in 0..len(d.delegates)-1:
if i > len(d.delegates)-1: break # One delegate might've been removed.
let deleg = d.delegates[i]
if not deleg.open: continue # This delegate might've been closed.
if (deleg.mode != fmWrite or deleg.mode != fmAppend) and
deleg notin readDg:
deleg.handleRead(deleg.deleVal)
if (deleg.mode != fmRead) and deleg notin writeDg:
deleg.handleWrite(deleg.deleVal)
if deleg notin errorDg:
deleg.handleError(deleg.deleVal)
# Execute tasks
for i in items(d.delegates):
i.task(i.deleVal)
proc len*(disp: Dispatcher): int =
## Retrieves the number of delegates in ``disp``.
return disp.delegates.len
when not defined(testing) and isMainModule:
proc testConnect(s: AsyncSocket, no: int) =
echo("Connected! " & $no)
proc testRead(s: AsyncSocket, no: int) =
echo("Reading! " & $no)
var data = ""
if not s.readLine(data): return
if data == "":
echo("Closing connection. " & $no)
s.close()
echo(data)
echo("Finished reading! " & $no)
proc testAccept(s: AsyncSocket, disp: Dispatcher, no: int) =
echo("Accepting client! " & $no)
var client: AsyncSocket
new(client)
var address = ""
s.acceptAddr(client, address)
echo("Accepted ", address)
client.handleRead =
proc (s: AsyncSocket) =
testRead(s, 2)
disp.register(client)
proc main =
var d = newDispatcher()
var s = asyncSocket()
s.connect("amber.tenthbit.net", Port(6667))
s.handleConnect =
proc (s: AsyncSocket) =
testConnect(s, 1)
s.handleRead =
proc (s: AsyncSocket) =
testRead(s, 1)
d.register(s)
var server = asyncSocket()
server.handleAccept =
proc (s: AsyncSocket) =
testAccept(s, d, 78)
server.bindAddr(Port(5555))
server.listen()
d.register(server)
while d.poll(-1): discard
main()

View File

@@ -1,669 +0,0 @@
#
#
# Nim's Runtime Library
# (c) Copyright 2015 Dominik Picheta
# See the file "copying.txt", included in this
# distribution, for details about the copyright.
#
include "system/inclrtl"
import sockets, strutils, parseutils, times, os, asyncio
from asyncnet import nil
from nativesockets import nil
from asyncdispatch import Future
## **Note**: This module is deprecated since version 0.11.3.
## You should use the async version of this module
## `asyncftpclient <asyncftpclient.html>`_.
##
## ----
##
## This module **partially** implements an FTP client as specified
## by `RFC 959 <http://tools.ietf.org/html/rfc959>`_.
##
## This module provides both a synchronous and asynchronous implementation.
## The asynchronous implementation requires you to use the ``asyncFTPClient``
## function. You are then required to register the ``AsyncFTPClient`` with an
## asyncio dispatcher using the ``register`` function. Take a look at the
## asyncio module documentation for more information.
##
## **Note**: The asynchronous implementation is only asynchronous for long
## file transfers; calls to functions which use the command socket will block.
##
## Here is some example usage of this module:
##
## .. code-block:: Nim
## var ftp = ftpClient("example.org", user = "user", pass = "pass")
## ftp.connect()
## ftp.retrFile("file.ext", "file.ext")
##
## **Warning:** The API of this module is unstable, and therefore is subject
## to change.
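##
## The asynchronous variant follows roughly the same shape; the handler and
## host below are placeholders:
##
## .. code-block:: Nim
##    proc onEvent(ftp: AsyncFTPClient, ev: FTPEvent) =
##      if ev.typ == EvRetr: echo("Download finished.")
##
##    var disp = newDispatcher()
##    var ftp = asyncFTPClient("example.org", user = "user", pass = "pass",
##                             handleEvent = onEvent)
##    disp.register(ftp)
##    ftp.connect()
##    ftp.retrFile("file.ext", "file.ext", async = true)
##    while disp.poll(): discard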
{.deprecated.}
type
FtpBase*[SockType] = ref FtpBaseObj[SockType]
FtpBaseObj*[SockType] = object
csock*: SockType
dsock*: SockType
when SockType is asyncio.AsyncSocket:
handleEvent*: proc (ftp: AsyncFTPClient, ev: FTPEvent){.closure,gcsafe.}
disp: Dispatcher
asyncDSockID: Delegate
user*, pass*: string
address*: string
when SockType is asyncnet.AsyncSocket:
port*: nativesockets.Port
else:
port*: Port
jobInProgress*: bool
job*: FTPJob[SockType]
dsockConnected*: bool
FTPJobType* = enum
JRetrText, JRetr, JStore
FtpJob[T] = ref FtpJobObj[T]
FTPJobObj[T] = object
prc: proc (ftp: FTPBase[T], async: bool): bool {.nimcall, gcsafe.}
case typ*: FTPJobType
of JRetrText:
lines: string
of JRetr, JStore:
file: File
filename: string
total: BiggestInt # In bytes.
progress: BiggestInt # In bytes.
oneSecond: BiggestInt # Bytes transferred in one second.
lastProgressReport: float # Time
toStore: string # Data left to upload (Only used with async)
FtpClientObj* = FtpBaseObj[Socket]
FtpClient* = ref FtpClientObj
AsyncFtpClient* = ref AsyncFtpClientObj ## Async alternative to TFTPClient.
AsyncFtpClientObj* = FtpBaseObj[asyncio.AsyncSocket]
FTPEventType* = enum
EvTransferProgress, EvLines, EvRetr, EvStore
FTPEvent* = object ## Event
filename*: string
case typ*: FTPEventType
of EvLines:
lines*: string ## Lines that have been transferred.
of EvRetr, EvStore: ## Retr/Store operation finished.
nil
of EvTransferProgress:
bytesTotal*: BiggestInt ## Bytes total.
bytesFinished*: BiggestInt ## Bytes transferred.
speed*: BiggestInt ## Speed in bytes/s
currentJob*: FTPJobType ## The current job being performed.
ReplyError* = object of IOError
FTPError* = object of IOError
const multiLineLimit = 10000
proc ftpClient*(address: string, port = Port(21),
user, pass = ""): FtpClient =
## Create a ``FtpClient`` object.
new(result)
result.user = user
result.pass = pass
result.address = address
result.port = port
result.dsockConnected = false
result.csock = socket()
if result.csock == invalidSocket: raiseOSError(osLastError())
template blockingOperation(sock: Socket, body: untyped) =
body
template blockingOperation(sock: asyncio.AsyncSocket, body: untyped) =
sock.setBlocking(true)
body
sock.setBlocking(false)
proc expectReply[T](ftp: FtpBase[T]): TaintedString =
result = TaintedString""
blockingOperation(ftp.csock):
when T is Socket:
ftp.csock.readLine(result)
else:
discard ftp.csock.readLine(result)
var count = 0
while result[3] == '-':
## Multi-line reply.
var line = TaintedString""
when T is Socket:
ftp.csock.readLine(line)
else:
discard ftp.csock.readLine(line)
result.add("\n" & line)
count.inc()
if count >= multiLineLimit:
raise newException(ReplyError, "Reached maximum multi-line reply count.")
proc send*[T](ftp: FtpBase[T], m: string): TaintedString =
## Send a message to the server, and wait for a primary reply.
## ``\c\L`` is added for you.
##
## **Note:** The server may return multiple lines of coded replies.
blockingOperation(ftp.csock):
ftp.csock.send(m & "\c\L")
return ftp.expectReply()
proc assertReply(received: TaintedString, expected: string) =
if not received.string.startsWith(expected):
raise newException(ReplyError,
"Expected reply '$1' got: $2" % [
expected, received.string])
proc assertReply(received: TaintedString, expected: varargs[string]) =
for i in items(expected):
if received.string.startsWith(i): return
raise newException(ReplyError,
"Expected reply '$1' got: $2" %
[expected.join("' or '"), received.string])
proc createJob[T](ftp: FtpBase[T],
prc: proc (ftp: FtpBase[T], async: bool): bool {.
nimcall,gcsafe.},
cmd: FTPJobType) =
if ftp.jobInProgress:
raise newException(FTPError, "Unable to do two jobs at once.")
ftp.jobInProgress = true
new(ftp.job)
ftp.job.prc = prc
ftp.job.typ = cmd
case cmd
of JRetrText:
ftp.job.lines = ""
of JRetr, JStore:
ftp.job.toStore = ""
proc deleteJob[T](ftp: FtpBase[T]) =
assert ftp.jobInProgress
ftp.jobInProgress = false
case ftp.job.typ
of JRetrText:
ftp.job.lines = ""
of JRetr, JStore:
ftp.job.file.close()
ftp.dsock.close()
proc handleTask(s: AsyncSocket, ftp: AsyncFTPClient) =
if ftp.jobInProgress:
if ftp.job.typ in {JRetr, JStore}:
if epochTime() - ftp.job.lastProgressReport >= 1.0:
var r: FTPEvent
ftp.job.lastProgressReport = epochTime()
r.typ = EvTransferProgress
r.bytesTotal = ftp.job.total
r.bytesFinished = ftp.job.progress
r.speed = ftp.job.oneSecond
r.filename = ftp.job.filename
r.currentJob = ftp.job.typ
ftp.job.oneSecond = 0
ftp.handleEvent(ftp, r)
proc handleWrite(s: AsyncSocket, ftp: AsyncFTPClient) =
if ftp.jobInProgress:
if ftp.job.typ == JStore:
assert (not ftp.job.prc(ftp, true))
proc handleConnect(s: AsyncSocket, ftp: AsyncFTPClient) =
ftp.dsockConnected = true
assert(ftp.jobInProgress)
if ftp.job.typ == JStore:
s.setHandleWrite(proc (s: AsyncSocket) = handleWrite(s, ftp))
else:
s.delHandleWrite()
proc handleRead(s: AsyncSocket, ftp: AsyncFTPClient) =
assert ftp.jobInProgress
assert ftp.job.typ != JStore
# This can never return true, because it shouldn't check for code
# 226 from csock.
assert(not ftp.job.prc(ftp, true))
proc pasv[T](ftp: FtpBase[T]) =
## Negotiate a data connection.
when T is Socket:
ftp.dsock = socket()
if ftp.dsock == invalidSocket: raiseOSError(osLastError())
elif T is AsyncSocket:
ftp.dsock = asyncSocket()
ftp.dsock.handleRead =
proc (s: AsyncSocket) =
handleRead(s, ftp)
ftp.dsock.handleConnect =
proc (s: AsyncSocket) =
handleConnect(s, ftp)
ftp.dsock.handleTask =
proc (s: AsyncSocket) =
handleTask(s, ftp)
ftp.disp.register(ftp.dsock)
else:
{.fatal: "Incorrect socket instantiation".}
var pasvMsg = ftp.send("PASV").string.strip.TaintedString
assertReply(pasvMsg, "227")
var betweenParens = captureBetween(pasvMsg.string, '(', ')')
var nums = betweenParens.split(',')
var ip = nums[0.. ^3]
var port = nums[^2.. ^1]
var properPort = port[0].parseInt()*256+port[1].parseInt()
ftp.dsock.connect(ip.join("."), Port(properPort.toU16))
when T is AsyncSocket:
ftp.dsockConnected = false
else:
ftp.dsockConnected = true
proc normalizePathSep(path: string): string =
return replace(path, '\\', '/')
proc connect*[T](ftp: FtpBase[T]) =
## Connect to the FTP server specified by ``ftp``.
when T is AsyncSocket:
blockingOperation(ftp.csock):
ftp.csock.connect(ftp.address, ftp.port)
elif T is Socket:
ftp.csock.connect(ftp.address, ftp.port)
else:
{.fatal: "Incorrect socket instantiation".}
var reply = ftp.expectReply()
if reply.startsWith("120"):
# 120 Service ready in nnn minutes.
# We wait until we receive 220.
reply = ftp.expectReply()
# Handle 220 messages from the server
assertReply ftp.expectReply(), "220"
if ftp.user != "":
assertReply(ftp.send("USER " & ftp.user), "230", "331")
if ftp.pass != "":
assertReply ftp.send("PASS " & ftp.pass), "230"
proc pwd*[T](ftp: FtpBase[T]): string =
## Returns the current working directory.
var wd = ftp.send("PWD")
assertReply wd, "257"
return wd.string.captureBetween('"') # "
proc cd*[T](ftp: FtpBase[T], dir: string) =
## Changes the current directory on the remote FTP server to ``dir``.
assertReply ftp.send("CWD " & dir.normalizePathSep), "250"
proc cdup*[T](ftp: FtpBase[T]) =
## Changes the current directory to the parent of the current directory.
assertReply ftp.send("CDUP"), "200"
proc getLines[T](ftp: FtpBase[T], async: bool = false): bool =
## Downloads text data in ASCII mode
## Returns true if the download is complete.
## It never returns true if `async` is true, because it doesn't check for code 226 then.
if ftp.dsockConnected:
var r = TaintedString""
when T is AsyncSocket:
if ftp.asyncDSock.readLine(r):
if r.string == "":
ftp.dsockConnected = false
else:
ftp.job.lines.add(r.string & "\n")
elif T is Socket:
assert(not async)
ftp.dsock.readLine(r)
if r.string == "":
ftp.dsockConnected = false
else:
ftp.job.lines.add(r.string & "\n")
else:
{.fatal: "Incorrect socket instantiation".}
if not async:
var readSocks: seq[Socket] = @[ftp.csock]
# This is only needed here. Asyncio gets this socket...
blockingOperation(ftp.csock):
if readSocks.select(1) != 0 and ftp.csock in readSocks:
assertReply ftp.expectReply(), "226"
return true
proc listDirs*[T](ftp: FtpBase[T], dir: string = "",
async = false): seq[string] =
## Returns a list of filenames in the given directory. If ``dir`` is "",
## the current directory is used. If ``async`` is true, this
## function will return immediately and it will be your job to
## use asyncio's ``poll`` to progress this operation.
ftp.createJob(getLines[T], JRetrText)
ftp.pasv()
assertReply ftp.send("NLST " & dir.normalizePathSep), ["125", "150"]
if not async:
while not ftp.job.prc(ftp, false): discard
result = splitLines(ftp.job.lines)
ftp.deleteJob()
else: return @[]
proc fileExists*(ftp: FtpClient, file: string): bool {.deprecated.} =
## **Deprecated since version 0.9.0:** Please use ``existsFile``.
##
## Determines whether ``file`` exists.
##
## Warning: This function may block, especially on directories with many
## files, because a full list of file names must be retrieved.
var files = ftp.listDirs()
for f in items(files):
if f.normalizePathSep == file.normalizePathSep: return true
proc existsFile*(ftp: FtpClient, file: string): bool =
## Determines whether ``file`` exists.
##
## Warning: This function may block, especially on directories with many
## files, because a full list of file names must be retrieved.
var files = ftp.listDirs()
for f in items(files):
if f.normalizePathSep == file.normalizePathSep: return true
proc createDir*[T](ftp: FtpBase[T], dir: string, recursive: bool = false) =
## Creates a directory ``dir``. If ``recursive`` is true, the topmost
## subdirectory of ``dir`` will be created first, followed by the second
## topmost, and so on. This allows you to give a full path as the ``dir`` without worrying
## about subdirectories not existing.
if not recursive:
assertReply ftp.send("MKD " & dir.normalizePathSep), "257"
else:
var reply = TaintedString""
var previousDirs = ""
for p in split(dir, {os.DirSep, os.AltSep}):
if p != "":
previousDirs.add(p)
reply = ftp.send("MKD " & previousDirs)
previousDirs.add('/')
assertReply reply, "257"
proc chmod*[T](ftp: FtpBase[T], path: string,
permissions: set[FilePermission]) =
## Changes permission of ``path`` to ``permissions``.
var userOctal = 0
var groupOctal = 0
var otherOctal = 0
for i in items(permissions):
case i
of fpUserExec: userOctal.inc(1)
of fpUserWrite: userOctal.inc(2)
of fpUserRead: userOctal.inc(4)
of fpGroupExec: groupOctal.inc(1)
of fpGroupWrite: groupOctal.inc(2)
of fpGroupRead: groupOctal.inc(4)
of fpOthersExec: otherOctal.inc(1)
of fpOthersWrite: otherOctal.inc(2)
of fpOthersRead: otherOctal.inc(4)
var perm = $userOctal & $groupOctal & $otherOctal
assertReply ftp.send("SITE CHMOD " & perm &
" " & path.normalizePathSep), "200"
proc list*[T](ftp: FtpBase[T], dir: string = "", async = false): string =
## Lists all files in ``dir``. If ``dir`` is ``""``, uses the current
## working directory. If ``async`` is true, this function will return
## immediately and it will be your job to call asyncio's
## ``poll`` to progress this operation.
ftp.createJob(getLines[T], JRetrText)
ftp.pasv()
assertReply(ftp.send("LIST" & " " & dir.normalizePathSep), ["125", "150"])
if not async:
while not ftp.job.prc(ftp, false): discard
result = ftp.job.lines
ftp.deleteJob()
else:
return ""
proc retrText*[T](ftp: FtpBase[T], file: string, async = false): string =
## Retrieves ``file``. File must be ASCII text.
## If ``async`` is true, this function will return immediately and
## it will be your job to call asyncio's ``poll`` to progress this operation.
ftp.createJob(getLines[T], JRetrText)
ftp.pasv()
assertReply ftp.send("RETR " & file.normalizePathSep), ["125", "150"]
if not async:
while not ftp.job.prc(ftp, false): discard
result = ftp.job.lines
ftp.deleteJob()
else:
return ""
proc getFile[T](ftp: FtpBase[T], async = false): bool =
if ftp.dsockConnected:
var r = "".TaintedString
var bytesRead = 0
var returned = false
if async:
when T is Socket:
raise newException(FTPError, "FTPClient must be async.")
else:
bytesRead = ftp.dsock.recvAsync(r, BufferSize)
returned = bytesRead != -1
else:
bytesRead = ftp.dsock.recv(r, BufferSize)
returned = true
let r2 = r.string
if r2 != "":
ftp.job.progress.inc(r2.len)
ftp.job.oneSecond.inc(r2.len)
ftp.job.file.write(r2)
elif returned and r2 == "":
ftp.dsockConnected = false
when T is Socket:
if not async:
var readSocks: seq[Socket] = @[ftp.csock]
blockingOperation(ftp.csock):
if readSocks.select(1) != 0 and ftp.csock in readSocks:
assertReply ftp.expectReply(), "226"
return true
proc retrFile*[T](ftp: FtpBase[T], file, dest: string, async = false) =
## Downloads ``file`` and saves it to ``dest``. Usage of this function
## asynchronously is recommended to view the progress of the download.
## The ``EvRetr`` event is passed to the specified ``handleEvent`` function
## when the download is finished, and the ``filename`` field will be equal
## to ``file``.
ftp.createJob(getFile[T], JRetr)
ftp.job.file = open(dest, mode = fmWrite)
ftp.pasv()
var reply = ftp.send("RETR " & file.normalizePathSep)
assertReply reply, ["125", "150"]
if {'(', ')'} notin reply.string:
raise newException(ReplyError, "Reply has no file size.")
var fileSize: BiggestInt
if reply.string.captureBetween('(', ')').parseBiggestInt(fileSize) == 0:
raise newException(ReplyError, "Reply has no file size.")
ftp.job.total = fileSize
ftp.job.lastProgressReport = epochTime()
ftp.job.filename = file.normalizePathSep
if not async:
while not ftp.job.prc(ftp, false): discard
ftp.deleteJob()
proc doUpload[T](ftp: FtpBase[T], async = false): bool =
if ftp.dsockConnected:
if ftp.job.toStore.len() > 0:
assert(async)
let bytesSent = ftp.dsock.sendAsync(ftp.job.toStore)
if bytesSent == ftp.job.toStore.len:
ftp.job.toStore = ""
elif bytesSent != ftp.job.toStore.len and bytesSent != 0:
ftp.job.toStore = ftp.job.toStore[bytesSent .. ^1]
ftp.job.progress.inc(bytesSent)
ftp.job.oneSecond.inc(bytesSent)
else:
var s = newStringOfCap(4000)
var len = ftp.job.file.readBuffer(addr(s[0]), 4000)
setLen(s, len)
if len == 0:
# File finished uploading.
ftp.dsock.close()
ftp.dsockConnected = false
if not async:
assertReply ftp.expectReply(), "226"
return true
return false
if not async:
ftp.dsock.send(s)
else:
let bytesSent = ftp.dsock.sendAsync(s)
if bytesSent == 0:
ftp.job.toStore.add(s)
elif bytesSent != s.len:
ftp.job.toStore.add(s[bytesSent .. ^1])
len = bytesSent
ftp.job.progress.inc(len)
ftp.job.oneSecond.inc(len)
proc store*[T](ftp: FtpBase[T], file, dest: string, async = false) =
## Uploads ``file`` to ``dest`` on the remote FTP server. Usage of this
## function asynchronously is recommended to view the progress of
## the download.
## The ``EvStore`` event is passed to the specified ``handleEvent`` function
## when the upload is finished, and the ``filename`` field will be
## equal to ``file``.
ftp.createJob(doUpload[T], JStore)
ftp.job.file = open(file)
ftp.job.total = ftp.job.file.getFileSize()
ftp.job.lastProgressReport = epochTime()
ftp.job.filename = file
ftp.pasv()
assertReply ftp.send("STOR " & dest.normalizePathSep), ["125", "150"]
if not async:
while not ftp.job.prc(ftp, false): discard
ftp.deleteJob()
proc close*[T](ftp: FtpBase[T]) =
## Terminates the connection to the server.
assertReply ftp.send("QUIT"), "221"
if ftp.jobInProgress: ftp.deleteJob()
ftp.csock.close()
ftp.dsock.close()
proc csockHandleRead(s: AsyncSocket, ftp: AsyncFTPClient) =
if ftp.jobInProgress:
assertReply ftp.expectReply(), "226" # Make sure the transfer completed.
var r: FTPEvent
case ftp.job.typ
of JRetrText:
r.typ = EvLines
r.lines = ftp.job.lines
of JRetr:
r.typ = EvRetr
r.filename = ftp.job.filename
if ftp.job.progress != ftp.job.total:
raise newException(FTPError, "Didn't download full file.")
of JStore:
r.typ = EvStore
r.filename = ftp.job.filename
if ftp.job.progress != ftp.job.total:
raise newException(FTPError, "Didn't upload full file.")
ftp.deleteJob()
ftp.handleEvent(ftp, r)
proc asyncFTPClient*(address: string, port = Port(21),
user, pass = "",
handleEvent: proc (ftp: AsyncFTPClient, ev: FTPEvent) {.closure,gcsafe.} =
(proc (ftp: AsyncFTPClient, ev: FTPEvent) = discard)): AsyncFTPClient =
## Create a ``AsyncFTPClient`` object.
##
## Use this if you want to use asyncio's dispatcher.
var dres: AsyncFtpClient
new(dres)
dres.user = user
dres.pass = pass
dres.address = address
dres.port = port
dres.dsockConnected = false
dres.handleEvent = handleEvent
dres.csock = asyncSocket()
dres.csock.handleRead =
proc (s: AsyncSocket) =
csockHandleRead(s, dres)
result = dres
proc register*(d: Dispatcher, ftp: AsyncFTPClient): Delegate {.discardable.} =
## Registers ``ftp`` with dispatcher ``d``.
ftp.disp = d
return ftp.disp.register(ftp.csock)
when not defined(testing) and isMainModule:
proc main =
var d = newDispatcher()
let hev =
proc (ftp: AsyncFTPClient, event: FTPEvent) =
case event.typ
of EvStore:
echo("Upload finished!")
ftp.retrFile("payload.jpg", "payload2.jpg", async = true)
of EvTransferProgress:
var time: int64 = -1
if event.speed != 0:
time = (event.bytesTotal - event.bytesFinished) div event.speed
echo(event.currentJob)
echo(event.speed div 1000, " kb/s. - ",
event.bytesFinished, "/", event.bytesTotal,
" - ", time, " seconds")
echo(d.len)
of EvRetr:
echo("Download finished!")
ftp.close()
echo d.len
else: assert(false)
var ftp = asyncFTPClient("example.com", user = "foo", pass = "bar", handleEvent = hev)
d.register(ftp)
d.len.echo()
ftp.connect()
echo "connected"
ftp.store("payload.jpg", "payload.jpg", async = true)
d.len.echo()
echo "uploading..."
while true:
if not d.poll(): break
main()
when not defined(testing) and isMainModule:
var ftp = ftpClient("example.com", user = "foo", pass = "bar")
ftp.connect()
echo ftp.pwd()
echo ftp.list()
echo("uploading")
ftp.store("payload.jpg", "payload.jpg", async = false)
echo("Upload complete")
ftp.retrFile("payload.jpg", "payload2.jpg", async = false)
echo("Download complete")
sleep(5000)
ftp.close()
sleep(200)

File diff suppressed because it is too large

View File

@@ -128,7 +128,7 @@ proc getErrInfo(db: var DbConn): tuple[res: int, ss, ne, msg: string] {.
cast[PSQLCHAR](sqlState.addr),
cast[PSQLCHAR](nativeErr.addr),
cast[PSQLCHAR](errMsg.addr),
511.TSqlSmallInt, retSz.addr.PSQLSMALLINT)
511.TSqlSmallInt, retSz.addr)
except:
discard
return (res.int, $(addr sqlState), $(addr nativeErr), $(addr errMsg))
@@ -277,14 +277,9 @@ iterator fastRows*(db: var DbConn, query: SqlQuery,
## Rows are retrieved from the server at each iteration.
var
rowRes: Row
sz: TSqlSmallInt = 0
cCnt: TSqlSmallInt = 0.TSqlSmallInt
res: TSqlSmallInt = 0.TSqlSmallInt
tempcCnt: TSqlSmallInt # temporary cCnt,Fix the field values to be null when the release schema is compiled.
# tempcCnt,A field to store the number of temporary variables, for unknown reasons,
# after performing a sqlgetdata function and circulating variables cCnt value will be changed to 0,
# so the values of the temporary variable to store the cCnt.
# After every cycle and specified to cCnt. To ensure the traversal of all fields.
sz: TSqlInteger = 0
cCnt: TSqlSmallInt = 0
res: TSqlSmallInt = 0
res = db.prepareFetch(query, args)
if res == SQL_NO_DATA:
discard
@@ -292,14 +287,13 @@ iterator fastRows*(db: var DbConn, query: SqlQuery,
res = SQLNumResultCols(db.stmt, cCnt)
rowRes = newRow(cCnt)
rowRes.setLen(max(cCnt,0))
tempcCnt = cCnt
while res == SQL_SUCCESS:
for colId in 1..cCnt:
buf[0] = '\0'
db.sqlCheck(SQLGetData(db.stmt, colId.SqlUSmallInt, SQL_C_CHAR,
cast[cstring](buf.addr), 4095.TSqlSmallInt, sz.addr))
cast[cstring](buf.addr), 4095.TSqlSmallInt,
sz.addr))
rowRes[colId-1] = $(addr buf)
cCnt = tempcCnt
yield rowRes
res = SQLFetch(db.stmt)
properFreeResult(SQL_HANDLE_STMT, db.stmt)
@@ -312,14 +306,9 @@ iterator instantRows*(db: var DbConn, query: SqlQuery,
## on demand using []. Returned handle is valid only within the iterator body.
var
rowRes: Row = @[]
sz: TSqlSmallInt = 0
cCnt: TSqlSmallInt = 0.TSqlSmallInt
res: TSqlSmallInt = 0.TSqlSmallInt
tempcCnt: TSqlSmallInt # temporary cCnt,Fix the field values to be null when the release schema is compiled.
# tempcCnt,A field to store the number of temporary variables, for unknown reasons,
# after performing a sqlgetdata function and circulating variables cCnt value will be changed to 0,
# so the values of the temporary variable to store the cCnt.
# After every cycle and specified to cCnt. To ensure the traversal of all fields.
sz: TSqlInteger = 0
cCnt: TSqlSmallInt = 0
res: TSqlSmallInt = 0
res = db.prepareFetch(query, args)
if res == SQL_NO_DATA:
discard
@@ -327,14 +316,13 @@ iterator instantRows*(db: var DbConn, query: SqlQuery,
res = SQLNumResultCols(db.stmt, cCnt)
rowRes = newRow(cCnt)
rowRes.setLen(max(cCnt,0))
tempcCnt = cCnt
while res == SQL_SUCCESS:
for colId in 1..cCnt:
buf[0] = '\0'
db.sqlCheck(SQLGetData(db.stmt, colId.SqlUSmallInt, SQL_C_CHAR,
cast[cstring](buf.addr), 4095.TSqlSmallInt, sz.addr))
cast[cstring](buf.addr), 4095.TSqlSmallInt,
sz.addr))
rowRes[colId-1] = $(addr buf)
cCnt = tempcCnt
yield (row: rowRes, len: cCnt.int)
res = SQLFetch(db.stmt)
properFreeResult(SQL_HANDLE_STMT, db.stmt)
@@ -355,14 +343,9 @@ proc getRow*(db: var DbConn, query: SqlQuery,
## will return a Row with empty strings for each column.
var
rowRes: Row
sz: TSqlSmallInt = 0.TSqlSmallInt
cCnt: TSqlSmallInt = 0.TSqlSmallInt
res: TSqlSmallInt = 0.TSqlSmallInt
tempcCnt: TSqlSmallInt # temporary cCnt,Fix the field values to be null when the release schema is compiled.
## tempcCnt,A field to store the number of temporary variables, for unknown reasons,
## after performing a sqlgetdata function and circulating variables cCnt value will be changed to 0,
## so the values of the temporary variable to store the cCnt.
## After every cycle and specified to cCnt. To ensure the traversal of all fields.
sz: TSqlInteger = 0
cCnt: TSqlSmallInt = 0
res: TSqlSmallInt = 0
res = db.prepareFetch(query, args)
if res == SQL_NO_DATA:
result = @[]
@@ -370,13 +353,12 @@ proc getRow*(db: var DbConn, query: SqlQuery,
res = SQLNumResultCols(db.stmt, cCnt)
rowRes = newRow(cCnt)
rowRes.setLen(max(cCnt,0))
tempcCnt = cCnt
for colId in 1..cCnt:
buf[0] = '\0'
db.sqlCheck(SQLGetData(db.stmt, colId.SqlUSmallInt, SQL_C_CHAR,
cast[cstring](buf.addr), 4095.TSqlSmallInt, sz.addr))
cast[cstring](buf.addr), 4095.TSqlSmallInt,
sz.addr))
rowRes[colId-1] = $(addr buf)
cCnt = tempcCnt
res = SQLFetch(db.stmt)
result = rowRes
properFreeResult(SQL_HANDLE_STMT, db.stmt)
@@ -389,14 +371,9 @@ proc getAllRows*(db: var DbConn, query: SqlQuery,
var
rows: seq[Row] = @[]
rowRes: Row
sz: TSqlSmallInt = 0
cCnt: TSqlSmallInt = 0.TSqlSmallInt
res: TSqlSmallInt = 0.TSqlSmallInt
tempcCnt: TSqlSmallInt # temporary cCnt,Fix the field values to be null when the release schema is compiled.
## tempcCnt,A field to store the number of temporary variables, for unknown reasons,
## after performing a sqlgetdata function and circulating variables cCnt value will be changed to 0,
## so the values of the temporary variable to store the cCnt.
## After every cycle and specified to cCnt. To ensure the traversal of all fields.
sz: TSqlInteger = 0
cCnt: TSqlSmallInt = 0
res: TSqlSmallInt = 0
res = db.prepareFetch(query, args)
if res == SQL_NO_DATA:
result = @[]
@@ -404,14 +381,13 @@ proc getAllRows*(db: var DbConn, query: SqlQuery,
res = SQLNumResultCols(db.stmt, cCnt)
rowRes = newRow(cCnt)
rowRes.setLen(max(cCnt,0))
tempcCnt = cCnt
while res == SQL_SUCCESS:
for colId in 1..cCnt:
buf[0] = '\0'
db.sqlCheck(SQLGetData(db.stmt, colId.SqlUSmallInt, SQL_C_CHAR,
cast[cstring](buf.addr), 4095.TSqlSmallInt, sz.addr))
cast[cstring](buf.addr), 4095.TSqlSmallInt,
sz.addr))
rowRes[colId-1] = $(addr buf)
cCnt = tempcCnt
rows.add(rowRes)
res = SQLFetch(db.stmt)
result = rows

View File

@@ -8,19 +8,59 @@
#
## This module implements some common generic algorithms.
##
## Basic usage
## ===========
##
## .. code-block::
## import algorithm
##
## type People = tuple
## year: int
## name: string
##
## var a: seq[People]
##
## a.add((2000, "John"))
## a.add((2005, "Marie"))
## a.add((2010, "Jane"))
##
## # Sorting with default system.cmp
## a.sort()
## assert a == @[(year: 2000, name: "John"), (year: 2005, name: "Marie"),
## (year: 2010, name: "Jane")]
##
## proc myCmp(x, y: People): int =
## if x.name < y.name: -1 else: 1
##
## # Sorting with custom proc
## a.sort(myCmp)
## assert a == @[(year: 2010, name: "Jane"), (year: 2000, name: "John"),
## (year: 2005, name: "Marie")]
##
##
## See also
## ========
## * `sequtils module<sequtils.html>`_ for working with the built-in seq type
## * `tables module<tables.html>`_ for sorting tables
type
SortOrder* = enum
Descending, Ascending
proc `*`*(x: int, order: SortOrder): int {.inline.} =
## flips ``x`` if ``order == Descending``.
## Flips ``x`` if ``order == Descending``.
## If ``order == Ascending`` then ``x`` is returned.
##
## ``x`` is supposed to be the result of a comparator, i.e.
## | ``< 0`` for *less than*,
## | ``== 0`` for *equal*,
## | ``> 0`` for *greater than*.
runnableExamples:
assert `*`(-123, Descending) == 123
assert `*`(123, Descending) == -123
assert `*`(-123, Ascending) == -123
assert `*`(123, Ascending) == 123
var y = order.ord - 1
result = (x xor y) - y
@@ -31,28 +71,44 @@ template fillImpl[T](a: var openArray[T], first, last: int, value: T) =
inc(x)
proc fill*[T](a: var openArray[T], first, last: Natural, value: T) =
## fills the slice ``a[first..last]`` with ``value``.
## Fills the slice ``a[first..last]`` with ``value``.
##
## If an invalid range is passed, it raises IndexError.
runnableExamples:
var a: array[6, int]
a.fill(1, 3, 9)
doAssert a == [0, 9, 9, 9, 0, 0]
var a: array[6, int]
a.fill(1, 3, 9)
assert a == [0, 9, 9, 9, 0, 0]
a.fill(3, 5, 7)
assert a == [0, 9, 9, 7, 7, 7]
doAssertRaises(IndexError, a.fill(1, 7, 9))
fillImpl(a, first, last, value)
proc fill*[T](a: var openArray[T], value: T) =
## fills the container ``a`` with ``value``.
## Fills the container ``a`` with ``value``.
runnableExamples:
var a: array[6, int]
a.fill(9)
doAssert a == [9, 9, 9, 9, 9, 9]
var a: array[6, int]
a.fill(9)
assert a == [9, 9, 9, 9, 9, 9]
a.fill(4)
assert a == [4, 4, 4, 4, 4, 4]
fillImpl(a, 0, a.high, value)
proc reverse*[T](a: var openArray[T], first, last: Natural) =
## reverses the slice ``a[first..last]``.
## Reverses the slice ``a[first..last]``.
##
## If an invalid range is passed, it raises IndexError.
##
## **See also:**
## * `reversed proc<#reversed,openArray[T],Natural,int>`_ reverses a slice and returns a ``seq[T]``
## * `reversed proc<#reversed,openArray[T]>`_ reverses and returns a ``seq[T]``
runnableExamples:
var a = [1, 2, 3, 4, 5, 6]
a.reverse(1, 3)
doAssert a == [1, 4, 3, 2, 5, 6]
var a = [1, 2, 3, 4, 5, 6]
a.reverse(1, 3)
assert a == [1, 4, 3, 2, 5, 6]
a.reverse(1, 3)
assert a == [1, 2, 3, 4, 5, 6]
doAssertRaises(IndexError, a.reverse(1, 7))
var x = first
var y = last
while x < y:
@@ -61,20 +117,32 @@ proc reverse*[T](a: var openArray[T], first, last: Natural) =
inc(x)
proc reverse*[T](a: var openArray[T]) =
## reverses the contents of the container ``a``.
## Reverses the contents of the container ``a``.
##
## **See also:**
## * `reversed proc<#reversed,openArray[T],Natural,int>`_ reverse a slice and returns a ``seq[T]``
## * `reversed proc<#reversed,openArray[T]>`_ reverse and returns a ``seq[T]``
runnableExamples:
var a = [1, 2, 3, 4, 5, 6]
a.reverse()
doAssert a == [6, 5, 4, 3, 2, 1]
var a = [1, 2, 3, 4, 5, 6]
a.reverse()
assert a == [6, 5, 4, 3, 2, 1]
a.reverse()
assert a == [1, 2, 3, 4, 5, 6]
reverse(a, 0, max(0, a.high))
proc reversed*[T](a: openArray[T], first: Natural, last: int): seq[T] =
## returns the reverse of the slice ``a[first..last]``.
## Returns the reverse of the slice ``a[first..last]``.
##
## If an invalid range is passed, it raises IndexError.
##
## **See also:**
## * `reverse proc<#reverse,openArray[T],Natural,Natural>`_ reverses a slice
## * `reverse proc<#reverse,openArray[T]>`_
runnableExamples:
let
a = [1, 2, 3, 4, 5, 6]
b = reversed(a, 1, 3)
doAssert b == @[4, 3, 2]
let
a = [1, 2, 3, 4, 5, 6]
b = a.reversed(1, 3)
assert b == @[4, 3, 2]
assert last >= first-1
var i = last - first
var x = first.int
@@ -85,12 +153,16 @@ proc reversed*[T](a: openArray[T], first: Natural, last: int): seq[T] =
inc(x)
proc reversed*[T](a: openArray[T]): seq[T] =
## returns the reverse of the container ``a``.
## Returns the reverse of the container ``a``.
##
## **See also:**
## * `reverse proc<#reverse,openArray[T],Natural,Natural>`_ reverses a slice
## * `reverse proc<#reverse,openArray[T]>`_
runnableExamples:
let
a = [1, 2, 3, 4, 5, 6]
b = reversed(a)
doAssert b == @[6, 5, 4, 3, 2, 1]
let
a = [1, 2, 3, 4, 5, 6]
b = reversed(a)
assert b == @[6, 5, 4, 3, 2, 1]
reversed(a, 0, a.high)
proc binarySearch*[T, K](a: openArray[T], key: K,
@@ -99,6 +171,9 @@ proc binarySearch*[T, K](a: openArray[T], key: K,
##
## ``cmp`` is the comparator function to use; the expected return values are
## the same as that of system.cmp.
runnableExamples:
assert binarySearch(["a","b","c","d"], "d", system.cmp[string]) == 3
assert binarySearch(["a","b","d","c"], "d", system.cmp[string]) == 2
if a.len == 0:
return -1
@@ -141,31 +216,41 @@ proc binarySearch*[T, K](a: openArray[T], key: K,
proc binarySearch*[T](a: openArray[T], key: T): int =
## Binary search for ``key`` in ``a``. Returns -1 if not found.
runnableExamples:
assert binarySearch([0, 1, 2, 3, 4], 4) == 4
assert binarySearch([0, 1, 4, 2, 3], 4) == 2
binarySearch(a, key, cmp[T])
proc smartBinarySearch*[T](a: openArray[T], key: T): int {.deprecated.} =
## **Deprecated since version 0.18.1**; Use ``binarySearch`` instead.
## **Deprecated since version 0.18.1**; Use `binarySearch proc
## <#binarySearch,openArray[T],T>`_ instead.
binarySearch(a, key, cmp[T])
const
onlySafeCode = true
proc lowerBound*[T, K](a: openArray[T], key: K, cmp: proc(x: T, k: K): int {.closure.}): int =
## returns a position to the first element in the ``a`` that is greater than
## Returns a position to the first element in ``a`` that is not less than
## (i.e. greater or equal to) ``key``, or last if no such element is found.
## In other words if you have a sorted sequence and you call
## ``insert(thing, elm, lowerBound(thing, elm))``
## the sequence will still be sorted.
##
## The first version uses ``cmp`` to compare the elements.
## If an invalid range is passed, it raises IndexError.
##
## This version uses ``cmp`` to compare the elements.
## The expected return values are the same as that of ``system.cmp``.
## The second version uses the default comparison function ``cmp``.
##
## .. code-block:: nim
##
## var arr = @[1,2,3,5,6,7,8,9]
## arr.insert(4, arr.lowerBound(4))
## # after running the above arr is `[1,2,3,4,5,6,7,8,9]`
## **See also:**
## * `upperBound proc<#upperBound,openArray[T],K,proc(T,K)>`_ which takes a custom ``cmp``
## * `upperBound proc<#upperBound,openArray[T],T>`_
runnableExamples:
var arr = @[1,2,3,5,6,7,8,9]
assert arr.lowerBound(3, system.cmp[int]) == 2
assert arr.lowerBound(4, system.cmp[int]) == 3
assert arr.lowerBound(5, system.cmp[int]) == 3
arr.insert(4, arr.lowerBound(4, system.cmp[int]))
assert arr == [1,2,3,4,5,6,7,8,9]
result = a.low
var count = a.high - a.low + 1
var step, pos: int
@@ -179,23 +264,40 @@ proc lowerBound*[T, K](a: openArray[T], key: K, cmp: proc(x: T, k: K): int {.clo
count = step
proc lowerBound*[T](a: openArray[T], key: T): int = lowerBound(a, key, cmp[T])
## Returns a position to the first element in ``a`` that is not less than
## (i.e. greater or equal to) ``key``, or last if no such element is found.
## In other words if you have a sorted sequence and you call
## ``insert(thing, elm, lowerBound(thing, elm))``
## the sequence will still be sorted.
##
## This version uses the default comparison function ``cmp``.
##
## **See also:**
## * `upperBound proc<#upperBound,openArray[T],K,proc(T,K)>`_ which takes a custom ``cmp``
## * `upperBound proc<#upperBound,openArray[T],T>`_
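##
## A minimal sketch of this overload, mirroring the ``cmp``-based example
## above (the values are arbitrary):
##
## .. code-block:: nim
##   var arr = @[1, 2, 3, 5, 6, 7, 8, 9]
##   assert arr.lowerBound(4) == 3   # the first element not less than 4 is 5, at index 3
##   arr.insert(4, arr.lowerBound(4))
##   assert arr == @[1, 2, 3, 4, 5, 6, 7, 8, 9]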
proc upperBound*[T, K](a: openArray[T], key: K, cmp: proc(x: T, k: K): int {.closure.}): int =
## returns a position to the first element in the ``a`` that is not less
## Returns a position to the first element in ``a`` that is greater than
## ``key``, or last if no such element is found.
## In other words if you have a sorted sequence and you call
## ``insert(thing, elm, upperBound(thing, elm))``
## the sequence will still be sorted.
##
## The first version uses ``cmp`` to compare the elements. The expected
## If an invalid range is passed, it raises IndexError.
##
## This version uses ``cmp`` to compare the elements. The expected
## return values are the same as that of ``system.cmp``.
## The second version uses the default comparison function ``cmp``.
##
## .. code-block:: nim
##
## var arr = @[1,2,3,4,6,7,8,9]
## arr.insert(5, arr.upperBound(4))
## # after running the above arr is `[1,2,3,4,5,6,7,8,9]`
## **See also:**
## * `lowerBound proc<#lowerBound,openArray[T],K,proc(T,K)>`_ which takes a custom ``cmp``
## * `lowerBound proc<#lowerBound,openArray[T],T>`_
runnableExamples:
var arr = @[1,2,3,5,6,7,8,9]
assert arr.upperBound(2, system.cmp[int]) == 2
assert arr.upperBound(3, system.cmp[int]) == 3
assert arr.upperBound(4, system.cmp[int]) == 3
arr.insert(4, arr.upperBound(3, system.cmp[int]))
assert arr == [1,2,3,4,5,6,7,8,9]
result = a.low
var count = a.high - a.low + 1
var step, pos: int
@@ -209,6 +311,17 @@ proc upperBound*[T, K](a: openArray[T], key: K, cmp: proc(x: T, k: K): int {.clo
count = step
proc upperBound*[T](a: openArray[T], key: T): int = upperBound(a, key, cmp[T])
## Returns a position to the first element in ``a`` that is greater than
## ``key``, or last if no such element is found.
## In other words if you have a sorted sequence and you call
## ``insert(thing, elm, upperBound(thing, elm))``
## the sequence will still be sorted.
##
## This version uses the default comparison function ``cmp``.
##
## **See also:**
## * `lowerBound proc<#lowerBound,openArray[T],K,proc(T,K)>`_ which takes a custom ``cmp``
## * `lowerBound proc<#lowerBound,openArray[T],T>`_
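##
## A minimal sketch of this overload, mirroring the ``cmp``-based example
## above (the values are arbitrary):
##
## .. code-block:: nim
##   var arr = @[1, 2, 3, 4, 6, 7, 8, 9]
##   assert arr.upperBound(4) == 4   # the first element greater than 4 is 6, at index 4
##   arr.insert(5, arr.upperBound(4))
##   assert arr == @[1, 2, 3, 4, 5, 6, 7, 8, 9]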
template `<-` (a, b) =
when false:
@@ -263,6 +376,7 @@ func sort*[T](a: var openArray[T],
## Default Nim sort (an implementation of merge sort). The sorting
## is guaranteed to be stable and the worst case is guaranteed to
## be O(n log n).
##
## The current implementation uses an iterative
## mergesort to achieve this. It uses a temporary sequence of
## length ``a.len div 2``. If you do not wish to provide your own
@@ -272,7 +386,6 @@ func sort*[T](a: var openArray[T],
## .. code-block:: nim
##
## sort(myIntArray, system.cmp[int])
##
## # do not use cmp[string] here as we want to use the specialized
## # overload:
## sort(myStrArray, system.cmp)
@@ -286,6 +399,19 @@ func sort*[T](a: var openArray[T],
## result = cmp(x.surname, y.surname)
## if result == 0:
## result = cmp(x.name, y.name)
##
## **See also:**
## * `sort proc<#sort,openArray[T]>`_
## * `sorted proc<#sorted,openArray[T],proc(T,T)>`_ sorted by ``cmp`` in the specified order
## * `sorted proc<#sorted,openArray[T]>`_
## * `sortedByIt template<#sortedByIt.t,untyped,untyped>`_
runnableExamples:
var d = ["boo", "fo", "barr", "qux"]
proc myCmp(x, y: string): int =
if x.len() > y.len() or x.len() == y.len(): 1
else: -1
sort(d, myCmp)
assert d == ["fo", "qux", "boo", "barr"]
var n = a.len
var b: seq[T]
newSeq(b, n div 2)
@@ -299,17 +425,30 @@ func sort*[T](a: var openArray[T],
proc sort*[T](a: var openArray[T], order = SortOrder.Ascending) = sort[T](a, system.cmp[T], order)
## Shortcut version of ``sort`` that uses ``system.cmp[T]`` as the comparison function.
##
## **See also:**
## * `sort func<#sort,openArray[T],proc(T,T)>`_
## * `sorted proc<#sorted,openArray[T],proc(T,T)>`_ sorted by ``cmp`` in the specified order
## * `sorted proc<#sorted,openArray[T]>`_
## * `sortedByIt template<#sortedByIt.t,untyped,untyped>`_
proc sorted*[T](a: openArray[T], cmp: proc(x, y: T): int {.closure.},
order = SortOrder.Ascending): seq[T] =
## returns ``a`` sorted by ``cmp`` in the specified ``order``.
## Returns ``a`` sorted by ``cmp`` in the specified ``order``.
##
## **See also:**
## * `sort func<#sort,openArray[T],proc(T,T)>`_
## * `sort proc<#sort,openArray[T]>`_
## * `sortedByIt template<#sortedByIt.t,untyped,untyped>`_
runnableExamples:
let
a = [2, 3, 1, 5, 4]
b = sorted(a, system.cmp)
c = sorted(a, system.cmp, Descending)
doAssert b == @[1, 2, 3, 4, 5]
doAssert c == @[5, 4, 3, 2, 1]
let
a = [2, 3, 1, 5, 4]
b = sorted(a, system.cmp[int])
c = sorted(a, system.cmp[int], Descending)
d = sorted(["adam", "dande", "brian", "cat"], system.cmp[string])
assert b == @[1, 2, 3, 4, 5]
assert c == @[5, 4, 3, 2, 1]
assert d == @["adam", "brian", "cat", "dande"]
result = newSeq[T](a.len)
for i in 0 .. a.high:
result[i] = a[i]
@@ -317,33 +456,48 @@ proc sorted*[T](a: openArray[T], cmp: proc(x, y: T): int {.closure.},
proc sorted*[T](a: openArray[T], order = SortOrder.Ascending): seq[T] =
## Shortcut version of ``sorted`` that uses ``system.cmp[T]`` as the comparison function.
##
## **See also:**
## * `sort func<#sort,openArray[T],proc(T,T)>`_
## * `sort proc<#sort,openArray[T]>`_
## * `sortedByIt template<#sortedByIt.t,untyped,untyped>`_
runnableExamples:
let
a = [2, 3, 1, 5, 4]
b = sorted(a)
c = sorted(a, Descending)
d = sorted(["adam", "dande", "brian", "cat"])
assert b == @[1, 2, 3, 4, 5]
assert c == @[5, 4, 3, 2, 1]
assert d == @["adam", "brian", "cat", "dande"]
sorted[T](a, system.cmp[T], order)
template sortedByIt*(seq1, op: untyped): untyped =
## Convenience template around the ``sorted`` proc to reduce typing.
##
## The template injects the ``it`` variable which you can use directly in an
## expression. Example:
##
## .. code-block:: nim
##
## type Person = tuple[name: string, age: int]
## var
## p1: Person = (name: "p1", age: 60)
## p2: Person = (name: "p2", age: 20)
## p3: Person = (name: "p3", age: 30)
## p4: Person = (name: "p4", age: 30)
## people = @[p1,p2,p4,p3]
##
## echo people.sortedByIt(it.name)
## expression.
##
## Because the underlying ``cmp()`` is defined for tuples you can do
## a nested sort like in the following example:
##
## .. code-block:: nim
##
## echo people.sortedByIt((it.age, it.name))
## a nested sort.
##
## **See also:**
## * `sort func<#sort,openArray[T],proc(T,T)>`_
## * `sort proc<#sort,openArray[T]>`_
## * `sorted proc<#sorted,openArray[T],proc(T,T)>`_ sorted by ``cmp`` in the specified order
## * `sorted proc<#sorted,openArray[T]>`_
runnableExamples:
type Person = tuple[name: string, age: int]
var
p1: Person = (name: "p1", age: 60)
p2: Person = (name: "p2", age: 20)
p3: Person = (name: "p3", age: 30)
p4: Person = (name: "p4", age: 30)
people = @[p1,p2,p4,p3]
assert people.sortedByIt(it.name) == @[(name: "p1", age: 60), (name: "p2", age: 20), (name: "p3", age: 30), (name: "p4", age: 30)]
# Nested sort
assert people.sortedByIt((it.age, it.name)) == @[(name: "p2", age: 20), (name: "p3", age: 30), (name: "p4", age: 30), (name: "p1", age: 60)]
var result = sorted(seq1, proc(x, y: type(seq1[0])): int =
var it {.inject.} = x
let a = op
@@ -355,9 +509,25 @@ template sortedByIt*(seq1, op: untyped): untyped =
func isSorted*[T](a: openArray[T],
cmp: proc(x, y: T): int {.closure.},
order = SortOrder.Ascending): bool =
## checks to see whether ``a`` is already sorted in ``order``
## Checks to see whether ``a`` is already sorted in ``order``
## using ``cmp`` for the comparison. Parameters identical
## to ``sort``.
##
## **See also:**
## * `isSorted proc<#isSorted,openArray[T]>`_
runnableExamples:
let
a = [2, 3, 1, 5, 4]
b = [1, 2, 3, 4, 5]
c = [5, 4, 3, 2, 1]
d = ["adam", "brian", "cat", "dande"]
e = ["adam", "dande", "brian", "cat"]
assert isSorted(a) == false
assert isSorted(b) == true
assert isSorted(c) == false
assert isSorted(c, Descending) == true
assert isSorted(d) == true
assert isSorted(e) == false
result = true
for i in 0..<len(a)-1:
if cmp(a[i],a[i+1]) * order > 0:
@@ -365,11 +535,30 @@ func isSorted*[T](a: openArray[T],
proc isSorted*[T](a: openarray[T], order = SortOrder.Ascending): bool =
## Shortcut version of ``isSorted`` that uses ``system.cmp[T]`` as the comparison function.
##
## **See also:**
## * `isSorted func<#isSorted,openArray[T],proc(T,T)>`_
runnableExamples:
let
a = [2, 3, 1, 5, 4]
b = [1, 2, 3, 4, 5]
c = [5, 4, 3, 2, 1]
d = ["adam", "brian", "cat", "dande"]
e = ["adam", "dande", "brian", "cat"]
assert isSorted(a) == false
assert isSorted(b) == true
assert isSorted(c) == false
assert isSorted(c, Descending) == true
assert isSorted(d) == true
assert isSorted(e) == false
isSorted(a, system.cmp[T], order)
proc product*[T](x: openArray[seq[T]]): seq[seq[T]] =
## produces the Cartesian product of the array. Warning: complexity
## Produces the Cartesian product of the array. Warning: complexity
## may explode.
runnableExamples:
assert product(@[@[1], @[2]]) == @[@[1, 2]]
assert product(@[@["A", "K"], @["Q"]]) == @[@["K", "Q"], @["A", "Q"]]
result = newSeq[seq[T]]()
if x.len == 0:
return
@@ -401,15 +590,26 @@ proc product*[T](x: openArray[seq[T]]): seq[seq[T]] =
indexes[index] -= 1
proc nextPermutation*[T](x: var openarray[T]): bool {.discardable.} =
## calculates the next lexicographic permutation, directly modifying ``x``.
## Calculates the next lexicographic permutation, directly modifying ``x``.
## The result is whether a permutation happened, otherwise we have reached
## the last-ordered permutation.
##
## .. code-block:: nim
## If you start with an unsorted array/seq, the repeated permutations
## will **not** give you all permutations but will stop at the last-ordered one.
##
## var v = @[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
## v.nextPermutation()
## echo v # @[0, 1, 2, 3, 4, 5, 6, 7, 9, 8]
## **See also:**
## * `prevPermutation proc<#prevPermutation,openArray[T]>`_
runnableExamples:
var v = @[0, 1, 2, 3]
assert v.nextPermutation() == true
assert v == @[0, 1, 3, 2]
assert v.nextPermutation() == true
assert v == @[0, 2, 1, 3]
assert v.prevPermutation() == true
assert v == @[0, 1, 3, 2]
v = @[3, 2, 1, 0]
assert v.nextPermutation() == false
assert v == @[3, 2, 1, 0]
if x.len < 2:
return false
@@ -430,15 +630,20 @@ proc nextPermutation*[T](x: var openarray[T]): bool {.discardable.} =
result = true
proc prevPermutation*[T](x: var openarray[T]): bool {.discardable.} =
## calculates the previous lexicographic permutation, directly modifying
## Calculates the previous lexicographic permutation, directly modifying
## ``x``. The result is whether a permutation happened, otherwise we have
## reached the first-ordered permutation.
##
## .. code-block:: nim
##
## var v = @[0, 1, 2, 3, 4, 5, 6, 7, 9, 8]
## v.prevPermutation()
## echo v # @[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
## **See also:**
## * `nextPermutation proc<#nextPermutation,openArray[T]>`_
runnableExamples:
var v = @[0, 1, 2, 3]
assert v.prevPermutation() == false
assert v == @[0, 1, 2, 3]
assert v.nextPermutation() == true
assert v == @[0, 1, 3, 2]
assert v.prevPermutation() == true
assert v == @[0, 1, 2, 3]
if x.len < 2:
return false
@@ -542,7 +747,7 @@ proc rotatedInternal[T](arg: openarray[T]; first, middle, last: int): seq[T] =
result[i] = arg[i]
proc rotateLeft*[T](arg: var openarray[T]; slice: HSlice[int, int]; dist: int): int {.discardable.} =
## performs a left rotation on a range of elements. If you want to rotate
## Performs a left rotation on a range of elements. If you want to rotate
## right, use a negative ``dist``. Specifically, ``rotateLeft`` rotates
## the elements at ``slice`` by ``dist`` positions.
##
@@ -553,6 +758,7 @@ proc rotateLeft*[T](arg: var openarray[T]; slice: HSlice[int, int]; dist: int):
##
## Elements outside of ``slice`` will be left unchanged.
## The time complexity is linear to ``slice.b - slice.a + 1``.
## If an invalid range (``HSlice``) is passed, it raises IndexError.
##
## ``slice``
## The indices of the element range that should be rotated.
@@ -561,11 +767,18 @@ proc rotateLeft*[T](arg: var openarray[T]; slice: HSlice[int, int]; dist: int):
## The distance in amount of elements that the data should be rotated.
## Can be negative, can be any number.
##
## .. code-block:: nim
##
## var list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
## list.rotateLeft(1 .. 8, 3)
## doAssert list == [0, 4, 5, 6, 7, 8, 1, 2, 3, 9, 10]
## **See also:**
## * `rotateLeft proc<#rotateLeft,openArray[T],int>`_ for a version which rotates the whole container
## * `rotatedLeft proc<#rotatedLeft,openArray[T],HSlice[int,int],int>`_ for a version which returns a ``seq[T]``
runnableExamples:
var a = [0, 1, 2, 3, 4, 5]
a.rotateLeft(1 .. 4, 3)
assert a == [0, 4, 1, 2, 3, 5]
a.rotateLeft(1 .. 4, 3)
assert a == [0, 3, 4, 1, 2, 5]
a.rotateLeft(1 .. 4, -3)
assert a == [0, 4, 1, 2, 3, 5]
doAssertRaises(IndexError, a.rotateLeft(1 .. 7, 2))
let sliceLen = slice.b + 1 - slice.a
let distLeft = ((dist mod sliceLen) + sliceLen) mod sliceLen
arg.rotateInternal(slice.a, slice.a+distLeft, slice.b + 1)
@@ -573,10 +786,18 @@ proc rotateLeft*[T](arg: var openarray[T]; slice: HSlice[int, int]; dist: int):
proc rotateLeft*[T](arg: var openarray[T]; dist: int): int {.discardable.} =
## Default arguments for slice, so that this procedure operates on the entire
## ``arg``, and not just on a part of it.
##
## **See also:**
## * `rotateLeft proc<#rotateLeft,openArray[T],HSlice[int,int],int>`_ for a version which rotates a range
## * `rotatedLeft proc<#rotatedLeft,openArray[T],int>`_ for a version which returns a ``seq[T]``
runnableExamples:
var a = [1, 2, 3, 4, 5]
a.rotateLeft(2)
doAssert a == [3, 4, 5, 1, 2]
var a = [1, 2, 3, 4, 5]
a.rotateLeft(2)
assert a == [3, 4, 5, 1, 2]
a.rotateLeft(4)
assert a == [2, 3, 4, 5, 1]
a.rotateLeft(-6)
assert a == [1, 2, 3, 4, 5]
let arglen = arg.len
let distLeft = ((dist mod arglen) + arglen) mod arglen
arg.rotateInternal(0, distLeft, arglen)
@@ -584,6 +805,28 @@ proc rotateLeft*[T](arg: var openarray[T]; dist: int): int {.discardable.} =
proc rotatedLeft*[T](arg: openarray[T]; slice: HSlice[int, int], dist: int): seq[T] =
## Same as ``rotateLeft``, just with the difference that it does
## not modify the argument. It creates a new ``seq`` instead.
##
## Elements outside of ``slice`` will be left unchanged.
## If an invalid range (``HSlice``) is passed, it raises IndexError.
##
## ``slice``
## The indices of the element range that should be rotated.
##
## ``dist``
## The distance in amount of elements that the data should be rotated.
## Can be negative, can be any number.
##
## **See also:**
## * `rotateLeft proc<#rotateLeft,openArray[T],HSlice[int,int],int>`_ for the in-place version of this proc
## * `rotatedLeft proc<#rotatedLeft,openArray[T],int>`_ for a version which rotates the whole container
runnableExamples:
var a = @[1, 2, 3, 4, 5]
a = rotatedLeft(a, 1 .. 4, 3)
assert a == @[1, 5, 2, 3, 4]
a = rotatedLeft(a, 1 .. 3, 2)
assert a == @[1, 3, 5, 2, 4]
a = rotatedLeft(a, 1 .. 3, -2)
assert a == @[1, 5, 2, 3, 4]
let sliceLen = slice.b + 1 - slice.a
let distLeft = ((dist mod sliceLen) + sliceLen) mod sliceLen
arg.rotatedInternal(slice.a, slice.a+distLeft, slice.b+1)
@@ -591,6 +834,18 @@ proc rotatedLeft*[T](arg: openarray[T]; slice: HSlice[int, int], dist: int): seq
proc rotatedLeft*[T](arg: openarray[T]; dist: int): seq[T] =
## Same as ``rotateLeft``, just with the difference that it does
## not modify the argument. It creates a new ``seq`` instead.
##
## **See also:**
## * `rotateLeft proc<#rotateLeft,openArray[T],int>`_ for the in-place version of this proc
## * `rotatedLeft proc<#rotatedLeft,openArray[T],HSlice[int,int],int>`_ for a version which rotates a range
runnableExamples:
var a = @[1, 2, 3, 4, 5]
a = rotatedLeft(a, 2)
assert a == @[3, 4, 5, 1, 2]
a = rotatedLeft(a, 4)
assert a == @[2, 3, 4, 5, 1]
a = rotatedLeft(a, -6)
assert a == @[1, 2, 3, 4, 5]
let arglen = arg.len
let distLeft = ((dist mod arglen) + arglen) mod arglen
arg.rotatedInternal(0, distLeft, arg.len)

View File

@@ -1674,7 +1674,7 @@ template asyncAddrInfoLoop(addrInfo: ptr AddrInfo, fd: untyped,
curFd = fdPerDomain[ord(domain)]
if curFd == osInvalidSocket.AsyncFD:
try:
curFd = newAsyncNativeSocket(domain, sockType, protocol)
curFd = createAsyncNativeSocket(domain, sockType, protocol)
except:
freeAddrInfo(addrInfo)
closeUnusedFds()
@@ -1806,47 +1806,6 @@ proc readAll*(future: FutureStream[string]): Future[string] {.async.} =
else:
break
proc recvLine*(socket: AsyncFD): Future[string] {.async, deprecated.} =
## Reads a line of data from ``socket``. Returned future will complete once
## a full line is read or an error occurs.
##
## If a full line is read ``\r\L`` is not
## added to ``line``, however if solely ``\r\L`` is read then ``line``
## will be set to it.
##
## If the socket is disconnected, ``line`` will be set to ``""``.
##
## If the socket is disconnected in the middle of a line (before ``\r\L``
## is read) then line will be set to ``""``.
## The partial line **will be lost**.
##
## **Warning**: This assumes that lines are delimited by ``\r\L``.
##
## **Note**: This procedure is mostly used for testing. You likely want to
## use ``asyncnet.recvLine`` instead.
##
## **Deprecated since version 0.15.0**: Use ``asyncnet.recvLine()`` instead.
template addNLIfEmpty(): typed =
if result.len == 0:
result.add("\c\L")
result = ""
var c = ""
while true:
c = await recv(socket, 1)
if c.len == 0:
return ""
if c == "\r":
c = await recv(socket, 1)
assert c == "\l"
addNLIfEmpty()
return
elif c == "\L":
addNLIfEmpty()
return
add(result, c)
proc callSoon*(cbproc: proc ()) =
## Schedule `cbproc` to be called as soon as possible.
## The callback is called when control returns to the event loop.
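##
## A minimal sketch (the 10 ms sleep is arbitrary; it simply gives the event
## loop one pass in which to run the scheduled callback):
##
## .. code-block:: nim
##   var fired = false
##   callSoon(proc () = fired = true)
##   waitFor sleepAsync(10)
##   doAssert fired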

View File

@@ -75,14 +75,54 @@
## waitFor(main())
import asyncdispatch, asyncnet, strutils, parseutils, os, times
from ftpclient import FtpBaseObj, ReplyError, FtpEvent
import asyncdispatch, asyncnet, nativesockets, strutils, parseutils, os, times
from net import BufferSize
type
AsyncFtpClientObj* = FtpBaseObj[AsyncSocket]
AsyncFtpClient* = ref AsyncFtpClientObj
AsyncFtpClient* = ref object
csock*: AsyncSocket
dsock*: AsyncSocket
user*, pass*: string
address*: string
port*: Port
jobInProgress*: bool
job*: FTPJob
dsockConnected*: bool
FTPJobType* = enum
JRetrText, JRetr, JStore
FtpJob = ref object
prc: proc (ftp: AsyncFtpClient, async: bool): bool {.nimcall, gcsafe.}
case typ*: FTPJobType
of JRetrText:
lines: string
of JRetr, JStore:
file: File
filename: string
total: BiggestInt # In bytes.
progress: BiggestInt # In bytes.
oneSecond: BiggestInt # Bytes transferred in one second.
lastProgressReport: float # Time
toStore: string # Data left to upload (Only used with async)
FTPEventType* = enum
EvTransferProgress, EvLines, EvRetr, EvStore
FTPEvent* = object ## Event
filename*: string
case typ*: FTPEventType
of EvLines:
lines*: string ## Lines that have been transferred.
of EvRetr, EvStore: ## Retr/Store operation finished.
nil
of EvTransferProgress:
bytesTotal*: BiggestInt ## Bytes total.
bytesFinished*: BiggestInt ## Bytes transferred.
speed*: BiggestInt ## Speed in bytes/s
currentJob*: FTPJobType ## The current job being performed.
ReplyError* = object of IOError
ProgressChangedProc* =
proc (total, progress: BiggestInt, speed: float):
@@ -183,7 +223,7 @@ proc listDirs*(ftp: AsyncFtpClient, dir = ""): Future[seq[string]] {.async.} =
## Returns a list of filenames in the given directory. If ``dir`` is "",
## the current directory is used. If ``async`` is true, this
## function will return immediately and it will be your job to
## use asyncio's ``poll`` to progress this operation.
## use asyncdispatch's ``poll`` to progress this operation.
await ftp.pasv()
assertReply(await(ftp.send("NLST " & dir.normalizePathSep)), ["125", "150"])
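# A standalone usage sketch for the async client; the host and credentials
# below are placeholders and require a reachable FTP server:
#
#   import asyncdispatch, asyncftpclient
#
#   proc main() {.async.} =
#     let ftp = newAsyncFtpClient("example.com", user = "anonymous", pass = "anonymous@")
#     await ftp.connect()
#     for d in await ftp.listDirs():
#       echo d
#
#   waitFor main()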

View File

@@ -245,6 +245,12 @@ proc asyncSingleProc(prc: NimNode): NimNode {.compileTime.} =
var outerProcBody = newNimNode(nnkStmtList, prc.body)
# Extract the documentation comment from the original procedure declaration.
# Note that we're not removing it from the body in order not to make this
# transformation even more complex.
if prc.body.len > 1 and prc.body[0].kind == nnkCommentStmt:
outerProcBody.add(prc.body[0])
# -> var retFuture = newFuture[T]()
var retFutureSym = genSym(nskVar, "retFuture")
var subRetType =

View File

@@ -8,7 +8,8 @@
#
## This module implements a series of low level methods for bit manipulation.
## By default, this module use compiler intrinsics to improve performance
## By default, this module uses compiler intrinsics where possible to improve performance
## on supported compilers: ``GCC``, ``LLVM_GCC``, ``CLANG``, ``VCC``, ``ICC``.
##
## The module will fall back to pure Nim procs in case the backend is not supported.
@@ -32,6 +33,63 @@ const useICC_builtins = defined(icc) and useBuiltins
const useVCC_builtins = defined(vcc) and useBuiltins
const arch64 = sizeof(int) == 8
when defined(nimHasalignOf):
import macros
type BitsRange*[T] = range[0..sizeof(T)*8-1]
## A range containing all bit positions for type ``T``
proc setMask*[T: SomeInteger](v: var T, mask: T) {.inline.} =
## Returns ``v``, with all the ``1`` bits from ``mask`` set to 1
v = v or mask
proc clearMask*[T: SomeInteger](v: var T, mask: T) {.inline.} =
## Returns ``v``, with all the ``1`` bits from ``mask`` set to 0
v = v and not mask
proc flipMask*[T: SomeInteger](v: var T, mask: T) {.inline.} =
## Returns ``v``, with all the ``1`` bits from ``mask`` flipped
v = v xor mask
proc setBit*[T: SomeInteger](v: var T, bit: BitsRange[T]) {.inline.} =
## Returns ``v``, with the bit at position ``bit`` set to 1
v.setMask(1.T shl bit)
proc clearBit*[T: SomeInteger](v: var T, bit: BitsRange[T]) {.inline.} =
## Returns ``v``, with the bit at position ``bit`` set to 0
v.clearMask(1.T shl bit)
proc flipBit*[T: SomeInteger](v: var T, bit: BitsRange[T]) {.inline.} =
## Returns ``v``, with the bit at position ``bit`` flipped
v.flipMask(1.T shl bit)
macro setBits*(v: typed, bits: varargs[typed]): untyped =
## Returns ``v``, with the bits at positions ``bits`` set to 1
bits.expectKind(nnkBracket)
result = newStmtList()
for bit in bits:
result.add newCall("setBit", v, bit)
macro clearBits*(v: typed, bits: varargs[typed]): untyped =
## Returns ``v``, with the bits at positions ``bits`` set to 0
bits.expectKind(nnkBracket)
result = newStmtList()
for bit in bits:
result.add newCall("clearBit", v, bit)
macro flipBits*(v: typed, bits: varargs[typed]): untyped =
## Returns ``v``, with the bits at positions ``bits`` flipped
bits.expectKind(nnkBracket)
result = newStmtList()
for bit in bits:
result.add newCall("flipBit", v, bit)
proc testBit*[T: SomeInteger](v: var T, bit: BitsRange[T]): bool {.inline.} =
## Returns true if the bit in ``v`` at position ``bit`` is set to 1
let mask = 1.T shl bit
return (v and mask) == mask
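# A quick sketch of the helpers above on an 8-bit value (assumes a compiler
# recent enough to define ``nimHasalignOf``, so these procs are compiled in):
#
#   var v = 0b0000_0000'u8
#   v.setBit(1)
#   doAssert v == 0b0000_0010'u8
#   v.setBits(3, 5)
#   doAssert v == 0b0010_1010'u8
#   v.flipBit(1)
#   doAssert v == 0b0010_1000'u8
#   doAssert v.testBit(3)
#   v.clearMask(0b0000_1000'u8)
#   doAssert v == 0b0010_0000'u8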
# #### Pure Nim version ####
proc firstSetBit_nim(x: uint32): int {.inline, nosideeffect.} =

View File

@@ -1,4 +1,3 @@
#
#
# Nim's Runtime Library
@@ -7,32 +6,74 @@
# See the file "copying.txt", included in this
# distribution, for details about the copyright.
##[ Heap queue algorithm (a.k.a. priority queue). Ported from Python heapq.
##[
The `heapqueue` module implements a
`heap data structure<https://en.wikipedia.org/wiki/Heap_(data_structure)>`_
that can be used as a
`priority queue<https://en.wikipedia.org/wiki/Priority_queue>`_.
Heaps are arrays for which `a[k] <= a[2*k+1]` and `a[k] <= a[2*k+2]` for
all `k`, counting elements from 0. The interesting property of a heap is that
`a[0]` is always its smallest element.
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
Basic usage
-----------
.. code-block:: Nim
import heapqueue
var heap = initHeapQueue[int]()
heap.push(8)
heap.push(2)
heap.push(5)
# The first element is the lowest element
assert heap[0] == 2
# Remove and return the lowest element
assert heap.pop() == 2
# The lowest element remaining is 5
assert heap[0] == 5
Usage with custom object
------------------------
To use a `HeapQueue` with a custom object, the `<` operator must be
implemented.
.. code-block:: Nim
import heapqueue
type Job = object
priority: int
proc `<`(a, b: Job): bool = a.priority < b.priority
var jobs = initHeapQueue[Job]()
jobs.push(Job(priority: 1))
jobs.push(Job(priority: 2))
assert jobs[0].priority == 1
]##
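# A short sketch of the ordering guarantee described above: repeatedly popping
# drains the heap in ascending order (uses the API declared below; the values
# are arbitrary):
#
#   var h = initHeapQueue[int]()
#   for x in [9, 5, 8, 2]: h.push(x)
#   var drained: seq[int]
#   while h.len > 0: drained.add(h.pop())
#   assert drained == @[2, 5, 8, 9]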
type HeapQueue*[T] = distinct seq[T]
type HeapQueue*[T] = object
## A heap queue, commonly known as a priority queue.
data: seq[T]
proc newHeapQueue*[T](): HeapQueue[T] {.inline.} = HeapQueue[T](newSeq[T]())
proc newHeapQueue*[T](h: var HeapQueue[T]) {.inline.} = h = HeapQueue[T](newSeq[T]())
proc initHeapQueue*[T](): HeapQueue[T] =
## Create a new empty heap.
discard
proc len*[T](h: HeapQueue[T]): int {.inline.} = seq[T](h).len
proc `[]`*[T](h: HeapQueue[T], i: int): T {.inline.} = seq[T](h)[i]
proc `[]=`[T](h: var HeapQueue[T], i: int, v: T) {.inline.} = seq[T](h)[i] = v
proc add[T](h: var HeapQueue[T], v: T) {.inline.} = seq[T](h).add(v)
proc len*[T](heap: HeapQueue[T]): int {.inline.} =
## Return the number of elements of `heap`.
heap.data.len
proc `[]`*[T](heap: HeapQueue[T], i: Natural): T {.inline.} =
## Access the i-th element of `heap`.
heap.data[i]
proc heapCmp[T](x, y: T): bool {.inline.} =
return (x < y)
# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
# is the index of a leaf with a possibly out-of-order value. Restore the
# heap invariant.
proc siftdown[T](heap: var HeapQueue[T], startpos, p: int) =
## 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
## is the index of a leaf with a possibly out-of-order value. Restore the
## heap invariant.
var pos = p
var newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
@@ -41,11 +82,11 @@ proc siftdown[T](heap: var HeapQueue[T], startpos, p: int) =
let parentpos = (pos - 1) shr 1
let parent = heap[parentpos]
if heapCmp(newitem, parent):
heap[pos] = parent
heap.data[pos] = parent
pos = parentpos
else:
break
heap[pos] = newitem
heap.data[pos] = newitem
proc siftup[T](heap: var HeapQueue[T], p: int) =
let endpos = len(heap)
@@ -60,48 +101,50 @@ proc siftup[T](heap: var HeapQueue[T], p: int) =
if rightpos < endpos and not heapCmp(heap[childpos], heap[rightpos]):
childpos = rightpos
# Move the smaller child up.
heap[pos] = heap[childpos]
heap.data[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
heap.data[pos] = newitem
siftdown(heap, startpos, pos)
proc push*[T](heap: var HeapQueue[T], item: T) =
## Push item onto heap, maintaining the heap invariant.
(seq[T](heap)).add(item)
## Push `item` onto heap, maintaining the heap invariant.
heap.data.add(item)
siftdown(heap, 0, len(heap)-1)
proc pop*[T](heap: var HeapQueue[T]): T =
## Pop the smallest item off the heap, maintaining the heap invariant.
let lastelt = seq[T](heap).pop()
## Pop and return the smallest item from `heap`,
## maintaining the heap invariant.
let lastelt = heap.data.pop()
if heap.len > 0:
result = heap[0]
heap[0] = lastelt
heap.data[0] = lastelt
siftup(heap, 0)
else:
result = lastelt
proc del*[T](heap: var HeapQueue[T], index: int) =
## Removes element at `index`, maintaining the heap invariant.
swap(seq[T](heap)[^1], seq[T](heap)[index])
proc del*[T](heap: var HeapQueue[T], index: Natural) =
## Removes the element at `index` from `heap`, maintaining the heap invariant.
swap(heap.data[^1], heap.data[index])
let newLen = heap.len - 1
seq[T](heap).setLen(newLen)
heap.data.setLen(newLen)
if index < newLen:
heap.siftup(index)
proc replace*[T](heap: var HeapQueue[T], item: T): T =
## Pop and return the current smallest value, and add the new item.
## This is more efficient than pop() followed by push(), and can be
## more appropriate when using a fixed-size heap. Note that the value
## returned may be larger than item! That constrains reasonable uses of
## more appropriate when using a fixed-size heap. Note that the value
## returned may be larger than item! That constrains reasonable uses of
## this routine unless written as part of a conditional replacement:
##
## .. code-block:: nim
## if item > heap[0]:
## item = replace(heap, item)
result = heap[0]
heap[0] = item
heap.data[0] = item
siftup(heap, 0)
proc pushpop*[T](heap: var HeapQueue[T], item: T): T =
@@ -111,6 +154,36 @@ proc pushpop*[T](heap: var HeapQueue[T], item: T): T =
siftup(heap, 0)
return item
proc clear*[T](heap: var HeapQueue[T]) =
## Remove all elements from `heap`, making it empty.
runnableExamples:
var heap = initHeapQueue[int]()
heap.push(1)
heap.clear()
assert heap.len == 0
heap.data.setLen(0)
proc `$`*[T](heap: HeapQueue[T]): string =
## Turn a heap into its string representation.
runnableExamples:
var heap = initHeapQueue[int]()
heap.push(1)
heap.push(2)
assert $heap == "[1, 2]"
result = "["
for x in heap.data:
if result.len > 1: result.add(", ")
result.addQuoted(x)
result.add("]")
proc newHeapQueue*[T](): HeapQueue[T] {.deprecated.} =
## **Deprecated since v0.20.0:** use ``initHeapQueue`` instead.
initHeapQueue[T]()
proc newHeapQueue*[T](heap: var HeapQueue[T]) {.deprecated.} =
## **Deprecated since v0.20.0:** use ``clear`` instead.
heap.clear()
when isMainModule:
proc toSortedSeq[T](h: HeapQueue[T]): seq[T] =
var tmp = h
@@ -119,7 +192,7 @@ when isMainModule:
result.add(pop(tmp))
block: # Simple sanity test
var heap = newHeapQueue[int]()
var heap = initHeapQueue[int]()
let data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]
for item in data:
push(heap, item)
@@ -127,27 +200,27 @@ when isMainModule:
doAssert(heap.toSortedSeq == @[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
block: # Test del
var heap = newHeapQueue[int]()
var heap = initHeapQueue[int]()
let data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]
for item in data: push(heap, item)
heap.del(0)
doAssert(heap[0] == 1)
heap.del(seq[int](heap).find(7))
heap.del(heap.data.find(7))
doAssert(heap.toSortedSeq == @[1, 2, 3, 4, 5, 6, 8, 9])
heap.del(seq[int](heap).find(5))
heap.del(heap.data.find(5))
doAssert(heap.toSortedSeq == @[1, 2, 3, 4, 6, 8, 9])
heap.del(seq[int](heap).find(6))
heap.del(heap.data.find(6))
doAssert(heap.toSortedSeq == @[1, 2, 3, 4, 8, 9])
heap.del(seq[int](heap).find(2))
heap.del(heap.data.find(2))
doAssert(heap.toSortedSeq == @[1, 3, 4, 8, 9])
block: # Test del last
var heap = newHeapQueue[int]()
var heap = initHeapQueue[int]()
let data = [1, 2, 3]
for item in data: push(heap, item)

View File

@@ -7,13 +7,16 @@
# distribution, for details about the copyright.
#
## The ``intsets`` module implements an efficient int set implemented as a
## The ``intsets`` module implements an efficient set of `int`, represented as a
## `sparse bit set`:idx:.
## **Note**: Currently the assignment operator ``=`` for ``intsets``
##
## **Note**: Currently the assignment operator ``=`` for ``IntSet``
## performs some rather meaningless shallow copy. Since Nim currently does
## not allow the assignment operator to be overloaded, use ``assign`` to
## get a deep copy.
## not allow the assignment operator to be overloaded, use `assign proc
## <#assign,IntSet,IntSet>`_ to get a deep copy.
##
## **See also:**
## * `sets module <sets.html>`_ for more general hash sets
import
@@ -40,7 +43,7 @@ type
bits: array[0..IntsPerTrunk - 1, BitScalar] # a bit vector
TrunkSeq = seq[PTrunk]
IntSet* = object ## an efficient set of 'int' implemented as a sparse bit set
IntSet* = object ## An efficient set of `int` implemented as a sparse bit set.
elems: int # only valid for small numbers
counter, max: int
head: PTrunk
@@ -96,18 +99,33 @@ proc intSetPut(t: var IntSet, key: int): PTrunk =
t.head = result
t.data[h] = result
proc contains*(s: IntSet, key: int): bool =
## Returns true iff `key` is in `s`.
proc bitincl(s: var IntSet, key: int) {.inline.} =
var t = intSetPut(s, `shr`(key, TrunkShift))
var u = key and TrunkMask
t.bits[`shr`(u, IntShift)] = t.bits[`shr`(u, IntShift)] or
`shl`(1, u and IntMask)
proc exclImpl(s: var IntSet, key: int) =
if s.elems <= s.a.len:
for i in 0..<s.elems:
if s.a[i] == key: return true
if s.a[i] == key:
s.a[i] = s.a[s.elems-1]
dec s.elems
return
else:
var t = intSetGet(s, `shr`(key, TrunkShift))
if t != nil:
var u = key and TrunkMask
result = (t.bits[`shr`(u, IntShift)] and `shl`(1, u and IntMask)) != 0
else:
result = false
t.bits[`shr`(u, IntShift)] = t.bits[`shr`(u, IntShift)] and
not `shl`(1, u and IntMask)
template dollarImpl(): untyped =
result = "{"
for key in items(s):
if result.len > 1: result.add(", ")
result.add($key)
result.add("}")
iterator items*(s: IntSet): int {.inline.} =
## Iterates over any included element of `s`.
@@ -131,14 +149,62 @@ iterator items*(s: IntSet): int {.inline.} =
inc(i)
r = r.next
proc bitincl(s: var IntSet, key: int) {.inline.} =
var t = intSetPut(s, `shr`(key, TrunkShift))
var u = key and TrunkMask
t.bits[`shr`(u, IntShift)] = t.bits[`shr`(u, IntShift)] or
`shl`(1, u and IntMask)
proc initIntSet*: IntSet =
## Returns an empty IntSet.
runnableExamples:
var a = initIntSet()
assert len(a) == 0
# newSeq(result.data, InitIntSetSize)
# result.max = InitIntSetSize-1
result = IntSet(
elems: 0,
counter: 0,
max: 0,
head: nil,
data: when defined(nimNoNilSeqs): @[] else: nil)
# a: array[0..33, int] # profiling shows that 34 elements are enough
proc contains*(s: IntSet, key: int): bool =
## Returns true if `key` is in `s`.
##
## This allows the usage of `in` operator.
runnableExamples:
var a = initIntSet()
for x in [1, 3, 5]:
a.incl(x)
assert a.contains(3)
assert 3 in a
assert(not a.contains(8))
assert 8 notin a
if s.elems <= s.a.len:
for i in 0..<s.elems:
if s.a[i] == key: return true
else:
var t = intSetGet(s, `shr`(key, TrunkShift))
if t != nil:
var u = key and TrunkMask
result = (t.bits[`shr`(u, IntShift)] and `shl`(1, u and IntMask)) != 0
else:
result = false
proc incl*(s: var IntSet, key: int) =
## Includes an element `key` in `s`.
##
## This doesn't do anything if `key` is already in `s`.
##
## See also:
## * `excl proc <#excl,IntSet,int>`_ for excluding an element
## * `incl proc <#incl,IntSet,IntSet>`_ for including other set
## * `containsOrIncl proc <#containsOrIncl,IntSet,int>`_
runnableExamples:
var a = initIntSet()
a.incl(3)
a.incl(3)
assert len(a) == 1
if s.elems <= s.a.len:
for i in 0..<s.elems:
if s.a[i] == key: return
@@ -156,40 +222,42 @@ proc incl*(s: var IntSet, key: int) =
proc incl*(s: var IntSet, other: IntSet) =
## Includes all elements from `other` into `s`.
##
## This is the in-place version of `s + other <#+,IntSet,IntSet>`_.
##
## See also:
## * `excl proc <#excl,IntSet,IntSet>`_ for excluding other set
## * `incl proc <#incl,IntSet,int>`_ for including an element
## * `containsOrIncl proc <#containsOrIncl,IntSet,int>`_
runnableExamples:
var
a = initIntSet()
b = initIntSet()
a.incl(1)
b.incl(5)
a.incl(b)
assert len(a) == 2
assert 5 in a
for item in other: incl(s, item)
proc exclImpl(s: var IntSet, key: int) =
if s.elems <= s.a.len:
for i in 0..<s.elems:
if s.a[i] == key:
s.a[i] = s.a[s.elems-1]
dec s.elems
return
else:
var t = intSetGet(s, `shr`(key, TrunkShift))
if t != nil:
var u = key and TrunkMask
t.bits[`shr`(u, IntShift)] = t.bits[`shr`(u, IntShift)] and
not `shl`(1, u and IntMask)
proc excl*(s: var IntSet, key: int) =
## Excludes `key` from the set `s`.
exclImpl(s, key)
proc excl*(s: var IntSet, other: IntSet) =
## Excludes all elements from `other` from `s`.
for item in other: excl(s, item)
proc missingOrExcl*(s: var IntSet, key: int) : bool =
## Returns true if `s` does not contain `key`, otherwise
## `key` is removed from `s` and false is returned.
var count = s.elems
exclImpl(s, key)
result = count == s.elems
proc containsOrIncl*(s: var IntSet, key: int): bool =
## Returns true if `s` contains `key`, otherwise `key` is included in `s`
## and false is returned.
## Includes `key` in the set `s` and tells if `key` was already in `s`.
##
## The difference with regards to the `incl proc <#incl,IntSet,int>`_ is
## that this proc returns `true` if `s` already contained `key`. The
## proc will return `false` if `key` was added as a new value to `s` during
## this call.
##
## See also:
## * `incl proc <#incl,IntSet,int>`_ for including an element
## * `missingOrExcl proc <#missingOrExcl,IntSet,int>`_
runnableExamples:
var a = initIntSet()
assert a.containsOrIncl(3) == false
assert a.containsOrIncl(3) == true
assert a.containsOrIncl(4) == false
if s.elems <= s.a.len:
for i in 0..<s.elems:
if s.a[i] == key:
@@ -208,25 +276,76 @@ proc containsOrIncl*(s: var IntSet, key: int): bool =
incl(s, key)
result = false
proc initIntSet*: IntSet =
## Returns an empty IntSet. Example:
proc excl*(s: var IntSet, key: int) =
## Excludes `key` from the set `s`.
##
## .. code-block ::
## var a = initIntSet()
## a.incl(2)
## This doesn't do anything if `key` is not found in `s`.
##
## See also:
## * `incl proc <#incl,IntSet,int>`_ for including an element
## * `excl proc <#excl,IntSet,IntSet>`_ for excluding other set
## * `missingOrExcl proc <#missingOrExcl,IntSet,int>`_
runnableExamples:
var a = initIntSet()
a.incl(3)
a.excl(3)
a.excl(3)
a.excl(99)
assert len(a) == 0
exclImpl(s, key)
# newSeq(result.data, InitIntSetSize)
# result.max = InitIntSetSize-1
result = IntSet(
elems: 0,
counter: 0,
max: 0,
head: nil,
data: when defined(nimNoNilSeqs): @[] else: nil)
# a: array[0..33, int] # profiling shows that 34 elements are enough
proc excl*(s: var IntSet, other: IntSet) =
## Excludes all elements from `other` from `s`.
##
## This is the in-place version of `s - other <#-,IntSet,IntSet>`_.
##
## See also:
## * `incl proc <#incl,IntSet,IntSet>`_ for including other set
## * `excl proc <#excl,IntSet,int>`_ for excluding an element
## * `missingOrExcl proc <#missingOrExcl,IntSet,int>`_
runnableExamples:
var
a = initIntSet()
b = initIntSet()
a.incl(1)
a.incl(5)
b.incl(5)
a.excl(b)
assert len(a) == 1
assert 5 notin a
for item in other: excl(s, item)
proc missingOrExcl*(s: var IntSet, key: int) : bool =
## Excludes `key` from the set `s` and tells if `key` was already missing from `s`.
##
## The difference with regards to the `excl proc <#excl,IntSet,int>`_ is
## that this proc returns `true` if `key` was missing from `s`.
## The proc will return `false` if `key` was in `s` and it was removed
## during this call.
##
## See also:
## * `excl proc <#excl,IntSet,int>`_ for excluding an element
## * `excl proc <#excl,IntSet,IntSet>`_ for excluding other set
## * `containsOrIncl proc <#containsOrIncl,IntSet,int>`_
runnableExamples:
var a = initIntSet()
a.incl(5)
assert a.missingOrExcl(5) == false
assert a.missingOrExcl(5) == true
var count = s.elems
exclImpl(s, key)
result = count == s.elems
proc clear*(result: var IntSet) =
## Clears the IntSet back to an empty state.
runnableExamples:
var a = initIntSet()
a.incl(5)
a.incl(7)
clear(a)
assert len(a) == 0
# setLen(result.data, InitIntSetSize)
# for i in 0..InitIntSetSize-1: result.data[i] = nil
@@ -243,8 +362,17 @@ proc clear*(result: var IntSet) =
proc isNil*(x: IntSet): bool {.inline.} = x.head.isNil and x.elems == 0
proc assign*(dest: var IntSet, src: IntSet) =
## copies `src` to `dest`. `dest` does not need to be initialized by
## `initIntSet`.
## Copies `src` to `dest`.
## `dest` does not need to be initialized by `initIntSet proc <#initIntSet>`_.
runnableExamples:
var
a = initIntSet()
b = initIntSet()
b.incl(5)
b.incl(7)
a.assign(b)
assert len(a) == 2
if src.elems <= src.a.len:
when defined(nimNoNilSeqs):
dest.data = @[]
@@ -276,11 +404,33 @@ proc assign*(dest: var IntSet, src: IntSet) =
proc union*(s1, s2: IntSet): IntSet =
## Returns the union of the sets `s1` and `s2`.
##
## The same as `s1 + s2 <#+,IntSet,IntSet>`_.
runnableExamples:
var
a = initIntSet()
b = initIntSet()
a.incl(1); a.incl(2); a.incl(3)
b.incl(3); b.incl(4); b.incl(5)
assert union(a, b).len == 5
## {1, 2, 3, 4, 5}
result.assign(s1)
incl(result, s2)
proc intersection*(s1, s2: IntSet): IntSet =
## Returns the intersection of the sets `s1` and `s2`.
##
## The same as `s1 * s2 <#*,IntSet,IntSet>`_.
runnableExamples:
var
a = initIntSet()
b = initIntSet()
a.incl(1); a.incl(2); a.incl(3)
b.incl(3); b.incl(4); b.incl(5)
assert intersection(a, b).len == 1
## {3}
result = initIntSet()
for item in s1:
if contains(s2, item):
@@ -288,6 +438,17 @@ proc intersection*(s1, s2: IntSet): IntSet =
proc difference*(s1, s2: IntSet): IntSet =
## Returns the difference of the sets `s1` and `s2`.
##
## The same as `s1 - s2 <#-,IntSet,IntSet>`_.
runnableExamples:
var
a = initIntSet()
b = initIntSet()
a.incl(1); a.incl(2); a.incl(3)
b.incl(3); b.incl(4); b.incl(5)
assert difference(a, b).len == 2
## {1, 2}
result = initIntSet()
for item in s1:
if not contains(s2, item):
@@ -295,31 +456,50 @@ proc difference*(s1, s2: IntSet): IntSet =
proc symmetricDifference*(s1, s2: IntSet): IntSet =
## Returns the symmetric difference of the sets `s1` and `s2`.
runnableExamples:
var
a = initIntSet()
b = initIntSet()
a.incl(1); a.incl(2); a.incl(3)
b.incl(3); b.incl(4); b.incl(5)
assert symmetricDifference(a, b).len == 4
## {1, 2, 4, 5}
result.assign(s1)
for item in s2:
if containsOrIncl(result, item): excl(result, item)
proc `+`*(s1, s2: IntSet): IntSet {.inline.} =
## Alias for `union(s1, s2) <#union>`_.
## Alias for `union(s1, s2) <#union,IntSet,IntSet>`_.
result = union(s1, s2)
proc `*`*(s1, s2: IntSet): IntSet {.inline.} =
## Alias for `intersection(s1, s2) <#intersection>`_.
## Alias for `intersection(s1, s2) <#intersection,IntSet,IntSet>`_.
result = intersection(s1, s2)
proc `-`*(s1, s2: IntSet): IntSet {.inline.} =
## Alias for `difference(s1, s2) <#difference>`_.
## Alias for `difference(s1, s2) <#difference,IntSet,IntSet>`_.
result = difference(s1, s2)
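# A quick sketch of the operator aliases above (values are arbitrary):
#
#   var a = initIntSet()
#   var b = initIntSet()
#   a.incl(1); a.incl(2); a.incl(3)
#   b.incl(3); b.incl(4)
#   doAssert 4 in a + b       # union is {1, 2, 3, 4}
#   doAssert 1 notin a * b    # intersection keeps only the shared 3
#   doAssert 3 notin a - b    # difference removes the shared 3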
proc disjoint*(s1, s2: IntSet): bool =
## Returns true if the sets `s1` and `s2` have no items in common.
runnableExamples:
var
a = initIntSet()
b = initIntSet()
a.incl(1); a.incl(2)
b.incl(2); b.incl(3)
assert disjoint(a, b) == false
b.excl(2)
assert disjoint(a, b) == true
for item in s1:
if contains(s2, item):
return false
return true
proc len*(s: IntSet): int {.inline.} =
## Returns the number of keys in `s`.
## Returns the number of elements in `s`.
if s.elems < s.a.len:
result = s.elems
else:
@@ -328,35 +508,59 @@ proc len*(s: IntSet): int {.inline.} =
inc(result)
proc card*(s: IntSet): int {.inline.} =
## Alias for `len() <#len>` _.
## Alias for `len() <#len,IntSet>`_.
result = s.len()
proc `<=`*(s1, s2: IntSet): bool =
## Returns true iff `s1` is subset of `s2`.
## Returns true if `s1` is a subset of `s2`.
##
## A subset `s1` has all of its elements in `s2`, and `s2` doesn't necessarily
## have more elements than `s1`. That is, `s1` can be equal to `s2`.
runnableExamples:
var
a = initIntSet()
b = initIntSet()
a.incl(1)
b.incl(1); b.incl(2)
assert a <= b
a.incl(2)
assert a <= b
a.incl(3)
assert(not (a <= b))
for item in s1:
if not s2.contains(item):
return false
return true
proc `<`*(s1, s2: IntSet): bool =
## Returns true iff `s1` is proper subset of `s2`.
## Returns true if `s1` is a proper subset of `s2`.
##
## A strict or proper subset `s1` has all of its elements in `s2`, but `s2` has
## more elements than `s1`.
runnableExamples:
var
a = initIntSet()
b = initIntSet()
a.incl(1)
b.incl(1); b.incl(2)
assert a < b
a.incl(2)
assert(not (a < b))
return s1 <= s2 and not (s2 <= s1)
proc `==`*(s1, s2: IntSet): bool =
## Returns true if both `s` and `t` have the same members and set size.
## Returns true if both `s1` and `s2` have the same elements and set size.
return s1 <= s2 and s2 <= s1
template dollarImpl(): untyped =
result = "{"
for key in items(s):
if result.len > 1: result.add(", ")
result.add($key)
result.add("}")
proc `$`*(s: IntSet): string =
## The `$` operator for int sets.
##
## Converts the set `s` to a string, mostly for logging and printing purposes.
dollarImpl()
when isMainModule:
import sequtils, algorithm

View File

@@ -93,25 +93,25 @@ type
SinglyLinkedList*[T] = object ## A singly linked list.
##
## Use `initSinglyLinkedList proc <#initSinglyLinkedList,>`_ to create
## Use `initSinglyLinkedList proc <#initSinglyLinkedList>`_ to create
## a new empty list.
head*, tail*: SinglyLinkedNode[T]
DoublyLinkedList*[T] = object ## A doubly linked list.
##
## Use `initDoublyLinkedList proc <#initDoublyLinkedList,>`_ to create
## Use `initDoublyLinkedList proc <#initDoublyLinkedList>`_ to create
## a new empty list.
head*, tail*: DoublyLinkedNode[T]
SinglyLinkedRing*[T] = object ## A singly linked ring.
##
## Use `initSinglyLinkedRing proc <#initSinglyLinkedRing,>`_ to create
## Use `initSinglyLinkedRing proc <#initSinglyLinkedRing>`_ to create
## a new empty ring.
head*, tail*: SinglyLinkedNode[T]
DoublyLinkedRing*[T] = object ## A doubly linked ring.
##
## Use `initDoublyLinkedRing proc <#initDoublyLinkedRing,>`_ to create
## Use `initDoublyLinkedRing proc <#initDoublyLinkedRing>`_ to create
## a new empty ring.
head*: DoublyLinkedNode[T]

View File

@@ -846,6 +846,15 @@ template mapIt*(s: typed, op: untyped): untyped =
result.add(op)
result
template mapIt*(s, typ, op: untyped): untyped {.error:
"Use 'mapIt(seq1, op)' - without specifying the type of the returned seqence".} =
## **Deprecated since version 0.12.0:** Use the `mapIt(seq1, op) template
## <#mapIt.t,typed,untyped>`_ instead.
var result: seq[typ] = @[]
for it {.inject.} in items(s):
result.add(op)
result
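# A minimal sketch of the supported form, with ``it`` injected by the template
# (the numbers are arbitrary):
#
#   let nums = @[1, 2, 3, 4]
#   assert nums.mapIt(it * 2) == @[2, 4, 6, 8]
#   assert nums.mapIt($it) == @["1", "2", "3", "4"]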
template applyIt*(varSeq, op: untyped) =
## Convenience template around the mutable ``apply`` proc to reduce typing.
##

File diff suppressed because it is too large Load Diff

View File

@@ -17,6 +17,7 @@
## ``http://google.com``:
##
## .. code-block:: Nim
## import httpClient
## var client = newHttpClient()
## echo client.getContent("http://google.com")
##
@@ -24,6 +25,7 @@
## ``AsyncHttpClient``:
##
## .. code-block:: Nim
## import httpClient
## var client = newAsyncHttpClient()
## echo await client.getContent("http://google.com")
##
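## A runnable form of the asynchronous snippet wraps the ``await`` in an
## async proc (the URL is only an example):
##
## .. code-block:: Nim
##   import asyncdispatch, httpClient
##
##   proc fetch() {.async.} =
##     var client = newAsyncHttpClient()
##     try:
##       echo await client.getContent("http://example.com")
##     finally:
##       client.close()
##
##   waitFor fetch()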

View File

@@ -625,7 +625,7 @@ proc escapeJsonUnquoted*(s: string; result: var string) =
of '\r': result.add("\\r")
of '"': result.add("\\\"")
of '\0'..'\7': result.add("\\u000" & $ord(c))
of '\14'..'\31': result.add("\\u00" & $ord(c))
of '\14'..'\31': result.add("\\u00" & toHex(ord(c), 2))
of '\\': result.add("\\\\")
else: result.add(c)
@@ -1328,6 +1328,12 @@ proc createConstructor(typeSym, jsonNode: NimNode): NimNode =
let obj = getType(typeSym[1])
result = processType(newIdentNode(typeName), obj, jsonNode, true)
of "range":
let typeNode = typeSym
# Deduce the base type from one of the endpoints
let baseType = getType(typeNode[1])
result = createConstructor(baseType, jsonNode)
of "seq":
let seqT = typeSym[1]
let forLoopI = genSym(nskForVar, "i")
@@ -1686,9 +1692,9 @@ when isMainModule:
doAssert(parsed2{"repository", "description"}.str=="IRC Library for Haskell", "Couldn't fetch via multiply nested key using {}")
doAssert escapeJsonUnquoted("\10Foo🎃barÄ") == "\\nFoo🎃barÄ"
doAssert escapeJsonUnquoted("\0\7\20") == "\\u0000\\u0007\\u0020" # for #7887
doAssert escapeJsonUnquoted("\0\7\20") == "\\u0000\\u0007\\u0014" # for #7887
doAssert escapeJson("\10Foo🎃barÄ") == "\"\\nFoo🎃barÄ\""
doAssert escapeJson("\0\7\20") == "\"\\u0000\\u0007\\u0020\"" # for #7887
doAssert escapeJson("\0\7\20") == "\"\\u0000\\u0007\\u0014\"" # for #7887
# Test with extra data
when not defined(js):
@@ -1721,3 +1727,21 @@ when isMainModule:
foo = js.to Foo
doAssert(foo.b == "abc")
# Generate constructors for range[T] types
block:
type
Q1 = range[0..10]
Q2 = range[0'i8..10'i8]
Q3 = range[0'u16..10'u16]
X = object
m1: Q1
m2: Q2
m3: Q3
let
obj = X(m1: 1, m2: 2'i8, m3: 3'u16)
jsonObj = %obj
desObj = to(jsonObj, type(obj))
doAssert(desObj == obj)

View File

@@ -372,9 +372,8 @@ proc `==`*(x, y: MemSlice): bool =
proc `$`*(ms: MemSlice): string {.inline.} =
## Return a Nim string built from a MemSlice.
var buf = newString(ms.size)
copyMem(addr(buf[0]), ms.data, ms.size)
result = buf
result.setLen(ms.size)
copyMem(addr(result[0]), ms.data, ms.size)
iterator memSlices*(mfile: MemFile, delim='\l', eat='\r'): MemSlice {.inline.} =
## Iterates over [optional `eat`] `delim`-delimited slices in MemFile `mfile`.
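##
## A minimal usage sketch (``"example.txt"`` is a placeholder path):
##
## .. code-block:: nim
##   var mf = memfiles.open("example.txt")
##   for slice in memSlices(mf):
##     echo $slice        # the `$` proc above copies the slice's bytes into a string
##   mf.close()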

View File

@@ -10,13 +10,18 @@
## This module implements a simple high performance `CSV`:idx:
## (`comma separated value`:idx:) parser.
##
## Example: How to use the parser
## ==============================
## Basic usage
## ===========
##
## .. code-block:: nim
## import os, parsecsv, streams
## import parsecsv
## from os import paramStr
## from streams import newFileStream
##
## var s = newFileStream(paramStr(1), fmRead)
## if s == nil: quit("cannot open the file" & paramStr(1))
## if s == nil:
## quit("cannot open the file" & paramStr(1))
##
## var x: CsvParser
## open(x, s, paramStr(1))
## while readRow(x):
@@ -26,11 +31,11 @@
## close(x)
##
## For CSV files with a header row, the header can be read and then used as a
## reference for item access with `rowEntry <#rowEntry.CsvParser.string>`_:
## reference for item access with `rowEntry <#rowEntry,CsvParser,string>`_:
##
## .. code-block:: nim
## import parsecsv
## import os
##
## # Prepare a file
## let content = """One,Two,Three,Four
## 1,2,3,4
@@ -47,24 +52,40 @@
## for col in items(p.headers):
## echo "##", col, ":", p.rowEntry(col), "##"
## p.close()
##
## See also
## ========
##
## * `streams module <streams.html>`_ for using
## `open proc <#open,CsvParser,Stream,string,Char,Char,Char>`_
## and other stream processing (like `close proc <streams.html#close,Stream>`_)
## * `parseopt module <parseopt.html>`_ for a command line parser
## * `parsecfg module <parsecfg.html>`_ for a configuration file parser
## * `parsexml module <parsexml.html>`_ for an XML / HTML parser
## * `parsesql module <parsesql.html>`_ for a SQL parser
## * `other parsers <lib.html#pure-libraries-parsers>`_ for other parsers
import
lexbase, streams
type
CsvRow* = seq[string] ## a row in a CSV file
CsvParser* = object of BaseLexer ## the parser object.
row*: CsvRow ## the current row
CsvRow* = seq[string] ## A row in a CSV file.
CsvParser* = object of BaseLexer ## The parser object.
##
## It consists of two public fields:
## * `row` is the current row
## * `headers` are the columns that are defined in the csv file
## (read using `readHeaderRow <#readHeaderRow,CsvParser>`_).
## Used with `rowEntry <#rowEntry,CsvParser,string>`_.
row*: CsvRow
filename: string
sep, quote, esc: char
skipWhite: bool
currRow: int
headers*: seq[string] ## The columns that are defined in the csv file
## (read using `readHeaderRow <#readHeaderRow.CsvParser>`_).
## Used with `rowEntry <#rowEntry.CsvParser.string>`_).
headers*: seq[string]
CsvError* = object of IOError ## exception that is raised if
## a parsing error occurs
CsvError* = object of IOError ## An exception that is raised if
## a parsing error occurs.
proc raiseEInvalidCsv(filename: string, line, col: int,
msg: string) {.noreturn.} =
@@ -82,7 +103,7 @@ proc error(my: CsvParser, pos: int, msg: string) =
proc open*(my: var CsvParser, input: Stream, filename: string,
separator = ',', quote = '"', escape = '\0',
skipInitialSpace = false) =
## initializes the parser with an input stream. `Filename` is only used
## Initializes the parser with an input stream. `Filename` is only used
## for nice error messages. The parser's behaviour can be controlled by
## the diverse optional parameters:
## - `separator`: character used to separate fields
@@ -94,6 +115,18 @@ proc open*(my: var CsvParser, input: Stream, filename: string,
## two `quote` characters are parsed one literal `quote` character.
## - `skipInitialSpace`: If true, whitespace immediately following the
## `separator` is ignored.
##
## See also:
## * `open proc <#open,CsvParser,string,Char,Char,Char>`_ which creates the
## file stream for you
runnableExamples:
import streams
var strm = newStringStream("One,Two,Three\n1,2,3\n10,20,30")
var parser: CsvParser
parser.open(strm, "tmp.csv")
parser.close()
strm.close()
lexbase.open(my, input)
my.filename = filename
my.sep = separator
@@ -106,7 +139,16 @@ proc open*(my: var CsvParser, input: Stream, filename: string,
proc open*(my: var CsvParser, filename: string,
separator = ',', quote = '"', escape = '\0',
skipInitialSpace = false) =
## same as the other `open` but creates the file stream for you.
## Similar to the `other open proc<#open,CsvParser,Stream,string,Char,Char,Char>`_,
## but creates the file stream for you.
runnableExamples:
from os import removeFile
writeFile("tmp.csv", "One,Two,Three\n1,2,3\n10,20,300")
var parser: CsvParser
parser.open("tmp.csv")
parser.close()
removeFile("tmp.csv")
var s = newFileStream(filename, fmRead)
if s == nil: my.error(0, "cannot open: " & filename)
open(my, s, filename, separator,
@@ -159,17 +201,66 @@ proc parseField(my: var CsvParser, a: var string) =
my.bufpos = pos
proc processedRows*(my: var CsvParser): int =
## returns number of the processed rows
## Returns the number of processed rows.
##
## Note that the counter is incremented even after
## `readRow <#readRow,CsvParser,int>`_ has reached EOF.
runnableExamples:
import streams
var strm = newStringStream("One,Two,Three\n1,2,3")
var parser: CsvParser
parser.open(strm, "tmp.csv")
doAssert parser.readRow()
doAssert parser.processedRows() == 1
doAssert parser.readRow()
doAssert parser.processedRows() == 2
## Even after `readRow` reaches EOF, `processedRows` keeps incrementing.
doAssert parser.readRow() == false
doAssert parser.processedRows() == 3
doAssert parser.readRow() == false
doAssert parser.processedRows() == 4
parser.close()
strm.close()
return my.currRow
proc readRow*(my: var CsvParser, columns = 0): bool =
## reads the next row; if `columns` > 0, it expects the row to have
## Reads the next row; if `columns` > 0, it expects the row to have
## exactly this many columns. Returns false if the end of the file
## has been encountered else true.
##
## Blank lines are skipped.
runnableExamples:
import streams
var strm = newStringStream("One,Two,Three\n1,2,3\n\n10,20,30")
var parser: CsvParser
parser.open(strm, "tmp.csv")
doAssert parser.readRow()
doAssert parser.row == @["One", "Two", "Three"]
doAssert parser.readRow()
doAssert parser.row == @["1", "2", "3"]
## Blank lines are skipped.
doAssert parser.readRow()
doAssert parser.row == @["10", "20", "30"]
var emptySeq: seq[string]
doAssert parser.readRow() == false
doAssert parser.row == emptySeq
doAssert parser.readRow() == false
doAssert parser.row == emptySeq
parser.close()
strm.close()
var col = 0 # current column
let oldpos = my.bufpos
# skip initial empty lines #8365
while true:
case my.buf[my.bufpos]
of '\c': my.bufpos = handleCR(my, my.bufpos)
of '\l': my.bufpos = handleLF(my, my.bufpos)
else: break
while my.buf[my.bufpos] != '\0':
let oldlen = my.row.len
if oldlen < col+1:
@@ -200,12 +291,31 @@ proc readRow*(my: var CsvParser, columns = 0): bool =
inc(my.currRow)
proc close*(my: var CsvParser) {.inline.} =
## closes the parser `my` and its associated input stream.
## Closes the parser `my` and its associated input stream.
lexbase.close(my)
proc readHeaderRow*(my: var CsvParser) =
## Reads the first row and creates a look-up table for column numbers
## See also `rowEntry <#rowEntry.CsvParser.string>`_.
## See also:
## * `rowEntry proc <#rowEntry,CsvParser,string>`_
runnableExamples:
import streams
var strm = newStringStream("One,Two,Three\n1,2,3")
var parser: CsvParser
parser.open(strm, "tmp.csv")
parser.readHeaderRow()
doAssert parser.headers == @["One", "Two", "Three"]
doAssert parser.row == @["One", "Two", "Three"]
doAssert parser.readRow()
doAssert parser.headers == @["One", "Two", "Three"]
doAssert parser.row == @["1", "2", "3"]
parser.close()
strm.close()
let present = my.readRow()
if present:
my.headers = my.row
@@ -213,8 +323,23 @@ proc readHeaderRow*(my: var CsvParser) =
proc rowEntry*(my: var CsvParser, entry: string): var string =
## Accesses a specified `entry` from the current row.
##
## Assumes that `readHeaderRow <#readHeaderRow.CsvParser>`_ has already been
## Assumes that `readHeaderRow <#readHeaderRow,CsvParser>`_ has already been
## called.
runnableExamples:
import streams
var strm = newStringStream("One,Two,Three\n1,2,3\n\n10,20,30")
var parser: CsvParser
parser.open(strm, "tmp.csv")
## `readHeaderRow` must be called first.
parser.readHeaderRow()
doAssert parser.readRow()
doAssert parser.rowEntry("One") == "1"
doAssert parser.rowEntry("Two") == "2"
doAssert parser.rowEntry("Three") == "3"
## Calling `parser.rowEntry("NotExistEntry")` would cause a SIGSEGV fault.
parser.close()
strm.close()
let index = my.headers.find(entry)
if index >= 0:
result = my.row[index]
@@ -235,7 +360,7 @@ when isMainModule:
import os
import strutils
block: # Tests for reading the header row
let content = "One,Two,Three,Four\n1,2,3,4\n10,20,30,40,\n100,200,300,400\n"
let content = "\nOne,Two,Three,Four\n1,2,3,4\n10,20,30,40,\n100,200,300,400\n"
writeFile("temp.csv", content)
var p: CsvParser
@@ -262,4 +387,3 @@ when isMainModule:
# Tidy up
removeFile("temp.csv")
View File
@@ -11,23 +11,141 @@
## It supports one convenience iterator over all command line options and some
## lower-level features.
##
## Supported syntax with default empty ``shortNoVal``/``longNoVal``:
## Supported Syntax
## ================
##
## 1. short options - ``-abcd``, where a, b, c, d are names
## 2. long option - ``--foo:bar``, ``--foo=bar`` or ``--foo``
## 3. argument - everything else
## The following syntax is supported when arguments for the ``shortNoVal`` and
## ``longNoVal`` parameters, which are
## `described later<#shortnoval-and-longnoval>`_, are not provided:
##
## When ``shortNoVal``/``longNoVal`` are non-empty then the ':' and '=' above
## are still accepted, but become optional. Note that these option key sets
## must be updated along with the set of option keys taking no value, but
## keys which do take values need no special updates as their set evolves.
## 1. Short options: ``-abcd``, ``-e:5``, ``-e=5``
## 2. Long options: ``--foo:bar``, ``--foo=bar``, ``--foo``
## 3. Arguments: everything that does not start with a ``-``
##
## When option values begin with ':' or '=' they need to be doubled up (as in
## These three kinds of tokens are enumerated in the
## `CmdLineKind enum<#CmdLineKind>`_.
##
## When option values begin with ':' or '=', they need to be doubled up (as in
## ``--delim::``) or alternated (as in ``--delim=:``).
##
## The common ``--`` non-option argument delimiter appears as an empty string
## long option key. ``OptParser.cmd``, ``OptParser.pos``, and
## ``os.parseCmdLine`` may be used to complete parsing in that case.
## The ``--`` option, commonly used to denote that every token that follows is
## an argument, is interpreted as a long option, and its name is the empty
## string.
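##
## For instance, under these rules ``--delim::`` carries the value ``:``, and a
## bare ``--`` shows up as a long option whose key is the empty string. A
## minimal sketch:
##
## .. code-block::
##   import parseopt
##
##   var p = initOptParser("--delim:: --")
##   p.next()
##   doAssert p.kind == cmdLongOption and p.key == "delim" and p.val == ":"
##   p.next()
##   doAssert p.kind == cmdLongOption and p.key == ""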
##
## Parsing
## =======
##
## Use an `OptParser<#OptParser>`_ to parse command line options. It can be
## created with `initOptParser<#initOptParser,string,set[char],seq[string]>`_,
## and `next<#next,OptParser>`_ advances the parser by one token.
##
## For each token, the parser's ``kind``, ``key``, and ``val`` fields give
## information about that token. If the token is a long or short option, ``key``
## is the option's name, and ``val`` is either the option's value, if provided,
## or the empty string. For arguments, the ``key`` field contains the argument
## itself, and ``val`` is unused. To check if the end of the command line has
## been reached, check if ``kind`` is equal to ``cmdEnd``.
##
## Here is an example:
##
## .. code-block::
## import parseopt
##
## var p = initOptParser("-ab -e:5 --foo --bar=20 file.txt")
## while true:
## p.next()
## case p.kind
## of cmdEnd: break
## of cmdShortOption, cmdLongOption:
## if p.val == "":
## echo "Option: ", p.key
## else:
## echo "Option and value: ", p.key, ", ", p.val
## of cmdArgument:
## echo "Argument: ", p.key
##
## # Output:
## # Option: a
## # Option: b
## # Option and value: e, 5
## # Option: foo
## # Option and value: bar, 20
## # Argument: file.txt
##
## The `getopt iterator<#getopt.i,OptParser>`_, which is provided for
## convenience, can be used to iterate through all command line options as well.
##
## ``shortNoVal`` and ``longNoVal``
## ================================
##
## The optional ``shortNoVal`` and ``longNoVal`` parameters present in
## `initOptParser<#initOptParser,string,set[char],seq[string]>`_ are for
## specifying which short and long options do not accept values.
##
## When ``shortNoVal`` is non-empty, users are not required to separate short
## options and their values with a ':' or '=' since the parser knows which
## options accept values and which ones do not. This behavior also applies for
## long options if ``longNoVal`` is non-empty. For short options, ``-j4``
## becomes supported syntax, and for long options, ``--foo bar`` becomes
## supported. This is in addition to the `previously mentioned
## syntax<#supported-syntax>`_. Users can still separate options and their
## values with ':' or '=', but that becomes optional.
##
## As more options which do not accept values are added to your program,
## remember to amend ``shortNoVal`` and ``longNoVal`` accordingly.
##
## The following example illustrates the difference between having an empty
## ``shortNoVal`` and ``longNoVal``, which is the default, and providing
## arguments for those two parameters:
##
## .. code-block::
## import parseopt
##
## proc printToken(kind: CmdLineKind, key: string, val: string) =
## case kind
## of cmdEnd: doAssert(false) # Doesn't happen with getopt()
## of cmdShortOption, cmdLongOption:
## if val == "":
## echo "Option: ", key
## else:
## echo "Option and value: ", key, ", ", val
## of cmdArgument:
## echo "Argument: ", key
##
## let cmdLine = "-j4 --first bar"
##
## var emptyNoVal = initOptParser(cmdLine)
## for kind, key, val in emptyNoVal.getopt():
## printToken(kind, key, val)
##
## # Output:
## # Option: j
## # Option: 4
## # Option: first
## # Argument: bar
##
## var withNoVal = initOptParser(cmdLine, shortNoVal = {'c'},
## longNoVal = @["second"])
## for kind, key, val in withNoVal.getopt():
## printToken(kind, key, val)
##
## # Output:
## # Option and value: j, 4
## # Option and value: first, bar
##
## See also
## ========
##
## * `os module<os.html>`_ for lower-level command line parsing procs
## * `parseutils module<parseutils.html>`_ for helpers that parse tokens,
## numbers, identifiers, etc.
## * `strutils module<strutils.html>`_ for common string handling operations
## * `json module<json.html>`_ for a JSON parser
## * `parsecfg module<parsecfg.html>`_ for a configuration file parser
## * `parsecsv module<parsecsv.html>`_ for a simple CSV (comma separated value)
## parser
## * `parsexml module<parsexml.html>`_ for a XML / HTML parser
## * `other parsers<lib.html#pure-libraries-parsers>`_ for more parsers
{.push debugger: off.}
@@ -37,23 +155,26 @@ import
os, strutils
type
CmdLineKind* = enum ## the detected command line token
cmdEnd, ## end of command line reached
cmdArgument, ## argument detected
cmdLongOption, ## a long option ``--option`` detected
cmdShortOption ## a short option ``-c`` detected
CmdLineKind* = enum ## The detected command line token.
cmdEnd, ## End of command line reached
cmdArgument, ## An argument such as a filename
cmdLongOption, ## A long option such as --option
cmdShortOption ## A short option such as -c
OptParser* =
object of RootObj ## this object implements the command line parser
pos*: int # ..empty key or subcmd cmdArg & handle specially
object of RootObj ## Implementation of the command line parser.
##
## To initialize it, use the
## `initOptParser proc<#initOptParser,string,set[char],seq[string]>`_.
pos*: int
inShortState: bool
allowWhitespaceAfterColon: bool
shortNoVal: set[char]
longNoVal: seq[string]
cmds: seq[string]
idx: int
kind*: CmdLineKind ## the dected command line token
key*, val*: TaintedString ## key and value pair; ``key`` is the option
## or the argument, ``value`` is not "" if
kind*: CmdLineKind ## The detected command line token
key*, val*: TaintedString ## Key and value pair; the key is the option
## or the argument, and the value is not "" if
## the option was given a value
proc parseWord(s: string, i: int, w: var string,
@@ -79,13 +200,24 @@ when declared(os.paramCount):
proc initOptParser*(cmdline = "", shortNoVal: set[char]={},
longNoVal: seq[string] = @[];
allowWhitespaceAfterColon = true): OptParser =
## inits the option parser. If ``cmdline == ""``, the real command line
## (as provided by the ``OS`` module) is taken. If ``shortNoVal`` is
## provided command users do not need to delimit short option keys and
## values with a ':' or '='. If ``longNoVal`` is provided command users do
## not need to delimit long option keys and values with a ':' or '='
## (though they still need at least a space). In both cases, ':' or '='
## may still be used if desired. They just become optional.
## Initializes the command line parser.
##
## If ``cmdline == ""``, the real command line as provided by the
## ``os`` module is retrieved instead.
##
## ``shortNoVal`` and ``longNoVal`` are used to specify which options
## do not take values. See the `documentation about these
## parameters<#shortnoval-and-longnoval>`_ for more information on
## how this affects parsing.
##
## See also:
## * `getopt iterator<#getopt.i,OptParser>`_
runnableExamples:
var p = initOptParser()
p = initOptParser("--left --debug:3 -l -r:2")
p = initOptParser("--left --debug:3 -l -r:2",
shortNoVal = {'l'}, longNoVal = @["left"])
result.pos = 0
result.idx = 0
result.inShortState = false
@@ -106,9 +238,21 @@ when declared(os.paramCount):
proc initOptParser*(cmdline: seq[TaintedString], shortNoVal: set[char]={},
longNoVal: seq[string] = @[];
allowWhitespaceAfterColon = true): OptParser =
## inits the option parser. If ``cmdline.len == 0``, the real command line
## (as provided by the ``OS`` module) is taken. ``shortNoVal`` and
## ``longNoVal`` behavior is the same as for ``initOptParser(string,...)``.
## Initializes the command line parser.
##
## If ``cmdline.len == 0``, the real command line as provided by the
## ``os`` module is retrieved instead. Behavior of the other parameters
## remains the same as in `initOptParser(string, ...)
## <#initOptParser,string,set[char],seq[string]>`_.
##
## See also:
## * `getopt iterator<#getopt.i,seq[TaintedString],set[char],seq[string]>`_
runnableExamples:
var p = initOptParser()
p = initOptParser(@["--left", "--debug:3", "-l", "-r:2"])
p = initOptParser(@["--left", "--debug:3", "-l", "-r:2"],
shortNoVal = {'l'}, longNoVal = @["left"])
result.pos = 0
result.idx = 0
result.inShortState = false
@@ -153,8 +297,21 @@ proc handleShortOption(p: var OptParser; cmd: string) =
inc p.idx
proc next*(p: var OptParser) {.rtl, extern: "npo$1".} =
## parses the first or next option; ``p.kind`` describes what token has been
## parsed. ``p.key`` and ``p.val`` are set accordingly.
## Parses the next token.
##
## ``p.kind`` describes what kind of token has been parsed. ``p.key`` and
## ``p.val`` are set accordingly.
runnableExamples:
var p = initOptParser("--left -r:2 file.txt")
p.next()
doAssert p.kind == cmdLongOption and p.key == "left"
p.next()
doAssert p.kind == cmdShortOption and p.key == "r" and p.val == "2"
p.next()
doAssert p.kind == cmdArgument and p.key == "file.txt"
p.next()
doAssert p.kind == cmdEnd
if p.idx >= p.cmds.len:
p.kind = cmdEnd
return
@@ -209,20 +366,61 @@ proc next*(p: var OptParser) {.rtl, extern: "npo$1".} =
when declared(os.paramCount):
proc cmdLineRest*(p: OptParser): TaintedString {.rtl, extern: "npo$1".} =
## retrieves the rest of the command line that has not been parsed yet.
## Retrieves the rest of the command line that has not been parsed yet.
##
## See also:
## * `remainingArgs proc<#remainingArgs,OptParser>`_
##
## **Examples:**
##
## .. code-block::
## var p = initOptParser("--left -r:2 -- foo.txt bar.txt")
## while true:
## p.next()
## if p.kind == cmdLongOption and p.key == "": # Look for "--"
## break
## else: continue
## doAssert p.cmdLineRest == "foo.txt bar.txt"
result = p.cmds[p.idx .. ^1].quoteShellCommand.TaintedString
proc remainingArgs*(p: OptParser): seq[TaintedString] {.rtl, extern: "npo$1".} =
## retrieves the rest of the command line that has not been parsed yet.
## Retrieves a sequence of the arguments that have not been parsed yet.
##
## See also:
## * `cmdLineRest proc<#cmdLineRest,OptParser>`_
##
## **Examples:**
##
## .. code-block::
## var p = initOptParser("--left -r:2 -- foo.txt bar.txt")
## while true:
## p.next()
## if p.kind == cmdLongOption and p.key == "": # Look for "--"
## break
## else: continue
## doAssert p.remainingArgs == @["foo.txt", "bar.txt"]
result = @[]
for i in p.idx..<p.cmds.len: result.add TaintedString(p.cmds[i])
iterator getopt*(p: var OptParser): tuple[kind: CmdLineKind, key, val: TaintedString] =
## This is an convenience iterator for iterating over the given OptParser object.
## Example:
## Convenience iterator for iterating over the given
## `OptParser<#OptParser>`_.
##
## .. code-block:: nim
## There is no need to check for ``cmdEnd`` while iterating.
##
## See also:
## * `initOptParser proc<#initOptParser,string,set[char],seq[string]>`_
##
## **Examples:**
##
## .. code-block::
## # these are placeholders, of course
## proc writeHelp() = discard
## proc writeVersion() = discard
##
## var filename: string
## var p = initOptParser("--left --debug:3 -l -r:2")
##
## for kind, key, val in p.getopt():
## case kind
## of cmdArgument:
@@ -233,7 +431,7 @@ iterator getopt*(p: var OptParser): tuple[kind: CmdLineKind, key, val: TaintedSt
## of "version", "v": writeVersion()
## of cmdEnd: assert(false) # cannot happen
## if filename == "":
## # no filename has been given, so we show the help:
## # no filename has been given, so we show the help
## writeHelp()
p.pos = 0
p.idx = 0
@@ -246,15 +444,34 @@ when declared(initOptParser):
iterator getopt*(cmdline: seq[TaintedString] = commandLineParams(),
shortNoVal: set[char]={}, longNoVal: seq[string] = @[]):
tuple[kind: CmdLineKind, key, val: TaintedString] =
## This is an convenience iterator for iterating over command line arguments.
## This creates a new OptParser. See the above ``getopt(var OptParser)``
## example for using default empty ``NoVal`` parameters. This example is
## for the same option keys as that example but here option key-value
## separators become optional for command users:
## Convenience iterator for iterating over command line arguments.
##
## .. code-block:: nim
## for kind, key, val in getopt(shortNoVal = { 'l' },
## longNoVal = @[ "left" ]):
## This creates a new `OptParser<#OptParser>`_. If no command line
## arguments are provided, the real command line as provided by the
## ``os`` module is retrieved instead.
##
## ``shortNoVal`` and ``longNoVal`` are used to specify which options
## do not take values. See the `documentation about these
## parameters<#shortnoval-and-longnoval>`_ for more information on
## how this affects parsing.
##
## There is no need to check for ``cmdEnd`` while iterating.
##
## See also:
## * `initOptParser proc<#initOptParser,seq[TaintedString],set[char],seq[string]>`_
##
## **Examples:**
##
## .. code-block::
##
## # these are placeholders, of course
## proc writeHelp() = discard
## proc writeVersion() = discard
##
## var filename: string
## let params = @["--left", "--debug:3", "-l", "-r:2"]
##
## for kind, key, val in getopt(params):
## case kind
## of cmdArgument:
## filename = key
@@ -264,8 +481,8 @@ when declared(initOptParser):
## of "version", "v": writeVersion()
## of cmdEnd: assert(false) # cannot happen
## if filename == "":
## # no filename has been written, so we show the help
## writeHelp()
##
var p = initOptParser(cmdline, shortNoVal=shortNoVal, longNoVal=longNoVal)
while true:
next(p)
View File
@@ -33,8 +33,7 @@
## import uri
## let res = parseUri("sftp://127.0.0.1:4343")
## if isAbsolute(res):
## echo "Connect to port: " & res.port
## # --> Connect to port: 4343
## assert res.port == "4343"
## else:
## echo "Wrong format"
@@ -189,7 +188,7 @@ proc parseUri*(uri: string, result: var Uri) =
##
## **See also:**
## * `Uri type <#Uri>`_ for available fields in the URI type
## * `initUri proc <#initUri,>`_ for initializing a URI
## * `initUri proc <#initUri>`_ for initializing a URI
runnableExamples:
var res = initUri()
parseUri("https://nim-lang.org/docs/manual.html", res)
@@ -343,9 +342,9 @@ proc combine*(uris: varargs[Uri]): Uri =
## **See also:**
## * `/ proc <#/,Uri,string>`_ for building URIs
runnableExamples:
let foo = combine(parseUri("https://nim-lang.org/blog.html"), parseUri("/install.html"))
let foo = combine(parseUri("https://nim-lang.org/"), parseUri("docs/"), parseUri("manual.html"))
assert foo.hostname == "nim-lang.org"
assert foo.path == "/install.html"
assert foo.path == "/docs/manual.html"
result = uris[0]
for i in 1 ..< uris.len:
result = combine(result, uris[i])
View File
@@ -7,21 +7,52 @@
# distribution, for details about the copyright.
#
## A simple XML tree.
## A simple XML tree generator.
##
## .. code-block::
## import xmltree
##
## var g = newElement("myTag")
## g.add newText("some text")
## g.add newComment("this is comment")
##
## var h = newElement("secondTag")
## h.add newEntity("some entity")
##
## let att = {"key1": "first value", "key2": "second value"}.toXmlAttributes
## let k = newXmlTree("treeTag", [g, h], att)
##
## echo k
## # <treeTag key2="second value" key1="first value">
## # <myTag>some text<!-- this is comment --></myTag>
## # <secondTag>&some entity;</secondTag>
## # </treeTag>
##
##
## **See also:**
## * `xmlparser module <xmlparser.html>`_ for high-level XML parsing
## * `parsexml module <parsexml.html>`_ for low-level XML parsing
## * `htmlgen module <htmlgen.html>`_ for html code generator
import macros, strtabs, strutils
type
XmlNode* = ref XmlNodeObj ## an XML tree consists of ``XmlNode``'s.
XmlNode* = ref XmlNodeObj ## An XML tree consisting of XML nodes.
##
## Use `newXmlTree proc <#newXmlTree,string,openArray[XmlNode],XmlAttributes>`_
## for creating a new tree.
XmlNodeKind* = enum ## different kinds of ``XmlNode``'s
XmlNodeKind* = enum ## Different kinds of XML nodes.
xnText, ## a text element
xnElement, ## an element with 0 or more children
xnCData, ## a CDATA node
xnEntity, ## an entity (like ``&thing;``)
xnComment ## an XML comment
XmlAttributes* = StringTableRef ## an alias for a string to string mapping
XmlAttributes* = StringTableRef ## An alias for a string to string mapping.
##
## Use `toXmlAttributes proc <#toXmlAttributes,varargs[tuple[string,string]]>`_
## to create `XmlAttributes`.
XmlNodeObj {.acyclic.} = object
case k: XmlNodeKind # private, use the kind() proc to read this field.
@@ -33,67 +64,203 @@ type
fAttr: XmlAttributes
fClientData: int ## for other clients
const
xmlHeader* = "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n"
## Header to use for complete XML output.
proc newXmlNode(kind: XmlNodeKind): XmlNode =
## creates a new ``XmlNode``.
## Creates a new ``XmlNode``.
new(result)
result.k = kind
proc newElement*(tag: string): XmlNode =
## creates a new ``PXmlNode`` of kind ``xnText`` with the given `tag`.
## Creates a new ``XmlNode`` of kind ``xnElement`` with the given `tag`.
##
## See also:
## * `newXmlTree proc <#newXmlTree,string,openArray[XmlNode],XmlAttributes>`_
## * [<> macro](#<>.m,untyped)
runnableExamples:
var a = newElement("firstTag")
a.add newElement("childTag")
assert a.kind == xnElement
assert $a == "<firstTag><childTag /></firstTag>"
result = newXmlNode(xnElement)
result.fTag = tag
result.s = @[]
# init attributes lazily to safe memory
# init attributes lazily to save memory
proc newText*(text: string): XmlNode =
## creates a new ``PXmlNode`` of kind ``xnText`` with the text `text`.
## Creates a new ``XmlNode`` of kind ``xnText`` with the text `text`.
runnableExamples:
var b = newText("my text")
assert b.kind == xnText
assert $b == "my text"
result = newXmlNode(xnText)
result.fText = text
proc newComment*(comment: string): XmlNode =
## creates a new ``PXmlNode`` of kind ``xnComment`` with the text `comment`.
## Creates a new ``XmlNode`` of kind ``xnComment`` with the text `comment`.
runnableExamples:
var c = newComment("my comment")
assert c.kind == xnComment
assert $c == "<!-- my comment -->"
result = newXmlNode(xnComment)
result.fText = comment
proc newCData*(cdata: string): XmlNode =
## creates a new ``PXmlNode`` of kind ``xnComment`` with the text `cdata`.
## Creates a new ``XmlNode`` of kind ``xnCData`` with the text `cdata`.
runnableExamples:
var d = newCData("my cdata")
assert d.kind == xnCData
assert $d == "<![CDATA[my cdata]]>"
result = newXmlNode(xnCData)
result.fText = cdata
proc newEntity*(entity: string): XmlNode =
## creates a new ``PXmlNode`` of kind ``xnEntity`` with the text `entity`.
## Creates a new ``XmlNode`` of kind ``xnEntity`` with the text `entity`.
runnableExamples:
var e = newEntity("my entity")
assert e.kind == xnEntity
assert $e == "&my entity;"
result = newXmlNode(xnEntity)
result.fText = entity
proc newXmlTree*(tag: string, children: openArray[XmlNode],
attributes: XmlAttributes = nil): XmlNode =
## Creates a new XML tree with `tag`, `children` and `attributes`.
##
## See also:
## * `newElement proc <#newElement,string>`_
## * [<> macro](#<>.m,untyped)
runnableExamples:
from strutils import unindent
var g = newElement("myTag")
g.add newText("some text")
g.add newComment("this is comment")
var h = newElement("secondTag")
h.add newEntity("some entity")
let att = {"key1": "first value", "key2": "second value"}.toXmlAttributes
let k = newXmlTree("treeTag", [g, h], att)
assert ($k).unindent == """<treeTag key2="second value" key1="first value">
<myTag>some text<!-- this is comment --></myTag>
<secondTag>&some entity;</secondTag>
</treeTag>""".unindent
result = newXmlNode(xnElement)
result.fTag = tag
newSeq(result.s, children.len)
for i in 0..children.len-1: result.s[i] = children[i]
result.fAttr = attributes
proc text*(n: XmlNode): string {.inline.} =
## gets the associated text with the node `n`. `n` can be a CDATA, Text,
## comment, or entity node.
## Gets the associated text with the node `n`.
##
## `n` can be a CDATA, Text, comment, or entity node.
##
## See also:
## * `text= proc <#text=,XmlNode,string>`_ for text setter
## * `tag proc <#tag,XmlNode>`_ for tag getter
## * `tag= proc <#tag=,XmlNode,string>`_ for tag setter
## * `innerText proc <#innerText,XmlNode>`_
runnableExamples:
var c = newComment("my comment")
assert $c == "<!-- my comment -->"
assert c.text == "my comment"
assert n.k in {xnText, xnComment, xnCData, xnEntity}
result = n.fText
proc `text=`*(n: XmlNode, text: string){.inline.} =
## sets the associated text with the node `n`. `n` can be a CDATA, Text,
## comment, or entity node.
## Sets the associated text with the node `n`.
##
## `n` can be a CDATA, Text, comment, or entity node.
##
## See also:
## * `text proc <#text,XmlNode>`_ for text getter
## * `tag proc <#tag,XmlNode>`_ for tag getter
## * `tag= proc <#tag=,XmlNode,string>`_ for tag setter
runnableExamples:
var e = newEntity("my entity")
assert $e == "&my entity;"
e.text = "a new entity text"
assert $e == "&a new entity text;"
assert n.k in {xnText, xnComment, xnCData, xnEntity}
n.fText = text
proc tag*(n: XmlNode): string {.inline.} =
## Gets the tag name of `n`.
##
## `n` has to be an ``xnElement`` node.
##
## See also:
## * `text proc <#text,XmlNode>`_ for text getter
## * `text= proc <#text=,XmlNode,string>`_ for text setter
## * `tag= proc <#tag=,XmlNode,string>`_ for tag setter
## * `innerText proc <#innerText,XmlNode>`_
runnableExamples:
var a = newElement("firstTag")
a.add newElement("childTag")
assert $a == "<firstTag><childTag /></firstTag>"
assert a.tag == "firstTag"
assert n.k == xnElement
result = n.fTag
proc `tag=`*(n: XmlNode, tag: string) {.inline.} =
## Sets the tag name of `n`.
##
## `n` has to be an ``xnElement`` node.
##
## See also:
## * `text proc <#text,XmlNode>`_ for text getter
## * `text= proc <#text=,XmlNode,string>`_ for text setter
## * `tag proc <#tag,XmlNode>`_ for tag getter
runnableExamples:
var a = newElement("firstTag")
a.add newElement("childTag")
assert $a == "<firstTag><childTag /></firstTag>"
a.tag = "newTag"
assert $a == "<newTag><childTag /></newTag>"
assert n.k == xnElement
n.fTag = tag
proc rawText*(n: XmlNode): string {.inline.} =
## returns the underlying 'text' string by reference.
## Returns the underlying 'text' string by reference.
##
## This is only used for speed hacks.
shallowCopy(result, n.fText)
proc rawTag*(n: XmlNode): string {.inline.} =
## returns the underlying 'tag' string by reference.
## Returns the underlying 'tag' string by reference.
##
## This is only used for speed hacks.
shallowCopy(result, n.fTag)
proc innerText*(n: XmlNode): string =
## gets the inner text of `n`:
## Gets the inner text of `n`:
##
## - If `n` is `xnText` or `xnEntity`, returns its content.
## - If `n` is `xnElement`, runs recursively on each child node and
## concatenates the results.
## - Otherwise returns an empty string.
##
## See also:
## * `text proc <#text,XmlNode>`_
runnableExamples:
var f = newElement("myTag")
f.add newText("my text")
f.add newComment("my comment")
f.add newEntity("my entity")
assert $f == "<myTag>my text<!-- my comment -->&my entity;</myTag>"
assert innerText(f) == "my textmy entity"
proc worker(res: var string, n: XmlNode) =
case n.k
of xnText, xnEntity:
@@ -107,89 +274,218 @@ proc innerText*(n: XmlNode): string =
result = ""
worker(result, n)
proc tag*(n: XmlNode): string {.inline.} =
## gets the tag name of `n`. `n` has to be an ``xnElement`` node.
assert n.k == xnElement
result = n.fTag
proc `tag=`*(n: XmlNode, tag: string) {.inline.} =
## sets the tag name of `n`. `n` has to be an ``xnElement`` node.
assert n.k == xnElement
n.fTag = tag
proc add*(father, son: XmlNode) {.inline.} =
## adds the child `son` to `father`.
## Adds the child `son` to `father`.
##
## See also:
## * `insert proc <#insert,XmlNode,XmlNode,int>`_
## * `delete proc <#delete,XmlNode,Natural>`_
runnableExamples:
var f = newElement("myTag")
f.add newText("my text")
f.add newElement("sonTag")
f.add newEntity("my entity")
assert $f == "<myTag>my text<sonTag />&my entity;</myTag>"
add(father.s, son)
proc insert*(father, son: XmlNode, index: int) {.inline.} =
## insert the child `son` to a given position in `father`.
## Insert the child `son` to a given position in `father`.
##
## `father` and `son` must be of `xnElement` kind.
##
## See also:
## * `add proc <#add,XmlNode,XmlNode>`_
## * `delete proc <#delete,XmlNode,Natural>`_
runnableExamples:
from strutils import unindent
var f = newElement("myTag")
f.add newElement("first")
f.insert(newElement("second"), 0)
assert ($f).unindent == "<myTag>\n<second />\n<first />\n</myTag>"
assert father.k == xnElement and son.k == xnElement
if len(father.s) > index:
insert(father.s, son, index)
else:
insert(father.s, son, len(father.s))
proc len*(n: XmlNode): int {.inline.} =
## returns the number `n`'s children.
if n.k == xnElement: result = len(n.s)
proc kind*(n: XmlNode): XmlNodeKind {.inline.} =
## returns `n`'s kind.
result = n.k
proc `[]`* (n: XmlNode, i: int): XmlNode {.inline.} =
## returns the `i`'th child of `n`.
assert n.k == xnElement
result = n.s[i]
proc delete*(n: XmlNode, i: Natural) {.noSideEffect.} =
## delete the `i`'th child of `n`.
## Delete the `i`'th child of `n`.
##
## See also:
## * `add proc <#add,XmlNode,XmlNode>`_
## * `insert proc <#insert,XmlNode,XmlNode,int>`_
runnableExamples:
var f = newElement("myTag")
f.add newElement("first")
f.insert(newElement("second"), 0)
f.delete(0)
assert $f == "<myTag><first /></myTag>"
assert n.k == xnElement
n.s.delete(i)
proc len*(n: XmlNode): int {.inline.} =
## Returns the number of `n`'s children.
runnableExamples:
var f = newElement("myTag")
f.add newElement("first")
f.insert(newElement("second"), 0)
assert len(f) == 2
if n.k == xnElement: result = len(n.s)
proc kind*(n: XmlNode): XmlNodeKind {.inline.} =
## Returns `n`'s kind.
runnableExamples:
var a = newElement("firstTag")
assert a.kind == xnElement
var b = newText("my text")
assert b.kind == xnText
result = n.k
proc `[]`* (n: XmlNode, i: int): XmlNode {.inline.} =
## Returns the `i`'th child of `n`.
runnableExamples:
var f = newElement("myTag")
f.add newElement("first")
f.insert(newElement("second"), 0)
assert $f[1] == "<first />"
assert $f[0] == "<second />"
assert n.k == xnElement
result = n.s[i]
proc `[]`* (n: var XmlNode, i: int): var XmlNode {.inline.} =
## returns the `i`'th child of `n` so that it can be modified
## Returns the `i`'th child of `n` so that it can be modified.
assert n.k == xnElement
result = n.s[i]
iterator items*(n: XmlNode): XmlNode {.inline.} =
## iterates over any child of `n`.
## Iterates over any child of `n`.
##
## **Examples:**
##
## .. code-block::
## var g = newElement("myTag")
## g.add newText("some text")
## g.add newComment("this is comment")
##
## var h = newElement("secondTag")
## h.add newEntity("some entity")
## g.add h
##
## assert $g == "<myTag>some text<!-- this is comment --><secondTag>&some entity;</secondTag></myTag>"
## for x in g: # the same as `for x in items(g):`
## echo x
##
## # some text
## # <!-- this is comment -->
## # <secondTag>&some entity;</secondTag>
assert n.k == xnElement
for i in 0 .. n.len-1: yield n[i]
iterator mitems*(n: var XmlNode): var XmlNode {.inline.} =
## iterates over any child of `n`.
## Iterates over any child of `n` so that it can be modified.
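##
## Each yielded child can be reassigned in place. A small illustrative
## sketch, mirroring the ``items`` example above:
##
## .. code-block::
##   var g = newElement("myTag")
##   g.add newText("some text")
##   g.add newComment("this is comment")
##
##   for x in g.mitems:
##     x = newEntity("some entity")   # replace every child in place
##
##   assert $g == "<myTag>&some entity;&some entity;</myTag>"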
assert n.k == xnElement
for i in 0 .. n.len-1: yield n[i]
proc toXmlAttributes*(keyValuePairs: varargs[tuple[key, val: string]]): XmlAttributes =
## Converts `{key: value}` pairs into `XmlAttributes`.
runnableExamples:
let att = {"key1": "first value", "key2": "second value"}.toXmlAttributes
var j = newElement("myTag")
j.attrs = att
assert $j == """<myTag key2="second value" key1="first value" />"""
newStringTable(keyValuePairs)
proc attrs*(n: XmlNode): XmlAttributes {.inline.} =
## gets the attributes belonging to `n`.
## Gets the attributes belonging to `n`.
##
## Returns `nil` if attributes have not been initialised for this node.
##
## See also:
## * `attrs= proc <#attrs=,XmlNode,XmlAttributes>`_ for XmlAttributes setter
## * `attrsLen proc <#attrsLen,XmlNode>`_ for number of attributes
## * `attr proc <#attr,XmlNode,string>`_ for finding an attribute
runnableExamples:
var j = newElement("myTag")
assert j.attrs == nil
let att = {"key1": "first value", "key2": "second value"}.toXmlAttributes
j.attrs = att
assert j.attrs == att
assert n.k == xnElement
result = n.fAttr
proc `attrs=`*(n: XmlNode, attr: XmlAttributes) {.inline.} =
## sets the attributes belonging to `n`.
## Sets the attributes belonging to `n`.
##
## See also:
## * `attrs proc <#attrs,XmlNode>`_ for XmlAttributes getter
## * `attrsLen proc <#attrsLen,XmlNode>`_ for number of attributes
## * `attr proc <#attr,XmlNode,string>`_ for finding an attribute
runnableExamples:
var j = newElement("myTag")
assert j.attrs == nil
let att = {"key1": "first value", "key2": "second value"}.toXmlAttributes
j.attrs = att
assert j.attrs == att
assert n.k == xnElement
n.fAttr = attr
proc attrsLen*(n: XmlNode): int {.inline.} =
## returns the number of `n`'s attributes.
## Returns the number of `n`'s attributes.
##
## See also:
## * `attrs proc <#attrs,XmlNode>`_ for XmlAttributes getter
## * `attrs= proc <#attrs=,XmlNode,XmlAttributes>`_ for XmlAttributes setter
## * `attr proc <#attr,XmlNode,string>`_ for finding an attribute
runnableExamples:
var j = newElement("myTag")
assert j.attrsLen == 0
let att = {"key1": "first value", "key2": "second value"}.toXmlAttributes
j.attrs = att
assert j.attrsLen == 2
assert n.k == xnElement
if not isNil(n.fAttr): result = len(n.fAttr)
proc attr*(n: XmlNode, name: string): string =
## Finds the first attribute of `n` with a name of `name`.
## Returns "" on failure.
##
## See also:
## * `attrs proc <#attrs,XmlNode>`_ for XmlAttributes getter
## * `attrs= proc <#attrs=,XmlNode,XmlAttributes>`_ for XmlAttributes setter
## * `attrsLen proc <#attrsLen,XmlNode>`_ for number of attributes
runnableExamples:
var j = newElement("myTag")
let att = {"key1": "first value", "key2": "second value"}.toXmlAttributes
j.attrs = att
assert j.attr("key1") == "first value"
assert j.attr("key2") == "second value"
assert n.kind == xnElement
if n.attrs == nil: return ""
return n.attrs.getOrDefault(name)
proc clientData*(n: XmlNode): int {.inline.} =
## gets the client data of `n`. The client data field is used by the HTML
## parser and generator.
## Gets the client data of `n`.
##
## The client data field is used by the HTML parser and generator.
result = n.fClientData
proc `clientData=`*(n: XmlNode, data: int) {.inline.} =
## sets the client data of `n`. The client data field is used by the HTML
## parser and generator.
## Sets the client data of `n`.
##
## The client data field is used by the HTML parser and generator.
n.fClientData = data
proc addEscaped*(result: var string, s: string) =
## same as ``result.add(escape(s))``, but more efficient.
## The same as `result.add(escape(s)) <#escape,string>`_, but more efficient.
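##
## A small sketch (the full set of escaped characters is listed under
## `escape <#escape,string>`_):
##
## .. code-block::
##   var buf = "1 "
##   buf.addEscaped("< 2")   # appends the escaped form of "< 2"
##   assert buf == "1 &lt; 2"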
for c in items(s):
case c
of '<': result.add("&lt;")
@@ -201,7 +497,8 @@ proc addEscaped*(result: var string, s: string) =
else: result.add(c)
proc escape*(s: string): string =
## escapes `s` for inclusion into an XML document.
## Escapes `s` for inclusion into an XML document.
##
## Escapes these characters:
##
## ------------ -------------------
@@ -214,6 +511,8 @@ proc escape*(s: string): string =
## ``'`` ``&#x27;``
## ``/`` ``&#x2F;``
## ------------ -------------------
##
## You can also use `addEscaped proc <#addEscaped,string,string>`_.
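##
## For example (a minimal sketch):
##
## .. code-block::
##   assert escape("1<2 & 4>3") == "1&lt;2 &amp; 4&gt;3"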
result = newStringOfCap(s.len)
addEscaped(result, s)
@@ -223,14 +522,22 @@ proc addIndent(result: var string, indent: int, addNewLines: bool) =
for i in 1..indent: result.add(' ')
proc noWhitespace(n: XmlNode): bool =
#for i in 1..n.len-1:
# if n[i].kind != n[0].kind: return true
for i in 0..n.len-1:
if n[i].kind in {xnText, xnEntity}: return true
proc add*(result: var string, n: XmlNode, indent = 0, indWidth = 2,
addNewLines=true) =
## adds the textual representation of `n` to `result`.
## Adds the textual representation of `n` to string `result`.
runnableExamples:
var
a = newElement("firstTag")
b = newText("my text")
c = newComment("my comment")
s = ""
s.add(c)
s.add(a)
s.add(b)
assert s == "<!-- my comment --><firstTag />my text"
proc addEscapedAttr(result: var string, s: string) =
# `addEscaped` alternative with less escaped characters.
@@ -291,24 +598,76 @@ proc add*(result: var string, n: XmlNode, indent = 0, indWidth = 2,
result.add(n.fText)
result.add(';')
const
xmlHeader* = "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n"
## header to use for complete XML output
proc `$`*(n: XmlNode): string =
## converts `n` into its string representation. No ``<$xml ...$>`` declaration
## is produced, so that the produced XML fragments are composable.
## Converts `n` into its string representation.
##
## No ``<$xml ...$>`` declaration is produced, so that the produced
## XML fragments are composable.
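##
## A minimal sketch; the ``xmlHeader`` constant can be prepended manually
## when a complete document is needed:
##
## .. code-block::
##   let x = newElement("brk")
##   assert $x == "<brk />"
##   assert xmlHeader & $x ==
##     "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n<brk />"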
result = ""
result.add(n)
proc newXmlTree*(tag: string, children: openArray[XmlNode],
attributes: XmlAttributes = nil): XmlNode =
## creates a new XML tree with `tag`, `children` and `attributes`
result = newXmlNode(xnElement)
result.fTag = tag
newSeq(result.s, children.len)
for i in 0..children.len-1: result.s[i] = children[i]
result.fAttr = attributes
proc child*(n: XmlNode, name: string): XmlNode =
## Finds the first child element of `n` with a name of `name`.
## Returns `nil` on failure.
runnableExamples:
var f = newElement("myTag")
f.add newElement("firstSon")
f.add newElement("secondSon")
f.add newElement("thirdSon")
assert $(f.child("secondSon")) == "<secondSon />"
assert n.kind == xnElement
for i in items(n):
if i.kind == xnElement:
if i.tag == name:
return i
proc findAll*(n: XmlNode, tag: string, result: var seq[XmlNode]) =
## Iterates over all the children of `n` returning those matching `tag`.
##
## Found nodes satisfying the condition will be appended to the `result`
## sequence.
runnableExamples:
var
b = newElement("good")
c = newElement("bad")
d = newElement("bad")
e = newElement("good")
b.add newText("b text")
c.add newText("c text")
d.add newText("d text")
e.add newText("e text")
let a = newXmlTree("father", [b, c, d, e])
var s = newSeq[XmlNode]()
a.findAll("good", s)
assert $s == "@[<good>b text</good>, <good>e text</good>]"
assert n.k == xnElement
for child in n.items():
if child.k != xnElement:
continue
if child.tag == tag:
result.add(child)
child.findAll(tag, result)
proc findAll*(n: XmlNode, tag: string): seq[XmlNode] =
## A shortcut version to assign in let blocks.
runnableExamples:
var
b = newElement("good")
c = newElement("bad")
d = newElement("bad")
e = newElement("good")
b.add newText("b text")
c.add newText("c text")
d.add newText("d text")
e.add newText("e text")
let a = newXmlTree("father", [b, c, d, e])
assert $(a.findAll("good")) == "@[<good>b text</good>, <good>e text</good>]"
assert $(a.findAll("bad")) == "@[<bad>c text</bad>, <bad>d text</bad>]"
newSeq(result, 0)
findAll(n, tag, result)
proc xmlConstructor(a: NimNode): NimNode {.compileTime.} =
if a.kind == nnkCall:
@@ -346,56 +705,6 @@ macro `<>`*(x: untyped): untyped =
##
result = xmlConstructor(x)
proc child*(n: XmlNode, name: string): XmlNode =
## Finds the first child element of `n` with a name of `name`.
## Returns `nil` on failure.
assert n.kind == xnElement
for i in items(n):
if i.kind == xnElement:
if i.tag == name:
return i
proc attr*(n: XmlNode, name: string): string =
## Finds the first attribute of `n` with a name of `name`.
## Returns "" on failure.
assert n.kind == xnElement
if n.attrs == nil: return ""
return n.attrs.getOrDefault(name)
proc findAll*(n: XmlNode, tag: string, result: var seq[XmlNode]) =
## Iterates over all the children of `n` returning those matching `tag`.
##
## Found nodes satisfying the condition will be appended to the `result`
## sequence, which can't be nil or the proc will crash. Usage example:
##
## .. code-block::
## var
## html: XmlNode
## tags: seq[XmlNode] = @[]
##
## html = buildHtml()
## findAll(html, "img", tags)
## for imgTag in tags:
## process(imgTag)
assert n.k == xnElement
for child in n.items():
if child.k != xnElement:
continue
if child.tag == tag:
result.add(child)
child.findAll(tag, result)
proc findAll*(n: XmlNode, tag: string): seq[XmlNode] =
## Shortcut version to assign in let blocks. Example:
##
## .. code-block::
## var html: XmlNode
##
## html = buildHtml(html)
## for imgTag in html.findAll("img"):
## process(imgTag)
newSeq(result, 0)
findAll(n, tag, result)
when isMainModule:
assert """<a href="http://nim-lang.org">Nim rules.</a>""" ==
View File
@@ -3463,7 +3463,7 @@ when not defined(JS): #and not defined(nimscript):
## allows you to override the behaviour of your application when CTRL+C
## is pressed. Only one such hook is supported.
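##
## A handler might look like this (an illustrative sketch; ``onCtrlC`` is a
## hypothetical user-defined proc):
##
## .. code-block:: nim
##   proc onCtrlC() {.noconv.} =
##     # hypothetical handler name; the proc must be .noconv
##     echo "Ctrl+C pressed, shutting down"
##     quit 1
##
##   setControlCHook(onCtrlC)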
when not defined(useNimRtl):
when not defined(noSignalHandler) and not defined(useNimRtl):
proc unsetControlCHook*()
## reverts a call to setControlCHook
View File
@@ -197,26 +197,40 @@ when asmVersion and not defined(gcc) and not defined(llvm_gcc):
proc divInt(a, b: int): int {.compilerProc, asmNoStackFrame.} =
asm """
mov eax, ecx
mov ecx, edx
xor edx, edx
idiv ecx
jno theEnd
call `raiseOverflow`
theEnd:
test edx, edx
jne L_NOT_ZERO
call `raiseDivByZero`
L_NOT_ZERO:
cmp ecx, 0x80000000
jne L_DO_DIV
cmp edx, -1
jne L_DO_DIV
call `raiseOverflow`
L_DO_DIV:
mov eax, ecx
mov ecx, edx
cdq
idiv ecx
ret
"""
proc modInt(a, b: int): int {.compilerProc, asmNoStackFrame.} =
asm """
mov eax, ecx
mov ecx, edx
xor edx, edx
idiv ecx
jno theEnd
call `raiseOverflow`
theEnd:
mov eax, edx
test edx, edx
jne L_NOT_ZERO
call `raiseDivByZero`
L_NOT_ZERO:
cmp ecx, 0x80000000
jne L_DO_DIV
cmp edx, -1
jne L_DO_DIV
call `raiseOverflow`
L_DO_DIV:
mov eax, ecx
mov ecx, edx
cdq
idiv ecx
mov eax, edx
ret
"""
View File
@@ -347,24 +347,17 @@ proc raiseExceptionAux(e: ref Exception) =
if globalRaiseHook != nil:
if not globalRaiseHook(e): return
when defined(cpp) and not defined(noCppExceptions):
if e[] of OutOfMemError:
showErrorMessage(e.name)
quitOrDebug()
else:
pushCurrentException(e)
raiseCounter.inc
if raiseCounter == 0:
raiseCounter.inc # skip zero at overflow
e.raiseId = raiseCounter
{.emit: "`e`->raise();".}
pushCurrentException(e)
raiseCounter.inc
if raiseCounter == 0:
raiseCounter.inc # skip zero at overflow
e.raiseId = raiseCounter
{.emit: "`e`->raise();".}
else:
if excHandler != nil:
if not excHandler.hasRaiseAction or excHandler.raiseAction(e):
pushCurrentException(e)
c_longjmp(excHandler.context, 1)
elif e[] of OutOfMemError:
showErrorMessage(e.name)
quitOrDebug()
else:
when hasSomeStackTrace:
var buf = newStringOfCap(2000)
@@ -453,27 +446,21 @@ when not defined(gcDestructors):
## a ``seq``. This is not yet available for the JS backend.
shallowCopy(result, e.trace)
when defined(nimRequiresNimFrame):
const nimCallDepthLimit {.intdefine.} = 2000
const nimCallDepthLimit {.intdefine.} = 2000
proc callDepthLimitReached() {.noinline.} =
writeStackTrace()
showErrorMessage("Error: call depth limit reached in a debug build (" &
$nimCallDepthLimit & " function calls). You can change it with " &
"-d:nimCallDepthLimit=<int> but really try to avoid deep " &
"recursions instead.\n")
quitOrDebug()
proc callDepthLimitReached() {.noinline.} =
writeStackTrace()
showErrorMessage("Error: call depth limit reached in a debug build (" &
$nimCallDepthLimit & " function calls). You can change it with " &
"-d:nimCallDepthLimit=<int> but really try to avoid deep " &
"recursions instead.\n")
quitOrDebug()
proc nimFrame(s: PFrame) {.compilerRtl, inl, exportc: "nimFrame".} =
s.calldepth = if framePtr == nil: 0 else: framePtr.calldepth+1
s.prev = framePtr
framePtr = s
if s.calldepth == nimCallDepthLimit: callDepthLimitReached()
else:
proc pushFrame(s: PFrame) {.compilerRtl, inl, exportc: "nimFrame".} =
# XXX only for backwards compatibility
s.prev = framePtr
framePtr = s
proc nimFrame(s: PFrame) {.compilerRtl, inl.} =
s.calldepth = if framePtr == nil: 0 else: framePtr.calldepth+1
s.prev = framePtr
framePtr = s
if s.calldepth == nimCallDepthLimit: callDepthLimitReached()
when defined(endb):
var
@@ -537,7 +524,7 @@ proc setControlCHook(hook: proc () {.noconv.}) =
type SignalHandler = proc (sign: cint) {.noconv, benign.}
c_signal(SIGINT, cast[SignalHandler](hook))
when not defined(useNimRtl):
when not defined(noSignalHandler) and not defined(useNimRtl):
proc unsetControlCHook() =
# proc to unset a hook set by setControlCHook
c_signal(SIGINT, signalHandler)
View File
@@ -105,6 +105,8 @@ template gcAssert(cond: bool, msg: string) =
when defined(useGcAssert):
if not cond:
echo "[GCASSERT] ", msg
when defined(logGC):
echo "[GCASSERT] statistics:\L", GC_getStatistics()
GC_disable()
writeStackTrace()
#var x: ptr int
@@ -159,6 +161,10 @@ when defined(logGC):
c_fprintf(stdout, "[GC] %s: %p %d %s rc=%ld; thread=%ld\n",
msg, c, kind, typName, c.refcount shr rcShift, gch.gcThreadId)
template logCell(msg: cstring, c: PCell) =
when defined(logGC):
writeCell(msg, c)
template gcTrace(cell, state: untyped) =
when traceGC: traceCell(cell, state)
@@ -174,7 +180,7 @@ proc incRef(c: PCell) {.inline.} =
gcAssert(isAllocatedPtr(gch.region, c), "incRef: interiorPtr")
c.refcount = c.refcount +% rcIncrement
# and not colorMask
#writeCell("incRef", c)
logCell("incRef", c)
proc nimGCref(p: pointer) {.compilerProc.} =
# we keep it from being collected by pretending it's not even allocated:
@@ -192,6 +198,7 @@ proc decRef(c: PCell) {.inline.} =
c.refcount = c.refcount -% rcIncrement
if c.refcount <% rcIncrement:
rtlAddZCT(c)
logCell("decRef", c)
proc nimGCunref(p: pointer) {.compilerProc.} =
let cell = usrToCell(p)
@@ -410,7 +417,7 @@ proc rawNewObj(typ: PNimType, size: int, gch: var GcHeap): pointer =
sysAssert(isAllocatedPtr(gch.region, res), "newObj: 3")
# its refcount is zero, so add it to the ZCT:
addNewObjToZCT(res, gch)
when logGC: writeCell("new cell", res)
logCell("new cell", res)
track("rawNewObj", res, size)
gcTrace(res, csAllocated)
when useCellIds:
@@ -455,7 +462,7 @@ proc newObjRC1(typ: PNimType, size: int): pointer {.compilerRtl.} =
setFrameInfo(res)
res.refcount = rcIncrement # refcount is 1
sysAssert(isAllocatedPtr(gch.region, res), "newObj: 3")
when logGC: writeCell("new cell", res)
logCell("new cell", res)
track("newObjRC1", res, size)
gcTrace(res, csAllocated)
when useCellIds:
@@ -493,9 +500,8 @@ proc growObj(old: pointer, newsize: int, gch: var GcHeap): pointer =
# This can be wrong for intermediate temps that are nevertheless on the
# heap because of lambda lifting:
#gcAssert(res.refcount shr rcShift <=% 1, "growObj: 4")
when logGC:
writeCell("growObj old cell", ol)
writeCell("growObj new cell", res)
logCell("growObj old cell", ol)
logCell("growObj new cell", res)
gcTrace(ol, csZctFreed)
gcTrace(res, csAllocated)
track("growObj old", ol, 0)
@@ -547,7 +553,7 @@ proc freeCyclicCell(gch: var GcHeap, c: PCell) =
prepareDealloc(c)
gcTrace(c, csCycFreed)
track("cycle collector dealloc cell", c, 0)
when logGC: writeCell("cycle collector dealloc cell", c)
logCell("cycle collector dealloc cell", c)
when reallyDealloc:
sysAssert(allocInv(gch.region), "free cyclic cell")
beforeDealloc(gch, c, "freeCyclicCell: stack trash")
@@ -616,7 +622,7 @@ proc doOperation(p: pointer, op: WalkOp) =
# c_fprintf(stdout, "[GC] decref bug: %p", c)
gcAssert(isAllocatedPtr(gch.region, c), "decRef: waZctDecRef")
gcAssert(c.refcount >=% rcIncrement, "doOperation 2")
when logGC: writeCell("decref (from doOperation)", c)
logCell("decref (from doOperation)", c)
track("waZctDecref", p, 0)
decRef(c)
of waPush:
@@ -704,7 +710,7 @@ proc collectZCT(gch: var GcHeap): bool =
# as this might be too slow.
# In any case, it should be removed from the ZCT. But not
# freed. **KEEP THIS IN MIND WHEN MAKING THIS INCREMENTAL!**
when logGC: writeCell("zct dealloc cell", c)
logCell("zct dealloc cell", c)
track("zct dealloc cell", c, 0)
gcTrace(c, csZctFreed)
# We are about to free the object, call the finalizer BEFORE its
@@ -858,6 +864,7 @@ when not defined(useNimRtl):
for stack in items(gch.stack):
result.add "[GC] stack " & stack.bottom.repr & "[GC] max stack size " & cast[pointer](stack.maxStackSize).repr & "\n"
else:
result.add "[GC] stack bottom: " & gch.stack.bottom.repr
result.add "[GC] max stack size: " & $gch.stat.maxStackSize & "\n"
{.pop.} # profiler: off, stackTrace: off
View File
@@ -257,14 +257,14 @@ proc deallocAll*() = tlRegion.deallocAll()
proc deallocOsPages(r: var MemRegion) = r.deallocAll()
template withScratchRegion*(body: untyped) =
when false:
let obs = obstackPtr()
try:
body
finally:
setObstackPtr(obs)
when false:
template withScratchRegion*(body: untyped) =
var scratch: MemRegion
let oldRegion = tlRegion
tlRegion = scratch
View File
@@ -7,6 +7,7 @@
# distribution, for details about the copyright.
#
## To learn about scripting in Nim see `NimScript<nims.html>`_
# Nim's configuration system now uses Nim for scripting. This module provides
# a few things that are required for this to work.
View File
@@ -830,7 +830,7 @@ proc SQLStatistics*(hstmt: SqlHStmt, CatalogName: PSQLCHAR,
proc SQLErr*(henv: SqlHEnv, hdbc: SqlHDBC, hstmt: SqlHStmt,
szSqlState, pfNativeError, szErrorMsg: PSQLCHAR,
cbErrorMsgMax: TSqlSmallInt,
pcbErrorMsg: PSQLINTEGER): TSqlSmallInt {.
pcbErrorMsg: PSQLSMALLINT): TSqlSmallInt {.
dynlib: odbclib, importc: "SQLError".}
{.pop.}
View File
@@ -26,7 +26,8 @@ proc test(dir: string; fixup = false) =
copyFile(produced, expected)
else:
echo "SUCCESS: files identical: ", produced
removeDir(dir / "htmldocs")
if failures == 0:
removeDir(dir / "htmldocs")
test("nimdoc/testproject", defined(fixup))
if failures > 0: quit($failures & " failures occurred.")
View File
@@ -1255,7 +1255,7 @@ function main() {
<li>
<a class="reference reference-toplevel" href="#12" id="62">Procs</a>
<ul class="simple simple-toc-section">
<li><a class="reference" href="#someType%2C"
<li><a class="reference" href="#someType_2"
title="someType(): SomeType"><wbr />some<wbr />Type<span class="attachedType" style="visibility:hidden">SomeType</span></a></li>
</ul>
@@ -1263,9 +1263,9 @@ function main() {
<li>
<a class="reference reference-toplevel" href="#18" id="68">Templates</a>
<ul class="simple simple-toc-section">
<li><a class="reference" href="#aEnum.t%2C"
<li><a class="reference" href="#aEnum.t"
title="aEnum(): untyped"><wbr />a<wbr />Enum<span class="attachedType" style="visibility:hidden"></span></a></li>
<li><a class="reference" href="#bEnum.t%2C"
<li><a class="reference" href="#bEnum.t"
title="bEnum(): untyped"><wbr />b<wbr />Enum<span class="attachedType" style="visibility:hidden"></span></a></li>
</ul>
@@ -1304,8 +1304,8 @@ function main() {
<div class="section" id="12">
<h1><a class="toc-backref" href="#12">Procs</a></h1>
<dl class="item">
<a id="someType,"></a>
<dt><pre><span class="Keyword">proc</span> <span class="Identifier">someType</span><span class="Other">(</span><span class="Other">)</span><span class="Other">:</span> <a href="utils.html#SomeType"><span class="Identifier">SomeType</span></a> <span><span class="Other">{</span><span class="Other pragmadots">...</span><span class="Other">}</span></span><span class="pragmawrap"><span class="Other">{.</span><span class="pragma"><span class="Identifier">raises</span><span class="Other">:</span> <span class="Other">[</span><span class="Other">]</span><span class="Other">,</span> <span class="Identifier">tags</span><span class="Other">:</span> <span class="Other">[</span><span class="Other">]</span></span><span class="Other">.}</span></span></pre></dt>
<a id="someType_2"></a>
<dt><pre><span class="Keyword">proc</span> <a href="#someType_2"><span class="Identifier">someType</span></a><span class="Other">(</span><span class="Other">)</span><span class="Other">:</span> <a href="utils.html#SomeType"><span class="Identifier">SomeType</span></a> <span><span class="Other">{</span><span class="Other pragmadots">...</span><span class="Other">}</span></span><span class="pragmawrap"><span class="Other">{.</span><span class="pragma"><span class="Identifier">raises</span><span class="Other">:</span> <span class="Other">[</span><span class="Other">]</span><span class="Other">,</span> <span class="Identifier">tags</span><span class="Other">:</span> <span class="Other">[</span><span class="Other">]</span></span><span class="Other">.}</span></span></pre></dt>
<dd>
constructor.
@@ -1315,14 +1315,14 @@ constructor.
<div class="section" id="18">
<h1><a class="toc-backref" href="#18">Templates</a></h1>
<dl class="item">
<a id="aEnum.t,"></a>
<dt><pre><span class="Keyword">template</span> <span class="Identifier">aEnum</span><span class="Other">(</span><span class="Other">)</span><span class="Other">:</span> <span class="Identifier">untyped</span></pre></dt>
<a id="aEnum.t"></a>
<dt><pre><span class="Keyword">template</span> <a href="#aEnum.t"><span class="Identifier">aEnum</span></a><span class="Other">(</span><span class="Other">)</span><span class="Other">:</span> <span class="Identifier">untyped</span></pre></dt>
<dd>
</dd>
<a id="bEnum.t,"></a>
<dt><pre><span class="Keyword">template</span> <span class="Identifier">bEnum</span><span class="Other">(</span><span class="Other">)</span><span class="Other">:</span> <span class="Identifier">untyped</span></pre></dt>
<a id="bEnum.t"></a>
<dt><pre><span class="Keyword">template</span> <a href="#bEnum.t"><span class="Identifier">bEnum</span></a><span class="Other">(</span><span class="Other">)</span><span class="Other">:</span> <span class="Identifier">untyped</span></pre></dt>
<dd>
View File
@@ -1277,7 +1277,7 @@ function main() {
<li>
<a class="reference reference-toplevel" href="#13" id="63">Funcs</a>
<ul class="simple simple-toc-section">
<li><a class="reference" href="#someFunc%2C"
<li><a class="reference" href="#someFunc"
title="someFunc()"><wbr />some<wbr />Func<span class="attachedType" style="visibility:hidden"></span></a></li>
</ul>
@@ -1285,7 +1285,7 @@ function main() {
<li>
<a class="reference reference-toplevel" href="#17" id="67">Macros</a>
<ul class="simple simple-toc-section">
<li><a class="reference" href="#bar.m%2C"
<li><a class="reference" href="#bar.m"
title="bar(): untyped"><wbr />bar<span class="attachedType" style="visibility:hidden"></span></a></li>
</ul>
@@ -1350,13 +1350,13 @@ The enum B.
<h1><a class="toc-backref" href="#12">Procs</a></h1>
<dl class="item">
<a id="bar,T,T"></a>
<dt><pre><span class="Keyword">proc</span> <span class="Identifier">bar</span><span class="Other">[</span><span class="Identifier">T</span><span class="Other">]</span><span class="Other">(</span><span class="Identifier">a</span><span class="Other">,</span> <span class="Identifier">b</span><span class="Other">:</span> <span class="Identifier">T</span><span class="Other">)</span><span class="Other">:</span> <span class="Identifier">T</span></pre></dt>
<dt><pre><span class="Keyword">proc</span> <a href="#bar%2CT%2CT"><span class="Identifier">bar</span></a><span class="Other">[</span><span class="Identifier">T</span><span class="Other">]</span><span class="Other">(</span><span class="Identifier">a</span><span class="Other">,</span> <span class="Identifier">b</span><span class="Other">:</span> <span class="Identifier">T</span><span class="Other">)</span><span class="Other">:</span> <span class="Identifier">T</span></pre></dt>
<dd>
</dd>
<a id="isValid,T"></a>
<dt><pre><span class="Keyword">proc</span> <span class="Identifier">isValid</span><span class="Other">[</span><span class="Identifier">T</span><span class="Other">]</span><span class="Other">(</span><span class="Identifier">x</span><span class="Other">:</span> <span class="Identifier">T</span><span class="Other">)</span><span class="Other">:</span> <span class="Identifier">bool</span></pre></dt>
<dt><pre><span class="Keyword">proc</span> <a href="#isValid%2CT"><span class="Identifier">isValid</span></a><span class="Other">[</span><span class="Identifier">T</span><span class="Other">]</span><span class="Other">(</span><span class="Identifier">x</span><span class="Other">:</span> <span class="Identifier">T</span><span class="Other">)</span><span class="Other">:</span> <span class="Identifier">bool</span></pre></dt>
<dd>
@@ -1366,8 +1366,8 @@ The enum B.
<div class="section" id="13">
<h1><a class="toc-backref" href="#13">Funcs</a></h1>
<dl class="item">
<a id="someFunc,"></a>
<dt><pre><span class="Keyword">func</span> <span class="Identifier">someFunc</span><span class="Other">(</span><span class="Other">)</span> <span><span class="Other">{</span><span class="Other pragmadots">...</span><span class="Other">}</span></span><span class="pragmawrap"><span class="Other">{.</span><span class="pragma"><span class="Identifier">raises</span><span class="Other">:</span> <span class="Other">[</span><span class="Other">]</span><span class="Other">,</span> <span class="Identifier">tags</span><span class="Other">:</span> <span class="Other">[</span><span class="Other">]</span></span><span class="Other">.}</span></span></pre></dt>
<a id="someFunc"></a>
<dt><pre><span class="Keyword">func</span> <a href="#someFunc"><span class="Identifier">someFunc</span></a><span class="Other">(</span><span class="Other">)</span> <span><span class="Other">{</span><span class="Other pragmadots">...</span><span class="Other">}</span></span><span class="pragmawrap"><span class="Other">{.</span><span class="pragma"><span class="Identifier">raises</span><span class="Other">:</span> <span class="Other">[</span><span class="Other">]</span><span class="Other">,</span> <span class="Identifier">tags</span><span class="Other">:</span> <span class="Other">[</span><span class="Other">]</span></span><span class="Other">.}</span></span></pre></dt>
<dd>
My someFunc. Stuff in <tt class="docutils literal"><span class="pre">quotes</span></tt> here. <a class="reference external" href="https://nim-lang.org">Some link</a>
@@ -1377,8 +1377,8 @@ My someFunc. Stuff in <tt class="docutils literal"><span class="pre">quotes</spa
<div class="section" id="17">
<h1><a class="toc-backref" href="#17">Macros</a></h1>
<dl class="item">
<a id="bar.m,"></a>
<dt><pre><span class="Keyword">macro</span> <span class="Identifier">bar</span><span class="Other">(</span><span class="Other">)</span><span class="Other">:</span> <span class="Identifier">untyped</span></pre></dt>
<a id="bar.m"></a>
<dt><pre><span class="Keyword">macro</span> <a href="#bar.m"><span class="Identifier">bar</span></a><span class="Other">(</span><span class="Other">)</span><span class="Other">:</span> <span class="Identifier">untyped</span></pre></dt>
<dd>
@@ -1389,7 +1389,7 @@ My someFunc. Stuff in <tt class="docutils literal"><span class="pre">quotes</spa
<h1><a class="toc-backref" href="#18">Templates</a></h1>
<dl class="item">
<a id="foo.t,SomeType,SomeType"></a>
<dt><pre><span class="Keyword">template</span> <span class="Identifier">foo</span><span class="Other">(</span><span class="Identifier">a</span><span class="Other">,</span> <span class="Identifier">b</span><span class="Other">:</span> <a href="subdir/subdir_b/utils.html#SomeType"><span class="Identifier">SomeType</span></a><span class="Other">)</span></pre></dt>
<dt><pre><span class="Keyword">template</span> <a href="#foo.t%2CSomeType%2CSomeType"><span class="Identifier">foo</span></a><span class="Other">(</span><span class="Identifier">a</span><span class="Other">,</span> <span class="Identifier">b</span><span class="Other">:</span> <a href="subdir/subdir_b/utils.html#SomeType"><span class="Identifier">SomeType</span></a><span class="Other">)</span></pre></dt>
<dd>
This does nothing

View File

@@ -1227,7 +1227,7 @@ function main() {
</ul></dd>
<dt><a name="aEnum" href="#aEnum"><span>aEnum:</span></a></dt><dd><ul class="simple">
<li><a class="reference external"
data-doc-search-tag="utils: aEnum(): untyped" href="subdir/subdir_b/utils.html#aEnum.t%2C">utils: aEnum(): untyped</a></li>
data-doc-search-tag="utils: aEnum(): untyped" href="subdir/subdir_b/utils.html#aEnum.t">utils: aEnum(): untyped</a></li>
</ul></dd>
<dt><a name="aVariable" href="#aVariable"><span>aVariable:</span></a></dt><dd><ul class="simple">
<li><a class="reference external"
@@ -1241,11 +1241,11 @@ function main() {
<li><a class="reference external"
data-doc-search-tag="testproject: bar[T](a, b: T): T" href="testproject.html#bar%2CT%2CT">testproject: bar[T](a, b: T): T</a></li>
<li><a class="reference external"
data-doc-search-tag="testproject: bar(): untyped" href="testproject.html#bar.m%2C">testproject: bar(): untyped</a></li>
data-doc-search-tag="testproject: bar(): untyped" href="testproject.html#bar.m">testproject: bar(): untyped</a></li>
</ul></dd>
<dt><a name="bEnum" href="#bEnum"><span>bEnum:</span></a></dt><dd><ul class="simple">
<li><a class="reference external"
data-doc-search-tag="utils: bEnum(): untyped" href="subdir/subdir_b/utils.html#bEnum.t%2C">utils: bEnum(): untyped</a></li>
data-doc-search-tag="utils: bEnum(): untyped" href="subdir/subdir_b/utils.html#bEnum.t">utils: bEnum(): untyped</a></li>
</ul></dd>
<dt><a name="enumValueA" href="#enumValueA"><span>enumValueA:</span></a></dt><dd><ul class="simple">
<li><a class="reference external"
@@ -1269,7 +1269,7 @@ function main() {
</ul></dd>
<dt><a name="someFunc" href="#someFunc"><span>someFunc:</span></a></dt><dd><ul class="simple">
<li><a class="reference external"
data-doc-search-tag="testproject: someFunc()" href="testproject.html#someFunc%2C">testproject: someFunc()</a></li>
data-doc-search-tag="testproject: someFunc()" href="testproject.html#someFunc">testproject: someFunc()</a></li>
</ul></dd>
<dt><a name="SomeType" href="#SomeType"><span>SomeType:</span></a></dt><dd><ul class="simple">
<li><a class="reference external"
@@ -1277,7 +1277,7 @@ function main() {
</ul></dd>
<dt><a name="someType" href="#someType"><span>someType:</span></a></dt><dd><ul class="simple">
<li><a class="reference external"
data-doc-search-tag="utils: someType(): SomeType" href="subdir/subdir_b/utils.html#someType%2C">utils: someType(): SomeType</a></li>
data-doc-search-tag="utils: someType(): SomeType" href="subdir/subdir_b/utils.html#someType_2">utils: someType(): SomeType</a></li>
</ul></dd>
</dl>
<div class="row">

View File

@@ -240,6 +240,10 @@ proc skipDisabledTest(test: Test): bool =
proc runEpcTest(filename: string): int =
let s = parseTest(filename, true)
if s.skipDisabledTest: return 0
for req, _ in items(s.script):
if req.startsWith("highlight"):
echo "disabled epc: " & s.filename
return 0
for cmd in s.startup:
if not runCmd(cmd, s.dest):
quit "invalid command: " & cmd

View File

@@ -1,7 +1,6 @@
proc `$$$`#[!]#
discard """
disabled:true
$nimsuggest --tester $file
>highlight $1
highlight;;skProc;;1;;6;;3

View File

@@ -0,0 +1,20 @@
newSeq[int]()
system.newSeq[int]()#[!]#
offsetOf[int]()
discard """
$nimsuggest --tester $file
>highlight $1
highlight;;skType;;1;;7;;3
highlight;;skProc;;1;;0;;6
highlight;;skProc;;1;;0;;6
highlight;;skType;;1;;7;;3
highlight;;skProc;;1;;0;;6
highlight;;skType;;2;;14;;3
highlight;;skProc;;2;;7;;6
highlight;;skProc;;2;;7;;6
highlight;;skType;;2;;14;;3
highlight;;skProc;;2;;7;;6
highlight;;skTemplate;;3;;0;;8
highlight;;skType;;3;;9;;3
"""

View File

@@ -0,0 +1,13 @@
macro a(b: string): untyped = discard
a "string"#[!]#
discard """
$nimsuggest --tester $file
>highlight $1
highlight;;skMacro;;1;;6;;1
highlight;;skType;;1;;11;;6
highlight;;skType;;1;;20;;7
highlight;;skMacro;;3;;0;;1
highlight;;skMacro;;3;;0;;1
"""

View File

@@ -0,0 +1,14 @@
system.echo#[!]#
system.once
system.`$` 1
discard """
$nimsuggest --tester $file
>highlight $1
highlight;;skProc;;1;;7;;4
highlight;;skProc;;1;;7;;4
highlight;;skTemplate;;2;;7;;4
highlight;;skTemplate;;2;;7;;4
highlight;;skTemplate;;2;;7;;4
highlight;;skProc;;3;;8;;1
"""

View File

@@ -0,0 +1,10 @@
proc `%%%`(a: int) = discard
proc `cast`() = discard
tsug_accquote.#[!]#
discard """
$nimsuggest --tester $file
>sug $1
sug;;skProc;;tsug_accquote.`%%%`;;proc (a: int);;$file;;1;;5;;"";;100;;None
sug;;skProc;;tsug_accquote.`cast`;;proc ();;$file;;2;;5;;"";;100;;None
"""

View File

@@ -0,0 +1,9 @@
doAssert true#[!]#
discard """
$nimsuggest --tester $1
>highlight $1
highlight;;skTemplate;;1;;0;;8
highlight;;skTemplate;;1;;0;;8
highlight;;skEnumField;;1;;9;;4
"""

View File

@@ -6,7 +6,6 @@ type
TypeE* {.unchecked.} = array[0, int]#[!]#
discard """
disabled:true
$nimsuggest --tester $file
>highlight $1
highlight;;skType;;2;;2;;5

View File

@@ -5,6 +5,7 @@ For more information about Nim, including downloads and documentation for
the latest release, check out [Nim's website][nim-site] or [bleeding edge docs](https://nim-lang.github.io/Nim/).
## Community
[![Join the IRC chat][badge-nim-irc]][nim-irc]
[![Join the Gitter chat][badge-nim-gitter]][nim-gitter]
[![Get help][badge-nim-forum-gethelp]][nim-forum]
@@ -23,6 +24,7 @@ the latest release, check out [Nim's website][nim-site] or [bleeding edge docs](
* [Github Wiki][nim-wiki] - Misc user-contributed content.
## Compiling
The compiler currently officially supports the following platform and
architecture combinations:
@@ -56,6 +58,8 @@ Nim from source using ``gcc``, ``git`` and the ``koch`` build tool.
For most users, installing the latest stable version is enough. Check out
the installation instructions on the website to do so: https://nim-lang.org/install.html.
For package maintainers: see [packaging guidelines](https://nim-lang.github.io/Nim/packaging.html).
```
# step 1:
git clone https://github.com/nim-lang/Nim.git
@@ -83,6 +87,7 @@ Finally, once you have finished the build steps (on Windows, Mac or Linux) you
should add the ``bin`` directory to your PATH.
## Koch
``koch`` is the build tool used to build various parts of Nim and to generate
documentation and the website, among other things. The ``koch`` tool can also
be used to run the Nim test suite.
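As a minimal sketch of typical ``koch`` invocations (the ``boot``, ``docs`` and ``tests`` subcommand names are assumed from the description above and may vary between releases):
```
# build the koch tool itself
nim c koch
# rebuild the compiler in release mode
./koch boot -d:release
# generate the documentation
./koch docs
# run the Nim test suite
./koch tests
```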
@@ -106,6 +111,7 @@ This project exists thanks to all the people who contribute.
<a href="https://github.com/nim-lang/Nim/graphs/contributors"><img src="https://opencollective.com/Nim/contributors.svg?width=890" /></a>
## Contributing
[![Backers on Open Collective](https://opencollective.com/nim/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/nim/sponsors/badge.svg)](#sponsors)
[![Setup a bounty via Bountysource][badge-nim-bountysource]][nim-bountysource]
[![Donate Bitcoins][badge-nim-bitcoin]][nim-bitcoin]

View File

@@ -27,6 +27,7 @@ dflfdjkl__abcdefgasfsgdfgsgdfggsdfasdfsafewfkljdsfajs
dflfdjkl__abcdefgasfsgdfgsgdfggsdfasdfsafewfkljdsfajsdf
kgdchlfniambejop
fjpmholcibdgeakn
2.0
'''
joinable: false
"""
@@ -538,3 +539,12 @@ block trelaxedindextyp:
proc foo(x: seq[int]; idx: uint64) = echo x[idx]
proc foo(x: string|cstring; idx: uint64) = echo x[idx]
proc foo(x: openArray[int]; idx: uint64) = echo x[idx]
block t3899:
# https://github.com/nim-lang/Nim/issues/3899
type O = object
a: array[1..2,float]
template `[]`(x: O, i: int): float =
x.a[i]
const c = O(a: [1.0,2.0])
echo c[2]

View File

@@ -1,7 +1,7 @@
discard """
output: "5000"
"""
import asyncdispatch, nativesockets, net, strutils, os
import asyncdispatch, asyncnet, nativesockets, net, strutils, os
var msgCount = 0
@@ -12,20 +12,22 @@ const
var clientCount = 0
proc sendMessages(client: AsyncFD) {.async.} =
for i in 0 .. <messagesToSend:
for i in 0 ..< messagesToSend:
await send(client, "Message " & $i & "\c\L")
proc launchSwarm(port: Port) {.async.} =
for i in 0 .. <swarmSize:
var sock = newAsyncNativeSocket()
for i in 0 ..< swarmSize:
var sock = createAsyncNativeSocket()
await connect(sock, "localhost", port)
await sendMessages(sock)
closeSocket(sock)
proc readMessages(client: AsyncFD) {.async.} =
# wrapping the AsyncFd into a AsyncSocket object
var sockObj = newAsyncSocket(client)
while true:
var line = await recvLine(client)
var line = await recvLine(sockObj)
if line == "":
closeSocket(client)
clientCount.inc
@@ -37,7 +39,7 @@ proc readMessages(client: AsyncFD) {.async.} =
doAssert false
proc createServer(port: Port) {.async.} =
var server = newAsyncNativeSocket()
var server = createAsyncNativeSocket()
block:
var name: Sockaddr_in
name.sin_family = toInt(AF_INET).uint16

View File

@@ -9,7 +9,7 @@ type
AsyncScgiState* = object of RootObj ## SCGI state object
#bug #442
import sockets, asyncio, strtabs
import asyncnet, strtabs
proc handleSCGIRequest[TScgi: ScgiState | AsyncScgiState](s: TScgi) =
discard
proc handleSCGIRequest(client: AsyncSocket, headers: StringTableRef,

View File

@@ -3,7 +3,7 @@ discard """
# bug #5404
import parseopt2
import parseopt
{.emit: """typedef struct {
int key;
@@ -12,5 +12,5 @@ import parseopt2
type foo* {.importc: "foo", nodecl.} = object
key* {.importc: "key".}: cint
for kind, key, value in parseopt2.getopt():
for kind, key, value in parseopt.getopt():
discard

View File

@@ -1,3 +1,6 @@
discard """
disabled: true
"""
# This is a regression of the new lambda lifting; detected by Aporia
import asyncio, sockets

View File

@@ -166,3 +166,12 @@ seq4 =
var ii = 1
let arr2 = [newMySeq(2, 5.0), if i > 1: newMySeq(3, 1.0) else: newMySeq(0, 0.0)]
var seqOfSeq2 = @[newMySeq(2, 5.0), newMySeq(3, 1.0)]
## issue #10462
proc myfuncLoop(x: int): MySeqNonCopyable =
for i in 0..<x:
var cc = newMySeq(i, 5.0)
result = cc
discard myfuncLoop(3)

View File

@@ -0,0 +1,14 @@
discard """
errormsg: "can raise an unlisted exception: ref FloatingPointError"
line: 10
"""
proc foo() {.raises: [].} =
try:
discard
except KeyError:
raise newException(FloatingPointError, "foo")
except Exception:
discard
foo()

31
tests/errmsgs/t10376.nim Normal file
View File

@@ -0,0 +1,31 @@
discard """
errormsg: "finalizer must be a direct reference to a procedure"
line: 29
"""
type
A = ref object
proc my_callback(a: A) {. nimcall .} =
discard
proc foo(callback: proc(a: A) {. nimcall .}) =
var x1: A
new(x1, proc (x: A) {.nimcall.} = discard)
var x2: A
new(x2, func (x: A) {.nimcall.} = discard)
var x3: A
proc foo1(a: A) {.nimcall.} = discard
new(x3, foo1)
var x4: A
func foo2(a: A) {.nimcall.} = discard
new(x4, foo2)
var x5: A
new(x5, my_callback)
var x6: A
new(x6, callback)
foo(my_callback)

5
tests/errmsgs/t8610.nim Normal file
View File

@@ -0,0 +1,5 @@
discard """
errmsg: "'typedesc' metatype is not valid here; typed '=' instead of ':'?"
"""
## issue #8610
const Foo = int

View File

@@ -64,3 +64,13 @@ proc return_in_except =
try: return_in_except()
except: echo "RECOVER"
block: #10417
proc moo() {.noreturn.} = discard
let bar =
try:
1
except:
moo()
doAssert(bar == 1)

View File

@@ -0,0 +1,11 @@
discard """
errormsg: "cannot instantiate 'GenericParentType[T]' inside of type definition: 'GenericChildType'; Maybe generic arguments are missing?"
line: 8
"""
type
GenericParentType[T] = ref object of RootObj
GenericChildType[T] = ref object of GenericParentType # missing the [T]
val: T
var instance : GenericChildType[int] = nil

View File

@@ -1,5 +1,5 @@
discard """
errormsg: "cannot instantiate: 'GenericNodeObj'"
errormsg: "cannot instantiate: 'GenericNodeObj[T]'; Maybe generic arguments are missing?"
line: 21
"""
# bug #2509

View File

@@ -4,6 +4,7 @@ output: '''
Hallo Welt
Hallo Welt
1
()
'''
"""
@@ -34,3 +35,17 @@ macro t(): untyped =
t()
echo tp()
# https://github.com/nim-lang/Nim/issues/9866
type
# Foo = int # works
Foo = object # fails
macro dispatchGen(): untyped =
var shOpt: Foo
result = quote do:
let baz = `shOpt`
echo `shOpt`
dispatchGen()

View File

@@ -1,4 +1,4 @@
import genpacket_enet, sockets, md5, enet
import genpacket_enet, nativesockets, net, md5, enet
defPacketImports()
type

View File

@@ -21,14 +21,7 @@ kind: cmdShortOption key:val -- r:1
kind: cmdShortOption key:val -- r:0
kind: cmdShortOption key:val -- l:
kind: cmdShortOption key:val -- r:4
parseopt2
first round
kind: cmdLongOption key:val -- left:
second round
kind: cmdLongOption key:val -- left:
kind: cmdLongOption key:val -- debug:3
kind: cmdShortOption key:val -- l:4
kind: cmdShortOption key:val -- r:2'''
'''
joinable: false
"""
@@ -42,7 +35,6 @@ when defined(testament_tparseopt):
main()
else:
from parseopt import nil
from parseopt2 import nil
block:
echo "parseopt"
@@ -76,28 +68,11 @@ else:
for kind, key, val in parseopt.getopt(p):
echo "kind: ", kind, "\tkey:val -- ", key, ":", val
block:
echo "parseopt2"
for kind, key, val in parseopt2.getopt():
echo "kind: ", kind, "\tkey:val -- ", key, ":", val
# pass custom cmdline arguments
echo "first round"
var argv: seq[string] = @["--left", "--debug:3", "-l=4", "-r:2"]
var p = parseopt2.initOptParser(argv)
for kind, key, val in parseopt2.getopt(p):
echo "kind: ", kind, "\tkey:val -- ", key, ":", val
break
# reset getopt iterator and check arguments are returned correctly.
echo "second round"
for kind, key, val in parseopt2.getopt(p):
echo "kind: ", kind, "\tkey:val -- ", key, ":", val
import osproc, os, strutils
from stdtest/specialpaths import buildDir
import "../.." / compiler/unittest_light
block: # fix #9951 (and make it work for parseopt and parseopt2)
block: # fix #9951
template runTest(parseoptCustom) =
var p = parseoptCustom.initOptParser(@["echo \"quoted\""])
let expected = when defined(windows):
@@ -117,7 +92,6 @@ else:
doAssert "a5'b" == "a5\'b"
assertEquals parseoptCustom.cmdLineRest(p2), expected2
runTest(parseopt)
runTest(parseopt2)
block: # fix #9842
let exe = buildDir / "D20190112T145450".addFileExt(ExeExt)

View File

@@ -1,23 +0,0 @@
discard """
output: '''
100
200
300
400
'''
"""
import threadpool, os
proc single(time: int) =
sleep time
echo time
proc sleepsort(nums: openArray[int]) =
parallel:
var i = 0
while i <= len(nums) + -1:
spawn single(nums[i])
i += 1
sleepsort([400,100,300,200])

View File

@@ -137,3 +137,20 @@ block:
type
Coord[N: static[int]] = tuple[col, row: range[0'i8 .. (N.int8-1)]]
Point[N: static[int]] = range[0'i16 .. N.int16 * N.int16 - 1]
# https://github.com/nim-lang/Nim/issues/10339
block:
type
MicroKernel = object
a: float
b: int
macro extractA(ukernel: static MicroKernel): untyped =
result = newLit ukernel.a
proc tFunc[ukernel: static MicroKernel]() =
const x = ukernel.extractA
doAssert x == 5.5
const uk = MicroKernel(a: 5.5, b: 1)
tFunc[uk]()

View File

@@ -162,6 +162,63 @@ proc main() =
doAssert( U64A.rotateLeftBits(64) == U64A)
doAssert( U64A.rotateRightBits(64) == U64A)
block:
# mask operations
var v: uint8
v.setMask(0b1100_0000)
v.setMask(0b0000_1100)
doAssert(v == 0b1100_1100)
v.flipMask(0b0101_0101)
doAssert(v == 0b1001_1001)
v.clearMask(0b1000_1000)
doAssert(v == 0b0001_0001)
v.clearMask(0b0001_0001)
doAssert(v == 0b0000_0000)
block:
# single bit operations
var v: uint8
v.setBit(0)
doAssert v == 0x0000_0001
v.setBit(1)
doAssert v == 0b0000_0011
v.flipBit(7)
doAssert v == 0b1000_0011
v.clearBit(0)
doAssert v == 0b1000_0010
v.flipBit(1)
doAssert v == 0b1000_0000
doAssert v.testbit(7)
doAssert not v.testbit(6)
block:
# multi bit operations
var v: uint8
v.setBits(0, 1, 7)
doAssert v == 0b1000_0011
v.flipBits(2, 3)
doAssert v == 0b1000_1111
v.clearBits(7, 0, 1)
doAssert v == 0b0000_1100
block:
# signed
var v: int8
v.setBit(7)
doAssert v == -128
block:
var v: uint64
v.setBit(63)
doAssert v == 0b1000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000_0000'u64
block:
# Test if RangeError is thrown if indexing out of range
try:
var v: uint32
var i = 32
v.setBit(i)
doAssert false
except RangeError:
discard
except:
doAssert false
echo "OK"
main()

View File

@@ -1,13 +1,5 @@
discard """
output: '''true
true
true
true
true
true
true
true
true
output: '''
All:
__really_obscure_dir_name/are.x
__really_obscure_dir_name/created
@@ -27,31 +19,13 @@ __really_obscure_dir_name/created
__really_obscure_dir_name/dirs
__really_obscure_dir_name/some
__really_obscure_dir_name/test
false
false
false
false
false
false
false
false
false
true
true
Raises
Raises
true
true
true
true
true
true
'''
"""
# test os path creation, iteration, and deletion
import os, strutils
import os, strutils, pathnorm
block fileOperations:
let files = @["these.txt", "are.x", "testing.r", "files.q"]
@@ -60,17 +34,17 @@ block fileOperations:
let dname = "__really_obscure_dir_name"
createDir(dname)
echo dirExists(dname)
doAssert dirExists(dname)
# Test creating files and dirs
for dir in dirs:
createDir(dname/dir)
echo dirExists(dname/dir)
doAssert dirExists(dname/dir)
for file in files:
let fh = open(dname/file, fmReadWrite)
fh.close()
echo fileExists(dname/file)
doAssert fileExists(dname/file)
echo "All:"
@@ -93,23 +67,23 @@ block fileOperations:
# Test removal of files dirs
for dir in dirs:
removeDir(dname/dir)
echo dirExists(dname/dir)
doAssert: not dirExists(dname/dir)
for file in files:
removeFile(dname/file)
echo fileExists(dname/file)
doAssert: not fileExists(dname/file)
removeDir(dname)
echo dirExists(dname)
doAssert: not dirExists(dname)
# createDir should create recursive directories
createDir(dirs[0] / dirs[1])
echo dirExists(dirs[0] / dirs[1]) # true
doAssert dirExists(dirs[0] / dirs[1]) # true
removeDir(dirs[0])
# createDir should properly handle trailing separator
createDir(dname / "")
echo dirExists(dname) # true
doAssert dirExists(dname) # true
removeDir(dname)
# createDir should raise IOError if the path exists
@@ -138,10 +112,10 @@ block fileOperations:
copyDir("a", "../dest/a")
removeDir("a")
echo dirExists("../dest/a/b")
echo fileExists("../dest/a/b/file.txt")
doAssert dirExists("../dest/a/b")
doAssert fileExists("../dest/a/b/file.txt")
echo fileExists("../dest/a/b/c/fileC.txt")
doAssert fileExists("../dest/a/b/c/fileC.txt")
removeDir("../dest")
# test copyDir:
@@ -152,8 +126,8 @@ block fileOperations:
copyDir("a/", "../dest/a/")
removeDir("a")
echo dirExists("../dest/a/b")
echo fileExists("../dest/a/file.txt")
doAssert dirExists("../dest/a/b")
doAssert fileExists("../dest/a/file.txt")
removeDir("../dest")
import times
@@ -165,9 +139,9 @@ block modificationTime:
setLastModificationTime("a", tm)
when defined(macosx):
echo "true"
doAssert true
else:
echo getLastModificationTime("a") == tm
doAssert getLastModificationTime("a") == tm
removeFile("a")
block walkDirRec:
@@ -265,3 +239,97 @@ block splitFile:
doAssert splitFile("a/..") == ("a", "..", "")
# execShellCmd is tested in tosproc
block ospaths:
doAssert unixToNativePath("") == ""
doAssert unixToNativePath(".") == $CurDir
doAssert unixToNativePath("..") == $ParDir
doAssert isAbsolute(unixToNativePath("/"))
doAssert isAbsolute(unixToNativePath("/", "a"))
doAssert isAbsolute(unixToNativePath("/a"))
doAssert isAbsolute(unixToNativePath("/a", "a"))
doAssert isAbsolute(unixToNativePath("/a/b"))
doAssert isAbsolute(unixToNativePath("/a/b", "a"))
doAssert unixToNativePath("a/b") == joinPath("a", "b")
when defined(macos):
doAssert unixToNativePath("./") == ":"
doAssert unixToNativePath("./abc") == ":abc"
doAssert unixToNativePath("../abc") == "::abc"
doAssert unixToNativePath("../../abc") == ":::abc"
doAssert unixToNativePath("/abc", "a") == "abc"
doAssert unixToNativePath("/abc/def", "a") == "abc:def"
elif doslikeFileSystem:
doAssert unixToNativePath("./") == ".\\"
doAssert unixToNativePath("./abc") == ".\\abc"
doAssert unixToNativePath("../abc") == "..\\abc"
doAssert unixToNativePath("../../abc") == "..\\..\\abc"
doAssert unixToNativePath("/abc", "a") == "a:\\abc"
doAssert unixToNativePath("/abc/def", "a") == "a:\\abc\\def"
else:
#Tests for unix
doAssert unixToNativePath("./") == "./"
doAssert unixToNativePath("./abc") == "./abc"
doAssert unixToNativePath("../abc") == "../abc"
doAssert unixToNativePath("../../abc") == "../../abc"
doAssert unixToNativePath("/abc", "a") == "/abc"
doAssert unixToNativePath("/abc/def", "a") == "/abc/def"
block extractFilenameTest:
doAssert extractFilename("") == ""
when defined(posix):
doAssert extractFilename("foo/bar") == "bar"
doAssert extractFilename("foo/bar.txt") == "bar.txt"
doAssert extractFilename("foo/") == ""
doAssert extractFilename("/") == ""
when doslikeFileSystem:
doAssert extractFilename(r"foo\bar") == "bar"
doAssert extractFilename(r"foo\bar.txt") == "bar.txt"
doAssert extractFilename(r"foo\") == ""
doAssert extractFilename(r"C:\") == ""
block lastPathPartTest:
doAssert lastPathPart("") == ""
when defined(posix):
doAssert lastPathPart("foo/bar.txt") == "bar.txt"
doAssert lastPathPart("foo/") == "foo"
doAssert lastPathPart("/") == ""
when doslikeFileSystem:
doAssert lastPathPart(r"foo\bar.txt") == "bar.txt"
doAssert lastPathPart(r"foo\") == "foo"
template canon(x): untyped = normalizePath(x, '/')
doAssert canon"/foo/../bar" == "/bar"
doAssert canon"foo/../bar" == "bar"
doAssert canon"/f/../bar///" == "/bar"
doAssert canon"f/..////bar" == "bar"
doAssert canon"../bar" == "../bar"
doAssert canon"/../bar" == "/../bar"
doAssert canon("foo/../../bar/") == "../bar"
doAssert canon("./bla/blob/") == "bla/blob"
doAssert canon(".hiddenFile") == ".hiddenFile"
doAssert canon("./bla/../../blob/./zoo.nim") == "../blob/zoo.nim"
doAssert canon("C:/file/to/this/long") == "C:/file/to/this/long"
doAssert canon("") == ""
doAssert canon("foobar") == "foobar"
doAssert canon("f/////////") == "f"
doAssert relativePath("/foo/bar//baz.nim", "/foo", '/') == "bar/baz.nim"
doAssert normalizePath("./foo//bar/../baz", '/') == "foo/baz"
doAssert relativePath("/Users/me/bar/z.nim", "/Users/other/bad", '/') == "../../me/bar/z.nim"
doAssert relativePath("/Users/me/bar/z.nim", "/Users/other", '/') == "../me/bar/z.nim"
doAssert relativePath("/Users///me/bar//z.nim", "//Users/", '/') == "me/bar/z.nim"
doAssert relativePath("/Users/me/bar/z.nim", "/Users/me", '/') == "bar/z.nim"
doAssert relativePath("", "/users/moo", '/') == ""
doAssert relativePath("foo", "", '/') == "foo"
doAssert joinPath("usr", "") == unixToNativePath"usr/"
doAssert joinPath("", "lib") == "lib"
doAssert joinPath("", "/lib") == unixToNativePath"/lib"
doAssert joinPath("usr/", "/lib") == unixToNativePath"usr/lib"

View File

@@ -1,99 +0,0 @@
discard """
output: ""
"""
# test the ospaths module
import os, pathnorm
doAssert unixToNativePath("") == ""
doAssert unixToNativePath(".") == $CurDir
doAssert unixToNativePath("..") == $ParDir
doAssert isAbsolute(unixToNativePath("/"))
doAssert isAbsolute(unixToNativePath("/", "a"))
doAssert isAbsolute(unixToNativePath("/a"))
doAssert isAbsolute(unixToNativePath("/a", "a"))
doAssert isAbsolute(unixToNativePath("/a/b"))
doAssert isAbsolute(unixToNativePath("/a/b", "a"))
doAssert unixToNativePath("a/b") == joinPath("a", "b")
when defined(macos):
doAssert unixToNativePath("./") == ":"
doAssert unixToNativePath("./abc") == ":abc"
doAssert unixToNativePath("../abc") == "::abc"
doAssert unixToNativePath("../../abc") == ":::abc"
doAssert unixToNativePath("/abc", "a") == "abc"
doAssert unixToNativePath("/abc/def", "a") == "abc:def"
elif doslikeFileSystem:
doAssert unixToNativePath("./") == ".\\"
doAssert unixToNativePath("./abc") == ".\\abc"
doAssert unixToNativePath("../abc") == "..\\abc"
doAssert unixToNativePath("../../abc") == "..\\..\\abc"
doAssert unixToNativePath("/abc", "a") == "a:\\abc"
doAssert unixToNativePath("/abc/def", "a") == "a:\\abc\\def"
else:
#Tests for unix
doAssert unixToNativePath("./") == "./"
doAssert unixToNativePath("./abc") == "./abc"
doAssert unixToNativePath("../abc") == "../abc"
doAssert unixToNativePath("../../abc") == "../../abc"
doAssert unixToNativePath("/abc", "a") == "/abc"
doAssert unixToNativePath("/abc/def", "a") == "/abc/def"
block extractFilenameTest:
doAssert extractFilename("") == ""
when defined(posix):
doAssert extractFilename("foo/bar") == "bar"
doAssert extractFilename("foo/bar.txt") == "bar.txt"
doAssert extractFilename("foo/") == ""
doAssert extractFilename("/") == ""
when doslikeFileSystem:
doAssert extractFilename(r"foo\bar") == "bar"
doAssert extractFilename(r"foo\bar.txt") == "bar.txt"
doAssert extractFilename(r"foo\") == ""
doAssert extractFilename(r"C:\") == ""
block lastPathPartTest:
doAssert lastPathPart("") == ""
when defined(posix):
doAssert lastPathPart("foo/bar.txt") == "bar.txt"
doAssert lastPathPart("foo/") == "foo"
doAssert lastPathPart("/") == ""
when doslikeFileSystem:
doAssert lastPathPart(r"foo\bar.txt") == "bar.txt"
doAssert lastPathPart(r"foo\") == "foo"
template canon(x): untyped = normalizePath(x, '/')
doAssert canon"/foo/../bar" == "/bar"
doAssert canon"foo/../bar" == "bar"
doAssert canon"/f/../bar///" == "/bar"
doAssert canon"f/..////bar" == "bar"
doAssert canon"../bar" == "../bar"
doAssert canon"/../bar" == "/../bar"
doAssert canon("foo/../../bar/") == "../bar"
doAssert canon("./bla/blob/") == "bla/blob"
doAssert canon(".hiddenFile") == ".hiddenFile"
doAssert canon("./bla/../../blob/./zoo.nim") == "../blob/zoo.nim"
doAssert canon("C:/file/to/this/long") == "C:/file/to/this/long"
doAssert canon("") == ""
doAssert canon("foobar") == "foobar"
doAssert canon("f/////////") == "f"
doAssert relativePath("/foo/bar//baz.nim", "/foo", '/') == "bar/baz.nim"
doAssert normalizePath("./foo//bar/../baz", '/') == "foo/baz"
doAssert relativePath("/Users/me/bar/z.nim", "/Users/other/bad", '/') == "../../me/bar/z.nim"
doAssert relativePath("/Users/me/bar/z.nim", "/Users/other", '/') == "../me/bar/z.nim"
doAssert relativePath("/Users///me/bar//z.nim", "//Users/", '/') == "me/bar/z.nim"
doAssert relativePath("/Users/me/bar/z.nim", "/Users/me", '/') == "bar/z.nim"
doAssert relativePath("", "/users/moo", '/') == ""
doAssert relativePath("foo", "", '/') == "foo"
doAssert joinPath("usr", "") == unixToNativePath"usr/"
doAssert joinPath("", "lib") == "lib"
doAssert joinPath("", "/lib") == unixToNativePath"/lib"
doAssert joinPath("usr/", "/lib") == unixToNativePath"usr/lib"

View File

@@ -5,7 +5,7 @@ joinable: false
# not joinable because it executes itself with parameters
import os
import osproc
import parseopt2
import parseopt
import sequtils
let argv = commandLineParams()
@@ -17,6 +17,6 @@ else:
let f = toSeq(getopt())
doAssert f[0].kind == cmdArgument and f[0].key == "foo bar" and f[0].val == ""
doAssert f[1].kind == cmdLongOption and f[1].key == "aa" and f[1].val == "bar=a"
doAssert f[2].kind == cmdLongOption and f[2].key == "a=c" and f[2].val == "d"
doAssert f[2].kind == cmdLongOption and f[2].key == "a" and f[2].val == "c:d"
doAssert f[3].kind == cmdLongOption and f[3].key == "ab" and f[3].val == ""
doAssert f[4].kind == cmdShortOption and f[4].key == "c" and f[4].val == ""

8
tests/typerel/tptrs.nim Normal file
View File

@@ -0,0 +1,8 @@
discard """
errormsg: "type mismatch: got <ptr int16> but expected 'ptr int'"
line: 8
"""
var
n: int16
p: ptr int = addr n

View File

@@ -5,7 +5,7 @@ discard """
"""
import events
import sockets
import net
import strutils
import os

View File

@@ -48,3 +48,20 @@ let people = {
}.toTable()
echo people["001"]
# Object downconversion should not copy
type
SomeBaseObj {.inheritable.} = object of RootObj
txt : string
InheritedFromBase = object of SomeBaseObj
other : string
proc initBase(sbo: var SomeBaseObj) =
sbo.txt = "Initialized string from base"
static:
var ifb2: InheritedFromBase
initBase(SomeBaseObj(ifb2))
echo repr(ifb2)
doAssert(ifb2.txt == "Initialized string from base")

15
tests/vm/tvarsection.nim Normal file
View File

@@ -0,0 +1,15 @@
discard """
output: '''-1abc'''
"""
var
a {.compileTime.} = 2
b = -1
c {.compileTime.} = 3
d = "abc"
static:
assert a == 2
assert c == 3
echo b, d

Some files were not shown because too many files have changed in this diff