Merge branch 'devel' of github.com:Araq/Nimrod into devel

Merging mainline devel.
This commit is contained in:
Milos Negovanovic
2014-09-19 13:03:07 +01:00
37 changed files with 947 additions and 273 deletions

View File

@@ -37,6 +37,16 @@ proc genVarTuple(p: BProc, n: PNode) =
var tup, field: TLoc
if n.kind != nkVarTuple: internalError(n.info, "genVarTuple")
var L = sonsLen(n)
# if we have a something that's been captured, use the lowering instead:
var useLowering = false
for i in countup(0, L-3):
if n[i].kind != nkSym:
useLowering = true; break
if useLowering:
genStmts(p, lowerTupleUnpacking(n, p.prc))
return
genLineDir(p, n)
initLocExpr(p, n.sons[L-1], tup)
var t = tup.t

View File

@@ -949,7 +949,7 @@ proc liftLambdas*(fn: PSym, body: PNode): PNode =
discard transformOuterProcBody(o, body, initIter(fn))
result = ex
finishEnvironments(o)
#if fn.name.s == "cbOuter":
#if fn.name.s == "parseLong":
# echo rendertree(result, {renderIds})
proc liftLambdasForTopLevel*(module: PSym, body: PNode): PNode =

View File

@@ -56,6 +56,7 @@ proc lowerTupleUnpacking*(n: PNode; owner: PSym): PNode =
result.add newAsgnStmt(newSymNode(temp), value)
for i in 0 .. n.len-3:
if n.sons[i].kind == nkSym: v.addVar(n.sons[i])
result.add newAsgnStmt(n.sons[i], newTupleAccess(value, i))
proc createObj*(owner: PSym, info: TLineInfo): PType =

View File

@@ -662,7 +662,7 @@ proc evalAtCompileTime(c: PContext, n: PNode): PNode =
# optimization pass: not necessary for correctness of the semantic pass
if {sfNoSideEffect, sfCompileTime} * callee.flags != {} and
{sfForward, sfImportc} * callee.flags == {}:
{sfForward, sfImportc} * callee.flags == {} and n.typ != nil:
if sfCompileTime notin callee.flags and
optImplicitStatic notin gOptions: return

View File

@@ -36,10 +36,11 @@ proc semGenericStmtScope(c: PContext, n: PNode,
template macroToExpand(s: expr): expr =
s.kind in {skMacro, skTemplate} and (s.typ.len == 1 or sfImmediate in s.flags)
proc semGenericStmtSymbol(c: PContext, n: PNode, s: PSym): PNode =
proc semGenericStmtSymbol(c: PContext, n: PNode, s: PSym,
ctx: var TIntSet): PNode =
incl(s.flags, sfUsed)
case s.kind
of skUnknown:
of skUnknown:
# Introduced in this pass! Leave it as an identifier.
result = n
of skProc, skMethod, skIterators, skConverter:
@@ -48,11 +49,13 @@ proc semGenericStmtSymbol(c: PContext, n: PNode, s: PSym): PNode =
if macroToExpand(s):
let n = fixImmediateParams(n)
result = semTemplateExpr(c, n, s, {efNoSemCheck})
result = semGenericStmt(c, result, {}, ctx)
else:
result = symChoice(c, n, s, scOpen)
of skMacro:
if macroToExpand(s):
result = semMacroExpr(c, n, n, s, {efNoSemCheck})
result = semGenericStmt(c, result, {}, ctx)
else:
result = symChoice(c, n, s, scOpen)
of skGenericParam:
@@ -80,7 +83,7 @@ proc lookup(c: PContext, n: PNode, flags: TSemGenericFlags,
elif s.name.id in ctx:
result = symChoice(c, n, s, scForceOpen)
else:
result = semGenericStmtSymbol(c, n, s)
result = semGenericStmtSymbol(c, n, s, ctx)
# else: leave as nkIdent
proc newDot(n, b: PNode): PNode =
@@ -95,8 +98,9 @@ proc fuzzyLookup(c: PContext, n: PNode, flags: TSemGenericFlags,
var s = qualifiedLookUp(c, n, luf)
if s != nil:
result = semGenericStmtSymbol(c, n, s)
result = semGenericStmtSymbol(c, n, s, ctx)
else:
n.sons[0] = semGenericStmt(c, n.sons[0], flags, ctx)
result = n
let n = n[1]
let ident = considerQuotedIdent(n)
@@ -107,7 +111,7 @@ proc fuzzyLookup(c: PContext, n: PNode, flags: TSemGenericFlags,
elif s.name.id in ctx:
result = newDot(result, symChoice(c, n, s, scForceOpen))
else:
let sym = semGenericStmtSymbol(c, n, s)
let sym = semGenericStmtSymbol(c, n, s, ctx)
if sym.kind == nkSym:
result = newDot(result, symChoice(c, n, s, scForceOpen))
else:
@@ -158,6 +162,7 @@ proc semGenericStmt(c: PContext, n: PNode,
of skMacro:
if macroToExpand(s):
result = semMacroExpr(c, n, n, s, {efNoSemCheck})
result = semGenericStmt(c, result, {}, ctx)
else:
n.sons[0] = symChoice(c, n.sons[0], s, scOption)
result = n
@@ -165,6 +170,7 @@ proc semGenericStmt(c: PContext, n: PNode,
if macroToExpand(s):
let n = fixImmediateParams(n)
result = semTemplateExpr(c, n, s, {efNoSemCheck})
result = semGenericStmt(c, result, {}, ctx)
else:
n.sons[0] = symChoice(c, n.sons[0], s, scOption)
result = n

View File

@@ -126,7 +126,7 @@ proc magicsAfterOverloadResolution(c: PContext, n: PNode,
result.typ = getSysType(tyString)
of mInstantiationInfo: result = semInstantiationInfo(c, n)
of mOrd: result = semOrd(c, n)
of mHigh: result = semLowHigh(c, n, mHigh)
of mHigh, mLow: result = semLowHigh(c, n, n[0].sym.magic)
of mShallowCopy: result = semShallowCopy(c, n, flags)
of mNBindSym: result = semBindSym(c, n)
of mLocals: result = semLocals(c, n)

View File

@@ -22,20 +22,23 @@ proc semDiscard(c: PContext, n: PNode): PNode =
proc semBreakOrContinue(c: PContext, n: PNode): PNode =
result = n
checkSonsLen(n, 1)
if n.sons[0].kind != nkEmpty:
var s: PSym
case n.sons[0].kind
of nkIdent: s = lookUp(c, n.sons[0])
of nkSym: s = n.sons[0].sym
else: illFormedAst(n)
if s.kind == skLabel and s.owner.id == c.p.owner.id:
var x = newSymNode(s)
x.info = n.info
incl(s.flags, sfUsed)
n.sons[0] = x
suggestSym(x.info, s)
if n.sons[0].kind != nkEmpty:
if n.kind != nkContinueStmt:
var s: PSym
case n.sons[0].kind
of nkIdent: s = lookUp(c, n.sons[0])
of nkSym: s = n.sons[0].sym
else: illFormedAst(n)
if s.kind == skLabel and s.owner.id == c.p.owner.id:
var x = newSymNode(s)
x.info = n.info
incl(s.flags, sfUsed)
n.sons[0] = x
suggestSym(x.info, s)
else:
localError(n.info, errInvalidControlFlowX, s.name.s)
else:
localError(n.info, errInvalidControlFlowX, s.name.s)
localError(n.info, errGenerated, "'continue' cannot have a label")
elif (c.p.nestedLoopCounter <= 0) and (c.p.nestedBlockCounter <= 0):
localError(n.info, errInvalidControlFlowX,
renderTree(n, {renderNoComments}))

View File

@@ -216,12 +216,16 @@ proc replaceTypeVarsS(cl: var TReplTypeVars, s: PSym): PSym =
result.typ = replaceTypeVarsT(cl, s.typ)
result.ast = replaceTypeVarsN(cl, s.ast)
proc lookupTypeVar(cl: TReplTypeVars, t: PType): PType =
proc lookupTypeVar(cl: var TReplTypeVars, t: PType): PType =
result = PType(idTableGet(cl.typeMap, t))
if result == nil:
if cl.allowMetaTypes or tfRetType in t.flags: return
localError(t.sym.info, errCannotInstantiateX, typeToString(t))
result = errorType(cl.c)
# In order to prevent endless recursions, we must remember
# this bad lookup and replace it with errorType everywhere.
# These code paths are only active in nimrod check
idTablePut(cl.typeMap, t, result)
elif result.kind == tyGenericParam and not cl.allowMetaTypes:
internalError(cl.info, "substitution with generic parameter")
@@ -353,7 +357,7 @@ proc replaceTypeVarsTAux(cl: var TReplTypeVars, t: PType): PType =
of tyGenericBody:
localError(cl.info, errCannotInstantiateX, typeToString(t))
result = t
result = errorType(cl.c)
#result = replaceTypeVarsT(cl, lastSon(t))
of tyFromExpr:

View File

@@ -200,6 +200,18 @@ proc newLabel(c: PTransf, n: PNode): PSym =
result = newSym(skLabel, nil, getCurrOwner(c), n.info)
result.name = getIdent(genPrefix & $result.id)
proc freshLabels(c: PTransf, n: PNode; symMap: var TIdTable) =
if n.kind in {nkBlockStmt, nkBlockExpr}:
if n.sons[0].kind == nkSym:
let x = newLabel(c, n[0])
idTablePut(symMap, n[0].sym, x)
n.sons[0].sym = x
if n.kind == nkSym and n.sym.kind == skLabel:
let x = PSym(idTableGet(symMap, n.sym))
if x != nil: n.sym = x
else:
for i in 0 .. <safeLen(n): freshLabels(c, n.sons[i], symMap)
proc transformBlock(c: PTransf, n: PNode): PTransNode =
var labl: PSym
if n.sons[0].kind != nkEmpty:
@@ -231,21 +243,31 @@ proc transformLoopBody(c: PTransf, n: PNode): PTransNode =
result = transform(c, n)
proc transformWhile(c: PTransf; n: PNode): PTransNode =
let labl = newLabel(c, n)
c.breakSyms.add(labl)
result = newTransNode(nkBlockStmt, n.info, 2)
result[0] = newSymNode(labl).PTransNode
if c.inlining > 0:
result = transformSons(c, n)
else:
let labl = newLabel(c, n)
c.breakSyms.add(labl)
result = newTransNode(nkBlockStmt, n.info, 2)
result[0] = newSymNode(labl).PTransNode
var body = newTransNode(n)
for i in 0..n.len-2:
body[i] = transform(c, n.sons[i])
body[<n.len] = transformLoopBody(c, n.sons[<n.len])
result[1] = body
discard c.breakSyms.pop
var body = newTransNode(n)
for i in 0..n.len-2:
body[i] = transform(c, n.sons[i])
body[<n.len] = transformLoopBody(c, n.sons[<n.len])
result[1] = body
discard c.breakSyms.pop
proc transformBreak(c: PTransf, n: PNode): PTransNode =
if n.sons[0].kind != nkEmpty:
if n.sons[0].kind != nkEmpty or c.inlining > 0:
result = n.PTransNode
when false:
let lablCopy = idNodeTableGet(c.transCon.mapping, n.sons[0].sym)
if lablCopy.isNil:
result = n.PTransNode
else:
result = newTransNode(n.kind, n.info, 1)
result[0] = lablCopy.PTransNode
else:
let labl = c.breakSyms[c.breakSyms.high]
result = transformSons(c, n)
@@ -477,9 +499,9 @@ proc transformFor(c: PTransf, n: PNode): PTransNode =
var formal = skipTypes(iter.typ, abstractInst).n.sons[i].sym
if arg.typ.kind == tyIter: continue
case putArgInto(arg, formal.typ)
of paDirectMapping:
of paDirectMapping:
idNodeTablePut(newC.mapping, formal, arg)
of paFastAsgn:
of paFastAsgn:
# generate a temporary and produce an assignment statement:
var temp = newTemp(c, formal.typ, formal.info)
addVar(v, newSymNode(temp))
@@ -489,8 +511,13 @@ proc transformFor(c: PTransf, n: PNode): PTransNode =
assert(skipTypes(formal.typ, abstractInst).kind == tyVar)
idNodeTablePut(newC.mapping, formal, arg)
# XXX BUG still not correct if the arg has a side effect!
var body = iter.getBody
var body = iter.getBody.copyTree
pushInfoContext(n.info)
# XXX optimize this somehow. But the check "c.inlining" is not correct:
var symMap: TIdTable
initIdTable symMap
freshLabels(c, body, symMap)
inc(c.inlining)
add(stmtList, transform(c, body))
#findWrongOwners(c, stmtList.pnode)
@@ -755,6 +782,8 @@ proc transformBody*(module: PSym, n: PNode, prc: PSym): PNode =
# result = lambdalifting.liftIterator(prc, result)
incl(result.flags, nfTransf)
when useEffectSystem: trackProc(prc, result)
if prc.name.s == "testbody":
echo renderTree(result)
proc transformStmt*(module: PSym, n: PNode): PNode =
if nfTransf in n.flags:

View File

@@ -913,9 +913,11 @@ proc sameTypeAux(x, y: PType, c: var TSameTypeClosure): bool =
result = sameTypeAux(a.sons[0], b.sons[0], c)
else:
result = sameTypeAux(a.sons[0], b.sons[0], c) and sameFlags(a, b)
of tyEnum, tyForward, tyProxy:
of tyEnum, tyForward:
# XXX generic enums do not make much sense, but require structural checking
result = a.id == b.id and sameFlags(a, b)
of tyError:
result = b.kind == tyError
of tyTuple:
cycleCheck()
result = sameTuple(a, b, c) and sameFlags(a, b)
@@ -1390,7 +1392,7 @@ proc skipConv*(n: PNode): PNode =
case n.kind
of nkObjUpConv, nkObjDownConv, nkChckRange, nkChckRangeF, nkChckRange64:
# only skip the conversion if it doesn't lose too important information
# (see bug #
# (see bug #1334)
if n.sons[0].typ.classify == n.typ.classify:
result = n.sons[0]
of nkHiddenStdConv, nkHiddenSubConv, nkConv:

View File

@@ -12,11 +12,11 @@ import ast, types, msgs, osproc, streams, options
proc readOutput(p: PProcess): string =
result = ""
var output = p.outputStream
discard p.waitForExit
while not output.atEnd:
result.add(output.readLine)
result.add("\n")
result.setLen(result.len - "\n".len)
discard p.waitForExit
proc opGorge*(cmd, input: string): string =
var p = startCmd(cmd)

View File

@@ -43,18 +43,19 @@ type
proc debugInfo(info: TLineInfo): string =
result = info.toFilename.splitFile.name & ":" & $info.line
proc codeListing(c: PCtx, result: var string, start=0) =
proc codeListing(c: PCtx, result: var string, start=0; last = -1) =
# first iteration: compute all necessary labels:
var jumpTargets = initIntSet()
for i in start.. < c.code.len:
let last = if last < 0: c.code.len-1 else: min(last, c.code.len-1)
for i in start..last:
let x = c.code[i]
if x.opcode in relativeJumps:
jumpTargets.incl(i+x.regBx-wordExcess)
# for debugging purposes
var i = start
while i < c.code.len:
while i <= last:
if i in jumpTargets: result.addf("L$1:\n", i)
let x = c.code[i]
@@ -82,9 +83,9 @@ proc codeListing(c: PCtx, result: var string, start=0) =
result.add("\n")
inc i
proc echoCode*(c: PCtx, start=0) {.deprecated.} =
proc echoCode*(c: PCtx, start=0; last = -1) {.deprecated.} =
var buf = ""
codeListing(c, buf, start)
codeListing(c, buf, start, last)
echo buf
proc gABC(ctx: PCtx; n: PNode; opc: TOpcode; a, b, c: TRegister = 0) =
@@ -495,6 +496,7 @@ proc genCall(c: PCtx; n: PNode; dest: var TDest) =
c.freeTempRange(x, n.len)
template isGlobal(s: PSym): bool = sfGlobal in s.flags and s.kind != skForVar
proc isGlobal(n: PNode): bool = n.kind == nkSym and isGlobal(n.sym)
proc needsAsgnPatch(n: PNode): bool =
n.kind in {nkBracketExpr, nkDotExpr, nkCheckedFieldExpr,
@@ -637,8 +639,10 @@ proc genBinaryStmt(c: PCtx; n: PNode; opc: TOpcode) =
c.freeTemp(tmp)
proc genBinaryStmtVar(c: PCtx; n: PNode; opc: TOpcode) =
var x = n.sons[1]
if x.kind in {nkAddr, nkHiddenAddr}: x = x.sons[0]
let
dest = c.genx(n.sons[1], {gfAddrOf})
dest = c.genx(x)
tmp = c.genx(n.sons[2])
c.gABC(n, opc, dest, tmp, 0)
#c.genAsgnPatch(n.sons[1], dest)
@@ -1053,6 +1057,8 @@ proc genAddrDeref(c: PCtx; n: PNode; dest: var TDest; opc: TOpcode;
# nkAddr we must not use 'unneededIndirection', but for deref we use it.
if not isAddr and unneededIndirection(n.sons[0]):
gen(c, n.sons[0], dest, newflags)
elif isAddr and isGlobal(n.sons[0]):
gen(c, n.sons[0], dest, flags+{gfAddrOf})
else:
let tmp = c.genx(n.sons[0], newflags)
if dest < 0: dest = c.getTemp(n.typ)
@@ -1247,6 +1253,8 @@ proc genRdVar(c: PCtx; n: PNode; dest: var TDest; flags: TGenFlags) =
c.gABx(n, opcLdGlobal, cc, s.position)
c.gABC(n, opcNodeToReg, dest, cc)
c.freeTemp(cc)
elif gfAddrOf in flags:
c.gABx(n, opcLdGlobalAddr, dest, s.position)
else:
c.gABx(n, opcLdGlobal, dest, s.position)
else:

139
config/nim.cfg Normal file
View File

@@ -0,0 +1,139 @@
# Configuration file for the Nimrod Compiler.
# (c) 2013 Andreas Rumpf
# Feel free to edit the default values as you need.
# You may set environment variables with
# @putenv "key" "val"
# Environment variables cannot be used in the options, however!
cc = gcc
# example of how to setup a cross-compiler:
arm.linux.gcc.exe = "arm-linux-gcc"
arm.linux.gcc.linkerexe = "arm-linux-gcc"
path="$lib/core"
path="$lib/pure"
path="$lib/pure/collections"
path="$lib/pure/concurrency"
path="$lib/impure"
path="$lib/wrappers"
# path="$lib/wrappers/cairo"
# path="$lib/wrappers/gtk"
# path="$lib/wrappers/lua"
# path="$lib/wrappers/opengl"
path="$lib/wrappers/pcre"
path="$lib/wrappers/readline"
path="$lib/wrappers/sdl"
# path="$lib/wrappers/x11"
path="$lib/wrappers/zip"
path="$lib/wrappers/libffi"
path="$lib/windows"
path="$lib/posix"
path="$lib/js"
path="$lib/pure/unidecode"
@if nimbabel:
babelpath="$home/.babel/pkgs/"
@end
@if release or quick:
obj_checks:off
field_checks:off
range_checks:off
bound_checks:off
overflow_checks:off
assertions:off
stacktrace:off
linetrace:off
debugger:off
line_dir:off
dead_code_elim:on
@end
@if release:
opt:speed
@end
# additional options always passed to the compiler:
--parallel_build: "0" # 0 to auto-detect number of processors
hint[LineTooLong]=off
#hint[XDeclaredButNotUsed]=off
@if unix:
@if not bsd:
# -fopenmp
gcc.options.linker = "-ldl"
gpp.options.linker = "-ldl"
clang.options.linker = "-ldl"
tcc.options.linker = "-ldl"
@end
@if bsd or haiku:
# BSD got posix_spawn only recently, so we deactivate it for osproc:
define:useFork
# at least NetBSD has problems with thread local storage:
tlsEmulation:on
@end
@end
# Configuration for the Intel C/C++ compiler:
@if windows:
icl.options.speed = "/Ox /arch:SSE2"
icl.options.always = "/nologo"
@end
# Configuration for the GNU C/C++ compiler:
@if windows:
#gcc.path = r"$nimrod\dist\mingw\bin"
@if gcc:
tlsEmulation:on
@end
@end
@if macosx:
cc = clang
tlsEmulation:on
gcc.options.always = "-w -fasm-blocks"
gpp.options.always = "-w -fasm-blocks -fpermissive"
@else:
gcc.options.always = "-w"
gpp.options.always = "-w -fpermissive"
@end
gcc.options.speed = "-O3 -fno-strict-aliasing"
gcc.options.size = "-Os"
gcc.options.debug = "-g3 -O0"
gpp.options.speed = "-O3 -fno-strict-aliasing"
gpp.options.size = "-Os"
gpp.options.debug = "-g3 -O0"
#passl = "-pg"
# Configuration for the LLVM GCC compiler:
llvm_gcc.options.debug = "-g"
llvm_gcc.options.always = "-w"
llvm_gcc.options.speed = "-O2"
llvm_gcc.options.size = "-Os"
# Configuration for the LLVM CLang compiler:
clang.options.debug = "-g"
clang.options.always = "-w"
clang.options.speed = "-O3"
clang.options.size = "-Os"
# Configuration for the Visual C/C++ compiler:
vcc.options.linker = "/DEBUG /Zi /Fd\"$projectName.pdb\" /F33554432" # set the stack size to 8 MB
vcc.options.debug = "/Zi /Fd\"$projectName.pdb\""
vcc.options.always = "/nologo"
vcc.options.speed = "/Ox /arch:SSE2"
vcc.options.size = "/O1"
# Configuration for the Digital Mars C/C++ compiler:
@if windows:
dmc.path = r"$nimrod\dist\dm\bin"
@end
# Configuration for the Tiny C Compiler:
tcc.options.always = "-w"

View File

@@ -138,6 +138,13 @@ from rst to HTML. It also repeats the same operation but places the result in
the ``web/upload`` which can be used to update the website at
http://nimrod-lang.org.
By default the documentation will be built in parallel using the number of
available CPU cores. If any documentation build sub commands fail, they will
be rerun in serial fashion so that meaningful error output can be gathered for
inspection. The ``--parallelBuild:n`` switch or configuration option can be
used to force a specific number of parallel jobs or run everything serially
from the start (``n == 1``).
zip command
-----------

View File

@@ -1421,7 +1421,7 @@ Examples:
proc forEach(c: proc (x: int) {.cdecl.}) =
...
forEach(printItem) # this will NOT work because calling conventions differ
forEach(printItem) # this will NOT compile because calling conventions differ
.. code-block:: nimrod

View File

@@ -1,14 +0,0 @@
import zmq
var connection = zmq.open("tcp://localhost:5555", server=false)
echo("Connecting...")
for i in 0..10:
echo("Sending hello...", i)
send(connection, "Hello")
var reply = receive(connection)
echo("Received ...", reply)
close(connection)

View File

@@ -1,11 +0,0 @@
import zmq
var connection = zmq.open("tcp://*:5555", server=true)
while True:
var request = receive(connection)
echo("Received: ", request)
send(connection, "World")
close(connection)

View File

@@ -19,12 +19,13 @@ type
EDb* = object of EIO ## exception that is raised if a database error occurs
TSqlQuery* = distinct string ## an SQL query string
TSqlPrepared* = distinct string ## a identifier for the prepared queries
FDb* = object of FIO ## effect that denotes a database operation
FReadDb* = object of FDB ## effect that denotes a read operation
FWriteDb* = object of FDB ## effect that denotes a write operation
proc sql*(query: string): TSqlQuery {.noSideEffect, inline.} =
proc sql*(query: string): TSqlQuery {.noSideEffect, inline.} =
## constructs a TSqlQuery from the string `query`. This is supposed to be
## used as a raw-string-literal modifier:
## ``sql"update user set counter = counter + 1"``
@@ -33,14 +34,14 @@ proc sql*(query: string): TSqlQuery {.noSideEffect, inline.} =
## on, later versions will check the string for valid syntax.
result = TSqlQuery(query)
proc dbError(db: TDbConn) {.noreturn.} =
proc dbError*(db: TDbConn) {.noreturn.} =
## raises an EDb exception.
var e: ref EDb
new(e)
e.msg = $PQerrorMessage(db)
raise e
proc dbError*(msg: string) {.noreturn.} =
proc dbError*(msg: string) {.noreturn.} =
## raises an EDb exception with message `msg`.
var e: ref EDb
new(e)
@@ -61,41 +62,70 @@ proc dbFormat(formatstr: TSqlQuery, args: varargs[string]): string =
if c == '?':
add(result, dbQuote(args[a]))
inc(a)
else:
else:
add(result, c)
proc tryExec*(db: TDbConn, query: TSqlQuery,
proc tryExec*(db: TDbConn, query: TSqlQuery,
args: varargs[string, `$`]): bool {.tags: [FReadDB, FWriteDb].} =
## tries to execute the query and returns true if successful, false otherwise.
var q = dbFormat(query, args)
var res = PQExec(db, q)
var arr = allocCStringArray(args)
var res = PQexecParams(db, query.string, int32(args.len), nil, arr,
nil, nil, 0)
deallocCStringArray(arr)
result = PQresultStatus(res) == PGRES_COMMAND_OK
PQclear(res)
proc exec*(db: TDbConn, query: TSqlQuery, args: varargs[string, `$`]) {.
tags: [FReadDB, FWriteDb].} =
## executes the query and raises EDB if not successful.
var q = dbFormat(query, args)
var res = PQExec(db, q)
var arr = allocCStringArray(args)
var res = PQexecParams(db, query.string, int32(args.len), nil, arr,
nil, nil, 0)
deallocCStringArray(arr)
if PQresultStatus(res) != PGRES_COMMAND_OK: dbError(db)
PQclear(res)
proc exec*(db: TDbConn, stmtName: TSqlPrepared,
args: varargs[string]) {.tags: [FReadDB, FWriteDb].} =
var arr = allocCStringArray(args)
var res = PQexecPrepared(db, stmtName.string, int32(args.len), arr,
nil, nil, 0)
deallocCStringArray(arr)
if PQResultStatus(res) != PGRES_COMMAND_OK: dbError(db)
PQclear(res)
proc newRow(L: int): TRow =
newSeq(result, L)
for i in 0..L-1: result[i] = ""
proc setupQuery(db: TDbConn, query: TSqlQuery,
args: varargs[string]): PPGresult =
var q = dbFormat(query, args)
result = PQExec(db, q)
if PQresultStatus(result) != PGRES_TUPLES_OK: dbError(db)
proc setupQuery(db: TDbConn, query: TSqlQuery,
args: varargs[string]): PPGresult =
var arr = allocCStringArray(args)
result = PQexecParams(db, query.string, int32(args.len), nil, arr,
nil, nil, 0)
deallocCStringArray(arr)
if PQResultStatus(result) != PGRES_TUPLES_OK: dbError(db)
proc setupQuery(db: TDbConn, stmtName: TSqlPrepared,
args: varargs[string]): PPGresult =
var arr = allocCStringArray(args)
result = PQexecPrepared(db, stmtName.string, int32(args.len), arr,
nil, nil, 0)
deallocCStringArray(arr)
if PQResultStatus(result) != PGRES_TUPLES_OK: dbError(db)
proc prepare*(db: TDbConn; stmtName: string, query: TSqlQuery;
nParams: int): TSqlPrepared =
var res = PQprepare(db, stmtName, query.string, int32(nParams), nil)
if PQResultStatus(res) != PGRES_COMMAND_OK: dbError(db)
return TSqlPrepared(stmtName)
proc setRow(res: PPGresult, r: var TRow, line, cols: int32) =
for col in 0..cols-1:
setLen(r[col], 0)
var x = PQgetvalue(res, line, col)
add(r[col], x)
iterator fastRows*(db: TDbConn, query: TSqlQuery,
args: varargs[string, `$`]): TRow {.tags: [FReadDB].} =
## executes the query and iterates over the result dataset. This is very
@@ -109,6 +139,17 @@ iterator fastRows*(db: TDbConn, query: TSqlQuery,
yield result
PQclear(res)
iterator fastRows*(db: TDbConn, stmtName: TSqlPrepared,
args: varargs[string, `$`]): TRow {.tags: [FReadDB].} =
## executes the prepared query and iterates over the result dataset.
var res = setupQuery(db, stmtName, args)
var L = PQnfields(res)
var result = newRow(L)
for i in 0..PQntuples(res)-1:
setRow(res, result, i, L)
yield result
PQclear(res)
proc getRow*(db: TDbConn, query: TSqlQuery,
args: varargs[string, `$`]): TRow {.tags: [FReadDB].} =
## retrieves a single row. If the query doesn't return any rows, this proc
@@ -119,40 +160,55 @@ proc getRow*(db: TDbConn, query: TSqlQuery,
setRow(res, result, 0, L)
PQclear(res)
proc getAllRows*(db: TDbConn, query: TSqlQuery,
proc getRow*(db: TDbConn, stmtName: TSqlPrepared,
args: varargs[string, `$`]): TRow {.tags: [FReadDB].} =
var res = setupQuery(db, stmtName, args)
var L = PQnfields(res)
result = newRow(L)
setRow(res, result, 0, L)
PQclear(res)
proc getAllRows*(db: TDbConn, query: TSqlQuery,
args: varargs[string, `$`]): seq[TRow] {.tags: [FReadDB].} =
## executes the query and returns the whole result dataset.
result = @[]
for r in FastRows(db, query, args):
result.add(r)
iterator rows*(db: TDbConn, query: TSqlQuery,
proc getAllRows*(db: TDbConn, stmtName: TSqlPrepared,
args: varargs[string, `$`]): seq[TRow] {.tags: [FReadDB].} =
## executes the prepared query and returns the whole result dataset.
result = @[]
for r in FastRows(db, stmtName, args):
result.add(r)
iterator rows*(db: TDbConn, query: TSqlQuery,
args: varargs[string, `$`]): TRow {.tags: [FReadDB].} =
## same as `FastRows`, but slower and safe.
for r in items(GetAllRows(db, query, args)): yield r
proc getValue*(db: TDbConn, query: TSqlQuery,
args: varargs[string, `$`]): string {.tags: [FReadDB].} =
proc getValue*(db: TDbConn, query: TSqlQuery,
args: varargs[string, `$`]): string {.tags: [FReadDB].} =
## executes the query and returns the first column of the first row of the
## result dataset. Returns "" if the dataset contains no rows or the database
## value is NULL.
var x = PQgetvalue(setupQuery(db, query, args), 0, 0)
result = if isNil(x): "" else: $x
proc tryInsertID*(db: TDbConn, query: TSqlQuery,
proc tryInsertID*(db: TDbConn, query: TSqlQuery,
args: varargs[string, `$`]): int64 {.tags: [FWriteDb].}=
## executes the query (typically "INSERT") and returns the
## generated ID for the row or -1 in case of an error. For Postgre this adds
## ``RETURNING id`` to the query, so it only works if your primary key is
## named ``id``.
var x = PQgetvalue(setupQuery(db, TSqlQuery(string(query) & " RETURNING id"),
var x = PQgetvalue(setupQuery(db, TSqlQuery(string(query) & " RETURNING id"),
args), 0, 0)
if not isNil(x):
result = ParseBiggestInt($x)
else:
result = -1
proc insertID*(db: TDbConn, query: TSqlQuery,
proc insertID*(db: TDbConn, query: TSqlQuery,
args: varargs[string, `$`]): int64 {.tags: [FWriteDb].} =
## executes the query (typically "INSERT") and returns the
## generated ID for the row. For Postgre this adds
@@ -161,9 +217,9 @@ proc insertID*(db: TDbConn, query: TSqlQuery,
result = TryInsertID(db, query, args)
if result < 0: dbError(db)
proc execAffectedRows*(db: TDbConn, query: TSqlQuery,
proc execAffectedRows*(db: TDbConn, query: TSqlQuery,
args: varargs[string, `$`]): int64 {.tags: [
FReadDB, FWriteDb].} =
FReadDB, FWriteDb].} =
## executes the query (typically "UPDATE") and returns the
## number of affected rows.
var q = dbFormat(query, args)
@@ -172,7 +228,7 @@ proc execAffectedRows*(db: TDbConn, query: TSqlQuery,
result = parseBiggestInt($PQcmdTuples(res))
PQclear(res)
proc close*(db: TDbConn) {.tags: [FDb].} =
proc close*(db: TDbConn) {.tags: [FDb].} =
## closes the database connection.
if db != nil: PQfinish(db)

View File

@@ -32,6 +32,7 @@ export TPort, TSocketFlags
# TODO: The effect system (raises: []) has trouble with my try transformation.
# TODO: Can't await in a 'except' body
# TODO: getCurrentException(Msg) don't work
# TODO: Check if yielded future is nil and throw a more meaningful exception
# -- Futures
@@ -183,7 +184,7 @@ proc asyncCheck*[T](future: PFuture[T]) =
proc `and`*[T, Y](fut1: PFuture[T], fut2: PFuture[Y]): PFuture[void] =
## Returns a future which will complete once both ``fut1`` and ``fut2``
## complete.
var retFuture = newFuture[void]()
var retFuture = newFuture[void]("asyncdispatch.`and`")
fut1.callback =
proc () =
if fut2.finished: retFuture.complete()
@@ -195,11 +196,12 @@ proc `and`*[T, Y](fut1: PFuture[T], fut2: PFuture[Y]): PFuture[void] =
proc `or`*[T, Y](fut1: PFuture[T], fut2: PFuture[Y]): PFuture[void] =
## Returns a future which will complete once either ``fut1`` or ``fut2``
## complete.
var retFuture = newFuture[void]()
var retFuture = newFuture[void]("asyncdispatch.`or`")
proc cb() =
if not retFuture.finished: retFuture.complete()
fut1.callback = cb
fut2.callback = cb
return retFuture
type
PDispatcherBase = ref object of PObject
@@ -1017,10 +1019,10 @@ proc processBody(node, retFutureSym: PNimrodNode,
result.add newNimNode(nnkReturnStmt, node).add(newNilLit())
return # Don't process the children of this return stmt
of nnkCommand:
of nnkCommand, nnkCall:
if node[0].kind == nnkIdent and node[0].ident == !"await":
case node[1].kind
of nnkIdent:
of nnkIdent, nnkInfix:
# await x
result = newNimNode(nnkYieldStmt, node).add(node[1]) # -> yield x
of nnkCall, nnkCommand:
@@ -1030,8 +1032,8 @@ proc processBody(node, retFutureSym: PNimrodNode,
futureValue, node)
else:
error("Invalid node kind in 'await', got: " & $node[1].kind)
elif node[1].kind == nnkCommand and node[1][0].kind == nnkIdent and
node[1][0].ident == !"await":
elif node.len > 1 and node[1].kind == nnkCommand and
node[1][0].kind == nnkIdent and node[1][0].ident == !"await":
# foo await x
var newCommand = node
result.createVar("future" & $node[0].toStrLit, node[1][1], newCommand[1],
@@ -1182,7 +1184,7 @@ macro async*(prc: stmt): stmt {.immediate.} =
result[6] = outerProcBody
#echo(treeRepr(result))
#if prc[0].getName == "processClient":
#if prc[0].getName == "getFile":
# echo(toStrLit(result))
proc recvLine*(socket: TAsyncFD): PFuture[string] {.async.} =
@@ -1224,3 +1226,11 @@ proc runForever*() =
## Begins a never ending global dispatcher poll loop.
while true:
poll()
proc waitFor*[T](fut: PFuture[T]) =
## **Blocks** the current thread until the specified future completes.
while not fut.finished:
poll()
if fut.failed:
raise fut.error

295
lib/pure/asyncftpclient.nim Normal file
View File

@@ -0,0 +1,295 @@
#
#
# Nimrod's Runtime Library
# (c) Copyright 2014 Dominik Picheta
# See the file "copying.txt", included in this
# distribution, for details about the copyright.
#
import asyncdispatch, asyncnet, strutils, parseutils, os, times
from ftpclient import TFtpBase, EInvalidReply, TFtpEvent
from net import bufferSize
type
TAsyncFtpClient* = TFtpBase[PAsyncSocket]
PAsyncFtpClient* = ref TAsyncFtpClient
ProgressChangedProc* =
proc (total, progress: BiggestInt, speed: float):
PFuture[void] {.closure, gcsafe.}
proc expectReply(ftp: PAsyncFtpClient): PFuture[TaintedString] =
result = ftp.csock.recvLine()
proc send*(ftp: PAsyncFtpClient, m: string): PFuture[TaintedString] {.async.} =
## Send a message to the server, and wait for a primary reply.
## ``\c\L`` is added for you.
await ftp.csock.send(m & "\c\L")
return await ftp.expectReply()
proc assertReply(received: TaintedString, expected: varargs[string]) =
for i in items(expected):
if received.string.startsWith(i): return
raise newException(EInvalidReply,
"Expected reply '$1' got: $2" %
[expected.join("' or '"), received.string])
proc pasv(ftp: PAsyncFtpClient) {.async.} =
## Negotiate a data connection.
ftp.dsock = newAsyncSocket()
var pasvMsg = (await ftp.send("PASV")).string.strip.TaintedString
assertReply(pasvMsg, "227")
var betweenParens = captureBetween(pasvMsg.string, '(', ')')
var nums = betweenParens.split(',')
var ip = nums[0.. -3]
var port = nums[-2.. -1]
var properPort = port[0].parseInt()*256+port[1].parseInt()
await ftp.dsock.connect(ip.join("."), TPort(properPort.toU16))
ftp.dsockConnected = True
proc normalizePathSep(path: string): string =
return replace(path, '\\', '/')
proc connect*(ftp: PAsyncFtpClient) {.async.} =
## Connect to the FTP server specified by ``ftp``.
await ftp.csock.connect(ftp.address, ftp.port)
var reply = await ftp.expectReply()
if reply.startsWith("120"):
# 120 Service ready in nnn minutes.
# We wait until we receive 220.
reply = await ftp.expectReply()
assertReply(reply, "220")
if ftp.user != "":
assertReply(await(ftp.send("USER " & ftp.user)), "230", "331")
if ftp.pass != "":
assertReply(await(ftp.send("PASS " & ftp.pass)), "230")
proc pwd*(ftp: PAsyncFtpClient): PFuture[TaintedString] {.async.} =
## Returns the current working directory.
let wd = await ftp.send("PWD")
assertReply wd, "257"
return wd.string.captureBetween('"').TaintedString # "
proc cd*(ftp: PAsyncFtpClient, dir: string) {.async.} =
## Changes the current directory on the remote FTP server to ``dir``.
assertReply(await(ftp.send("CWD " & dir.normalizePathSep)), "250")
proc cdup*(ftp: PAsyncFtpClient) {.async.} =
## Changes the current directory to the parent of the current directory.
assertReply(await(ftp.send("CDUP")), "200")
proc getLines(ftp: PAsyncFtpClient): PFuture[string] {.async.} =
  ## Downloads text data in ASCII mode
  ## over the already-established data connection; the received lines are
  ## joined with '\n'.
  result = ""
  assert ftp.dsockConnected
  while ftp.dsockConnected:
    let r = await ftp.dsock.recvLine()
    # An empty line signals that the server closed the data connection.
    if r.string == "":
      ftp.dsockConnected = false
    else:
      result.add(r.string & "\n")
  # 226 on the control connection confirms the transfer completed.
  assertReply(await(ftp.expectReply()), "226")
proc listDirs*(ftp: PAsyncFtpClient, dir = ""): PFuture[seq[string]] {.async.} =
  ## Returns a list of filenames in the given directory. If ``dir`` is "",
  ## the current directory is used.
  # (Old docstring referenced an ``async`` parameter and asyncio's ``poll``;
  # this async variant has neither — the future completes when NLST is done.)
  await ftp.pasv()
  assertReply(await(ftp.send("NLST " & dir.normalizePathSep)), ["125", "150"])
  result = splitLines(await ftp.getLines())
proc existsFile*(ftp: PAsyncFtpClient, file: string): PFuture[bool] {.async.} =
  ## Determines whether ``file`` exists.
  ## Compares against the server's NLST listing of the current directory.
  let wanted = file.normalizePathSep
  var listing = await ftp.listDirs()
  for entry in listing:
    if entry.normalizePathSep == wanted:
      return true
proc createDir*(ftp: PAsyncFtpClient, dir: string, recursive = false){.async.} =
  ## Creates a directory ``dir``. If ``recursive`` is true, the topmost
  ## subdirectory of ``dir`` will be created first, following the secondmost...
  ## etc. this allows you to give a full path as the ``dir`` without worrying
  ## about subdirectories not existing.
  if not recursive:
    assertReply(await(ftp.send("MKD " & dir.normalizePathSep)), "257")
  else:
    # Issue one MKD per path component, accumulating the prefix as we go;
    # only the final reply is asserted (intermediate dirs may already exist).
    var reply = TaintedString""
    var pathSoFar = ""
    for component in split(dir, {os.dirSep, os.altSep}):
      if component == "": continue
      pathSoFar.add(component)
      reply = await ftp.send("MKD " & pathSoFar)
      pathSoFar.add('/')
    assertReply(reply, "257")
proc chmod*(ftp: PAsyncFtpClient, path: string,
            permissions: set[TFilePermission]) {.async.} =
  ## Changes permission of ``path`` to ``permissions``.
  ## Builds a three-digit octal string (user/group/other) and issues
  ## the non-standard ``SITE CHMOD`` command.
  var user = 0
  var group = 0
  var other = 0
  for perm in permissions:
    case perm
    of fpUserExec: inc(user, 1)
    of fpUserWrite: inc(user, 2)
    of fpUserRead: inc(user, 4)
    of fpGroupExec: inc(group, 1)
    of fpGroupWrite: inc(group, 2)
    of fpGroupRead: inc(group, 4)
    of fpOthersExec: inc(other, 1)
    of fpOthersWrite: inc(other, 2)
    of fpOthersRead: inc(other, 4)
  let octal = $user & $group & $other
  assertReply(await(ftp.send("SITE CHMOD " & octal &
                             " " & path.normalizePathSep)), "200")
proc list*(ftp: PAsyncFtpClient, dir = ""): PFuture[string] {.async.} =
  ## Lists all files in ``dir``. If ``dir`` is ``""``, uses the current
  ## working directory.
  await ftp.pasv()
  assertReply(await(ftp.send("LIST" & " " & dir.normalizePathSep)),
              ["125", "150"])
  result = await ftp.getLines()
proc retrText*(ftp: PAsyncFtpClient, file: string): PFuture[string] {.async.} =
  ## Retrieves ``file``. File must be ASCII text.
  ## Returns the full file contents with lines joined by '\n'.
  await ftp.pasv()
  let reply = await ftp.send("RETR " & file.normalizePathSep)
  assertReply(reply, ["125", "150"])
  result = await ftp.getLines()
proc getFile(ftp: PAsyncFtpClient, file: TFile, total: BiggestInt,
             onProgressChanged: ProgressChangedProc) {.async.} =
  ## Reads the transfer body from the data socket into ``file`` while
  ## reporting progress roughly once per second. ``total`` is the expected
  ## size (as parsed from the RETR reply by the caller).
  assert ftp.dsockConnected
  var progress = 0
  var progressInSecond = 0
  var countdownFut = sleepAsync(1000)
  var dataFut = ftp.dsock.recv(bufferSize)
  while ftp.dsockConnected:
    # Wake when either data arrives or the one-second timer fires.
    await dataFut or countdownFut
    if countdownFut.finished:
      asyncCheck onProgressChanged(total, progress,
        progressInSecond.float)
      progressInSecond = 0
      countdownFut = sleepAsync(1000)
    if dataFut.finished:
      let data = dataFut.read
      if data != "":
        progress.inc(data.len)
        progressInSecond.inc(data.len)
        file.write(data)
        dataFut = ftp.dsock.recv(bufferSize)
      else:
        # Empty read: the server closed the data connection.
        ftp.dsockConnected = False
  # 226 on the control connection confirms the transfer completed.
  assertReply(await(ftp.expectReply()), "226")
proc defaultOnProgressChanged*(total, progress: BiggestInt,
                               speed: float): PFuture[void] {.nimcall,gcsafe.} =
  ## Default FTP ``onProgressChanged`` handler. Does nothing.
  ## Returns an already-completed future so callers can await it freely.
  let done = newFuture[void]()
  done.complete()
  return done
proc retrFile*(ftp: PAsyncFtpClient, file, dest: string,
               onProgressChanged = defaultOnProgressChanged) {.async.} =
  ## Downloads ``file`` and saves it to ``dest``.
  ## ``onProgressChanged`` is invoked roughly once per second with the
  ## total size, bytes transferred so far and the current speed.
  ## Raises ``EInvalidReply`` if the RETR reply carries no "(size)" hint.
  var destFile = open(dest, mode = fmWrite)
  await ftp.pasv()
  var reply = await ftp.send("RETR " & file.normalizePathSep)
  assertReply reply, ["125", "150"]
  if {'(', ')'} notin reply.string:
    raise newException(EInvalidReply, "Reply has no file size.")
  # The 1xx reply conventionally contains "(NNN bytes)".
  var fileSize: BiggestInt
  if reply.string.captureBetween('(', ')').parseBiggestInt(fileSize) == 0:
    raise newException(EInvalidReply, "Reply has no file size.")
  await getFile(ftp, destFile, fileSize, onProgressChanged)
  # Close the destination handle; previously it was leaked.
  destFile.close()
proc doUpload(ftp: PAsyncFtpClient, file: TFile,
              onProgressChanged: ProgressChangedProc) {.async.} =
  ## Streams ``file`` over the already-connected data socket in 4000-byte
  ## chunks, reporting progress roughly once per second via
  ## ``onProgressChanged``, and waits for reply 226 when done.
  assert ftp.dsockConnected
  let total = file.getFileSize()
  var data = newStringOfCap(4000)
  var progress = 0
  var progressInSecond = 0
  var countdownFut = sleepAsync(1000)
  var sendFut: PFuture[void] = nil
  while ftp.dsockConnected:
    if sendFut == nil or sendFut.finished:
      progress.inc(data.len)
      progressInSecond.inc(data.len)
      # TODO: Async file reading.
      let len = file.readBuffer(addr(data[0]), 4000)
      setLen(data, len)
      if len == 0:
        # File finished uploading.
        ftp.dsock.close()
        ftp.dsockConnected = false
        assertReply(await(ftp.expectReply()), "226")
        # Stop here: for an empty file ``sendFut`` is still nil and the
        # ``await countdownFut or sendFut`` below would crash on it; the
        # socket is closed either way, so there is nothing left to await.
        break
      else:
        sendFut = ftp.dsock.send(data)
    if countdownFut.finished:
      asyncCheck onProgressChanged(total, progress, progressInSecond.float)
      progressInSecond = 0
      countdownFut = sleepAsync(1000)
    await countdownFut or sendFut
proc storeFile*(ftp: PAsyncFtpClient, file, dest: string,
                onProgressChanged = defaultOnProgressChanged) {.async.} =
  ## Uploads local ``file`` to ``dest`` on the remote FTP server.
  ## ``onProgressChanged`` is invoked roughly once per second with the
  ## total size, bytes transferred so far and the current speed.
  # This is the *local source* file (was misleadingly named ``destFile``).
  var sourceFile = open(file)
  await ftp.pasv()
  let reply = await ftp.send("STOR " & dest.normalizePathSep)
  assertReply reply, ["125", "150"]
  await doUpload(ftp, sourceFile, onProgressChanged)
  # Close the local handle; previously it was leaked.
  sourceFile.close()
proc newAsyncFtpClient*(address: string, port = TPort(21),
                        user, pass = ""): PAsyncFtpClient =
  ## Creates a new ``PAsyncFtpClient`` object.
  ## The control socket is created here; call ``connect`` to log in.
  new(result)
  result.address = address
  result.port = port
  result.user = user
  result.pass = pass
  result.dsockConnected = false
  result.csock = newAsyncSocket()
when isMainModule:
  # Manual smoke test exercising the high-level async API end to end.
  # NOTE(review): uses placeholder host/credentials and local files —
  # not runnable as-is; intended for interactive testing only.
  var ftp = newAsyncFtpClient("example.com", user = "test", pass = "test")
  proc main(ftp: PAsyncFtpClient) {.async.} =
    await ftp.connect()
    echo await ftp.pwd()
    echo await ftp.listDirs()
    await ftp.storeFile("payload.jpg", "payload.jpg")
    await ftp.retrFile("payload.jpg", "payload2.jpg")
    echo("Finished")
  waitFor main(ftp)

View File

@@ -97,7 +97,8 @@ proc sendStatus(client: PAsyncSocket, status: string): PFuture[void] =
client.send("HTTP/1.1 " & status & "\c\L")
proc processClient(client: PAsyncSocket, address: string,
callback: proc (request: TRequest): PFuture[void]) {.async.} =
callback: proc (request: TRequest):
PFuture[void] {.closure, gcsafe.}) {.async.} =
while true:
# GET /path HTTP/1.1
# Header: val
@@ -184,7 +185,7 @@ proc processClient(client: PAsyncSocket, address: string,
break
proc serve*(server: PAsyncHttpServer, port: TPort,
callback: proc (request: TRequest): PFuture[void] {.gcsafe.},
callback: proc (request: TRequest): PFuture[void] {.closure,gcsafe.},
address = "") {.async.} =
## Starts the process of listening for incoming HTTP connections on the
## specified address and port.

View File

@@ -128,7 +128,7 @@ proc mget*[A](s: var TSet[A], key: A): var A =
## for sharing.
assert s.isValid, "The set needs to be initialized."
var index = rawGet(s, key)
if index >= 0: result = t.data[index].key
if index >= 0: result = s.data[index].key
else: raise newException(EInvalidKey, "key not found: " & $key)
proc contains*[A](s: TSet[A], key: A): bool =

View File

@@ -28,8 +28,9 @@ proc parseCookies*(s: string): PStringTable =
if s[i] == '\0': break
inc(i) # skip ';'
proc setCookie*(key, value: string, domain = "", path = "",
expires = "", noName = false): string =
proc setCookie*(key, value: string, domain = "", path = "",
expires = "", noName = false,
secure = false, httpOnly = false): string =
## Creates a command in the format of
## ``Set-Cookie: key=value; Domain=...; ...``
result = ""
@@ -38,16 +39,20 @@ proc setCookie*(key, value: string, domain = "", path = "",
if domain != "": result.add("; Domain=" & domain)
if path != "": result.add("; Path=" & path)
if expires != "": result.add("; Expires=" & expires)
if secure: result.add("; secure")
if httpOnly: result.add("; HttpOnly")
proc setCookie*(key, value: string, expires: TTimeInfo,
domain = "", path = "", noName = false): string =
domain = "", path = "", noName = false,
secure = false, httpOnly = false): string =
## Creates a command in the format of
## ``Set-Cookie: key=value; Domain=...; ...``
##
## **Note:** UTC is assumed as the timezone for ``expires``.
return setCookie(key, value, domain, path,
format(expires, "ddd',' dd MMM yyyy HH:mm:ss 'UTC'"), noname)
format(expires, "ddd',' dd MMM yyyy HH:mm:ss 'UTC'"),
noname, secure, httpOnly)
when isMainModule:
var tim = TTime(int(getTime()) + 76 * (60 * 60 * 24))
@@ -55,5 +60,3 @@ when isMainModule:
echo(setCookie("test", "value", tim.getGMTime()))
echo parseCookies("uid=1; kp=2")

View File

@@ -10,6 +10,10 @@ include "system/inclrtl"
import sockets, strutils, parseutils, times, os, asyncio
from asyncnet import nil
from rawsockets import nil
from asyncdispatch import PFuture
## This module **partially** implements an FTP client as specified
## by `RFC 959 <http://tools.ietf.org/html/rfc959>`_.
##
@@ -34,34 +38,32 @@ import sockets, strutils, parseutils, times, os, asyncio
type
TFTPClient* = object of TObject
case isAsync: bool
of false:
csock: TSocket # Command connection socket
dsock: TSocket # Data connection socket
else:
dummyA, dummyB: pointer # workaround a Nimrod API issue
asyncCSock: PAsyncSocket
asyncDSock: PAsyncSocket
PFtpBase*[SockType] = ref TFtpBase[SockType]
TFtpBase*[SockType] = object
csock*: SockType
dsock*: SockType
when SockType is asyncio.PAsyncSocket:
handleEvent*: proc (ftp: PAsyncFTPClient, ev: TFTPEvent){.closure,gcsafe.}
disp: PDispatcher
asyncDSockID: PDelegate
user, pass: string
address: string
port: TPort
user*, pass*: string
address*: string
when SockType is asyncnet.PAsyncSocket:
port*: rawsockets.TPort
else:
port*: TPort
jobInProgress: bool
job: ref TFTPJob
jobInProgress*: bool
job*: PFTPJob[SockType]
dsockConnected: bool
PFTPClient* = ref TFTPClient
dsockConnected*: bool
FTPJobType* = enum
JRetrText, JRetr, JStore
TFTPJob = object
prc: proc (ftp: PFTPClient, async: bool): bool {.nimcall, gcsafe.}
PFtpJob[T] = ref TFtpJob[T]
TFTPJob[T] = object
prc: proc (ftp: PFTPBase[T], async: bool): bool {.nimcall, gcsafe.}
case typ*: FTPJobType
of JRetrText:
lines: string
@@ -75,8 +77,11 @@ type
toStore: string # Data left to upload (Only used with async)
else: nil
TFtpClient* = TFtpBase[TSocket]
PFtpClient* = ref TFTPClient
PAsyncFTPClient* = ref TAsyncFTPClient ## Async alternative to TFTPClient.
TAsyncFTPClient* = object of TFTPClient
TAsyncFTPClient* = TFtpBase[asyncio.PAsyncSocket]
FTPEventType* = enum
EvTransferProgress, EvLines, EvRetr, EvStore
@@ -106,30 +111,30 @@ proc ftpClient*(address: string, port = TPort(21),
result.address = address
result.port = port
result.isAsync = false
result.dsockConnected = false
result.csock = socket()
if result.csock == InvalidSocket: osError(osLastError())
proc getDSock(ftp: PFTPClient): TSocket =
if ftp.isAsync: return ftp.asyncDSock else: return ftp.dsock
proc getDSock[T](ftp: PFTPBase[T]): TSocket =
return ftp.dsock
proc getCSock(ftp: PFTPClient): TSocket =
if ftp.isAsync: return ftp.asyncCSock else: return ftp.csock
proc getCSock[T](ftp: PFTPBase[T]): TSocket =
return ftp.csock
template blockingOperation(sock: TSocket, body: stmt) {.immediate.} =
if ftp.isAsync:
sock.setBlocking(true)
body
if ftp.isAsync:
sock.setBlocking(false)
proc expectReply(ftp: PFTPClient): TaintedString =
template blockingOperation(sock: asyncio.PAsyncSocket, body: stmt) {.immediate.} =
sock.setBlocking(true)
body
sock.setBlocking(false)
proc expectReply[T](ftp: PFtpBase[T]): TaintedString =
result = TaintedString""
blockingOperation(ftp.getCSock()):
ftp.getCSock().readLine(result)
proc send*(ftp: PFTPClient, m: string): TaintedString =
proc send*[T](ftp: PFtpBase[T], m: string): TaintedString =
## Send a message to the server, and wait for a primary reply.
## ``\c\L`` is added for you.
blockingOperation(ftp.getCSock()):
@@ -149,8 +154,8 @@ proc assertReply(received: TaintedString, expected: varargs[string]) =
"Expected reply '$1' got: $2" %
[expected.join("' or '"), received.string])
proc createJob(ftp: PFTPClient,
prc: proc (ftp: PFTPClient, async: bool): bool {.
proc createJob[T](ftp: PFtpBase[T],
prc: proc (ftp: PFtpBase[T], async: bool): bool {.
nimcall,gcsafe.},
cmd: FTPJobType) =
if ftp.jobInProgress:
@@ -165,7 +170,7 @@ proc createJob(ftp: PFTPClient,
of JRetr, JStore:
ftp.job.toStore = ""
proc deleteJob(ftp: PFTPClient) =
proc deleteJob[T](ftp: PFtpBase[T]) =
assert ftp.jobInProgress
ftp.jobInProgress = false
case ftp.job.typ
@@ -173,12 +178,9 @@ proc deleteJob(ftp: PFTPClient) =
ftp.job.lines = ""
of JRetr, JStore:
ftp.job.file.close()
if ftp.isAsync:
ftp.asyncDSock.close()
else:
ftp.dsock.close()
ftp.dsock.close()
proc handleTask(s: PAsyncSocket, ftp: PFTPClient) =
proc handleTask(s: PAsyncSocket, ftp: PAsyncFTPClient) =
if ftp.jobInProgress:
if ftp.job.typ in {JRetr, JStore}:
if epochTime() - ftp.job.lastProgressReport >= 1.0:
@@ -193,12 +195,12 @@ proc handleTask(s: PAsyncSocket, ftp: PFTPClient) =
ftp.job.oneSecond = 0
ftp.handleEvent(PAsyncFTPClient(ftp), r)
proc handleWrite(s: PAsyncSocket, ftp: PFTPClient) =
proc handleWrite(s: PAsyncSocket, ftp: PAsyncFTPClient) =
if ftp.jobInProgress:
if ftp.job.typ == JStore:
assert (not ftp.job.prc(ftp, true))
proc handleConnect(s: PAsyncSocket, ftp: PFTPClient) =
proc handleConnect(s: PAsyncSocket, ftp: PAsyncFTPClient) =
ftp.dsockConnected = true
assert(ftp.jobInProgress)
if ftp.job.typ == JStore:
@@ -206,30 +208,32 @@ proc handleConnect(s: PAsyncSocket, ftp: PFTPClient) =
else:
s.delHandleWrite()
proc handleRead(s: PAsyncSocket, ftp: PFTPClient) =
proc handleRead(s: PAsyncSocket, ftp: PAsyncFTPClient) =
assert ftp.jobInProgress
assert ftp.job.typ != JStore
# This can never return true, because it shouldn't check for code
# 226 from csock.
assert(not ftp.job.prc(ftp, true))
proc pasv(ftp: PFTPClient) =
proc pasv[T](ftp: PFtpBase[T]) =
## Negotiate a data connection.
if not ftp.isAsync:
when T is TSocket:
ftp.dsock = socket()
if ftp.dsock == InvalidSocket: osError(osLastError())
else:
ftp.asyncDSock = AsyncSocket()
ftp.asyncDSock.handleRead =
elif T is PAsyncSocket:
ftp.dsock = AsyncSocket()
ftp.dsock.handleRead =
proc (s: PAsyncSocket) =
handleRead(s, ftp)
ftp.asyncDSock.handleConnect =
ftp.dsock.handleConnect =
proc (s: PAsyncSocket) =
handleConnect(s, ftp)
ftp.asyncDSock.handleTask =
ftp.dsock.handleTask =
proc (s: PAsyncSocket) =
handleTask(s, ftp)
ftp.disp.register(ftp.asyncDSock)
ftp.disp.register(ftp.dsock)
else:
{.fatal: "Incorrect socket instantiation".}
var pasvMsg = ftp.send("PASV").string.strip.TaintedString
assertReply(pasvMsg, "227")
@@ -238,23 +242,24 @@ proc pasv(ftp: PFTPClient) =
var ip = nums[0.. -3]
var port = nums[-2.. -1]
var properPort = port[0].parseInt()*256+port[1].parseInt()
if ftp.isAsync:
ftp.asyncDSock.connect(ip.join("."), TPort(properPort.toU16))
ftp.dsock.connect(ip.join("."), TPort(properPort.toU16))
when T is PAsyncSocket:
ftp.dsockConnected = False
else:
ftp.dsock.connect(ip.join("."), TPort(properPort.toU16))
ftp.dsockConnected = True
proc normalizePathSep(path: string): string =
return replace(path, '\\', '/')
proc connect*(ftp: PFTPClient) =
proc connect*[T](ftp: PFtpBase[T]) =
## Connect to the FTP server specified by ``ftp``.
if ftp.isAsync:
blockingOperation(ftp.asyncCSock):
ftp.asyncCSock.connect(ftp.address, ftp.port)
else:
when T is PAsyncSocket:
blockingOperation(ftp.csock):
ftp.csock.connect(ftp.address, ftp.port)
elif T is TSocket:
ftp.csock.connect(ftp.address, ftp.port)
else:
{.fatal: "Incorrect socket instantiation".}
# TODO: Handle 120? or let user handle it.
assertReply ftp.expectReply(), "220"
@@ -279,25 +284,27 @@ proc cdup*(ftp: PFTPClient) =
## Changes the current directory to the parent of the current directory.
assertReply ftp.send("CDUP"), "200"
proc getLines(ftp: PFTPClient, async: bool = false): bool =
proc getLines[T](ftp: PFtpBase[T], async: bool = false): bool =
## Downloads text data in ASCII mode
## Returns true if the download is complete.
## It doesn't if `async` is true, because it doesn't check for 226 then.
if ftp.dsockConnected:
var r = TaintedString""
if ftp.isAsync:
when T is PAsyncSocket:
if ftp.asyncDSock.readLine(r):
if r.string == "":
ftp.dsockConnected = false
else:
ftp.job.lines.add(r.string & "\n")
else:
elif T is TSocket:
assert(not async)
ftp.dsock.readLine(r)
if r.string == "":
ftp.dsockConnected = false
else:
ftp.job.lines.add(r.string & "\n")
else:
{.fatal: "Incorrect socket instantiation".}
if not async:
var readSocks: seq[TSocket] = @[ftp.getCSock()]
@@ -307,14 +314,14 @@ proc getLines(ftp: PFTPClient, async: bool = false): bool =
assertReply ftp.expectReply(), "226"
return true
proc listDirs*(ftp: PFTPClient, dir: string = "",
proc listDirs*[T](ftp: PFtpBase[T], dir: string = "",
async = false): seq[string] =
## Returns a list of filenames in the given directory. If ``dir`` is "",
## the current directory is used. If ``async`` is true, this
## function will return immediately and it will be your job to
## use asyncio's ``poll`` to progress this operation.
ftp.createJob(getLines, JRetrText)
ftp.createJob(getLines[T], JRetrText)
ftp.pasv()
assertReply ftp.send("NLST " & dir.normalizePathSep), ["125", "150"]
@@ -384,12 +391,12 @@ proc chmod*(ftp: PFTPClient, path: string,
assertReply ftp.send("SITE CHMOD " & perm &
" " & path.normalizePathSep), "200"
proc list*(ftp: PFTPClient, dir: string = "", async = false): string =
proc list*[T](ftp: PFtpBase[T], dir: string = "", async = false): string =
## Lists all files in ``dir``. If ``dir`` is ``""``, uses the current
## working directory. If ``async`` is true, this function will return
## immediately and it will be your job to call asyncio's
## ``poll`` to progress this operation.
ftp.createJob(getLines, JRetrText)
ftp.createJob(getLines[T], JRetrText)
ftp.pasv()
assertReply(ftp.send("LIST" & " " & dir.normalizePathSep), ["125", "150"])
@@ -401,11 +408,11 @@ proc list*(ftp: PFTPClient, dir: string = "", async = false): string =
else:
return ""
proc retrText*(ftp: PFTPClient, file: string, async = false): string =
proc retrText*[T](ftp: PFtpBase[T], file: string, async = false): string =
## Retrieves ``file``. File must be ASCII text.
## If ``async`` is true, this function will return immediately and
## it will be your job to call asyncio's ``poll`` to progress this operation.
ftp.createJob(getLines, JRetrText)
ftp.createJob(getLines[T], JRetrText)
ftp.pasv()
assertReply ftp.send("RETR " & file.normalizePathSep), ["125", "150"]
@@ -416,15 +423,17 @@ proc retrText*(ftp: PFTPClient, file: string, async = false): string =
else:
return ""
proc getFile(ftp: PFTPClient, async = false): bool =
proc getFile[T](ftp: PFtpBase[T], async = false): bool =
if ftp.dsockConnected:
var r = "".TaintedString
var bytesRead = 0
var returned = false
if async:
if not ftp.isAsync: raise newException(EFTP, "FTPClient must be async.")
bytesRead = ftp.AsyncDSock.recvAsync(r, BufferSize)
returned = bytesRead != -1
when T is TSocket:
raise newException(EFTP, "FTPClient must be async.")
else:
bytesRead = ftp.dsock.recvAsync(r, BufferSize)
returned = bytesRead != -1
else:
bytesRead = getDSock(ftp).recv(r, BufferSize)
returned = true
@@ -443,13 +452,13 @@ proc getFile(ftp: PFTPClient, async = false): bool =
assertReply ftp.expectReply(), "226"
return true
proc retrFile*(ftp: PFTPClient, file, dest: string, async = false) =
proc retrFile*[T](ftp: PFtpBase[T], file, dest: string, async = false) =
## Downloads ``file`` and saves it to ``dest``. Usage of this function
## asynchronously is recommended to view the progress of the download.
## The ``EvRetr`` event is passed to the specified ``handleEvent`` function
## when the download is finished, and the ``filename`` field will be equal
## to ``file``.
ftp.createJob(getFile, JRetr)
ftp.createJob(getFile[T], JRetr)
ftp.job.file = open(dest, mode = fmWrite)
ftp.pasv()
var reply = ftp.send("RETR " & file.normalizePathSep)
@@ -468,11 +477,11 @@ proc retrFile*(ftp: PFTPClient, file, dest: string, async = false) =
while not ftp.job.prc(ftp, false): discard
ftp.deleteJob()
proc doUpload(ftp: PFTPClient, async = false): bool =
proc doUpload[T](ftp: PFtpBase[T], async = false): bool =
if ftp.dsockConnected:
if ftp.job.toStore.len() > 0:
assert(async)
let bytesSent = ftp.asyncDSock.sendAsync(ftp.job.toStore)
let bytesSent = ftp.dsock.sendAsync(ftp.job.toStore)
if bytesSent == ftp.job.toStore.len:
ftp.job.toStore = ""
elif bytesSent != ftp.job.toStore.len and bytesSent != 0:
@@ -485,7 +494,7 @@ proc doUpload(ftp: PFTPClient, async = false): bool =
setLen(s, len)
if len == 0:
# File finished uploading.
if ftp.isAsync: ftp.asyncDSock.close() else: ftp.dsock.close()
ftp.dsock.close()
ftp.dsockConnected = false
if not async:
@@ -496,7 +505,7 @@ proc doUpload(ftp: PFTPClient, async = false): bool =
if not async:
getDSock(ftp).send(s)
else:
let bytesSent = ftp.asyncDSock.sendAsync(s)
let bytesSent = ftp.dsock.sendAsync(s)
if bytesSent == 0:
ftp.job.toStore.add(s)
elif bytesSent != s.len:
@@ -506,14 +515,14 @@ proc doUpload(ftp: PFTPClient, async = false): bool =
ftp.job.progress.inc(len)
ftp.job.oneSecond.inc(len)
proc store*(ftp: PFTPClient, file, dest: string, async = false) =
proc store*[T](ftp: PFtpBase[T], file, dest: string, async = false) =
## Uploads ``file`` to ``dest`` on the remote FTP server. Usage of this
## function asynchronously is recommended to view the progress of
## the download.
## The ``EvStore`` event is passed to the specified ``handleEvent`` function
## when the upload is finished, and the ``filename`` field will be
## equal to ``file``.
ftp.createJob(doUpload, JStore)
ftp.createJob(doUpload[T], JStore)
ftp.job.file = open(file)
ftp.job.total = ftp.job.file.getFileSize()
ftp.job.lastProgressReport = epochTime()
@@ -526,16 +535,12 @@ proc store*(ftp: PFTPClient, file, dest: string, async = false) =
while not ftp.job.prc(ftp, false): discard
ftp.deleteJob()
proc close*(ftp: PFTPClient) =
proc close*[T](ftp: PFTPBase[T]) =
## Terminates the connection to the server.
assertReply ftp.send("QUIT"), "221"
if ftp.jobInProgress: ftp.deleteJob()
if ftp.isAsync:
ftp.asyncCSock.close()
ftp.asyncDSock.close()
else:
ftp.csock.close()
ftp.dsock.close()
ftp.csock.close()
ftp.dsock.close()
proc csockHandleRead(s: PAsyncSocket, ftp: PAsyncFTPClient) =
if ftp.jobInProgress:
@@ -572,20 +577,18 @@ proc asyncFTPClient*(address: string, port = TPort(21),
dres.pass = pass
dres.address = address
dres.port = port
dres.isAsync = true
dres.dsockConnected = false
dres.handleEvent = handleEvent
dres.asyncCSock = AsyncSocket()
dres.asyncCSock.handleRead =
dres.csock = AsyncSocket()
dres.csock.handleRead =
proc (s: PAsyncSocket) =
csockHandleRead(s, dres)
result = dres
proc register*(d: PDispatcher, ftp: PAsyncFTPClient): PDelegate {.discardable.} =
## Registers ``ftp`` with dispatcher ``d``.
assert ftp.isAsync
ftp.disp = d
return ftp.disp.register(ftp.asyncCSock)
return ftp.disp.register(ftp.csock)
when isMainModule:
proc main =
@@ -595,7 +598,7 @@ when isMainModule:
case event.typ
of EvStore:
echo("Upload finished!")
ftp.retrFile("payload.JPG", "payload2.JPG", async = true)
ftp.retrFile("payload.jpg", "payload2.jpg", async = true)
of EvTransferProgress:
var time: int64 = -1
if event.speed != 0:
@@ -610,13 +613,13 @@ when isMainModule:
ftp.close()
echo d.len
else: assert(false)
var ftp = asyncFTPClient("picheta.me", user = "test", pass = "asf", handleEvent = hev)
var ftp = asyncFTPClient("example.com", user = "foo", pass = "bar", handleEvent = hev)
d.register(ftp)
d.len.echo()
ftp.connect()
echo "connected"
ftp.store("payload.JPG", "payload.JPG", async = true)
ftp.store("payload.jpg", "payload.jpg", async = true)
d.len.echo()
echo "uploading..."
while true:
@@ -624,15 +627,15 @@ when isMainModule:
main()
when isMainModule and false:
var ftp = ftpClient("picheta.me", user = "asdasd", pass = "asfwq")
var ftp = ftpClient("example.com", user = "foo", pass = "bar")
ftp.connect()
echo ftp.pwd()
echo ftp.list()
echo("uploading")
ftp.store("payload.JPG", "payload.JPG", async = false)
ftp.store("payload.jpg", "payload.jpg", async = false)
echo("Upload complete")
ftp.retrFile("payload.JPG", "payload2.JPG", async = false)
ftp.retrFile("payload.jpg", "payload2.jpg", async = false)
echo("Download complete")
sleep(5000)

View File

@@ -654,8 +654,7 @@ when isMainModule:
resp = await client.request("http://nimrod-lang.org/download.html")
echo("Got response: ", resp.status)
asyncCheck main()
runForever()
waitFor main()
else:
#downloadFile("http://force7.de/nimrod/index.html", "nimrodindex.html")

View File

@@ -803,6 +803,36 @@ proc rfind*(s, sub: string, start: int = -1): int {.noSideEffect.} =
if result != -1: return
return -1
proc count*(s: string, sub: string, overlapping: bool = false): int {.noSideEffect,
rtl, extern: "nsuCountString".} =
## Count the occurences of a substring `sub` in the string `s`.
## Overlapping occurences of `sub` only count when `overlapping`
## is set to true.
var i = 0
while true:
i = s.find(sub, i)
if i < 0:
break
if overlapping:
inc i
else:
i += sub.len
inc result
proc count*(s: string, sub: char): int {.noSideEffect,
rtl, extern: "nsuCountChar".} =
## Count the occurences of the character `sub` in the string `s`.
for c in s:
if c == sub:
inc result
proc count*(s: string, subs: set[char]): int {.noSideEffect,
rtl, extern: "nsuCountCharSet".} =
## Count the occurences of the group of character `subs` in the string `s`.
for c in s:
if c in subs:
inc result
proc quoteIfContainsWhite*(s: string): string {.deprecated.} =
## Returns ``'"' & s & '"'`` if `s` contains a space and does not
## start with a quote, else returns `s`.
@@ -1354,3 +1384,8 @@ when isMainModule:
doAssert parseEnum[TMyEnum]("enu_D") == enuD
doAssert parseEnum("invalid enum value", enC) == enC
doAssert count("foofoofoo", "foofoo") == 1
doAssert count("foofoofoo", "foofoo", overlapping = true) == 2
doAssert count("foofoofoo", 'f') == 3
doAssert count("foofoofoobar", {'f','b'}) == 4

View File

@@ -513,7 +513,7 @@ elif defined(JS):
result.setFullYear(timeInfo.year)
result.setDate(timeInfo.monthday)
proc `$`(timeInfo: TTimeInfo): string = return $(TimeInfoToTIme(timeInfo))
proc `$`(timeInfo: TTimeInfo): string = return $(timeInfoToTime(timeInfo))
proc `$`(time: TTime): string = return $time.toLocaleString()
proc `-` (a, b: TTime): int64 =

View File

@@ -151,6 +151,8 @@ proc addEscaped*(result: var string, s: string) =
of '>': result.add("&gt;")
of '&': result.add("&amp;")
of '"': result.add("&quot;")
of '\'': result.add("&#x27;")
of '/': result.add("&#x2F;")
else: result.add(c)
proc escape*(s: string): string =
@@ -164,6 +166,8 @@ proc escape*(s: string): string =
## ``>`` ``&gt;``
## ``&`` ``&amp;``
## ``"`` ``&quot;``
## ``'`` ``&#x27;``
## ``/`` ``&#x2F;``
## ------------ -------------------
result = newStringOfCap(s.len)
addEscaped(result, s)

View File

@@ -849,13 +849,13 @@ proc contains*[T](s: TSlice[T], value: T): bool {.noSideEffect, inline.} =
## assert((1..3).contains(4) == false)
result = s.a <= value and value <= s.b
template `in` * (x, y: expr): expr {.immediate.} = contains(y, x)
template `in` * (x, y: expr): expr {.immediate, dirty.} = contains(y, x)
## Sugar for contains
##
## .. code-block:: Nimrod
## assert(1 in (1..3) == true)
## assert(5 in (1..3) == false)
template `notin` * (x, y: expr): expr {.immediate.} = not contains(y, x)
template `notin` * (x, y: expr): expr {.immediate, dirty.} = not contains(y, x)
## Sugar for not containing
##
## .. code-block:: Nimrod

View File

@@ -515,7 +515,7 @@ proc isFatPointer(ti: PNimType): bool =
proc nimCopy(x: pointer, ti: PNimType): pointer {.compilerproc.}
proc nimCopyAux(dest, src: Pointer, n: ptr TNimNode) {.compilerproc.} =
proc nimCopyAux(dest, src: pointer, n: ptr TNimNode) {.compilerproc.} =
case n.kind
of nkNone: sysAssert(false, "nimCopyAux")
of nkSlot:
@@ -566,7 +566,7 @@ proc nimCopy(x: pointer, ti: PNimType): pointer =
else:
result = x
proc genericReset(x: Pointer, ti: PNimType): pointer {.compilerproc.} =
proc genericReset(x: pointer, ti: PNimType): pointer {.compilerproc.} =
case ti.kind
of tyPtr, tyRef, tyVar, tyNil:
if not isFatPointer(ti):

View File

@@ -131,6 +131,14 @@ when defined(boehmgc):
if result == nil: raiseOutOfMem()
proc deallocShared(p: pointer) = boehmDealloc(p)
when hasThreadSupport:
proc getFreeSharedMem(): int =
boehmGetFreeBytes()
proc getTotalSharedMem(): int =
boehmGetHeapSize()
proc getOccupiedSharedMem(): int =
getTotalSharedMem() - getFreeSharedMem()
#boehmGCincremental()
proc GC_disable() = boehmGC_disable()
@@ -164,11 +172,11 @@ when defined(boehmgc):
proc nimGCref(p: pointer) {.compilerproc, inline.} = discard
proc nimGCunref(p: pointer) {.compilerproc, inline.} = discard
proc unsureAsgnRef(dest: ppointer, src: pointer) {.compilerproc, inline.} =
proc unsureAsgnRef(dest: PPointer, src: pointer) {.compilerproc, inline.} =
dest[] = src
proc asgnRef(dest: ppointer, src: pointer) {.compilerproc, inline.} =
proc asgnRef(dest: PPointer, src: pointer) {.compilerproc, inline.} =
dest[] = src
proc asgnRefNoCycle(dest: ppointer, src: pointer) {.compilerproc, inline.} =
proc asgnRefNoCycle(dest: PPointer, src: pointer) {.compilerproc, inline.} =
dest[] = src
type
@@ -180,7 +188,7 @@ when defined(boehmgc):
proc alloc0(r: var TMemRegion, size: int): pointer =
result = alloc(size)
zeroMem(result, size)
proc dealloc(r: var TMemRegion, p: Pointer) = boehmDealloc(p)
proc dealloc(r: var TMemRegion, p: pointer) = boehmDealloc(p)
proc deallocOsPages(r: var TMemRegion) {.inline.} = discard
proc deallocOsPages() {.inline.} = discard
@@ -239,11 +247,11 @@ elif defined(nogc) and defined(useMalloc):
proc nimGCref(p: pointer) {.compilerproc, inline.} = discard
proc nimGCunref(p: pointer) {.compilerproc, inline.} = discard
proc unsureAsgnRef(dest: ppointer, src: pointer) {.compilerproc, inline.} =
proc unsureAsgnRef(dest: PPointer, src: pointer) {.compilerproc, inline.} =
dest[] = src
proc asgnRef(dest: ppointer, src: pointer) {.compilerproc, inline.} =
proc asgnRef(dest: PPointer, src: pointer) {.compilerproc, inline.} =
dest[] = src
proc asgnRefNoCycle(dest: ppointer, src: pointer) {.compilerproc, inline.} =
proc asgnRefNoCycle(dest: PPointer, src: pointer) {.compilerproc, inline.} =
dest[] = src
type
@@ -292,11 +300,11 @@ elif defined(nogc):
proc nimGCref(p: pointer) {.compilerproc, inline.} = discard
proc nimGCunref(p: pointer) {.compilerproc, inline.} = discard
proc unsureAsgnRef(dest: ppointer, src: pointer) {.compilerproc, inline.} =
proc unsureAsgnRef(dest: PPointer, src: pointer) {.compilerproc, inline.} =
dest[] = src
proc asgnRef(dest: ppointer, src: pointer) {.compilerproc, inline.} =
proc asgnRef(dest: PPointer, src: pointer) {.compilerproc, inline.} =
dest[] = src
proc asgnRefNoCycle(dest: ppointer, src: pointer) {.compilerproc, inline.} =
proc asgnRefNoCycle(dest: PPointer, src: pointer) {.compilerproc, inline.} =
dest[] = src
var allocator {.rtlThreadVar.}: TMemRegion

View File

@@ -1,17 +1,17 @@
#
#
# Nimrod's Runtime Library
# Nim's Runtime Library
# (c) Copyright 2012 Andreas Rumpf
#
# See the file "copying.txt", included in this
# distribution, for details about the copyright.
#
## Thread support for Nimrod. **Note**: This is part of the system module.
## Thread support for Nim. **Note**: This is part of the system module.
## Do not import it directly. To activate thread support you need to compile
## with the ``--threads:on`` command line switch.
##
## Nimrod's memory model for threads is quite different from other common
## Nim's memory model for threads is quite different from other common
## programming languages (C, Pascal): Each thread has its own
## (garbage collected) heap and sharing of memory is restricted. This helps
## to prevent race conditions and improves efficiency. See `the manual for
@@ -19,7 +19,7 @@
##
## Example:
##
## .. code-block:: nimrod
## .. code-block:: Nim
##
## import locks
##
@@ -190,7 +190,7 @@ var globalsSlot = threadVarAlloc()
when emulatedThreadVars:
proc GetThreadLocalVars(): pointer {.compilerRtl, inl.} =
result = addr(cast[PGcThread](ThreadVarGetValue(globalsSlot)).tls)
result = addr(cast[PGcThread](threadVarGetValue(globalsSlot)).tls)
when useStackMaskHack:
proc maskStackPointer(offset: int): pointer {.compilerRtl, inl.} =
@@ -210,7 +210,7 @@ when not defined(useNimRtl):
initGC()
when emulatedThreadVars:
if NimThreadVarsSize() > sizeof(TThreadLocalStorage):
if nimThreadVarsSize() > sizeof(TThreadLocalStorage):
echo "too large thread local storage size requested"
quit 1
@@ -245,14 +245,14 @@ when not defined(useNimRtl):
# the GC can examine the stacks?
proc stopTheWord() = discard
# We jump through some hops here to ensure that Nimrod thread procs can have
# the Nimrod calling convention. This is needed because thread procs are
# We jump through some hops here to ensure that Nim thread procs can have
# the Nim calling convention. This is needed because thread procs are
# ``stdcall`` on Windows and ``noconv`` on UNIX. Alternative would be to just
# use ``stdcall`` since it is mapped to ``noconv`` on UNIX anyway.
type
TThread* {.pure, final.}[TArg] =
object of TGcThread ## Nimrod thread. A thread is a heavy object (~14K)
object of TGcThread ## Nim thread. A thread is a heavy object (~14K)
## that **must not** be part of a message! Use
## a ``TThreadId`` for that.
when TArg is void:
@@ -267,7 +267,7 @@ when not defined(boehmgc) and not hasSharedHeap:
proc deallocOsPages()
template threadProcWrapperBody(closure: expr) {.immediate.} =
when declared(globalsSlot): ThreadVarSetValue(globalsSlot, closure)
when declared(globalsSlot): threadVarSetValue(globalsSlot, closure)
var t = cast[ptr TThread[TArg]](closure)
when useStackMaskHack:
var tls: TThreadLocalStorage
@@ -305,22 +305,26 @@ proc running*[TArg](t: TThread[TArg]): bool {.inline.} =
## returns true if `t` is running.
result = t.dataFn != nil
proc joinThread*[TArg](t: TThread[TArg]) {.inline.} =
## waits for the thread `t` to finish.
when hostOS == "windows":
when hostOS == "windows":
proc joinThread*[TArg](t: TThread[TArg]) {.inline.} =
## waits for the thread `t` to finish.
discard waitForSingleObject(t.sys, -1'i32)
else:
discard pthread_join(t.sys, nil)
proc joinThreads*[TArg](t: varargs[TThread[TArg]]) =
## waits for every thread in `t` to finish.
when hostOS == "windows":
proc joinThreads*[TArg](t: varargs[TThread[TArg]]) =
## waits for every thread in `t` to finish.
var a: array[0..255, TSysThread]
sysAssert a.len >= t.len, "a.len >= t.len"
for i in 0..t.high: a[i] = t[i].sys
discard waitForMultipleObjects(t.len.int32,
discard waitForMultipleObjects(t.len.int32,
cast[ptr TSysThread](addr(a)), 1, -1)
else:
else:
proc joinThread*[TArg](t: TThread[TArg]) {.inline.} =
## waits for the thread `t` to finish.
discard pthread_join(t.sys, nil)
proc joinThreads*[TArg](t: varargs[TThread[TArg]]) =
## waits for every thread in `t` to finish.
for i in 0..t.high: joinThread(t[i])
when false:
@@ -335,22 +339,32 @@ when false:
when declared(registerThread): unregisterThread(addr(t))
t.dataFn = nil
proc createThread*[TArg](t: var TThread[TArg],
tp: proc (arg: TArg) {.thread.},
param: TArg) =
## creates a new thread `t` and starts its execution. Entry point is the
## proc `tp`. `param` is passed to `tp`. `TArg` can be ``void`` if you
## don't need to pass any data to the thread.
when TArg isnot void: t.data = param
t.dataFn = tp
when hasSharedHeap: t.stackSize = ThreadStackSize
when hostOS == "windows":
when hostOS == "windows":
proc createThread*[TArg](t: var TThread[TArg],
tp: proc (arg: TArg) {.thread.},
param: TArg) =
## creates a new thread `t` and starts its execution. Entry point is the
## proc `tp`. `param` is passed to `tp`. `TArg` can be ``void`` if you
## don't need to pass any data to the thread.
when TArg isnot void: t.data = param
t.dataFn = tp
when hasSharedHeap: t.stackSize = ThreadStackSize
var dummyThreadId: int32
t.sys = createThread(nil, ThreadStackSize, threadProcWrapper[TArg],
addr(t), 0'i32, dummyThreadId)
if t.sys <= 0:
raise newException(EResourceExhausted, "cannot create thread")
else:
else:
proc createThread*[TArg](t: var TThread[TArg],
tp: proc (arg: TArg) {.thread.},
param: TArg) =
## creates a new thread `t` and starts its execution. Entry point is the
## proc `tp`. `param` is passed to `tp`. `TArg` can be ``void`` if you
## don't need to pass any data to the thread.
when TArg isnot void: t.data = param
t.dataFn = tp
when hasSharedHeap: t.stackSize = ThreadStackSize
var a {.noinit.}: Tpthread_attr
pthread_attr_init(a)
pthread_attr_setstacksize(a, ThreadStackSize)
@@ -364,7 +378,7 @@ proc threadId*[TArg](t: var TThread[TArg]): TThreadId[TArg] {.inline.} =
proc myThreadId*[TArg](): TThreadId[TArg] =
## returns the thread ID of the thread that calls this proc. This is unsafe
## because the type ``TArg`` is not checked for consistency!
result = cast[TThreadId[TArg]](ThreadVarGetValue(globalsSlot))
result = cast[TThreadId[TArg]](threadVarGetValue(globalsSlot))
when false:
proc mainThreadId*[TArg](): TThreadId[TArg] =

View File

@@ -213,6 +213,8 @@ proc PQexecParams*(conn: PPGconn, command: cstring, nParams: int32,
paramTypes: POid, paramValues: cstringArray,
paramLengths, paramFormats: ptr int32, resultFormat: int32): PPGresult{.
cdecl, dynlib: dllName, importc: "PQexecParams".}
proc PQprepare*(conn: PPGconn, stmtName, query: cstring, nParams: int32,
paramTypes: POid): PPGresult{.cdecl, dynlib: dllName, importc: "PQprepare".}
proc PQexecPrepared*(conn: PPGconn, stmtName: cstring, nParams: int32,
paramValues: cstringArray,
paramLengths, paramFormats: ptr int32, resultFormat: int32): PPGresult{.

View File

@@ -1,5 +1,9 @@
discard """
output: '''10'''
output: '''10
true true
true false
false true
false false'''
"""
var
@@ -31,3 +35,10 @@ iterator permutations: int =
for p in permutations():
break
# regression:
proc main =
for x in [true, false]:
for y in [true, false]:
echo x, " ", y
main()

View File

@@ -6,3 +6,11 @@ type MyObj = object
proc foo*(b: any) =
var o: MyObj
echo b.baz, " ", o.x.baz, " ", b.baz()
import sets
var intset = initSet[int]()
proc func*[T](a: T) =
if a in intset: echo("true")
else: echo("false")

View File

@@ -1,7 +1,10 @@
discard """
output: '''5 5 5'''
output: '''5 5 5
false'''
"""
import mdotlookup
foo(7)
# bug #1444
func(4)

View File

@@ -21,6 +21,7 @@ type
nimrodArgs: string
gitCommit: string
quotations: TTable[string, tuple[quote, author: string]]
numProcessors: int # Set by parallelBuild:n, only works for values > 0.
TRssItem = object
year, month, day, title: string
@@ -42,6 +43,7 @@ proc initConfigData(c: var TConfigData) =
c.ticker = ""
c.vars = newStringTable(modeStyleInsensitive)
c.gitCommit = "master"
c.numProcessors = countProcessors()
# Attempts to obtain the git current commit.
let (output, code) = execCmdEx("git log -n 1 --format=%H")
if code == 0 and output.strip.len == 40:
@@ -121,6 +123,12 @@ proc parseCmdLine(c: var TConfigData) =
stdout.write(Version & "\n")
quit(0)
of "o", "output": c.outdir = val
of "parallelbuild":
try:
let num = parseInt(val)
if num != 0: c.numProcessors = num
except EInvalidValue:
quit("invalid numeric value for --parallelBuild")
of "var":
var idx = val.find('=')
if idx < 0: quit("invalid command line")
@@ -187,6 +195,12 @@ proc parseIniFile(c: var TConfigData) =
of "srcdoc": addFiles(c.srcdoc, "lib", ".nim", split(v, {';'}))
of "srcdoc2": addFiles(c.srcdoc2, "lib", ".nim", split(v, {';'}))
of "webdoc": addFiles(c.webdoc, "lib", ".nim", split(v, {';'}))
of "parallelbuild":
try:
let num = parseInt(v)
if num != 0: c.numProcessors = num
except EInvalidValue:
quit("invalid numeric value for --parallelBuild in config")
else: quit(errorStr(p, "unknown variable: " & k.key))
of "quotations":
let vSplit = v.split('-')
@@ -215,6 +229,20 @@ proc exec(cmd: string) =
echo(cmd)
if os.execShellCmd(cmd) != 0: quit("external program failed")
proc sexec(cmds: openarray[string]) =
## Serial queue wrapper around exec.
for cmd in cmds: exec(cmd)
proc mexec(cmds: openarray[string], processors: int) =
## Multiprocessor version of exec
if processors < 2:
sexec(cmds)
return
if 0 != execProcesses(cmds, {poStdErrToStdOut, poParentStreams, poEchoCmd}):
echo "external program failed, retrying serial work queue for logs!"
sexec(cmds)
proc buildDocSamples(c: var TConfigData, destPath: string) =
## Special case documentation sample proc.
##
@@ -229,18 +257,26 @@ proc buildDocSamples(c: var TConfigData, destPath: string) =
proc buildDoc(c: var TConfigData, destPath: string) =
# call nim for the documentation:
var
commands = newSeq[string](len(c.doc) + len(c.srcdoc) + len(c.srcdoc2))
i = 0
for d in items(c.doc):
exec("nimrod rst2html $# --docSeeSrcUrl:$# -o:$# --index:on $#" %
commands[i] = "nimrod rst2html $# --docSeeSrcUrl:$# -o:$# --index:on $#" %
[c.nimrodArgs, c.gitCommit,
destPath / changeFileExt(splitFile(d).name, "html"), d])
destPath / changeFileExt(splitFile(d).name, "html"), d]
i.inc
for d in items(c.srcdoc):
exec("nimrod doc $# --docSeeSrcUrl:$# -o:$# --index:on $#" %
commands[i] = "nimrod doc $# --docSeeSrcUrl:$# -o:$# --index:on $#" %
[c.nimrodArgs, c.gitCommit,
destPath / changeFileExt(splitFile(d).name, "html"), d])
destPath / changeFileExt(splitFile(d).name, "html"), d]
i.inc
for d in items(c.srcdoc2):
exec("nimrod doc2 $# --docSeeSrcUrl:$# -o:$# --index:on $#" %
commands[i] = "nimrod doc2 $# --docSeeSrcUrl:$# -o:$# --index:on $#" %
[c.nimrodArgs, c.gitCommit,
destPath / changeFileExt(splitFile(d).name, "html"), d])
destPath / changeFileExt(splitFile(d).name, "html"), d]
i.inc
mexec(commands, c.numProcessors)
exec("nimrod buildIndex -o:$1/theindex.html $1" % [destPath])
proc buildPdfDoc(c: var TConfigData, destPath: string) =
@@ -264,10 +300,12 @@ proc buildPdfDoc(c: var TConfigData, destPath: string) =
proc buildAddDoc(c: var TConfigData, destPath: string) =
# build additional documentation (without the index):
for d in items(c.webdoc):
exec("nimrod doc $# --docSeeSrcUrl:$# -o:$# $#" %
var commands = newSeq[string](c.webdoc.len)
for i, doc in pairs(c.webdoc):
commands[i] = "nimrod doc $# --docSeeSrcUrl:$# -o:$# $#" %
[c.nimrodArgs, c.gitCommit,
destPath / changeFileExt(splitFile(d).name, "html"), d])
destPath / changeFileExt(splitFile(doc).name, "html"), doc]
mexec(commands, c.numProcessors)
proc parseNewsTitles(inputFilename: string): seq[TRssItem] =
# parses the file for titles and returns them as TRssItem blocks.