resolved merge conflicts

Araq
2016-03-29 15:30:44 +02:00
41 changed files with 1090 additions and 255 deletions

View File

@@ -672,9 +672,13 @@ proc genDeref(p: BProc, e: PNode, d: var TLoc; enforceDeref=false) =
expr(p, e.sons[0], d)
else:
var a: TLoc
initLocExprSingleUse(p, e.sons[0], a)
let typ = skipTypes(e.sons[0].typ, abstractInst)
if typ.kind == tyVar and tfVarIsPtr notin typ.flags and p.module.compileToCpp and e.sons[0].kind == nkHiddenAddr:
initLocExprSingleUse(p, e[0][0], d)
return
else:
initLocExprSingleUse(p, e.sons[0], a)
if d.k == locNone:
let typ = skipTypes(a.t, abstractInst)
# dest = *a; <-- We do not know that 'dest' is on the heap!
# It is completely wrong to set 'd.s' here, unless it's not yet
# been assigned to.
@@ -689,9 +693,9 @@ proc genDeref(p: BProc, e: PNode, d: var TLoc; enforceDeref=false) =
return
of tyPtr:
d.s = OnUnknown # BUGFIX!
else: internalError(e.info, "genDeref " & $a.t.kind)
else:
internalError(e.info, "genDeref " & $typ.kind)
elif p.module.compileToCpp:
let typ = skipTypes(a.t, abstractInst)
if typ.kind == tyVar and tfVarIsPtr notin typ.flags and
e.kind == nkHiddenDeref:
putIntoDest(p, d, e.typ, rdLoc(a), a.s)

View File

@@ -211,7 +211,6 @@ proc escapeJSString(s: string): string =
of '\e': result.add("\\e")
of '\v': result.add("\\v")
of '\\': result.add("\\\\")
of '\'': result.add("\\'")
of '\"': result.add("\\\"")
else: add(result, c)
result.add("\"")
@@ -464,6 +463,22 @@ proc arith(p: PProc, n: PNode, r: var TCompRes, op: TMagic) =
of mSubU: binaryUintExpr(p, n, r, "-")
of mMulU: binaryUintExpr(p, n, r, "*")
of mDivU: binaryUintExpr(p, n, r, "/")
of mDivI:
if p.target == targetPHP:
var x, y: TCompRes
gen(p, n.sons[1], x)
gen(p, n.sons[2], y)
r.res = "intval($1 / $2)" % [x.rdLoc, y.rdLoc]
else:
arithAux(p, n, r, op, jsOps)
of mModI:
if p.target == targetPHP:
var x, y: TCompRes
gen(p, n.sons[1], x)
gen(p, n.sons[2], y)
r.res = "($1 % $2)" % [x.rdLoc, y.rdLoc]
else:
arithAux(p, n, r, op, jsOps)
of mShrI:
var x, y: TCompRes
gen(p, n.sons[1], x)
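A hedged sketch of what the new mDivI/mModI branches aim to produce on the PHP target (the generated PHP shown in the comments is the intended shape, not output captured from this commit):

let a = 7
let b = 2
echo a div b   # PHP target: emitted roughly as intval($a / $b), so the result stays integral
echo a mod b   # PHP target: emitted roughly as ($a % $b)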
@@ -767,10 +782,17 @@ proc generateHeader(p: PProc, typ: PType): Rope =
add(result, name)
add(result, "_Idx")
elif not (i == 1 and param.name.s == "this"):
if param.typ.skipTypes({tyGenericInst}).kind == tyVar:
let k = param.typ.skipTypes({tyGenericInst}).kind
if k in { tyVar, tyRef, tyPtr, tyPointer }:
add(result, "&")
add(result, "$")
add(result, name)
# XXX I think something like this is needed for PHP to really support
# ptr "inside" strings and seq
#if mapType(param.typ) == etyBaseIndex:
# add(result, ", $")
# add(result, name)
# add(result, "_Idx")
const
nodeKindsNeedNoCopy = {nkCharLit..nkInt64Lit, nkStrLit..nkTripleStrLit,
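For the generateHeader change above, a hedged illustration of the kind of signature it affects on the PHP target: parameters of kind tyVar, tyRef, tyPtr or tyPointer now get PHP's by-reference marker. The proc below is illustrative, not from the commit:

proc bump(x: var int) =
  inc x

var n = 0
bump(n)   # with the PHP backend the emitted header for 'bump' now reads
          # roughly: function bump(&$x)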
@@ -953,10 +975,12 @@ proc genArrayAccess(p: PProc, n: PNode, r: var TCompRes) =
if n.sons[0].kind in nkCallKinds+{nkStrLit..nkTripleStrLit}:
useMagic(p, "nimAt")
if ty.kind in {tyString, tyCString}:
# XXX this needs to be more like substr($1,$2)
r.res = "ord(nimAt($1, $2))" % [r.address, r.res]
else:
r.res = "nimAt($1, $2)" % [r.address, r.res]
elif ty.kind in {tyString, tyCString}:
# XXX this needs to be more like substr($1,$2)
r.res = "ord($1[$2])" % [r.address, r.res]
else:
r.res = "$1[$2]" % [r.address, r.res]
@@ -1963,6 +1987,7 @@ proc gen(p: PProc, n: PNode, r: var TCompRes) =
genInfixCall(p, n, r)
else:
genCall(p, n, r)
of nkClosure: gen(p, n[0], r)
of nkCurly: genSetConstr(p, n, r)
of nkBracket: genArrayConstr(p, n, r)
of nkPar: genTupleConstr(p, n, r)

View File

@@ -263,7 +263,7 @@ template eatChar(L: var TLexer, t: var TToken) =
add(t.literal, L.buf[L.bufpos])
inc(L.bufpos)
proc getNumber(L: var TLexer): TToken =
proc getNumber(L: var TLexer, result: var TToken) =
proc matchUnderscoreChars(L: var TLexer, tok: var TToken, chars: set[char]) =
var pos = L.bufpos # use registers for pos, buf
var buf = L.buf
@@ -1061,7 +1061,7 @@ proc rawGetTok*(L: var TLexer, tok: var TToken) =
getCharacter(L, tok)
tok.tokType = tkCharLit
of '0'..'9':
tok = getNumber(L)
getNumber(L, tok)
else:
if c in OpChars:
getOperator(L, tok)

View File

@@ -67,9 +67,12 @@ template semIdeForTemplateOrGeneric(c: PContext; n: PNode;
proc typeMismatch(n: PNode, formal, actual: PType) =
if formal.kind != tyError and actual.kind != tyError:
let named = typeToString(formal)
let desc = typeToString(formal, preferDesc)
let x = if named == desc: named else: named & " = " & desc
localError(n.info, errGenerated, msgKindToString(errTypeMismatch) &
typeToString(actual) & ") " &
`%`(msgKindToString(errButExpectedX), [typeToString(formal)]))
`%`(msgKindToString(errButExpectedX), [x]))
proc fitNode(c: PContext, formal: PType, arg: PNode): PNode =
if arg.typ.isNil:

View File

@@ -620,7 +620,8 @@ proc semFor(c: PContext, n: PNode): PNode =
result.kind = nkParForStmt
else:
result = semForFields(c, n, call.sons[0].sym.magic)
elif isCallExpr and call.sons[0].typ.callConv == ccClosure:
elif isCallExpr and call.sons[0].typ.callConv == ccClosure and
tfIterator in call.sons[0].typ.flags:
# first class iterator:
result = semForVars(c, n)
elif not isCallExpr or call.sons[0].kind != nkSym or
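The extra tfIterator check means only genuine first-class (closure) iterators take this branch; an ordinary closure proc used in a for loop no longer does. A minimal example of the accepted form:

iterator countTo(n: int): int {.closure.} =
  var i = 0
  while i <= n:
    yield i
    inc i

for x in countTo(3):   # first-class iterator: carries the tfIterator flag
  echo x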

View File

@@ -831,9 +831,11 @@ proc liftParamType(c: PContext, procKind: TSymKind, genericParams: PNode,
result.rawAddSon paramType.lastSon
return addImplicitGeneric(result)
result = instGenericContainer(c, paramType.sym.info, result,
let x = instGenericContainer(c, paramType.sym.info, result,
allowMetaTypes = true)
result = newTypeWithSons(c, tyCompositeTypeClass, @[paramType, result])
result = newTypeWithSons(c, tyCompositeTypeClass, @[paramType, x])
#result = newTypeS(tyCompositeTypeClass, c)
#for i in 0..<x.len: result.rawAddSon(x.sons[i])
result = addImplicitGeneric(result)
of tyGenericInst:

View File

@@ -847,7 +847,10 @@ proc typeRel(c: var TCandidate, f, aOrig: PType, doBind = true): TTypeRelation =
inc(c.inheritancePenalty, depth)
result = isSubtype
of tyDistinct:
if a.kind == tyDistinct and sameDistinctTypes(f, a): result = isEqual
if a.kind == tyDistinct:
if sameDistinctTypes(f, a): result = isEqual
elif f.base.kind == tyAnything: result = isGeneric
elif c.coerceDistincts: result = typeRel(c, f.base, a)
elif c.coerceDistincts: result = typeRel(c, f.base, a)
of tySet:
if a.kind == tySet:
@@ -922,19 +925,7 @@ proc typeRel(c: var TCandidate, f, aOrig: PType, doBind = true): TTypeRelation =
if a.kind == tyEmpty: result = isEqual
of tyGenericInst:
let roota = a.skipGenericAlias
let rootf = f.skipGenericAlias
if a.kind == tyGenericInst and roota.base == rootf.base:
for i in 1 .. rootf.sonsLen-2:
let ff = rootf.sons[i]
let aa = roota.sons[i]
result = typeRel(c, ff, aa)
if result == isNone: return
if ff.kind == tyRange and result != isEqual: return isNone
#result = isGeneric
# XXX See bug #2220. A[int] should match A[int] better than some generic X
else:
result = typeRel(c, lastSon(f), a)
result = typeRel(c, lastSon(f), a)
of tyGenericBody:
considerPreviousT:
@@ -1035,12 +1026,20 @@ proc typeRel(c: var TCandidate, f, aOrig: PType, doBind = true): TTypeRelation =
of tyCompositeTypeClass:
considerPreviousT:
if typeRel(c, f.sons[1], a) != isNone:
put(c.bindings, f, a)
return isGeneric
let roota = a.skipGenericAlias
let rootf = f.lastSon.skipGenericAlias
if a.kind == tyGenericInst and roota.base == rootf.base:
for i in 1 .. rootf.sonsLen-2:
let ff = rootf.sons[i]
let aa = roota.sons[i]
result = typeRel(c, ff, aa)
if result == isNone: return
if ff.kind == tyRange and result != isEqual: return isNone
else:
return isNone
result = typeRel(c, rootf.lastSon, a)
if result != isNone:
put(c.bindings, f, a)
result = isGeneric
of tyGenericParam:
var x = PType(idTableGet(c.bindings, f))
if x == nil:

View File

@@ -575,7 +575,7 @@ name ``c`` should default to type ``Context``, ``n`` should default to
The ``using`` section uses the same indentation based grouping syntax as
a ``var`` or ``let``` section.
a ``var`` or ``let`` section.
If expression

View File

@@ -54,3 +54,13 @@ proc wait*(cond: var Cond, lock: var Lock) {.inline.} =
proc signal*(cond: var Cond) {.inline.} =
## sends a signal to the condition variable `cond`.
signalSysCond(cond)
template withLock*(a: Lock, body: untyped) =
## Acquires the given lock, executes the statements in body and
## releases the lock after the statements finish executing.
a.acquire()
{.locks: [a].}:
try:
body
finally:
a.release()
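A small usage sketch for the new withLock template (everything apart from the template itself is illustrative):

var counterLock: Lock
initLock(counterLock)
var counter = 0

withLock(counterLock):
  # the lock is held for the duration of this block and released even
  # if the body raises
  inc counter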

View File

@@ -206,6 +206,18 @@ proc htons*(x: int16): int16 =
## order, this is a no-op; otherwise, it performs a 2-byte swap operation.
result = sockets.ntohs(x)
template ntohl(x: uint32): expr =
cast[uint32](sockets.ntohl(cast[int32](x)))
template ntohs(x: uint16): expr =
cast[uint16](sockets.ntohs(cast[int16](x)))
template htonl(x: uint32): expr =
sockets.ntohl(x)
template htons(x: uint16): expr =
sockets.ntohs(x)
when defined(Posix):
proc toInt(domain: Domain): cint =
case domain
@@ -451,7 +463,7 @@ proc bindAddr*(socket: Socket, port = Port(0), address = "") {.
name.sin_family = int16(ord(AF_INET))
else:
name.sin_family = posix.AF_INET
name.sin_port = sockets.htons(int16(port))
name.sin_port = sockets.htons(uint16(port))
name.sin_addr.s_addr = sockets.htonl(INADDR_ANY)
if bindSocket(socket.fd, cast[ptr SockAddr](addr(name)),
sizeof(name).SockLen) < 0'i32:
@@ -834,7 +846,7 @@ proc connect*(socket: Socket, address: string, port = Port(0),
when false:
var s: TSockAddrIn
s.sin_addr.s_addr = inet_addr(address)
s.sin_port = sockets.htons(int16(port))
s.sin_port = sockets.htons(uint16(port))
when defined(windows):
s.sin_family = toU16(ord(af))
else:

View File

@@ -38,7 +38,7 @@
##
## .. code-block:: Nim
## import db_odbc
## let db = open("localhost", "user", "password", "dbname")
## var db = open("localhost", "user", "password", "dbname")
## db.close()
##
## Creating a table
@@ -64,7 +64,7 @@
##
## import db_odbc, math
##
## let theDb = open("localhost", "nim", "nim", "test")
## var theDb = open("localhost", "nim", "nim", "test")
##
## theDb.exec(sql"Drop table if exists myTestTbl")
## theDb.exec(sql("create table myTestTbl (" &
@@ -88,9 +88,7 @@
##
## theDb.close()
import strutils, odbcsql
import db_common
export db_common
@@ -169,11 +167,11 @@ proc dbError*(db: var DbConn) {.
properFreeResult(SQL_HANDLE_ENV, db.env)
raise e
proc SqlCheck(db: var DbConn, resVal: TSqlSmallInt) {.raises: [DbError]} =
## Wrapper that checks if ``resVal`` is not SQL_SUCCESS and if so, raises [EDb]
if resVal != SQL_SUCCESS: dbError(db)
proc sqlCheck(db: var DbConn, resVal: TSqlSmallInt) {.raises: [DbError]} =
## Wrapper that raises [EDb] if ``resVal`` is neither SQL_SUCCESS nor SQL_NO_DATA
if resVal notIn [SQL_SUCCESS, SQL_NO_DATA]: dbError(db)
proc SqlGetDBMS(db: var DbConn): string {.
proc sqlGetDBMS(db: var DbConn): string {.
tags: [ReadDbEffect, WriteDbEffect], raises: [] .} =
## Returns the ODBC SQL_DBMS_NAME string
const
@@ -182,7 +180,7 @@ proc SqlGetDBMS(db: var DbConn): string {.
sz: TSqlSmallInt = 0
buf[0] = '\0'
try:
db.SqlCheck(SQLGetInfo(db.hDb, SQL_DBMS_NAME, cast[SqlPointer](buf.addr),
db.sqlCheck(SQLGetInfo(db.hDb, SQL_DBMS_NAME, cast[SqlPointer](buf.addr),
4095.TSqlSmallInt, sz.addr))
except: discard
return $buf.cstring
@@ -212,7 +210,7 @@ proc dbFormat(formatstr: SqlQuery, args: varargs[string]): string {.
add(result, c)
proc prepareFetch(db: var DbConn, query: SqlQuery,
args: varargs[string, `$`]) {.
args: varargs[string, `$`]) : TSqlSmallInt {.
tags: [ReadDbEffect, WriteDbEffect], raises: [DbError].} =
# Prepare a statement, execute it and fetch the data to the driver
# ready for retrieval of the data
@@ -220,11 +218,13 @@ proc prepareFetch(db: var DbConn, query: SqlQuery,
# requires calling
# properFreeResult(SQL_HANDLE_STMT, db.stmt)
# when finished
db.SqlCheck(SQLAllocHandle(SQL_HANDLE_STMT, db.hDb, db.stmt))
db.sqlCheck(SQLAllocHandle(SQL_HANDLE_STMT, db.hDb, db.stmt))
var q = dbFormat(query, args)
db.SqlCheck(SQLPrepare(db.stmt, q.PSQLCHAR, q.len.TSqlSmallInt))
db.SqlCheck(SQLExecute(db.stmt))
db.SqlCheck(SQLFetch(db.stmt))
db.sqlCheck(SQLPrepare(db.stmt, q.PSQLCHAR, q.len.TSqlSmallInt))
db.sqlCheck(SQLExecute(db.stmt))
var retcode = SQLFetch(db.stmt)
db.sqlCheck(retcode)
result=retcode
proc prepareFetchDirect(db: var DbConn, query: SqlQuery,
args: varargs[string, `$`]) {.
@@ -235,10 +235,10 @@ proc prepareFetchDirect(db: var DbConn, query: SqlQuery,
# requires calling
# properFreeResult(SQL_HANDLE_STMT, db.stmt)
# when finished
db.SqlCheck(SQLAllocHandle(SQL_HANDLE_STMT, db.hDb, db.stmt))
db.sqlCheck(SQLAllocHandle(SQL_HANDLE_STMT, db.hDb, db.stmt))
var q = dbFormat(query, args)
db.SqlCheck(SQLExecDirect(db.stmt, q.PSQLCHAR, q.len.TSqlSmallInt))
db.SqlCheck(SQLFetch(db.stmt))
db.sqlCheck(SQLExecDirect(db.stmt, q.PSQLCHAR, q.len.TSqlSmallInt))
db.sqlCheck(SQLFetch(db.stmt))
proc tryExec*(db: var DbConn, query: SqlQuery, args: varargs[string, `$`]): bool {.
tags: [ReadDbEffect, WriteDbEffect], raises: [].} =
@@ -285,20 +285,30 @@ iterator fastRows*(db: var DbConn, query: SqlQuery,
rowRes: Row
sz: TSqlSmallInt = 0
cCnt: TSqlSmallInt = 0.TSqlSmallInt
rCnt = -1
db.prepareFetch(query, args)
db.SqlCheck(SQLNumResultCols(db.stmt, cCnt))
db.SqlCheck(SQLRowCount(db.stmt, rCnt))
rowRes = newRow(cCnt)
for rNr in 1..rCnt:
for colId in 1..cCnt:
buf[0] = '\0'
db.SqlCheck(SQLGetData(db.stmt, colId.SqlUSmallInt, SQL_C_CHAR,
cast[cstring](buf.addr), 4095.TSqlSmallInt, sz.addr))
rowRes[colId-1] = $buf.cstring
db.SqlCheck(SQLFetchScroll(db.stmt, SQL_FETCH_NEXT, 1))
yield rowRes
res: TSqlSmallInt = 0.TSqlSmallInt
tempcCnt: TSqlSmallInt # temporary copy of cCnt; fixes fields coming back empty in release builds.
# tempcCnt stores the column count because, for unknown reasons, cCnt is reset
# to 0 after a SQLGetData call inside the loop (observed with release builds).
# The saved value is assigned back to cCnt after every iteration so that all
# fields are traversed.
res = db.prepareFetch(query, args)
if res == SQL_NO_DATA:
discard
elif res == SQL_SUCCESS:
res = SQLNumResultCols(db.stmt, cCnt)
rowRes = newRow(cCnt)
rowRes.setLen(max(cCnt,0))
tempcCnt = cCnt
while res == SQL_SUCCESS:
for colId in 1..cCnt:
buf[0] = '\0'
db.sqlCheck(SQLGetData(db.stmt, colId.SqlUSmallInt, SQL_C_CHAR,
cast[cstring](buf.addr), 4095.TSqlSmallInt, sz.addr))
rowRes[colId-1] = $buf.cstring
cCnt = tempcCnt
yield rowRes
res = SQLFetch(db.stmt)
db.sqlCheck(res)
properFreeResult(SQL_HANDLE_STMT, db.stmt)
iterator instantRows*(db: var DbConn, query: SqlQuery,
@@ -310,19 +320,30 @@ iterator instantRows*(db: var DbConn, query: SqlQuery,
rowRes: Row
sz: TSqlSmallInt = 0
cCnt: TSqlSmallInt = 0.TSqlSmallInt
rCnt = -1
db.prepareFetch(query, args)
db.SqlCheck(SQLNumResultCols(db.stmt, cCnt))
db.SqlCheck(SQLRowCount(db.stmt, rCnt))
rowRes = newRow(cCnt)
for rNr in 1..rCnt:
for colId in 1..cCnt:
buf[0] = '\0'
db.SqlCheck(SQLGetData(db.stmt, colId.SqlUSmallInt, SQL_C_CHAR,
cast[cstring](buf.addr), 4095.TSqlSmallInt, sz.addr))
rowRes[colId-1] = $buf.cstring
db.SqlCheck(SQLFetchScroll(db.stmt, SQL_FETCH_NEXT, 1))
yield (row: rowRes, len: cCnt.int)
res: TSqlSmallInt = 0.TSqlSmallInt
tempcCnt: TSqlSmallInt # temporary copy of cCnt; fixes fields coming back empty in release builds.
# tempcCnt stores the column count because, for unknown reasons, cCnt is reset
# to 0 after a SQLGetData call inside the loop (observed with release builds).
# The saved value is assigned back to cCnt after every iteration so that all
# fields are traversed.
res = db.prepareFetch(query, args)
if res == SQL_NO_DATA:
discard
elif res == SQL_SUCCESS:
res = SQLNumResultCols(db.stmt, cCnt)
rowRes = newRow(cCnt)
rowRes.setLen(max(cCnt,0))
tempcCnt = cCnt
while res == SQL_SUCCESS:
for colId in 1..cCnt:
buf[0] = '\0'
db.sqlCheck(SQLGetData(db.stmt, colId.SqlUSmallInt, SQL_C_CHAR,
cast[cstring](buf.addr), 4095.TSqlSmallInt, sz.addr))
rowRes[colId-1] = $buf.cstring
cCnt = tempcCnt
yield (row: rowRes, len: cCnt.int)
res = SQLFetch(db.stmt)
db.sqlCheck(res)
properFreeResult(SQL_HANDLE_STMT, db.stmt)
proc `[]`*(row: InstantRow, col: int): string {.inline.} =
@@ -339,43 +360,68 @@ proc getRow*(db: var DbConn, query: SqlQuery,
## Retrieves a single row. If the query doesn't return any rows, this proc
## will return a Row with empty strings for each column.
var
rowRes: Row
sz: TSqlSmallInt = 0.TSqlSmallInt
cCnt: TSqlSmallInt = 0.TSqlSmallInt
rCnt = -1
result = @[]
db.prepareFetch(query, args)
db.SqlCheck(SQLNumResultCols(db.stmt, cCnt))
db.SqlCheck(SQLRowCount(db.stmt, rCnt))
for colId in 1..cCnt:
db.SqlCheck(SQLGetData(db.stmt, colId.SqlUSmallInt, SQL_C_CHAR,
cast[cstring](buf.addr), 4095.TSqlSmallInt, sz.addr))
result.add($buf.cstring)
db.SqlCheck(SQLFetchScroll(db.stmt, SQL_FETCH_NEXT, 1))
res: TSqlSmallInt = 0.TSqlSmallInt
tempcCnt: TSqlSmallInt # temporary copy of cCnt; fixes fields coming back empty in release builds.
## tempcCnt stores the column count because, for unknown reasons, cCnt is reset
## to 0 after a SQLGetData call inside the loop (observed with release builds).
## The saved value is assigned back to cCnt after every iteration so that all
## fields are traversed.
res = db.prepareFetch(query, args)
if res == SQL_NO_DATA:
result = @[]
elif res == SQL_SUCCESS:
res = SQLNumResultCols(db.stmt, cCnt)
rowRes = newRow(cCnt)
rowRes.setLen(max(cCnt,0))
tempcCnt = cCnt
for colId in 1..cCnt:
buf[0] = '\0'
db.sqlCheck(SQLGetData(db.stmt, colId.SqlUSmallInt, SQL_C_CHAR,
cast[cstring](buf.addr), 4095.TSqlSmallInt, sz.addr))
rowRes[colId-1] = $buf.cstring
cCnt = tempcCnt
res = SQLFetch(db.stmt)
result = rowRes
db.sqlCheck(res)
properFreeResult(SQL_HANDLE_STMT, db.stmt)
proc getAllRows*(db: var DbConn, query: SqlQuery,
args: varargs[string, `$`]): seq[Row] {.
tags: [ReadDbEffect, WriteDbEffect], raises: [DbError].} =
tags: [ReadDbEffect, WriteDbEffect], raises: [DbError] .} =
## Executes the query and returns the whole result dataset.
var
rows: seq[Row] = @[]
rowRes: Row
sz: TSqlSmallInt = 0
cCnt: TSqlSmallInt = 0.TSqlSmallInt
rCnt = -1
db.prepareFetch(query, args)
db.SqlCheck(SQLNumResultCols(db.stmt, cCnt))
db.SqlCheck(SQLRowCount(db.stmt, rCnt))
result = @[]
for rNr in 1..rCnt:
rowRes = @[]
buf[0] = '\0'
for colId in 1..cCnt:
db.SqlCheck(SQLGetData(db.stmt, colId.SqlUSmallInt, SQL_C_CHAR,
cast[SqlPointer](buf.addr), 4095.TSqlSmallInt, sz.addr))
rowRes.add($buf.cstring)
db.SqlCheck(SQLFetchScroll(db.stmt, SQL_FETCH_NEXT, 1))
result.add(rowRes)
res: TSqlSmallInt = 0.TSqlSmallInt
tempcCnt: TSqlSmallInt # temporary copy of cCnt; fixes fields coming back empty in release builds.
## tempcCnt stores the column count because, for unknown reasons, cCnt is reset
## to 0 after a SQLGetData call inside the loop (observed with release builds).
## The saved value is assigned back to cCnt after every iteration so that all
## fields are traversed.
res = db.prepareFetch(query, args)
if res == SQL_NO_DATA:
result = @[]
elif res == SQL_SUCCESS:
res = SQLNumResultCols(db.stmt, cCnt)
rowRes = newRow(cCnt)
rowRes.setLen(max(cCnt,0))
tempcCnt = cCnt
while res == SQL_SUCCESS:
for colId in 1..cCnt:
buf[0] = '\0'
db.sqlCheck(SQLGetData(db.stmt, colId.SqlUSmallInt, SQL_C_CHAR,
cast[cstring](buf.addr), 4095.TSqlSmallInt, sz.addr))
rowRes[colId-1] = $buf.cstring
cCnt = tempcCnt
rows.add(rowRes)
res = SQLFetch(db.stmt)
result = rows
db.sqlCheck(res)
properFreeResult(SQL_HANDLE_STMT, db.stmt)
iterator rows*(db: var DbConn, query: SqlQuery,
@@ -407,10 +453,9 @@ proc tryInsertId*(db: var DbConn, query: SqlQuery,
if not tryExec(db, query, args):
result = -1'i64
else:
echo "DBMS: ",SqlGetDBMS(db).toLower()
result = -1'i64
try:
case SqlGetDBMS(db).toLower():
case sqlGetDBMS(db).toLower():
of "postgresql":
result = getValue(db, sql"SELECT LASTVAL();", []).parseInt
of "mysql":
@@ -438,15 +483,12 @@ proc execAffectedRows*(db: var DbConn, query: SqlQuery,
## Runs the query (typically "UPDATE") and returns the
## number of affected rows
result = -1
var res = SQLAllocHandle(SQL_HANDLE_STMT, db.hDb, db.stmt.SqlHandle)
if res != SQL_SUCCESS: dbError(db)
db.sqlCheck(SQLAllocHandle(SQL_HANDLE_STMT, db.hDb, db.stmt.SqlHandle))
var q = dbFormat(query, args)
res = SQLPrepare(db.stmt, q.PSQLCHAR, q.len.TSqlSmallInt)
if res != SQL_SUCCESS: dbError(db)
db.sqlCheck(SQLPrepare(db.stmt, q.PSQLCHAR, q.len.TSqlSmallInt))
rawExec(db, query, args)
var rCnt = -1
result = SQLRowCount(db.hDb, rCnt)
if res != SQL_SUCCESS: dbError(db)
db.sqlCheck(SQLRowCount(db.hDb, rCnt))
properFreeResult(SQL_HANDLE_STMT, db.stmt)
result = rCnt
@@ -501,5 +543,5 @@ proc setEncoding*(connection: DbConn, encoding: string): bool {.
##
## Sets the encoding of a database connection, returns true for
## success, false for failure.
#result = set_character_set(connection, encoding) == 0
dbError("setEncoding() is currently not implemented by the db_odbc module")
##result = set_character_set(connection, encoding) == 0
dbError("setEncoding() is currently not implemented by the db_odbc module")

View File

@@ -108,6 +108,8 @@ __clang__
defined __SUNPRO_C || \
defined __xlC__
# define NIM_THREADVAR __thread
#elif defined __TINYC__
# define NIM_THREADVAR
#else
# error "Cannot define NIM_THREADVAR"
#endif

View File

@@ -1,7 +1,7 @@
#
#
# Nim's Runtime Library
# (c) Copyright 2015 Adam Strzelecki
# (c) Copyright 2016 Eugene Kabanov
#
# See the file "copying.txt", included in this
# distribution, for details about the copyright.
@@ -11,20 +11,35 @@
from posix import Timespec
# Filters:
const
EVFILT_READ* = -1
EVFILT_WRITE* = -2
EVFILT_AIO* = -3
EVFILT_VNODE* = -4
EVFILT_PROC* = -5
EVFILT_SIGNAL* = -6
EVFILT_TIMER* = -7
EVFILT_MACHPORT* = -8
EVFILT_FS* = -9
EVFILT_USER* = -10
# -11 is unused
EVFILT_VM* = -12
when defined(macosx) or defined(freebsd) or defined(openbsd):
const
EVFILT_READ* = -1
EVFILT_WRITE* = -2
EVFILT_AIO* = -3 ## attached to aio requests
EVFILT_VNODE* = -4 ## attached to vnodes
EVFILT_PROC* = -5 ## attached to struct proc
EVFILT_SIGNAL* = -6 ## attached to struct proc
EVFILT_TIMER* = -7 ## timers
elif defined(netbsd):
const
EVFILT_READ* = 0
EVFILT_WRITE* = 1
EVFILT_AIO* = 2 ## attached to aio requests
EVFILT_VNODE* = 3 ## attached to vnodes
EVFILT_PROC* = 4 ## attached to struct proc
EVFILT_SIGNAL* = 5 ## attached to struct proc
EVFILT_TIMER* = 6 ## timers (in ms)
when defined(macosx):
const
EVFILT_MACHPORT* = -8 ## Mach portsets
EVFILT_FS* = -9 ## filesystem events
EVFILT_USER* = -10 ## user events
EVFILT_VM = -12 ## virtual memory events
elif defined(freebsd):
const
EVFILT_FS* = -9 ## filesystem events
EVFILT_LIO* = -10 ## attached to lio requests
EVFILT_USER* = -11 ## user events
# Actions:
const
@@ -40,21 +55,92 @@ const
EV_CLEAR* = 0x0020 ## Clear event state after reporting.
EV_RECEIPT* = 0x0040 ## Force EV_ERROR on success, data == 0
EV_DISPATCH* = 0x0080 ## Disable event after reporting.
EV_SYSFLAGS* = 0xF000 ## Reserved by system
EV_DROP* = 0x1000 ## Note should be dropped
EV_FLAG1* = 0x2000 ## Filter-specific flag
# Return values:
const
EV_EOF* = 0x8000 ## EOF detected
EV_ERROR* = 0x4000 ## Error, data contains errno
when defined(macosx) or defined(freebsd):
# EVFILT_USER is not supported by OpenBSD and NetBSD
#
# data/hint flags/masks for EVFILT_USER, shared with userspace
#
# On input, the top two bits of fflags specifies how the lower twenty four
# bits should be applied to the stored value of fflags.
#
# On output, the top two bits will always be set to NOTE_FFNOP and the
# remaining twenty four bits will contain the stored fflags value.
const
NOTE_FFNOP* = 0x00000000'u32 ## ignore input fflags
NOTE_FFAND* = 0x40000000'u32 ## AND fflags
NOTE_FFOR* = 0x80000000'u32 ## OR fflags
NOTE_FFCOPY* = 0xc0000000'u32 ## copy fflags
NOTE_FFCTRLMASK* = 0xc0000000'u32 ## masks for operations
NOTE_FFLAGSMASK* = 0x00ffffff'u32
NOTE_TRIGGER* = 0x01000000'u32 ## Cause the event to be triggered
## for output.
# data/hint flags for EVFILT_{READ|WRITE}, shared with userspace
const
NOTE_LOWAT* = 0x0001 ## low water mark
# data/hint flags for EVFILT_VNODE, shared with userspace
const
NOTE_DELETE* = 0x0001 ## vnode was removed
NOTE_WRITE* = 0x0002 ## data contents changed
NOTE_EXTEND* = 0x0004 ## size increased
NOTE_ATTRIB* = 0x0008 ## attributes changed
NOTE_LINK* = 0x0010 ## link count changed
NOTE_RENAME* = 0x0020 ## vnode was renamed
NOTE_REVOKE* = 0x0040 ## vnode access was revoked
# data/hint flags for EVFILT_PROC, shared with userspace
const
NOTE_EXIT* = 0x80000000'u32 ## process exited
NOTE_FORK* = 0x40000000'u32 ## process forked
NOTE_EXEC* = 0x20000000'u32 ## process exec'd
NOTE_PCTRLMASK* = 0xf0000000'u32 ## mask for hint bits
NOTE_PDATAMASK* = 0x000fffff'u32 ## mask for pid
# additional flags for EVFILT_PROC
const
NOTE_TRACK* = 0x00000001'u32 ## follow across forks
NOTE_TRACKERR* = 0x00000002'u32 ## could not track child
NOTE_CHILD* = 0x00000004'u32 ## am a child process
when defined(macosx) or defined(freebsd):
# additional flags for EVFILT_TIMER
const
NOTE_SECONDS* = 0x00000001'u32 ## data is seconds
NOTE_MSECONDS* = 0x00000002'u32 ## data is milliseconds
NOTE_USECONDS* = 0x00000004'u32 ## data is microseconds
NOTE_NSECONDS* = 0x00000008'u32 ## data is nanoseconds
else:
# NetBSD and OpenBSD do not support the NOTE_{TIME} constants, but
# support EVFILT_TIMER with granularity of milliseconds.
const
NOTE_MSECONDS* = 0x00000000'u32
type
## This definition does not fully match NetBSD's "struct kevent",
## but it works and has been tested.
KEvent* {.importc: "struct kevent",
header: "<sys/event.h>", pure, final.} = object
ident*: cuint ## identifier for this event (uintptr_t)
filter*: cshort ## filter for event
flags*: cushort ## general flags
fflags*: cuint ## filter-specific flags
data*: cuint ## filter-specific data (intptr_t)
#udata*: ptr void ## opaque user data identifier
header: """#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>""", pure, final.} = object
ident* : uint ## identifier for this event (uintptr_t)
filter* : cshort ## filter for event
flags* : cushort ## general flags
fflags* : cuint ## filter-specific flags
data* : int ## filter-specific data (intptr_t)
udata* : pointer ## opaque user data identifier
proc kqueue*(): cint {.importc: "kqueue", header: "<sys/event.h>".}
## Creates new queue and returns its descriptor.
@@ -65,7 +151,7 @@ proc kevent*(kqFD: cint,
{.importc: "kevent", header: "<sys/event.h>".}
## Manipulates queue for given ``kqFD`` descriptor.
proc EV_SET*(event: ptr KEvent, ident: cuint, filter: cshort, flags: cushort,
fflags: cuint, data: cuint, udata: ptr void)
proc EV_SET*(event: ptr KEvent, ident: uint, filter: cshort, flags: cushort,
fflags: cuint, data: int, udata: pointer)
{.importc: "EV_SET", header: "<sys/event.h>".}
## Fills event with given data.
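A hedged sketch of driving this binding: register interest in stdin becoming readable and wait for one event. It assumes the conventional kevent(kq, changeList, nchanges, eventList, nevents, timeout) parameter order; the explicit conversions are only there to match the declared field types:

var kq = kqueue()
var change, ev: KEvent
EV_SET(addr change, 0'u, EVFILT_READ.cshort, EV_ADD.cushort, 0, 0, nil)
if kevent(kq, addr change, 1, addr ev, 1, nil) > 0:
  echo "fd ", ev.ident, " is readable"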

View File

@@ -486,11 +486,11 @@ type
l_onoff*: cint ## Indicates whether linger option is enabled.
l_linger*: cint ## Linger time, in seconds.
InPort* = int16 ## unsigned!
InAddrScalar* = int32 ## unsigned!
InPort* = uint16
InAddrScalar* = uint32
InAddrT* {.importc: "in_addr_t", pure, final,
header: "<netinet/in.h>".} = int32 ## unsigned!
header: "<netinet/in.h>".} = uint32
InAddr* {.importc: "struct in_addr", pure, final,
header: "<netinet/in.h>".} = object ## struct in_addr

View File

@@ -508,13 +508,39 @@ template foldl*(sequence, operation: expr): expr =
## assert subtraction == -15, "Subtraction is (((5)-9)-11)"
## assert multiplication == 495, "Multiplication is (((5)*9)*11)"
## assert concatenation == "nimiscool"
assert sequence.len > 0, "Can't fold empty sequences"
var result {.gensym.}: type(sequence[0])
result = sequence[0]
for i in 1..<sequence.len:
let s = sequence
assert s.len > 0, "Can't fold empty sequences"
var result {.gensym.}: type(s[0])
result = s[0]
for i in 1..<s.len:
let
a {.inject.} = result
b {.inject.} = sequence[i]
b {.inject.} = s[i]
result = operation
result
template foldl*(sequence, operation: expr, first): expr =
## Template to fold a sequence from left to right, returning the accumulation.
##
## This version of ``foldl`` gets a starting parameter. This makes it possible
## to accumulate the sequence into a different type than the sequence elements.
##
## The ``operation`` parameter should be an expression which uses the variables
## ``a`` and ``b`` for each step of the fold. The ``first`` parameter is the
## start value (the first ``a``) and therefore defines the type of the result.
## Example:
##
## .. code-block::
## let
## numbers = @[0, 8, 1, 5]
## digits = foldl(numbers, a & (chr(b + ord('0'))), "")
## assert digits == "0815"
var result {.gensym.}: type(first)
result = first
for x in items(sequence):
let
a {.inject.} = result
b {.inject.} = x
result = operation
result
@@ -544,12 +570,13 @@ template foldr*(sequence, operation: expr): expr =
## assert subtraction == 7, "Subtraction is (5-(9-(11)))"
## assert multiplication == 495, "Multiplication is (5*(9*(11)))"
## assert concatenation == "nimiscool"
assert sequence.len > 0, "Can't fold empty sequences"
var result {.gensym.}: type(sequence[0])
result = sequence[sequence.len - 1]
for i in countdown(sequence.len - 2, 0):
let s = sequence
assert s.len > 0, "Can't fold empty sequences"
var result {.gensym.}: type(s[0])
result = sequence[s.len - 1]
for i in countdown(s.len - 2, 0):
let
a {.inject.} = sequence[i]
a {.inject.} = s[i]
b {.inject.} = result
result = operation
result

View File

@@ -0,0 +1,95 @@
#
#
# Nim's Runtime Library
# (c) Copyright 2015 Andreas Rumpf
#
# See the file "copying.txt", included in this
# distribution, for details about the copyright.
#
## Shared list support.
{.push stackTrace:off.}
import
locks
const
ElemsPerNode = 100
type
SharedListNode[A] = ptr object
next: SharedListNode[A]
dataLen: int
d: array[ElemsPerNode, A]
SharedList*[A] = object ## generic shared list
head, tail: SharedListNode[A]
lock*: Lock
template withLock(t, x: untyped) =
acquire(t.lock)
x
release(t.lock)
proc iterAndMutate*[A](x: var SharedList[A]; action: proc(x: A): bool) =
## iterates over the list. If 'action' returns true, the
## current item is removed from the list.
withLock(x):
var n = x.head
while n != nil:
var i = 0
while i < n.dataLen:
# action can add new items at the end, so release the lock:
release(x.lock)
if action(n.d[i]):
acquire(x.lock)
let t = x.tail
n.d[i] = t.d[t.dataLen]
dec t.dataLen
else:
acquire(x.lock)
inc i
n = n.next
iterator items*[A](x: var SharedList[A]): A =
withLock(x):
var it = x.head
while it != nil:
for i in 0..it.dataLen-1:
yield it.d[i]
it = it.next
proc add*[A](x: var SharedList[A]; y: A) =
withLock(x):
var node: SharedListNode[A]
if x.tail == nil or x.tail.dataLen == ElemsPerNode:
node = cast[type node](allocShared0(sizeof(node[])))
node.next = x.tail
x.tail = node
if x.head == nil: x.head = node
else:
node = x.tail
node.d[node.dataLen] = y
inc(node.dataLen)
proc initSharedList*[A](): SharedList[A] =
initLock result.lock
result.head = nil
result.tail = nil
proc clear*[A](t: var SharedList[A]) =
withLock(t):
var it = t.head
while it != nil:
let nxt = it.next
deallocShared(it)
it = nxt
t.head = nil
t.tail = nil
proc deinitSharedList*[A](t: var SharedList[A]) =
clear(t)
deinitLock t.lock
{.pop.}
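A short usage sketch for the new shared list (a minimal single-threaded illustration; the point of the type is of course cross-thread sharing):

var squares = initSharedList[int]()
squares.add(1)
squares.add(4)
squares.add(9)
for x in squares:          # 'items' holds the list's lock while iterating
  echo x
deinitSharedList(squares)  # clears the nodes and destroys the lock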

View File

@@ -129,4 +129,7 @@ template delImpl() {.dirty, immediate.} =
r = t.data[i].hcode and msk # "home" location of key@i
if not ((i >= r and r > j) or (r > j and j > i) or (j > i and i >= r)):
break
shallowCopy(t.data[j], t.data[i]) # data[j] will be marked EMPTY next loop
when defined(js):
t.data[j] = t.data[i]
else:
shallowCopy(t.data[j], t.data[i]) # data[j] will be marked EMPTY next loop

View File

@@ -92,6 +92,10 @@ type
{.deprecated: [TLevel: Level, PLogger: Logger, PConsoleLogger: ConsoleLogger,
PFileLogger: FileLogger, PRollingFileLogger: RollingFileLogger].}
var
level {.threadvar.}: Level ## global log filter
handlers {.threadvar.}: seq[Logger] ## handlers with their own log levels
proc substituteLog*(frmt: string, level: Level, args: varargs[string, `$`]): string =
## Format a log message using the ``frmt`` format string, ``level`` and varargs.
## See the module documentation for the format string syntax.
@@ -133,13 +137,13 @@ method log*(logger: Logger, level: Level, args: varargs[string, `$`]) {.
method log*(logger: ConsoleLogger, level: Level, args: varargs[string, `$`]) =
## Logs to the console using ``logger`` only.
if level >= logger.levelThreshold:
if level >= logging.level and level >= logger.levelThreshold:
writeLine(stdout, substituteLog(logger.fmtStr, level, args))
if level in {lvlError, lvlFatal}: flushFile(stdout)
method log*(logger: FileLogger, level: Level, args: varargs[string, `$`]) =
## Logs to a file using ``logger`` only.
if level >= logger.levelThreshold:
if level >= logging.level and level >= logger.levelThreshold:
writeLine(logger.file, substituteLog(logger.fmtStr, level, args))
if level in {lvlError, lvlFatal}: flushFile(logger.file)
@@ -224,7 +228,7 @@ proc rotate(logger: RollingFileLogger) =
method log*(logger: RollingFileLogger, level: Level, args: varargs[string, `$`]) =
## Logs to a file using rolling ``logger`` only.
if level >= logger.levelThreshold:
if level >= logging.level and level >= logger.levelThreshold:
if logger.curLine >= logger.maxLines:
logger.file.close()
rotate(logger)
@@ -238,9 +242,6 @@ method log*(logger: RollingFileLogger, level: Level, args: varargs[string, `$`])
# --------
var level {.threadvar.}: Level ## global log filter
var handlers {.threadvar.}: seq[Logger] ## handlers with their own log levels
proc logLoop(level: Level, args: varargs[string, `$`]) =
for logger in items(handlers):
if level >= logger.levelThreshold:
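With this change a message is written only if it passes both the global filter and the handler's own threshold. A hedged sketch, assuming the module's addHandler, newConsoleLogger and setLogFilter API:

addHandler(newConsoleLogger(lvlDebug))  # handler accepts everything >= lvlDebug
setLogFilter(lvlInfo)                   # but the global filter is stricter
debug("not printed: below the global filter")
info("printed: passes both thresholds")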

View File

@@ -44,6 +44,7 @@ when not defined(js) and not defined(nimscript):
const
PI* = 3.1415926535897932384626433 ## the circle constant PI (Ludolph's number)
TAU* = 2.0 * PI ## the circle constant TAU (= 2 * PI)
E* = 2.71828182845904523536028747 ## Euler's number
MaxFloat64Precision* = 16 ## maximum number of meaningful digits

View File

@@ -219,31 +219,67 @@ proc getAddrInfo*(address: string, port: Port, domain: Domain = AF_INET,
proc dealloc*(ai: ptr AddrInfo) =
freeaddrinfo(ai)
proc ntohl*(x: int32): int32 =
## Converts 32-bit integers from network to host byte order.
proc ntohl*(x: uint32): uint32 =
## Converts 32-bit unsigned integers from network to host byte order.
## On machines where the host byte order is the same as network byte order,
## this is a no-op; otherwise, it performs a 4-byte swap operation.
when cpuEndian == bigEndian: result = x
else: result = (x shr 24'i32) or
(x shr 8'i32 and 0xff00'i32) or
(x shl 8'i32 and 0xff0000'i32) or
(x shl 24'i32)
else: result = (x shr 24'u32) or
(x shr 8'u32 and 0xff00'u32) or
(x shl 8'u32 and 0xff0000'u32) or
(x shl 24'u32)
proc ntohs*(x: int16): int16 =
## Converts 16-bit integers from network to host byte order. On machines
## where the host byte order is the same as network byte order, this is
## a no-op; otherwise, it performs a 2-byte swap operation.
template ntohl*(x: int32): expr {.deprecated.} =
## Converts 32-bit integers from network to host byte order.
## On machines where the host byte order is the same as network byte order,
## this is a no-op; otherwise, it performs a 4-byte swap operation.
## **Warning**: This template is deprecated since 0.14.0; IPv4
## addresses are now treated as unsigned integers. Please use the unsigned
## version of this template.
cast[int32](ntohl(cast[uint32](x)))
proc ntohs*(x: uint16): uint16 =
## Converts 16-bit unsigned integers from network to host byte order. On
## machines where the host byte order is the same as network byte order,
## this is a no-op; otherwise, it performs a 2-byte swap operation.
when cpuEndian == bigEndian: result = x
else: result = (x shr 8'i16) or (x shl 8'i16)
else: result = (x shr 8'u16) or (x shl 8'u16)
template htonl*(x: int32): expr =
template ntohs*(x: int16): expr {.deprecated.} =
## Converts 16-bit integers from network to host byte order. On
## machines where the host byte order is the same as network byte order,
## this is a no-op; otherwise, it performs a 2-byte swap operation.
## **Warning**: This template is deprecated since 0.14.0, where port
## numbers became unsigned integers. Please use the unsigned version of
## this template.
cast[int16](ntohs(cast[uint16](x)))
template htonl*(x: int32): expr {.deprecated.} =
## Converts 32-bit integers from host to network byte order. On machines
## where the host byte order is the same as network byte order, this is
## a no-op; otherwise, it performs a 4-byte swap operation.
## **Warning**: This template is deprecated since 0.14.0; IPv4
## addresses are now treated as unsigned integers. Please use the unsigned
## version of this template.
nativesockets.ntohl(x)
template htons*(x: int16): expr =
## Converts 16-bit positive integers from host to network byte order.
template htonl*(x: uint32): expr =
## Converts 32-bit unsigned integers from host to network byte order. On
## machines where the host byte order is the same as network byte order,
## this is a no-op; otherwise, it performs a 4-byte swap operation.
nativesockets.ntohl(x)
template htons*(x: int16): expr {.deprecated.} =
## Converts 16-bit integers from host to network byte order.
## On machines where the host byte order is the same as network byte
## order, this is a no-op; otherwise, it performs a 2-byte swap operation.
## **Warning**: This template is deprecated since 0.14.0, where port
## numbers became unsigned integers. Please use the unsigned version of
## this template.
nativesockets.ntohs(x)
template htons*(x: uint16): expr =
## Converts 16-bit unsigned integers from host to network byte order.
## On machines where the host byte order is the same as network byte
## order, this is a no-op; otherwise, it performs a 2-byte swap operation.
nativesockets.ntohs(x)
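A small check of the unsigned overloads added above; no casts are needed any more when treating port numbers as uint16:

let hostPort = 8080'u16
let netPort = htons(hostPort)     # unsigned 16-bit byte swap (a no-op on big-endian hosts)
assert ntohs(netPort) == hostPort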

View File

@@ -368,7 +368,7 @@ proc bindAddr*(socket: Socket, port = Port(0), address = "") {.
name.sin_family = toInt(AF_INET).int16
else:
name.sin_family = toInt(AF_INET)
name.sin_port = htons(int16(port))
name.sin_port = htons(port.uint16)
name.sin_addr.s_addr = htonl(INADDR_ANY)
if bindAddr(socket.fd, cast[ptr SockAddr](addr(name)),
sizeof(name).SockLen) < 0'i32:

View File

@@ -1350,15 +1350,12 @@ proc getAppFilename*(): string {.rtl, extern: "nos$1", tags: [ReadIOEffect].} =
## Returns the filename of the application's executable.
##
## This procedure will resolve symlinks.
##
## **Note**: This does not work reliably on BSD.
# Linux: /proc/<pid>/exe
# Solaris:
# /proc/<pid>/object/a.out (filename only)
# /proc/<pid>/path/a.out (complete pathname)
# *BSD (and maybe Darwin too):
# /proc/<pid>/file
# FreeBSD: /proc/<pid>/file
when defined(windows):
when useWinUnicode:
var buf = newWideCString("", 256)
@@ -1368,15 +1365,6 @@ proc getAppFilename*(): string {.rtl, extern: "nos$1", tags: [ReadIOEffect].} =
result = newString(256)
var len = getModuleFileNameA(0, result, 256)
setlen(result, int(len))
elif defined(linux) or defined(aix):
result = getApplAux("/proc/self/exe")
if result.len == 0: result = getApplHeuristic()
elif defined(solaris):
result = getApplAux("/proc/" & $getpid() & "/path/a.out")
if result.len == 0: result = getApplHeuristic()
elif defined(freebsd):
result = getApplAux("/proc/" & $getpid() & "/file")
if result.len == 0: result = getApplHeuristic()
elif defined(macosx):
var size: cuint32
getExecPath1(nil, size)
@@ -1386,9 +1374,15 @@ proc getAppFilename*(): string {.rtl, extern: "nos$1", tags: [ReadIOEffect].} =
if result.len > 0:
result = result.expandFilename
else:
when defined(linux) or defined(aix) or defined(netbsd):
result = getApplAux("/proc/self/exe")
elif defined(solaris):
result = getApplAux("/proc/" & $getpid() & "/path/a.out")
elif defined(freebsd):
result = getApplAux("/proc/" & $getpid() & "/file")
# little heuristic that may work on other POSIX-like systems:
result = string(getEnv("_"))
if result.len == 0: result = getApplHeuristic()
if result.len == 0:
result = getApplHeuristic()
proc getApplicationFilename*(): string {.rtl, extern: "nos$1", deprecated.} =
## Returns the filename of the application's executable.
@@ -1404,7 +1398,6 @@ proc getApplicationDir*(): string {.rtl, extern: "nos$1", deprecated.} =
proc getAppDir*(): string {.rtl, extern: "nos$1", tags: [ReadIOEffect].} =
## Returns the directory of the application's executable.
## **Note**: This does not work reliably on BSD.
result = splitFile(getAppFilename()).dir
proc sleep*(milsecs: int) {.rtl, extern: "nos$1", tags: [TimeEffect].} =

View File

@@ -164,7 +164,7 @@ proc `-` *[T](x: Rational[T], y: T): Rational[T] =
proc `-` *[T](x: T, y: Rational[T]): Rational[T] =
## Subtract rational `y` from int `x`.
result.num = - x * y.den + y.num
result.num = x * y.den - y.num
result.den = y.den
proc `-=` *[T](x: var Rational[T], y: Rational[T]) =
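A worked check of the corrected formula, using the module's `//` constructor: for x = 2 and y = 1/3 the numerator is x*y.den - y.num = 2*3 - 1 = 5, so:

assert 2 - 1//3 == 5//3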

View File

@@ -134,11 +134,16 @@ proc rope*(s: string): Rope {.rtl, extern: "nro$1Str".} =
## Converts a string to a rope.
if s.len == 0:
result = nil
elif cacheEnabled:
result = insertInCache(s, cache)
cache = result
else:
result = newRope(s)
when nimvm:
# No caching in VM context
result = newRope(s)
else:
if cacheEnabled:
result = insertInCache(s, cache)
cache = result
else:
result = newRope(s)
proc rope*(i: BiggestInt): Rope {.rtl, extern: "nro$1BiggestInt".} =
## Converts an int to a rope.

View File

@@ -882,7 +882,7 @@ proc abbrev*(s: string, possibilities: openArray[string]): int =
# ---------------------------------------------------------------------------
proc join*(a: openArray[string], sep: string): string {.
proc join*(a: openArray[string], sep: string = ""): string {.
noSideEffect, rtl, extern: "nsuJoinSep".} =
## Concatenates all strings in `a` separating them with `sep`.
if len(a) > 0:
@@ -896,16 +896,15 @@ proc join*(a: openArray[string], sep: string): string {.
else:
result = ""
proc join*(a: openArray[string]): string {.
noSideEffect, rtl, extern: "nsuJoin".} =
## Concatenates all strings in `a`.
if len(a) > 0:
var L = 0
for i in 0..high(a): inc(L, a[i].len)
result = newStringOfCap(L)
for i in 0..high(a): add(result, a[i])
else:
result = ""
proc join*[T: not string](a: openArray[T], sep: string = ""): string {.
noSideEffect, rtl.} =
## Converts all elements in `a` to strings using `$` and concatenates them
## with `sep`.
result = ""
for i, x in a:
if i > 0:
add(result, sep)
add(result, $x)
type
SkipTable = array[char, int]
@@ -1721,3 +1720,8 @@ when isMainModule:
doAssert(not isUpper("AAcc"))
doAssert(not isUpper("A#$"))
doAssert(unescape(r"\x013", "", "") == "\x013")
doAssert join(["foo", "bar", "baz"]) == "foobarbaz"
doAssert join(@["foo", "bar", "baz"], ", ") == "foo, bar, baz"
doAssert join([1, 2, 3]) == "123"
doAssert join(@[1, 2, 3], ", ") == "1, 2, 3"

View File

@@ -1288,6 +1288,10 @@ const
hasSharedHeap = defined(boehmgc) or defined(gogc) # don't share heaps; every thread has its own
taintMode = compileOption("taintmode")
when hasThreadSupport and defined(tcc) and not compileOption("tlsEmulation"):
# tcc doesn't support TLS
{.error: "``--tlsEmulation:on`` must be used when using threads with tcc backend".}
when defined(boehmgc):
when defined(windows):
const boehmLib = "boehmgc.dll"

View File

@@ -197,6 +197,51 @@ when defined(windows) and not someGcc:
proc cas*[T: bool|int|ptr](p: ptr T; oldValue, newValue: T): bool =
interlockedCompareExchange(p, cast[int](newValue), cast[int](oldValue)) != 0
# XXX fix for 64 bit build
elif defined(tcc) and not defined(windows):
when defined(amd64):
{.emit:"""
static int __tcc_cas(int *ptr, int oldVal, int newVal)
{
unsigned char ret;
__asm__ __volatile__ (
" lock\n"
" cmpxchgq %2,%1\n"
" sete %0\n"
: "=q" (ret), "=m" (*ptr)
: "r" (newVal), "m" (*ptr), "a" (oldVal)
: "memory");
if (ret)
return 0;
else
return 1;
}
""".}
else:
assert sizeof(int) == 4
{.emit:"""
static int __tcc_cas(int *ptr, int oldVal, int newVal)
{
unsigned char ret;
__asm__ __volatile__ (
" lock\n"
" cmpxchgl %2,%1\n"
" sete %0\n"
: "=q" (ret), "=m" (*ptr)
: "r" (newVal), "m" (*ptr), "a" (oldVal)
: "memory");
if (ret)
return 0;
else
return 1;
}
""".}
proc tcc_cas(p: ptr int; oldValue, newValue: int): bool
{.importc: "__tcc_cas", nodecl.}
proc cas*[T: bool|int|ptr](p: ptr T; oldValue, newValue: T): bool =
tcc_cas(cast[ptr int](p), cast[int](oldValue), cast[int](newValue))
else:
# this is valid for GCC and Intel C++
proc cas*[T: bool|int|ptr](p: ptr T; oldValue, newValue: T): bool
@@ -207,7 +252,7 @@ else:
when (defined(x86) or defined(amd64)) and someGcc:
proc cpuRelax* {.inline.} =
{.emit: """asm volatile("pause" ::: "memory");""".}
elif someGcc:
elif someGcc or defined(tcc):
proc cpuRelax* {.inline.} =
{.emit: """asm volatile("" ::: "memory");""".}
elif (defined(x86) or defined(amd64)) and defined(vcc):
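A minimal sketch of code built on this primitive (a toy test-and-set spin loop; not from the commit):

var flag: int                       # 0 = free, 1 = taken
while not cas(addr flag, 0, 1):     # atomically flip 0 -> 1, retry on failure
  cpuRelax()
# ... critical section ...
flag = 0                            # hand the flag back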

View File

@@ -23,16 +23,16 @@ proc rawWrite(f: File, s: string) =
proc nimLoadLibraryError(path: string) =
# carefully written to avoid memory allocation:
stdout.rawWrite("could not load: ")
stdout.rawWrite(path)
stdout.rawWrite("\n")
stderr.rawWrite("could not load: ")
stderr.rawWrite(path)
stderr.rawWrite("\n")
quit(1)
proc procAddrError(name: cstring) {.noinline.} =
# carefully written to avoid memory allocation:
stdout.rawWrite("could not import: ")
stdout.write(name)
stdout.rawWrite("\n")
stderr.rawWrite("could not import: ")
stderr.write(name)
stderr.rawWrite("\n")
quit(1)
# this code was inspired from Lua's source code:
@@ -71,7 +71,7 @@ when defined(posix):
when defined(nimDebugDlOpen):
let error = dlerror()
if error != nil:
c_fprintf(c_stdout, "%s\n", error)
c_fprintf(c_stderr, "%s\n", error)
proc nimGetProcAddr(lib: LibHandle, name: cstring): ProcAddr =
result = dlsym(lib, name)

lib/system/gc_stack.nim (new file, 382 lines)
View File

@@ -0,0 +1,382 @@
#
# Nim's Runtime Library
# (c) Copyright 2016 Andreas Rumpf
#
# See the file "copying.txt", included in this
# distribution, for details about the copyright.
#
# "Stack GC" for embedded devices or ultra performance requirements.
include osalloc
# We manage memory as a thread local stack. Since the allocation pointer
# is detached from the control flow pointer, this model is vastly more
# useful than the traditional programming model while almost as safe.
# Individual objects can also be deleted but no coalescing is performed.
# Stacks can also be moved from one thread to another.
# We also support 'finalizers'.
type
Finalizer {.compilerproc.} = proc (self: pointer) {.nimcall, benign.}
# A ref type can have a finalizer that is called before the object's
# storage is freed.
AlignType = BiggestFloat
ObjHeader = object
typ: PNimType
nextFinal: ptr ObjHeader # next object with finalizer
Hole = object # stacks can have holes. Otherwise 'growObj' would be insane.
zeroTyp: pointer # overlaid with 'typ' field. Always 'nil'.
size: int # size of the free slot
Chunk = ptr BaseChunk
BaseChunk = object
next: Chunk
size: int
head, last: ptr ObjHeader # first and last object in chunk that
# has a finalizer attached to it
type
StackPtr = object
chunk: pointer
remaining: int
current: Chunk
MemRegion* = object
remaining: int
chunk: pointer
head, last: Chunk
nextChunkSize, totalSize: int
hole: ptr Hole # we support individual freeing
lock: SysLock
var
region {.threadVar.}: MemRegion
template withRegion*(r: MemRegion; body: untyped) =
let oldRegion = region
region = r
try:
body
finally:
region = oldRegion
template inc(p: pointer, s: int) =
p = cast[pointer](cast[int](p) +% s)
template `+!`(p: pointer, s: int): pointer =
cast[pointer](cast[int](p) +% s)
template `-!`(p: pointer, s: int): pointer =
cast[pointer](cast[int](p) +% s)
proc allocSlowPath(r: var MemRegion; size: int) =
# we need to ensure that the underlying linked list
# stays small. Say we want to grab 16GB of RAM with some
# exponential growth function. So we allocate 16KB, then
# 32 KB, 64 KB, 128KB, 256KB, 512KB, 1MB, 2MB, 4MB,
# 8MB, 16MB, 32MB, 64MB, 128MB, 512MB, 1GB, 2GB, 4GB, 8GB,
# 16GB --> list contains only 20 elements! That's reasonable.
if (r.totalSize and 1) == 0:
r.nextChunkSize =
if r.totalSize < 64 * 1024: PageSize*4
else: r.nextChunkSize*2
var s = align(size+sizeof(BaseChunk), PageSize)
var fresh: Chunk
if s > r.nextChunkSize:
fresh = cast[Chunk](osAllocPages(s))
else:
fresh = cast[Chunk](osTryAllocPages(r.nextChunkSize))
if fresh == nil:
fresh = cast[Chunk](osAllocPages(s))
# lowest bit in totalSize is the "don't increase nextChunkSize" flag
inc r.totalSize
else:
s = r.nextChunkSize
fresh.size = s
fresh.final = nil
r.totalSize += s
let old = r.last
if old == nil:
r.head = fresh
else:
r.last.next = fresh
r.chunk = fresh +! sizeof(BaseChunk)
r.last = fresh
r.remaining = s - sizeof(BaseChunk)
proc alloc(r: var MemRegion; size: int): pointer {.inline.} =
if unlikely(r.remaining < size): allocSlowPath(r, size)
dec(r.remaining, size)
result = r.chunk
inc r.chunk, size
proc runFinalizers(c: Chunk) =
var it = c.head
while it != nil:
# individually freed objects with a finalizer stay in the list, but
# their typ is nil then:
if it.typ != nil and it.typ.finalizer != nil:
(cast[Finalizer](cell.typ.finalizer))(cell+!sizeof(ObjHeader))
it = it.next
proc dealloc(r: var MemRegion; p: pointer) =
let it = p-!sizeof(ObjHeader)
if it.typ != nil and it.typ.finalizer != nil:
(cast[Finalizer](cell.typ.finalizer))(p)
it.typ = nil
proc deallocAll(head: Chunk) =
var it = head
while it != nil:
runFinalizers(it)
osDeallocPages(it, it.size)
it = it.next
proc deallocAll*(r: var MemRegion) =
deallocAll(r.head)
zeroMem(addr r, sizeof r)
proc obstackPtr*(r: MemRegion): StackPtr =
result.chunk = r.chunk
result.remaining = r.remaining
result.current = r.last
proc setObstackPtr*(r: MemRegion; sp: StackPtr) =
# free everything after 'sp':
if sp.current != nil:
deallocAll(sp.current.next)
r.chunk = sp.chunk
r.remaining = sp.remaining
r.last = sp.current
proc joinRegion*(dest: var MemRegion; src: MemRegion) =
# merging is not hard.
if dest.head.isNil:
dest.head = src.head
else:
dest.last.next = src.head
dest.last = src.last
dest.chunk = src.chunk
dest.remaining = src.remaining
dest.nextChunkSize = max(dest.nextChunkSize, src.nextChunkSize)
dest.totalSize += src.totalSize
if dest.hole.size < src.hole.size:
dest.hole = src.hole
proc isOnHeap*(r: MemRegion; p: pointer): bool =
# the last chunk is the largest, so check it first. It's also special
# in that it contains the current bump pointer:
if r.last >= p and p < r.chunk:
return true
var it = r.head
while it != r.last:
if it >= p and p <= it+!it.size: return true
it = it.next
proc isInteriorPointer(r: MemRegion; p: pointer): pointer =
discard " we cannot patch stack pointers anyway!"
type
PointerStackChunk = object
next, prev: ptr PointerStackChunk
len: int
data: array[128, pointer]
template head(s: PointerStackChunk): untyped = s.prev
template tail(s: PointerStackChunk): untyped = s.next
include chains
proc push(r: var MemRegion; s: var PointerStackChunk; x: pointer) =
if s.len < high(s.data):
s.data[s.len] = x
inc s.len
else:
let fresh = cast[ptr PointerStackChunk](alloc(r, sizeof(PointerStackChunk)))
fresh.len = 1
fresh.data[0] = x
fresh.next = nil
fresh.prev = nil
append(s, fresh)
proc genericDeepCopyAux(dr: var MemRegion; stack: var PointerStackChunk;
dest, src: pointer, mt: PNimType) {.benign.}
proc genericDeepCopyAux(dr: var MemRegion; stack: var PointerStackChunk;
dest, src: pointer, n: ptr TNimNode) {.benign.} =
var
d = cast[ByteAddress](dest)
s = cast[ByteAddress](src)
case n.kind
of nkSlot:
genericDeepCopyAux(cast[pointer](d +% n.offset),
cast[pointer](s +% n.offset), n.typ)
of nkList:
for i in 0..n.len-1:
genericDeepCopyAux(dest, src, n.sons[i])
of nkCase:
var dd = selectBranch(dest, n)
var m = selectBranch(src, n)
# reset if different branches are in use; note different branches also
# imply that's not self-assignment (``x = x``)!
if m != dd and dd != nil:
genericResetAux(dest, dd)
copyMem(cast[pointer](d +% n.offset), cast[pointer](s +% n.offset),
n.typ.size)
if m != nil:
genericDeepCopyAux(dest, src, m)
of nkNone: sysAssert(false, "genericDeepCopyAux")
proc copyDeepString(dr: var MemRegion; stack: var PointerStackChunk; src: NimString): NimString {.inline.} =
result = rawNewStringNoInit(dr, src.len)
result.len = src.len
c_memcpy(result.data, src.data, src.len + 1)
proc genericDeepCopyAux(dr: var MemRegion; stack: var PointerStackChunk;
dest, src: pointer, mt: PNimType) =
var
d = cast[ByteAddress](dest)
s = cast[ByteAddress](src)
sysAssert(mt != nil, "genericDeepCopyAux 2")
case mt.kind
of tyString:
var x = cast[PPointer](dest)
var s2 = cast[PPointer](s)[]
if s2 == nil:
x[] = nil
else:
x[] = copyDeepString(cast[NimString](s2))
of tySequence:
var s2 = cast[PPointer](src)[]
var seq = cast[PGenericSeq](s2)
var x = cast[PPointer](dest)
if s2 == nil:
x[] = nil
return
sysAssert(dest != nil, "genericDeepCopyAux 3")
x[] = newSeq(mt, seq.len)
var dst = cast[ByteAddress](cast[PPointer](dest)[])
for i in 0..seq.len-1:
genericDeepCopyAux(dr, stack,
cast[pointer](dst +% i*% mt.base.size +% GenericSeqSize),
cast[pointer](cast[ByteAddress](s2) +% i *% mt.base.size +%
GenericSeqSize),
mt.base)
of tyObject:
# we need to copy m_type field for tyObject, as it could be empty for
# sequence reallocations:
var pint = cast[ptr PNimType](dest)
pint[] = cast[ptr PNimType](src)[]
if mt.base != nil:
genericDeepCopyAux(dr, stack, dest, src, mt.base)
genericDeepCopyAux(dr, stack, dest, src, mt.node)
of tyTuple:
genericDeepCopyAux(dr, stack, dest, src, mt.node)
of tyArray, tyArrayConstr:
for i in 0..(mt.size div mt.base.size)-1:
genericDeepCopyAux(dr, stack,
cast[pointer](d +% i*% mt.base.size),
cast[pointer](s +% i*% mt.base.size), mt.base)
of tyRef:
let s2 = cast[PPointer](src)[]
if s2 == nil:
cast[PPointer](dest)[] = nil
else:
# we modify the header of the cell temporarily; instead of the type
# field we store a forwarding pointer. XXX This is bad when the cloning
# fails due to OOM etc.
let x = usrToCell(s2)
let forw = cast[int](x.typ)
if (forw and 1) == 1:
# we stored a forwarding pointer, so let's use that:
let z = cast[pointer](forw and not 1)
unsureAsgnRef(cast[PPointer](dest), z)
else:
let realType = x.typ
let z = newObj(realType, realType.base.size)
unsureAsgnRef(cast[PPointer](dest), z)
x.typ = cast[PNimType](cast[int](z) or 1)
genericDeepCopyAux(dr, stack, z, s2, realType.base)
x.typ = realType
else:
copyMem(dest, src, mt.size)
proc joinAliveDataFromRegion*(dest: var MemRegion; src: var MemRegion;
root: pointer): pointer =
# we mark the alive data and copy only alive data over to 'dest'.
# This is O(liveset) but it nicely compacts memory, so it's fine.
# We use the 'typ' field as a forwarding pointer. The forwarding
# pointers have bit 0 set, so we can disambiguate them.
# We allocate a temporary stack in 'src' that we later free:
var s: PointerStackChunk
s.len = 1
s.data[0] = root
while s.len > 0:
var p: pointer
if s.tail == nil:
p = s.data[s.len-1]
dec s.len
else:
p = s.tail.data[s.tail.len-1]
dec s.tail.len
if s.tail.len == 0:
unlink(s, s.tail)
proc rawNewObj(r: var MemRegion, typ: PNimType, size: int): pointer =
var res = cast[ptr ObjHeader](alloc(r, size + sizeof(ObjHeader)))
res.typ = typ
if typ.finalizer != nil:
res.nextFinal = r.chunk.head
r.chunk.head = res
result = res +! sizeof(ObjHeader)
proc newObj(typ: PNimType, size: int): pointer {.compilerRtl.} =
result = rawNewObj(typ, size, region)
zeroMem(result, size)
when defined(memProfiler): nimProfile(size)
proc newObjNoInit(typ: PNimType, size: int): pointer {.compilerRtl.} =
result = rawNewObj(typ, size, region)
when defined(memProfiler): nimProfile(size)
proc newSeq(typ: PNimType, len: int): pointer {.compilerRtl.} =
let size = addInt(mulInt(len, typ.base.size), GenericSeqSize)
result = newObj(typ, size)
cast[PGenericSeq](result).len = len
cast[PGenericSeq](result).reserved = len
proc newObjRC1(typ: PNimType, size: int): pointer {.compilerRtl.} =
result = rawNewObj(typ, size, gch)
zeroMem(result, size)
proc newSeqRC1(typ: PNimType, len: int): pointer {.compilerRtl.} =
let size = addInt(mulInt(len, typ.base.size), GenericSeqSize)
result = newObj(typ, size)
cast[PGenericSeq](result).len = len
cast[PGenericSeq](result).reserved = len
proc growObj(old: pointer, newsize: int, gch: var GcHeap): pointer =
collectCT(gch)
var ol = usrToCell(old)
sysAssert(ol.typ != nil, "growObj: 1")
gcAssert(ol.typ.kind in {tyString, tySequence}, "growObj: 2")
var res = cast[PCell](rawAlloc(gch.region, newsize + sizeof(Cell)))
var elemSize = 1
if ol.typ.kind != tyString: elemSize = ol.typ.base.size
var oldsize = cast[PGenericSeq](old).len*elemSize + GenericSeqSize
copyMem(res, ol, oldsize + sizeof(Cell))
zeroMem(cast[pointer](cast[ByteAddress](res)+% oldsize +% sizeof(Cell)),
newsize-oldsize)
sysAssert((cast[ByteAddress](res) and (MemAlign-1)) == 0, "growObj: 3")
result = cellToUsr(res)
proc growObj(old: pointer, newsize: int): pointer {.rtl.} =
result = growObj(old, newsize, region)
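A hedged sketch of the intended use of the exported withRegion template from this new file (illustrative only; most of the module is internal runtime support and is not meant to be called directly):

var scratch: MemRegion
withRegion(scratch):
  # inside this block the runtime allocates from the given region rather
  # than the thread's default one
  discard
deallocAll(scratch)   # releases every chunk the region owns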

View File

@@ -263,9 +263,7 @@ proc toJSStr(s: string): cstring {.asmNoStackFrame, compilerproc.} =
proc mnewString(len: int): string {.asmNoStackFrame, compilerproc.} =
when defined(nimphp):
asm """
$result = array();
for($i = 0; $i < `len`; $i++) $result[] = chr(0);
return $result;
return str_repeat(chr(0),`len`);
"""
else:
asm """

View File

@@ -427,7 +427,7 @@ elif defined(nogc) and defined(useMalloc):
proc initGC() = discard
proc newObj(typ: PNimType, size: int): pointer {.compilerproc.} =
result = alloc(size)
result = alloc0(size)
proc newSeq(typ: PNimType, len: int): pointer {.compilerproc.} =
result = newObj(typ, addInt(mulInt(len, typ.base.size), GenericSeqSize))
cast[PGenericSeq](result).len = len
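
Switching newObj to alloc0 matters because compiler-allocated objects are assumed to start out zeroed, so ref, seq and string fields read as nil before their first assignment rather than as garbage. A short sketch of that assumption (the T type is made up for illustration):

type T = object
  r: ref int

var p = cast[ptr T](alloc0(sizeof(T)))
doAssert p.r == nil        # zeroed memory reads as nil, not garbage
dealloc(p)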

View File

@@ -94,7 +94,7 @@ elif defined(posix):
proc mmap(adr: pointer, len: int, prot, flags, fildes: cint,
off: int): pointer {.header: "<sys/mman.h>".}
proc munmap(adr: pointer, len: int) {.header: "<sys/mman.h>".}
proc munmap(adr: pointer, len: int): cint {.header: "<sys/mman.h>".}
proc osAllocPages(size: int): pointer {.inline.} =
result = mmap(nil, size, PROT_READ or PROT_WRITE,
@@ -108,7 +108,7 @@ elif defined(posix):
if result == cast[pointer](-1): result = nil
proc osDeallocPages(p: pointer, size: int) {.inline} =
when reallyOsDealloc: munmap(p, size)
when reallyOsDealloc: discard munmap(p, size)
elif defined(windows):
const

View File

@@ -74,22 +74,22 @@ proc reprChar(x: char): string {.compilerRtl.} =
proc reprEnum(e: int, typ: PNimType): string {.compilerRtl.} =
# we read an 'int' but this may have been too large, so mask the other bits:
let e = if typ.size == 1: e and 0xff
elif typ.size == 2: e and 0xffff
elif typ.size == 4: e and 0xffffffff
else: e
let b = (sizeof(int)-typ.size)*8 # bits
let m = 1 shl (b-1) # mask
var o = e and ((1 shl b)-1) # clear upper bits
o = (o xor m) - m # sign extend
# XXX we need a proper narrowing based on signedness here
#e and ((1 shl (typ.size*8)) - 1)
if ntfEnumHole notin typ.flags:
if e <% typ.node.len:
return $typ.node.sons[e].name
if o <% typ.node.len:
return $typ.node.sons[o].name
else:
# ugh we need a slow linear search:
var n = typ.node
var s = n.sons
for i in 0 .. n.len-1:
if s[i].offset == e: return $s[i].name
result = $e & " (invalid data!)"
if s[i].offset == o: return $s[i].name
result = $o & " (invalid data!)"
type
PByteArray = ptr array[0.. 0xffff, int8]
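
The (o xor m) - m line is the standard two's-complement sign-extension idiom, with m selecting the sign bit of the narrowed value. A worked example in isolation for an 8-bit value, independent of the exact bit count the proc derives:

let raw = 0xFE                       # the 8-bit pattern of the ordinal -2
let m   = 1 shl 7                    # mask for the sign bit of an 8-bit value
let o   = ((raw and 0xFF) xor m) - m
doAssert o == -2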
@@ -260,6 +260,7 @@ when not defined(useNimRtl):
of tyInt16: add result, $int(cast[ptr int16](p)[])
of tyInt32: add result, $int(cast[ptr int32](p)[])
of tyInt64: add result, $(cast[ptr int64](p)[])
of tyUInt: add result, $(cast[ptr uint](p)[])
of tyUInt8: add result, $(cast[ptr uint8](p)[])
of tyUInt16: add result, $(cast[ptr uint16](p)[])
of tyUInt32: add result, $(cast[ptr uint32](p)[])

View File

@@ -82,12 +82,11 @@ when NoFakeVars:
const
IOFBF = cint(0)
IONBF = cint(4)
elif defined(macosx) or defined(linux):
else:
# On all systems I could find, including Linux, Mac OS X, and the BSDs,
# IOFBF is 0 and IONBF is 2.
const
IOFBF = cint(0)
IONBF = cint(2)
else:
{.error: "IOFBF not ported to your platform".}
else:
var
IOFBF {.importc: "_IOFBF", nodecl.}: cint
@@ -271,12 +270,33 @@ const
# we always use binary here as for Nim the OS line ending
# should not be translated.
when defined(posix) and not defined(nimscript):
type
Mode {.importc: "mode_t", header: "<sys/types.h>".} = cint
Stat {.importc: "struct stat",
header: "<sys/stat.h>", final, pure.} = object ## struct stat
st_mode: Mode ## Mode of file
proc S_ISDIR(m: Mode): bool {.importc, header: "<sys/stat.h>".}
## Test for a directory.
proc fstat(a1: cint, a2: var Stat): cint {.importc, header: "<sys/stat.h>".}
proc open(f: var File, filename: string,
mode: FileMode = fmRead,
bufSize: int = -1): bool =
var p: pointer = fopen(filename, FormatOpen[mode])
if p != nil:
when defined(posix) and not defined(nimscript):
# How `fopen` handles opening a directory is not specified in ISO C and
# POSIX. We do not want to handle directories as regular files that can
# be opened.
var f2 = cast[File](p)
var res: Stat
if fstat(getFileHandle(f2), res) >= 0'i32 and S_ISDIR(res.st_mode):
close(f2)
return false
result = true
f = cast[File](p)
if bufSize > 0 and bufSize <= high(cint).int:
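
The fstat/S_ISDIR check changes the observable behaviour of open on POSIX: a directory path is now rejected instead of being handed back as a seemingly valid File. A minimal sketch ("/tmp" is only an illustrative directory path):

var f: File
doAssert not open(f, "/tmp")   # a directory now fails cleanly instead of "opening"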

View File

@@ -412,7 +412,7 @@ const
FD_SETSIZE* = 64
MSG_PEEK* = 2
INADDR_ANY* = 0
INADDR_ANY* = 0'u32
INADDR_LOOPBACK* = 0x7F000001
INADDR_BROADCAST* = -1
INADDR_NONE* = -1
@@ -441,12 +441,12 @@ type
sa_data: array[0..13, char]
InAddr* {.importc: "IN_ADDR", header: "winsock2.h".} = object
s_addr*: int32 # IP address
s_addr*: uint32 # IP address
Sockaddr_in* {.importc: "SOCKADDR_IN",
header: "winsock2.h".} = object
sin_family*: int16
sin_port*: int16 # unsigned
sin_port*: uint16
sin_addr*: InAddr
sin_zero*: array[0..7, char]
@@ -456,7 +456,7 @@ type
Sockaddr_in6* {.importc: "SOCKADDR_IN6",
header: "ws2tcpip.h".} = object
sin6_family*: int16
sin6_port*: int16 # unsigned
sin6_port*: uint16
sin6_flowinfo*: int32 # unsigned
sin6_addr*: In6_addr
sin6_scope_id*: int32 # unsigned
@@ -590,7 +590,7 @@ proc getnameinfo*(a1: ptr SockAddr, a2: SockLen,
a6: SockLen, a7: cint): cint {.
stdcall, importc: "getnameinfo", dynlib: ws2dll.}
proc inet_addr*(cp: cstring): int32 {.
proc inet_addr*(cp: cstring): uint32 {.
stdcall, importc: "inet_addr", dynlib: ws2dll.}
proc WSAFDIsSet(s: SocketHandle, set: var TFdSet): bool {.
@@ -833,9 +833,9 @@ type inet_ntop_proc = proc(family: cint, paddr: pointer, pStringBuffer: cstring,
var inet_ntop_real: inet_ntop_proc = nil
let L = loadLib(ws2dll)
if L != nil:
inet_ntop_real = cast[inet_ntop_proc](symAddr(L, "inet_ntop"))
let ws2 = loadLib(ws2dll)
if ws2 != nil:
inet_ntop_real = cast[inet_ntop_proc](symAddr(ws2, "inet_ntop"))
proc WSAAddressToStringA(pAddr: ptr SockAddr, addrSize: DWORD, unused: pointer, pBuff: cstring, pBuffSize: ptr DWORD): cint {.stdcall, importc, dynlib: ws2dll.}
proc inet_ntop_emulated(family: cint, paddr: pointer, pStringBuffer: cstring,

View File

@@ -45,7 +45,7 @@ proc createServer(port: TPort) {.async.} =
name.sin_family = toInt(AF_INET).int16
else:
name.sin_family = toInt(AF_INET)
name.sin_port = htons(int16(port))
name.sin_port = htons(uint16(port))
name.sin_addr.s_addr = htonl(INADDR_ANY)
if bindAddr(server.SocketHandle, cast[ptr SockAddr](addr(name)),
sizeof(name).Socklen) < 0'i32:
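
The move from int16 to uint16 for ports (and uint32 for addresses) exists because port numbers above 32767 do not fit in a signed 16-bit integer. A tiny sketch (illustration only):

let port = 40000
doAssert uint16(port) == 40000'u16   # fits as unsigned
# int16(port) would fail here: 40000 is out of range for int16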

View File

@@ -9,7 +9,6 @@ discard """
34
34
4
4
4'''
"""
@@ -21,4 +20,4 @@ const str = "123456789"
for i in TRange.low .. TRange.high:
echo str[i] #This works fine
echo str[int(i) .. int(TRange.high)] #So does this
echo str[i .. TRange.high] #The compiler complains about this
#echo str[i .. TRange.high] #The compiler complains about this

View File

@@ -0,0 +1,19 @@
discard """
errormsg: "type mismatch"
line: 18
"""
# bug #3998
type Vec3[T] = array[3, T]
var vg: Vec3[float32] = Vec3([1.0f, 2.0f, 3.0f])
echo "vg[0]: " & $vg[0] # prints 1.0 OK
echo "vg[1]: " & $vg[1] # prints 2.0 OK
echo "vg[2]: " & $vg[2] # prints 3.0 OK
echo ""
var ve: Vec3[float64]
ve = vg # compiles, this MUST NOT be allowed!

View File

@@ -110,7 +110,6 @@ type
TGameState* = enum
Lobby, Transitioning, Field
const
TAU* = PI * 2.0
MomentMult* = 0.62 ## global moment of inertia multiplier
var
cfg: PZoneSettings

View File

@@ -13,15 +13,15 @@ type
nil
type
Parser*[T, O] = distinct proc (input: Input[T]): Result[T, O]
Parser*[T, O] = proc (input: Input[T]): Result[T, O]
proc unit*[T, O](v: O): Parser[T, O] =
Parser(proc (inp: Input[T]): Result[T, O] =
Result[T, O](kind: rkSuccess, output: v, input: inp))
result = proc (inp: Input[T]): Result[T, O] =
Result[T, O](kind: rkSuccess, output: v, input: inp)
proc fail*[T, O](): Parser[T, O] =
Parser(proc (inp: Input[T]): Result[T, O] =
Result(kind: rkFailure))
result = proc (inp: Input[T]): Result[T, O] =
Result(kind: rkFailure)
method runInput[T, O](self: Parser[T, O], inp: Input[T]): Result[T, O] =
# hmmm ..
@@ -33,39 +33,39 @@ method run*[T, O](self: Parser[T, O], toks: seq[T]): Result[T, O] =
self.runInput(Input[T](toks: toks, index: 0))
method chain*[T, O1, O2](self: Parser[T, O1], nextp: proc (v: O1): Parser[T, O2]): Parser[T, O2] =
Parser(proc (inp: Input[T]): Result[T, O2] =
result = proc (inp: Input[T]): Result[T, O2] =
let r = self.runInput(inp)
case r.kind:
of rkSuccess:
nextp(r.output).runInput(r.input)
of rkFailure:
Result[T, O2](kind: rkFailure))
Result[T, O2](kind: rkFailure)
method skip[T](self: Input[T], n: int): Input[T] =
Input[T](toks: self.toks, index: self.index + n)
proc pskip*[T](n: int): Parser[T, tuple[]] =
Parser(proc (inp: Input[T]): Result[T, tuple[]] =
result = proc (inp: Input[T]): Result[T, tuple[]] =
if inp.index + n <= inp.toks.len:
Result[T, tuple[]](kind: rkSuccess, output: (), input: inp.skip(n))
else:
Result[T, tuple[]](kind: rkFailure))
Result[T, tuple[]](kind: rkFailure)
proc tok*[T](t: T): Parser[T, T] =
Parser(proc (inp: Input[T]): Result[T, T] =
result = proc (inp: Input[T]): Result[T, T] =
if inp.index < inp.toks.len and inp.toks[inp.index] == t:
pskip[T](1).then(unit[T, T](t)).runInput(inp)
else:
Result[T, T](kind: rkFailure))
Result[T, T](kind: rkFailure)
proc `+`*[T, O](first: Parser[T, O], second: Parser[T, O]): Parser[T, O] =
Parser(proc (inp: Input[T]): Result[T, O] =
result = proc (inp: Input[T]): Result[T, O] =
let r = first.runInput(inp)
case r.kind
of rkSuccess:
r
else:
second.runInput(inp))
second.runInput(inp)
# end of primitives (definitions involving Parser(..))
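
With Parser now a plain proc type and the constructors assigning to result, the primitives above compose as ordinary first-class procs. A minimal usage sketch under those definitions (it assumes run and the Result fields shown above are exported as written):

let aOrB = tok('a') + tok('b')   # try 'a' first, fall back to 'b'
let r = aOrB.run(@['b', 'c'])
if r.kind == rkSuccess:
  echo r.output                  # --> b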

tests/seq/tshallowseq.nim Normal file
View File

@@ -0,0 +1,17 @@
discard """
output: '''@[1, 42, 3]
@[1, 42, 3]
'''
"""
proc xxx() =
var x: seq[int] = @[1, 2, 3]
var y: seq[int]
system.shallowCopy(y, x)
y[1] = 42
echo y
echo x
xxx()