Merge remote-tracking branch 'upstream/devel' into b10

This commit is contained in:
flywind
2021-03-29 10:35:17 +08:00
80 changed files with 1664 additions and 989 deletions

View File

@@ -17,11 +17,11 @@ jobs:
Linux_amd64:
vmImage: 'ubuntu-16.04'
CPU: amd64
# Linux_i386:
# # bug #17325: fails on 'ubuntu-16.04' because it now errors with:
# # g++-multilib : Depends: gcc-multilib (>= 4:5.3.1-1ubuntu1) but it is not going to be installed
# vmImage: 'ubuntu-18.04'
# CPU: i386
Linux_i386:
# bug #17325: fails on 'ubuntu-16.04' because it now errors with:
# g++-multilib : Depends: gcc-multilib (>= 4:5.3.1-1ubuntu1) but it is not going to be installed
vmImage: 'ubuntu-18.04'
CPU: i386
OSX_amd64:
vmImage: 'macOS-10.15'
CPU: amd64

View File

@@ -4,6 +4,8 @@
## Standard library additions and changes
- Added `sections` iterator in `parsecfg`.
- Make custom op in macros.quote work for all statements.
- On Windows the SSL library now checks for valid certificates.
@@ -212,7 +214,7 @@
- `std/options` changed `$some(3)` to `"some(3)"` instead of `"Some(3)"`
and `$none(int)` to `"none(int)"` instead of `"None[int]"`.
- Added `algorithm.merge`.
@@ -265,6 +267,10 @@
- `typedesc[Foo]` now renders as such instead of `type Foo` in compiler messages.
- The unary minus in `-1` is now part of the integer literal, it is now parsed as a single token.
This implies that edge cases like `-128'i8` finally work correctly.
- Custom numeric literals (e.g. `-128'bignum`) are now supported.
## Compiler changes

View File

@@ -1357,9 +1357,6 @@ proc genTypeInfoV2Impl(m: BModule, t, origType: PType, name: Rope; info: TLineIn
if t.kind == tyObject and t.len > 0 and t[0] != nil and optEnableDeepCopy in m.config.globalOptions:
discard genTypeInfoV1(m, t, info)
proc moduleOpenForCodegen(m: BModule; module: int32): bool {.inline.} =
result = module < m.g.modules.len and m.g.modules[module] != nil
proc genTypeInfoV2(m: BModule, t: PType; info: TLineInfo): Rope =
let origType = t
# distinct types can have their own destructors
@@ -1384,7 +1381,7 @@ proc genTypeInfoV2(m: BModule, t: PType; info: TLineInfo): Rope =
m.typeInfoMarkerV2[sig] = result
let owner = t.skipTypes(typedescPtrs).itemId.module
if owner != m.module.position and moduleOpenForCodegen(m, owner):
if owner != m.module.position and moduleOpenForCodegen(m.g.graph, FileIndex owner):
# make sure the type info is created in the owner module
discard genTypeInfoV2(m.g.modules[owner], origType, info)
# reference the type info as extern here
@@ -1463,7 +1460,7 @@ proc genTypeInfoV1(m: BModule, t: PType; info: TLineInfo): Rope =
return prefixTI.rope & result & ")".rope
var owner = t.skipTypes(typedescPtrs).itemId.module
if owner != m.module.position and moduleOpenForCodegen(m, owner):
if owner != m.module.position and moduleOpenForCodegen(m.g.graph, FileIndex owner):
# make sure the type info is created in the owner module
discard genTypeInfoV1(m.g.modules[owner], origType, info)
# reference the type info as extern here

View File

@@ -22,6 +22,7 @@ when not defined(leanCompiler):
import strutils except `%` # collides with ropes.`%`
from ic / ic import ModuleBackendFlag
from modulegraphs import ModuleGraph, PPassContext
from lineinfos import
warnGcMem, errXMustBeCompileTime, hintDependency, errGenerated, errCannotOpenFile
@@ -1501,6 +1502,33 @@ proc genMainProc(m: BModule) =
if m.config.cppCustomNamespace.len > 0:
m.s[cfsProcs].add openNamespaceNim(m.config.cppCustomNamespace)
proc registerInitProcs*(g: BModuleList; m: PSym; flags: set[ModuleBackendFlag]) =
  ## Called from the IC (incremental compilation) backend: re-registers the
  ## Init/DatInit entry points of the cached module `m` with the program-wide
  ## module list `g`, driven by the `flags` replayed from the module's .rod
  ## file (see `whichInitProcs` for how they are computed).
  if HasDatInitProc in flags:
    let datInit = getSomeNameForModule(m) & "DatInit000"
    # Forward-declare the proc and schedule its call in the data-init phase.
    g.mainModProcs.addf("N_LIB_PRIVATE N_NIMCALL(void, $1)(void);$N", [datInit])
    g.mainDatInit.addf("\t$1();$N", [datInit])
  if HasModuleInitProc in flags:
    let init = getSomeNameForModule(m) & "Init000"
    g.mainModProcs.addf("N_LIB_PRIVATE N_NIMCALL(void, $1)(void);$N", [init])
    let initCall = "\t$1();$N" % [init]
    if sfMainModule in m.flags:
      g.mainModInit.add(initCall)
    elif sfSystemModule in m.flags:
      # systemInit must be called right after systemDatInit, if any
      g.mainDatInit.add(initCall)
    else:
      g.otherModsInit.add(initCall)
proc whichInitProcs*(m: BModule): set[ModuleBackendFlag] =
  ## Called from IC: determines which init entry points module `m` actually
  ## generated, so the result can be stored in the .rod file and later fed
  ## back to `registerInitProcs` without redoing codegen.
  result = {}
  if m.hcrOn or m.preInitProc.s(cpsInit).len > 0 or m.preInitProc.s(cpsStmts).len > 0:
    result.incl HasModuleInitProc
  for i in cfsTypeInit1..cfsDynLibInit:
    # any non-empty data-init section means a DatInit proc was emitted
    if m.s[i].len != 0:
      result.incl HasDatInitProc
      break
proc registerModuleToMain(g: BModuleList; m: BModule) =
let
init = m.getInitName
@@ -1595,6 +1623,7 @@ proc genDatInitCode(m: BModule) =
if moduleDatInitRequired:
m.s[cfsDatInitProc].add(prc)
#rememberFlag(m.g.graph, m.module, HasDatInitProc)
# Very similar to the contents of symInDynamicLib - basically only the
# things needed for the hot code reloading runtime procs to be loaded
@@ -1725,6 +1754,7 @@ proc genInitCode(m: BModule) =
if moduleInitRequired or sfMainModule in m.module.flags:
m.s[cfsInitProc].add(prc)
#rememberFlag(m.g.graph, m.module, HasModuleInitProc)
genDatInitCode(m)

View File

@@ -130,3 +130,4 @@ proc initDefines*(symbols: StringTableRef) =
defineSymbol("nimHasWarningAsError")
defineSymbol("nimHasHintAsError")
defineSymbol("nimHasSpellSuggest")
defineSymbol("nimHasCustomLiterals")

View File

@@ -416,7 +416,7 @@ proc nodeToHighlightedHtml(d: PDoc; n: PNode; result: var Rope; renderFlags: TRe
of tkOpr:
dispA(d.conf, result, "<span class=\"Operator\">$1</span>", "\\spanOperator{$1}",
[escLit])
of tkStrLit..tkTripleStrLit:
of tkStrLit..tkTripleStrLit, tkCustomLit:
dispA(d.conf, result, "<span class=\"StringLit\">$1</span>",
"\\spanStringLit{$1}", [escLit])
of tkCharLit:

View File

@@ -44,9 +44,11 @@ proc generateCodeForModule(g: ModuleGraph; m: var LoadedModule; alive: var Alive
cgen.genTopLevelStmt(bmod, n)
finalCodegenActions(g, bmod, newNodeI(nkStmtList, m.module.info))
m.fromDisk.backendFlags = cgen.whichInitProcs(bmod)
proc replayTypeInfo(g: ModuleGraph; m: var LoadedModule; origin: FileIndex) =
  ## Replays the type-info names recorded in `m`'s .rod data, registering
  ## each one in the module graph as being emitted by file `origin` so that
  ## other modules can reference them as extern instead of re-emitting them.
  for x in mitems(m.fromDisk.emittedTypeInfo):
    #echo "found type ", x, " for file ", int(origin)
    g.emittedTypeInfo[x] = origin
proc addFileToLink(config: ConfigRef; m: PSym) =
@@ -112,12 +114,16 @@ proc generateCode*(g: ModuleGraph) =
resetForBackend(g)
var alive = computeAliveSyms(g.packed, g.config)
when false:
for i in 0..high(g.packed):
echo i, " is of status ", g.packed[i].status, " ", toFullPath(g.config, FileIndex(i))
for i in 0..high(g.packed):
# case statement here to enforce exhaustive checks.
case g.packed[i].status
of undefined:
discard "nothing to do"
of loading:
of loading, stored:
assert false
of storing, outdated:
generateCodeForModule(g, g.packed[i], alive)
@@ -133,3 +139,7 @@ proc generateCode*(g: ModuleGraph) =
else:
addFileToLink(g.config, g.packed[i].module)
replayTypeInfo(g, g.packed[i], FileIndex(i))
if g.backend == nil:
g.backend = cgendata.newModuleList(g)
registerInitProcs(BModuleList(g.backend), g.packed[i].module, g.packed[i].fromDisk.backendFlags)

View File

@@ -22,8 +22,13 @@ type
options: TOptions
globalOptions: TGlobalOptions
ModuleBackendFlag* = enum
HasDatInitProc
HasModuleInitProc
PackedModule* = object ## the parts of a PackedEncoder that are part of the .rod file
definedSymbols: string
moduleFlags: TSymFlags
includes: seq[(LitId, string)] # first entry is the module filename itself
imports: seq[LitId] # the modules this module depends on
toReplay: PackedTree # pragmas and VM specific state to replay.
@@ -43,6 +48,7 @@ type
enumToStringProcs*: seq[(PackedItemId, PackedItemId)]
emittedTypeInfo*: seq[string]
backendFlags*: set[ModuleBackendFlag]
sh*: Shared
cfg: PackedConfig
@@ -134,6 +140,7 @@ proc initEncoder*(c: var PackedEncoder; m: var PackedModule; moduleSym: PSym; co
m.sh = Shared()
c.thisModule = moduleSym.itemId.module
c.config = config
m.moduleFlags = moduleSym.flags
m.bodies = newTreeFrom(m.topLevel)
m.toReplay = newTreeFrom(m.topLevel)
@@ -506,6 +513,7 @@ proc loadRodFile*(filename: AbsoluteFile; m: var PackedModule; config: ConfigRef
f.loadSection configSection
f.loadPrim m.definedSymbols
f.loadPrim m.moduleFlags
f.loadPrim m.cfg
if f.err == ok and not configIdentical(m, config) and not ignoreConfig:
@@ -556,6 +564,9 @@ proc loadRodFile*(filename: AbsoluteFile; m: var PackedModule; config: ConfigRef
loadSeqSection enumToStringProcsSection, m.enumToStringProcs
loadSeqSection typeInfoSection, m.emittedTypeInfo
f.loadSection backendFlagsSection
f.loadPrim m.backendFlags
close(f)
result = f.err
@@ -573,6 +584,7 @@ proc saveRodFile*(filename: AbsoluteFile; encoder: var PackedEncoder; m: var Pac
f.storeHeader()
f.storeSection configSection
f.storePrim m.definedSymbols
f.storePrim m.moduleFlags
f.storePrim m.cfg
template storeSeqSection(section, data) {.dirty.} =
@@ -619,6 +631,9 @@ proc saveRodFile*(filename: AbsoluteFile; encoder: var PackedEncoder; m: var Pac
storeSeqSection enumToStringProcsSection, m.enumToStringProcs
storeSeqSection typeInfoSection, m.emittedTypeInfo
f.storeSection backendFlagsSection
f.storePrim m.backendFlags
close(f)
encoder.disable()
if f.err != ok:
@@ -646,7 +661,8 @@ type
storing, # state is strictly for stress-testing purposes
loading,
loaded,
outdated
outdated,
stored # store is complete, no further additions possible
LoadedModule* = object
status*: ModuleStatus
@@ -673,7 +689,7 @@ proc toFileIndexCached*(c: var PackedDecoder; g: PackedModuleGraph; thisModule:
proc translateLineInfo(c: var PackedDecoder; g: var PackedModuleGraph; thisModule: int;
x: PackedLineInfo): TLineInfo =
assert g[thisModule].status in {loaded, storing}
assert g[thisModule].status in {loaded, storing, stored}
result = TLineInfo(line: x.line, col: x.col,
fileIndex: toFileIndexCached(c, g, thisModule, x.file))
@@ -806,7 +822,7 @@ proc loadSym(c: var PackedDecoder; g: var PackedModuleGraph; thisModule: int; s:
result = nil
else:
let si = moduleIndex(c, g, thisModule, s)
assert g[si].status in {loaded, storing}
assert g[si].status in {loaded, storing, stored}
if not g[si].symsInit:
g[si].symsInit = true
setLen g[si].syms, g[si].fromDisk.sh.syms.len
@@ -852,7 +868,7 @@ proc loadType(c: var PackedDecoder; g: var PackedModuleGraph; thisModule: int; t
result = nil
else:
let si = moduleIndex(c, g, thisModule, t)
assert g[si].status in {loaded, storing}
assert g[si].status in {loaded, storing, stored}
assert t.item > 0
if not g[si].typesInit:
@@ -897,8 +913,7 @@ proc setupLookupTables(g: var PackedModuleGraph; conf: ConfigRef; cache: IdentCa
info: newLineInfo(fileIdx, 1, 1),
position: int(fileIdx))
m.module.owner = newPackage(conf, cache, fileIdx)
if fileIdx == conf.projectMainIdx2:
m.module.flags.incl sfMainModule
m.module.flags = m.fromDisk.moduleFlags
proc loadToReplayNodes(g: var PackedModuleGraph; conf: ConfigRef; cache: IdentCache;
fileIdx: FileIndex; m: var LoadedModule) =
@@ -950,7 +965,7 @@ proc needsRecompile(g: var PackedModuleGraph; conf: ConfigRef; cache: IdentCache
of loading, loaded:
# For loading: Assume no recompile is required.
result = false
of outdated, storing:
of outdated, storing, stored:
result = true
proc moduleFromRodFile*(g: var PackedModuleGraph; conf: ConfigRef; cache: IdentCache;

View File

@@ -333,6 +333,7 @@ template symId*(n: NodePos): SymId = SymId tree.nodes[n.int].operand
proc firstSon*(n: NodePos): NodePos {.inline.} = NodePos(n.int+1)
when false:
# xxx `nkStrLit` or `nkStrLit..nkTripleStrLit:` below?
proc strLit*(tree: PackedTree; n: NodePos): lent string =
assert n.kind == nkStrLit
result = tree.sh.strings[LitId tree.nodes[n.int].operand]

View File

@@ -37,6 +37,7 @@ type
methodsPerTypeSection
enumToStringProcsSection
typeInfoSection # required by the backend
backendFlagsSection
aliveSymsSection # beware, this is stored in a `.alivesyms` file.
RodFileError* = enum

View File

@@ -573,6 +573,7 @@ template processScopeExpr(c: var Con; s: var Scope; ret: PNode, processCall: unt
let tmp = c.getTemp(s.parent[], ret.typ, ret.info)
tmp.sym.flags.incl sfSingleUsedTemp
let cpy = if hasDestructor(c, ret.typ):
s.parent[].final.add c.genDestroy(tmp)
moveOrCopy(tmp, ret, c, s, isDecl = true)
else:
newTree(nkFastAsgn, tmp, p(ret, c, s, normal))

View File

@@ -85,6 +85,9 @@ proc checkIsolate*(n: PNode): bool =
of nkCharLit..nkNilLit:
result = true
of nkCallKinds:
# XXX: as long as we don't update the analysis while examining arguments
# we can do an early check of the return type, otherwise this is a
# bug and needs to be moved below
if n[0].typ.flags * {tfGcSafe, tfNoSideEffect} == {}:
return false
for i in 1..<n.len:
@@ -100,7 +103,12 @@ proc checkIsolate*(n: PNode): bool =
for it in n:
result = checkIsolate(it.lastSon)
if not result: break
of nkCaseStmt, nkObjConstr:
of nkCaseStmt:
for i in 1..<n.len:
result = checkIsolate(n[i].lastSon)
if not result: break
of nkObjConstr:
result = true
for i in 1..<n.len:
result = checkIsolate(n[i].lastSon)
if not result: break
@@ -123,4 +131,3 @@ proc checkIsolate*(n: PNode): bool =
else:
# no ref, no cry:
result = true

View File

@@ -26,6 +26,7 @@ const
SymStartChars*: set[char] = {'a'..'z', 'A'..'Z', '\x80'..'\xFF'}
OpChars*: set[char] = {'+', '-', '*', '/', '\\', '<', '>', '!', '?', '^', '.',
'|', '=', '%', '&', '$', '@', '~', ':'}
UnaryMinusWhitelist = {' ', '\t', '\n', '\r', ',', ';', '(', '[', '{'}
# don't forget to update the 'highlite' module if these charsets should change
@@ -51,22 +52,23 @@ type
tkVar = "var", tkWhen = "when", tkWhile = "while", tkXor = "xor",
tkYield = "yield", # end of keywords
tkIntLit = "tkIntLit", tkInt8Lit = "tkInt8Lit", tkInt16Lit = "tkInt16Lit",
tkIntLit = "tkIntLit", tkInt8Lit = "tkInt8Lit", tkInt16Lit = "tkInt16Lit",
tkInt32Lit = "tkInt32Lit", tkInt64Lit = "tkInt64Lit",
tkUIntLit = "tkUIntLit", tkUInt8Lit = "tkUInt8Lit", tkUInt16Lit = "tkUInt16Lit",
tkUIntLit = "tkUIntLit", tkUInt8Lit = "tkUInt8Lit", tkUInt16Lit = "tkUInt16Lit",
tkUInt32Lit = "tkUInt32Lit", tkUInt64Lit = "tkUInt64Lit",
tkFloatLit = "tkFloatLit", tkFloat32Lit = "tkFloat32Lit",
tkFloat64Lit = "tkFloat64Lit", tkFloat128Lit = "tkFloat128Lit",
tkStrLit = "tkStrLit", tkRStrLit = "tkRStrLit", tkTripleStrLit = "tkTripleStrLit",
tkGStrLit = "tkGStrLit", tkGTripleStrLit = "tkGTripleStrLit", tkCharLit = "tkCharLit",
tkGStrLit = "tkGStrLit", tkGTripleStrLit = "tkGTripleStrLit", tkCharLit = "tkCharLit",
tkCustomLit = "tkCustomLit",
tkParLe = "(", tkParRi = ")", tkBracketLe = "[",
tkBracketRi = "]", tkCurlyLe = "{", tkCurlyRi = "}",
tkBracketDotLe = "[.", tkBracketDotRi = ".]",
tkCurlyDotLe = "{.", tkCurlyDotRi = ".}",
tkParDotLe = "(.", tkParDotRi = ".)",
tkComma = ",", tkSemiColon = ";",
tkColon = ":", tkColonColon = "::", tkEquals = "=",
tkColon = ":", tkColonColon = "::", tkEquals = "=",
tkDot = ".", tkDotDot = "..", tkBracketLeColon = "[:",
tkOpr, tkComment, tkAccent = "`",
# these are fake tokens used by renderer.nim
@@ -312,8 +314,7 @@ proc getNumber(L: var Lexer, result: var Token) =
proc lexMessageLitNum(L: var Lexer, msg: string, startpos: int, msgKind = errGenerated) =
# Used to get slightly human friendlier err messages.
const literalishChars = {'A'..'F', 'a'..'f', '0'..'9', 'X', 'x', 'o', 'O',
'c', 'C', 'b', 'B', '_', '.', '\'', 'd', 'i', 'u'}
const literalishChars = {'A'..'Z', 'a'..'z', '0'..'9', '_', '.', '\''}
var msgPos = L.bufpos
var t: Token
t.literal = ""
@@ -325,15 +326,14 @@ proc getNumber(L: var Lexer, result: var Token) =
t.literal.add(L.buf[L.bufpos])
inc(L.bufpos)
matchChars(L, t, literalishChars)
if L.buf[L.bufpos] in {'\'', 'f', 'F', 'd', 'D', 'i', 'I', 'u', 'U'}:
inc(L.bufpos)
if L.buf[L.bufpos] in literalishChars:
t.literal.add(L.buf[L.bufpos])
inc(L.bufpos)
matchChars(L, t, {'0'..'9'})
L.bufpos = msgPos
lexMessage(L, msgKind, msg % t.literal)
var
startpos, endpos: int
xi: BiggestInt
isBase10 = true
numDigits = 0
@@ -345,8 +345,17 @@ proc getNumber(L: var Lexer, result: var Token) =
result.tokType = tkIntLit # int literal until we know better
result.literal = ""
result.base = base10
startpos = L.bufpos
tokenBegin(result, startpos)
tokenBegin(result, L.bufpos)
var isPositive = true
if L.buf[L.bufpos] == '-':
eatChar(L, result)
isPositive = false
let startpos = L.bufpos
template setNumber(field, value) =
field = (if isPositive: value else: -value)
# First stage: find out base, make verifications, build token literal string
# {'c', 'C'} is added for deprecation reasons to provide a clear error message
@@ -386,200 +395,184 @@ proc getNumber(L: var Lexer, result: var Token) =
discard matchUnderscoreChars(L, result, {'0'..'9'})
if L.buf[L.bufpos] in {'e', 'E'}:
result.tokType = tkFloatLit
eatChar(L, result, 'e')
eatChar(L, result)
if L.buf[L.bufpos] in {'+', '-'}:
eatChar(L, result)
discard matchUnderscoreChars(L, result, {'0'..'9'})
endpos = L.bufpos
let endpos = L.bufpos
# Second stage, find out if there's a datatype suffix and handle it
var postPos = endpos
if L.buf[postPos] in {'\'', 'f', 'F', 'd', 'D', 'i', 'I', 'u', 'U'}:
let errPos = postPos
var customLitPossible = false
if L.buf[postPos] == '\'':
inc(postPos)
customLitPossible = true
case L.buf[postPos]
of 'f', 'F':
inc(postPos)
if (L.buf[postPos] == '3') and (L.buf[postPos + 1] == '2'):
result.tokType = tkFloat32Lit
inc(postPos, 2)
elif (L.buf[postPos] == '6') and (L.buf[postPos + 1] == '4'):
result.tokType = tkFloat64Lit
inc(postPos, 2)
elif (L.buf[postPos] == '1') and
(L.buf[postPos + 1] == '2') and
(L.buf[postPos + 2] == '8'):
result.tokType = tkFloat128Lit
inc(postPos, 3)
else: # "f" alone defaults to float32
result.tokType = tkFloat32Lit
of 'd', 'D': # ad hoc convenience shortcut for f64
inc(postPos)
result.tokType = tkFloat64Lit
of 'i', 'I':
inc(postPos)
if (L.buf[postPos] == '6') and (L.buf[postPos + 1] == '4'):
result.tokType = tkInt64Lit
inc(postPos, 2)
elif (L.buf[postPos] == '3') and (L.buf[postPos + 1] == '2'):
result.tokType = tkInt32Lit
inc(postPos, 2)
elif (L.buf[postPos] == '1') and (L.buf[postPos + 1] == '6'):
result.tokType = tkInt16Lit
inc(postPos, 2)
elif (L.buf[postPos] == '8'):
result.tokType = tkInt8Lit
inc(postPos)
if L.buf[postPos] in SymChars:
var suffix = newStringOfCap(10)
while true:
suffix.add L.buf[postPos]
inc postPos
if L.buf[postPos] notin SymChars+{'_'}: break
let suffixAsLower = suffix.toLowerAscii
case suffixAsLower
of "f", "f32": result.tokType = tkFloat32Lit
of "d", "f64": result.tokType = tkFloat64Lit
of "f128": result.tokType = tkFloat128Lit
of "i8": result.tokType = tkInt8Lit
of "i16": result.tokType = tkInt16Lit
of "i32": result.tokType = tkInt32Lit
of "i64": result.tokType = tkInt64Lit
of "u": result.tokType = tkUIntLit
of "u8": result.tokType = tkUInt8Lit
of "u16": result.tokType = tkUInt16Lit
of "u32": result.tokType = tkUInt32Lit
of "u64": result.tokType = tkUInt64Lit
elif customLitPossible:
# remember the position of the `'` so that the parser doesn't
# have to reparse the custom literal:
result.iNumber = len(result.literal)
result.literal.add '\''
result.literal.add suffix
result.tokType = tkCustomLit
else:
lexMessageLitNum(L, "invalid number: '$1'", startpos)
of 'u', 'U':
inc(postPos)
if (L.buf[postPos] == '6') and (L.buf[postPos + 1] == '4'):
result.tokType = tkUInt64Lit
inc(postPos, 2)
elif (L.buf[postPos] == '3') and (L.buf[postPos + 1] == '2'):
result.tokType = tkUInt32Lit
inc(postPos, 2)
elif (L.buf[postPos] == '1') and (L.buf[postPos + 1] == '6'):
result.tokType = tkUInt16Lit
inc(postPos, 2)
elif (L.buf[postPos] == '8'):
result.tokType = tkUInt8Lit
inc(postPos)
else:
result.tokType = tkUIntLit
lexMessageLitNum(L, "invalid number suffix: '$1'", errPos)
else:
lexMessageLitNum(L, "invalid number: '$1'", startpos)
lexMessageLitNum(L, "invalid number suffix: '$1'", errPos)
# Is there still a literalish char awaiting? Then it's an error!
if L.buf[postPos] in literalishChars or
(L.buf[postPos] == '.' and L.buf[postPos + 1] in {'0'..'9'}):
lexMessageLitNum(L, "invalid number: '$1'", startpos)
# Third stage, extract actual number
L.bufpos = startpos # restore position
var pos: int = startpos
try:
if (L.buf[pos] == '0') and (L.buf[pos + 1] in baseCodeChars):
inc(pos, 2)
xi = 0 # it is a base prefix
if result.tokType != tkCustomLit:
# Third stage, extract actual number
L.bufpos = startpos # restore position
var pos = startpos
try:
if (L.buf[pos] == '0') and (L.buf[pos + 1] in baseCodeChars):
inc(pos, 2)
xi = 0 # it is a base prefix
case L.buf[pos - 1]
of 'b', 'B':
result.base = base2
while pos < endpos:
if L.buf[pos] != '_':
xi = `shl`(xi, 1) or (ord(L.buf[pos]) - ord('0'))
inc(pos)
# 'c', 'C' is deprecated
of 'o', 'c', 'C':
result.base = base8
while pos < endpos:
if L.buf[pos] != '_':
xi = `shl`(xi, 3) or (ord(L.buf[pos]) - ord('0'))
inc(pos)
of 'x', 'X':
result.base = base16
while pos < endpos:
case L.buf[pos]
of '_':
case L.buf[pos - 1]
of 'b', 'B':
result.base = base2
while pos < endpos:
if L.buf[pos] != '_':
xi = `shl`(xi, 1) or (ord(L.buf[pos]) - ord('0'))
inc(pos)
of '0'..'9':
xi = `shl`(xi, 4) or (ord(L.buf[pos]) - ord('0'))
# 'c', 'C' is deprecated (a warning is issued elsewhere)
of 'o', 'c', 'C':
result.base = base8
while pos < endpos:
if L.buf[pos] != '_':
xi = `shl`(xi, 3) or (ord(L.buf[pos]) - ord('0'))
inc(pos)
of 'a'..'f':
xi = `shl`(xi, 4) or (ord(L.buf[pos]) - ord('a') + 10)
inc(pos)
of 'A'..'F':
xi = `shl`(xi, 4) or (ord(L.buf[pos]) - ord('A') + 10)
inc(pos)
else:
break
of 'x', 'X':
result.base = base16
while pos < endpos:
case L.buf[pos]
of '_':
inc(pos)
of '0'..'9':
xi = `shl`(xi, 4) or (ord(L.buf[pos]) - ord('0'))
inc(pos)
of 'a'..'f':
xi = `shl`(xi, 4) or (ord(L.buf[pos]) - ord('a') + 10)
inc(pos)
of 'A'..'F':
xi = `shl`(xi, 4) or (ord(L.buf[pos]) - ord('A') + 10)
inc(pos)
else:
break
else:
internalError(L.config, getLineInfo(L), "getNumber")
case result.tokType
of tkIntLit, tkInt64Lit: setNumber result.iNumber, xi
of tkInt8Lit: setNumber result.iNumber, ashr(xi shl 56, 56)
of tkInt16Lit: setNumber result.iNumber, ashr(xi shl 48, 48)
of tkInt32Lit: setNumber result.iNumber, ashr(xi shl 32, 32)
of tkUIntLit, tkUInt64Lit: setNumber result.iNumber, xi
of tkUInt8Lit: setNumber result.iNumber, xi and 0xff
of tkUInt16Lit: setNumber result.iNumber, xi and 0xffff
of tkUInt32Lit: setNumber result.iNumber, xi and 0xffffffff
of tkFloat32Lit:
setNumber result.fNumber, (cast[PFloat32](addr(xi)))[]
# note: this code is endian neutral!
# XXX: Test this on big endian machine!
of tkFloat64Lit, tkFloatLit:
setNumber result.fNumber, (cast[PFloat64](addr(xi)))[]
else: internalError(L.config, getLineInfo(L), "getNumber")
# Bounds checks. Non decimal literals are allowed to overflow the range of
# the datatype as long as their pattern don't overflow _bitwise_, hence
# below checks of signed sizes against uint*.high is deliberate:
# (0x80'u8 = 128, 0x80'i8 = -128, etc == OK)
if result.tokType notin floatTypes:
let outOfRange =
case result.tokType
of tkUInt8Lit, tkUInt16Lit, tkUInt32Lit: result.iNumber != xi
of tkInt8Lit: (xi > BiggestInt(uint8.high))
of tkInt16Lit: (xi > BiggestInt(uint16.high))
of tkInt32Lit: (xi > BiggestInt(uint32.high))
else: false
if outOfRange:
#echo "out of range num: ", result.iNumber, " vs ", xi
lexMessageLitNum(L, "number out of range: '$1'", startpos)
else:
internalError(L.config, getLineInfo(L), "getNumber")
case result.tokType
of floatTypes:
result.fNumber = parseFloat(result.literal)
of tkUInt64Lit, tkUIntLit:
var iNumber: uint64
var len: int
try:
len = parseBiggestUInt(result.literal, iNumber)
except ValueError:
raise newException(OverflowDefect, "number out of range: " & $result.literal)
if len != result.literal.len:
raise newException(ValueError, "invalid integer: " & $result.literal)
result.iNumber = cast[int64](iNumber)
else:
var iNumber: int64
var len: int
try:
len = parseBiggestInt(result.literal, iNumber)
except ValueError:
raise newException(OverflowDefect, "number out of range: " & $result.literal)
if len != result.literal.len:
raise newException(ValueError, "invalid integer: " & $result.literal)
result.iNumber = iNumber
case result.tokType
of tkIntLit, tkInt64Lit: result.iNumber = xi
of tkInt8Lit: result.iNumber = ashr(xi shl 56, 56)
of tkInt16Lit: result.iNumber = ashr(xi shl 48, 48)
of tkInt32Lit: result.iNumber = ashr(xi shl 32, 32)
of tkUIntLit, tkUInt64Lit: result.iNumber = xi
of tkUInt8Lit: result.iNumber = xi and 0xff
of tkUInt16Lit: result.iNumber = xi and 0xffff
of tkUInt32Lit: result.iNumber = xi and 0xffffffff
of tkFloat32Lit:
result.fNumber = (cast[PFloat32](addr(xi)))[]
# note: this code is endian neutral!
# XXX: Test this on big endian machine!
of tkFloat64Lit, tkFloatLit:
result.fNumber = (cast[PFloat64](addr(xi)))[]
else: internalError(L.config, getLineInfo(L), "getNumber")
# Bounds checks. Non decimal literals are allowed to overflow the range of
# the datatype as long as their pattern don't overflow _bitwise_, hence
# below checks of signed sizes against uint*.high is deliberate:
# (0x80'u8 = 128, 0x80'i8 = -128, etc == OK)
if result.tokType notin floatTypes:
let outOfRange = case result.tokType:
of tkUInt8Lit, tkUInt16Lit, tkUInt32Lit: result.iNumber != xi
of tkInt8Lit: (xi > BiggestInt(uint8.high))
of tkInt16Lit: (xi > BiggestInt(uint16.high))
of tkInt32Lit: (xi > BiggestInt(uint32.high))
else: false
# Explicit bounds checks.
let outOfRange =
case result.tokType
of tkInt8Lit: result.iNumber > int8.high or result.iNumber < int8.low
of tkUInt8Lit: result.iNumber > BiggestInt(uint8.high) or result.iNumber < 0
of tkInt16Lit: result.iNumber > int16.high or result.iNumber < int16.low
of tkUInt16Lit: result.iNumber > BiggestInt(uint16.high) or result.iNumber < 0
of tkInt32Lit: result.iNumber > int32.high or result.iNumber < int32.low
of tkUInt32Lit: result.iNumber > BiggestInt(uint32.high) or result.iNumber < 0
else: false
if outOfRange:
#echo "out of range num: ", result.iNumber, " vs ", xi
lexMessageLitNum(L, "number out of range: '$1'", startpos)
else:
case result.tokType
of floatTypes:
result.fNumber = parseFloat(result.literal)
of tkUInt64Lit, tkUIntLit:
var iNumber: uint64
var len: int
try:
len = parseBiggestUInt(result.literal, iNumber)
except ValueError:
raise newException(OverflowDefect, "number out of range: " & $result.literal)
if len != result.literal.len:
raise newException(ValueError, "invalid integer: " & $result.literal)
result.iNumber = cast[int64](iNumber)
else:
var iNumber: int64
var len: int
try:
len = parseBiggestInt(result.literal, iNumber)
except ValueError:
raise newException(OverflowDefect, "number out of range: " & $result.literal)
if len != result.literal.len:
raise newException(ValueError, "invalid integer: " & $result.literal)
result.iNumber = iNumber
# Promote int literal to int64? Not always necessary, but more consistent
if result.tokType == tkIntLit:
if result.iNumber > high(int32) or result.iNumber < low(int32):
result.tokType = tkInt64Lit
# Explicit bounds checks. Only T.high needs to be considered
# since result.iNumber can't be negative.
let outOfRange =
case result.tokType
of tkInt8Lit: result.iNumber > int8.high
of tkUInt8Lit: result.iNumber > BiggestInt(uint8.high)
of tkInt16Lit: result.iNumber > int16.high
of tkUInt16Lit: result.iNumber > BiggestInt(uint16.high)
of tkInt32Lit: result.iNumber > int32.high
of tkUInt32Lit: result.iNumber > BiggestInt(uint32.high)
else: false
if outOfRange: lexMessageLitNum(L, "number out of range: '$1'", startpos)
# Promote int literal to int64? Not always necessary, but more consistent
if result.tokType == tkIntLit:
if result.iNumber > high(int32):
result.tokType = tkInt64Lit
except ValueError:
lexMessageLitNum(L, "invalid number: '$1'", startpos)
except OverflowDefect, RangeDefect:
lexMessageLitNum(L, "number out of range: '$1'", startpos)
except ValueError:
lexMessageLitNum(L, "invalid number: '$1'", startpos)
except OverflowDefect, RangeDefect:
lexMessageLitNum(L, "number out of range: '$1'", startpos)
tokenEnd(result, postPos-1)
L.bufpos = postPos
@@ -820,8 +813,9 @@ proc getString(L: var Lexer, tok: var Token, mode: StringMode) =
inc(pos)
L.bufpos = pos
proc getCharacter(L: var Lexer, tok: var Token) =
proc getCharacter(L: var Lexer; tok: var Token) =
tokenBegin(tok, L.bufpos)
let startPos = L.bufpos
inc(L.bufpos) # skip '
var c = L.buf[L.bufpos]
case c
@@ -832,10 +826,16 @@ proc getCharacter(L: var Lexer, tok: var Token) =
else:
tok.literal = $c
inc(L.bufpos)
if L.buf[L.bufpos] != '\'':
lexMessage(L, errGenerated, "missing closing ' for character literal")
tokenEndIgnore(tok, L.bufpos)
inc(L.bufpos) # skip '
if L.buf[L.bufpos] == '\'':
tokenEndIgnore(tok, L.bufpos)
inc(L.bufpos) # skip '
else:
if startPos > 0 and L.buf[startPos-1] == '`':
tok.literal = "'"
L.bufpos = startPos+1
else:
lexMessage(L, errGenerated, "missing closing ' for character literal")
tokenEndIgnore(tok, L.bufpos)
proc getSymbol(L: var Lexer, tok: var Token) =
var h: Hash = 0
@@ -1278,6 +1278,19 @@ proc rawGetTok*(L: var Lexer, tok: var Token) =
let c = L.buf[L.bufpos]
if c in SymChars+{'_'}:
lexMessage(L, errGenerated, "invalid token: no whitespace between number and identifier")
of '-':
if L.buf[L.bufpos+1] in {'0'..'9'} and
(L.bufpos-1 == 0 or L.buf[L.bufpos-1] in UnaryMinusWhitelist):
# x)-23 # binary minus
# ,-23 # unary minus
# \n-78 # unary minus? Yes.
# =-3 # parsed as `=-` anyway
getNumber(L, tok)
let c = L.buf[L.bufpos]
if c in SymChars+{'_'}:
lexMessage(L, errGenerated, "invalid token: no whitespace between number and identifier")
else:
getOperator(L, tok)
else:
if c in OpChars:
getOperator(L, tok)

View File

@@ -125,9 +125,11 @@ proc styleCheckUse*(conf: ConfigRef; info: TLineInfo; s: PSym) =
return
let newName = s.name.s
let oldName = differs(conf, info, newName)
if oldName.len > 0:
lintReport(conf, info, newName, oldName)
let badName = differs(conf, info, newName)
if badName.len > 0:
# special rules for historical reasons
let forceHint = badName == "nnkArgList" and newName == "nnkArglist" or badName == "nnkArglist" and newName == "nnkArgList"
lintReport(conf, info, newName, badName, forceHint = forceHint)
proc checkPragmaUse*(conf: ConfigRef; info: TLineInfo; w: TSpecialWord; pragmaName: string) =
let wanted = $w

View File

@@ -455,11 +455,25 @@ proc getModule*(g: ModuleGraph; fileIdx: FileIndex): PSym =
elif fileIdx.int32 < g.ifaces.len:
result = g.ifaces[fileIdx.int32].module
proc moduleOpenForCodegen*(g: ModuleGraph; m: FileIndex): bool {.inline.} =
  ## Returns true if module `m` can still receive generated code.
  ## Without incremental compilation every module is always open; with IC a
  ## module is closed once its packed status is `undefined`, `stored` or
  ## `loaded` (i.e. its .rod data can no longer be extended).
  if g.config.symbolFiles == disabledSf:
    result = true
  else:
    result = g.packed[m.int32].status notin {undefined, stored, loaded}
proc rememberEmittedTypeInfo*(g: ModuleGraph; m: FileIndex; ti: string) =
  ## Records in module `m`'s .rod data that it emitted the type info named
  ## `ti`, so `replayTypeInfo` can restore this fact on a cached build.
  ## No-op when symbol files (IC) are disabled.
  #assert(not isCachedModule(g, m.int32))
  if g.config.symbolFiles != disabledSf:
    #assert g.encoders[m.int32].isActive
    # the .rod file must not have been finalized yet
    assert g.packed[m.int32].status != stored
    g.packed[m.int32].fromDisk.emittedTypeInfo.add ti
    #echo "added typeinfo ", m.int32, " ", ti, " suspicious ", not g.encoders[m.int32].isActive
proc rememberFlag*(g: ModuleGraph; m: PSym; flag: ModuleBackendFlag) =
  ## Records backend `flag` in the .rod data of the module symbol `m`, for
  ## replay by the IC backend. No-op when symbol files (IC) are disabled.
  if g.config.symbolFiles != disabledSf:
    #assert g.encoders[m.int32].isActive
    # the .rod file must not have been finalized yet
    assert g.packed[m.position].status != stored
    g.packed[m.position].fromDisk.backendFlags.incl flag
proc closeRodFile*(g: ModuleGraph; m: PSym) =
if g.config.symbolFiles in {readOnlySf, v2Sf}:
@@ -469,6 +483,8 @@ proc closeRodFile*(g: ModuleGraph; m: PSym) =
let mint = m.position
saveRodFile(toRodFile(g.config, AbsoluteFile toFullPath(g.config, FileIndex(mint))),
g.encoders[mint], g.packed[mint].fromDisk)
g.packed[mint].status = stored
elif g.config.symbolFiles == stressTest:
# debug code, but maybe a good idea for production? Could reduce the compiler's
# memory consumption considerably at the cost of more loads from disk.

View File

@@ -612,9 +612,9 @@ template internalAssert*(conf: ConfigRef, e: bool) =
let arg = info2.toFileLineCol
internalErrorImpl(conf, unknownLineInfo, arg, info2)
template lintReport*(conf: ConfigRef; info: TLineInfo, beau, got: string) =
let m = "'$2' should be: '$1'" % [beau, got]
let msg = if optStyleError in conf.globalOptions: errGenerated else: hintName
template lintReport*(conf: ConfigRef; info: TLineInfo, beau, got: string, forceHint = false) =
let m = "'$1' should be: '$2'" % [got, beau]
let msg = if optStyleError in conf.globalOptions and not forceHint: errGenerated else: hintName
liMessage(conf, info, msg, m, doNothing, instLoc())
proc quotedFilename*(conf: ConfigRef; i: TLineInfo): Rope =

View File

@@ -812,10 +812,11 @@ proc findModule*(conf: ConfigRef; modulename, currentModule: string): AbsoluteFi
for candidate in stdlibDirs:
let path = (conf.libpath.string / candidate / stripped)
if fileExists(path):
m = path
result = AbsoluteFile path
break
let currentPath = currentModule.splitFile.dir
result = AbsoluteFile currentPath / m
else: # If prefixed with std/ why would we add the current module path!
let currentPath = currentModule.splitFile.dir
result = AbsoluteFile currentPath / m
if not fileExists(result):
result = findFile(conf, m)
patchModule(conf)

View File

@@ -355,7 +355,7 @@ proc parseSymbol(p: var Parser, mode = smNormal): PNode =
let node = newNodeI(nkIdent, lineinfo)
node.ident = p.lex.cache.getIdent(accm)
result.add(node)
of tokKeywordLow..tokKeywordHigh, tkSymbol, tkIntLit..tkCharLit:
of tokKeywordLow..tokKeywordHigh, tkSymbol, tkIntLit..tkCustomLit:
result.add(newIdentNodeP(p.lex.cache.getIdent($p.tok), p))
getTok(p)
else:
@@ -627,7 +627,7 @@ proc identOrLiteral(p: var Parser, mode: PrimaryMode): PNode =
#| | UINT_LIT | UINT8_LIT | UINT16_LIT | UINT32_LIT | UINT64_LIT
#| | FLOAT_LIT | FLOAT32_LIT | FLOAT64_LIT
#| | STR_LIT | RSTR_LIT | TRIPLESTR_LIT
#| | CHAR_LIT
#| | CHAR_LIT | CUSTOM_NUMERIC_LIT
#| | NIL
#| generalizedLit = GENERALIZED_STR_LIT | GENERALIZED_TRIPLESTR_LIT
#| identOrLiteral = generalizedLit | symbol | literal
@@ -710,6 +710,14 @@ proc identOrLiteral(p: var Parser, mode: PrimaryMode): PNode =
of tkCharLit:
result = newIntNodeP(nkCharLit, ord(p.tok.literal[0]), p)
getTok(p)
of tkCustomLit:
let splitPos = p.tok.iNumber.int
let str = newStrNodeP(nkRStrLit, p.tok.literal.substr(0, splitPos-1), p)
let callee = newIdentNodeP(getIdent(p.lex.cache, p.tok.literal.substr(splitPos)), p)
result = newNodeP(nkDotExpr, p)
result.add str
result.add callee
getTok(p)
of tkNil:
result = newNodeP(nkNilLit, p)
getTok(p)
@@ -807,7 +815,7 @@ proc primarySuffix(p: var Parser, r: PNode,
result = commandExpr(p, result, mode)
break
result = namedParams(p, result, nkCurlyExpr, tkCurlyRi)
of tkSymbol, tkAccent, tkIntLit..tkCharLit, tkNil, tkCast,
of tkSymbol, tkAccent, tkIntLit..tkCustomLit, tkNil, tkCast,
tkOpr, tkDotDot, tkVar, tkOut, tkStatic, tkType, tkEnum, tkTuple,
tkObject, tkProc:
# XXX: In type sections we allow the free application of the
@@ -1097,7 +1105,7 @@ proc isExprStart(p: Parser): bool =
case p.tok.tokType
of tkSymbol, tkAccent, tkOpr, tkNot, tkNil, tkCast, tkIf, tkFor,
tkProc, tkFunc, tkIterator, tkBind, tkBuiltInMagics,
tkParLe, tkBracketLe, tkCurlyLe, tkIntLit..tkCharLit, tkVar, tkRef, tkPtr,
tkParLe, tkBracketLe, tkCurlyLe, tkIntLit..tkCustomLit, tkVar, tkRef, tkPtr,
tkTuple, tkObject, tkWhen, tkCase, tkOut:
result = true
else: result = false
@@ -1498,7 +1506,7 @@ proc parseReturnOrRaise(p: var Parser, kind: TNodeKind): PNode =
#| yieldStmt = 'yield' optInd expr?
#| discardStmt = 'discard' optInd expr?
#| breakStmt = 'break' optInd expr?
#| continueStmt = 'break' optInd expr?
#| continueStmt = 'continue' optInd expr?
result = newNodeP(kind, p)
getTok(p)
if p.tok.tokType == tkComment:

View File

@@ -942,7 +942,7 @@ proc skipHiddenNodes(n: PNode): PNode =
else: break
proc accentedName(g: var TSrcGen, n: PNode) =
const backticksNeeded = OpChars + {'[', '{'}
const backticksNeeded = OpChars + {'[', '{', '\''}
if n == nil: return
let isOperator =
if n.kind == nkIdent and n.ident.s.len > 0 and n.ident.s[0] in backticksNeeded: true
@@ -976,6 +976,11 @@ proc infixArgument(g: var TSrcGen, n: PNode, i: int) =
if needsParenthesis:
put(g, tkParRi, ")")
proc isCustomLit(n: PNode): bool =
  ## Returns true if `n` is the dotted form of a custom numeric literal:
  ## a two-child node whose first child is a raw string literal and whose
  ## second child is an identifier/symbol starting with `'` (as the parser
  ## builds for e.g. `123'bignum`).
  # Parenthesize the `or`: in Nim `and` binds tighter than `or`, so
  # without the parentheses the `nkSym` branch would be checked without
  # the `n.len == 2 and n[0].kind == nkRStrLit` guard and could index a
  # node with fewer than two children.
  n.len == 2 and n[0].kind == nkRStrLit and
    ((n[1].kind == nkIdent and n[1].ident.s.startsWith('\'')) or
     (n[1].kind == nkSym and n[1].sym.name.s.startsWith('\'')))
proc gsub(g: var TSrcGen, n: PNode, c: TContext) =
if isNil(n): return
var
@@ -1195,9 +1200,14 @@ proc gsub(g: var TSrcGen, n: PNode, c: TContext) =
gcomma(g, n, c)
put(g, tkBracketRi, "]")
of nkDotExpr:
gsub(g, n, 0)
put(g, tkDot, ".")
gsub(g, n, 1)
if isCustomLit(n):
put(g, tkCustomLit, n[0].strVal)
gsub(g, n, 1)
else:
gsub(g, n, 0)
put(g, tkDot, ".")
if n.len > 1:
accentedName(g, n[1])
of nkBind:
putWithSpace(g, tkBind, "bind")
gsub(g, n, 0)

View File

@@ -107,6 +107,7 @@ proc computeDeps(cache: IdentCache; n: PNode, declares, uses: var IntSet; topLev
for i in 0..<n.safeLen: deps(n[i])
of nkMixinStmt, nkBindStmt: discard
else:
# XXX: for callables, this technically adds the return type dep before args
for i in 0..<n.safeLen: deps(n[i])
proc hasIncludes(n:PNode): bool =

View File

@@ -685,7 +685,8 @@ proc getConstExpr(m: PSym, n: PNode; idgen: IdGenerator; g: ModuleGraph): PNode
of nkDerefExpr, nkHiddenDeref:
let a = getConstExpr(m, n[0], idgen, g)
if a != nil and a.kind == nkNilLit:
localError(g.config, n.info, "nil dereference is not allowed")
result = nil
#localError(g.config, n.info, "nil dereference is not allowed")
of nkCast:
var a = getConstExpr(m, n[1], idgen, g)
if a == nil: return

View File

@@ -452,8 +452,6 @@ proc semGenericStmt(c: PContext, n: PNode,
discard
of nkFormalParams:
checkMinSonsLen(n, 1, c.config)
if n[0].kind != nkEmpty:
n[0] = semGenericStmt(c, n[0], flags+{withinTypeDesc}, ctx)
for i in 1..<n.len:
var a = n[i]
if (a.kind != nkIdentDefs): illFormedAst(a, c.config)
@@ -462,6 +460,10 @@ proc semGenericStmt(c: PContext, n: PNode,
a[^1] = semGenericStmt(c, a[^1], flags, ctx)
for j in 0..<a.len-2:
addTempDecl(c, getIdentNode(c, a[j]), skParam)
# XXX: last change was moving this down here, search for "1.." to keep
# going from this file onward
if n[0].kind != nkEmpty:
n[0] = semGenericStmt(c, n[0], flags+{withinTypeDesc}, ctx)
of nkProcDef, nkMethodDef, nkConverterDef, nkMacroDef, nkTemplateDef,
nkFuncDef, nkIteratorDef, nkLambdaKinds:
checkSonsLen(n, bodyPos + 1, c.config)

View File

@@ -1524,7 +1524,7 @@ proc semProcAnnotation(c: PContext, prc: PNode;
return
proc semInferredLambda(c: PContext, pt: TIdTable, n: PNode): PNode {.nosinks.} =
## used for resolving 'auto' in lambdas based on their callsite
## used for resolving 'auto' in lambdas based on their callsite
var n = n
let original = n[namePos].sym
let s = original #copySym(original, false)

View File

@@ -299,7 +299,7 @@ proc semArrayIndex(c: PContext, n: PNode): PType =
result = makeRangeWithStaticExpr(c, e.typ.n)
elif e.kind in {nkIntLit..nkUInt64Lit}:
if e.intVal < 0:
localError(c.config, n[1].info,
localError(c.config, n.info,
"Array length can't be negative, but was " & $e.intVal)
result = makeRangeType(c, 0, e.intVal-1, n.info, e.typ)
elif e.kind == nkSym and e.typ.kind == tyStatic:

View File

@@ -371,8 +371,6 @@ proc wrapProcForSpawn*(g: ModuleGraph; idgen: IdGenerator; owner: PSym; spawnExp
fn = indirectAccess(castExpr, field, n.info)
elif fn.kind == nkSym and fn.sym.kind == skIterator:
localError(g.config, n.info, "iterator in spawn environment is not allowed")
elif fn.typ.callConv == ccClosure:
localError(g.config, n.info, "closure in spawn environment is not allowed")
call.add(fn)
var varSection = newNodeI(nkVarSection, n.info)

View File

@@ -29,8 +29,7 @@ proc transformBody*(g: ModuleGraph; idgen: IdGenerator, prc: PSym, cache: bool):
import closureiters, lambdalifting
type
PTransCon = ref TTransCon
TTransCon{.final.} = object # part of TContext; stackable
PTransCon = ref object # part of TContext; stackable
mapping: TIdNodeTable # mapping from symbols to nodes
owner: PSym # current owner
forStmt: PNode # current for stmt
@@ -40,7 +39,7 @@ type
# if we encounter the 2nd yield statement
next: PTransCon # for stacking
TTransfContext = object
PTransf = ref object
module: PSym
transCon: PTransCon # top of a TransCon stack
inlining: int # > 0 if we are in inlining context (copy vars)
@@ -49,7 +48,6 @@ type
deferDetected, tooEarly: bool
graph: ModuleGraph
idgen: IdGenerator
PTransf = ref TTransfContext
proc newTransNode(a: PNode): PNode {.inline.} =
result = shallowCopy(a)

View File

@@ -1354,9 +1354,10 @@ proc genMagic(c: PCtx; n: PNode; dest: var TDest; m: TMagic) =
if dest < 0: dest = c.getTemp(arg.typ)
gABC(c, arg, whichAsgnOpc(arg, requiresCopy=false), dest, a)
# XXX use ldNullOpcode() here?
c.gABx(n, opcLdNull, a, c.genType(arg.typ))
c.gABx(n, opcNodeToReg, a, a)
c.genAsgnPatch(arg, a)
# Don't zero out the arg for now #17199
# c.gABx(n, opcLdNull, a, c.genType(arg.typ))
# c.gABx(n, opcNodeToReg, a, a)
# c.genAsgnPatch(arg, a)
c.freeTemp(a)
of mNodeId:
c.genUnaryABC(n, dest, opcNodeId)

View File

@@ -1135,7 +1135,7 @@ AST:
.. code-block:: nim
# ...
nnkTypeClassTy( # note this isn't nnkConceptTy!
nnkArglist(
nnkArgList(
# ... idents for x, y, z
)
# ...

View File

@@ -46,7 +46,7 @@ literal = | INT_LIT | INT8_LIT | INT16_LIT | INT32_LIT | INT64_LIT
| UINT_LIT | UINT8_LIT | UINT16_LIT | UINT32_LIT | UINT64_LIT
| FLOAT_LIT | FLOAT32_LIT | FLOAT64_LIT
| STR_LIT | RSTR_LIT | TRIPLESTR_LIT
| CHAR_LIT
| CHAR_LIT | CUSTOM_NUMERIC_LIT
| NIL
generalizedLit = GENERALIZED_STR_LIT | GENERALIZED_TRIPLESTR_LIT
identOrLiteral = generalizedLit | symbol | literal
@@ -100,6 +100,7 @@ postExprBlocks = ':' stmt? ( IND{=} doBlock
| IND{=} 'of' exprList ':' stmt
| IND{=} 'elif' expr ':' stmt
| IND{=} 'except' exprList ':' stmt
| IND{=} 'finally' ':' stmt
| IND{=} 'else' ':' stmt )*
exprStmt = simpleExpr
(( '=' optInd expr colonBody? )

View File

@@ -32,28 +32,29 @@ Path Purpose
`doc` the documentation; it is a bunch of
reStructuredText files
`lib` the Nim library
`web` website of Nim; generated by `nimweb`
from the `*.txt` and `*.nimf` files
============ ===================================================
Bootstrapping the compiler
==========================
**Note**: Add ``.`` to your PATH so that `koch` can be used without the `./`.
Compiling the compiler is a simple matter of running::
nim c koch.nim
./koch boot
koch boot -d:release
For a release version use::
For a debug version use::
nim c koch.nim
./koch boot -d:release
koch boot
And for a debug version compatible with GDB::
nim c koch.nim
./koch boot --debuginfo --linedir:on
koch boot --debuginfo --linedir:on
The `koch` program is Nim's maintenance script. It is a replacement for
make and shell scripting with the advantage that it is much more portable.
@@ -61,12 +62,105 @@ More information about its options can be found in the `koch <koch.html>`_
documentation.
Developing the compiler
=======================
To create a new compiler for each run, use `koch temp`::
koch temp c test.nim
`koch temp` creates a debug build of the compiler, which is useful
to create stacktraces for compiler debugging.
You can of course use GDB or Visual Studio to debug the
compiler (via `--debuginfo --lineDir:on`). However, there
are also lots of procs that aid in debugging:
.. code-block:: nim
# dealing with PNode:
echo renderTree(someNode)
debug(someNode) # some JSON representation
# dealing with PType:
echo typeToString(someType)
debug(someType)
# dealing with PSym:
echo symbol.name.s
debug(symbol)
# pretty prints the Nim ast, but annotates symbol IDs:
echo renderTree(someNode, {renderIds})
if `??`(conf, n.info, "temp.nim"):
# only output when it comes from "temp.nim"
echo renderTree(n)
if `??`(conf, n.info, "temp.nim"):
# why does it process temp.nim here?
writeStackTrace()
These procs may not already be imported by the module you're editing.
You can import them directly for debugging:
.. code-block:: nim
from astalgo import debug
from types import typeToString
from renderer import renderTree
from msgs import `??`
The compiler's architecture
===========================
Nim uses the classic compiler architecture: A lexer/scanner feeds tokens to a
parser. The parser builds a syntax tree that is used by the code generators.
This syntax tree is the interface between the parser and the code generator.
It is essential to understand most of the compiler's code.
Semantic analysis is separated from parsing.
.. include:: filelist.txt
The syntax tree
---------------
The syntax tree consists of nodes which may have an arbitrary number of
children. Types and symbols are represented by other nodes, because they
may contain cycles. The AST changes its shape after semantic checking. This
is needed to make life easier for the code generators. See the "ast" module
for the type definitions. The `macros <macros.html>`_ module contains many
examples how the AST represents each syntactic structure.
Bisecting for regressions
=========================
`koch temp` returns 125 as the exit code in case the compiler
compilation fails. This exit code tells `git bisect` to skip the
current commit.::
git bisect start bad-commit good-commit
git bisect run ./koch temp -r c test-source.nim
You can also bisect using custom options to build the compiler, for example if
you don't need a debug version of the compiler (which runs slower), you can replace
`./koch temp` by explicit compilation command, see `Rebuilding the compiler`_.
Runtimes
========
Nim has two different runtimes, the "old runtime" and the "new runtime". The old
runtime supports the old GCs (markAndSweep, refc, Boehm), the new runtime supports
ARC/ORC. The new runtime is active `when defined(nimV2)`.
Coding Guidelines
=================
* Use CamelCase, not underscored_identifiers.
* Indent with two spaces.
* Max line length is 80 characters.
* We follow Nim's official style guide, see `<nep1.html>`_.
* Max line length is 100 characters.
* Provide spaces around binary operators if that enhances readability.
* Use a space after a colon, but not before it.
* [deprecated] Start types with a capital `T`, unless they are
@@ -86,9 +180,9 @@ POSIX-compliant systems on conventional hardware are usually pretty easy to
port: Add the platform to `platform` (if it is not already listed there),
check that the OS, System modules work and recompile Nim.
The only case where things aren't as easy is when the garbage
collector needs some assembler tweaking to work. The standard
version of the GC uses C's `setjmp` function to store all registers
The only case where things aren't as easy is when old runtime's garbage
collectors need some assembler tweaking to work. The default
implementation uses C's `setjmp` function to store all registers
on the hardware stack. It may be necessary that the new platform needs to
replace this generic code by some assembler code.
@@ -96,136 +190,33 @@ replace this generic code by some assembler code.
Runtime type information
========================
**Note**: This section describes the "old runtime".
*Runtime type information* (RTTI) is needed for several aspects of the Nim
programming language:
Garbage collection
The most important reason for RTTI. Generating
traversal procedures produces bigger code and is likely to be slower on
modern hardware as dynamic procedure binding is hard to predict.
The old GCs use the RTTI for traversing arbitrary Nim types, but usually
only the `marker` field which contains a proc that does the traversal.
Complex assignments
Sequences and strings are implemented as
pointers to resizeable buffers, but Nim requires copying for
assignments. Apart from RTTI the compiler could generate copy procedures
for any type that needs one. However, this would make the code bigger and
the RTTI is likely already there for the GC.
assignments. Apart from RTTI the compiler also generates copy procedures
as a specialization.
We already know the type information as a graph in the compiler.
Thus we need to serialize this graph as RTTI for C code generation.
Look at the file `lib/system/hti.nim` for more information.
Rebuilding the compiler
Magics and compilerProcs
========================
After an initial build via `sh build_all.sh` on posix or `build_all.bat` on windows,
you can rebuild the compiler as follows:
* `nim c koch` if you need to rebuild koch
* `./koch boot -d:release` this ensures the compiler can rebuild itself
(use `koch` instead of `./koch` on windows), which builds the compiler 3 times.
A faster approach if you don't need to run the full bootstrapping implied by `koch boot`,
is the following:
* `pathto/nim c --lib:lib -d:release -o:bin/nim_temp compiler/nim.nim`
Where `pathto/nim` is any nim binary sufficiently recent (e.g. `bin/nim_csources`
built during bootstrap or `$HOME/.nimble/bin/nim` installed by `choosenim 1.2.0`)
You can pass any additional options such as `-d:leanCompiler` if you don't need
certain features or `-d:debug --stacktrace:on --excessiveStackTrace --stackTraceMsgs`
for debugging the compiler. See also `Debugging the compiler`_.
Debugging the compiler
======================
You can of course use GDB or Visual Studio to debug the
compiler (via `--debuginfo --lineDir:on`). However, there
are also lots of procs that aid in debugging:
.. code-block:: nim
# pretty prints the Nim AST
echo renderTree(someNode)
# outputs some JSON representation
debug(someNode)
# pretty prints some type
echo typeToString(someType)
debug(someType)
echo symbol.name.s
debug(symbol)
# pretty prints the Nim ast, but annotates symbol IDs:
echo renderTree(someNode, {renderIds})
if `??`(conf, n.info, "temp.nim"):
# only output when it comes from "temp.nim"
echo renderTree(n)
if `??`(conf, n.info, "temp.nim"):
# why does it process temp.nim here?
writeStackTrace()
These procs may not be imported by a module. You can import them directly for debugging:
.. code-block:: nim
from astalgo import debug
from types import typeToString
from renderer import renderTree
from msgs import `??`
To create a new compiler for each run, use `koch temp`::
./koch temp c /tmp/test.nim
`koch temp` creates a debug build of the compiler, which is useful
to create stacktraces for compiler debugging. See also
`Rebuilding the compiler`_ if you need more control.
Bisecting for regressions
=========================
`koch temp` returns 125 as the exit code in case the compiler
compilation fails. This exit code tells `git bisect` to skip the
current commit.::
git bisect start bad-commit good-commit
git bisect run ./koch temp -r c test-source.nim
You can also bisect using custom options to build the compiler, for example if
you don't need a debug version of the compiler (which runs slower), you can replace
`./koch temp` by explicit compilation command, see `Rebuilding the compiler`_.
The compiler's architecture
===========================
Nim uses the classic compiler architecture: A lexer/scanner feeds tokens to a
parser. The parser builds a syntax tree that is used by the code generator.
This syntax tree is the interface between the parser and the code generator.
It is essential to understand most of the compiler's code.
In order to compile Nim correctly, type-checking has to be separated from
parsing. Otherwise generics cannot work.
.. include:: filelist.txt
The syntax tree
---------------
The syntax tree consists of nodes which may have an arbitrary number of
children. Types and symbols are represented by other nodes, because they
may contain cycles. The AST changes its shape after semantic checking. This
is needed to make life easier for the code generators. See the "ast" module
for the type definitions. The `macros <macros.html>`_ module contains many
examples how the AST represents each syntactic structure.
How the RTL is compiled
=======================
The `system` module contains the part of the RTL which needs support by
compiler magic (and the stuff that needs to be in it because the spec
says so). The C code generator generates the C code for it, just like any other
compiler magic. The C code generator generates the C code for it, just like any other
module. However, calls to some procedures like `addInt` are inserted by
the CCG. Therefore the module `magicsys` contains a table (`compilerprocs`)
the generator. Therefore there is a table (`compilerprocs`)
with all symbols that are marked as `compilerproc`. `compilerprocs` are
needed by the code generator. A `magic` proc is not the same as a
`compilerproc`: A `magic` is a proc that needs compiler magic for its
@@ -233,325 +224,6 @@ semantic checking, a `compilerproc` is a proc that is used by the code
generator.
Compilation cache
=================
The implementation of the compilation cache is tricky: There are lots
of issues to be solved for the front- and backend.
General approach: AST replay
----------------------------
We store a module's AST of a successful semantic check in a SQLite
database. There are plenty of features that require a subsequence
to be re-applied, for example:
.. code-block:: nim
{.compile: "foo.c".} # even if the module is loaded from the DB,
# "foo.c" needs to be compiled/linked.
The solution is to **re-play** the module's top level statements.
This solves the problem without having to special case the logic
that fills the internal seqs which are affected by the pragmas.
In fact, this describes how the AST should be stored in the database,
as a "shallow" tree. Let's assume we compile module `m` with the
following contents:
.. code-block:: nim
import std/strutils
var x*: int = 90
{.compile: "foo.c".}
proc p = echo "p"
proc q = echo "q"
static:
echo "static"
Conceptually this is the AST we store for the module:
.. code-block:: nim
import std/strutils
var x*
{.compile: "foo.c".}
proc p
proc q
static:
echo "static"
The symbol's `ast` field is loaded lazily, on demand. This is where most
savings come from, only the shallow outer AST is reconstructed immediately.
It is also important that the replay involves the `import` statement so
that dependencies are resolved properly.
Shared global compiletime state
-------------------------------
Nim allows `.global, compiletime` variables that can be filled by macro
invocations across different modules. This feature breaks modularity in a
severe way. Plenty of different solutions have been proposed:
- Restrict the types of global compiletime variables to `Set[T]` or
similar unordered, only-growable collections so that we can track
the module's write effects to these variables and reapply the changes
in a different order.
- In every module compilation, reset the variable to its default value.
- Provide a restrictive API that can load/save the compiletime state to
a file.
(These solutions are not mutually exclusive.)
Since we adopt the "replay the top level statements" idea, the natural
solution to this problem is to emit pseudo top level statements that
reflect the mutations done to the global variable. However, this is
MUCH harder than it sounds, for example `squeaknim` uses this
snippet:
.. code-block:: nim
apicall.add(") module: '" & dllName & "'>\C" &
"\t^self externalCallFailed\C!\C\C")
stCode.add(st & "\C\t\"Generated by NimSqueak\"\C\t" & apicall)
We can "replay" `stCode.add` only if the values of `st`
and `apicall` are known. And even then a hash table's `add` with its
hashing mechanism is too hard to replay.
In practice, things are worse still, consider `someGlobal[i][j].add arg`.
We only know the root is `someGlobal` but the concrete path to the data
is unknown as is the value that is added. We could compute a "diff" between
the global states and use that to compute a symbol patchset, but this is
quite some work, expensive to do at runtime (it would need to run after
every module has been compiled) and would also break for hash tables.
We need an API that hides the complex aliasing problems by not relying
on Nim's global variables. The obvious solution is to use string keys
instead of global variables:
.. code-block:: nim
proc cachePut*(key: string; value: string)
proc cacheGet*(key: string): string
However, the values being strings/json is quite problematic: Many
lookup tables that are built at compiletime embed *proc vars* and
types which have no obvious string representation... Seems like
AST diffing is still the best idea as it will not require to use
an alien API and works with some existing Nimble packages, at least.
On the other hand, in Nim's future I would like to replace the VM
by native code. A diff algorithm wouldn't work for that.
Instead the native code would work with an API like `put`, `get`:
.. code-block:: nim
proc cachePut*(key: string; value: NimNode)
proc cacheGet*(key: string): NimNode
The API should embrace the AST diffing notion: See the
module `macrocache` for the final details.
Methods and type converters
---------------------------
In the following
sections *global* means *shared between modules* or *property of the whole
program*.
Nim contains language features that are *global*. The best example for that
are multi methods: Introducing a new method with the same name and some
compatible object parameter means that the method's dispatcher needs to take
the new method into account. So the dispatching logic is only completely known
after the whole program has been translated!
Other features that are *implicitly* triggered cause problems for modularity
too. Type converters fall into this category:
.. code-block:: nim
# module A
converter toBool(x: int): bool =
result = x != 0
.. code-block:: nim
# module B
import A
if 1:
echo "ugly, but should work"
If in the above example module `B` is re-compiled, but `A` is not then
`B` needs to be aware of `toBool` even though `toBool` is not referenced
in `B` *explicitly*.
Both the multi method and the type converter problems are solved by the
AST replay implementation.
Generics
~~~~~~~~
We cache generic instantiations and need to ensure this caching works
well with the incremental compilation feature. Since the cache is
attached to the `PSym` datastructure, it should work without any
special logic.
Backend issues
--------------
- Init procs must not be "forgotten" to be called.
- Files must not be "forgotten" to be linked.
- Method dispatchers are global.
- DLL loading via `dlsym` is global.
- Emulated thread vars are global.
However the biggest problem is that dead code elimination breaks modularity!
To see why, consider this scenario: The module `G` (for example the huge
Gtk2 module...) is compiled with dead code elimination turned on. So none
of `G`'s procs is generated at all.
Then module `B` is compiled that requires `G.P1`. Ok, no problem,
`G.P1` is loaded from the symbol file and `G.c` now contains `G.P1`.
Then module `A` (that depends on `B` and `G`) is compiled and `B`
and `G` are left unchanged. `A` requires `G.P2`.
So now `G.c` MUST contain both `P1` and `P2`, but we haven't even
loaded `P1` from the symbol file, nor do we want to because we then quickly
would restore large parts of the whole program.
Solution
~~~~~~~~
The backend must have some logic so that if the currently processed module
is from the compilation cache, the `ast` field is not accessed. Instead
the generated C(++) for the symbol's body needs to be cached too and
inserted back into the produced C file. This approach seems to deal with
all the outlined problems above.
Debugging Nim's memory management
=================================
The following paragraphs are mostly a reminder for myself. Things to keep
in mind:
* If an assertion in Nim's memory manager or GC fails, the stack trace
keeps allocating memory! Thus a stack overflow may happen, hiding the
real issue.
* What seem to be C code generation problems is often a bug resulting from
not producing prototypes, so that some types default to `cint`. Testing
without the `-w` option helps!
The Garbage Collector
=====================
Introduction
------------
I use the term *cell* here to refer to everything that is traced
(sequences, refs, strings).
This section describes how the GC works.
The basic algorithm is *Deferred Reference Counting* with cycle detection.
References on the stack are not counted for better performance and easier C
code generation.
Each cell has a header consisting of a RC and a pointer to its type
descriptor. However the program does not know about these, so they are placed at
negative offsets. In the GC code the type `PCell` denotes a pointer
decremented by the right offset, so that the header can be accessed easily. It
is extremely important that `pointer` is not confused with a `PCell`
as this would lead to a memory corruption.
The CellSet data structure
--------------------------
The GC depends on an extremely efficient datastructure for storing a
set of pointers - this is called a `TCellSet` in the source code.
Inserting, deleting and searching are done in constant time. However,
modifying a `TCellSet` during traversal leads to undefined behaviour.
.. code-block:: Nim
type
TCellSet # hidden
proc cellSetInit(s: var TCellSet) # initialize a new set
proc cellSetDeinit(s: var TCellSet) # empty the set and free its memory
proc incl(s: var TCellSet, elem: PCell) # include an element
proc excl(s: var TCellSet, elem: PCell) # exclude an element
proc `in`(elem: PCell, s: TCellSet): bool # tests membership
iterator elements(s: TCellSet): (elem: PCell)
All the operations have to perform efficiently. Because a Cellset can
become huge a hash table alone is not suitable for this.
We use a mixture of bitset and hash table for this. The hash table maps *pages*
to a page descriptor. The page descriptor contains a bit for any possible cell
address within this page. So including a cell is done as follows:
- Find the page descriptor for the page the cell belongs to.
- Set the appropriate bit in the page descriptor indicating that the
cell points to the start of a memory block.
Removing a cell is analogous - the bit has to be set to zero.
Single page descriptors are never deleted from the hash table. This is not
needed as the data structures need to be rebuilt periodically anyway.
Complete traversal is done in this way::
for each page descriptor d:
for each bit in d:
if bit == 1:
traverse the pointer belonging to this bit
Further complications
---------------------
In Nim the compiler cannot always know if a reference
is stored on the stack or not. This is caused by var parameters.
Consider this example:
.. code-block:: Nim
proc setRef(r: var ref TNode) =
new(r)
proc usage =
var
r: ref TNode
setRef(r) # here we should not update the reference counts, because
# r is on the stack
setRef(r.left) # here we should update the refcounts!
We have to decide at runtime whether the reference is on the stack or not.
The generated code looks roughly like this:
.. code-block:: C
void setref(TNode** ref) {
unsureAsgnRef(ref, newObj(TNode_TI, sizeof(TNode)))
}
void usage(void) {
setRef(&r)
setRef(&r->left)
}
Note that for systems with a continuous stack (which most systems have)
the check whether the ref is on the stack is very cheap (only two
comparisons).
Code generation for closures
============================
@@ -598,14 +270,14 @@ This should produce roughly this code:
.. code-block:: nim
type
PEnv = ref object
Env = ref object
x: int # data
proc anon(y: int, c: PEnv): int =
proc anon(y: int, c: Env): int =
return y + c.x
proc add(x: int): tuple[prc, data] =
var env: PEnv
var env: Env
new env
env.x = x
result = (anon, env)
@@ -630,25 +302,25 @@ This should produce roughly this code:
.. code-block:: nim
type
PEnvX = ref object
EnvX = ref object
x: int # data
PEnvY = ref object
EnvY = ref object
y: int
ex: PEnvX
ex: EnvX
proc lambdaZ(z: int, ey: PEnvY): int =
proc lambdaZ(z: int, ey: EnvY): int =
return ey.ex.x + ey.y + z
proc lambdaY(y: int, ex: PEnvX): tuple[prc, data: PEnvY] =
var ey: PEnvY
proc lambdaY(y: int, ex: EnvX): tuple[prc, data: EnvY] =
var ey: EnvY
new ey
ey.y = y
ey.ex = ex
result = (lambdaZ, ey)
proc add(x: int): tuple[prc, data: PEnvX] =
var ex: PEnvX
proc add(x: int): tuple[prc, data: EnvX] =
var ex: EnvX
ex.x = x
result = (labmdaY, ex)
@@ -663,17 +335,11 @@ More useful is escape analysis and stack allocation of the environment,
however.
Alternative
-----------
Process the closure of all inner procs in one pass and accumulate the
environments. This is however not always possible.
Accumulator
-----------
.. code-block:: nim
proc getAccumulator(start: int): proc (): int {.closure} =
var i = start
return lambda: int =
@@ -708,20 +374,26 @@ backend somehow. We deal with this by modifying `s.ast[paramPos]` to contain
the formal hidden parameter, but not `s.typ`!
Integer literals:
-----------------
Notes on type and AST representation
====================================
To be expanded.
Integer literals
----------------
In Nim, there is a redundant way to specify the type of an
integer literal. First of all, it should be unsurprising that every
node has a node kind. The node of an integer literal can be any of the
following values:
following values::
nkIntLit, nkInt8Lit, nkInt16Lit, nkInt32Lit, nkInt64Lit,
nkUIntLit, nkUInt8Lit, nkUInt16Lit, nkUInt32Lit, nkUInt64Lit
On top of that, there is also the `typ` field for the type. The
kind of the `typ` field can be one of the following ones, and it
should be matching the literal kind:
should be matching the literal kind::
tyInt, tyInt8, tyInt16, tyInt32, tyInt64, tyUInt, tyUInt8,
tyUInt16, tyUInt32, tyUInt64
@@ -777,6 +449,7 @@ pointing back to the integer literal node in the ast containing the
integer value. These are the properties that hold true for integer
literal types.
::
n.kind == nkIntLit
n.typ.kind == tyInt
n.typ.n == n

View File

@@ -490,19 +490,26 @@ this. Another reason is that Nim can thus support `array[char, int]` or
type is used for Unicode characters, it can represent any Unicode character.
`Rune` is declared in the `unicode module <unicode.html>`_.
A character literal that does not end in ``'`` is interpreted as ``'`` if there
is a preceding backtick token. There must be no whitespace between the preceding
backtick token and the character literal. This special rule ensures that a declaration
like ``proc `'customLiteral`(s: string)`` is valid. See also
`Custom Numeric Literals <#custom-numeric-literals>`_.
Numerical constants
-------------------
Numerical constants are of a single type and have the form::
Numeric Literals
----------------
Numeric literals have the form::
hexdigit = digit | 'A'..'F' | 'a'..'f'
octdigit = '0'..'7'
bindigit = '0'..'1'
HEX_LIT = '0' ('x' | 'X' ) hexdigit ( ['_'] hexdigit )*
DEC_LIT = digit ( ['_'] digit )*
OCT_LIT = '0' 'o' octdigit ( ['_'] octdigit )*
BIN_LIT = '0' ('b' | 'B' ) bindigit ( ['_'] bindigit )*
unary_minus = '-' # See the section about unary minus
HEX_LIT = unary_minus? '0' ('x' | 'X' ) hexdigit ( ['_'] hexdigit )*
DEC_LIT = unary_minus? digit ( ['_'] digit )*
OCT_LIT = unary_minus? '0' 'o' octdigit ( ['_'] octdigit )*
BIN_LIT = unary_minus? '0' ('b' | 'B' ) bindigit ( ['_'] bindigit )*
INT_LIT = HEX_LIT
| DEC_LIT
@@ -521,7 +528,7 @@ Numerical constants are of a single type and have the form::
UINT64_LIT = INT_LIT ['\''] ('u' | 'U') '64'
exponent = ('e' | 'E' ) ['+' | '-'] digit ( ['_'] digit )*
FLOAT_LIT = digit (['_'] digit)* (('.' digit (['_'] digit)* [exponent]) |exponent)
FLOAT_LIT = unary_minus? digit (['_'] digit)* (('.' digit (['_'] digit)* [exponent]) |exponent)
FLOAT32_SUFFIX = ('f' | 'F') ['32']
FLOAT32_LIT = HEX_LIT '\'' FLOAT32_SUFFIX
| (FLOAT_LIT | DEC_LIT | OCT_LIT | BIN_LIT) ['\''] FLOAT32_SUFFIX
@@ -529,12 +536,49 @@ Numerical constants are of a single type and have the form::
FLOAT64_LIT = HEX_LIT '\'' FLOAT64_SUFFIX
| (FLOAT_LIT | DEC_LIT | OCT_LIT | BIN_LIT) ['\''] FLOAT64_SUFFIX
CUSTOM_NUMERIC_LIT = (FLOAT_LIT | INT_LIT) '\'' CUSTOM_NUMERIC_SUFFIX
As can be seen in the productions, numerical constants can contain underscores
# CUSTOM_NUMERIC_SUFFIX is any Nim identifier that is not
# a pre-defined type suffix.
As can be seen in the productions, numeric literals can contain underscores
for readability. Integer and floating-point literals may be given in decimal (no
prefix), binary (prefix `0b`), octal (prefix `0o`), and hexadecimal
(prefix `0x`) notation.
The fact that the unary minus `-` in a number literal like `-1` is considered
to be part of the literal is a late addition to the language. The rationale is that
an expression `-128'i8` should be valid and without this special case, this would
be impossible -- `128` is not a valid `int8` value, only `-128` is.
For the `unary_minus` rule there are further restrictions that are not covered
in the formal grammar. For `-` to be part of the number literal its immediately
preceding character has to be in the
set `{' ', '\t', '\n', '\r', ',', ';', '(', '[', '{'}`. This set was designed to
cover most cases in a natural manner.
In the following examples, `-1` is a single token:
.. code-block:: nim
echo -1
echo(-1)
echo [-1]
echo 3,-1
"abc";-1
In the following examples, `-1` is parsed as two separate tokens (as `- 1`):
.. code-block:: nim
echo x-1
echo (int)-1
echo [a]-1
"abc"-1
There exists a literal for each numerical type that is
defined. The suffix starting with an apostrophe ('\'') is called a
`type suffix`:idx:. Literals without a type suffix are of an integer type
@@ -546,7 +590,7 @@ is optional if it is not ambiguous (only hexadecimal floating-point literals
with a type suffix can be ambiguous).
The type suffixes are:
The pre-defined type suffixes are:
================= =========================
Type Suffix Resulting type of literal
@@ -578,6 +622,43 @@ the bit width of the datatype, it is accepted.
Hence: 0b10000000'u8 == 0x80'u8 == 128, but, 0b10000000'i8 == 0x80'i8 == -1
instead of causing an overflow error.
Custom Numeric Literals
~~~~~~~~~~~~~~~~~~~~~~~
If the suffix is not predefined, then the suffix is assumed to be a call
to a proc, template, macro or other callable identifier that is passed the
string containing the literal. The callable identifier needs to be declared
with a special ``'`` prefix:
.. code-block:: nim
import strutils
type u4 = distinct uint8 # a 4-bit unsigned integer aka "nibble"
proc `'u4`(n: string): u4 =
# The leading ' is required.
result = (parseInt(n) and 0x0F).u4
var x = 5'u4
More formally, a custom numeric literal `123'custom` is transformed
to r"123".`'custom` in the parsing step. There is no AST node kind that
corresponds to this transformation. The transformation naturally handles
the case that additional parameters are passed to the callee:
.. code-block:: nim
import strutils
type u4 = distinct uint8 # a 4-bit unsigned integer aka "nibble"
proc `'u4`(n: string; moreData: int): u4 =
result = (parseInt(n) and 0x0F).u4
var x = 5'u4(123)
Custom numeric literals are covered by the grammar rule named `CUSTOM_NUMERIC_LIT`.
A custom numeric literal is a single token.
Operators
---------

View File

@@ -1283,7 +1283,7 @@ all the arguments, but also the matched operators in reverse polish notation:
echo x + y * z - x
This passes the expression `x + y * z - x` to the `optM` macro as
an `nnkArglist` node containing::
an `nnkArgList` node containing::
Arglist
Sym "x"

View File

@@ -157,7 +157,8 @@ English word To use Notes
------------------- ------------ --------------------------------------
initialize initFoo initializes a value type `Foo`
new newFoo initializes a reference type `Foo`
via `new`
via `new` or a value type `Foo`
with reference semantics.
this or self self for method like procs, e.g.:
`proc fun(self: Foo, a: int)`
rationale: `self` is more unique in English

View File

@@ -4,17 +4,20 @@
nimgrep User's manual
=========================
.. default-role:: literal
:Author: Andreas Rumpf
:Version: 0.9
:Version: 1.6.0
.. contents::
Nimgrep is a command line tool for search&replace tasks. It can search for
Nimgrep is a command line tool for search and replace tasks. It can search for
regex or peg patterns and can search whole directories at once. User
confirmation for every single replace operation can be requested.
Nimgrep has particularly good support for Nim's
eccentric *style insensitivity*. Apart from that it is a generic text
manipulation tool.
eccentric *style insensitivity* (see option `-y` below).
Apart from that it is a generic text manipulation tool.
Installation
@@ -30,23 +33,38 @@ And copy the executable somewhere in your `$PATH`.
Command line switches
=====================
Usage:
nimgrep [options] [pattern] [replacement] (file/directory)*
Options:
--find, -f find the pattern (default)
--replace, -r replace the pattern
--peg pattern is a peg
--re pattern is a regular expression (default); extended
syntax for the regular expression is always turned on
--recursive process directories recursively
--confirm confirm each occurrence/replacement; there is a chance
to abort any time without touching the file
--stdin read pattern from stdin (to avoid the shell's confusing
quoting rules)
--word, -w the match should have word boundaries (buggy for pegs!)
--ignoreCase, -i be case insensitive
--ignoreStyle, -y be style insensitive
--ext:EX1|EX2|... only search the files with the given extension(s)
--verbose be verbose: list every processed file
--help, -h shows this help
--version, -v shows the version
.. include:: nimgrep_cmdline.txt
Examples
========
All examples below use default PCRE Regex patterns:
+ To search recursively in Nim files using style-insensitive identifiers::
--recursive --ext:'nim|nims' --ignoreStyle
# short: -r --ext:'nim|nims' -y
.. Note:: we used `'` quotes to avoid special treatment of `|` symbol
for shells like Bash
+ To exclude version control directories (Git, Mercurial=hg, Subversion=svn)
from the search::
--excludeDir:'^\.git$' --excludeDir:'^\.hg$' --excludeDir:'^\.svn$'
# short: --ed:'^\.git$' --ed:'^\.hg$' --ed:'^\.svn$'
+ To search only in paths containing the `tests` sub-directory recursively::
--recursive --includeDir:'(^|/)tests($|/)'
# short: -r --id:'(^|/)tests($|/)'
.. Attention:: note the subtle difference between `--excludeDir` and
`--includeDir`: the former is applied to relative directory entries
and the latter is applied to the whole paths
+ Nimgrep can search multi-line, e.g. to find files containing `import`
and then `strutils` use::
'import(.|\n)*?strutils'

114
doc/nimgrep_cmdline.txt Normal file
View File

@@ -0,0 +1,114 @@
Usage:
* To search::
nimgrep [options] PATTERN [(FILE/DIRECTORY)*/-]
* To replace::
nimgrep [options] PATTERN --replace REPLACEMENT (FILE/DIRECTORY)*/-
* To list file names::
nimgrep [options] --filenames [PATTERN] [(FILE/DIRECTORY)*]
Positional arguments, from left to right:
1) PATTERN is either Regex (default) or Peg if `--peg` is specified.
PATTERN and REPLACEMENT should be skipped when `--stdin` is specified.
2) REPLACEMENT supports `$1`, `$#` notations for captured groups in PATTERN.
.. DANGER:: `--replace` mode **DOES NOT** ask confirmation
unless `--confirm` is specified!
3) Final arguments are a list of paths (FILE/DIRECTORY) or a standalone
minus `-` or not specified (empty):
* empty, current directory `.` is assumed (not with `--replace`)
.. Note:: so when no FILE/DIRECTORY/`-` is specified nimgrep
does **not** read the pipe, but searches files in the current
dir instead!
* `-`, read buffer once from stdin: pipe or terminal input;
in `--replace` mode the result is directed to stdout;
it's not compatible with `--stdin`, `--filenames`, or `--confirm`
For any given DIRECTORY nimgrep searches only its immediate files without
traversing sub-directories unless `--recursive` is specified.
In replacement mode we require all 3 positional arguments to avoid accidental damage.
Options:
* Mode of operation:
--find, -f find the PATTERN (default)
--replace, -! replace the PATTERN to REPLACEMENT, rewriting the files
--confirm confirm each occurrence/replacement; there is a chance
to abort any time without touching the file
--filenames just list filenames. Provide a PATTERN to find it in
the filenames (not in the contents of a file) or run
with empty pattern to just list all files::
nimgrep --filenames # In current dir
nimgrep --filenames "" DIRECTORY
# Note empty pattern "", lists all files in DIRECTORY
* Interpret patterns:
--peg PATTERN and PAT are Peg
--re PATTERN and PAT are regular expressions (default)
--rex, -x use the "extended" syntax for the regular expression
so that whitespace is not significant
--word, -w matches should have word boundaries (buggy for pegs!)
--ignoreCase, -i be case insensitive in PATTERN and PAT
--ignoreStyle, -y be style insensitive in PATTERN and PAT
.. Note:: PATTERN and patterns PAT (see below in other options) are all either
Regex or Peg simultaneously and options `--rex`, `--word`, `--ignoreCase`,
and `--ignoreStyle` are applied to all of them.
* File system walk:
--recursive, -r process directories recursively
--follow follow all symlinks when processing recursively
--ext:EX1|EX2|... only search the files with the given extension(s),
empty one ("--ext") means files with missing extension
--noExt:EX1|... exclude files having given extension(s), use empty one to
skip files with no extension (like some binary files are)
--includeFile:PAT search only files whose names contain pattern PAT
--excludeFile:PAT skip files whose names contain pattern PAT
--includeDir:PAT search only files with their whole directory path
containing PAT
--excludeDir:PAT skip directories whose name (not path)
contain pattern PAT
--if,--ef,--id,--ed abbreviations of the 4 options above
--sortTime, -s[:asc|desc]
order files by the last modification time (default: off):
ascending (recent files go last) or descending
* Filter file content:
--match:PAT select files containing a (not displayed) match of PAT
--noMatch:PAT select files not containing any match of PAT
--bin:on|off|only process binary files? (detected by \0 in first 1K bytes)
(default: on - binary and text files treated the same way)
--text, -t process only text files, the same as `--bin:off`
* Represent results:
--nocolor output will be given without any colors
--color[:on] force color even if output is redirected (default: auto)
--colorTheme:THEME select color THEME from `simple` (default),
`bnw` (black and white), `ack`, or `gnu` (GNU grep)
--count only print counts of matches for files that matched
--context:N, -c:N print N lines of leading context before every match and
N lines of trailing context after it (default N: 0)
--afterContext:N, -a:N
print N lines of trailing context after every match
--beforeContext:N, -b:N
print N lines of leading context before every match
--group, -g group matches by file
--newLine, -l display every matching line starting from a new line
--cols[:N] limit max displayed columns/width of output lines from
files by N characters, cropping overflows (default: off)
--cols:auto, -% calculate columns from terminal width for every line
--onlyAscii, -@ display only printable ASCII Latin characters 0x20-0x7E
substitutions: 0 -> ^@, 1 -> ^A, ... 0x1F -> ^_,
0x7F -> '7F, ..., 0xFF -> 'FF
* Miscellaneous:
--threads:N, -j:N speed up search by N additional workers (default: 0, off)
--stdin read PATTERN from stdin (to avoid the shell's confusing
quoting rules) and, if `--replace` given, REPLACEMENT
--verbose be verbose: list every processed file
--help, -h shows this help
--version, -v shows the version

View File

@@ -458,8 +458,8 @@ proc temp(args: string) =
inc i
let d = getAppDir()
var output = d / "compiler" / "nim".exe
var finalDest = d / "bin" / "nim_temp".exe
let output = d / "compiler" / "nim".exe
let finalDest = d / "bin" / "nim_temp".exe
# 125 is the magic number to tell git bisect to skip the current commit.
var (bootArgs, programArgs) = splitArgs(args)
if "doc" notin programArgs and
@@ -483,6 +483,22 @@ proc xtemp(cmd: string) =
finally:
copyExe(d / "bin" / "nim_backup".exe, d / "bin" / "nim".exe)
proc icTest(args: string) =
temp("")
let inp = os.parseCmdLine(args)[0]
let content = readFile(inp)
let nimExe = getAppDir() / "bin" / "nim_temp".exe
var i = 0
for fragment in content.split("#!EDIT!#"):
let file = inp.replace(".nim", "_temp.nim")
writeFile(file, fragment)
var cmd = nimExe & " cpp --ic:on --listcmd "
if i == 0:
cmd.add "-f "
cmd.add quoteShell(file)
exec(cmd)
inc i
proc buildDrNim(args: string) =
if not dirExists("dist/nimz3"):
exec("git clone https://github.com/zevv/nimz3.git dist/nimz3")
@@ -705,6 +721,7 @@ when isMainModule:
of "fusion":
let suffix = if latest: HeadHash else: FusionStableHash
exec("nimble install -y fusion@$#" % suffix)
of "ic": icTest(op.cmdLineRest)
else: showHelp()
break
of cmdEnd: break

View File

@@ -78,7 +78,7 @@ type
nnkSharedTy, # 'shared T'
nnkEnumTy,
nnkEnumFieldDef,
nnkArglist, nnkPattern
nnkArgList, nnkPattern
nnkHiddenTryStmt,
nnkClosure,
nnkGotoState,

View File

@@ -82,6 +82,14 @@
## * ***triple emphasis*** (bold and italic) using \*\*\*
## * ``:idx:`` role for \`interpreted text\` to include the link to this
## text into an index (example: `Nim index`_).
## * double slash `//` in option lists serves as a prefix for any option that
## starts from a word (without any leading symbols like `-`, `--`, `/`)::
##
## //compile compile the project
## //doc generate documentation
##
## Here the dummy `//` will disappear, while options ``compile``
## and ``doc`` will be left in the final document.
##
## .. [cmp:Sphinx] similar but different from the directives of
## Python `Sphinx directives`_ extensions
@@ -548,6 +556,67 @@ proc pushInd(p: var RstParser, ind: int) =
proc popInd(p: var RstParser) =
if p.indentStack.len > 1: setLen(p.indentStack, p.indentStack.len - 1)
# Working with indentation in rst.nim
# -----------------------------------
#
# Every line break has an associated tkIndent.
# The tokenizer writes back the first column of next non-blank line
# in all preceding tkIndent tokens to the `ival` field of tkIndent.
#
# RST document is separated into body elements (B.E.), every of which
# has a dedicated handler proc (or block of logic when B.E. is a block quote)
# that should follow the next rule:
# Every B.E. handler proc should finish at tkIndent (newline)
# after its B.E. finishes.
# Then its callers (which is `parseSection` or another B.E. handler)
# check for tkIndent ival (without necessity to advance `p.idx`)
# and decide themselves whether they continue processing or also stop.
#
# An example::
#
# L RST text fragment indentation
# +--------------------+
# 1 | | <- (empty line at the start of file) no tokens
# 2 |First paragraph. | <- tkIndent has ival=0, and next tkWord has col=0
# 3 | | <- tkIndent has ival=0
# 4 |* bullet item and | <- tkIndent has ival=0, and next tkPunct has col=0
# 5 | its continuation | <- tkIndent has ival=2, and next tkWord has col=2
# 6 | | <- tkIndent has ival=4
# 7 | Block quote | <- tkIndent has ival=4, and next tkWord has col=4
# 8 | | <- tkIndent has ival=0
# 9 | | <- tkIndent has ival=0
# 10|Final paragraph | <- tkIndent has ival=0, and tkWord has col=0
# +--------------------+
# C:01234
#
# Here parser starts with initial `indentStack=[0]` and then calls the
# 1st `parseSection`:
#
# - `parseSection` calls `parseParagraph` and "First paragraph" is parsed
# - bullet list handler is started at reaching ``*`` (L4 C0), it
# starts bullet item logic (L4 C2), which calls `pushInd(p, ind=2)`,
# then calls `parseSection` (2nd call, nested) which parses
# paragraph "bullet list and its continuation" and then starts
# a block quote logic (L7 C4).
# The block quote logic calls `pushInd(p, ind=4)` and
# calls `parseSection` again, so a (simplified) sequence of calls now is::
#
# parseSection -> parseBulletList ->
# parseSection (+block quote logic) -> parseSection
#
# 3rd `parseSection` finishes, block quote logic calls `popInd(p)`,
# it returns to bullet item logic, which sees that next tkIndent has
# ival=0 and stops there since the required indentation for a bullet item
# is 2 and 0<2; the bullet item logic calls `popInd(p)`.
# Then bullet list handler checks that next tkWord (L10 C0) has the
# right indentation but does not have ``*`` so stops at tkIndent (L10).
# - 1st `parseSection` invocation calls `parseParagraph` and the
# "Final paragraph" is parsed.
#
# If a B.E. handler has advanced `p.idx` past tkIndent to check
# whether it should continue its processing or not, and decided not to,
# then this B.E. handler should step back (e.g. do `dec p.idx`).
proc initParser(p: var RstParser, sharedState: PSharedState) =
p.indentStack = @[0]
p.tok = @[]
@@ -852,6 +921,9 @@ proc isInlineMarkupEnd(p: RstParser, markup: string): bool =
if not result: return
# Rule 4:
if p.idx > 0:
# see bug #17260; for now `\` must be written ``\``, likewise with sequences
# ending in an un-escaped `\`; `\\` is legal but not `\\\` for example;
# for this reason we can't use `["``", "`"]` here.
if markup != "``" and prevTok(p).symbol == "\\":
result = false
@@ -1089,11 +1161,19 @@ proc parseUntil(p: var RstParser, father: PRstNode, postfix: string,
if isInlineMarkupEnd(p, postfix):
inc p.idx
break
elif interpretBackslash:
parseBackslash(p, father)
else:
father.add(newLeaf(p))
inc p.idx
if postfix == "`":
if prevTok(p).symbol == "\\" and currentTok(p).symbol == "`":
father.sons[^1] = newLeaf(p) # instead, we should use lookahead
else:
father.add(newLeaf(p))
inc p.idx
else:
if interpretBackslash:
parseBackslash(p, father)
else:
father.add(newLeaf(p))
inc p.idx
of tkAdornment, tkWord, tkOther:
father.add(newLeaf(p))
inc p.idx
@@ -1243,7 +1323,7 @@ proc parseInline(p: var RstParser, father: PRstNode) =
father.add(n)
elif isInlineMarkupStart(p, "`"):
var n = newRstNode(rnInterpretedText)
parseUntil(p, n, "`", true)
parseUntil(p, n, "`", false) # bug #17260
n = parsePostfix(p, n)
father.add(n)
elif isInlineMarkupStart(p, "|"):
@@ -1901,8 +1981,9 @@ proc parseBulletList(p: var RstParser): PRstNode =
proc parseOptionList(p: var RstParser): PRstNode =
result = newRstNodeA(p, rnOptionList)
let col = currentTok(p).col
while true:
if isOptionList(p):
if currentTok(p).col == col and isOptionList(p):
var a = newRstNode(rnOptionGroup)
var b = newRstNode(rnDescription)
var c = newRstNode(rnOptionListItem)
@@ -1925,6 +2006,7 @@ proc parseOptionList(p: var RstParser): PRstNode =
c.add(b)
result.add(c)
else:
dec p.idx # back to tkIndent
break
proc parseDefinitionList(p: var RstParser): PRstNode =

View File

@@ -336,12 +336,12 @@ template `<-`(a, b) =
else:
copyMem(addr(a), addr(b), sizeof(T))
proc merge[T](a, b: var openArray[T], lo, m, hi: int,
proc mergeAlt[T](a, b: var openArray[T], lo, m, hi: int,
cmp: proc (x, y: T): int {.closure.}, order: SortOrder) =
# Optimization: If max(left) <= min(right) there is nothing to do!
# 1 2 3 4 ## 5 6 7 8
# -> O(n) for sorted arrays.
# On random data this saves up to 40% of merge calls.
# On random data this saves up to 40% of mergeAlt calls.
if cmp(a[m], a[m+1]) * order <= 0: return
var j = lo
# copy a[j..m] into b:
@@ -424,7 +424,7 @@ func sort*[T](a: var openArray[T],
while s < n:
var m = n-1-s
while m >= 0:
merge(a, b, max(m-s+1, 0), m, m+s, cmp, order)
mergeAlt(a, b, max(m-s+1, 0), m, m+s, cmp, order)
dec(m, s*2)
s = s*2

View File

@@ -413,7 +413,6 @@ func fastlog2Nim(x: uint64): int {.inline.} =
import system/countbits_impl
const arch64 = sizeof(int) == 8
const useBuiltinsRotate = (defined(amd64) or defined(i386)) and
(defined(gcc) or defined(clang) or defined(vcc) or
(defined(icl) and not defined(cpp))) and useBuiltins

View File

@@ -702,7 +702,7 @@ iterator mpairs*[A, B](t: var Table[A, B]): (A, var B) =
yield (t.data[h].key, t.data[h].val)
assert(len(t) == L, "the length of the table changed while iterating over it")
iterator keys*[A, B](t: Table[A, B]): A =
iterator keys*[A, B](t: Table[A, B]): lent A =
## Iterates over any key in the table `t`.
##
## See also:
@@ -723,7 +723,7 @@ iterator keys*[A, B](t: Table[A, B]): A =
yield t.data[h].key
assert(len(t) == L, "the length of the table changed while iterating over it")
iterator values*[A, B](t: Table[A, B]): B =
iterator values*[A, B](t: Table[A, B]): lent B =
## Iterates over any value in the table `t`.
##
## See also:
@@ -1146,7 +1146,7 @@ iterator mpairs*[A, B](t: TableRef[A, B]): (A, var B) =
yield (t.data[h].key, t.data[h].val)
assert(len(t) == L, "the length of the table changed while iterating over it")
iterator keys*[A, B](t: TableRef[A, B]): A =
iterator keys*[A, B](t: TableRef[A, B]): lent A =
## Iterates over any key in the table `t`.
##
## See also:
@@ -1167,7 +1167,7 @@ iterator keys*[A, B](t: TableRef[A, B]): A =
yield t.data[h].key
assert(len(t) == L, "the length of the table changed while iterating over it")
iterator values*[A, B](t: TableRef[A, B]): B =
iterator values*[A, B](t: TableRef[A, B]): lent B =
## Iterates over any value in the table `t`.
##
## See also:
@@ -1722,7 +1722,7 @@ iterator mpairs*[A, B](t: var OrderedTable[A, B]): (A, var B) =
yield (t.data[h].key, t.data[h].val)
assert(len(t) == L, "the length of the table changed while iterating over it")
iterator keys*[A, B](t: OrderedTable[A, B]): A =
iterator keys*[A, B](t: OrderedTable[A, B]): lent A =
## Iterates over any key in the table `t` in insertion order.
##
## See also:
@@ -1743,7 +1743,7 @@ iterator keys*[A, B](t: OrderedTable[A, B]): A =
yield t.data[h].key
assert(len(t) == L, "the length of the table changed while iterating over it")
iterator values*[A, B](t: OrderedTable[A, B]): B =
iterator values*[A, B](t: OrderedTable[A, B]): lent B =
## Iterates over any value in the table `t` in insertion order.
##
## See also:
@@ -2130,7 +2130,7 @@ iterator mpairs*[A, B](t: OrderedTableRef[A, B]): (A, var B) =
yield (t.data[h].key, t.data[h].val)
assert(len(t) == L, "the length of the table changed while iterating over it")
iterator keys*[A, B](t: OrderedTableRef[A, B]): A =
iterator keys*[A, B](t: OrderedTableRef[A, B]): lent A =
## Iterates over any key in the table `t` in insertion order.
##
## See also:
@@ -2151,7 +2151,7 @@ iterator keys*[A, B](t: OrderedTableRef[A, B]): A =
yield t.data[h].key
assert(len(t) == L, "the length of the table changed while iterating over it")
iterator values*[A, B](t: OrderedTableRef[A, B]): B =
iterator values*[A, B](t: OrderedTableRef[A, B]): lent B =
## Iterates over any value in the table `t` in insertion order.
##
## See also:
@@ -2543,7 +2543,7 @@ iterator mpairs*[A](t: var CountTable[A]): (A, var int) =
yield (t.data[h].key, t.data[h].val)
assert(len(t) == L, "the length of the table changed while iterating over it")
iterator keys*[A](t: CountTable[A]): A =
iterator keys*[A](t: CountTable[A]): lent A =
## Iterates over any key in the table `t`.
##
## See also:

View File

@@ -322,7 +322,7 @@ macro html*(e: varargs[untyped]): untyped =
macro hr*(): untyped =
## Generates the HTML `hr` element.
result = xmlCheckedTag(newNimNode(nnkArglist), "hr", commonAttr, "", true)
result = xmlCheckedTag(newNimNode(nnkArgList), "hr", commonAttr, "", true)
macro i*(e: varargs[untyped]): untyped =
## Generates the HTML `i` element.

View File

@@ -927,7 +927,7 @@ when defined(js):
proc parseNativeJson(x: cstring): JSObject {.importjs: "JSON.parse(#)".}
proc getVarType(x: JSObject): JsonNodeKind =
proc getVarType(x: JSObject, isRawNumber: var bool): JsonNodeKind =
result = JNull
case $getProtoName(x) # TODO: Implicit returns fail here.
of "[object Array]": return JArray
@@ -937,6 +937,7 @@ when defined(js):
if isSafeInteger(x):
return JInt
else:
isRawNumber = true
return JString
else:
return JFloat
@@ -946,13 +947,13 @@ when defined(js):
else: assert false
proc len(x: JSObject): int =
assert x.getVarType == JArray
asm """
`result` = `x`.length;
"""
proc convertObject(x: JSObject): JsonNode =
case getVarType(x)
var isRawNumber = false
case getVarType(x, isRawNumber)
of JArray:
result = newJArray()
for i in 0 ..< x.len:
@@ -973,7 +974,12 @@ when defined(js):
result = newJFloat(x.to(float))
of JString:
# Dunno what to do with isUnquoted here
result = newJString($x.to(cstring))
if isRawNumber:
var value: cstring
{.emit: "`value` = `x`.toString();".}
result = newJRawNumber($value)
else:
result = newJString($x.to(cstring))
of JBool:
result = newJBool(x.to(bool))
of JNull:
@@ -1069,12 +1075,13 @@ when defined(nimFixedForwardGeneric):
dst = jsonNode.copy
proc initFromJson[T: SomeInteger](dst: var T; jsonNode: JsonNode, jsonPath: var string) =
when T is uint|uint64:
when T is uint|uint64 or (not defined(js) and int.sizeof == 4):
verifyJsonKind(jsonNode, {JInt, JString}, jsonPath)
case jsonNode.kind
of JString:
dst = T(parseBiggestUInt(jsonNode.str))
let x = parseBiggestUInt(jsonNode.str)
dst = cast[T](x)
else:
verifyJsonKind(jsonNode, {JInt}, jsonPath)
dst = T(jsonNode.num)
else:
verifyJsonKind(jsonNode, {JInt}, jsonPath)

View File

@@ -174,6 +174,7 @@ runnableExamples:
import strutils, lexbase, streams, tables
import std/private/decode_helpers
import std/private/since
include "system/inclrtl"
@@ -649,3 +650,8 @@ proc delSectionKey*(dict: var Config, section, key: string) =
dict.del(section)
else:
dict[section].del(key)
iterator sections*(dict: Config): lent string {.since: (1, 5).} =
## Iterates through the sections in the `dict`.
for section in dict.keys:
yield section

View File

@@ -39,7 +39,7 @@ proc underscoredCall(n, arg0: NimNode): NimNode =
result.add arg0
proc underscoredCalls*(result, calls, arg0: NimNode) =
expectKind calls, {nnkArglist, nnkStmtList, nnkStmtListExpr}
expectKind calls, {nnkArgList, nnkStmtList, nnkStmtListExpr}
for call in calls:
if call.kind in {nnkStmtList, nnkStmtListExpr}:

View File

@@ -1321,7 +1321,7 @@ proc delete*[T](x: var seq[T], i: Natural) {.noSideEffect.} =
## This is an `O(n)` operation.
##
## See also:
## * `del <#delete,seq[T],Natural>`_ for O(1) operation
## * `del <#del,seq[T],Natural>`_ for O(1) operation
##
## .. code-block:: Nim
## var i = @[1, 2, 3, 4, 5]
@@ -2827,7 +2827,7 @@ when declared(initDebugger):
proc addEscapedChar*(s: var string, c: char) {.noSideEffect, inline.} =
## Adds a char to string `s` and applies the following escaping:
##
## * replaces any `\` by `\\`
## * replaces any ``\`` by `\\`
## * replaces any `'` by `\'`
## * replaces any `"` by `\"`
## * replaces any `\a` by `\\a`
@@ -2838,7 +2838,7 @@ proc addEscapedChar*(s: var string, c: char) {.noSideEffect, inline.} =
## * replaces any `\f` by `\\f`
## * replaces any `\r` by `\\r`
## * replaces any `\e` by `\\e`
## * replaces any other character not in the set `{'\21..'\126'}
## * replaces any other character not in the set `{\21..\126}`
## by `\xHH` where `HH` is its hexadecimal value.
##
## The procedure has been designed so that its output is usable for many

View File

@@ -7,7 +7,40 @@
# distribution, for details about the copyright.
#
# Efficient set of pointers for the GC (and repr)
#[
Efficient set of pointers for the GC (and repr)
-----------------------------------------------
The GC depends on an extremely efficient data structure for storing a
set of pointers - this is called a `CellSet` in the source code.
Inserting, deleting and searching are done in constant time. However,
modifying a `CellSet` during traversal leads to undefined behaviour.
All operations on a CellSet have to perform efficiently. Because a CellSet can
become huge a hash table alone is not suitable for this.
We use a mixture of bitset and hash table for this. The hash table maps *pages*
to a page descriptor. The page descriptor contains a bit for any possible cell
address within this page. So including a cell is done as follows:
- Find the page descriptor for the page the cell belongs to.
- Set the appropriate bit in the page descriptor indicating that the
cell points to the start of a memory block.
Removing a cell is analogous - the bit has to be set to zero.
Single page descriptors are never deleted from the hash table. This is not
needed as the data structure needs to be rebuilt periodically anyway.
Complete traversal is done in this way::
for each page descriptor d:
for each bit in d:
if bit == 1:
traverse the pointer belonging to this bit
]#
when defined(gcOrc) or defined(gcArc):
type

View File

@@ -18,6 +18,7 @@ const useGCC_builtins* = (defined(gcc) or defined(llvm_gcc) or
defined(clang)) and useBuiltins
const useICC_builtins* = defined(icc) and useBuiltins
const useVCC_builtins* = defined(vcc) and useBuiltins
const arch64* = sizeof(int) == 8
template countBitsImpl(n: uint32): int =
# generic formula is from: https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel

View File

@@ -12,6 +12,53 @@
# Refcounting + Mark&Sweep. Complex algorithms avoided.
# Been there, done that, didn't work.
#[
A *cell* is anything that is traced by the GC
(sequences, refs, strings, closures).
The basic algorithm is *Deferrent Reference Counting* with cycle detection.
References on the stack are not counted for better performance and easier C
code generation.
Each cell has a header consisting of a RC and a pointer to its type
descriptor. However the program does not know about these, so they are placed at
negative offsets. In the GC code the type `PCell` denotes a pointer
decremented by the right offset, so that the header can be accessed easily. It
is extremely important that `pointer` is not confused with a `PCell`.
In Nim the compiler cannot always know if a reference
is stored on the stack or not. This is caused by var parameters.
Consider this example:
.. code-block:: Nim
proc setRef(r: var ref TNode) =
new(r)
proc usage =
var
r: ref TNode
setRef(r) # here we should not update the reference counts, because
# r is on the stack
setRef(r.left) # here we should update the refcounts!
We have to decide at runtime whether the reference is on the stack or not.
The generated code looks roughly like this:
.. code-block:: C
void setref(TNode** ref) {
unsureAsgnRef(ref, newObj(TNode_TI, sizeof(TNode)))
}
void usage(void) {
setRef(&r)
setRef(&r->left)
}
Note that for systems with a continuous stack (which most systems have)
the check whether the ref is on the stack is very cheap (only two
comparisons).
]#
{.push profiler:off.}
const

View File

@@ -300,7 +300,7 @@ stmt = IND{&gt;} stmt ^+ IND{=} DED # list of statements
<p>However, this does not work. The problem is that the procedure should not only <tt class="docutils literal"><span class="pre">return</span></tt>, but return and <strong>continue</strong> after an iteration has finished. This <em>return and continue</em> is called a <tt class="docutils literal"><span class="pre">yield</span></tt> statement. Now the only thing left to do is to replace the <tt class="docutils literal"><span class="pre">proc</span></tt> keyword by <tt class="docutils literal"><span class="pre">iterator</span></tt> and here it is - our first iterator:</p>
<table border="1" class="docutils"><tr><th>A1 header</th><th>A2 | not fooled</th></tr>
<tr><td>C1</td><td>C2 <strong>bold</strong></td></tr>
<tr><td>D1 <tt class="docutils literal"><span class="pre">code |</span></tt></td><td>D2</td></tr>
<tr><td>D1 <tt class="docutils literal"><span class="pre">code \|</span></tt></td><td>D2</td></tr>
<tr><td>E1 | text</td><td></td></tr>
<tr><td></td><td>F2 without pipe</td></tr>
</table><p>not in table </p>

View File

@@ -2,7 +2,10 @@
## Supported Versions
Security fixes are provided in new releases and bugfix releases.
Security advisories are published at:
https://github.com/nim-lang/security/security/advisories?state=published
Security fixes are provided in new releases and in bugfix releases.
We do not backport security fixes to older releases.
@@ -10,8 +13,8 @@ We do not backport security fixes to older releases.
## Reporting a Vulnerability
Please do not report vulnerabilities via GitHub issues.
If you have discovered a vulnerability, it is best to notify us about it via
If you have discovered a vulnerability, please notify us about it via
security@nim-lang.org in order to set up a meeting where we can discuss the next
steps.
Please do not report vulnerabilities via GitHub issues.

View File

@@ -173,7 +173,7 @@ proc gcTests(r: var TResults, cat: Category, options: string) =
test "stackrefleak"
test "cyclecollector"
test "trace_globals"
testWithoutBoehm "trace_globals"
proc longGCTests(r: var TResults, cat: Category, options: string) =
when defined(windows):
@@ -448,7 +448,10 @@ proc testNimblePackages(r: var TResults; cat: Category; packageFilter: string) =
(outp, status) = execCmdEx(cmd, workingDir = workingDir2)
status == QuitSuccess
if not ok:
addResult(r, test, targetC, "", cmd & "\n" & outp, reFailed)
if pkg.allowFailure:
inc r.passed
inc r.failedButAllowed
addResult(r, test, targetC, "", cmd & "\n" & outp, reFailed, allowFailure = pkg.allowFailure)
continue
outp
@@ -461,7 +464,7 @@ proc testNimblePackages(r: var TResults; cat: Category; packageFilter: string) =
discard tryCommand("nimble install --depsOnly -y", maxRetries = 3)
discard tryCommand(pkg.cmd, reFailed = reBuildFailed)
inc r.passed
r.addResult(test, targetC, "", "", reSuccess)
r.addResult(test, targetC, "", "", reSuccess, allowFailure = pkg.allowFailure)
errors = r.total - r.passed
if errors == 0:

View File

@@ -22,22 +22,26 @@ When this is the case, a workaround is to test this package here by adding `--pa
type NimblePackage* = object
name*, cmd*, url*: string
useHead*: bool
allowFailure*: bool
## When true, we still run the test but the test is allowed to fail.
## This is useful for packages that currently fail but that we still want to
## run in CI, e.g. so that we can monitor when they start working again and
## are reminded about those failures without making CI fail for unrelated PRs.
var packages*: seq[NimblePackage]
proc pkg(name: string; cmd = "nimble test"; url = "", useHead = true) =
packages.add NimblePackage(name: name, cmd: cmd, url: url, useHead: useHead)
proc pkg(name: string; cmd = "nimble test"; url = "", useHead = true, allowFailure = false) =
packages.add NimblePackage(name: name, cmd: cmd, url: url, useHead: useHead, allowFailure: allowFailure)
# pkg "alea"
pkg "alea", allowFailure = true
pkg "argparse"
when false:
pkg "arraymancer", "nim c tests/tests_cpu.nim"
# pkg "ast_pattern_matching", "nim c -r --oldgensym:on tests/test1.nim"
pkg "arraymancer", "nim c tests/tests_cpu.nim", allowFailure = true
pkg "ast_pattern_matching", "nim c -r --oldgensym:on tests/test1.nim", allowFailure = true
pkg "awk"
pkg "bigints", url = "https://github.com/Araq/nim-bigints"
pkg "binaryheap", "nim c -r binaryheap.nim"
pkg "BipBuffer"
# pkg "blscurve" # pending https://github.com/status-im/nim-blscurve/issues/39
pkg "blscurve", allowFailure = true # pending https://github.com/status-im/nim-blscurve/issues/39
pkg "bncurve"
pkg "brainfuck", "nim c -d:release -r tests/compile.nim"
pkg "bump", "nim c --gc:arc --path:. -r tests/tbump.nim", "https://github.com/disruptek/bump"
@@ -45,32 +49,30 @@ pkg "c2nim", "nim c testsuite/tester.nim"
pkg "cascade"
pkg "cello"
pkg "chroma"
pkg "chronicles", "nim c -o:chr -r chronicles.nim"
# when not defined(osx): # testdatagram.nim(560, 54): Check failed
# pkg "chronos", "nim c -r -d:release tests/testall"
# pending https://github.com/nim-lang/Nim/issues/17130
pkg "chronicles", "nim c -o:chr -r chronicles.nim", allowFailure = true # pending https://github.com/status-im/nim-chronos/issues/169
pkg "chronos", "nim c -r -d:release tests/testall", allowFailure = true # pending https://github.com/nim-lang/Nim/issues/17130
pkg "cligen", "nim c --path:. -r cligen.nim"
pkg "combparser", "nimble test --gc:orc"
pkg "compactdict"
pkg "comprehension", "nimble test", "https://github.com/alehander42/comprehension"
# pkg "criterion" # pending https://github.com/disruptek/criterion/issues/3 (wrongly closed)
pkg "criterion", allowFailure = true # pending https://github.com/disruptek/criterion/issues/3 (wrongly closed)
pkg "dashing", "nim c tests/functional.nim"
pkg "delaunay"
pkg "docopt"
pkg "easygl", "nim c -o:egl -r src/easygl.nim", "https://github.com/jackmott/easygl"
pkg "elvis"
# pkg "fidget" # pending https://github.com/treeform/fidget/issues/133
pkg "fidget"
pkg "fragments", "nim c -r fragments/dsl.nim"
pkg "fusion"
pkg "gara"
pkg "glob"
pkg "ggplotnim", "nim c -d:noCairo -r tests/tests.nim"
# pkg "gittyup", "nimble test", "https://github.com/disruptek/gittyup"
pkg "gittyup", "nimble test", "https://github.com/disruptek/gittyup", allowFailure = true
pkg "gnuplot", "nim c gnuplot.nim"
# pkg "gram", "nim c -r --gc:arc --define:danger tests/test.nim", "https://github.com/disruptek/gram"
# pending https://github.com/nim-lang/Nim/issues/16509
pkg "hts", "nim c -o:htss src/hts.nim"
# pkg "httpauth"
pkg "httpauth", allowFailure = true
pkg "illwill", "nimble examples"
pkg "inim"
pkg "itertools", "nim doc src/itertools.nim"
@@ -86,28 +88,28 @@ pkg "memo"
pkg "msgpack4nim", "nim c -r tests/test_spec.nim"
pkg "nake", "nim c nakefile.nim"
pkg "neo", "nim c -d:blas=openblas tests/all.nim"
# pkg "nesm", "nimble tests" # notice plural 'tests'
# pkg "nico"
pkg "nesm", "nimble tests", allowFailure = true # notice plural 'tests'
pkg "nico", allowFailure = true
pkg "nicy", "nim c -r src/nicy.nim"
pkg "nigui", "nim c -o:niguii -r src/nigui.nim"
pkg "nimcrypto", "nim r --path:. tests/testall.nim" # `--path:.` workaround needed, see D20210308T165435
pkg "NimData", "nim c -o:nimdataa src/nimdata.nim"
pkg "nimes", "nim c src/nimes.nim"
pkg "nimfp", "nim c -o:nfp -r src/fp.nim"
# pkg "nimgame2", "nim c nimgame2/nimgame.nim" # XXX Doesn't work with deprecated 'randomize', will create a PR.
pkg "nimgame2", "nim c nimgame2/nimgame.nim", allowFailure = true # XXX Doesn't work with deprecated 'randomize', will create a PR.
pkg "nimgen", "nim c -o:nimgenn -r src/nimgen/runcfg.nim"
pkg "nimlsp"
pkg "nimly", "nim c -r tests/test_readme_example.nim"
# pkg "nimongo", "nimble test_ci"
# pkg "nimph", "nimble test", "https://github.com/disruptek/nimph"
pkg "nimongo", "nimble test_ci", allowFailure = true
pkg "nimph", "nimble test", "https://github.com/disruptek/nimph", allowFailure = true
pkg "nimpy", "nim c -r tests/nimfrompy.nim"
pkg "nimquery"
pkg "nimsl"
pkg "nimsvg"
pkg "nimterop", "nimble minitest"
pkg "nimwc", "nim c nimwc.nim"
# pkg "nimx", "nim c --threads:on test/main.nim"
# pkg "nitter", "nim c src/nitter.nim", "https://github.com/zedeus/nitter"
pkg "nimx", "nim c --threads:on test/main.nim", allowFailure = true
pkg "nitter", "nim c src/nitter.nim", "https://github.com/zedeus/nitter", allowFailure = true
pkg "norm", "nim c -r tests/sqlite/trows.nim"
pkg "npeg", "nimble testarc"
pkg "numericalnim", "nim c -r tests/test_integrate.nim"
@@ -149,7 +151,7 @@ pkg "unicodedb", "nim c -d:release -r tests/tests.nim"
pkg "unicodeplus", "nim c -d:release -r tests/tests.nim"
pkg "unpack"
pkg "websocket", "nim c websocket.nim"
# pkg "winim"
pkg "winim", allowFailure = true
pkg "with"
pkg "ws"
pkg "yaml", "nim build"

View File

@@ -79,8 +79,6 @@ type
sortoutput*: bool
output*: string
line*, column*: int
tfile*: string
tline*, tcolumn*: int
exitCode*: int
msg*: string
ccodeCheck*: seq[string]
@@ -277,12 +275,6 @@ proc parseSpec*(filename: string): TSpec =
if result.msg.len == 0 and result.nimout.len == 0:
result.parseErrors.addLine "errormsg or msg needs to be specified before column"
discard parseInt(e.value, result.column)
of "tfile":
result.tfile = e.value
of "tline":
discard parseInt(e.value, result.tline)
of "tcolumn":
discard parseInt(e.value, result.tcolumn)
of "output":
if result.outputCheck != ocSubstr:
result.outputCheck = ocEqual

View File

@@ -66,7 +66,8 @@ proc isNimRepoTests(): bool =
type
Category = distinct string
TResults = object
total, passed, skipped: int
total, passed, failedButAllowed, skipped: int
## xxx rename passed to passedOrAllowedFailure
data: string
TTest = object
name: string
@@ -82,12 +83,6 @@ type
let
pegLineError =
peg"{[^(]*} '(' {\d+} ', ' {\d+} ') ' ('Error') ':' \s* {.*}"
pegLineTemplate =
peg"""
{[^(]*} '(' {\d+} ', ' {\d+} ') '
'template/generic instantiation' ( ' of `' [^`]+ '`' )? ' from here' .*
"""
pegOtherError = peg"'Error:' \s* {.*}"
pegOfInterest = pegLineError / pegOtherError
@@ -165,7 +160,6 @@ proc callCompiler(cmdTemplate, filename, options, nimcache: string,
let outp = p.outputStream
var suc = ""
var err = ""
var tmpl = ""
var x = newStringOfCap(120)
result.nimout = ""
while true:
@@ -174,9 +168,6 @@ proc callCompiler(cmdTemplate, filename, options, nimcache: string,
if x =~ pegOfInterest:
# `err` should contain the last error/warning message
err = x
elif x =~ pegLineTemplate and err == "":
# `tmpl` contains the last template expansion before the error
tmpl = x
elif x.isSuccess:
suc = x
elif not running(p):
@@ -187,14 +178,7 @@ proc callCompiler(cmdTemplate, filename, options, nimcache: string,
result.output = ""
result.line = 0
result.column = 0
result.tfile = ""
result.tline = 0
result.tcolumn = 0
result.err = reNimcCrash
if tmpl =~ pegLineTemplate:
result.tfile = extractFilename(matches[0])
result.tline = parseInt(matches[1])
result.tcolumn = parseInt(matches[2])
if err =~ pegLineError:
result.file = extractFilename(matches[0])
result.line = parseInt(matches[1])
@@ -229,6 +213,7 @@ proc callCCompiler(cmdTemplate, filename, options: string,
proc initResults: TResults =
result.total = 0
result.passed = 0
result.failedButAllowed = 0
result.skipped = 0
result.data = ""
@@ -256,16 +241,20 @@ template maybeStyledEcho(args: varargs[untyped]): untyped =
proc `$`(x: TResults): string =
result = ("Tests passed: $1 / $3 <br />\n" &
"Tests skipped: $2 / $3 <br />\n") %
[$x.passed, $x.skipped, $x.total]
result = """
Tests passed or allowed to fail: $2 / $1 <br />
Tests failed and allowed to fail: $3 / $1 <br />
Tests skipped: $4 / $1 <br />
""" % [$x.total, $x.passed, $x.failedButAllowed, $x.skipped]
proc addResult(r: var TResults, test: TTest, target: TTarget,
expected, given: string, successOrig: TResultEnum) =
expected, given: string, successOrig: TResultEnum, allowFailure = false) =
# test.name is easier to find than test.name.extractFilename
# A bit hacky but simple and works with tests/testament/tshould_not_work.nim
var name = test.name.replace(DirSep, '/')
name.add ' ' & $target
if allowFailure:
name.add " (allowed to fail) "
if test.options.len > 0: name.add ' ' & test.options
let duration = epochTime() - test.startTime
@@ -374,22 +363,13 @@ proc cmpMsgs(r: var TResults, expected, given: TSpec, test: TTest, target: TTarg
r.addResult(test, target, expected.msg, given.msg, reMsgsDiffer)
elif expected.nimout.len > 0 and not greedyOrderedSubsetLines(expected.nimout, given.nimout):
r.addResult(test, target, expected.nimout, given.nimout, reMsgsDiffer)
elif expected.tfile == "" and extractFilename(expected.file) != extractFilename(given.file) and
elif extractFilename(expected.file) != extractFilename(given.file) and
"internal error:" notin expected.msg:
r.addResult(test, target, expected.file, given.file, reFilesDiffer)
elif expected.line != given.line and expected.line != 0 or
expected.column != given.column and expected.column != 0:
r.addResult(test, target, $expected.line & ':' & $expected.column,
$given.line & ':' & $given.column,
reLinesDiffer)
elif expected.tfile != "" and extractFilename(expected.tfile) != extractFilename(given.tfile) and
"internal error:" notin expected.msg:
r.addResult(test, target, expected.tfile, given.tfile, reFilesDiffer)
elif expected.tline != given.tline and expected.tline != 0 or
expected.tcolumn != given.tcolumn and expected.tcolumn != 0:
r.addResult(test, target, $expected.tline & ':' & $expected.tcolumn,
$given.tline & ':' & $given.tcolumn,
reLinesDiffer)
$given.line & ':' & $given.column, reLinesDiffer)
else:
r.addResult(test, target, expected.msg, given.msg, reSuccess)
inc(r.passed)

View File

@@ -390,3 +390,35 @@ proc newPixelBuffer(): PixelBuffer =
discard newPixelBuffer()
# bug #17199
proc passSeq(data: seq[string]) =
# used the system.& proc initially
let wat = data & "hello"
proc test2 =
let name = @["hello", "world"]
passSeq(name)
doAssert name == @["hello", "world"]
static: test2() # was buggy
test2()
proc merge(x: sink seq[string], y: sink string): seq[string] =
newSeq(result, x.len + 1)
for i in 0..x.len-1:
result[i] = move(x[i])
result[x.len] = move(y)
proc passSeq2(data: seq[string]) =
# used the system.& proc initially
let wat = merge(data, "hello")
proc test3 =
let name = @["hello", "world"]
passSeq2(name)
doAssert name == @["hello", "world"]
static: test3() # was buggy
test3()

View File

@@ -107,6 +107,8 @@ sink
destroy
copy
destroy
(f: 1)
destroy
'''
"""
@@ -770,3 +772,15 @@ proc pair(): tuple[a: C, b: C] =
discard pair()
# bug #17450
proc noConsume(x: OO) {.nosinks.} = echo x
proc main3 =
var i = 1
noConsume:
block:
OO(f: i)
main3()

View File

@@ -929,7 +929,7 @@ static:
(x & y & z) is string
ast.peelOff({nnkStmtList, nnkTypeSection}).matchAst:
of nnkTypeDef(_, _, nnkTypeClassTy(nnkArglist, _, _, nnkStmtList)):
of nnkTypeDef(_, _, nnkTypeClassTy(nnkArgList, _, _, nnkStmtList)):
# note this isn't nnkConceptTy!
echo "ok"

View File

@@ -1,6 +1,6 @@
discard """
errormsg: '''
not all cases are covered; missing: {nnkComesFrom, nnkDotCall, nnkHiddenCallConv, nnkVarTuple, nnkCurlyExpr, nnkRange, nnkCheckedFieldExpr, nnkDerefExpr, nnkElifExpr, nnkElseExpr, nnkLambda, nnkDo, nnkBind, nnkClosedSymChoice, nnkHiddenSubConv, nnkConv, nnkStaticExpr, nnkAddr, nnkHiddenAddr, nnkHiddenDeref, nnkObjDownConv, nnkObjUpConv, nnkChckRangeF, nnkChckRange64, nnkChckRange, nnkStringToCString, nnkCStringToString, nnkFastAsgn, nnkGenericParams, nnkFormalParams, nnkOfInherit, nnkImportAs, nnkConverterDef, nnkMacroDef, nnkTemplateDef, nnkIteratorDef, nnkOfBranch, nnkElifBranch, nnkExceptBranch, nnkElse, nnkAsmStmt, nnkTypeDef, nnkFinally, nnkContinueStmt, nnkImportStmt, nnkImportExceptStmt, nnkExportStmt, nnkExportExceptStmt, nnkFromStmt, nnkIncludeStmt, nnkUsingStmt, nnkBlockExpr, nnkStmtListType, nnkBlockType, nnkWith, nnkWithout, nnkTypeOfExpr, nnkObjectTy, nnkTupleTy, nnkTupleClassTy, nnkTypeClassTy, nnkStaticTy, nnkRecList, nnkRecCase, nnkRecWhen, nnkVarTy, nnkConstTy, nnkMutableTy, nnkDistinctTy, nnkProcTy, nnkIteratorTy, nnkSharedTy, nnkEnumTy, nnkEnumFieldDef, nnkArglist, nnkPattern, nnkReturnToken, nnkClosure, nnkGotoState, nnkState, nnkBreakState, nnkFuncDef, nnkTupleConstr}
not all cases are covered; missing: {nnkComesFrom, nnkDotCall, nnkHiddenCallConv, nnkVarTuple, nnkCurlyExpr, nnkRange, nnkCheckedFieldExpr, nnkDerefExpr, nnkElifExpr, nnkElseExpr, nnkLambda, nnkDo, nnkBind, nnkClosedSymChoice, nnkHiddenSubConv, nnkConv, nnkStaticExpr, nnkAddr, nnkHiddenAddr, nnkHiddenDeref, nnkObjDownConv, nnkObjUpConv, nnkChckRangeF, nnkChckRange64, nnkChckRange, nnkStringToCString, nnkCStringToString, nnkFastAsgn, nnkGenericParams, nnkFormalParams, nnkOfInherit, nnkImportAs, nnkConverterDef, nnkMacroDef, nnkTemplateDef, nnkIteratorDef, nnkOfBranch, nnkElifBranch, nnkExceptBranch, nnkElse, nnkAsmStmt, nnkTypeDef, nnkFinally, nnkContinueStmt, nnkImportStmt, nnkImportExceptStmt, nnkExportStmt, nnkExportExceptStmt, nnkFromStmt, nnkIncludeStmt, nnkUsingStmt, nnkBlockExpr, nnkStmtListType, nnkBlockType, nnkWith, nnkWithout, nnkTypeOfExpr, nnkObjectTy, nnkTupleTy, nnkTupleClassTy, nnkTypeClassTy, nnkStaticTy, nnkRecList, nnkRecCase, nnkRecWhen, nnkVarTy, nnkConstTy, nnkMutableTy, nnkDistinctTy, nnkProcTy, nnkIteratorTy, nnkSharedTy, nnkEnumTy, nnkEnumFieldDef, nnkArgList, nnkPattern, nnkReturnToken, nnkClosure, nnkGotoState, nnkState, nnkBreakState, nnkFuncDef, nnkTupleConstr}
'''
"""
@@ -62,7 +62,7 @@ type
nnkSharedTy, # 'shared T'
nnkEnumTy,
nnkEnumFieldDef,
nnkArglist, nnkPattern
nnkArgList, nnkPattern
nnkReturnToken,
nnkClosure,
nnkGotoState,

View File

@@ -5,7 +5,8 @@ discard """
@[(input: @["KXSC", "BGMC"]), (input: @["PXFX"]), (input: @["WXRQ", "ZSCZD"])]
14
First tasks completed.
Second tasks completed.'''
Second tasks completed.
test1'''
"""
import strutils, os, std / wordwrap
@@ -241,3 +242,13 @@ when true:
test_string_b.setLen new_len_b
echo "Second tasks completed."
# bug #17450
proc main =
var i = 1
echo:
block:
"test" & $i
main()

View File

@@ -1,11 +1,20 @@
discard """
output: '''10000000
output: '''
10000000
10000000
10000000'''
"""
# bug #17085
#[
refs https://github.com/nim-lang/Nim/issues/17085#issuecomment-786466595
with --gc:boehm, this warning sometimes gets generated:
Warning: Repeated allocation of very large block (appr. size 14880768):
May lead to memory leak and poor performance.
nim CI now runs this test with `testWithoutBoehm` to avoid running it with --gc:boehm.
]#
proc init(): string =
for a in 0..<10000000:
result.add 'c'
@@ -16,6 +25,8 @@ proc f() =
var c {.global.} = init()
echo a.len
# `echo` intentional according to
# https://github.com/nim-lang/Nim/pull/17469/files/0c9e94cb6b9ebca9da7cb19a063fba7aa409748e#r600016573
echo b.len
echo c.len

View File

@@ -1,6 +1,5 @@
discard """
output: "bar"
disabled: "true"
"""
import tables

45
tests/iter/t16076.nim Normal file
View File

@@ -0,0 +1,45 @@
discard """
targets: "c js"
"""
proc main() =
block: # bug #17485
type
O = ref object
i: int
iterator t(o: O): int =
if o != nil:
yield o.i
yield 0
proc m =
var data = ""
for i in t(nil):
data.addInt i
doAssert data == "0"
m()
block: # bug #16076
type
R = ref object
z: int
var data = ""
iterator foo(x: int; y: R = nil): int {.inline.} =
if y == nil:
yield x
else:
yield y.z
for b in foo(10):
data.addInt b
doAssert data == "10"
static: main()
main()

View File

@@ -0,0 +1,9 @@
import macros
macro lispReprStr*(a: untyped): untyped = newLit(a.lispRepr)
macro assertAST*(expected: string, struct: untyped): untyped =
var ast = newLit(struct.treeRepr)
result = quote do:
if `ast` != `expected`:
doAssert false, "\nGot:\n" & `ast`.indent(2) & "\nExpected:\n" & `expected`.indent(2)

View File

@@ -0,0 +1,185 @@
discard """
targets: "c cpp js"
"""
# Test tkStrNumLit
import std/[macros, strutils]
import mlexerutils
# AST checks
assertAST dedent """
StmtList
ProcDef
AccQuoted
Ident "\'"
Ident "wrap"
Empty
Empty
FormalParams
Ident "string"
IdentDefs
Ident "number"
Ident "string"
Empty
Empty
Empty
StmtList
Asgn
Ident "result"
Infix
Ident "&"
Infix
Ident "&"
StrLit "[["
Ident "number"
StrLit "]]"""":
proc `'wrap`(number: string): string =
result = "[[" & number & "]]"
assertAST dedent """
StmtList
DotExpr
RStrLit "-38383839292839283928392839283928392839283.928493849385935898243e-50000"
Ident "\'wrap"""":
-38383839292839283928392839283928392839283.928493849385935898243e-50000'wrap
proc `'wrap`(number: string): string = "[[" & number & "]]"
proc wrap2(number: string): string = "[[" & number & "]]"
doAssert lispReprStr(-1'wrap) == """(DotExpr (RStrLit "-1") (Ident "\'wrap"))"""
template main =
block: # basic suffix usage
template `'twrap`(number: string): untyped =
number.`'wrap`
proc extraContext(): string =
22.40'wrap
proc `*`(left, right: string): string =
result = left & "times" & right
proc `+`(left, right: string): string =
result = left & "plus" & right
doAssert 1'wrap == "[[1]]"
doAssert -1'wrap == "[[-1]]":
"unable to resolve a negative integer-suffix pattern"
doAssert 12345.67890'wrap == "[[12345.67890]]"
doAssert 1'wrap*1'wrap == "[[1]]times[[1]]":
"unable to resolve an operator between two suffixed numeric literals"
doAssert 1'wrap+ -1'wrap == "[[1]]plus[[-1]]": # will generate a compiler warning about inconsistent spacing
"unable to resolve a negative suffixed numeric literal following an operator"
doAssert 1'wrap + -1'wrap == "[[1]]plus[[-1]]"
doAssert 1'twrap == "[[1]]"
doAssert extraContext() == "[[22.40]]":
"unable to return a suffixed numeric literal by an implicit return"
doAssert 0x5a3a'wrap == "[[0x5a3a]]"
doAssert 0o5732'wrap == "[[0o5732]]"
doAssert 0b0101111010101'wrap == "[[0b0101111010101]]"
doAssert -38383839292839283928392839283928392839283.928493849385935898243e-50000'wrap == "[[-38383839292839283928392839283928392839283.928493849385935898243e-50000]]"
doAssert 1234.56'wrap == "[[1234.56]]":
"unable to properly account for context with suffixed numeric literals"
block: # verify that the i64, f32, etc builtin suffixes still parse correctly
const expectedF32: float32 = 123.125
proc `'f9`(number: string): string = # proc starts with 'f' just like 'f32'
"[[" & number & "]]"
proc `'f32a`(number: string): string = # looks even more like 'f32'
"[[" & number & "]]"
proc `'d9`(number: string): string = # proc starts with 'd' just like the d suffix
"[[" & number & "]]"
proc `'i9`(number: string): string = # proc starts with 'i' just like 'i64'
"[[" & number & "]]"
proc `'u9`(number: string): string = # proc starts with 'u' just like 'u8'
"[[" & number & "]]"
doAssert 123.125f32 == expectedF32:
"failing to support non-quoted legacy f32 floating point suffix"
doAssert 123.125'f32 == expectedF32
doAssert 123.125e0'f32 == expectedF32
doAssert 1234.56'wrap == 1234.56'f9
doAssert 1234.56'wrap == 1234.56'f32a
doAssert 1234.56'wrap == 1234.56'd9
doAssert 1234.56'wrap == 1234.56'i9
doAssert 1234.56'wrap == 1234.56'u9
doAssert lispReprStr(1234.56'u9) == """(DotExpr (RStrLit "1234.56") (Ident "\'u9"))""":
"failed to properly build AST for suffix that starts with u"
doAssert -128'i8 == (-128).int8
block: # case checks
doAssert 1E2 == 100:
"lexer not handling upper-case exponent"
doAssert 1.0E2 == 100.0
doAssert 1e2 == 100
doAssert 0xdeadBEEF'wrap == "[[0xdeadBEEF]]":
"lexer not maintaining original case"
doAssert 0.1E12'wrap == "[[0.1E12]]"
doAssert 0.0e12'wrap == "[[0.0e12]]"
doAssert 0.0e+12'wrap == "[[0.0e+12]]"
doAssert 0.0e-12'wrap == "[[0.0e-12]]"
doAssert 0e-12'wrap == "[[0e-12]]"
block: # macro and template usage
template `'foo`(a: string): untyped = (a, 2)
doAssert -12'foo == ("-12", 2)
template `'fooplus`(a: string, b: int): untyped = (a, b)
doAssert -12'fooplus(2) == ("-12", 2)
template `'fooplusopt`(a: string, b: int = 99): untyped = (a, b)
doAssert -12'fooplusopt(2) == ("-12", 2)
doAssert -12'fooplusopt() == ("-12", 99)
doAssert -12'fooplusopt == ("-12", 99)
macro `'bar`(a: static string): untyped = newLit(a.repr)
doAssert -12'bar == "\"-12\""
macro deb(a): untyped = newLit(a.repr)
doAssert deb(-12'bar) == "-12'bar"
block: # bug 1 from https://github.com/nim-lang/Nim/pull/17020#issuecomment-803193947
macro deb1(a): untyped = newLit a.repr
macro deb2(a): untyped = newLit a.lispRepr
doAssert deb1(-12'wrap) == "-12'wrap"
doAssert deb1(-12'nonexistant) == "-12'nonexistant"
doAssert deb2(-12'nonexistant) == """(DotExpr (RStrLit "-12") (Ident "\'nonexistant"))"""
when false: # xxx bug:
# this holds:
doAssert deb2(-12.wrap2) == """(DotExpr (IntLit -12) (Sym "wrap2"))"""
doAssert deb2(-12'wrap) == """(DotExpr (RStrLit "-12") (Sym "\'wrap"))"""
# but instead this should hold:
doAssert deb2(-12.wrap2) == """(DotExpr (IntLit -12) (Ident "wrap2"))"""
doAssert deb2(-12'wrap) == """(DotExpr (RStrLit "-12") (Ident "\'wrap"))"""
block: # bug 2 from https://github.com/nim-lang/Nim/pull/17020#issuecomment-803193947
template toSuf(`'suf`): untyped =
let x = -12'suf
x
doAssert toSuf(`'wrap`) == "[[-12]]"
block: # bug 10 from https://github.com/nim-lang/Nim/pull/17020#issuecomment-803193947
proc `myecho`(a: auto): auto = a
template fn1(): untyped =
let a = "abc"
-12'wrap
template fn2(): untyped =
`myecho` -12'wrap
template fn3(): untyped =
-12'wrap
doAssert fn1() == "[[-12]]"
doAssert fn2() == "[[-12]]"
doAssert fn3() == "[[-12]]"
when false: # xxx this fails; bug 9 from https://github.com/nim-lang/Nim/pull/17020#issuecomment-803193947
#[
possible workaround: use `genAst` (https://github.com/nim-lang/Nim/pull/17426) and this:
let a3 = `'wrap3`("-128")
]#
block:
macro metawrap(): untyped =
func wrap1(a: string): string = "{" & a & "}"
func `'wrap3`(a: string): string = "{" & a & "}"
result = quote do:
let a1 = wrap1"-128"
let a2 = -128'wrap3
metawrap()
doAssert a1 == "{-128}"
doAssert a2 == "{-128}"
static: main()
main()

View File

@@ -0,0 +1,81 @@
discard """
targets: "c cpp js"
"""
# Test numeric literals and handling of minus symbol
import std/[macros, strutils]
import mlexerutils
const one = 1
const minusOne = `-`(one)
# border cases that *should* generate compiler errors:
assertAST dedent """
StmtList
Asgn
Ident "x"
Command
IntLit 4
IntLit -1""":
x = 4 -1
assertAST dedent """
StmtList
VarSection
IdentDefs
Ident "x"
Ident "uint"
IntLit -1""":
var x: uint = -1
template bad() =
x = 4 -1
doAssert not compiles(bad())
template main =
block: # check when a minus (-) is a negative sign for a literal
doAssert -1 == minusOne:
"unable to parse a spaced-prefixed negative int"
doAssert lispReprStr(-1) == """(IntLit -1)"""
doAssert -1.0'f64 == minusOne.float64
doAssert lispReprStr(-1.000'f64) == """(Float64Lit -1.0)"""
doAssert lispReprStr( -1.000'f64) == """(Float64Lit -1.0)"""
doAssert [-1].contains(minusOne):
"unable to handle negatives after square bracket"
doAssert lispReprStr([-1]) == """(Bracket (IntLit -1))"""
doAssert (-1, 2)[0] == minusOne:
"unable to handle negatives after parenthesis"
doAssert lispReprStr((-1, 2)) == """(Par (IntLit -1) (IntLit 2))"""
proc x(): int =
var a = 1;-1 # the -1 should act as the return value
doAssert x() == minusOne:
"unable to handle negatives after semi-colon"
block:
doAssert -0b111 == -7
doAssert -0xff == -255
doAssert -128'i8 == (-128).int8
doAssert $(-128'i8) == "-128"
doAssert -32768'i16 == int16.low
doAssert -2147483648'i32 == int32.low
when int.sizeof > 4:
doAssert -9223372036854775808 == int.low
when not defined(js):
doAssert -9223372036854775808 == int64.low
block: # check when a minus (-) is an unary op
doAssert -one == minusOne:
"unable to a negative prior to identifier"
block: # check when a minus (-) is a a subtraction op
doAssert 4-1 == 3:
"unable to handle subtraction sans surrounding spaces with a numeric literal"
doAssert 4-one == 3:
"unable to handle subtraction sans surrounding spaces with an identifier"
doAssert 4 - 1 == 3:
"unable to handle subtraction with surrounding spaces with a numeric literal"
doAssert 4 - one == 3:
"unable to handle subtraction with surrounding spaces with an identifier"
static: main()
main()

View File

@@ -64,7 +64,7 @@ block t7723:
block t8706:
macro varargsLen(args:varargs[untyped]): untyped =
doAssert args.kind == nnkArglist
doAssert args.kind == nnkArgList
doAssert args.len == 0
result = newLit(args.len)

28
tests/notnil/tnotnil5.nim Normal file
View File

@@ -0,0 +1,28 @@
discard """
matrix: "--threads:on"
"""
{.experimental: "parallel".}
{.experimental: "notnil".}
import threadpool
type
AO = object
x: int
A = ref AO not nil
proc process(a: A): A =
return A(x: a.x+1)
proc processMany(ayys: openArray[A]): seq[A] =
var newAs: seq[FlowVar[A]]
parallel:
for a in ayys:
newAs.add(spawn process(a))
for newAflow in newAs:
let newA = ^newAflow
if isNil(newA):
return @[]
result.add(newA)

6
tests/proc/t17157.nim Normal file
View File

@@ -0,0 +1,6 @@
discard """
errormsg: "'untyped' is only allowed in templates and macros or magic procs"
"""
template something(op: proc (v: untyped): void): void =
discard

17
tests/sets/t5792.nim Normal file
View File

@@ -0,0 +1,17 @@
discard """
matrix: "--gc:refc; --gc:arc"
"""
type
T = enum
a
b
c
U = object
case k: T
of a:
x: int
of {b, c} - {a}:
y: int
discard U(k: b, y: 1)

View File

@@ -7,68 +7,88 @@ import std/[isolation, json]
proc main() =
proc main(moveZeroesOut: static bool) =
block:
type
Empty = ref object
var x = isolate(Empty())
discard extract(x)
block: # string literals
var data = isolate("string")
doAssert data.extract == "string"
doAssert data.extract == ""
if moveZeroesOut:
doAssert data.extract == ""
block: # string literals
var data = isolate("")
doAssert data.extract == ""
doAssert data.extract == ""
if moveZeroesOut:
doAssert data.extract == ""
block:
var src = "string"
var data = isolate(move src)
doAssert data.extract == "string"
doAssert src.len == 0
if moveZeroesOut:
doAssert src.len == 0
block: # int literals
var data = isolate(1)
doAssert data.extract == 1
doAssert data.extract == 0
if moveZeroesOut:
doAssert data.extract == 0
block: # float literals
var data = isolate(1.6)
doAssert data.extract == 1.6
doAssert data.extract == 0.0
if moveZeroesOut:
doAssert data.extract == 0.0
block:
var data = isolate(@["1", "2"])
doAssert data.extract == @["1", "2"]
doAssert data.extract == @[]
if moveZeroesOut:
doAssert data.extract == @[]
block:
var data = isolate(@["1", "2", "3", "4", "5"])
doAssert data.extract == @["1", "2", "3", "4", "5"]
doAssert data.extract == @[]
if moveZeroesOut:
doAssert data.extract == @[]
block:
var data = isolate(@["", ""])
doAssert data.extract == @["", ""]
doAssert data.extract == @[]
if moveZeroesOut:
doAssert data.extract == @[]
block:
var src = @["1", "2"]
var data = isolate(move src)
doAssert data.extract == @["1", "2"]
doAssert src.len == 0
if moveZeroesOut:
doAssert src.len == 0
block:
var data = isolate(@[1, 2])
doAssert data.extract == @[1, 2]
doAssert data.extract == @[]
if moveZeroesOut:
doAssert data.extract == @[]
block:
var data = isolate(["1", "2"])
doAssert data.extract == ["1", "2"]
doAssert data.extract == ["", ""]
if moveZeroesOut:
doAssert data.extract == ["", ""]
block:
var data = isolate([1, 2])
doAssert data.extract == [1, 2]
doAssert data.extract == [0, 0]
if moveZeroesOut:
doAssert data.extract == [0, 0]
block:
type
@@ -111,5 +131,5 @@ proc main() =
doAssert $x == """@[(value: "1234")]"""
static: main()
main()
static: main(moveZeroesOut = false)
main(moveZeroesOut = true)

View File

@@ -300,3 +300,14 @@ block: # bug #17383
when not defined(js):
testRoundtrip(int64.high): "9223372036854775807"
testRoundtrip(uint64.high): "18446744073709551615"
block:
let a = "18446744073709551615"
let b = a.parseJson
doAssert b.kind == JString
let c = $b
when defined(js):
doAssert c == "18446744073709552000"
else:
doAssert c == "18446744073709551615"

View File

@@ -71,7 +71,7 @@ template fn() =
block:
let a = (int32.high, uint32.high)
testRoundtrip(a): "[2147483647,4294967295]"
when not defined(js):
when int.sizeof > 4:
block:
let a = (int64.high, uint64.high)
testRoundtrip(a): "[9223372036854775807,18446744073709551615]"

View File

@@ -17,10 +17,11 @@ proc test() =
wrapSocket(ctx, socket)
# trying 2 sites makes it more resilent: refs #17458 this could give:
# Error: unhandled exception: Call to 'connect' timed out. [TimeoutError]
# * Call to 'connect' timed out. [TimeoutError]
# * No route to host [OSError]
try:
fn("www.nim-lang.org")
except TimeoutError:
except TimeoutError, OSError:
fn("www.google.com")
test()

View File

@@ -2,7 +2,7 @@ discard """
targets: "c js"
"""
import parsecfg, streams
import parsecfg, streams, sequtils
when not defined(js):
from stdtest/specialpaths import buildDir
@@ -39,19 +39,14 @@ var ss = newStringStream()
dict1.writeConfig(ss)
## Reading a configuration file.
var dict2 = loadConfig(newStringStream(ss.data))
var charset = dict2.getSectionValue("", "charset")
var threads = dict2.getSectionValue("Package", "--threads")
var pname = dict2.getSectionValue("Package", "name")
var name = dict2.getSectionValue("Author", "name")
var qq = dict2.getSectionValue("Author", "qq")
var email = dict2.getSectionValue("Author", "email")
doAssert charset == "utf-8"
doAssert threads == "on"
doAssert pname == "hello"
doAssert name == "lihf8515"
doAssert qq == "10214028"
doAssert email == "lihaifeng@wxm.com"
let dict2 = loadConfig(newStringStream(ss.data))
doAssert dict2.getSectionValue("", "charset") == "utf-8"
doAssert dict2.getSectionValue("Package", "--threads") == "on"
doAssert dict2.getSectionValue("Package", "name") == "hello"
doAssert dict2.getSectionValue("Author", "name") == "lihf8515"
doAssert dict2.getSectionValue("Author", "qq") == "10214028"
doAssert dict2.getSectionValue("Author", "email") == "lihaifeng@wxm.com"
doAssert toSeq(dict2.sections) == @["", "Package", "Author"]
## Modifying a configuration file.
var dict3 = loadConfig(newStringStream(ss.data))

View File

@@ -196,9 +196,15 @@ suite "RST/Markdown general":
| | F2 without pipe
not in table"""
let output1 = input1.toHtml
doAssert output1 == """<table border="1" class="docutils"><tr><th>A1 header</th><th>A2 | not fooled</th></tr>
#[
TODO: `\|` inside a table cell should render as `|`
`|` outside a table cell should render as `\|`
consistently with markdown, see https://stackoverflow.com/a/66557930/1426932
]#
doAssert output1 == """
<table border="1" class="docutils"><tr><th>A1 header</th><th>A2 | not fooled</th></tr>
<tr><td>C1</td><td>C2 <strong>bold</strong></td></tr>
<tr><td>D1 <tt class="docutils literal"><span class="pre">code |</span></tt></td><td>D2</td></tr>
<tr><td>D1 <tt class="docutils literal"><span class="pre">code \|</span></tt></td><td>D2</td></tr>
<tr><td>E1 | text</td><td></td></tr>
<tr><td></td><td>F2 without pipe</td></tr>
</table><p>not in table</p>
@@ -549,6 +555,18 @@ let x = 1
let output2 = input2.toHtml
doAssert "<pre" in output2 and "class=\"Keyword\"" in output2
test "interpreted text":
check """`foo.bar`""".toHtml == """<tt class="docutils literal"><span class="pre">foo.bar</span></tt>"""
check """`foo\`\`bar`""".toHtml == """<tt class="docutils literal"><span class="pre">foo``bar</span></tt>"""
check """`foo\`bar`""".toHtml == """<tt class="docutils literal"><span class="pre">foo`bar</span></tt>"""
check """`\`bar`""".toHtml == """<tt class="docutils literal"><span class="pre">`bar</span></tt>"""
check """`a\b\x\\ar`""".toHtml == """<tt class="docutils literal"><span class="pre">a\b\x\\ar</span></tt>"""
test "inline literal":
check """``foo.bar``""".toHtml == """<tt class="docutils literal"><span class="pre">foo.bar</span></tt>"""
check """``foo\bar``""".toHtml == """<tt class="docutils literal"><span class="pre">foo\bar</span></tt>"""
check """``f\`o\\o\b`ar``""".toHtml == """<tt class="docutils literal"><span class="pre">f\`o\\o\b`ar</span></tt>"""
test "RST comments":
let input1 = """
Check that comment disappears:
@@ -1259,6 +1277,55 @@ Test1
let refline = "Ref. " & ref1 & "! and " & ref2 & ";and " & ref3 & "."
doAssert refline in output1
test "Option lists 1":
# check that "* b" is not consumed by previous bullet item because of
# incorrect indentation handling in option lists
let input = dedent """
* a
-m desc
-n very long
desc
* b"""
let output = input.toHtml
check(output.count("<ul") == 1)
check(output.count("<li>") == 2)
check(output.count("<table") == 1)
check("""<th align="left">-m</th><td align="left">desc</td>""" in output)
check("""<th align="left">-n</th><td align="left">very long desc</td>""" in
output)
test "Option lists 2":
# check that 2nd option list is not united with the 1st
let input = dedent """
* a
-m desc
-n very long
desc
-d option"""
let output = input.toHtml
check(output.count("<ul") == 1)
check(output.count("<table") == 2)
check("""<th align="left">-m</th><td align="left">desc</td>""" in output)
check("""<th align="left">-n</th><td align="left">very long desc</td>""" in
output)
check("""<th align="left">-d</th><td align="left">option</td>""" in
output)
test "Option list 3 (double /)":
let input = dedent """
* a
//compile compile1
//doc doc1
cont
-d option"""
let output = input.toHtml
check(output.count("<ul") == 1)
check(output.count("<table") == 2)
check("""<th align="left">compile</th><td align="left">compile1</td>""" in output)
check("""<th align="left">doc</th><td align="left">doc1 cont</td>""" in
output)
check("""<th align="left">-d</th><td align="left">option</td>""" in
output)
suite "RST/Code highlight":
test "Basic Python code highlight":
let pythonCode = """

7
tests/tuples/t7012.nim Normal file
View File

@@ -0,0 +1,7 @@
discard """
errormsg: "illegal recursion in type 'Node'"
"""
type Node[T] = tuple
next: ref Node[T]
var n: Node[int]

30
tests/vm/t9622.nim Normal file
View File

@@ -0,0 +1,30 @@
discard """
targets: "c cpp"
matrix: "--gc:refc; --gc:arc"
"""
type
GlobNodeKind = enum
LiteralIdent,
Group
GlobNode = object
case kind: GlobNodeKind
of LiteralIdent:
value: string
of Group:
values: seq[string]
PathSegment = object
children: seq[GlobNode]
GlobPattern = seq[PathSegment]
proc parseImpl(): GlobPattern =
if result.len == 0:
result.add PathSegment()
result[^1].children.add GlobNode(kind: LiteralIdent)
block:
const pattern = parseImpl()
doAssert $pattern == """@[(children: @[(kind: LiteralIdent, value: "")])]"""

View File

@@ -255,6 +255,31 @@ block:
doAssert e == @[]
doAssert f == @[]
block: # bug #10815
type
Opcode = enum
iChar, iSet
Inst = object
case code: Opcode
of iChar:
c: char
of iSet:
cs: set[char]
Patt = seq[Inst]
proc `$`(p: Patt): string =
discard
proc P(): Patt =
result.add Inst(code: iSet)
const a = P()
doAssert $a == ""
import tables
block: # bug #8007

View File

@@ -16,106 +16,7 @@ const
Version & """
(c) 2012-2020 Andreas Rumpf
Usage:
* To search:
nimgrep [options] PATTERN [(FILE/DIRECTORY)*/-]
* To replace:
nimgrep [options] PATTERN --replace REPLACEMENT (FILE/DIRECTORY)*/-
* To list file names:
nimgrep [options] --filenames [PATTERN] [(FILE/DIRECTORY)*]
Positional arguments, from left to right:
* PATERN is either Regex (default) or Peg if --peg is specified.
PATTERN and REPLACEMENT should be skipped when --stdin is specified.
* REPLACEMENT supports $1, $# notations for captured groups in PATTERN.
Note: --replace mode DOES NOT ask confirmation unless --confirm is specified!
* Final arguments are a list of paths (FILE/DIRECTORY) or a standalone
minus '-' (pipe) or not specified (empty). Note for the empty case: when
no FILE/DIRECTORY/- is specified nimgrep DOES NOT read the pipe, but
searches files in the current dir instead!
- read buffer once from stdin: pipe or terminal input;
in --replace mode the result is directed to stdout;
it's not compatible with --stdin, --filenames, --confirm
(empty) current directory '.' is assumed (not with --replace)
For any given DIRECTORY nimgrep searches only its immediate files without
traversing sub-directories unless --recursive is specified.
In replacement mode all 3 positional arguments are required to avoid damaging.
Options:
* Mode of operation:
--find, -f find the PATTERN (default)
--replace, -! replace the PATTERN to REPLACEMENT, rewriting the files
--confirm confirm each occurrence/replacement; there is a chance
to abort any time without touching the file
--filenames just list filenames. Provide a PATTERN to find it in
the filenames (not in the contents of a file) or run
with empty pattern to just list all files:
nimgrep --filenames # In current directory
nimgrep --filenames "" DIRECTORY # Note empty pattern ""
* Interprete patterns:
--peg PATTERN and PAT are Peg
--re PATTERN and PAT are regular expressions (default)
--rex, -x use the "extended" syntax for the regular expression
so that whitespace is not significant
--word, -w matches should have word boundaries (buggy for pegs!)
--ignoreCase, -i be case insensitive in PATTERN and PAT
--ignoreStyle, -y be style insensitive in PATTERN and PAT
NOTE: PATERN and patterns PAT (see below in other options) are all either
Regex or Peg simultaneously and options --rex, --word, --ignoreCase,
--ignoreStyle are applied to all of them.
* File system walk:
--recursive, -r process directories recursively
--follow follow all symlinks when processing recursively
--ext:EX1|EX2|... only search the files with the given extension(s),
empty one ("--ext") means files with missing extension
--noExt:EX1|... exclude files having given extension(s), use empty one to
skip files with no extension (like some binary files are)
--includeFile:PAT search only files whose names contain pattern PAT
--excludeFile:PAT skip files whose names contain pattern PAT
--includeDir:PAT search only files with whole directory path containing PAT
--excludeDir:PAT skip directories whose name (not path) contain pattern PAT
--if,--ef,--id,--ed abbreviations of 4 options above
--sortTime order files by the last modification time (default: off):
-s[:asc|desc] ascending (recent files go last) or descending
* Filter file content:
--match:PAT select files containing a (not displayed) match of PAT
--noMatch:PAT select files not containing any match of PAT
--bin:on|off|only process binary files? (detected by \0 in first 1K bytes)
(default: on - binary and text files treated the same way)
--text, -t process only text files, the same as --bin:off
* Represent results:
--nocolor output will be given without any colors
--color[:on] force color even if output is redirected (default: auto)
--colorTheme:THEME select color THEME from 'simple' (default),
'bnw' (black and white) ,'ack', or 'gnu' (GNU grep)
--count only print counts of matches for files that matched
--context:N, -c:N print N lines of leading context before every match and
N lines of trailing context after it (default N: 0)
--afterContext:N,
-a:N print N lines of trailing context after every match
--beforeContext:N,
-b:N print N lines of leading context before every match
--group, -g group matches by file
--newLine, -l display every matching line starting from a new line
--cols[:N] limit max displayed columns/width of output lines from
files by N characters, cropping overflows (default: off)
--cols:auto, -% calculate columns from terminal width for every line
--onlyAscii, -@ display only printable ASCII Latin characters 0x20-0x7E
substitutions: 0 -> ^@, 1 -> ^A, ... 0x1F -> ^_,
0x7F -> '7F, ..., 0xFF -> 'FF
* Miscellaneous:
--threads:N, -j:N speed up search by N additional workers (default: 0, off)
--stdin read PATTERN from stdin (to avoid the shell's confusing
quoting rules) and, if --replace given, REPLACEMENT
--verbose be verbose: list every processed file
--help, -h shows this help
--version, -v shows the version
"""
""" & slurp "../doc/nimgrep_cmdline.txt"
# Limitations / ideas / TODO:
# * No unicode support with --cols