Mirror of https://github.com/nim-lang/Nim.git, synced 2026-01-04 12:07:51 +00:00
IC: bugfixes (WIP) (#16836)
* minor improvements
* IC: added the required logic for compilerProcs
* LazySym ftw
* we need this testing logic
* reimplement the old way we use for module package creation
* fixes a regression; don't pick module names if you can avoid it
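
The recurring theme in this diff is the LazySym/FullId pair: instead of decoding every converter, pattern and pure-enum symbol eagerly when a cached module is loaded, the module graph keeps the packed id next to an initially nil PSym and only decodes the symbol when it is first used. A minimal standalone sketch of that idea, in Nim: the field shapes of PackedItemId, FullId and LazySym follow the diff below, while loadSymFromDisk and resolve are hypothetical stand-ins for the real rod-file decoding (loadSymFromId in the diff), not the compiler's actual API.

type
  PackedItemId = object
    module, item: int32

  FullId = object
    module: int
    packed: PackedItemId

  PSym = ref object
    name: string

  LazySym = object
    id: FullId
    sym: PSym              # nil until the symbol is first needed

proc loadSymFromDisk(id: FullId): PSym =
  ## Hypothetical stand-in for decoding a symbol from a packed (.rod) module.
  PSym(name: "sym_" & $id.packed.item)

proc resolve(ls: var LazySym): PSym =
  ## Decode on first use, then reuse the cached PSym.
  if ls.sym == nil:
    ls.sym = loadSymFromDisk(ls.id)
  result = ls.sym

when isMainModule:
  var convs = @[LazySym(id: FullId(module: 1,
                packed: PackedItemId(module: 0, item: 42)))]
  echo resolve(convs[0]).name    # prints "sym_42"; nothing is decoded before this call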
@@ -2362,6 +2362,7 @@ proc genMagicExpr(p: BProc, e: PNode, d: var TLoc, op: TMagic) =
# somehow forward-declared from some other usage, but it is *possible*
if lfNoDecl notin opr.loc.flags:
let prc = magicsys.getCompilerProc(p.module.g.graph, $opr.loc.r)
assert prc != nil, $opr.loc.r
# HACK:
# Explicitly add this proc as declared here so the cgsym call doesn't
# add a forward declaration - without this we could end up with the same

@@ -1144,7 +1144,9 @@ proc genProcNoForward(m: BModule, prc: PSym) =
#if prc.loc.k == locNone:
# mangle the inline proc based on the module where it is defined -
# not on the first module that uses it
fillProcLoc(findPendingModule(m, prc), prc.ast[namePos])
let m2 = if m.config.symbolFiles != disabledSf: m
else: findPendingModule(m, prc)
fillProcLoc(m2, prc.ast[namePos])
#elif {sfExportc, sfImportc} * prc.flags == {}:
# # reset name to restore consistency in case of hashing collisions:
# echo "resetting ", prc.id, " by ", m.module.name.s

@@ -51,10 +51,12 @@ proc addFileToLink(config: ConfigRef; m: PSym) =
elif config.backend == backendObjc: ".nim.m"
else: ".nim.c"
let cfile = changeFileExt(completeCfilePath(config, withPackageName(config, filename)), ext)
var cf = Cfile(nimname: m.name.s, cname: cfile,
obj: completeCfilePath(config, toObjFile(config, cfile)),
flags: {CfileFlag.Cached})
addFileToCompile(config, cf)
let objFile = completeCfilePath(config, toObjFile(config, cfile))
if fileExists(objFile):
var cf = Cfile(nimname: m.name.s, cname: cfile,
obj: objFile,
flags: {CfileFlag.Cached})
addFileToCompile(config, cf)

proc aliveSymsChanged(config: ConfigRef; position: int; alive: AliveSyms): bool =
let asymFile = toRodFile(config, AbsoluteFile toFullPath(config, position.FileIndex), ".alivesyms")
@@ -9,18 +9,20 @@

## Dead code elimination (=DCE) for IC.

import std / intsets
import ".." / [ast, options, lineinfos]
import std / [intsets, tables]
import ".." / [ast, options, lineinfos, types]

import packed_ast, to_packed_ast, bitabs

type
AliveSyms* = seq[IntSet]
AliveContext* = object ## Purpose is to fill the 'alive' field.
stack: seq[(int, NodePos)] ## A stack for marking symbols as alive.
stack: seq[(int, TOptions, NodePos)] ## A stack for marking symbols as alive.
decoder: PackedDecoder ## We need a PackedDecoder for module ID address translations.
thisModule: int ## The module we're currently analysing for DCE.
alive: AliveSyms ## The final result of our computation.
options: TOptions
compilerProcs: Table[string, (int, int32)]

proc isExportedToC(c: var AliveContext; g: PackedModuleGraph; symId: int32): bool =
## "Exported to C" procs are special (these are marked with '.exportc') because these

@@ -36,6 +38,8 @@ proc isExportedToC(c: var AliveContext; g: PackedModuleGraph; symId: int32): boo
result = true
# XXX: This used to be a condition to:
# (sfExportc in prc.flags and lfExportLib in prc.loc.flags) or
if sfCompilerProc in flags:
c.compilerProcs[g[c.thisModule].fromDisk.sh.strings[symPtr.name]] = (c.thisModule, symId)

template isNotGeneric(n: NodePos): bool = ithSon(tree, n, genericParamsPos).kind == nkEmpty

@@ -45,7 +49,40 @@ proc followLater(c: var AliveContext; g: PackedModuleGraph; module: int; item: i
if not c.alive[module].containsOrIncl(item):
let body = g[module].fromDisk.sh.syms[item].ast
if body != emptyNodeId:
c.stack.add((module, NodePos(body)))
let opt = g[module].fromDisk.sh.syms[item].options
c.stack.add((module, opt, NodePos(body)))

proc requestCompilerProc(c: var AliveContext; g: PackedModuleGraph; name: string) =
let (module, item) = c.compilerProcs[name]
followLater(c, g, module, item)

proc loadTypeKind(t: PackedItemId; c: AliveContext; g: PackedModuleGraph; toSkip: set[TTypeKind]): TTypeKind =
template kind(t: ItemId): TTypeKind = g[t.module].fromDisk.sh.types[t.item].kind

var t2 = translateId(t, g, c.thisModule, c.decoder.config)
result = t2.kind
while result in toSkip:
t2 = translateId(g[t2.module].fromDisk.sh.types[t2.item].types[^1], g, t2.module, c.decoder.config)
result = t2.kind

proc rangeCheckAnalysis(c: var AliveContext; g: PackedModuleGraph; tree: PackedTree; n: NodePos) =
## Replicates the logic of `ccgexprs.genRangeChck`.
## XXX Refactor so that the duplicated logic is avoided. However, for now it's not clear
## the approach has enough merit.
var dest = loadTypeKind(n.typ, c, g, abstractVar)
if optRangeCheck notin c.options or dest in {tyUInt..tyUInt64}:
discard "no need to generate a check because it was disabled"
else:
let n0t = loadTypeKind(n.firstSon.typ, c, g, {})
if n0t in {tyUInt, tyUInt64}:
c.requestCompilerProc(g, "raiseRangeErrorNoArgs")
else:
let raiser =
case loadTypeKind(n.typ, c, g, abstractVarRange)
of tyUInt..tyUInt64, tyChar: "raiseRangeErrorU"
of tyFloat..tyFloat128: "raiseRangeErrorF"
else: "raiseRangeErrorI"
c.requestCompilerProc(g, raiser)

proc aliveCode(c: var AliveContext; g: PackedModuleGraph; tree: PackedTree; n: NodePos) =
## Marks the symbols we encounter when we traverse the AST at `tree[n]` as alive, unless

@@ -71,6 +108,8 @@ proc aliveCode(c: var AliveContext; g: PackedModuleGraph; tree: PackedTree; n: N
discard
of nkVarSection, nkLetSection, nkConstSection:
discard
of nkChckRangeF, nkChckRange64, nkChckRange:
rangeCheckAnalysis(c, g, tree, n)
of nkProcDef, nkConverterDef, nkMethodDef, nkLambda, nkDo, nkFuncDef:
if n.firstSon.kind == nkSym and isNotGeneric(n):
if isExportedToC(c, g, n.firstSon.operand):

@@ -85,14 +124,16 @@ proc followNow(c: var AliveContext; g: PackedModuleGraph) =
## Mark all entries in the stack. Marking can add more entries
## to the stack but eventually we have looked at every alive symbol.
while c.stack.len > 0:
let (modId, ast) = c.stack.pop()
let (modId, opt, ast) = c.stack.pop()
c.thisModule = modId
c.options = opt
aliveCode(c, g, g[modId].fromDisk.bodies, ast)

proc computeAliveSyms*(g: PackedModuleGraph; conf: ConfigRef): AliveSyms =
## Entry point for our DCE algorithm.
var c = AliveContext(stack: @[], decoder: PackedDecoder(config: conf),
thisModule: -1, alive: newSeq[IntSet](g.len))
thisModule: -1, alive: newSeq[IntSet](g.len),
options: conf.options)
for i in countdown(high(g), 0):
if g[i].status != undefined:
c.thisModule = i
@@ -62,6 +62,7 @@ type
position*: int
offset*: int
externalName*: LitId # instead of TLoc
locFlags*: TLocFlags
annex*: PackedLib
when hasFFI:
cname*: LitId

@@ -127,3 +127,19 @@ proc replayGenericCacheInformation*(g: ModuleGraph; module: int) =
let sym = loadSymFromId(g.config, g.cache, g.packed, module,
PackedItemId(module: LitId(0), item: it))
methodDef(g, g.idgen, sym)

for it in mitems(g.packed[module].fromDisk.compilerProcs):
let symId = FullId(module: module, packed: PackedItemId(module: LitId(0), item: it[1]))
g.lazyCompilerprocs[g.packed[module].fromDisk.sh.strings[it[0]]] = symId

for it in mitems(g.packed[module].fromDisk.converters):
let symId = FullId(module: module, packed: PackedItemId(module: LitId(0), item: it))
g.ifaces[module].converters.add LazySym(id: symId, sym: nil)

for it in mitems(g.packed[module].fromDisk.trmacros):
let symId = FullId(module: module, packed: PackedItemId(module: LitId(0), item: it))
g.ifaces[module].patterns.add LazySym(id: symId, sym: nil)

for it in mitems(g.packed[module].fromDisk.pureEnums):
let symId = FullId(module: module, packed: PackedItemId(module: LitId(0), item: it))
g.ifaces[module].pureEnums.add LazySym(id: symId, sym: nil)

@@ -32,8 +32,8 @@ type
#producedGenerics*: Table[GenericKey, SymId]
exports*: seq[(LitId, int32)]
reexports*: seq[(LitId, PackedItemId)]
compilerProcs*, trmacros*, converters*, pureEnums*: seq[(LitId, int32)]
methods*: seq[int32]
compilerProcs*: seq[(LitId, int32)]
converters*, methods*, trmacros*, pureEnums*: seq[int32]
macroUsages*: seq[(PackedItemId, PackedLineInfo)]

typeInstCache*: seq[(PackedItemId, PackedItemId)]
@@ -154,17 +154,14 @@ proc addExported*(c: var PackedEncoder; m: var PackedModule; s: PSym) =
m.exports.add((nameId, s.itemId.item))

proc addConverter*(c: var PackedEncoder; m: var PackedModule; s: PSym) =
let nameId = getOrIncl(m.sh.strings, s.name.s)
m.converters.add((nameId, s.itemId.item))
m.converters.add(s.itemId.item)

proc addTrmacro*(c: var PackedEncoder; m: var PackedModule; s: PSym) =
let nameId = getOrIncl(m.sh.strings, s.name.s)
m.trmacros.add((nameId, s.itemId.item))
m.trmacros.add(s.itemId.item)

proc addPureEnum*(c: var PackedEncoder; m: var PackedModule; s: PSym) =
let nameId = getOrIncl(m.sh.strings, s.name.s)
assert s.kind == skType
m.pureEnums.add((nameId, s.itemId.item))
m.pureEnums.add(s.itemId.item)

proc addMethod*(c: var PackedEncoder; m: var PackedModule; s: PSym) =
m.methods.add s.itemId.item

@@ -349,6 +346,7 @@ proc storeSym*(s: PSym; c: var PackedEncoder; m: var PackedModule): PackedItemId
p.alignment = s.alignment

p.externalName = toLitId(if s.loc.r.isNil: "" else: $s.loc.r, m)
p.locFlags = s.loc.flags
c.addMissing s.typ
p.typ = s.typ.storeType(c, m)
c.addMissing s.owner

@@ -453,7 +451,7 @@ proc toPackedNodeTopLevel*(n: PNode, encoder: var PackedEncoder; m: var PackedMo
flush encoder, m

proc loadError(err: RodFileError; filename: AbsoluteFile) =
echo "Error: ", $err, "\nloading file: ", filename.string
echo "Error: ", $err, " loading file: ", filename.string

proc loadRodFile*(filename: AbsoluteFile; m: var PackedModule; config: ConfigRef): RodFileError =
m.sh = Shared()

@@ -751,6 +749,7 @@ proc symBodyFromPacked(c: var PackedDecoder; g: var PackedModuleGraph;
let externalName = g[si].fromDisk.sh.strings[s.externalName]
if externalName != "":
result.loc.r = rope externalName
result.loc.flags = s.locFlags

proc loadSym(c: var PackedDecoder; g: var PackedModuleGraph; thisModule: int; s: PackedItemId): PSym =
if s == nilItemId:
@@ -819,6 +818,17 @@ proc loadType(c: var PackedDecoder; g: var PackedModuleGraph; thisModule: int; t
result = g[si].types[t.item]
assert result.itemId.item > 0

proc newPackage(config: ConfigRef; cache: IdentCache; fileIdx: FileIndex): PSym =
let filename = AbsoluteFile toFullPath(config, fileIdx)
let name = getIdent(cache, splitFile(filename).name)
let info = newLineInfo(fileIdx, 1, 1)
let
pck = getPackageName(config, filename.string)
pck2 = if pck.len > 0: pck else: "unknown"
pack = getIdent(cache, pck2)
result = newSym(skPackage, getIdent(cache, pck2),
ItemId(module: PackageModuleId, item: int32(fileIdx)), nil, info)

proc setupLookupTables(g: var PackedModuleGraph; conf: ConfigRef; cache: IdentCache;
fileIdx: FileIndex; m: var LoadedModule) =
m.iface = initTable[PIdent, seq[PackedItemId]]()

@@ -836,6 +846,7 @@ proc setupLookupTables(g: var PackedModuleGraph; conf: ConfigRef; cache: IdentCa
name: getIdent(cache, splitFile(filename).name),
info: newLineInfo(fileIdx, 1, 1),
position: int(fileIdx))
m.module.owner = newPackage(conf, cache, fileIdx)

proc loadToReplayNodes(g: var PackedModuleGraph; conf: ConfigRef; cache: IdentCache;
fileIdx: FileIndex; m: var LoadedModule) =

@@ -851,7 +862,7 @@ proc loadToReplayNodes(g: var PackedModuleGraph; conf: ConfigRef; cache: IdentCa
m.module.ast.add loadNodes(decoder, g, int(fileIdx), m.fromDisk.toReplay, p)

proc needsRecompile(g: var PackedModuleGraph; conf: ConfigRef; cache: IdentCache;
fileIdx: FileIndex): bool =
fileIdx: FileIndex; cachedModules: var seq[FileIndex]): bool =
# Does the file belong to the fileIdx need to be recompiled?
let m = int(fileIdx)
if m >= g.len:

@@ -870,11 +881,12 @@ proc needsRecompile(g: var PackedModuleGraph; conf: ConfigRef; cache: IdentCache
let fid = toFileIndex(dep, g[m].fromDisk, conf)
# Warning: we need to traverse the full graph, so
# do **not use break here**!
if needsRecompile(g, conf, cache, fid):
if needsRecompile(g, conf, cache, fid, cachedModules):
result = true

if not result:
setupLookupTables(g, conf, cache, fileIdx, g[m])
cachedModules.add fileIdx
g[m].status = if result: outdated else: loaded
else:
loadError(err, rod)

@@ -887,14 +899,16 @@ proc needsRecompile(g: var PackedModuleGraph; conf: ConfigRef; cache: IdentCache
result = true

proc moduleFromRodFile*(g: var PackedModuleGraph; conf: ConfigRef; cache: IdentCache;
fileIdx: FileIndex): PSym =
fileIdx: FileIndex; cachedModules: var seq[FileIndex]): PSym =
## Returns 'nil' if the module needs to be recompiled.
if needsRecompile(g, conf, cache, fileIdx):
if needsRecompile(g, conf, cache, fileIdx, cachedModules):
result = nil
else:
result = g[int fileIdx].module
assert result != nil
loadToReplayNodes(g, conf, cache, fileIdx, g[int fileIdx])
assert result.position == int(fileIdx)
for m in cachedModules:
loadToReplayNodes(g, conf, cache, m, g[int m])

template setupDecoder() {.dirty.} =
var decoder = PackedDecoder(

@@ -919,7 +933,10 @@ proc loadProcBody*(config: ConfigRef, cache: IdentCache;

proc loadTypeFromId*(config: ConfigRef, cache: IdentCache;
g: var PackedModuleGraph; module: int; id: PackedItemId): PType =
result = g[module].types[id.item]
if id.item < g[module].types.len:
result = g[module].types[id.item]
else:
result = nil
if result == nil:
var decoder = PackedDecoder(
lastModule: int32(-1),

@@ -931,7 +948,10 @@ proc loadTypeFromId*(config: ConfigRef, cache: IdentCache;

proc loadSymFromId*(config: ConfigRef, cache: IdentCache;
g: var PackedModuleGraph; module: int; id: PackedItemId): PSym =
result = g[module].syms[id.item]
if id.item < g[module].syms.len:
result = g[module].syms[id.item]
else:
result = nil
if result == nil:
var decoder = PackedDecoder(
lastModule: int32(-1),
@@ -102,8 +102,8 @@ proc rawImportSymbol(c: PContext, s, origin: PSym; importSet: var IntSet) =
else:
importPureEnumField(c, e)
else:
if s.kind == skConverter: addConverter(c, s)
if hasPattern(s): addPattern(c, s)
if s.kind == skConverter: addConverter(c, LazySym(sym: s))
if hasPattern(s): addPattern(c, LazySym(sym: s))
if s.owner != origin:
c.exportIndirections.incl((origin.id, s.id))

@@ -171,11 +171,11 @@ template addUnnamedIt(c: PContext, fromMod: PSym; filter: untyped) {.dirty.} =
addPattern(c, it)
for it in c.graph.ifaces[fromMod.position].pureEnums:
if filter:
importPureEnumFields(c, it, it.typ)
importPureEnumFields(c, it.sym, it.sym.typ)

proc importAllSymbolsExcept(c: PContext, fromMod: PSym, exceptSet: IntSet) =
c.addImport ImportedModule(m: fromMod, mode: importExcept, exceptSet: exceptSet)
addUnnamedIt(c, fromMod, it.id notin exceptSet)
addUnnamedIt(c, fromMod, it.sym.id notin exceptSet)

proc importAllSymbols*(c: PContext, fromMod: PSym) =
c.addImport ImportedModule(m: fromMod, mode: importAll)

@@ -371,16 +371,29 @@ when defined(nimfix):
else:
template fixSpelling(n: PNode; ident: PIdent; op: untyped) = discard

proc errorUseQualifier*(c: PContext; info: TLineInfo; s: PSym) =
proc errorUseQualifier(c: PContext; info: TLineInfo; s: PSym; amb: var bool): PSym =
var err = "ambiguous identifier: '" & s.name.s & "'"
var i = 0
var ignoredModules = 0
for candidate in importedItems(c, s.name):
if i == 0: err.add " -- use one of the following:\n"
else: err.add "\n"
err.add " " & candidate.owner.name.s & "." & candidate.name.s
err.add ": " & typeToString(candidate.typ)
if candidate.kind == skModule:
inc ignoredModules
else:
result = candidate
inc i
localError(c.config, info, errGenerated, err)
if ignoredModules != i-1:
localError(c.config, info, errGenerated, err)
result = nil
else:
amb = false

proc errorUseQualifier*(c: PContext; info: TLineInfo; s: PSym) =
var amb: bool
discard errorUseQualifier(c, info, s, amb)

proc errorUseQualifier(c: PContext; info: TLineInfo; candidates: seq[PSym]) =
var err = "ambiguous identifier: '" & candidates[0].name.s & "'"

@@ -426,7 +439,7 @@ proc lookUp*(c: PContext, n: PNode): PSym =
return
if amb:
#contains(c.ambiguousSymbols, result.id):
errorUseQualifier(c, n.info, result)
result = errorUseQualifier(c, n.info, result, amb)
when false:
if result.kind == skStub: loadStub(result)

@@ -462,7 +475,7 @@ proc qualifiedLookUp*(c: PContext, n: PNode, flags: set[TLookupFlag]): PSym =
errorUndeclaredIdentifier(c, n.info, ident.s)
result = errorSym(c, n)
elif checkAmbiguity in flags and result != nil and amb:
errorUseQualifier(c, n.info, result)
result = errorUseQualifier(c, n.info, result, amb)
c.isAmbiguous = amb
of nkSym:
result = n.sym
@@ -105,6 +105,8 @@ proc addSonSkipIntLit*(father, son: PType; id: IdGenerator) =
proc getCompilerProc*(g: ModuleGraph; name: string): PSym =
let ident = getIdent(g.cache, name)
result = strTableGet(g.compilerprocs, ident)
if result == nil:
result = loadCompilerProc(g, name)

proc registerCompilerProc*(g: ModuleGraph; s: PSym) =
strTableAdd(g.compilerprocs, s)

@@ -19,12 +19,16 @@ import ic / [packed_ast, to_packed_ast]
type
SigHash* = distinct MD5Digest

LazySym* = object
id*: FullId
sym*: PSym

Iface* = object ## data we don't want to store directly in the
## ast.PSym type for s.kind == skModule
module*: PSym ## module this "Iface" belongs to
converters*: seq[PSym]
patterns*: seq[PSym]
pureEnums*: seq[PSym]
converters*: seq[LazySym]
patterns*: seq[LazySym]
pureEnums*: seq[LazySym]
interf: TStrTable

Operators* = object

@@ -35,10 +39,6 @@ type
module*: int
packed*: PackedItemId

LazySym* = object
id*: FullId
sym*: PSym

LazyType* = object
id*: FullId
typ*: PType

@@ -61,6 +61,7 @@ type

startupPackedConfig*: PackedConfig
packageSyms*: TStrTable
modulesPerPackage*: Table[ItemId, TStrTable]
deps*: IntSet # the dependency graph or potentially its transitive closure.
importDeps*: Table[FileIndex, seq[FileIndex]] # explicit import module dependencies
suggestMode*: bool # whether we are in nimsuggest mode or not.

@@ -82,6 +83,7 @@ type
systemModule*: PSym
sysTypes*: array[TTypeKind, PType]
compilerprocs*: TStrTable
lazyCompilerprocs*: Table[string, FullId]
exposed*: TStrTable
packageTypes*: TStrTable
emptyNode*: PNode

@@ -153,7 +155,7 @@ template semtab*(m: PSym; g: ModuleGraph): TStrTable =
g.ifaces[m.position].interf

proc isCachedModule(g: ModuleGraph; module: int): bool {.inline.} =
module < g.packed.len and g.packed[module].status == loaded
result = module < g.packed.len and g.packed[module].status == loaded

proc isCachedModule(g: ModuleGraph; m: PSym): bool {.inline.} =
isCachedModule(g, m.position)

@@ -173,7 +175,7 @@ type
proc initModuleIter*(mi: var ModuleIter; g: ModuleGraph; m: PSym; name: PIdent): PSym =
assert m.kind == skModule
mi.modIndex = m.position
mi.fromRod = mi.modIndex < g.packed.len and g.packed[mi.modIndex].status == loaded
mi.fromRod = isCachedModule(g, mi.modIndex)
if mi.fromRod:
result = initRodIter(mi.rodIt, g.config, g.cache, g.packed, FileIndex mi.modIndex, name)
else:

@@ -293,6 +295,14 @@ proc copyTypeProps*(g: ModuleGraph; module: int; dest, src: PType) =
if op != nil:
setAttachedOp(g, module, dest, k, op)

proc loadCompilerProc*(g: ModuleGraph; name: string): PSym =
if g.config.symbolFiles == disabledSf: return nil
let t = g.lazyCompilerprocs.getOrDefault(name)
if t.module != 0:
assert isCachedModule(g, t.module)
result = loadSymFromId(g.config, g.cache, g.packed, t.module, t.packed)
if result != nil:
strTableAdd(g.compilerprocs, result)

proc `$`*(u: SigHash): string =
toBase64a(cast[cstring](unsafeAddr u), sizeof(u))

@@ -410,7 +420,7 @@ proc resetAllModules*(g: ModuleGraph) =

proc getModule*(g: ModuleGraph; fileIdx: FileIndex): PSym =
if fileIdx.int32 >= 0:
if fileIdx.int32 < g.packed.len and g.packed[fileIdx.int32].status == loaded:
if isCachedModule(g, fileIdx.int32):
result = g.packed[fileIdx.int32].module
elif fileIdx.int32 < g.ifaces.len:
result = g.ifaces[fileIdx.int32].module

@@ -474,10 +484,11 @@ proc getBody*(g: ModuleGraph; s: PSym): PNode {.inline.} =
s.ast[bodyPos] = result
assert result != nil

proc moduleFromRodFile*(g: ModuleGraph; fileIdx: FileIndex): PSym =
proc moduleFromRodFile*(g: ModuleGraph; fileIdx: FileIndex;
cachedModules: var seq[FileIndex]): PSym =
## Returns 'nil' if the module needs to be recompiled.
if g.config.symbolFiles in {readOnlySf, v2Sf, stressTest}:
result = moduleFromRodFile(g.packed, g.config, g.cache, fileIdx)
result = moduleFromRodFile(g.packed, g.config, g.cache, fileIdx, cachedModules)

proc configComplete*(g: ModuleGraph) =
rememberStartupConfig(g.startupPackedConfig, g.config)
@@ -39,30 +39,22 @@ proc getPackage(graph: ModuleGraph; fileIdx: FileIndex): PSym =
#initStrTable(packSym.tab)
graph.packageSyms.strTableAdd(result)
else:
# we now produce a fake Nimble package instead
# to resolve the conflicts:
let pck3 = fakePackageName(graph.config, filename)
# this makes the new `packSym`'s owner be the original `packSym`
result = newSym(skPackage, getIdent(graph.cache, pck3), packageId(), result, info)
#initStrTable(packSym.tab)
graph.packageSyms.strTableAdd(result)

when false:
let existing = strTableGet(packSym.tab, name)
if existing != nil and existing.info.fileIndex != info.fileIndex:
when false:
# we used to produce an error:
localError(graph.config, info,
"module names need to be unique per Nimble package; module clashes with " &
toFullPath(graph.config, existing.info.fileIndex))
else:
# but starting with version 0.20 we now produce a fake Nimble package instead
# to resolve the conflicts:
let pck3 = fakePackageName(graph.config, filename)
# this makes the new `packSym`'s owner be the original `packSym`
packSym = newSym(skPackage, getIdent(graph.cache, pck3), packageId(), packSym, info)
#initStrTable(packSym.tab)
graph.packageSyms.strTableAdd(packSym)
let modules = graph.modulesPerPackage.getOrDefault(result.itemId)
let existing = if modules.data.len > 0: strTableGet(modules, name) else: nil
if existing != nil and existing.info.fileIndex != info.fileIndex:
when false:
# we used to produce an error:
localError(graph.config, info,
"module names need to be unique per Nimble package; module clashes with " &
toFullPath(graph.config, existing.info.fileIndex))
else:
# but starting with version 0.20 we now produce a fake Nimble package instead
# to resolve the conflicts:
let pck3 = fakePackageName(graph.config, filename)
# this makes the new `result`'s owner be the original `result`
result = newSym(skPackage, getIdent(graph.cache, pck3), packageId(), result, info)
#initStrTable(packSym.tab)
graph.packageSyms.strTableAdd(result)

proc partialInitModule(result: PSym; graph: ModuleGraph; fileIdx: FileIndex; filename: AbsoluteFile) =
let packSym = getPackage(graph, fileIdx)

@@ -75,7 +67,10 @@ proc partialInitModule(result: PSym; graph: ModuleGraph; fileIdx: FileIndex; fil
# This is now implemented via
# c.moduleScope.addSym(module) # a module knows itself
# in sem.nim, around line 527
#strTableAdd(packSym.tab, result)

if graph.modulesPerPackage.getOrDefault(packSym.itemId).data.len == 0:
graph.modulesPerPackage[packSym.itemId] = newStrTable()
graph.modulesPerPackage[packSym.itemId].strTableAdd(result)

proc newModule(graph: ModuleGraph; fileIdx: FileIndex): PSym =
let filename = AbsoluteFile toFullPath(graph.config, fileIdx)

@@ -101,7 +96,8 @@ proc compileModule*(graph: ModuleGraph; fileIdx: FileIndex; flags: TSymFlags): P
elif graph.config.projectIsCmd: s = llStreamOpen(graph.config.cmdInput)
discard processModule(graph, result, idGeneratorFromModule(result), s)
if result == nil:
result = moduleFromRodFile(graph, fileIdx)
var cachedModules: seq[FileIndex]
result = moduleFromRodFile(graph, fileIdx, cachedModules)
let filename = AbsoluteFile toFullPath(graph.config, fileIdx)
if result == nil:
result = newModule(graph, fileIdx)

@@ -109,9 +105,12 @@ proc compileModule*(graph: ModuleGraph; fileIdx: FileIndex; flags: TSymFlags): P
registerModule(graph, result)
processModuleAux()
else:
if sfSystemModule in flags:
graph.systemModule = result
partialInitModule(result, graph, fileIdx, filename)
replayStateChanges(result, graph)
replayGenericCacheInformation(graph, fileIdx.int)
for m in cachedModules:
replayStateChanges(graph.packed[m.int].module, graph)
replayGenericCacheInformation(graph, m.int)
elif graph.isDirty(result):
result.flags.excl sfDirty
# reset module fields:
@@ -4,10 +4,8 @@ hint[XDeclaredButNotUsed]:off

define:booting
define:nimcore
#define:nimIncremental
#import:"$projectpath/testability"

#define:staticSqlite
#import:"$projectpath/testability"

@if windows:
cincludes: "$lib/wrappers/libffi/common"

@@ -562,6 +562,7 @@ proc isEmptyTree(n: PNode): bool =
proc semStmtAndGenerateGenerics(c: PContext, n: PNode): PNode =
if c.topStmts == 0 and not isImportSystemStmt(c.graph, n):
if sfSystemModule notin c.module.flags and not isEmptyTree(n):
assert c.graph.systemModule != nil
c.moduleScope.addSym c.graph.systemModule # import the "System" identifier
importAllSymbols(c, c.graph.systemModule)
inc c.topStmts
@@ -330,27 +330,31 @@ proc addPragmaComputation*(c: PContext; n: PNode) =
if c.config.symbolFiles != disabledSf:
addPragmaComputation(c.encoder, c.packedRepr, n)

proc inclSym(sq: var seq[PSym], s: PSym) =
proc inclSym(sq: var seq[PSym], s: PSym): bool =
for i in 0..<sq.len:
if sq[i].id == s.id: return
if sq[i].id == s.id: return false
sq.add s
result = true

proc addConverter*(c: PContext, conv: PSym) =
inclSym(c.converters, conv)
inclSym(c.graph.ifaces[c.module.position].converters, conv)
proc addConverter*(c: PContext, conv: LazySym) =
assert conv.sym != nil
if inclSym(c.converters, conv.sym):
add(c.graph.ifaces[c.module.position].converters, conv)
if c.config.symbolFiles != disabledSf:
addConverter(c.encoder, c.packedRepr, conv)
addConverter(c.encoder, c.packedRepr, conv.sym)

proc addPureEnum*(c: PContext, e: PSym) =
inclSym(c.graph.ifaces[c.module.position].pureEnums, e)
proc addPureEnum*(c: PContext, e: LazySym) =
assert e.sym != nil
add(c.graph.ifaces[c.module.position].pureEnums, e)
if c.config.symbolFiles != disabledSf:
addPureEnum(c.encoder, c.packedRepr, e)
addPureEnum(c.encoder, c.packedRepr, e.sym)

proc addPattern*(c: PContext, p: PSym) =
inclSym(c.patterns, p)
inclSym(c.graph.ifaces[c.module.position].patterns, p)
proc addPattern*(c: PContext, p: LazySym) =
assert p.sym != nil
if inclSym(c.patterns, p.sym):
add(c.graph.ifaces[c.module.position].patterns, p)
if c.config.symbolFiles != disabledSf:
addTrmacro(c.encoder, c.packedRepr, p)
addTrmacro(c.encoder, c.packedRepr, p.sym)

proc exportSym*(c: PContext; s: PSym) =
strTableAdd(c.module.semtab(c.graph), s)

@@ -421,7 +425,7 @@ proc makeTypeSymNode*(c: PContext, typ: PType, info: TLineInfo): PNode =
typedesc.addSonSkipIntLit(typ, c.idgen)
let sym = newSym(skType, c.cache.idAnon, nextSymId(c.idgen), getCurrOwner(c), info,
c.config.options).linkTo(typedesc)
return newSymNode(sym, info)
result = newSymNode(sym, info)

proc makeTypeFromExpr*(c: PContext, n: PNode): PType =
result = newTypeS(tyFromExpr, c)

@@ -581,4 +585,4 @@ proc saveRodFile*(c: PContext) =
# debug code, but maybe a good idea for production? Could reduce the compiler's
# memory consumption considerably at the cost of more loads from disk.
simulateCachedModule(c.graph, c.module, c.packedRepr)
c.graph.packed[c.module.position].status = loaded
c.graph.packed[c.module.position].status = loaded

@@ -2532,9 +2532,9 @@ proc semExportExcept(c: PContext, n: PNode): PNode =

proc semExport(c: PContext, n: PNode): PNode =
proc specialSyms(c: PContext; s: PSym) {.inline.} =
if s.kind == skConverter: addConverter(c, s)
if s.kind == skConverter: addConverter(c, LazySym(sym: s))
elif s.kind == skType and s.typ != nil and s.typ.kind == tyEnum and sfPure in s.flags:
addPureEnum(c, s)
addPureEnum(c, LazySym(sym: s))

result = newNodeI(nkExportStmt, n.info)
for i in 0..<n.len:

@@ -1042,7 +1042,7 @@ proc typeSectionTypeName(c: PContext; n: PNode): PNode =
if result.kind != nkSym: illFormedAst(n, c.config)

proc typeDefLeftSidePass(c: PContext, typeSection: PNode, i: int) =
let typeDef= typeSection[i]
let typeDef = typeSection[i]
checkSonsLen(typeDef, 3, c.config)
var name = typeDef[0]
var s: PSym

@@ -2123,7 +2123,7 @@ proc semConverterDef(c: PContext, n: PNode): PNode =
var t = s.typ
if t[0] == nil: localError(c.config, n.info, errXNeedsReturnType % "converter")
if t.len != 2: localError(c.config, n.info, "a converter takes exactly one argument")
addConverter(c, s)
addConverter(c, LazySym(sym: s))

proc semMacroDef(c: PContext, n: PNode): PNode =
checkSonsLen(n, bodyPos + 1, c.config)

@@ -151,7 +151,7 @@ proc semEnum(c: PContext, n: PNode, prev: PType): PType =
wrongRedefinition(c, e.info, e.name.s, conflict.info)
inc(counter)
if isPure and sfExported in result.sym.flags:
addPureEnum(c, result.sym)
addPureEnum(c, LazySym(sym: result.sym))
if tfNotNil in e.typ.flags and not hasNull:
result.flags.incl tfRequiresInit
setToStringProc(c.graph, result, genEnumToStrProc(result, n.info, c.graph, c.idgen))
@@ -499,6 +499,49 @@ proc testNimblePackages(r: var TResults; cat: Category; packageFilter: string, p
finally:
if errors == 0: removeDir(packagesDir)

# ---------------- IC tests ---------------------------------------------

proc icTests(r: var TResults; testsDir: string, cat: Category, options: string) =
const
tooltests = ["compiler/nim.nim", "tools/nimgrep.nim"]
writeOnly = " --incremental:writeonly "
readOnly = " --incremental:readonly "
incrementalOn = " --incremental:on "

template test(x: untyped) =
testSpecWithNimcache(r, makeRawTest(file, x & options, cat), nimcache)

template editedTest(x: untyped) =
var test = makeTest(file, x & options, cat)
test.spec.targets = {getTestSpecTarget()}
testSpecWithNimcache(r, test, nimcache)

const tempExt = "_temp.nim"
for it in walkDirRec(testsDir / "ic"):
if isTestFile(it) and not it.endsWith(tempExt):
let nimcache = nimcacheDir(it, options, getTestSpecTarget())
removeDir(nimcache)

let content = readFile(it)
for fragment in content.split("#!EDIT!#"):
let file = it.replace(".nim", tempExt)
writeFile(file, fragment)
let oldPassed = r.passed
editedTest incrementalOn
if r.passed != oldPassed+1: break

when false:
for file in tooltests:
let nimcache = nimcacheDir(file, options, getTestSpecTarget())
removeDir(nimcache)

let oldPassed = r.passed
test writeOnly

if r.passed == oldPassed+1:
test readOnly
if r.passed == oldPassed+2:
test readOnly

# ----------------------------------------------------------------------------

@@ -665,6 +708,8 @@ proc processCategory(r: var TResults, cat: Category,
testNimblePackages(r, cat, options, ppTwo)
of "niminaction":
testNimInAction(r, cat, options)
of "ic":
icTests(r, testsDir, cat, options)
of "untestable":
# We can't test it because it depends on a third party.
discard # TODO: Move untestable tests to someplace else, i.e. nimble repo.
tests/ic/thallo.nim (new file, 29 lines)
@@ -0,0 +1,29 @@
discard """
  output: "Hello World"
  disabled: "true"
"""

const str = "Hello World"
echo str

# Splitters are done with this special comment:

#!EDIT!#

discard """
  output: "Hello World B"
"""

const str = "Hello World"
echo str, " B"

#!EDIT!#

discard """
  output: "Hello World C"
"""

const str = "Hello World"
var x = 7
if 3+4 == x:
  echo str, " C"