mirror of
https://github.com/nim-lang/Nim.git
synced 2026-02-11 13:58:52 +00:00
IC: massive cleanup, NIF26 support, docs about its inner workings (#25427)
This commit is contained in:
@@ -159,7 +159,6 @@ type
|
||||
inProc: int
|
||||
#writtenTypes: seq[PType] # types written in this module, to be unloaded later
|
||||
#writtenSyms: seq[PSym] # symbols written in this module, to be unloaded later
|
||||
exports: Table[FileIndex, HashSet[string]] # module -> specific symbol names (empty = all)
|
||||
writtenPackages: HashSet[string]
|
||||
|
||||
const
|
||||
@@ -474,6 +473,40 @@ proc trImport(w: var Writer; n: PNode) =
|
||||
w.deps.addStrLit fp # raw string literal, no wrapper needed
|
||||
w.deps.addParRi
|
||||
|
||||
proc trExport(w: var Writer; n: PNode) =
|
||||
# Collect export information for the index
|
||||
# nkExportStmt children are nkSym nodes
|
||||
# When exporting a module (export dollars), the module symbol is a child
|
||||
# followed by all symbols from that module - we use empty set to mean "export all"
|
||||
# When exporting specific symbols (export foo, bar), we collect their names
|
||||
w.deps.addParLe pool.tags.getOrIncl(toNifTag(n.kind)), trLineInfo(w, n.info)
|
||||
w.deps.addDotToken # flags
|
||||
w.deps.addDotToken # type
|
||||
for child in n:
|
||||
if child.kind == nkSym:
|
||||
let s = child.sym
|
||||
if s.kindImpl == skModule:
|
||||
discard "do not write module syms here"
|
||||
else:
|
||||
w.deps.addSymUse pool.syms.getOrIncl(w.toNifSymName(s)), NoLineInfo
|
||||
w.deps.addParRi
|
||||
|
||||
let replayTag = registerTag("replay")
|
||||
let repConverterTag = registerTag("repconverter")
|
||||
let repDestroyTag = registerTag("repdestroy")
|
||||
let repWasMovedTag = registerTag("repwasmoved")
|
||||
let repCopyTag = registerTag("repcopy")
|
||||
let repSinkTag = registerTag("repsink")
|
||||
let repDupTag = registerTag("repdup")
|
||||
let repTraceTag = registerTag("reptrace")
|
||||
let repDeepCopyTag = registerTag("repdeepcopy")
|
||||
let repEnumToStrTag = registerTag("repenumtostr")
|
||||
let repMethodTag = registerTag("repmethod")
|
||||
#let repClassTag = registerTag("repclass")
|
||||
let includeTag = registerTag("include")
|
||||
let importTag = registerTag("import")
|
||||
let implTag = registerTag("implementation")
|
||||
|
||||
proc writeNode(w: var Writer; dest: var TokenBuf; n: PNode; forAst = false) =
|
||||
if n == nil:
|
||||
dest.addDotToken
|
||||
@@ -581,37 +614,9 @@ proc writeNode(w: var Writer; dest: var TokenBuf; n: PNode; forAst = false) =
|
||||
of nkIncludeStmt:
|
||||
trInclude w, n
|
||||
of nkExportStmt, nkExportExceptStmt:
|
||||
# Collect export information for the index
|
||||
# nkExportStmt children are nkSym nodes
|
||||
# When exporting a module (export dollars), the module symbol is a child
|
||||
# followed by all symbols from that module - we use empty set to mean "export all"
|
||||
# When exporting specific symbols (export foo, bar), we collect their names
|
||||
# Note: nkExportExceptStmt is transformed to nkExportStmt by semExportExcept,
|
||||
# but we handle both just in case
|
||||
var exportAllModules = initHashSet[FileIndex]()
|
||||
for child in n:
|
||||
if child.kind == nkSym:
|
||||
let s = child.sym
|
||||
if s.kindImpl == skModule:
|
||||
# Export all from this module - use empty set
|
||||
let modIdx = s.positionImpl.FileIndex
|
||||
exportAllModules.incl modIdx
|
||||
if modIdx notin w.exports:
|
||||
w.exports[modIdx] = initHashSet[string]() # empty means "export all"
|
||||
else:
|
||||
# Export specific symbol, but only if we're not already exporting all from this module
|
||||
let modIdx = s.itemId.module.FileIndex
|
||||
if modIdx notin exportAllModules:
|
||||
if modIdx notin w.exports:
|
||||
w.exports[modIdx] = initHashSet[string]()
|
||||
w.exports[modIdx].incl s.name.s
|
||||
# Write the export statement as a regular node
|
||||
w.withNode dest, n:
|
||||
for i in 0 ..< n.len:
|
||||
if n[i].kind == nkSym and n[i].sym.kindImpl == skModule:
|
||||
discard "do not write module syms here"
|
||||
else:
|
||||
writeNode(w, dest, n[i], forAst)
|
||||
trExport w, n
|
||||
else:
|
||||
w.withNode dest, n:
|
||||
for i in 0 ..< n.len:
|
||||
@@ -663,40 +668,6 @@ proc createStmtList(buf: var TokenBuf; info: PackedLineInfo) {.inline.} =
|
||||
buf.addDotToken # flags
|
||||
buf.addDotToken # type
|
||||
|
||||
proc buildExportBuf(w: var Writer): TokenBuf =
|
||||
## Build the export section for the NIF index from collected exports
|
||||
result = createTokenBuf(32)
|
||||
for modIdx, names in w.exports:
|
||||
let path = toFullPath(w.infos.config, modIdx)
|
||||
if names.len == 0:
|
||||
# Export all from this module
|
||||
result.addParLe(TagId(ExportIdx), NoLineInfo)
|
||||
result.add strToken(pool.strings.getOrIncl(path), NoLineInfo)
|
||||
result.addParRi()
|
||||
else:
|
||||
# Export specific symbols
|
||||
result.addParLe(TagId(FromexportIdx), NoLineInfo)
|
||||
result.add strToken(pool.strings.getOrIncl(path), NoLineInfo)
|
||||
for name in names:
|
||||
result.add identToken(pool.strings.getOrIncl(name), NoLineInfo)
|
||||
result.addParRi()
|
||||
|
||||
let replayTag = registerTag("replay")
|
||||
let repConverterTag = registerTag("repconverter")
|
||||
let repDestroyTag = registerTag("repdestroy")
|
||||
let repWasMovedTag = registerTag("repwasmoved")
|
||||
let repCopyTag = registerTag("repcopy")
|
||||
let repSinkTag = registerTag("repsink")
|
||||
let repDupTag = registerTag("repdup")
|
||||
let repTraceTag = registerTag("reptrace")
|
||||
let repDeepCopyTag = registerTag("repdeepcopy")
|
||||
let repEnumToStrTag = registerTag("repenumtostr")
|
||||
let repMethodTag = registerTag("repmethod")
|
||||
#let repClassTag = registerTag("repclass")
|
||||
let includeTag = registerTag("include")
|
||||
let importTag = registerTag("import")
|
||||
let implTag = registerTag("implementation")
|
||||
|
||||
proc writeOp(w: var Writer; content: var TokenBuf; op: LogEntry) =
|
||||
case op.kind
|
||||
of HookEntry:
|
||||
@@ -784,10 +755,6 @@ proc writeNifModule*(config: ConfigRef; thisModule: int32; n: PNode;
|
||||
|
||||
writeFile(dest, d)
|
||||
|
||||
let exportBuf = buildExportBuf(w)
|
||||
createIndex(d, dest[0].info, false,
|
||||
IndexSections(exportBuf: exportBuf))
|
||||
|
||||
# --------------------------- Loader (lazy!) -----------------------------------------------
|
||||
|
||||
proc nodeKind(n: Cursor): TNodeKind {.inline.} =
|
||||
@@ -845,7 +812,7 @@ type
|
||||
NifModule = ref object
|
||||
stream: nifstreams.Stream
|
||||
symCounter: int32
|
||||
index: NifIndex
|
||||
index: Table[string, NifIndexEntry] # Simple embedded index for offsets
|
||||
suffix: string
|
||||
|
||||
DecodeContext* = object
|
||||
@@ -871,25 +838,61 @@ type
|
||||
LoadFlag* = enum
|
||||
LoadFullAst, AlwaysLoadInterface
|
||||
|
||||
proc readEmbeddedIndex(s: var Stream): Table[string, NifIndexEntry] =
|
||||
## Reads the simple embedded index (index (kv sym offset)...) from indexStartsAt position.
|
||||
result = initTable[string, NifIndexEntry]()
|
||||
let indexPos = indexStartsAt(s.r)
|
||||
if indexPos <= 0:
|
||||
return
|
||||
let contentPos = offset(s.r) # Save position
|
||||
s.r.jumpTo(indexPos)
|
||||
|
||||
var previousOffset = 0
|
||||
var t = next(s)
|
||||
let exportedTagId = pool.tags.getOrIncl("x")
|
||||
if t.kind == ParLe and pool.tags[t.tagId] == ".index":
|
||||
t = next(s)
|
||||
while t.kind != EofToken and t.kind != ParRi:
|
||||
if t.kind == ParLe:
|
||||
let vis = if t.tagId == exportedTagId: Exported else: Hidden
|
||||
let info = t.info
|
||||
t = next(s) # skip (kv
|
||||
var key = ""
|
||||
if t.kind == Symbol:
|
||||
key = pool.syms[t.symId]
|
||||
elif t.kind == Ident:
|
||||
key = pool.strings[t.litId]
|
||||
t = next(s) # skip symbol
|
||||
if t.kind == IntLit:
|
||||
let offset = int(pool.integers[t.intId]) + previousOffset
|
||||
result[key] = NifIndexEntry(offset: offset, info: info, vis: vis)
|
||||
previousOffset = offset
|
||||
t = next(s) # skip offset
|
||||
if t.kind == ParRi:
|
||||
t = next(s) # skip )
|
||||
else:
|
||||
t = next(s)
|
||||
|
||||
s.r.jumpTo(contentPos) # Restore position
|
||||
|
||||
proc moduleId(c: var DecodeContext; suffix: string; flags: set[LoadFlag] = {}): FileIndex =
|
||||
var isKnownFile = false
|
||||
result = c.infos.config.registerNifSuffix(suffix, isKnownFile)
|
||||
if not isKnownFile or AlwaysLoadInterface in flags:
|
||||
let modFile = (getNimcacheDir(c.infos.config) / RelativeFile(suffix & ".nif")).string
|
||||
let idxFile = (getNimcacheDir(c.infos.config) / RelativeFile(suffix & ".s.idx.nif")).string
|
||||
if not fileExists(modFile):
|
||||
raiseAssert "NIF file not found for module suffix '" & suffix & "': " & modFile &
|
||||
". This can happen when loading a module from NIF that references another module " &
|
||||
"whose NIF file hasn't been written yet."
|
||||
c.mods[result] = NifModule(stream: nifstreams.open(modFile), index: readIndex(idxFile), suffix: suffix)
|
||||
var stream = nifstreams.open(modFile)
|
||||
let index = readEmbeddedIndex(stream)
|
||||
c.mods[result] = NifModule(stream: stream, index: index, suffix: suffix)
|
||||
|
||||
proc getOffset(c: var DecodeContext; module: FileIndex; nifName: string): NifIndexEntry =
|
||||
let ii = addr c.mods[module].index
|
||||
result = ii.public.getOrDefault(nifName)
|
||||
result = ii[].getOrDefault(nifName)
|
||||
if result.offset == 0:
|
||||
result = ii.private.getOrDefault(nifName)
|
||||
if result.offset == 0:
|
||||
raiseAssert "symbol has no offset: " & nifName
|
||||
raiseAssert "symbol has no offset: " & nifName
|
||||
|
||||
proc loadNode(c: var DecodeContext; n: var Cursor; thisModule: string;
|
||||
localSyms: var Table[string, PSym]): PNode
|
||||
@@ -1395,83 +1398,30 @@ proc extractBasename(nifName: string): string =
|
||||
proc populateInterfaceTablesFromIndex(c: var DecodeContext; module: FileIndex;
|
||||
interf, interfHidden: var TStrTable; thisModule: string) =
|
||||
## Populates interface tables from the NIF index structure.
|
||||
## Uses the index's public/private tables instead of traversing AST.
|
||||
## Uses the simple embedded index for offsets, exports passed from processTopLevel.
|
||||
|
||||
# Move the public table and exports list out to avoid iterator invalidation
|
||||
# Move the index table out to avoid iterator invalidation
|
||||
# (moduleId can add to c.mods which would invalidate Table iterators)
|
||||
# We move them back after iteration.
|
||||
var publicTab = move c.mods[module].index.public
|
||||
var exportsList = move c.mods[module].index.exports
|
||||
var indexTab = move c.mods[module].index
|
||||
|
||||
# Add all public symbols to interf (exported interface) and interfHidden
|
||||
for nifName, entry in publicTab:
|
||||
# Add all symbols to interf (exported interface) and interfHidden
|
||||
for nifName, entry in indexTab:
|
||||
if not nifName.startsWith("`t"):
|
||||
# do not load types, they are not part of an interface but an implementation detail!
|
||||
#echo "LOADING SYM ", nifName, " ", entry.offset
|
||||
let sym = loadSymFromIndexEntry(c, module, nifName, entry, thisModule)
|
||||
if sym != nil:
|
||||
strTableAdd(interf, sym)
|
||||
strTableAdd(interfHidden, sym)
|
||||
|
||||
# Move public table back
|
||||
c.mods[module].index.public = move publicTab
|
||||
|
||||
# Process exports (re-exports from other modules)
|
||||
for exp in exportsList:
|
||||
let (path, kind, names) = exp
|
||||
# Convert path to module suffix
|
||||
let expSuffix = moduleSuffix(path, cast[seq[string]](c.infos.config.searchPaths))
|
||||
# Load the exported module's index
|
||||
let expModule = moduleId(c, expSuffix)
|
||||
|
||||
# Move the exported module's public table out to avoid iterator invalidation
|
||||
var expPublicTab = move c.mods[expModule].index.public
|
||||
|
||||
# Build a set of names for filtering
|
||||
var nameSet = initHashSet[string]()
|
||||
for nameId in names:
|
||||
nameSet.incl pool.strings[nameId]
|
||||
|
||||
# Add symbols based on export kind
|
||||
for nifName, entry in expPublicTab:
|
||||
if nifName.startsWith("`t"):
|
||||
continue # skip types
|
||||
|
||||
let basename = extractBasename(nifName)
|
||||
let shouldInclude =
|
||||
case kind
|
||||
of ExportIdx: true # export all
|
||||
of FromexportIdx: basename in nameSet # only specific names
|
||||
of ExportexceptIdx: basename notin nameSet # all except specific names
|
||||
else: false
|
||||
|
||||
if shouldInclude:
|
||||
let sym = loadSymFromIndexEntry(c, expModule, nifName, entry, expSuffix)
|
||||
if sym != nil:
|
||||
if entry.vis == Exported:
|
||||
strTableAdd(interf, sym)
|
||||
strTableAdd(interfHidden, sym)
|
||||
|
||||
# Move exported module's public table back
|
||||
c.mods[expModule].index.public = move expPublicTab
|
||||
|
||||
# Move exports list back
|
||||
c.mods[module].index.exports = move exportsList
|
||||
|
||||
when false:
|
||||
# Add private symbols to interfHidden only
|
||||
for nifName, entry in idx.private:
|
||||
let sym = loadSymFromIndexEntry(c, module, nifName, entry, thisModule)
|
||||
if sym != nil:
|
||||
strTableAdd(interfHidden, sym)
|
||||
|
||||
# Move index table back
|
||||
c.mods[module].index = move indexTab
|
||||
|
||||
proc toNifFilename*(conf: ConfigRef; f: FileIndex): string =
|
||||
let suffix = moduleSuffix(conf, f)
|
||||
result = toGeneratedFile(conf, AbsoluteFile(suffix), ".nif").string
|
||||
|
||||
proc toNifIndexFilename*(conf: ConfigRef; f: FileIndex): string =
|
||||
let suffix = moduleSuffix(conf, f)
|
||||
result = toGeneratedFile(conf, AbsoluteFile(suffix), ".s.idx.nif").string
|
||||
|
||||
proc resolveSym(c: var DecodeContext; symAsStr: string; alsoConsiderPrivate: bool): PSym =
|
||||
result = c.syms.getOrDefault(symAsStr)[0]
|
||||
if result != nil:
|
||||
@@ -1482,14 +1432,11 @@ proc resolveSym(c: var DecodeContext; symAsStr: string; alsoConsiderPrivate: boo
|
||||
return nil # Local symbols shouldn't be hooks
|
||||
let module = moduleId(c, sn.module)
|
||||
# Look up the symbol in the module's index
|
||||
var offs = c.mods[module].index.public.getOrDefault(symAsStr)
|
||||
var offs = c.mods[module].index.getOrDefault(symAsStr)
|
||||
if offs.offset == 0:
|
||||
if alsoConsiderPrivate:
|
||||
offs = c.mods[module].index.private.getOrDefault(symAsStr)
|
||||
if offs.offset == 0:
|
||||
return nil
|
||||
else:
|
||||
return nil
|
||||
return nil
|
||||
if not alsoConsiderPrivate and offs.vis == Hidden:
|
||||
return nil
|
||||
# Create a stub symbol
|
||||
let val = addr c.mods[module].symCounter
|
||||
inc val[]
|
||||
@@ -1581,12 +1528,14 @@ proc loadImport(c: var DecodeContext; s: var Stream; deps: var seq[ModuleSuffix]
|
||||
else:
|
||||
raiseAssert "expected ParRi but got " & $tok.kind
|
||||
|
||||
proc processTopLevel(c: var DecodeContext; s: var Stream; flags: set[LoadFlag] = {}; suffix: string; module: int): PrecompiledModule =
|
||||
proc processTopLevel(c: var DecodeContext; s: var Stream; flags: set[LoadFlag];
|
||||
interf: var TStrTable; suffix: string; module: int): PrecompiledModule =
|
||||
result = PrecompiledModule(topLevel: newNode(nkStmtList))
|
||||
var localSyms = initTable[string, PSym]()
|
||||
|
||||
var t = next(s) # skip dot
|
||||
var cont = true
|
||||
let exportTag = pool.tags.getOrIncl"export"
|
||||
while cont and t.kind != EofToken:
|
||||
if t.kind == ParLe:
|
||||
if t.tagId == replayTag:
|
||||
@@ -1627,6 +1576,24 @@ proc processTopLevel(c: var DecodeContext; s: var Stream; flags: set[LoadFlag] =
|
||||
t = loadLogOp(c, result.logOps, s, MethodEntry, attachedTrace, module)
|
||||
#elif t.tagId == repClassTag:
|
||||
# t = loadLogOp(c, logOps, s, ClassEntry, attachedTrace, module)
|
||||
elif t.tagId == exportTag:
|
||||
t = next(s) # skip (export
|
||||
if t.kind == DotToken:
|
||||
t = next(s) # skip dot
|
||||
if t.kind == DotToken:
|
||||
t = next(s) # skip dot
|
||||
while true:
|
||||
if t.kind == Symbol:
|
||||
let symAsStr = pool.syms[t.symId]
|
||||
let sym = resolveSym(c, symAsStr, false)
|
||||
if sym != nil:
|
||||
strTableAdd(interf, sym)
|
||||
t = next(s)
|
||||
elif t.kind == ParRi:
|
||||
break
|
||||
else:
|
||||
raiseAssert "expected Symbol or ParRi but got " & $t.kind
|
||||
t = next(s)
|
||||
elif t.tagId == includeTag:
|
||||
t = skipTree(s)
|
||||
elif t.tagId == importTag:
|
||||
@@ -1649,25 +1616,25 @@ proc processTopLevel(c: var DecodeContext; s: var Stream; flags: set[LoadFlag] =
|
||||
|
||||
proc loadNifModule*(c: var DecodeContext; suffix: ModuleSuffix; interf, interfHidden: var TStrTable;
|
||||
flags: set[LoadFlag] = {}): PrecompiledModule =
|
||||
# Ensure module index is loaded - moduleId returns the FileIndex for this suffix
|
||||
# Ensure module index is loaded - moduleId returns the FileIndex for this suffix
|
||||
let module = moduleId(c, string(suffix), flags)
|
||||
|
||||
# Populate interface tables from the NIF index structure
|
||||
# Symbols are created as stubs (Partial state) and will be loaded lazily via loadSym
|
||||
populateInterfaceTablesFromIndex(c, module, interf, interfHidden, string(suffix))
|
||||
|
||||
# Load the module AST (or just replay actions if loadFullAst is false)
|
||||
# processTopLevel also collects export instructions
|
||||
let s = addr c.mods[module].stream
|
||||
s.r.jumpTo 0 # Start from beginning
|
||||
discard processDirectives(s.r)
|
||||
var t = next(s[])
|
||||
if t.kind == ParLe and pool.tags[t.tagId] == toNifTag(nkStmtList):
|
||||
t = next(s[]) # skip (stmts
|
||||
t = next(s[]) # skip flags
|
||||
result = processTopLevel(c, s[], flags, string(suffix), module.int)
|
||||
result = processTopLevel(c, s[], flags, interf, string(suffix), module.int)
|
||||
else:
|
||||
result = PrecompiledModule(topLevel: newNode(nkStmtList))
|
||||
|
||||
# Populate interface tables from the NIF index structure
|
||||
# Symbols are created as stubs (Partial state) and will be loaded lazily via loadSym
|
||||
# Use exports collected by processTopLevel
|
||||
populateInterfaceTablesFromIndex(c, module, interf, interfHidden, string(suffix))
|
||||
|
||||
proc loadNifModule*(c: var DecodeContext; f: FileIndex; interf, interfHidden: var TStrTable;
|
||||
flags: set[LoadFlag] = {}): PrecompiledModule =
|
||||
let suffix = ModuleSuffix(moduleSuffix(c.infos.config, f))
|
||||
|
||||
@@ -1996,7 +1996,7 @@ proc genTypeInfoV1(m: BModule; t: PType; info: TLineInfo): Rope =
|
||||
owner = m.module.position.int32
|
||||
|
||||
m.g.typeInfoMarker[sig] = (str: result, owner: owner)
|
||||
rememberEmittedTypeInfo(m.g.graph, FileIndex(owner), $result)
|
||||
#rememberEmittedTypeInfo(m.g.graph, FileIndex(owner), $result)
|
||||
|
||||
case t.kind
|
||||
of tyEmpty, tyVoid: result = cIntValue(0)
|
||||
|
||||
@@ -30,7 +30,6 @@ when not defined(leanCompiler):
|
||||
|
||||
import std/strutils except `%`, addf # collides with ropes.`%`
|
||||
|
||||
from ic / ic import ModuleBackendFlag
|
||||
import std/[dynlib, math, tables, sets, os, intsets, hashes]
|
||||
|
||||
const
|
||||
@@ -1926,36 +1925,6 @@ proc genMainProc(m: BModule) =
|
||||
if m.config.cppCustomNamespace.len > 0:
|
||||
openNamespaceNim(m.config.cppCustomNamespace, m.s[cfsProcs])
|
||||
|
||||
proc registerInitProcs*(g: BModuleList; m: PSym; flags: set[ModuleBackendFlag]) =
|
||||
## Called from the IC backend.
|
||||
if HasDatInitProc in flags:
|
||||
let datInit = getSomeNameForModule(g.config, g.config.toFullPath(m.info.fileIndex).AbsoluteFile) & "DatInit000"
|
||||
g.mainModProcs.addDeclWithVisibility(Private):
|
||||
g.mainModProcs.addProcHeader(ccNimCall, datInit, CVoid, cProcParams())
|
||||
g.mainModProcs.finishProcHeaderAsProto()
|
||||
g.mainDatInit.addCallStmt(datInit)
|
||||
if HasModuleInitProc in flags:
|
||||
let init = getSomeNameForModule(g.config, g.config.toFullPath(m.info.fileIndex).AbsoluteFile) & "Init000"
|
||||
g.mainModProcs.addDeclWithVisibility(Private):
|
||||
g.mainModProcs.addProcHeader(ccNimCall, init, CVoid, cProcParams())
|
||||
g.mainModProcs.finishProcHeaderAsProto()
|
||||
if sfMainModule in m.flags:
|
||||
g.mainModInit.addCallStmt(init)
|
||||
elif sfSystemModule in m.flags:
|
||||
g.mainDatInit.addCallStmt(init) # systemInit must called right after systemDatInit if any
|
||||
else:
|
||||
g.otherModsInit.addCallStmt(init)
|
||||
|
||||
proc whichInitProcs*(m: BModule): set[ModuleBackendFlag] =
|
||||
# called from IC.
|
||||
result = {}
|
||||
if m.hcrOn or m.preInitProc.s(cpsInit).buf.len > 0 or m.preInitProc.s(cpsStmts).buf.len > 0:
|
||||
result.incl HasModuleInitProc
|
||||
for i in cfsTypeInit1..cfsDynLibInit:
|
||||
if m.s[i].buf.len != 0:
|
||||
result.incl HasDatInitProc
|
||||
break
|
||||
|
||||
proc registerModuleToMain(g: BModuleList; m: BModule) =
|
||||
let
|
||||
init = m.getInitName
|
||||
|
||||
@@ -494,7 +494,6 @@ proc parseCommand*(command: string): Command =
|
||||
of "gendepend": cmdGendepend
|
||||
of "dump": cmdDump
|
||||
of "parse": cmdParse
|
||||
of "rod": cmdRod
|
||||
of "secret": cmdInteractive
|
||||
of "nop", "help": cmdNop
|
||||
of "jsonscript": cmdJsonscript
|
||||
|
||||
@@ -1,178 +0,0 @@
|
||||
## A BiTable is a table that can be seen as an optimized pair
|
||||
## of `(Table[LitId, Val], Table[Val, LitId])`.
|
||||
|
||||
import std/hashes
|
||||
import rodfiles
|
||||
|
||||
when defined(nimPreviewSlimSystem):
|
||||
import std/assertions
|
||||
|
||||
type
|
||||
LitId* = distinct uint32
|
||||
|
||||
BiTable*[T] = object
|
||||
vals: seq[T] # indexed by LitId
|
||||
keys: seq[LitId] # indexed by hash(val)
|
||||
|
||||
proc initBiTable*[T](): BiTable[T] = BiTable[T](vals: @[], keys: @[])
|
||||
|
||||
proc nextTry(h, maxHash: Hash): Hash {.inline.} =
|
||||
result = (h + 1) and maxHash
|
||||
|
||||
template maxHash(t): untyped = high(t.keys)
|
||||
template isFilled(x: LitId): bool = x.uint32 > 0'u32
|
||||
|
||||
proc `$`*(x: LitId): string {.borrow.}
|
||||
proc `<`*(x, y: LitId): bool {.borrow.}
|
||||
proc `<=`*(x, y: LitId): bool {.borrow.}
|
||||
proc `==`*(x, y: LitId): bool {.borrow.}
|
||||
proc hash*(x: LitId): Hash {.borrow.}
|
||||
|
||||
|
||||
proc len*[T](t: BiTable[T]): int = t.vals.len
|
||||
|
||||
proc mustRehash(length, counter: int): bool {.inline.} =
|
||||
assert(length > counter)
|
||||
result = (length * 2 < counter * 3) or (length - counter < 4)
|
||||
|
||||
const
|
||||
idStart = 1
|
||||
|
||||
template idToIdx(x: LitId): int = x.int - idStart
|
||||
|
||||
proc hasLitId*[T](t: BiTable[T]; x: LitId): bool =
|
||||
let idx = idToIdx(x)
|
||||
result = idx >= 0 and idx < t.vals.len
|
||||
|
||||
proc enlarge[T](t: var BiTable[T]) =
|
||||
var n: seq[LitId]
|
||||
newSeq(n, len(t.keys) * 2)
|
||||
swap(t.keys, n)
|
||||
for i in 0..high(n):
|
||||
let eh = n[i]
|
||||
if isFilled(eh):
|
||||
var j = hash(t.vals[idToIdx eh]) and maxHash(t)
|
||||
while isFilled(t.keys[j]):
|
||||
j = nextTry(j, maxHash(t))
|
||||
t.keys[j] = move n[i]
|
||||
|
||||
proc getKeyId*[T](t: BiTable[T]; v: T): LitId =
|
||||
let origH = hash(v)
|
||||
var h = origH and maxHash(t)
|
||||
if t.keys.len != 0:
|
||||
while true:
|
||||
let litId = t.keys[h]
|
||||
if not isFilled(litId): break
|
||||
if t.vals[idToIdx t.keys[h]] == v: return litId
|
||||
h = nextTry(h, maxHash(t))
|
||||
return LitId(0)
|
||||
|
||||
proc getOrIncl*[T](t: var BiTable[T]; v: T): LitId =
|
||||
let origH = hash(v)
|
||||
var h = origH and maxHash(t)
|
||||
if t.keys.len != 0:
|
||||
while true:
|
||||
let litId = t.keys[h]
|
||||
if not isFilled(litId): break
|
||||
if t.vals[idToIdx t.keys[h]] == v: return litId
|
||||
h = nextTry(h, maxHash(t))
|
||||
# not found, we need to insert it:
|
||||
if mustRehash(t.keys.len, t.vals.len):
|
||||
enlarge(t)
|
||||
# recompute where to insert:
|
||||
h = origH and maxHash(t)
|
||||
while true:
|
||||
let litId = t.keys[h]
|
||||
if not isFilled(litId): break
|
||||
h = nextTry(h, maxHash(t))
|
||||
else:
|
||||
setLen(t.keys, 16)
|
||||
h = origH and maxHash(t)
|
||||
|
||||
result = LitId(t.vals.len + idStart)
|
||||
t.keys[h] = result
|
||||
t.vals.add v
|
||||
|
||||
|
||||
proc `[]`*[T](t: var BiTable[T]; litId: LitId): var T {.inline.} =
|
||||
let idx = idToIdx litId
|
||||
assert idx < t.vals.len
|
||||
result = t.vals[idx]
|
||||
|
||||
proc `[]`*[T](t: BiTable[T]; litId: LitId): lent T {.inline.} =
|
||||
let idx = idToIdx litId
|
||||
assert idx < t.vals.len
|
||||
result = t.vals[idx]
|
||||
|
||||
proc hash*[T](t: BiTable[T]): Hash =
|
||||
## as the keys are hashes of the values, we simply use them instead
|
||||
var h: Hash = 0
|
||||
for i, n in pairs t.keys:
|
||||
h = h !& hash((i, n))
|
||||
result = !$h
|
||||
|
||||
proc store*[T](f: var RodFile; t: BiTable[T]) =
|
||||
storeSeq(f, t.vals)
|
||||
storeSeq(f, t.keys)
|
||||
|
||||
proc load*[T](f: var RodFile; t: var BiTable[T]) =
|
||||
loadSeq(f, t.vals)
|
||||
loadSeq(f, t.keys)
|
||||
|
||||
proc sizeOnDisc*(t: BiTable[string]): int =
|
||||
result = 4
|
||||
for x in t.vals:
|
||||
result += x.len + 4
|
||||
result += t.keys.len * sizeof(LitId)
|
||||
|
||||
when isMainModule:
|
||||
|
||||
var t: BiTable[string]
|
||||
|
||||
echo getOrIncl(t, "hello")
|
||||
|
||||
echo getOrIncl(t, "hello")
|
||||
echo getOrIncl(t, "hello3")
|
||||
echo getOrIncl(t, "hello4")
|
||||
echo getOrIncl(t, "helloasfasdfdsa")
|
||||
echo getOrIncl(t, "hello")
|
||||
echo getKeyId(t, "hello")
|
||||
echo getKeyId(t, "none")
|
||||
|
||||
for i in 0 ..< 100_000:
|
||||
discard t.getOrIncl($i & "___" & $i)
|
||||
|
||||
for i in 0 ..< 100_000:
|
||||
assert t.getOrIncl($i & "___" & $i).idToIdx == i + 4
|
||||
echo "begin"
|
||||
echo t.vals.len
|
||||
|
||||
echo t.vals[0]
|
||||
echo t.vals[1004]
|
||||
|
||||
echo "middle"
|
||||
|
||||
var tf: BiTable[float]
|
||||
|
||||
discard tf.getOrIncl(0.4)
|
||||
discard tf.getOrIncl(16.4)
|
||||
discard tf.getOrIncl(32.4)
|
||||
echo getKeyId(tf, 32.4)
|
||||
|
||||
var f2 = open("testblah.bin", fmWrite)
|
||||
echo store(f2, tf)
|
||||
f2.close
|
||||
|
||||
var f1 = open("testblah.bin", fmRead)
|
||||
|
||||
var t2: BiTable[float]
|
||||
|
||||
echo f1.load(t2)
|
||||
echo t2.vals.len
|
||||
|
||||
echo getKeyId(t2, 32.4)
|
||||
|
||||
echo "end"
|
||||
|
||||
|
||||
f1.close
|
||||
@@ -1,179 +0,0 @@
|
||||
#
|
||||
#
|
||||
# The Nim Compiler
|
||||
# (c) Copyright 2021 Andreas Rumpf
|
||||
#
|
||||
# See the file "copying.txt", included in this
|
||||
# distribution, for details about the copyright.
|
||||
#
|
||||
|
||||
## New entry point into our C/C++ code generator. Ideally
|
||||
## somebody would rewrite the old backend (which is 8000 lines of crufty Nim code)
|
||||
## to work on packed trees directly and produce the C code as an AST which can
|
||||
## then be rendered to text in a very simple manner. Unfortunately nobody wrote
|
||||
## this code. So instead we wrap the existing cgen.nim and its friends so that
|
||||
## we call directly into the existing code generation logic but avoiding the
|
||||
## naive, outdated `passes` design. Thus you will see some
|
||||
## `useAliveDataFromDce in flags` checks in the old code -- the old code is
|
||||
## also doing cross-module dependency tracking and DCE that we don't need
|
||||
## anymore. DCE is now done as prepass over the entire packed module graph.
|
||||
|
||||
import std/[packedsets, algorithm, tables]
|
||||
|
||||
when defined(nimPreviewSlimSystem):
|
||||
import std/assertions
|
||||
|
||||
import ".."/[ast, options, lineinfos, modulegraphs, cgendata, cgen,
|
||||
pathutils, extccomp, msgs, modulepaths]
|
||||
|
||||
import packed_ast, ic, dce, rodfiles
|
||||
|
||||
proc unpackTree(g: ModuleGraph; thisModule: int;
|
||||
tree: PackedTree; n: NodePos): PNode =
|
||||
var decoder = initPackedDecoder(g.config, g.cache)
|
||||
result = loadNodes(decoder, g.packed, thisModule, tree, n)
|
||||
|
||||
proc setupBackendModule(g: ModuleGraph; m: var LoadedModule) =
|
||||
if g.backend == nil:
|
||||
g.backend = cgendata.newModuleList(g)
|
||||
assert g.backend != nil
|
||||
var bmod = cgen.newModule(BModuleList(g.backend), m.module, g.config, idgenFromLoadedModule(m))
|
||||
|
||||
proc generateCodeForModule(g: ModuleGraph; m: var LoadedModule; alive: var AliveSyms) =
|
||||
var bmod = BModuleList(g.backend).mods[m.module.position]
|
||||
assert bmod != nil
|
||||
bmod.flags.incl useAliveDataFromDce
|
||||
bmod.alive = move alive[m.module.position]
|
||||
|
||||
for p in allNodes(m.fromDisk.topLevel):
|
||||
let n = unpackTree(g, m.module.position, m.fromDisk.topLevel, p)
|
||||
cgen.genTopLevelStmt(bmod, n)
|
||||
|
||||
finalCodegenActions(g, bmod, newNodeI(nkStmtList, m.module.info))
|
||||
for disp in getDispatchers(g):
|
||||
genProcLvl3(bmod, disp)
|
||||
m.fromDisk.backendFlags = cgen.whichInitProcs(bmod)
|
||||
|
||||
proc replayTypeInfo(g: ModuleGraph; m: var LoadedModule; origin: FileIndex) =
|
||||
for x in mitems(m.fromDisk.emittedTypeInfo):
|
||||
#echo "found type ", x, " for file ", int(origin)
|
||||
g.emittedTypeInfo[x] = origin
|
||||
|
||||
proc addFileToLink(config: ConfigRef; m: PSym) =
|
||||
let filename = AbsoluteFile toFullPath(config, m.position.FileIndex)
|
||||
let ext =
|
||||
if config.backend == backendCpp: ".nim.cpp"
|
||||
elif config.backend == backendObjc: ".nim.m"
|
||||
else: ".nim.c"
|
||||
let cfile = changeFileExt(completeCfilePath(config,
|
||||
mangleModuleName(config, filename).AbsoluteFile), ext)
|
||||
let objFile = completeCfilePath(config, toObjFile(config, cfile))
|
||||
if fileExists(objFile):
|
||||
var cf = Cfile(nimname: m.name.s, cname: cfile,
|
||||
obj: objFile,
|
||||
flags: {CfileFlag.Cached})
|
||||
addFileToCompile(config, cf)
|
||||
|
||||
when defined(debugDce):
|
||||
import os, std/packedsets
|
||||
|
||||
proc storeAliveSymsImpl(asymFile: AbsoluteFile; s: seq[int32]) =
|
||||
var f = rodfiles.create(asymFile.string)
|
||||
f.storeHeader()
|
||||
f.storeSection aliveSymsSection
|
||||
f.storeSeq(s)
|
||||
close f
|
||||
|
||||
template prepare {.dirty.} =
|
||||
let asymFile = toRodFile(config, AbsoluteFile toFullPath(config, position.FileIndex), ".alivesyms")
|
||||
var s = newSeqOfCap[int32](alive[position].len)
|
||||
for a in items(alive[position]): s.add int32(a)
|
||||
sort(s)
|
||||
|
||||
proc storeAliveSyms(config: ConfigRef; position: int; alive: AliveSyms) =
|
||||
prepare()
|
||||
storeAliveSymsImpl(asymFile, s)
|
||||
|
||||
proc aliveSymsChanged(config: ConfigRef; position: int; alive: AliveSyms): bool =
|
||||
prepare()
|
||||
var f2 = rodfiles.open(asymFile.string)
|
||||
f2.loadHeader()
|
||||
f2.loadSection aliveSymsSection
|
||||
var oldData: seq[int32] = @[]
|
||||
f2.loadSeq(oldData)
|
||||
f2.close
|
||||
if f2.err == ok and oldData == s:
|
||||
result = false
|
||||
else:
|
||||
when defined(debugDce):
|
||||
let oldAsSet = toPackedSet[int32](oldData)
|
||||
let newAsSet = toPackedSet[int32](s)
|
||||
echo "set of live symbols changed ", asymFile.changeFileExt("rod"), " ", position, " ", f2.err
|
||||
echo "in old but not in new ", oldAsSet.difference(newAsSet), " number of entries in old ", oldAsSet.len
|
||||
echo "in new but not in old ", newAsSet.difference(oldAsSet), " number of entries in new ", newAsSet.len
|
||||
#if execShellCmd(getAppFilename() & " rod " & quoteShell(asymFile.changeFileExt("rod"))) != 0:
|
||||
# echo "command failed"
|
||||
result = true
|
||||
storeAliveSymsImpl(asymFile, s)
|
||||
|
||||
proc genPackedModule(g: ModuleGraph, i: int; alive: var AliveSyms) =
|
||||
# case statement here to enforce exhaustive checks.
|
||||
case g.packed[i].status
|
||||
of undefined:
|
||||
discard "nothing to do"
|
||||
of loading, stored:
|
||||
assert false
|
||||
of storing, outdated:
|
||||
storeAliveSyms(g.config, g.packed[i].module.position, alive)
|
||||
generateCodeForModule(g, g.packed[i], alive)
|
||||
closeRodFile(g, g.packed[i].module)
|
||||
of loaded:
|
||||
if g.packed[i].loadedButAliveSetChanged:
|
||||
generateCodeForModule(g, g.packed[i], alive)
|
||||
else:
|
||||
addFileToLink(g.config, g.packed[i].module)
|
||||
replayTypeInfo(g, g.packed[i], FileIndex(i))
|
||||
|
||||
if g.backend == nil:
|
||||
g.backend = cgendata.newModuleList(g)
|
||||
registerInitProcs(BModuleList(g.backend), g.packed[i].module, g.packed[i].fromDisk.backendFlags)
|
||||
|
||||
proc generateCode*(g: ModuleGraph) =
  ## The single entry point, generate C(++) code for the entire
  ## Nim program aka `ModuleGraph`.
  resetForBackend(g)
  # Whole-program DCE runs first; its result steers both passes below.
  var alive = computeAliveSyms(g.packed, g.config)

  when false:
    # debugging aid: dump every module's status
    for i in 0..<len(g.packed):
      echo i, " is of status ", g.packed[i].status, " ", toFullPath(g.config, FileIndex(i))

  # First pass: Setup all the backend modules for all the modules that have
  # changed:
  for i in 0..<len(g.packed):
    # case statement here to enforce exhaustive checks.
    case g.packed[i].status
    of undefined:
      discard "nothing to do"
    of loading, stored:
      assert false
    of storing, outdated:
      setupBackendModule(g, g.packed[i])
    of loaded:
      # Even though this module didn't change, DCE might trigger a change.
      # Consider this case: Module A uses symbol S from B and B does not use
      # S itself. A is then edited not to use S either. Thus we have to
      # recompile B in order to remove S from the final result.
      if aliveSymsChanged(g.config, g.packed[i].module.position, alive):
        g.packed[i].loadedButAliveSetChanged = true
        setupBackendModule(g, g.packed[i])

  # Second pass: Code generation.
  let mainModuleIdx = g.config.projectMainIdx2.int
  # We need to generate the main module last, because only then
  # all init procs have been registered:
  for i in 0..<len(g.packed):
    if i != mainModuleIdx:
      genPackedModule(g, i, alive)
  if mainModuleIdx >= 0:
    genPackedModule(g, mainModuleIdx, alive)
|
||||
@@ -1,169 +0,0 @@
|
||||
#
|
||||
#
|
||||
# The Nim Compiler
|
||||
# (c) Copyright 2021 Andreas Rumpf
|
||||
#
|
||||
# See the file "copying.txt", included in this
|
||||
# distribution, for details about the copyright.
|
||||
#
|
||||
|
||||
## Dead code elimination (=DCE) for IC.
|
||||
|
||||
import std/[intsets, tables]
|
||||
|
||||
when defined(nimPreviewSlimSystem):
|
||||
import std/assertions
|
||||
|
||||
import ".." / [ast, options, lineinfos, types]
|
||||
|
||||
import packed_ast, ic, bitabs
|
||||
|
||||
type
|
||||
AliveSyms* = seq[IntSet]
|
||||
AliveContext* = object ## Purpose is to fill the 'alive' field.
|
||||
stack: seq[(int, TOptions, NodePos)] ## A stack for marking symbols as alive.
|
||||
decoder: PackedDecoder ## We need a PackedDecoder for module ID address translations.
|
||||
thisModule: int ## The module we're currently analysing for DCE.
|
||||
alive: AliveSyms ## The final result of our computation.
|
||||
options: TOptions
|
||||
compilerProcs: Table[string, (int, int32)]
|
||||
|
||||
proc isExportedToC(c: var AliveContext; g: PackedModuleGraph; symId: int32): bool =
  ## "Exported to C" procs are special (these are marked with '.exportc') because these
  ## must not be optimized away!
  let symPtr = unsafeAddr g[c.thisModule].fromDisk.syms[symId]
  let flags = symPtr.flags
  # due to a bug/limitation in the lambda lifting, unused inner procs
  # are not transformed correctly; issue (#411). However, the whole purpose here
  # is to eliminate unused procs. So there is no special logic required for this case.
  if sfCompileTime notin flags:
    # exportc'd / compilerproc symbols and methods form the DCE root set:
    if ({sfExportc, sfCompilerProc} * flags != {}) or
        (symPtr.kind == skMethod):
      result = true
    else:
      result = false
      # XXX: This used to be a condition to:
      # (sfExportc in prc.flags and lfExportLib in prc.loc.flags) or
    if sfCompilerProc in flags:
      # remember compilerprocs by name so e.g. range-check analysis can
      # request them later via `requestCompilerProc`:
      c.compilerProcs[g[c.thisModule].fromDisk.strings[symPtr.name]] = (c.thisModule, symId)
  else:
    result = false
|
||||
|
||||
# True when the routine def at `n` has an empty generic parameter list;
# relies on a `tree` variable being in scope at the instantiation site.
template isNotGeneric(n: NodePos): bool = ithSon(tree, n, genericParamsPos).kind == nkEmpty
|
||||
|
||||
proc followLater(c: var AliveContext; g: PackedModuleGraph; module: int; item: int32) =
  ## Marks a symbol 'item' as used and later in 'followNow' the symbol's body will
  ## be analysed.
  # containsOrIncl makes marking idempotent: each symbol is queued once.
  if not c.alive[module].containsOrIncl(item):
    var body = g[module].fromDisk.syms[item].ast
    if body != emptyNodeId:
      let opt = g[module].fromDisk.syms[item].options
      if g[module].fromDisk.syms[item].kind in routineKinds:
        # for routines only the body needs analysis, not the whole def node:
        body = NodeId ithSon(g[module].fromDisk.bodies, NodePos body, bodyPos)
      c.stack.add((module, opt, NodePos(body)))

    when false:
      # debugging aid: trace when certain well-known runtime symbols are marked
      let nid = g[module].fromDisk.syms[item].name
      if nid != LitId(0):
        let name = g[module].fromDisk.strings[nid]
        if name in ["nimFrame", "callDepthLimitReached"]:
          echo "I was called! ", name, " body exists: ", body != emptyNodeId, " ", module, " ", item
||||
|
||||
proc requestCompilerProc(c: var AliveContext; g: PackedModuleGraph; name: string) =
  ## Marks the compilerproc registered under `name` as alive so the
  ## backend will emit code for it.
  let entry = c.compilerProcs[name]
  followLater(c, g, entry[0], entry[1])
|
||||
|
||||
proc loadTypeKind(t: PackedItemId; c: AliveContext; g: PackedModuleGraph; toSkip: set[TTypeKind]): TTypeKind =
  ## Resolves the packed type reference `t` and returns its kind, skipping
  ## over wrapper kinds listed in `toSkip` (e.g. `abstractVar`).
  template kind(t: ItemId): TTypeKind = g[t.module].fromDisk.types[t.item].kind

  var t2 = translateId(t, g, c.thisModule, c.decoder.config)
  result = t2.kind
  while result in toSkip:
    # follow the last sub-type reference; presumably the underlying/base
    # type of the wrapper — TODO confirm against the packed type layout
    t2 = translateId(g[t2.module].fromDisk.types[t2.item].types[^1], g, t2.module, c.decoder.config)
    result = t2.kind
|
||||
|
||||
proc rangeCheckAnalysis(c: var AliveContext; g: PackedModuleGraph; tree: PackedTree; n: NodePos) =
  ## Replicates the logic of `ccgexprs.genRangeChck`.
  ## XXX Refactor so that the duplicated logic is avoided. However, for now it's not clear
  ## the approach has enough merit.
  var dest = loadTypeKind(n.typ, c, g, abstractVar)
  if optRangeCheck notin c.options or dest in {tyUInt..tyUInt64}:
    discard "no need to generate a check because it was disabled"
  else:
    let n0t = loadTypeKind(n.firstSon.typ, c, g, {})
    if n0t in {tyUInt, tyUInt64}:
      # full-range unsigned source: the raiser taking no arguments is used
      c.requestCompilerProc(g, "raiseRangeErrorNoArgs")
    else:
      # pick the raiser matching the destination type's family:
      let raiser =
        case loadTypeKind(n.typ, c, g, abstractVarRange)
        of tyUInt..tyUInt64, tyChar: "raiseRangeErrorU"
        of tyFloat..tyFloat128: "raiseRangeErrorF"
        else: "raiseRangeErrorI"
      c.requestCompilerProc(g, raiser)
|
||||
|
||||
proc aliveCode(c: var AliveContext; g: PackedModuleGraph; tree: PackedTree; n: NodePos) =
  ## Marks the symbols we encounter when we traverse the AST at `tree[n]` as alive, unless
  ## it is purely in a declarative context (type section etc.).
  case n.kind
  of nkNone..pred(nkSym), succ(nkSym)..nkNilLit:
    discard "ignore non-sym atoms"
  of nkSym:
    # This symbol is alive and everything its body references.
    followLater(c, g, c.thisModule, tree[n].soperand)
  of nkModuleRef:
    # cross-module reference: children carry (module literal, symbol id)
    let (n1, n2) = sons2(tree, n)
    assert n1.kind == nkNone
    assert n2.kind == nkNone
    let m = n1.litId
    let item = tree[n2].soperand
    let otherModule = toFileIndexCached(c.decoder, g, c.thisModule, m).int
    followLater(c, g, otherModule, item)
  of nkMacroDef, nkTemplateDef, nkTypeSection, nkTypeOfExpr,
     nkCommentStmt, nkIncludeStmt,
     nkImportStmt, nkImportExceptStmt, nkExportStmt, nkExportExceptStmt,
     nkFromStmt, nkStaticStmt:
    # purely declarative constructs keep nothing alive by themselves
    discard
  of nkVarSection, nkLetSection, nkConstSection:
    # XXX ignore the defining local variable name?
    for son in sonsReadonly(tree, n):
      aliveCode(c, g, tree, son)
  of nkChckRangeF, nkChckRange64, nkChckRange:
    # range checks may request compilerprocs (raiseRangeError*):
    rangeCheckAnalysis(c, g, tree, n)
  of nkProcDef, nkConverterDef, nkMethodDef, nkFuncDef, nkIteratorDef:
    # routine defs are only roots when exported to C; otherwise their
    # bodies are analysed lazily via followLater when referenced.
    if n.firstSon.kind == nkSym and isNotGeneric(n):
      let item = tree[n.firstSon].soperand
      if isExportedToC(c, g, item):
        # This symbol is alive and everything its body references.
        followLater(c, g, c.thisModule, item)
  else:
    for son in sonsReadonly(tree, n):
      aliveCode(c, g, tree, son)
|
||||
|
||||
proc followNow(c: var AliveContext; g: PackedModuleGraph) =
  ## Drains the work stack: analyses every pending body. Analysing a body
  ## may push further entries, but eventually every alive symbol is visited.
  while c.stack.len > 0:
    let (pendingModule, pendingOptions, bodyPos) = c.stack.pop()
    c.thisModule = pendingModule
    c.options = pendingOptions
    aliveCode(c, g, g[pendingModule].fromDisk.bodies, bodyPos)
|
||||
|
||||
proc computeAliveSyms*(g: PackedModuleGraph; conf: ConfigRef): AliveSyms =
  ## Entry point for our DCE algorithm.
  ## Seeds the mark phase with every module's top level statements, then
  ## follows references transitively; returns one IntSet per module.
  var c = AliveContext(stack: @[], decoder: PackedDecoder(config: conf),
                       thisModule: -1, alive: newSeq[IntSet](g.len),
                       options: conf.options)
  # modules are visited in reverse index order here:
  for i in countdown(len(g)-1, 0):
    if g[i].status != undefined:
      c.thisModule = i
      for p in allNodes(g[i].fromDisk.topLevel):
        aliveCode(c, g, g[i].fromDisk.topLevel, p)

  followNow(c, g)
  result = move(c.alive)
|
||||
|
||||
proc isAlive*(a: AliveSyms; module: int, item: int32): bool =
  ## Backends use this to query if a symbol is `alive` which means
  ## we need to produce (C/C++/etc) code for it.
  item in a[module]
|
||||
|
||||
@@ -1,56 +0,0 @@
|
||||
====================================
|
||||
Incremental Recompilations
|
||||
====================================
|
||||
|
||||
We split the Nim compiler into a frontend and a backend.
|
||||
The frontend produces a set of `.rod` files. Every `.nim` module
|
||||
produces its own `.rod` file.
|
||||
|
||||
- The IR must be a faithful representation of the AST in memory.
|
||||
- The backend can do its own caching but doesn't have to. In the
|
||||
current implementation the backend also caches its results.
|
||||
|
||||
Advantage of the "set of files" vs the previous global database:
|
||||
- By construction, we either read from the `.rod` file or from the
|
||||
`.nim` file, there can be no inconsistency. There can also be no
|
||||
partial updates.
|
||||
- No dependency to external packages (SQLite). SQLite simply is too
|
||||
slow and the old way of serialization was too slow too. We use a
|
||||
format designed for Nim and expect to base further tools on this
|
||||
file format.
|
||||
|
||||
References to external modules must be (moduleId, symId) pairs.
|
||||
The symbol IDs are module specific. This way no global ID increment
|
||||
mechanism needs to be implemented that we could get wrong. ModuleIds
|
||||
are rod-file specific too.
|
||||
|
||||
|
||||
|
||||
Global state
|
||||
------------
|
||||
|
||||
There is no global state.
|
||||
|
||||
Rod File Format
|
||||
---------------
|
||||
|
||||
It's a simple binary file format. `rodfiles.nim` contains some details.
|
||||
|
||||
|
||||
Backend
|
||||
-------
|
||||
|
||||
Nim programmers have come to enjoy whole-program dead code elimination
by default. Since this is a "whole program" optimization, it does break
|
||||
modularity. However, thanks to the packed AST representation we can perform
|
||||
this global analysis without having to unpack anything. This is basically
|
||||
a mark&sweep GC algorithm:
|
||||
|
||||
- Start with the top level statements. Every symbol that is referenced
|
||||
from a top level statement is not "dead" and needs to be compiled by
|
||||
the backend.
|
||||
- Every symbol referenced from a referenced symbol also has to be
|
||||
compiled.
|
||||
|
||||
Caching logic: Only if the set of alive symbols is different from the
|
||||
last run, the module has to be regenerated.
|
||||
1349
compiler/ic/ic.nim
1349
compiler/ic/ic.nim
File diff suppressed because it is too large
Load Diff
@@ -1,84 +0,0 @@
|
||||
#
|
||||
#
|
||||
# The Nim Compiler
|
||||
# (c) Copyright 2024 Andreas Rumpf
|
||||
#
|
||||
# See the file "copying.txt", included in this
|
||||
# distribution, for details about the copyright.
|
||||
#
|
||||
|
||||
# For the line information we use 32 bits. They are used as follows:
|
||||
# Bit 0 (AsideBit): If we have inline line information or not. If not, the
|
||||
# remaining 31 bits are used as an index into a seq[(LitId, int, int)].
|
||||
#
|
||||
# We use 10 bits for the "file ID", this means a program can consist of as much
|
||||
# as 1024 different files. (If it uses more files than that, the overflow bit
|
||||
# would be set.)
|
||||
# This means we have 21 bits left to encode the (line, col) pair. We use 7 bits for the column
|
||||
# so 128 is the limit and 14 bits for the line number.
|
||||
# The packed representation supports files with up to 16384 lines.
|
||||
# Keep in mind that whenever any limit is reached the AsideBit is set and the real line
|
||||
# information is kept in a side channel.
|
||||
|
||||
import std / assertions
|
||||
|
||||
const
|
||||
AsideBit = 1
|
||||
FileBits = 10
|
||||
LineBits = 14
|
||||
ColBits = 7
|
||||
FileMax = (1 shl FileBits) - 1
|
||||
LineMax = (1 shl LineBits) - 1
|
||||
ColMax = (1 shl ColBits) - 1
|
||||
|
||||
static:
|
||||
assert AsideBit + FileBits + LineBits + ColBits == 32
|
||||
|
||||
import .. / ic / [bitabs, rodfiles] # for LitId
|
||||
|
||||
type
|
||||
PackedLineInfo* = distinct uint32
|
||||
|
||||
LineInfoManager* = object
|
||||
aside: seq[(LitId, int32, int32)]
|
||||
|
||||
const
|
||||
NoLineInfo* = PackedLineInfo(0'u32)
|
||||
|
||||
proc pack*(m: var LineInfoManager; file: LitId; line, col: int32): PackedLineInfo =
  ## Packs (file, line, col) into 32 bits. When all three fit their bit
  ## budgets (FileBits/LineBits/ColBits), the inline layout is used with
  ## bit 0 (AsideBit) clear; otherwise the triple goes into the `aside`
  ## side table and its index is stored with bit 0 set.
  if file.uint32 <= FileMax.uint32 and line <= LineMax and col <= ColMax:
    # negative positions are clamped to 0 before packing:
    let col = if col < 0'i32: 0'u32 else: col.uint32
    let line = if line < 0'i32: 0'u32 else: line.uint32
    # use inline representation:
    result = PackedLineInfo((file.uint32 shl 1'u32) or (line shl uint32(AsideBit + FileBits)) or
      (col shl uint32(AsideBit + FileBits + LineBits)))
  else:
    result = PackedLineInfo((m.aside.len shl 1) or AsideBit)
    m.aside.add (file, line, col)
||||
|
||||
proc unpack*(m: LineInfoManager; i: PackedLineInfo): (LitId, int32, int32) =
  ## Inverse of `pack`: decodes the inline bit layout, or looks the triple
  ## up in the `aside` side table when bit 0 (AsideBit) is set.
  let i = i.uint32
  if (i and 1'u32) == 0'u32:
    # inline representation:
    result = (LitId((i shr 1'u32) and FileMax.uint32),
      int32((i shr uint32(AsideBit + FileBits)) and LineMax.uint32),
      int32((i shr uint32(AsideBit + FileBits + LineBits)) and ColMax.uint32))
  else:
    result = m.aside[int(i shr 1'u32)]
|
||||
|
||||
proc getFileId*(m: LineInfoManager; i: PackedLineInfo): LitId =
  ## Extracts just the file component of the packed line information `i`.
  let (file, _, _) = unpack(m, i)
  file
|
||||
|
||||
proc store*(r: var RodFile; m: LineInfoManager) = storeSeq(r, m.aside)
|
||||
proc load*(r: var RodFile; m: var LineInfoManager) = loadSeq(r, m.aside)
|
||||
|
||||
when isMainModule:
|
||||
var m = LineInfoManager(aside: @[])
|
||||
for i in 0'i32..<16388'i32:
|
||||
for col in 0'i32..<100'i32:
|
||||
let packed = pack(m, LitId(1023), i, col)
|
||||
let u = unpack(m, packed)
|
||||
assert u[0] == LitId(1023)
|
||||
assert u[1] == i
|
||||
assert u[2] == col
|
||||
echo m.aside.len
|
||||
@@ -1,155 +0,0 @@
|
||||
#
|
||||
#
|
||||
# The Nim Compiler
|
||||
# (c) Copyright 2021 Andreas Rumpf
|
||||
#
|
||||
# See the file "copying.txt", included in this
|
||||
# distribution, for details about the copyright.
|
||||
#
|
||||
|
||||
## Integrity checking for a set of .rod files.
|
||||
## The set must cover a complete Nim project.
|
||||
|
||||
import std/[sets, tables]
|
||||
|
||||
when defined(nimPreviewSlimSystem):
|
||||
import std/assertions
|
||||
|
||||
import ".." / [ast, modulegraphs]
|
||||
import packed_ast, bitabs, ic
|
||||
|
||||
type
|
||||
CheckedContext = object
|
||||
g: ModuleGraph
|
||||
thisModule: int32
|
||||
checkedSyms: HashSet[ItemId]
|
||||
checkedTypes: HashSet[ItemId]
|
||||
|
||||
proc checkType(c: var CheckedContext; typeId: PackedItemId)
|
||||
proc checkForeignSym(c: var CheckedContext; symId: PackedItemId)
|
||||
proc checkNode(c: var CheckedContext; tree: PackedTree; n: NodePos)
|
||||
|
||||
proc checkTypeObj(c: var CheckedContext; typ: PackedType) =
|
||||
for child in typ.types:
|
||||
checkType(c, child)
|
||||
if typ.n != emptyNodeId:
|
||||
checkNode(c, c.g.packed[c.thisModule].fromDisk.bodies, NodePos typ.n)
|
||||
if typ.sym != nilItemId:
|
||||
checkForeignSym(c, typ.sym)
|
||||
if typ.owner != nilItemId:
|
||||
checkForeignSym(c, typ.owner)
|
||||
checkType(c, typ.typeInst)
|
||||
|
||||
proc checkType(c: var CheckedContext; typeId: PackedItemId) =
  ## Resolves `typeId` and checks the referenced type exactly once;
  ## `checkedTypes` prevents infinite recursion on cyclic type graphs.
  if typeId == nilItemId: return
  let itemId = translateId(typeId, c.g.packed, c.thisModule, c.g.config)
  if not c.checkedTypes.containsOrIncl(itemId):
    # temporarily switch module context so nested lookups resolve in the
    # type's own module:
    let oldThisModule = c.thisModule
    c.thisModule = itemId.module
    checkTypeObj c, c.g.packed[itemId.module].fromDisk.types[itemId.item]
    c.thisModule = oldThisModule
|
||||
|
||||
proc checkSym(c: var CheckedContext; s: PackedSym) =
  ## Checks one packed symbol: its name literal, type, AST body and owner
  ## must all reference entities that exist in the packed data.
  if s.name != LitId(0):
    assert c.g.packed[c.thisModule].fromDisk.strings.hasLitId s.name
  checkType c, s.typ
  if s.ast != emptyNodeId:
    checkNode(c, c.g.packed[c.thisModule].fromDisk.bodies, NodePos s.ast)
  if s.owner != nilItemId:
    checkForeignSym(c, s.owner)
|
||||
|
||||
proc checkLocalSym(c: var CheckedContext; item: int32) =
  ## Checks symbol `item` of the current module exactly once.
  let itemId = ItemId(module: c.thisModule, item: item)
  if not c.checkedSyms.containsOrIncl(itemId):
    checkSym c, c.g.packed[c.thisModule].fromDisk.syms[item]
|
||||
|
||||
proc checkForeignSym(c: var CheckedContext; symId: PackedItemId) =
  ## Resolves a (possibly cross-module) symbol reference and checks the
  ## referenced symbol exactly once.
  let itemId = translateId(symId, c.g.packed, c.thisModule, c.g.config)
  if not c.checkedSyms.containsOrIncl(itemId):
    # switch module context for the duration of the nested check:
    let oldThisModule = c.thisModule
    c.thisModule = itemId.module
    checkSym c, c.g.packed[itemId.module].fromDisk.syms[itemId.item]
    c.thisModule = oldThisModule
|
||||
|
||||
proc checkNode(c: var CheckedContext; tree: PackedTree; n: NodePos) =
  ## Checks a single node: its attached type and, per node kind, its
  ## literal/symbol operands; recurses into children for structured nodes.
  let t = findType(tree, n)
  if t != nilItemId:
    checkType(c, t)
  case n.kind
  of nkEmpty, nkNilLit, nkType, nkNilRodNode:
    discard
  of nkIdent:
    assert c.g.packed[c.thisModule].fromDisk.strings.hasLitId n.litId
  of nkSym:
    checkLocalSym(c, tree[n].soperand)
  of directIntLit:
    # value stored inline in the node; nothing external to validate
    discard
  of externIntLit, nkFloatLit..nkFloat128Lit:
    assert c.g.packed[c.thisModule].fromDisk.numbers.hasLitId n.litId
  of nkStrLit..nkTripleStrLit:
    assert c.g.packed[c.thisModule].fromDisk.strings.hasLitId n.litId
  of nkModuleRef:
    # cross-module reference: (module literal, symbol id) child pair
    let (n1, n2) = sons2(tree, n)
    assert n1.kind == nkNone
    assert n2.kind == nkNone
    checkForeignSym(c, PackedItemId(module: n1.litId, item: tree[n2].soperand))
  else:
    for n0 in sonsReadonly(tree, n):
      checkNode(c, tree, n0)
|
||||
|
||||
proc checkTree(c: var CheckedContext; t: PackedTree) =
  ## Runs `checkNode` over every node of the packed tree `t`.
  for nodePos in allNodes(t):
    checkNode(c, t, nodePos)
|
||||
|
||||
proc checkLocalSymIds(c: var CheckedContext; m: PackedModule; symIds: seq[int32]) =
|
||||
for symId in symIds:
|
||||
assert symId >= 0 and symId < m.syms.len, $symId & " " & $m.syms.len
|
||||
|
||||
proc checkModule(c: var CheckedContext; m: PackedModule) =
  ## Checks one packed module.
  # We check that:
  # - Every symbol references existing types and symbols.
  # - Every tree node references existing types and symbols.
  for _, v in pairs(m.syms):
    checkLocalSym c, v.id

  checkTree c, m.toReplay
  checkTree c, m.topLevel

  # export and compilerproc tables must agree with the symbol table:
  for e in m.exports:
    #assert e[1] >= 0 and e[1] < m.syms.len
    assert e[0] == m.syms[e[1]].name

  for e in m.compilerProcs:
    #assert e[1] >= 0 and e[1] < m.syms.len
    assert e[0] == m.syms[e[1]].name

  checkLocalSymIds c, m, m.converters
  checkLocalSymIds c, m, m.methods
  checkLocalSymIds c, m, m.trmacros
  checkLocalSymIds c, m, m.pureEnums
  #[
    To do: Check all these fields:

    reexports*: seq[(LitId, PackedItemId)]
    macroUsages*: seq[(PackedItemId, PackedLineInfo)]

    typeInstCache*: seq[(PackedItemId, PackedItemId)]
    procInstCache*: seq[PackedInstantiation]
    attachedOps*: seq[(TTypeAttachedOp, PackedItemId, PackedItemId)]
    methodsPerGenericType*: seq[(PackedItemId, int, PackedItemId)]
    enumToStringProcs*: seq[(PackedItemId, PackedItemId)]
    methodsPerType*: seq[(PackedItemId, PackedItemId)]
    dispatchers*: seq[PackedItemId]
  ]#
|
||||
|
||||
proc checkIntegrity*(g: ModuleGraph) =
  ## Entry point: checks every loaded packed module of the project.
  var c = CheckedContext(g: g)
  for i in 0..<len(g.packed):
    # case statement here to enforce exhaustive checks.
    case g.packed[i].status
    of undefined:
      discard "nothing to do"
    of loading:
      assert false, "cannot check integrity: Module still loading"
    of stored, storing, outdated, loaded:
      c.thisModule = int32 i
      checkModule(c, g.packed[i].fromDisk)
|
||||
@@ -1,183 +0,0 @@
|
||||
#
|
||||
#
|
||||
# The Nim Compiler
|
||||
# (c) Copyright 2021 Andreas Rumpf
|
||||
#
|
||||
# See the file "copying.txt", included in this
|
||||
# distribution, for details about the copyright.
|
||||
#
|
||||
|
||||
## Supports the "nim check --ic:legacy --defusages:FILE,LINE,COL"
|
||||
## IDE-like features. It uses the set of .rod files to accomplish
|
||||
## its task. The set must cover a complete Nim project.
|
||||
|
||||
import std/[sets, tables]
|
||||
|
||||
from std/os import nil
|
||||
from std/private/miscdollars import toLocation
|
||||
|
||||
when defined(nimPreviewSlimSystem):
|
||||
import std/assertions
|
||||
|
||||
import ".." / [ast, modulegraphs, msgs, options]
|
||||
import iclineinfos
|
||||
import packed_ast, bitabs, ic
|
||||
|
||||
type
|
||||
UnpackedLineInfo = object
|
||||
file: LitId
|
||||
line, col: int
|
||||
NavContext = object
|
||||
g: ModuleGraph
|
||||
thisModule: int32
|
||||
trackPos: UnpackedLineInfo
|
||||
alreadyEmitted: HashSet[string]
|
||||
outputSep: char # for easier testing, use short filenames and spaces instead of tabs.
|
||||
|
||||
proc isTracked(man: LineInfoManager; current: PackedLineInfo, trackPos: UnpackedLineInfo, tokenLen: int): bool =
  ## True when the queried position `trackPos` falls inside the token of
  ## length `tokenLen` that starts at the packed position `current`.
  let (file, line, col) = man.unpack(current)
  result = file == trackPos.file and line == trackPos.line and
    trackPos.col >= col and trackPos.col < col + tokenLen
|
||||
|
||||
proc searchLocalSym(c: var NavContext; s: PackedSym; info: PackedLineInfo): bool =
|
||||
result = s.name != LitId(0) and
|
||||
isTracked(c.g.packed[c.thisModule].fromDisk.man, info, c.trackPos, c.g.packed[c.thisModule].fromDisk.strings[s.name].len)
|
||||
|
||||
proc searchForeignSym(c: var NavContext; s: ItemId; info: PackedLineInfo): bool =
|
||||
let name = c.g.packed[s.module].fromDisk.syms[s.item].name
|
||||
result = name != LitId(0) and
|
||||
isTracked(c.g.packed[c.thisModule].fromDisk.man, info, c.trackPos, c.g.packed[s.module].fromDisk.strings[name].len)
|
||||
|
||||
const
|
||||
EmptyItemId = ItemId(module: -1'i32, item: -1'i32)
|
||||
|
||||
proc search(c: var NavContext; tree: PackedTree): ItemId =
  ## Scans `tree` linearly for the symbol located at `c.trackPos`;
  ## returns EmptyItemId when nothing matches.
  # We use the linear representation here directly:
  for i in 0..<len(tree):
    let i = NodePos(i)
    case tree[i].kind
    of nkSym:
      let item = tree[i].soperand
      if searchLocalSym(c, c.g.packed[c.thisModule].fromDisk.syms[item], tree[i].info):
        return ItemId(module: c.thisModule, item: item)
    of nkModuleRef:
      let (currentFile, currentLine, currentCol) = c.g.packed[c.thisModule].fromDisk.man.unpack(tree[i].info)
      if currentLine == c.trackPos.line and currentFile == c.trackPos.file:
        let (n1, n2) = sons2(tree, i)
        # NOTE(review): other traversals of nkModuleRef children (aliveCode,
        # checkNode, list) assert nkNone here, but this one asserts
        # nkInt32Lit — confirm which node kind the children really carry.
        assert n1.kind == nkInt32Lit
        assert n2.kind == nkInt32Lit
        let pId = PackedItemId(module: n1.litId, item: tree[n2].soperand)
        let itemId = translateId(pId, c.g.packed, c.thisModule, c.g.config)
        if searchForeignSym(c, itemId, tree[i].info):
          return itemId
    else: discard
  return EmptyItemId
|
||||
|
||||
proc isDecl(tree: PackedTree; n: NodePos): bool =
  ## Heuristic: is the node at `n` a declaration-like construct? Used to
  ## label an occurrence as "def" rather than "usage".
  # XXX This is not correct yet.
  const declarativeNodes = procDefs + {nkMacroDef, nkTemplateDef,
    nkLetSection, nkVarSection, nkUsingStmt, nkConstSection, nkTypeSection,
    nkIdentDefs, nkEnumTy, nkVarTuple}
  result = n.int >= 0 and tree[n].kind in declarativeNodes
|
||||
|
||||
proc usage(c: var NavContext; info: PackedLineInfo; isDecl: bool) =
  ## Emits one "def"/"usage" line for position `info`, deduplicated
  ## via the `alreadyEmitted` set.
  let (fileId, line, col) = unpack(c.g.packed[c.thisModule].fromDisk.man, info)
  var m = ""
  var file = c.g.packed[c.thisModule].fromDisk.strings[fileId]
  if c.outputSep == ' ':
    # test mode: use short file names for stable expected output
    file = os.extractFilename file
  toLocation(m, file, line, col + ColOffset)
  if not c.alreadyEmitted.containsOrIncl(m):
    msgWriteln c.g.config, (if isDecl: "def" else: "usage") & c.outputSep & m
||||
|
||||
proc list(c: var NavContext; tree: PackedTree; sym: ItemId) =
  ## Emits a def/usage line for every occurrence of `sym` in `tree`.
  for i in 0..<len(tree):
    let i = NodePos(i)
    case tree[i].kind
    of nkSym:
      # local reference: matches when this module owns the symbol
      let item = tree[i].soperand
      if sym.item == item and sym.module == c.thisModule:
        usage(c, tree[i].info, isDecl(tree, parent(i)))
    of nkModuleRef:
      # cross-module reference: translate before comparing
      let (n1, n2) = sons2(tree, i)
      assert n1.kind == nkNone
      assert n2.kind == nkNone
      let pId = PackedItemId(module: n1.litId, item: tree[n2].soperand)
      let itemId = translateId(pId, c.g.packed, c.thisModule, c.g.config)
      if itemId.item == sym.item and sym.module == itemId.module:
        usage(c, tree[i].info, isDecl(tree, parent(i)))
    else: discard
|
||||
|
||||
proc searchForIncludeFile(g: ModuleGraph; fullPath: string): int =
  ## Returns the index of the module that includes `fullPath`, or -1
  ## when no module includes it.
  for i in 0..<len(g.packed):
    for k in 1..high(g.packed[i].fromDisk.includes):
      # we start from 1 because the first "include" file is
      # the module's filename.
      if os.cmpPaths(g.packed[i].fromDisk.strings[g.packed[i].fromDisk.includes[k][0]], fullPath) == 0:
        return i
  return -1
|
||||
|
||||
proc nav(g: ModuleGraph) =
  ## Implements def/usages navigation over the set of .rod files: finds
  ## the symbol at the configured track position and lists every
  ## occurrence across all loaded modules.
  # translate the track position to a packed position:
  let unpacked = g.config.m.trackPos
  var mid = unpacked.fileIndex.int

  let fullPath = toFullPath(g.config, unpacked.fileIndex)

  if g.packed[mid].status == undefined:
    # check if 'mid' is an include file of some other module:
    mid = searchForIncludeFile(g, fullPath)

  if mid < 0:
    localError(g.config, unpacked, "unknown file name: " & fullPath)
    return

  let fileId = g.packed[mid].fromDisk.strings.getKeyId(fullPath)

  if fileId == LitId(0):
    internalError(g.config, unpacked, "cannot find a valid file ID")
    return

  var c = NavContext(
    g: g,
    thisModule: int32 mid,
    trackPos: UnpackedLineInfo(line: unpacked.line.int, col: unpacked.col.int, file: fileId),
    outputSep: if isDefined(g.config, "nimIcNavigatorTests"): ' ' else: '\t'
  )
  # find the symbol under the cursor: first among the top level statements,
  # then inside proc bodies:
  var symId = search(c, g.packed[mid].fromDisk.topLevel)
  if symId == EmptyItemId:
    symId = search(c, g.packed[mid].fromDisk.bodies)

  if symId == EmptyItemId:
    localError(g.config, unpacked, "no symbol at this position")
    return

  # list every occurrence of the symbol in every loaded module:
  for i in 0..<len(g.packed):
    # case statement here to enforce exhaustive checks.
    case g.packed[i].status
    of undefined:
      discard "nothing to do"
    of loading:
      assert false, "cannot check integrity: Module still loading"
    of stored, storing, outdated, loaded:
      c.thisModule = int32 i
      list(c, g.packed[i].fromDisk.topLevel, symId)
      list(c, g.packed[i].fromDisk.bodies, symId)
|
||||
|
||||
proc navDefinition*(g: ModuleGraph) = nav(g)
|
||||
proc navUsages*(g: ModuleGraph) = nav(g)
|
||||
proc navDefusages*(g: ModuleGraph) = nav(g)
|
||||
|
||||
proc writeRodFiles*(g: ModuleGraph) =
  ## Flushes the .rod files of all modules currently being stored or
  ## marked outdated; all other statuses need no write.
  for i in 0..<len(g.packed):
    case g.packed[i].status
    of undefined, loading, stored, loaded:
      discard "nothing to do"
    of storing, outdated:
      closeRodFile(g, g.packed[i].module)
|
||||
@@ -1,367 +0,0 @@
|
||||
#
|
||||
#
|
||||
# The Nim Compiler
|
||||
# (c) Copyright 2020 Andreas Rumpf
|
||||
#
|
||||
# See the file "copying.txt", included in this
|
||||
# distribution, for details about the copyright.
|
||||
#
|
||||
|
||||
## Packed AST representation, mostly based on a seq of nodes.
|
||||
## For IC support. Far future: Rewrite the compiler passes to
|
||||
## use this representation directly in all the transformations,
|
||||
## it is superior.
|
||||
|
||||
import std/[hashes, tables, strtabs]
|
||||
import bitabs, rodfiles
|
||||
import ".." / [ast, options]
|
||||
|
||||
import iclineinfos
|
||||
|
||||
when defined(nimPreviewSlimSystem):
|
||||
import std/assertions
|
||||
|
||||
type
|
||||
SymId* = distinct int32
|
||||
ModuleId* = distinct int32
|
||||
NodePos* = distinct int
|
||||
|
||||
NodeId* = distinct int32
|
||||
|
||||
PackedItemId* = object
|
||||
module*: LitId # 0 if it's this module
|
||||
item*: int32 # same as the in-memory representation
|
||||
|
||||
const
|
||||
nilItemId* = PackedItemId(module: LitId(0), item: 0.int32)
|
||||
|
||||
const
|
||||
emptyNodeId* = NodeId(-1)
|
||||
|
||||
type
|
||||
PackedLib* = object
|
||||
kind*: TLibKind
|
||||
generated*: bool
|
||||
isOverridden*: bool
|
||||
name*: LitId
|
||||
path*: NodeId
|
||||
|
||||
PackedSym* = object
|
||||
id*: int32
|
||||
kind*: TSymKind
|
||||
name*: LitId
|
||||
typ*: PackedItemId
|
||||
flags*: TSymFlags
|
||||
magic*: TMagic
|
||||
info*: PackedLineInfo
|
||||
ast*: NodeId
|
||||
owner*: PackedItemId
|
||||
guard*: PackedItemId
|
||||
bitsize*: int
|
||||
alignment*: int # for alignment
|
||||
options*: TOptions
|
||||
position*: int
|
||||
offset*: int32
|
||||
disamb*: int32
|
||||
externalName*: LitId # instead of TLoc
|
||||
locFlags*: TLocFlags
|
||||
annex*: PackedLib
|
||||
when hasFFI:
|
||||
cname*: LitId
|
||||
constraint*: NodeId
|
||||
instantiatedFrom*: PackedItemId
|
||||
|
||||
PackedType* = object
|
||||
id*: int32
|
||||
kind*: TTypeKind
|
||||
callConv*: TCallingConvention
|
||||
#nodekind*: TNodeKind
|
||||
flags*: TTypeFlags
|
||||
types*: seq[PackedItemId]
|
||||
n*: NodeId
|
||||
#nodeflags*: TNodeFlags
|
||||
sym*: PackedItemId
|
||||
owner*: PackedItemId
|
||||
size*: BiggestInt
|
||||
align*: int16
|
||||
paddingAtEnd*: int16
|
||||
# not serialized: loc*: TLoc because it is backend-specific
|
||||
typeInst*: PackedItemId
|
||||
nonUniqueId*: int32
|
||||
|
||||
PackedNode* = object # 8 bytes
|
||||
x: uint32
|
||||
info*: PackedLineInfo
|
||||
|
||||
PackedTree* = object ## usually represents a full Nim module
|
||||
nodes: seq[PackedNode]
|
||||
withFlags: seq[(int32, TNodeFlags)]
|
||||
withTypes: seq[(int32, PackedItemId)]
|
||||
|
||||
PackedInstantiation* = object
|
||||
key*, sym*: PackedItemId
|
||||
concreteTypes*: seq[PackedItemId]
|
||||
|
||||
const
  NodeKindBits = 8'u32  # low 8 bits of `PackedNode.x` hold the TNodeKind
  NodeKindMask = (1'u32 shl NodeKindBits) - 1'u32

# Decode the pieces packed into `PackedNode.x`:
template kind*(n: PackedNode): TNodeKind = TNodeKind(n.x and NodeKindMask)
template uoperand*(n: PackedNode): uint32 = (n.x shr NodeKindBits)
template soperand*(n: PackedNode): int32 = int32(uoperand(n))

# Encode a kind plus its operand into the single `x` word:
template toX(k: TNodeKind; operand: uint32): uint32 =
  uint32(k) or (operand shl NodeKindBits)

template toX(k: TNodeKind; operand: LitId): uint32 =
  uint32(k) or (operand.uint32 shl NodeKindBits)

# NOTE(review): PackedNode declares no `typ` field in this module, so this
# template can only compile at an instantiation site that provides one --
# it looks unused; confirm before relying on it.
template typeId*(n: PackedNode): PackedItemId = n.typ

# Distinct-int ids borrow equality/hashing from their base type:
proc `==`*(a, b: SymId): bool {.borrow.}
proc hash*(a: SymId): Hash {.borrow.}

proc `==`*(a, b: NodePos): bool {.borrow.}
#proc `==`*(a, b: PackedItemId): bool {.borrow.}
proc `==`*(a, b: NodeId): bool {.borrow.}
|
||||
|
||||
proc newTreeFrom*(old: PackedTree): PackedTree =
  ## Creates an empty tree; `old` is only consulted in the disabled branch.
  result = PackedTree(nodes: @[])
  when false: result.sh = old.sh

proc addIdent*(tree: var PackedTree; s: LitId; info: PackedLineInfo) =
  ## Appends an `nkIdent` atom whose operand is the identifier's literal id.
  tree.nodes.add PackedNode(x: toX(nkIdent, uint32(s)), info: info)

proc addSym*(tree: var PackedTree; s: int32; info: PackedLineInfo) =
  ## Appends an `nkSym` atom; `s` is stored bit-identically in the operand.
  tree.nodes.add PackedNode(x: toX(nkSym, cast[uint32](s)), info: info)

proc addSymDef*(tree: var PackedTree; s: SymId; info: PackedLineInfo) =
  ## Appends an `nkSym` atom for a symbol definition.
  tree.nodes.add PackedNode(x: toX(nkSym, cast[uint32](s)), info: info)

proc isAtom*(tree: PackedTree; pos: int): bool {.inline.} =
  ## Atoms are leaf nodes: every kind up to and including `nkNilLit`.
  tree.nodes[pos].kind <= nkNilLit

type
  PatchPos = distinct int  # index of a node whose span gets patched later
|
||||
|
||||
proc addNode*(t: var PackedTree; kind: TNodeKind; operand: int32;
              typeId: PackedItemId = nilItemId; info: PackedLineInfo;
              flags: TNodeFlags = {}) =
  ## Appends one node. Flags and type are stored out of line and only when
  ## they carry information, keeping `PackedNode` at 8 bytes.
  t.nodes.add PackedNode(x: toX(kind, cast[uint32](operand)), info: info)
  let idx = t.nodes.len.int32 - 1
  if flags != {}:
    t.withFlags.add (idx, flags)
  if typeId != nilItemId:
    t.withTypes.add (idx, typeId)

proc prepare*(tree: var PackedTree; kind: TNodeKind; flags: TNodeFlags; typeId: PackedItemId; info: PackedLineInfo): PatchPos =
  ## Starts a structural node whose span is unknown; the caller must invoke
  ## `patch` with the returned position once all children were appended.
  result = PatchPos tree.nodes.len
  tree.addNode(kind = kind, flags = flags, operand = 0, info = info, typeId = typeId)

proc prepare*(dest: var PackedTree; source: PackedTree; sourcePos: NodePos): PatchPos =
  ## Starts a subtree in `dest` by copying the head node from `source`.
  result = PatchPos dest.nodes.len
  dest.nodes.add source.nodes[sourcePos.int]

proc patch*(tree: var PackedTree; pos: PatchPos) =
  ## Finalizes a node started with `prepare` by writing its span (the
  ## number of nodes from the head to the current end of the tree).
  let start = pos.int
  let k = tree.nodes[start].kind
  assert k > nkNilLit  # only structural nodes carry a span
  let distance = int32(tree.nodes.len - start)
  assert distance > 0
  tree.nodes[start].x = toX(k, cast[uint32](distance))
|
||||
|
||||
proc len*(tree: PackedTree): int {.inline.} = tree.nodes.len

proc `[]`*(tree: PackedTree; i: NodePos): lent PackedNode {.inline.} =
  tree.nodes[i.int]

# For structural nodes the operand is the subtree's span in nodes.
template rawSpan(n: PackedNode): int = int(uoperand(n))

proc nextChild(tree: PackedTree; pos: var int) {.inline.} =
  ## Advances `pos` past the node starting there, subtree included.
  if tree.nodes[pos].kind > nkNilLit:
    assert tree.nodes[pos].uoperand > 0
    inc pos, tree.nodes[pos].rawSpan
  else:
    inc pos
|
||||
|
||||
iterator sonsReadonly*(tree: PackedTree; n: NodePos): NodePos =
  ## Yields the direct children of `n` without touching the tree.
  var cursor = n.int
  assert tree.nodes[cursor].kind > nkNilLit
  let sentinel = cursor + tree.nodes[cursor].rawSpan
  inc cursor
  while cursor < sentinel:
    yield NodePos cursor
    nextChild tree, cursor

iterator sons*(dest: var PackedTree; tree: PackedTree; n: NodePos): NodePos =
  ## Like `sonsReadonly`, but additionally copies `n`'s head node into
  ## `dest` and patches its span afterwards, so the caller can rebuild the
  ## subtree while iterating.
  let fixup = prepare(dest, tree, n)
  for child in sonsReadonly(tree, n): yield child
  patch dest, fixup

iterator isons*(dest: var PackedTree; tree: PackedTree;
                n: NodePos): (int, NodePos) =
  ## `sons` with a running 0-based child index.
  var childIndex = 0
  for child in sons(dest, tree, n):
    yield (childIndex, child)
    inc childIndex
|
||||
|
||||
iterator sonsFrom1*(tree: PackedTree; n: NodePos): NodePos =
  ## Yields every child of `n` except the first.
  var cursor = n.int
  assert tree.nodes[cursor].kind > nkNilLit
  let sentinel = cursor + tree.nodes[cursor].rawSpan
  inc cursor
  if cursor < sentinel:
    nextChild tree, cursor  # skip the first child
  while cursor < sentinel:
    yield NodePos cursor
    nextChild tree, cursor

iterator sonsWithoutLast2*(tree: PackedTree; n: NodePos): NodePos =
  ## Yields every child of `n` except the last two. A first pass counts the
  ## children; the second pass yields while more than two remain.
  var remaining = 0
  for child in sonsReadonly(tree, n):
    inc remaining
  var cursor = n.int
  assert tree.nodes[cursor].kind > nkNilLit
  let sentinel = cursor + tree.nodes[cursor].rawSpan
  inc cursor
  while cursor < sentinel and remaining > 2:
    yield NodePos cursor
    dec remaining
    nextChild tree, cursor
|
||||
|
||||
proc parentImpl(tree: PackedTree; n: NodePos): NodePos =
  ## Finding the parent is a backwards scan: the parent is the closest
  ## preceding structural node whose span still covers `n`.
  var cursor = n.int - 1
  while cursor >= 0 and
      (isAtom(tree, cursor) or (cursor + tree.nodes[cursor].rawSpan - 1 < n.int)):
    dec cursor
  #assert cursor >= 0, "node has no parent"
  result = NodePos(cursor)

template parent*(n: NodePos): NodePos = parentImpl(tree, n)

proc hasXsons*(tree: PackedTree; n: NodePos; x: int): bool =
  ## True iff `n` has exactly `x` children; atoms count as having zero.
  var count = 0
  if tree.nodes[n.int].kind > nkNilLit:
    for child in sonsReadonly(tree, n): inc count
  result = count == x

proc hasAtLeastXsons*(tree: PackedTree; n: NodePos; x: int): bool =
  ## True iff `n` has `x` or more children; stops counting as soon as the
  ## threshold is reached.
  if tree.nodes[n.int].kind > nkNilLit:
    var count = 0
    for child in sonsReadonly(tree, n):
      inc count
      if count >= x: return true
  return false
|
||||
|
||||
proc firstSon*(tree: PackedTree; n: NodePos): NodePos {.inline.} =
  ## The first child is stored immediately after its parent.
  NodePos(n.int+1)
proc kind*(tree: PackedTree; n: NodePos): TNodeKind {.inline.} =
  tree.nodes[n.int].kind
proc litId*(tree: PackedTree; n: NodePos): LitId {.inline.} =
  LitId tree.nodes[n.int].uoperand
proc info*(tree: PackedTree; n: NodePos): PackedLineInfo {.inline.} =
  tree.nodes[n.int].info

proc findType*(tree: PackedTree; n: NodePos): PackedItemId =
  ## Looks up `n`'s out-of-line type. Entries are appended in increasing
  ## node order (see `addNode`), which permits the early exit below.
  for entry in tree.withTypes:
    if entry[0] == int32(n): return entry[1]
    if entry[0] > int32(n): return nilItemId
  return nilItemId

proc findFlags*(tree: PackedTree; n: NodePos): TNodeFlags =
  ## Looks up `n`'s out-of-line flags; same ordering argument as `findType`.
  for entry in tree.withFlags:
    if entry[0] == int32(n): return entry[1]
    if entry[0] > int32(n): return {}
  return {}

# Convenience templates; they require a `tree` symbol at the call site:
template typ*(n: NodePos): PackedItemId =
  tree.findType(n)
template flags*(n: NodePos): TNodeFlags =
  tree.findFlags(n)

template uoperand*(n: NodePos): uint32 =
  tree.nodes[n.int].uoperand

proc span*(tree: PackedTree; pos: int): int {.inline.} =
  ## Size of the subtree at `pos` in nodes; atoms occupy exactly one.
  if isAtom(tree, pos): 1 else: tree.nodes[pos].rawSpan
|
||||
|
||||
proc sons2*(tree: PackedTree; n: NodePos): (NodePos, NodePos) =
  ## Positions of the first two children of `n`.
  assert(not isAtom(tree, n.int))
  let first = n.int+1
  let second = first + span(tree, first)
  result = (NodePos first, NodePos second)

proc sons3*(tree: PackedTree; n: NodePos): (NodePos, NodePos, NodePos) =
  ## Positions of the first three children of `n`.
  assert(not isAtom(tree, n.int))
  let first = n.int+1
  let second = first + span(tree, first)
  let third = second + span(tree, second)
  result = (NodePos first, NodePos second, NodePos third)

proc ithSon*(tree: PackedTree; n: NodePos; i: int): NodePos =
  ## Returns the `i`-th (0-based) child of `n`; asserts when there is none.
  result = default(NodePos)
  if tree.nodes[n.int].kind > nkNilLit:
    var childIndex = 0
    for child in sonsReadonly(tree, n):
      if childIndex == i: return child
      inc childIndex
  assert false, "node has no i-th child"
|
||||
|
||||
when false:
  proc `@`*(tree: PackedTree; lit: LitId): lent string {.inline.} =
    tree.sh.strings[lit]

# Templates that expect a `tree` symbol to be visible at the call site:
template kind*(n: NodePos): TNodeKind = tree.nodes[n.int].kind
template info*(n: NodePos): PackedLineInfo = tree.nodes[n.int].info
template litId*(n: NodePos): LitId = LitId tree.nodes[n.int].uoperand

template symId*(n: NodePos): SymId = SymId tree.nodes[n.int].soperand

proc firstSon*(n: NodePos): NodePos {.inline.} = NodePos(n.int+1)

const
  # Integer literal kinds whose value lives in the string table rather than
  # directly in the node's operand:
  externIntLit* = {nkCharLit,
                   nkIntLit,
                   nkInt8Lit,
                   nkInt16Lit,
                   nkInt32Lit,
                   nkInt64Lit,
                   nkUIntLit,
                   nkUInt8Lit,
                   nkUInt16Lit,
                   nkUInt32Lit,
                   nkUInt64Lit}

  externSIntLit* = {nkIntLit, nkInt8Lit, nkInt16Lit, nkInt32Lit, nkInt64Lit}
  externUIntLit* = {nkUIntLit, nkUInt8Lit, nkUInt16Lit, nkUInt32Lit, nkUInt64Lit}
  directIntLit* = nkNone
|
||||
|
||||
template copyInto*(dest, n, body) =
  ## Copies the head of `n` (from a `tree` visible at the call site) into
  ## `dest`, runs `body` to append children, then patches the span.
  let patchPos = prepare(dest, tree, n)
  body
  patch dest, patchPos

template copyIntoKind*(dest, kind, info, body) =
  ## Starts a fresh node of `kind`, runs `body`, then patches the span.
  ## NOTE(review): this relies on a 3-argument `prepare` overload being in
  ## scope at the instantiation site; none is declared in this module.
  let patchPos = prepare(dest, kind, info)
  body
  patch dest, patchPos

proc getNodeId*(tree: PackedTree): NodeId {.inline.} = NodeId tree.nodes.len

iterator allNodes*(tree: PackedTree): NodePos =
  ## Yields every top-level subtree root, in storage order.
  var cursor = 0
  while cursor < tree.len:
    yield NodePos(cursor)
    let width = span(tree, cursor)
    inc cursor, width

proc toPackedItemId*(item: int32): PackedItemId {.inline.} =
  ## Wraps a local item id; module `LitId(0)` apparently denotes the
  ## current module (TODO confirm against the decoder).
  PackedItemId(module: LitId(0), item: item)

proc load*(f: var RodFile; t: var PackedTree) =
  ## Deserializes a tree; field order must mirror `store` exactly.
  loadSeq f, t.nodes
  loadSeq f, t.withFlags
  loadSeq f, t.withTypes

proc store*(f: var RodFile; t: PackedTree) =
  ## Serializes a tree; field order must mirror `load` exactly.
  storeSeq f, t.nodes
  storeSeq f, t.withFlags
  storeSeq f, t.withTypes
|
||||
@@ -19,8 +19,6 @@ import std/tables
|
||||
when defined(nimPreviewSlimSystem):
|
||||
import std/assertions
|
||||
|
||||
import packed_ast, ic, bitabs
|
||||
|
||||
proc replayStateChanges*(module: PSym; g: ModuleGraph) =
|
||||
let list = module.ast
|
||||
assert list != nil
|
||||
@@ -88,84 +86,3 @@ proc replayStateChanges*(module: PSym; g: ModuleGraph) =
|
||||
g.cacheSeqs[destKey].add val
|
||||
else:
|
||||
internalAssert g.config, false
|
||||
|
||||
proc replayBackendProcs*(g: ModuleGraph; module: int) =
  ## Re-registers, from the cached module `module`, the symbols the backend
  ## needs: type-attached operators, enum-to-string procs, per-type methods
  ## and dispatchers. Everything is registered lazily (`sym: nil`); the
  ## actual PSym is materialized on first use.
  for entry in mitems(g.packed[module].fromDisk.attachedOps):
    let key = translateId(entry[0], g.packed, module, g.config)
    let op = entry[1]
    let translated = translateId(entry[2], g.packed, module, g.config)
    let symId = FullId(module: translated.module, packed: entry[2])
    g.attachedOps[op][key] = LazySym(id: symId, sym: nil)

  for entry in mitems(g.packed[module].fromDisk.enumToStringProcs):
    let key = translateId(entry[0], g.packed, module, g.config)
    let translated = translateId(entry[1], g.packed, module, g.config)
    let symId = FullId(module: translated.module, packed: entry[1])
    g.enumToStringProcs[key] = LazySym(id: symId, sym: nil)

  for entry in mitems(g.packed[module].fromDisk.methodsPerType):
    let key = translateId(entry[0], g.packed, module, g.config)
    let translated = translateId(entry[1], g.packed, module, g.config)
    let symId = FullId(module: translated.module, packed: entry[1])
    g.methodsPerType.mgetOrPut(key, @[]).add LazySym(id: symId, sym: nil)

  for entry in mitems(g.packed[module].fromDisk.dispatchers):
    let translated = translateId(entry, g.packed, module, g.config)
    let symId = FullId(module: translated.module, packed: entry)
    g.dispatchers.add LazySym(id: symId, sym: nil)
|
||||
|
||||
proc replayGenericCacheInformation*(g: ModuleGraph; module: int) =
  ## We remember the generic instantiations a module performed in order to
  ## avoid the code bloat that generic code tends to imply. This is cheaper
  ## than deduplication of identical generic instantiations. However,
  ## deduplication is more powerful and general and I hope to implement it
  ## soon too (famous last words).
  assert g.packed[module].status == loaded
  # type instantiations, registered lazily:
  for entry in g.packed[module].fromDisk.typeInstCache:
    let key = translateId(entry[0], g.packed, module, g.config)
    g.typeInstCache.mgetOrPut(key, @[]).add LazyType(id: FullId(module: module, packed: entry[1]), typ: nil)

  # proc instantiations, including the concrete types they were made with:
  for entry in mitems(g.packed[module].fromDisk.procInstCache):
    let key = translateId(entry.key, g.packed, module, g.config)
    let sym = translateId(entry.sym, g.packed, module, g.config)
    var concreteTypes = newSeq[FullId](entry.concreteTypes.len)
    for i in 0..high(entry.concreteTypes):
      let translated = translateId(entry.concreteTypes[i], g.packed, module, g.config)
      concreteTypes[i] = FullId(module: translated.module, packed: entry.concreteTypes[i])

    g.procInstCache.mgetOrPut(key, @[]).add LazyInstantiation(
      module: module, sym: FullId(module: sym.module, packed: entry.sym),
      concreteTypes: concreteTypes, inst: nil)

  # methods attached to generic types, keyed by (type, column):
  for entry in mitems(g.packed[module].fromDisk.methodsPerGenericType):
    let key = translateId(entry[0], g.packed, module, g.config)
    let col = entry[1]
    let translated = translateId(entry[2], g.packed, module, g.config)
    let symId = FullId(module: translated.module, packed: entry[2])
    g.methodsPerGenericType.mgetOrPut(key, @[]).add (col, LazySym(id: symId, sym: nil))

  replayBackendProcs(g, module)

  # methods need eager loading so the dispatcher machinery sees them:
  for entry in mitems(g.packed[module].fromDisk.methods):
    let sym = loadSymFromId(g.config, g.cache, g.packed, module,
                            PackedItemId(module: LitId(0), item: entry))
    methodDef(g, g.idgen, sym)

  when false:
    # not used anymore:
    for it in mitems(g.packed[module].fromDisk.compilerProcs):
      let symId = FullId(module: module, packed: PackedItemId(module: LitId(0), item: it[1]))
      g.lazyCompilerprocs[g.packed[module].fromDisk.sh.strings[it[0]]] = symId

    for it in mitems(g.packed[module].fromDisk.converters):
      let symId = FullId(module: module, packed: PackedItemId(module: LitId(0), item: it))
      g.ifaces[module].converters.add LazySym(id: symId, sym: nil)

    for it in mitems(g.packed[module].fromDisk.trmacros):
      let symId = FullId(module: module, packed: PackedItemId(module: LitId(0), item: it))
      g.ifaces[module].patterns.add LazySym(id: symId, sym: nil)

    for it in mitems(g.packed[module].fromDisk.pureEnums):
      let symId = FullId(module: module, packed: PackedItemId(module: LitId(0), item: it))
      g.ifaces[module].pureEnums.add LazySym(id: symId, sym: nil)
|
||||
|
||||
@@ -1,283 +0,0 @@
|
||||
#
|
||||
#
|
||||
# The Nim Compiler
|
||||
# (c) Copyright 2020 Andreas Rumpf
|
||||
#
|
||||
# See the file "copying.txt", included in this
|
||||
# distribution, for details about the copyright.
|
||||
#
|
||||
|
||||
## Low level binary format used by the compiler to store and load various AST
|
||||
## and related data.
|
||||
##
|
||||
## NB: this is incredibly low level and if you're interested in how the
|
||||
## compiler works and less a storage format, you're probably looking for
|
||||
## the `ic` or `packed_ast` modules to understand the logical format.
|
||||
|
||||
from std/typetraits import supportsCopyMem
|
||||
|
||||
when defined(nimPreviewSlimSystem):
|
||||
import std/[syncio, assertions]
|
||||
|
||||
import std / tables
|
||||
|
||||
## Overview
|
||||
## ========
|
||||
## `RodFile` represents a Rod File (versioned binary format), and the
|
||||
## associated data for common interactions such as IO and error tracking
|
||||
## (`RodFileError`). The file format is broken up into sections (`RodSection`)
|
||||
## and preceded by a header (see: `cookie`). The precise layout, section
|
||||
## ordering and data following the section are determined by the user. See
|
||||
## `ic.loadRodFile`.
|
||||
##
|
||||
## A basic but "wrong" example of the lifecycle:
|
||||
## ---------------------------------------------
|
||||
## 1. `create` or `open` - create a new one or open an existing
|
||||
## 2. `storeHeader` - header info
|
||||
## 3. `storePrim` or `storeSeq` - save your stuff
|
||||
## 4. `close` - and we're done
|
||||
##
|
||||
## Now read the bits below to understand what's missing.
|
||||
##
|
||||
## ### Issues with the Example
|
||||
## Missing Sections:
|
||||
## This is a low level API, so headers and sections need to be stored and
|
||||
## loaded by the user, see `storeHeader` & `loadHeader` and `storeSection` &
|
||||
## `loadSection`, respectively.
|
||||
##
|
||||
## No Error Handling:
|
||||
## The API is centered around IO and prone to error, each operation checks or
|
||||
## sets the `RodFile.err` field. A user of this API needs to handle these
|
||||
## appropriately.
|
||||
##
|
||||
## API Notes
|
||||
## =========
|
||||
##
|
||||
## Valid inputs for Rod files
|
||||
## --------------------------
|
||||
## ASTs, hopes, dreams, and anything as long as it and any children it may have
|
||||
## support `copyMem`. This means anything that is not a pointer and that does not contain a pointer. At a glance these are:
|
||||
## * string
|
||||
## * objects & tuples (fields are recursed)
|
||||
## * sequences AKA `seq[T]`
|
||||
##
|
||||
## Note on error handling style
|
||||
## ----------------------------
|
||||
## A flag based approach is used where operations no-op in case of a
|
||||
## preexisting error and set the flag if they encounter one.
|
||||
##
|
||||
## Misc
|
||||
## ----
|
||||
## * 'Prim' is short for 'primitive', as in a non-sequence type
|
||||
|
||||
# The RodSection ordinal values are written to disk, so the member order
# below is part of the on-disk format -- never reorder, only append.
type
  RodSection* = enum
    versionSection
    configSection
    stringsSection
    checkSumsSection
    depsSection
    numbersSection
    exportsSection
    hiddenSection
    reexportsSection
    compilerProcsSection
    trmacrosSection
    convertersSection
    methodsSection
    pureEnumsSection
    toReplaySection
    topLevelSection
    bodiesSection
    symsSection
    typesSection
    typeInstCacheSection
    procInstCacheSection
    attachedOpsSection
    methodsPerGenericTypeSection
    enumToStringProcsSection
    methodsPerTypeSection
    dispatchersSection
    typeInfoSection # required by the backend
    backendFlagsSection
    aliveSymsSection # beware, this is stored in a `.alivesyms` file.
    sideChannelSection
    namespaceSection
    symnamesSection

  RodFileError* = enum
    ok, tooBig, cannotOpen, ioFailure, wrongHeader, wrongSection, configMismatch,
    includeFileChanged

  RodFile* = object
    f*: File
    currentSection*: RodSection # for error checking
    err*: RodFileError # little experiment to see if this works
                       # better than exceptions.

const
  RodVersion = 2
  # Header: version/magic/word size/endianness; see storeHeader/loadHeader.
  defaultCookie = [byte(0), byte('R'), byte('O'), byte('D'),
                   byte(sizeof(int)*8), byte(system.cpuEndian), byte(0), byte(RodVersion)]
|
||||
|
||||
proc setError(f: var RodFile; err: RodFileError) {.inline.} =
  ## Records an error; every operation checks `f.err` first and no-ops
  ## once it is set, so the first failure sticks.
  f.err = err
  #raise newException(IOError, "IO error")

proc storePrim*(f: var RodFile; s: string) =
  ## Stores a string, prefixed with its length for later retrieval.
  if f.err != ok: return
  if s.len >= high(int32):
    setError f, tooBig
    return
  var lenPrefix = int32(s.len)
  if writeBuffer(f.f, addr lenPrefix, sizeof(lenPrefix)) != sizeof(lenPrefix):
    setError f, ioFailure
  else:
    if s.len != 0:
      if writeBuffer(f.f, unsafeAddr(s[0]), s.len) != s.len:
        setError f, ioFailure
|
||||
|
||||
proc storePrim*[T](f: var RodFile; x: T) =
  ## Stores a non-sequence/string `T`. Types that support `copyMem` are
  ## dumped raw; otherwise object/tuple fields are stored one by one -- the
  ## loader must know from context which `T` to expect.
  if f.err != ok: return
  when supportsCopyMem(T):
    if writeBuffer(f.f, unsafeAddr(x), sizeof(x)) != sizeof(x):
      setError f, ioFailure
  elif T is tuple:
    for field in fields(x):
      storePrim(f, field)
  elif T is object:
    for field in fields(x):
      when field is seq:
        storeSeq(f, field)
      else:
        storePrim(f, field)
  else:
    {.error: "unsupported type for 'storePrim'".}
|
||||
|
||||
proc storeSeq*[T](f: var RodFile; s: seq[T]) =
  ## Stores a sequence of `T`s, with the length as a prefix for retrieval.
  if f.err != ok: return
  if s.len >= high(int32):
    setError f, tooBig
    return
  var lenPrefix = int32(s.len)
  if writeBuffer(f.f, addr lenPrefix, sizeof(lenPrefix)) != sizeof(lenPrefix):
    setError f, ioFailure
  else:
    for i in 0..<s.len:
      storePrim(f, s[i])

proc storeOrderedTable*[K, T](f: var RodFile; s: OrderedTable[K, T]) =
  ## Stores only the values of an ordered table, length-prefixed; the
  ## loader reconstructs each key from the value (see `loadOrderedTable`).
  if f.err != ok: return
  if s.len >= high(int32):
    setError f, tooBig
    return
  var lenPrefix = int32(s.len)
  if writeBuffer(f.f, addr lenPrefix, sizeof(lenPrefix)) != sizeof(lenPrefix):
    setError f, ioFailure
  else:
    for _, v in s:
      storePrim(f, v)
|
||||
|
||||
proc loadPrim*(f: var RodFile; s: var string) =
  ## Reads a string that was written with a length prefix (see `storePrim`).
  ## On any IO error or a corrupted (negative) length, `f.err` is set and
  ## `s` is left untouched.
  if f.err != ok: return
  var lenPrefix = int32(0)
  if readBuffer(f.f, addr lenPrefix, sizeof(lenPrefix)) != sizeof(lenPrefix):
    setError f, ioFailure
  elif lenPrefix < 0:
    # A corrupted file must set the module's error flag instead of letting
    # `newString` raise a RangeDefect on a negative length.
    setError f, ioFailure
  else:
    s = newString(lenPrefix)
    if lenPrefix > 0:
      # `s` is mutable here, so plain `addr` suffices.
      if readBuffer(f.f, addr(s[0]), s.len) != s.len:
        setError f, ioFailure
|
||||
|
||||
proc loadPrim*[T](f: var RodFile; x: var T) =
  ## Loads a non-sequence/string `T`; the exact mirror of `storePrim[T]`.
  if f.err != ok: return
  when supportsCopyMem(T):
    if readBuffer(f.f, unsafeAddr(x), sizeof(x)) != sizeof(x):
      setError f, ioFailure
  elif T is tuple:
    for field in fields(x):
      loadPrim(f, field)
  elif T is object:
    for field in fields(x):
      when field is seq:
        loadSeq(f, field)
      else:
        loadPrim(f, field)
  else:
    {.error: "unsupported type for 'loadPrim'".}
|
||||
|
||||
proc loadSeq*[T](f: var RodFile; s: var seq[T]) =
  ## Loads a sequence written by `storeSeq`; elements are read via
  ## `loadPrim`. A corrupted (negative) length prefix sets `f.err` instead
  ## of raising from `newSeq`.
  if f.err != ok: return
  var lenPrefix = int32(0)
  if readBuffer(f.f, addr lenPrefix, sizeof(lenPrefix)) != sizeof(lenPrefix):
    setError f, ioFailure
  elif lenPrefix < 0:
    # keep the file's flag-based error style on corrupted input
    setError f, ioFailure
  else:
    s = newSeq[T](lenPrefix)
    for i in 0..<lenPrefix:
      loadPrim(f, s[i])

proc loadOrderedTable*[K, T](f: var RodFile; s: var OrderedTable[K, T]) =
  ## Loads a table written by `storeOrderedTable`: only values were stored,
  ## so each entry's key is recomputed from `value.id`. A corrupted
  ## (negative) length prefix sets `f.err` instead of raising.
  if f.err != ok: return
  var lenPrefix = int32(0)
  if readBuffer(f.f, addr lenPrefix, sizeof(lenPrefix)) != sizeof(lenPrefix):
    setError f, ioFailure
  elif lenPrefix < 0:
    # keep the file's flag-based error style on corrupted input
    setError f, ioFailure
  else:
    s = initOrderedTable[K, T](lenPrefix)
    for i in 0..<lenPrefix:
      var x = default T
      loadPrim(f, x)
      s[x.id] = x
|
||||
|
||||
proc storeHeader*(f: var RodFile; cookie = defaultCookie) =
  ## Writes the header bytes described by `cookie`.
  if f.err != ok: return
  if f.f.writeBytes(cookie, 0, cookie.len) != cookie.len:
    setError f, ioFailure

proc loadHeader*(f: var RodFile; cookie = defaultCookie) =
  ## Reads the header and requires a byte-for-byte match with `cookie`.
  if f.err != ok: return
  var thisCookie: array[cookie.len, byte] = default(array[cookie.len, byte])
  if f.f.readBytes(thisCookie, 0, thisCookie.len) != thisCookie.len:
    setError f, ioFailure
  elif thisCookie != cookie:
    setError f, wrongHeader

proc storeSection*(f: var RodFile; s: RodSection) =
  ## Updates `currentSection` and writes the byte value of `s`; the assert
  ## enforces that sections are written in ascending enum order.
  if f.err != ok: return
  assert f.currentSection < s
  f.currentSection = s
  storePrim(f, s)

proc loadSection*(f: var RodFile; expected: RodSection) =
  ## Reads a section marker and sets an error if it is not `expected`.
  if f.err != ok: return
  var s: RodSection = default(RodSection)
  loadPrim(f, s)
  if expected != s and f.err == ok:
    setError f, wrongSection
|
||||
|
||||
proc create*(filename: string): RodFile =
  ## Creates the file and opens it for writing; on failure `err` becomes
  ## `cannotOpen` and `result.f` stays nil.
  result = default(RodFile)
  if not open(result.f, filename, fmWrite):
    setError result, cannotOpen

proc close*(f: var RodFile) =
  ## Closes the underlying file. Guarded so that closing a RodFile whose
  ## `create`/`open` failed (nil handle) is a safe no-op instead of a crash.
  if f.f != nil: close(f.f)

proc open*(filename: string): RodFile =
  ## Opens the file for reading; on failure `err` becomes `cannotOpen` and
  ## `result.f` stays nil.
  result = default(RodFile)
  if not open(result.f, filename, fmRead):
    setError result, cannotOpen
|
||||
@@ -108,8 +108,8 @@ proc rawImportSymbol(c: PContext, s, origin: PSym; importSet: var IntSet) =
|
||||
else:
|
||||
importPureEnumField(c, e)
|
||||
else:
|
||||
if s.kind == skConverter: addConverter(c, LazySym(sym: s))
|
||||
if hasPattern(s): addPattern(c, LazySym(sym: s))
|
||||
if s.kind == skConverter: addConverter(c, s)
|
||||
if hasPattern(s): addPattern(c, s)
|
||||
if s.owner != origin:
|
||||
c.exportIndirections.incl((origin.id, s.id))
|
||||
|
||||
@@ -190,22 +190,19 @@ proc addImport(c: PContext; im: sink ImportedModule) =
|
||||
template addUnnamedIt(c: PContext, fromMod: PSym; filter: untyped) {.dirty.} =
|
||||
for it in mitems c.graph.ifaces[fromMod.position].converters:
|
||||
if filter:
|
||||
loadPackedSym(c.graph, it)
|
||||
if sfExported in it.sym.flags:
|
||||
if sfExported in it.flags:
|
||||
addConverter(c, it)
|
||||
for it in mitems c.graph.ifaces[fromMod.position].patterns:
|
||||
if filter:
|
||||
loadPackedSym(c.graph, it)
|
||||
if sfExported in it.sym.flags:
|
||||
if sfExported in it.flags:
|
||||
addPattern(c, it)
|
||||
for it in mitems c.graph.ifaces[fromMod.position].pureEnums:
|
||||
if filter:
|
||||
loadPackedSym(c.graph, it)
|
||||
importPureEnumFields(c, it.sym, it.sym.typ)
|
||||
importPureEnumFields(c, it, it.typ)
|
||||
|
||||
proc importAllSymbolsExcept(c: PContext, fromMod: PSym, exceptSet: IntSet) =
|
||||
c.addImport ImportedModule(m: fromMod, mode: importExcept, exceptSet: exceptSet)
|
||||
addUnnamedIt(c, fromMod, it.sym.name.id notin exceptSet)
|
||||
addUnnamedIt(c, fromMod, it.name.id notin exceptSet)
|
||||
|
||||
proc importAllSymbols*(c: PContext, fromMod: PSym) =
|
||||
c.addImport ImportedModule(m: fromMod, mode: importAll)
|
||||
|
||||
@@ -412,8 +412,6 @@ proc addDecl*(c: PContext, sym: PSym) {.inline.} =
|
||||
proc addPrelimDecl*(c: PContext, sym: PSym) =
|
||||
discard c.currentScope.addUniqueSym(sym)
|
||||
|
||||
from ic / ic import addHidden
|
||||
|
||||
proc addInterfaceDeclAux(c: PContext, sym: PSym) =
|
||||
## adds symbol to the module for either private or public access.
|
||||
if sfExported in sym.flags:
|
||||
@@ -422,8 +420,6 @@ proc addInterfaceDeclAux(c: PContext, sym: PSym) =
|
||||
else: internalError(c.config, sym.info, "addInterfaceDeclAux")
|
||||
elif sym.kind in ExportableSymKinds and c.module != nil and isTopLevelInsideDeclaration(c, sym):
|
||||
strTableAdd(semtabAll(c.graph, c.module), sym)
|
||||
if c.config.symbolFiles != disabledSf:
|
||||
addHidden(c.encoder, c.packedRepr, sym)
|
||||
|
||||
proc addInterfaceDeclAt*(c: PContext, scope: PScope, sym: PSym) =
|
||||
## adds a symbol on the scope and the interface if appropriate
|
||||
|
||||
@@ -26,8 +26,6 @@ import
|
||||
when defined(nimPreviewSlimSystem):
|
||||
import std/[syncio, assertions]
|
||||
|
||||
import ic / [cbackend, integrity, navigator, ic]
|
||||
|
||||
import ../dist/checksums/src/checksums/sha1
|
||||
|
||||
import pipelines
|
||||
@@ -99,14 +97,6 @@ proc commandCheck(graph: ModuleGraph) =
|
||||
setPipeLinePass(graph, SemPass)
|
||||
compilePipelineProject(graph)
|
||||
|
||||
if conf.symbolFiles != disabledSf:
|
||||
case conf.ideCmd
|
||||
of ideDef: navDefinition(graph)
|
||||
of ideUse: navUsages(graph)
|
||||
of ideDus: navDefusages(graph)
|
||||
else: discard
|
||||
writeRodFiles(graph)
|
||||
|
||||
when not defined(leanCompiler):
|
||||
proc commandDoc2(graph: ModuleGraph; ext: string) =
|
||||
handleDocOutputOptions graph.config
|
||||
@@ -173,15 +163,7 @@ proc commandCompileToC(graph: ModuleGraph) =
|
||||
compilePipelineProject(graph)
|
||||
if graph.config.errorCounter > 0:
|
||||
return # issue #9933
|
||||
if conf.symbolFiles == disabledSf:
|
||||
cgenWriteModules(graph.backend, conf)
|
||||
else:
|
||||
if isDefined(conf, "nimIcIntegrityChecks"):
|
||||
checkIntegrity(graph)
|
||||
generateCode(graph)
|
||||
# graph.backend can be nil under IC when nothing changed at all:
|
||||
if graph.backend != nil:
|
||||
cgenWriteModules(graph.backend, conf)
|
||||
cgenWriteModules(graph.backend, conf)
|
||||
if conf.cmd != cmdTcc and graph.backend != nil:
|
||||
extccomp.callCCompiler(conf)
|
||||
# for now we do not support writing out a .json file with the build instructions when HCR is on
|
||||
@@ -241,10 +223,6 @@ proc commandScan(cache: IdentCache, config: ConfigRef) =
|
||||
else:
|
||||
rawMessage(config, errGenerated, "cannot open file: " & f.string)
|
||||
|
||||
proc commandView(graph: ModuleGraph) =
|
||||
let f = toAbsolute(mainCommandArg(graph.config), AbsoluteDir getCurrentDir()).addFileExt(RodExt)
|
||||
rodViewer(f, graph.config, graph.cache)
|
||||
|
||||
const
|
||||
PrintRopeCacheStats = false
|
||||
|
||||
@@ -342,8 +320,6 @@ proc mainCommand*(graph: ModuleGraph) =
|
||||
case conf.cmd
|
||||
of cmdBackends:
|
||||
compileToBackend()
|
||||
when BenchIC:
|
||||
echoTimes graph.packed
|
||||
of cmdTcc:
|
||||
when hasTinyCBackend:
|
||||
extccomp.setCC(conf, "tcc", unknownLineInfo)
|
||||
@@ -461,10 +437,6 @@ proc mainCommand*(graph: ModuleGraph) =
|
||||
of cmdParse:
|
||||
wantMainModule(conf)
|
||||
discard parseFile(conf.projectMainIdx, cache, conf)
|
||||
of cmdRod:
|
||||
wantMainModule(conf)
|
||||
commandView(graph)
|
||||
#msgWriteln(conf, "Beware: Indentation tokens depend on the parser's state!")
|
||||
of cmdInteractive: commandInteractive(graph)
|
||||
of cmdNimscript:
|
||||
if conf.projectIsCmd or conf.projectIsStdin: discard
|
||||
|
||||
@@ -14,7 +14,6 @@
|
||||
import std/[intsets, tables, hashes, strtabs, os, strutils, parseutils]
|
||||
import ../dist/checksums/src/checksums/md5
|
||||
import ast, astalgo, options, lineinfos,idents, btrees, ropes, msgs, pathutils, packages, suggestsymdb
|
||||
import ic / [packed_ast, ic]
|
||||
|
||||
when not defined(nimKochBootstrap):
|
||||
import ast2nif
|
||||
@@ -28,16 +27,12 @@ when defined(nimPreviewSlimSystem):
|
||||
type
|
||||
SigHash* = distinct MD5Digest
|
||||
|
||||
LazySym* = object
|
||||
id*: FullId
|
||||
sym*: PSym
|
||||
|
||||
Iface* = object ## data we don't want to store directly in the
|
||||
## ast.PSym type for s.kind == skModule
|
||||
module*: PSym ## module this "Iface" belongs to
|
||||
converters*: seq[LazySym]
|
||||
patterns*: seq[LazySym]
|
||||
pureEnums*: seq[LazySym]
|
||||
converters*: seq[PSym]
|
||||
patterns*: seq[PSym]
|
||||
pureEnums*: seq[PSym]
|
||||
interf: TStrTable
|
||||
interfHidden: TStrTable
|
||||
uniqueName*: Rope
|
||||
@@ -46,20 +41,6 @@ type
|
||||
opNot*, opContains*, opLe*, opLt*, opAnd*, opOr*, opIsNil*, opEq*: PSym
|
||||
opAdd*, opSub*, opMul*, opDiv*, opLen*: PSym
|
||||
|
||||
FullId* = object
|
||||
module*: int
|
||||
packed*: PackedItemId
|
||||
|
||||
LazyType* = object
|
||||
id*: FullId
|
||||
typ*: PType
|
||||
|
||||
LazyInstantiation* = object
|
||||
module*: int
|
||||
sym*: FullId
|
||||
concreteTypes*: seq[FullId]
|
||||
inst*: PInstantiation
|
||||
|
||||
PipelinePass* = enum
|
||||
NonePass
|
||||
SemPass
|
||||
@@ -75,21 +56,18 @@ type
|
||||
|
||||
ModuleGraph* {.acyclic.} = ref object
|
||||
ifaces*: seq[Iface] ## indexed by int32 fileIdx
|
||||
packed*: PackedModuleGraph
|
||||
encoders*: seq[PackedEncoder]
|
||||
|
||||
typeInstCache*: Table[ItemId, seq[LazyType]] # A symbol's ItemId.
|
||||
procInstCache*: Table[ItemId, seq[LazyInstantiation]] # A symbol's ItemId.
|
||||
attachedOps*: array[TTypeAttachedOp, Table[ItemId, LazySym]] # Type ID, destructors, etc.
|
||||
typeInstCache*: Table[ItemId, seq[PType]] # A symbol's ItemId.
|
||||
procInstCache*: Table[ItemId, seq[PInstantiation]] # A symbol's ItemId.
|
||||
attachedOps*: array[TTypeAttachedOp, Table[ItemId, PSym]] # Type ID, destructors, etc.
|
||||
loadedOps: array[TTypeAttachedOp, Table[string, PSym]] # This can later by unified with `attachedOps` once it's stable
|
||||
opsLog*: seq[LogEntry]
|
||||
methodsPerGenericType*: Table[ItemId, seq[(int, LazySym)]] # Type ID, attached methods
|
||||
methodsPerGenericType*: Table[ItemId, seq[(int, PSym)]] # Type ID, attached methods
|
||||
memberProcsPerType*: Table[ItemId, seq[PSym]] # Type ID, attached member procs (only c++, virtual,member and ctor so far).
|
||||
initializersPerType*: Table[ItemId, PNode] # Type ID, AST call to the default ctor (c++ only)
|
||||
enumToStringProcs*: Table[ItemId, LazySym]
|
||||
enumToStringProcs*: Table[ItemId, PSym]
|
||||
emittedTypeInfo*: Table[string, FileIndex]
|
||||
|
||||
startupPackedConfig*: PackedConfig
|
||||
packageSyms*: TStrTable
|
||||
deps*: IntSet # the dependency graph or potentially its transitive closure.
|
||||
importDeps*: Table[FileIndex, seq[FileIndex]] # explicit import module dependencies
|
||||
@@ -115,8 +93,8 @@ type
|
||||
methods*: seq[tuple[methods: seq[PSym], dispatcher: PSym]] # needs serialization!
|
||||
bucketTable*: CountTable[ItemId]
|
||||
objectTree*: Table[ItemId, seq[tuple[depth: int, value: PType]]]
|
||||
methodsPerType*: Table[ItemId, seq[LazySym]]
|
||||
dispatchers*: seq[LazySym]
|
||||
methodsPerType*: Table[ItemId, seq[PSym]]
|
||||
dispatchers*: seq[PSym]
|
||||
|
||||
systemModule*: PSym
|
||||
sysTypes*: array[TTypeKind, PType]
|
||||
@@ -146,6 +124,7 @@ type
|
||||
|
||||
procGlobals*: seq[PNode]
|
||||
nifReplayActions*: Table[int32, seq[PNode]] # module position -> replay actions for NIF
|
||||
cachedMods: IntSet
|
||||
|
||||
TPassContext* = object of RootObj # the pass's context
|
||||
idgen*: IdGenerator
|
||||
@@ -228,85 +207,43 @@ proc strTableAdds*(g: ModuleGraph, m: PSym, s: PSym) =
|
||||
strTableAdd(semtabAll(g, m), s)
|
||||
|
||||
proc isCachedModule(g: ModuleGraph; module: int): bool {.inline.} =
|
||||
result = module < g.packed.len and g.packed[module].status == loaded
|
||||
result = module in g.cachedMods
|
||||
|
||||
proc isCachedModule*(g: ModuleGraph; m: PSym): bool {.inline.} =
|
||||
isCachedModule(g, m.position)
|
||||
|
||||
proc simulateCachedModule(g: ModuleGraph; moduleSym: PSym; m: PackedModule) =
|
||||
when false:
|
||||
echo "simulating ", moduleSym.name.s, " ", moduleSym.position
|
||||
simulateLoadedModule(g.packed, g.config, g.cache, moduleSym, m)
|
||||
|
||||
proc initEncoder*(g: ModuleGraph; module: PSym) =
|
||||
let id = module.position
|
||||
if id >= g.encoders.len:
|
||||
setLen g.encoders, id+1
|
||||
ic.initEncoder(g.encoders[id],
|
||||
g.packed[id].fromDisk, module, g.config, g.startupPackedConfig)
|
||||
|
||||
type
|
||||
ModuleIter* = object
|
||||
fromRod: bool
|
||||
modIndex: int
|
||||
ti: TIdentIter
|
||||
rodIt: RodIter
|
||||
importHidden: bool
|
||||
|
||||
proc initModuleIter*(mi: var ModuleIter; g: ModuleGraph; m: PSym; name: PIdent): PSym =
|
||||
assert m.kind == skModule
|
||||
mi.modIndex = m.position
|
||||
mi.fromRod = isCachedModule(g, mi.modIndex)
|
||||
mi.importHidden = optImportHidden in m.options
|
||||
if mi.fromRod:
|
||||
result = initRodIter(mi.rodIt, g.config, g.cache, g.packed, FileIndex mi.modIndex, name, mi.importHidden)
|
||||
else:
|
||||
result = initIdentIter(mi.ti, g.ifaces[mi.modIndex].interfSelect(mi.importHidden), name)
|
||||
result = initIdentIter(mi.ti, g.ifaces[mi.modIndex].interfSelect(mi.importHidden), name)
|
||||
|
||||
proc nextModuleIter*(mi: var ModuleIter; g: ModuleGraph): PSym =
|
||||
if mi.fromRod:
|
||||
result = nextRodIter(mi.rodIt, g.packed)
|
||||
else:
|
||||
result = nextIdentIter(mi.ti, g.ifaces[mi.modIndex].interfSelect(mi.importHidden))
|
||||
result = nextIdentIter(mi.ti, g.ifaces[mi.modIndex].interfSelect(mi.importHidden))
|
||||
|
||||
iterator allSyms*(g: ModuleGraph; m: PSym): PSym =
|
||||
let importHidden = optImportHidden in m.options
|
||||
if isCachedModule(g, m):
|
||||
var rodIt: RodIter = default(RodIter)
|
||||
var r = initRodIterAllSyms(rodIt, g.config, g.cache, g.packed, FileIndex m.position, importHidden)
|
||||
while r != nil:
|
||||
yield r
|
||||
r = nextRodIter(rodIt, g.packed)
|
||||
else:
|
||||
for s in g.ifaces[m.position].interfSelect(importHidden).data:
|
||||
if s != nil:
|
||||
yield s
|
||||
for s in g.ifaces[m.position].interfSelect(importHidden).data:
|
||||
if s != nil:
|
||||
yield s
|
||||
|
||||
proc someSym*(g: ModuleGraph; m: PSym; name: PIdent): PSym =
|
||||
let importHidden = optImportHidden in m.options
|
||||
if isCachedModule(g, m):
|
||||
result = interfaceSymbol(g.config, g.cache, g.packed, FileIndex(m.position), name, importHidden)
|
||||
else:
|
||||
result = strTableGet(g.ifaces[m.position].interfSelect(importHidden), name)
|
||||
result = strTableGet(g.ifaces[m.position].interfSelect(importHidden), name)
|
||||
|
||||
proc someSymAmb*(g: ModuleGraph; m: PSym; name: PIdent; amb: var bool): PSym =
|
||||
let importHidden = optImportHidden in m.options
|
||||
if isCachedModule(g, m):
|
||||
result = nil
|
||||
for s in interfaceSymbols(g.config, g.cache, g.packed, FileIndex(m.position), name, importHidden):
|
||||
if result == nil:
|
||||
# set result to the first symbol
|
||||
result = s
|
||||
else:
|
||||
# another symbol found
|
||||
amb = true
|
||||
break
|
||||
else:
|
||||
var ti: TIdentIter = default(TIdentIter)
|
||||
result = initIdentIter(ti, g.ifaces[m.position].interfSelect(importHidden), name)
|
||||
if result != nil and nextIdentIter(ti, g.ifaces[m.position].interfSelect(importHidden)) != nil:
|
||||
# another symbol exists with same name
|
||||
amb = true
|
||||
var ti: TIdentIter = default(TIdentIter)
|
||||
result = initIdentIter(ti, g.ifaces[m.position].interfSelect(importHidden), name)
|
||||
if result != nil and nextIdentIter(ti, g.ifaces[m.position].interfSelect(importHidden)) != nil:
|
||||
# another symbol exists with same name
|
||||
amb = true
|
||||
|
||||
proc systemModuleSym*(g: ModuleGraph; name: PIdent): PSym =
|
||||
result = someSym(g, g.systemModule, name)
|
||||
@@ -318,56 +255,24 @@ iterator systemModuleSyms*(g: ModuleGraph; name: PIdent): PSym =
|
||||
yield r
|
||||
r = nextModuleIter(mi, g)
|
||||
|
||||
proc resolveType(g: ModuleGraph; t: var LazyType): PType =
|
||||
result = t.typ
|
||||
if result == nil and isCachedModule(g, t.id.module):
|
||||
result = loadTypeFromId(g.config, g.cache, g.packed, t.id.module, t.id.packed)
|
||||
t.typ = result
|
||||
assert result != nil
|
||||
|
||||
proc resolveSym(g: ModuleGraph; t: var LazySym): PSym =
|
||||
result = t.sym
|
||||
if result == nil and isCachedModule(g, t.id.module):
|
||||
result = loadSymFromId(g.config, g.cache, g.packed, t.id.module, t.id.packed)
|
||||
t.sym = result
|
||||
assert result != nil
|
||||
|
||||
proc resolveInst(g: ModuleGraph; t: var LazyInstantiation): PInstantiation =
|
||||
result = t.inst
|
||||
if result == nil and isCachedModule(g, t.module):
|
||||
result = PInstantiation(sym: loadSymFromId(g.config, g.cache, g.packed, t.sym.module, t.sym.packed))
|
||||
result.concreteTypes = newSeq[PType](t.concreteTypes.len)
|
||||
for i in 0..high(result.concreteTypes):
|
||||
result.concreteTypes[i] = loadTypeFromId(g.config, g.cache, g.packed,
|
||||
t.concreteTypes[i].module, t.concreteTypes[i].packed)
|
||||
t.inst = result
|
||||
assert result != nil
|
||||
|
||||
proc resolveAttachedOp*(g: ModuleGraph; t: var LazySym): PSym =
|
||||
result = t.sym
|
||||
if result == nil:
|
||||
result = loadSymFromId(g.config, g.cache, g.packed, t.id.module, t.id.packed)
|
||||
t.sym = result
|
||||
assert result != nil
|
||||
|
||||
iterator typeInstCacheItems*(g: ModuleGraph; s: PSym): PType =
|
||||
if g.typeInstCache.contains(s.itemId):
|
||||
let x = addr(g.typeInstCache[s.itemId])
|
||||
for t in mitems(x[]):
|
||||
yield resolveType(g, t)
|
||||
yield t
|
||||
|
||||
iterator procInstCacheItems*(g: ModuleGraph; s: PSym): PInstantiation =
|
||||
if g.procInstCache.contains(s.itemId):
|
||||
let x = addr(g.procInstCache[s.itemId])
|
||||
for t in mitems(x[]):
|
||||
yield resolveInst(g, t)
|
||||
yield t
|
||||
|
||||
|
||||
proc getAttachedOp*(g: ModuleGraph; t: PType; op: TTypeAttachedOp): PSym =
|
||||
## returns the requested attached operation for type `t`. Can return nil
|
||||
## if no such operation exists.
|
||||
if g.attachedOps[op].contains(t.itemId):
|
||||
result = resolveAttachedOp(g, g.attachedOps[op][t.itemId])
|
||||
result = g.attachedOps[op][t.itemId]
|
||||
elif g.config.cmd in {cmdNifC, cmdM}:
|
||||
# Fall back to key-based lookup for NIF-loaded hooks
|
||||
let key = typeKey(t, g.config, loadTypeCallback, loadSymCallback)
|
||||
@@ -388,36 +293,32 @@ proc setAttachedOp*(g: ModuleGraph; module: int; t: PType; op: TTypeAttachedOp;
|
||||
let ownerModule = if t.sym != nil: t.sym.itemId.module.int else: module
|
||||
g.opsLog.add LogEntry(kind: HookEntry, op: op, module: ownerModule, key: key, sym: value)
|
||||
g.loadedOps[op][key] = value
|
||||
g.attachedOps[op][t.itemId] = LazySym(sym: value)
|
||||
g.attachedOps[op][t.itemId] = value
|
||||
|
||||
proc setAttachedOp*(g: ModuleGraph; module: int; typeId: ItemId; op: TTypeAttachedOp; value: PSym) =
|
||||
## Overload that takes ItemId directly, useful for registering hooks from NIF index.
|
||||
g.attachedOps[op][typeId] = LazySym(sym: value)
|
||||
g.attachedOps[op][typeId] = value
|
||||
|
||||
proc setAttachedOpPartial*(g: ModuleGraph; module: int; t: PType; op: TTypeAttachedOp; value: PSym) =
|
||||
## we also need to record this to the packed module.
|
||||
g.attachedOps[op][t.itemId] = LazySym(sym: value)
|
||||
g.attachedOps[op][t.itemId] = value
|
||||
|
||||
proc completePartialOp*(g: ModuleGraph; module: int; t: PType; op: TTypeAttachedOp; value: PSym) =
|
||||
if g.config.symbolFiles != disabledSf:
|
||||
assert module < g.encoders.len
|
||||
assert isActive(g.encoders[module])
|
||||
toPackedGeneratedProcDef(value, g.encoders[module], g.packed[module].fromDisk)
|
||||
#storeAttachedProcDef(t, op, value, g.encoders[module], g.packed[module].fromDisk)
|
||||
proc completePartialOp*(g: ModuleGraph; module: int; t: PType; op: TTypeAttachedOp; value: PSym) {.inline.} =
|
||||
discard
|
||||
|
||||
iterator getDispatchers*(g: ModuleGraph): PSym =
|
||||
for i in g.dispatchers.mitems:
|
||||
yield resolveSym(g, i)
|
||||
yield i
|
||||
|
||||
proc addDispatchers*(g: ModuleGraph, value: PSym) =
|
||||
# TODO: add it for packed modules
|
||||
g.dispatchers.add LazySym(sym: value)
|
||||
g.dispatchers.add value
|
||||
|
||||
iterator resolveLazySymSeq(g: ModuleGraph, list: var seq[LazySym]): PSym =
|
||||
iterator resolveLazySymSeq(g: ModuleGraph, list: var seq[PSym]): PSym =
|
||||
for it in list.mitems:
|
||||
yield resolveSym(g, it)
|
||||
yield it
|
||||
|
||||
proc setMethodsPerType*(g: ModuleGraph; id: ItemId, methods: seq[LazySym]) =
|
||||
proc setMethodsPerType*(g: ModuleGraph; id: ItemId, methods: seq[PSym]) =
|
||||
# TODO: add it for packed modules
|
||||
g.methodsPerType[id] = methods
|
||||
|
||||
@@ -428,14 +329,14 @@ proc addNifReplayAction*(g: ModuleGraph; module: int32; n: PNode) =
|
||||
iterator getMethodsPerType*(g: ModuleGraph; t: PType): PSym =
|
||||
if g.methodsPerType.contains(t.itemId):
|
||||
for it in mitems g.methodsPerType[t.itemId]:
|
||||
yield resolveSym(g, it)
|
||||
yield it
|
||||
|
||||
proc getToStringProc*(g: ModuleGraph; t: PType): PSym =
|
||||
result = resolveSym(g, g.enumToStringProcs[t.itemId])
|
||||
result = g.enumToStringProcs[t.itemId]
|
||||
assert result != nil
|
||||
|
||||
proc setToStringProc*(g: ModuleGraph; t: PType; value: PSym) =
|
||||
g.enumToStringProcs[t.itemId] = LazySym(sym: value)
|
||||
g.enumToStringProcs[t.itemId] = value
|
||||
let key = typeKey(t, g.config, loadTypeCallback, loadSymCallback)
|
||||
let ownerModule = if t.sym != nil: t.sym.itemId.module.int else: value.itemId.module.int
|
||||
g.opsLog.add LogEntry(kind: EnumToStrEntry, module: ownerModule, key: key, sym: value)
|
||||
@@ -443,10 +344,10 @@ proc setToStringProc*(g: ModuleGraph; t: PType; value: PSym) =
|
||||
iterator methodsForGeneric*(g: ModuleGraph; t: PType): (int, PSym) =
|
||||
if g.methodsPerGenericType.contains(t.itemId):
|
||||
for it in mitems g.methodsPerGenericType[t.itemId]:
|
||||
yield (it[0], resolveSym(g, it[1]))
|
||||
yield (it[0], it[1])
|
||||
|
||||
proc addMethodToGeneric*(g: ModuleGraph; module: int; t: PType; col: int; m: PSym) =
|
||||
g.methodsPerGenericType.mgetOrPut(t.itemId, @[]).add (col, LazySym(sym: m))
|
||||
g.methodsPerGenericType.mgetOrPut(t.itemId, @[]).add (col, m)
|
||||
let key = typeKey(t, g.config, loadTypeCallback, loadSymCallback)
|
||||
let ownerModule = if t.sym != nil: t.sym.itemId.module.int else: module
|
||||
g.opsLog.add LogEntry(kind: MethodEntry, module: ownerModule, key: key, sym: m)
|
||||
@@ -496,20 +397,6 @@ proc loadCompilerProc*(g: ModuleGraph; name: string): PSym =
|
||||
return result
|
||||
return nil
|
||||
|
||||
# slow, linear search, but the results are cached:
|
||||
for module in 0..<len(g.packed):
|
||||
#if isCachedModule(g, module):
|
||||
let x = searchForCompilerproc(g.packed[module], name)
|
||||
if x >= 0:
|
||||
result = loadSymFromId(g.config, g.cache, g.packed, module, toPackedItemId(x))
|
||||
if result != nil:
|
||||
strTableAdd(g.compilerprocs, result)
|
||||
return result
|
||||
|
||||
proc loadPackedSym*(g: ModuleGraph; s: var LazySym) =
|
||||
if s.sym == nil:
|
||||
s.sym = loadSymFromId(g.config, g.cache, g.packed, s.id.module, s.id.packed)
|
||||
|
||||
proc `$`*(u: SigHash): string =
|
||||
toBase64a(cast[cstring](unsafeAddr u), sizeof(u))
|
||||
|
||||
@@ -596,16 +483,13 @@ proc registerModule*(g: ModuleGraph; m: PSym) =
|
||||
if m.position >= g.ifaces.len:
|
||||
setLen(g.ifaces, m.position + 1)
|
||||
|
||||
if m.position >= g.packed.len:
|
||||
setLen(g.packed.pm, m.position + 1)
|
||||
|
||||
if g.ifaces[m.position].module == nil:
|
||||
g.ifaces[m.position] = Iface(module: m, converters: @[], patterns: @[],
|
||||
uniqueName: rope(uniqueModuleName(g.config, m)))
|
||||
initStrTables(g, m)
|
||||
|
||||
proc registerModuleById*(g: ModuleGraph; m: FileIndex) =
|
||||
registerModule(g, g.packed[int m].module)
|
||||
registerModule(g, g.ifaces[int m].module)
|
||||
|
||||
proc initOperators*(g: ModuleGraph): Operators =
|
||||
# These are safe for IC.
|
||||
@@ -674,49 +558,13 @@ proc resetAllModules*(g: ModuleGraph) =
|
||||
initModuleGraphFields(g)
|
||||
|
||||
proc getModule*(g: ModuleGraph; fileIdx: FileIndex): PSym =
|
||||
result = nil
|
||||
if fileIdx.int32 >= 0:
|
||||
if isCachedModule(g, fileIdx.int32):
|
||||
result = g.packed[fileIdx.int32].module
|
||||
elif fileIdx.int32 < g.ifaces.len:
|
||||
result = g.ifaces[fileIdx.int32].module
|
||||
if fileIdx.int32 >= 0 and fileIdx.int32 < g.ifaces.len:
|
||||
result = g.ifaces[fileIdx.int32].module
|
||||
else:
|
||||
result = nil
|
||||
|
||||
proc moduleOpenForCodegen*(g: ModuleGraph; m: FileIndex): bool {.inline.} =
|
||||
if g.config.symbolFiles == disabledSf:
|
||||
result = true
|
||||
else:
|
||||
result = g.packed[m.int32].status notin {undefined, stored, loaded}
|
||||
|
||||
proc rememberEmittedTypeInfo*(g: ModuleGraph; m: FileIndex; ti: string) =
|
||||
#assert(not isCachedModule(g, m.int32))
|
||||
if g.config.symbolFiles != disabledSf:
|
||||
#assert g.encoders[m.int32].isActive
|
||||
assert g.packed[m.int32].status != stored
|
||||
g.packed[m.int32].fromDisk.emittedTypeInfo.add ti
|
||||
#echo "added typeinfo ", m.int32, " ", ti, " suspicious ", not g.encoders[m.int32].isActive
|
||||
|
||||
proc rememberFlag*(g: ModuleGraph; m: PSym; flag: ModuleBackendFlag) =
|
||||
if g.config.symbolFiles != disabledSf:
|
||||
#assert g.encoders[m.int32].isActive
|
||||
assert g.packed[m.position].status != stored
|
||||
g.packed[m.position].fromDisk.backendFlags.incl flag
|
||||
|
||||
proc closeRodFile*(g: ModuleGraph; m: PSym) =
|
||||
if g.config.symbolFiles in {readOnlySf, v2Sf}:
|
||||
# For stress testing we seek to reload the symbols from memory. This
|
||||
# way much of the logic is tested but the test is reproducible as it does
|
||||
# not depend on the hard disk contents!
|
||||
let mint = m.position
|
||||
saveRodFile(toRodFile(g.config, AbsoluteFile toFullPath(g.config, FileIndex(mint))),
|
||||
g.encoders[mint], g.packed[mint].fromDisk)
|
||||
g.packed[mint].status = stored
|
||||
|
||||
elif g.config.symbolFiles == stressTest:
|
||||
# debug code, but maybe a good idea for production? Could reduce the compiler's
|
||||
# memory consumption considerably at the cost of more loads from disk.
|
||||
let mint = m.position
|
||||
simulateCachedModule(g, m, g.packed[mint].fromDisk)
|
||||
g.packed[mint].status = loaded
|
||||
result = true
|
||||
|
||||
proc dependsOn(a, b: int): int {.inline.} = (a shl 15) + b
|
||||
|
||||
@@ -800,19 +648,8 @@ proc needsCompilation*(g: ModuleGraph, fileIdx: FileIndex): bool =
|
||||
|
||||
proc getBody*(g: ModuleGraph; s: PSym): PNode {.inline.} =
|
||||
result = s.ast[bodyPos]
|
||||
if result == nil and g.config.symbolFiles in {readOnlySf, v2Sf, stressTest}:
|
||||
result = loadProcBody(g.config, g.cache, g.packed, s)
|
||||
s.ast[bodyPos] = result
|
||||
assert result != nil
|
||||
|
||||
proc moduleFromRodFile*(g: ModuleGraph; fileIdx: FileIndex;
|
||||
cachedModules: var seq[FileIndex]): PSym =
|
||||
## Returns 'nil' if the module needs to be recompiled.
|
||||
if g.config.symbolFiles in {readOnlySf, v2Sf, stressTest}:
|
||||
result = moduleFromRodFile(g.packed, g.config, g.cache, fileIdx, cachedModules)
|
||||
else:
|
||||
result = nil
|
||||
|
||||
when not defined(nimKochBootstrap):
|
||||
proc moduleFromNifFile*(g: ModuleGraph; fileIdx: FileIndex;
|
||||
flags: set[LoadFlag] = {}): PrecompiledModule =
|
||||
@@ -846,7 +683,7 @@ when not defined(nimKochBootstrap):
|
||||
of HookEntry:
|
||||
g.loadedOps[x.op][x.key] = x.sym
|
||||
of ConverterEntry:
|
||||
g.ifaces[fileIdx.int].converters.add LazySym(sym: x.sym)
|
||||
g.ifaces[fileIdx.int].converters.add x.sym
|
||||
of MethodEntry:
|
||||
discard "todo"
|
||||
of EnumToStrEntry:
|
||||
@@ -857,9 +694,10 @@ when not defined(nimKochBootstrap):
|
||||
discard "todo"
|
||||
|
||||
proc configComplete*(g: ModuleGraph) =
|
||||
rememberStartupConfig(g.startupPackedConfig, g.config)
|
||||
#rememberStartupConfig(g.startupPackedConfig, g.config)
|
||||
discard
|
||||
|
||||
proc onProcessing*(graph: ModuleGraph, fileIdx: FileIndex, moduleStatus: string, fromModule: PSym, ) =
|
||||
proc onProcessing*(graph: ModuleGraph, fileIdx: FileIndex, moduleStatus: string, fromModule: PSym) =
|
||||
let conf = graph.config
|
||||
let isNimscript = conf.isDefined("nimscript")
|
||||
if (not isNimscript) or hintProcessing in conf.cmdlineNotes:
|
||||
|
||||
@@ -156,7 +156,6 @@ type
|
||||
cmdCheck # semantic checking for whole project
|
||||
cmdM # only compile a single
|
||||
cmdParse # parse a single file (for debugging)
|
||||
cmdRod # .rod to some text representation (for debugging)
|
||||
cmdIdeTools # ide tools (e.g. nimsuggest)
|
||||
cmdNimscript # evaluate nimscript
|
||||
cmdDoc0
|
||||
|
||||
@@ -148,11 +148,6 @@ proc processModule*(graph: ModuleGraph; module: PSym; idgen: IdGenerator;
|
||||
closeParser(p)
|
||||
if s.kind != llsStdIn: break
|
||||
closePasses(graph, a)
|
||||
if graph.config.backend notin {backendC, backendCpp, backendObjc}:
|
||||
# We only write rod files here if no C-like backend is active.
|
||||
# The C-like backends have been patched to support the IC mechanism.
|
||||
# They are responsible for closing the rod files. See `cbackend.nim`.
|
||||
closeRodFile(graph, module)
|
||||
result = true
|
||||
|
||||
proc compileModule*(graph: ModuleGraph; fileIdx: FileIndex; flags: TSymFlags, fromModule: PSym = nil): PSym =
|
||||
@@ -168,22 +163,10 @@ proc compileModule*(graph: ModuleGraph; fileIdx: FileIndex; flags: TSymFlags, fr
|
||||
elif graph.config.projectIsCmd: s = llStreamOpen(graph.config.cmdInput)
|
||||
discard processModule(graph, result, idGeneratorFromModule(result), s)
|
||||
if result == nil:
|
||||
var cachedModules: seq[FileIndex] = @[]
|
||||
result = moduleFromRodFile(graph, fileIdx, cachedModules)
|
||||
let filename = AbsoluteFile toFullPath(graph.config, fileIdx)
|
||||
if result == nil:
|
||||
result = newModule(graph, fileIdx)
|
||||
result.incl flags
|
||||
registerModule(graph, result)
|
||||
processModuleAux("import")
|
||||
else:
|
||||
if sfSystemModule in flags:
|
||||
graph.systemModule = result
|
||||
partialInitModule(result, graph, fileIdx, filename)
|
||||
for m in cachedModules:
|
||||
registerModuleById(graph, m)
|
||||
replayStateChanges(graph.packed.pm[m.int].module, graph)
|
||||
replayGenericCacheInformation(graph, m.int)
|
||||
result = newModule(graph, fileIdx)
|
||||
result.incl flags
|
||||
registerModule(graph, result)
|
||||
processModuleAux("import")
|
||||
elif graph.isDirty(result):
|
||||
result.excl sfDirty
|
||||
# reset module fields:
|
||||
|
||||
@@ -261,12 +261,6 @@ proc processPipelineModule*(graph: ModuleGraph; module: PSym; idgen: IdGenerator
|
||||
|
||||
writeNifModule(graph.config, module.position.int32, topLevelStmts, graph.opsLog, replayActions)
|
||||
|
||||
if graph.config.backend notin {backendC, backendCpp, backendObjc} and graph.config.cmd != cmdM:
|
||||
# We only write rod files here if no C-like backend is active.
|
||||
# The C-like backends have been patched to support the IC mechanism.
|
||||
# They are responsible for closing the rod files. See `cbackend.nim`.
|
||||
# cmdM uses NIF files only, not ROD files.
|
||||
closeRodFile(graph, module)
|
||||
result = true
|
||||
|
||||
proc compilePipelineModule*(graph: ModuleGraph; fileIdx: FileIndex; flags: TSymFlags; fromModule: PSym = nil): PSym =
|
||||
@@ -282,7 +276,6 @@ proc compilePipelineModule*(graph: ModuleGraph; fileIdx: FileIndex; flags: TSymF
|
||||
elif graph.config.projectIsCmd: s = llStreamOpen(graph.config.cmdInput)
|
||||
discard processPipelineModule(graph, result, idGeneratorFromModule(result), s)
|
||||
if result == nil:
|
||||
var cachedModules: seq[FileIndex] = @[]
|
||||
when not defined(nimKochBootstrap):
|
||||
# For cmdM: load imports from NIF files (but compile the main module from source)
|
||||
# Skip when withinSystem is true (compiling system.nim itself)
|
||||
@@ -307,9 +300,6 @@ proc compilePipelineModule*(graph: ModuleGraph; fileIdx: FileIndex; flags: TSymF
|
||||
if result.ast != nil:
|
||||
replayStateChanges(result, graph)
|
||||
return result # Return early, don't process from source
|
||||
if result == nil and graph.config.cmd != cmdM:
|
||||
# Fall back to ROD file loading (not used for cmdM which uses NIF only)
|
||||
result = moduleFromRodFile(graph, fileIdx, cachedModules)
|
||||
let path = toFullPath(graph.config, fileIdx)
|
||||
let filename = AbsoluteFile path
|
||||
# it could be a stdinfile/cmdfile
|
||||
@@ -328,16 +318,6 @@ proc compilePipelineModule*(graph: ModuleGraph; fileIdx: FileIndex; flags: TSymF
|
||||
registerModule(graph, result)
|
||||
processModuleAux("import")
|
||||
partialInitModule(result, graph, fileIdx, filename)
|
||||
for m in cachedModules:
|
||||
registerModuleById(graph, m)
|
||||
if graph.config.cmd == cmdM:
|
||||
# cmdM uses NIF files - replay from module AST loaded by loadNifModule
|
||||
let module = graph.getModule(m)
|
||||
if module != nil and module.ast != nil:
|
||||
replayStateChanges(module, graph)
|
||||
else:
|
||||
replayStateChanges(graph.packed.pm[m.int].module, graph)
|
||||
replayGenericCacheInformation(graph, m.int)
|
||||
elif graph.isDirty(result):
|
||||
result.excl sfDirty
|
||||
# reset module fields:
|
||||
@@ -397,7 +377,6 @@ proc compilePipelineProject*(graph: ModuleGraph; projectFileIdx = InvalidFileIdx
|
||||
connectPipelineCallbacks(graph)
|
||||
graph.config.m.systemFileIdx = fileInfoIdx(graph.config,
|
||||
graph.config.libpath / RelativeFile"system.nim")
|
||||
var cachedModules: seq[FileIndex] = @[]
|
||||
when not defined(nimKochBootstrap):
|
||||
let precomp = moduleFromNifFile(graph, graph.config.m.systemFileIdx)
|
||||
graph.systemModule = precomp.module
|
||||
|
||||
@@ -21,8 +21,6 @@ import std/[os, math, strutils]
|
||||
when defined(nimPreviewSlimSystem):
|
||||
import std/assertions
|
||||
|
||||
from ic / ic import addCompilerProc
|
||||
|
||||
const
|
||||
FirstCallConv* = wNimcall
|
||||
LastCallConv* = wNoconv
|
||||
@@ -767,8 +765,6 @@ proc markCompilerProc(c: PContext; s: PSym) =
|
||||
incl(s, sfCompilerProc)
|
||||
incl(s.flagsImpl, sfUsed)
|
||||
registerCompilerProc(c.graph, s)
|
||||
if c.config.symbolFiles != disabledSf:
|
||||
addCompilerProc(c.encoder, c.packedRepr, s)
|
||||
|
||||
proc deprecatedStmt(c: PContext; outerPragma: PNode) =
|
||||
let pragma = outerPragma[1]
|
||||
|
||||
@@ -889,8 +889,6 @@ proc semWithPContext*(c: PContext, n: PNode): PNode =
|
||||
else:
|
||||
result = newNodeI(nkEmpty, n.info)
|
||||
#if c.config.cmd == cmdIdeTools: findSuggest(c, n)
|
||||
storeRodNode(c, result)
|
||||
|
||||
|
||||
proc reportUnusedModules(c: PContext) =
|
||||
if c.config.cmd == cmdM: return
|
||||
|
||||
@@ -19,8 +19,6 @@ import
|
||||
magicsys, vmdef, modulegraphs, lineinfos, pathutils, layeredtable,
|
||||
types, lowerings, trees, parampatterns, astalgo
|
||||
|
||||
import ic / ic
|
||||
|
||||
type
|
||||
TOptionEntry* = object # entries to put on a stack for pragma parsing
|
||||
options*: TOptions
|
||||
@@ -336,28 +334,14 @@ proc newContext*(graph: ModuleGraph; module: PSym): PContext =
|
||||
signatures: initStrTable(),
|
||||
features: graph.config.features
|
||||
)
|
||||
if graph.config.symbolFiles != disabledSf:
|
||||
let id = module.position
|
||||
if graph.config.cmd != cmdM:
|
||||
assert graph.packed[id].status in {undefined, outdated}
|
||||
graph.packed[id].status = storing
|
||||
graph.packed[id].module = module
|
||||
initEncoder graph, module
|
||||
|
||||
template packedRepr*(c): untyped = c.graph.packed[c.module.position].fromDisk
|
||||
template encoder*(c): untyped = c.graph.encoders[c.module.position]
|
||||
|
||||
proc addIncludeFileDep*(c: PContext; f: FileIndex) =
|
||||
if c.config.symbolFiles != disabledSf:
|
||||
addIncludeFileDep(c.encoder, c.packedRepr, f)
|
||||
discard
|
||||
|
||||
proc addImportFileDep*(c: PContext; f: FileIndex) =
|
||||
if c.config.symbolFiles != disabledSf:
|
||||
addImportFileDep(c.encoder, c.packedRepr, f)
|
||||
discard
|
||||
|
||||
proc addPragmaComputation*(c: PContext; n: PNode) =
|
||||
if c.config.symbolFiles != disabledSf:
|
||||
addPragmaComputation(c.encoder, c.packedRepr, n)
|
||||
# Also store for NIF-based IC (cmdM mode or optCompress)
|
||||
if optCompress in c.config.globalOptions or c.config.cmd == cmdM:
|
||||
addNifReplayAction(c.graph, c.module.position.int32, n)
|
||||
@@ -368,38 +352,28 @@ proc inclSym(sq: var seq[PSym], s: PSym): bool =
|
||||
sq.add s
|
||||
result = true
|
||||
|
||||
proc addConverter*(c: PContext, conv: LazySym) =
|
||||
assert conv.sym != nil
|
||||
if inclSym(c.converters, conv.sym):
|
||||
proc addConverter*(c: PContext, conv: PSym) =
|
||||
assert conv != nil
|
||||
if inclSym(c.converters, conv):
|
||||
add(c.graph.ifaces[c.module.position].converters, conv)
|
||||
|
||||
proc addConverterDef*(c: PContext, conv: LazySym) =
|
||||
proc addConverterDef*(c: PContext, conv: PSym) =
|
||||
addConverter(c, conv)
|
||||
if c.config.symbolFiles != disabledSf:
|
||||
addConverter(c.encoder, c.packedRepr, conv.sym)
|
||||
|
||||
proc addPureEnum*(c: PContext, e: LazySym) =
|
||||
assert e.sym != nil
|
||||
proc addPureEnum*(c: PContext, e: PSym) =
|
||||
assert e != nil
|
||||
add(c.graph.ifaces[c.module.position].pureEnums, e)
|
||||
if c.config.symbolFiles != disabledSf:
|
||||
addPureEnum(c.encoder, c.packedRepr, e.sym)
|
||||
|
||||
proc addPattern*(c: PContext, p: LazySym) =
|
||||
assert p.sym != nil
|
||||
if inclSym(c.patterns, p.sym):
|
||||
proc addPattern*(c: PContext, p: PSym) =
|
||||
assert p != nil
|
||||
if inclSym(c.patterns, p):
|
||||
add(c.graph.ifaces[c.module.position].patterns, p)
|
||||
if c.config.symbolFiles != disabledSf:
|
||||
addTrmacro(c.encoder, c.packedRepr, p.sym)
|
||||
|
||||
proc exportSym*(c: PContext; s: PSym) =
|
||||
strTableAdds(c.graph, c.module, s)
|
||||
if c.config.symbolFiles != disabledSf:
|
||||
addExported(c.encoder, c.packedRepr, s)
|
||||
|
||||
proc reexportSym*(c: PContext; s: PSym) =
|
||||
strTableAdds(c.graph, c.module, s)
|
||||
if c.config.symbolFiles != disabledSf:
|
||||
addReexport(c.encoder, c.packedRepr, s)
|
||||
|
||||
proc newLib*(kind: TLibKind): PLib =
|
||||
result = PLib(kind: kind) #result.syms = initObjectSet()
|
||||
@@ -614,19 +588,11 @@ template addExport*(c: PContext; s: PSym) =
|
||||
## convenience to export a symbol from the current module
|
||||
addExport(c.graph, c.module, s)
|
||||
|
||||
proc storeRodNode*(c: PContext, n: PNode) =
|
||||
if c.config.symbolFiles != disabledSf:
|
||||
toPackedNodeTopLevel(n, c.encoder, c.packedRepr)
|
||||
|
||||
proc addToGenericProcCache*(c: PContext; s: PSym; inst: PInstantiation) =
|
||||
c.graph.procInstCache.mgetOrPut(s.itemId, @[]).add LazyInstantiation(module: c.module.position, inst: inst)
|
||||
if c.config.symbolFiles != disabledSf:
|
||||
storeInstantiation(c.encoder, c.packedRepr, s, inst)
|
||||
c.graph.procInstCache.mgetOrPut(s.itemId, @[]).add inst
|
||||
|
||||
proc addToGenericCache*(c: PContext; s: PSym; inst: PType) =
|
||||
c.graph.typeInstCache.mgetOrPut(s.itemId, @[]).add LazyType(typ: inst)
|
||||
if c.config.symbolFiles != disabledSf:
|
||||
storeTypeInst(c.encoder, c.packedRepr, s, inst)
|
||||
c.graph.typeInstCache.mgetOrPut(s.itemId, @[]).add inst
|
||||
|
||||
proc sealRodFile*(c: PContext) =
|
||||
if c.config.symbolFiles != disabledSf:
|
||||
@@ -642,9 +608,8 @@ proc rememberExpansion*(c: PContext; info: TLineInfo; expandedSym: PSym) =
|
||||
## in the sem'checked AST. This is very bad for IDE-like tooling
|
||||
## ("find all usages of this template" would not work). We need special
|
||||
## logic to remember macro/template expansions. This is done here and
|
||||
## delegated to the "rod" file mechanism.
|
||||
if c.config.symbolFiles != disabledSf:
|
||||
storeExpansion(c.encoder, c.packedRepr, info, expandedSym)
|
||||
## delegated to the "NIF" file mechanism.
|
||||
discard "XXX To implement"
|
||||
|
||||
const
|
||||
errVarForOutParamNeededX = "for a 'var' type a variable needs to be passed; but '$1' is immutable"
|
||||
|
||||
@@ -3013,9 +3013,9 @@ proc semExportExcept(c: PContext, n: PNode): PNode =
|
||||
|
||||
proc semExport(c: PContext, n: PNode): PNode =
|
||||
proc specialSyms(c: PContext; s: PSym) {.inline.} =
|
||||
if s.kind == skConverter: addConverter(c, LazySym(sym: s))
|
||||
if s.kind == skConverter: addConverter(c, s)
|
||||
elif s.kind == skType and s.typ != nil and s.typ.kind == tyEnum and sfPure in s.flags:
|
||||
addPureEnum(c, LazySym(sym: s))
|
||||
addPureEnum(c, s)
|
||||
|
||||
result = newNodeI(nkExportStmt, n.info)
|
||||
for i in 0..<n.len:
|
||||
|
||||
@@ -2783,7 +2783,7 @@ proc semConverterDef(c: PContext, n: PNode): PNode =
|
||||
var t = s.typ
|
||||
if t.returnType == nil: localError(c.config, n.info, errXNeedsReturnType % "converter")
|
||||
if t.len != 2: localError(c.config, n.info, "a converter takes exactly one argument")
|
||||
addConverterDef(c, LazySym(sym: s))
|
||||
addConverterDef(c, s)
|
||||
|
||||
proc semMacroDef(c: PContext, n: PNode): PNode =
|
||||
result = semProcAux(c, n, skMacro, macroPragmas)
|
||||
|
||||
@@ -925,4 +925,4 @@ proc semPattern(c: PContext, n: PNode; s: PSym): PNode =
|
||||
elif result.len == 0:
|
||||
localError(c.config, n.info, "a pattern cannot be empty")
|
||||
closeScope(c)
|
||||
addPattern(c, LazySym(sym: s))
|
||||
addPattern(c, s)
|
||||
|
||||
@@ -210,7 +210,7 @@ proc semEnum(c: PContext, n: PNode, prev: PType): PType =
|
||||
)
|
||||
|
||||
if isPure and sfExported in result.sym.flags:
|
||||
addPureEnum(c, LazySym(sym: result.sym))
|
||||
addPureEnum(c, result.sym)
|
||||
if tfNotNil in e.typ.flags and not hasNull:
|
||||
result.incl tfRequiresInit
|
||||
setToStringProc(c.graph, result, genEnumToStrProc(result, n.info, c.graph, c.idgen))
|
||||
|
||||
@@ -82,7 +82,7 @@ proc containGenerics(base: PType, s: seq[tuple[depth: int, value: PType]]): bool
|
||||
break
|
||||
|
||||
proc collectVTableDispatchers*(g: ModuleGraph) =
|
||||
var itemTable = initTable[ItemId, seq[LazySym]]()
|
||||
var itemTable = initTable[ItemId, seq[PSym]]()
|
||||
var rootTypeSeq = newSeq[PType]()
|
||||
var rootItemIdCount = initCountTable[ItemId]()
|
||||
for bucket in 0..<g.methods.len:
|
||||
@@ -95,7 +95,7 @@ proc collectVTableDispatchers*(g: ModuleGraph) =
|
||||
let methodIndexLen = g.bucketTable[baseType.itemId]
|
||||
if baseType.itemId notin itemTable: # once is enough
|
||||
rootTypeSeq.add baseType
|
||||
itemTable[baseType.itemId] = newSeq[LazySym](methodIndexLen)
|
||||
itemTable[baseType.itemId] = newSeq[PSym](methodIndexLen)
|
||||
|
||||
sort(g.objectTree[baseType.itemId], cmp = proc (x, y: tuple[depth: int, value: PType]): int =
|
||||
if x.depth >= y.depth: 1
|
||||
@@ -104,7 +104,7 @@ proc collectVTableDispatchers*(g: ModuleGraph) =
|
||||
|
||||
for item in g.objectTree[baseType.itemId]:
|
||||
if item.value.itemId notin itemTable:
|
||||
itemTable[item.value.itemId] = newSeq[LazySym](methodIndexLen)
|
||||
itemTable[item.value.itemId] = newSeq[PSym](methodIndexLen)
|
||||
|
||||
var mIndex = 0 # here is the correpsonding index
|
||||
if baseType.itemId notin rootItemIdCount:
|
||||
@@ -114,13 +114,13 @@ proc collectVTableDispatchers*(g: ModuleGraph) =
|
||||
rootItemIdCount.inc(baseType.itemId)
|
||||
for idx in 0..<g.methods[bucket].methods.len:
|
||||
let obj = g.methods[bucket].methods[idx].typ.firstParamType.skipTypes(skipPtrs)
|
||||
itemTable[obj.itemId][mIndex] = LazySym(sym: g.methods[bucket].methods[idx])
|
||||
itemTable[obj.itemId][mIndex] = g.methods[bucket].methods[idx]
|
||||
g.addDispatchers genVTableDispatcher(g, g.methods[bucket].methods, mIndex)
|
||||
else: # if the base object doesn't have this method
|
||||
g.addDispatchers genIfDispatcher(g, g.methods[bucket].methods, relevantCols, g.idgen)
|
||||
|
||||
proc sortVTableDispatchers*(g: ModuleGraph) =
|
||||
var itemTable = initTable[ItemId, seq[LazySym]]()
|
||||
var itemTable = initTable[ItemId, seq[PSym]]()
|
||||
var rootTypeSeq = newSeq[ItemId]()
|
||||
var rootItemIdCount = initCountTable[ItemId]()
|
||||
for bucket in 0..<g.methods.len:
|
||||
@@ -133,7 +133,7 @@ proc sortVTableDispatchers*(g: ModuleGraph) =
|
||||
let methodIndexLen = g.bucketTable[baseType.itemId]
|
||||
if baseType.itemId notin itemTable: # once is enough
|
||||
rootTypeSeq.add baseType.itemId
|
||||
itemTable[baseType.itemId] = newSeq[LazySym](methodIndexLen)
|
||||
itemTable[baseType.itemId] = newSeq[PSym](methodIndexLen)
|
||||
|
||||
sort(g.objectTree[baseType.itemId], cmp = proc (x, y: tuple[depth: int, value: PType]): int =
|
||||
if x.depth >= y.depth: 1
|
||||
@@ -142,7 +142,7 @@ proc sortVTableDispatchers*(g: ModuleGraph) =
|
||||
|
||||
for item in g.objectTree[baseType.itemId]:
|
||||
if item.value.itemId notin itemTable:
|
||||
itemTable[item.value.itemId] = newSeq[LazySym](methodIndexLen)
|
||||
itemTable[item.value.itemId] = newSeq[PSym](methodIndexLen)
|
||||
|
||||
var mIndex = 0 # here is the correpsonding index
|
||||
if baseType.itemId notin rootItemIdCount:
|
||||
@@ -152,7 +152,7 @@ proc sortVTableDispatchers*(g: ModuleGraph) =
|
||||
rootItemIdCount.inc(baseType.itemId)
|
||||
for idx in 0..<g.methods[bucket].methods.len:
|
||||
let obj = g.methods[bucket].methods[idx].typ.firstParamType.skipTypes(skipPtrs)
|
||||
itemTable[obj.itemId][mIndex] = LazySym(sym: g.methods[bucket].methods[idx])
|
||||
itemTable[obj.itemId][mIndex] = g.methods[bucket].methods[idx]
|
||||
|
||||
for baseType in rootTypeSeq:
|
||||
g.setMethodsPerType(baseType, itemTable[baseType])
|
||||
@@ -160,7 +160,7 @@ proc sortVTableDispatchers*(g: ModuleGraph) =
|
||||
let typ = item.value.skipTypes(skipPtrs)
|
||||
let idx = typ.itemId
|
||||
for mIndex in 0..<itemTable[idx].len:
|
||||
if itemTable[idx][mIndex].sym == nil:
|
||||
if itemTable[idx][mIndex] == nil:
|
||||
let parentIndex = typ.baseClass.skipTypes(skipPtrs).itemId
|
||||
itemTable[idx][mIndex] = itemTable[parentIndex][mIndex]
|
||||
g.setMethodsPerType(idx, itemTable[idx])
|
||||
|
||||
181
doc/ic.md
Normal file
181
doc/ic.md
Normal file
@@ -0,0 +1,181 @@
|
||||
======================================
|
||||
Incremental Compilation (IC)
|
||||
======================================
|
||||
|
||||
The ``nim ic`` command provides incremental compilation support for Nim projects,
|
||||
allowing faster rebuilds by reusing previously compiled intermediate representations
|
||||
of modules that haven't changed.
|
||||
|
||||
Overview
|
||||
========
|
||||
|
||||
Incremental compilation works by decomposing the compilation process into several stages:
|
||||
|
||||
1. **Parsing** - Source files are parsed into an abstract syntax tree (AST)
|
||||
2. **Semantic Analysis** - Symbols are resolved and type checking is performed
|
||||
3. **Code Generation** - Platform-specific code is generated from the analyzed AST
|
||||
4. **Linking** - The generated code is linked into an executable
|
||||
|
||||
The IC mechanism caches the results of earlier stages in ``.nif`` files
|
||||
(Nim frontend intermediate format). When recompiling, only modules that have
|
||||
changed need to be reprocessed through the semantic analysis and code generation
|
||||
stages, significantly reducing compilation time for large projects.
|
||||
|
||||
NIF File Format
|
||||
===============
|
||||
|
||||
NIF (Nim Frontend Intermediate Format) files are text-based files that use a Lisp-like
|
||||
syntax. They employ a hybrid format where byte offsets into the text are used for
|
||||
efficient access, making them simultaneously human-readable and machine-efficient.
|
||||
The text representation is particularly valuable for debugging and introspection.
|
||||
|
||||
Each ``.nim`` module produces its own ``.nif`` file during compilation.
|
||||
The NIF format contains:
|
||||
|
||||
- **Header** - Version information (e.g., `(.nif24)`)
|
||||
- **Dependencies** - List of source file checksums and their dependencies
|
||||
- **Interface** - Exported symbols and their indices
|
||||
- **Body** - The intermediate representation of the module's code in Lisp-like syntax
|
||||
|
||||
The NIF format is designed specifically for Nim and allows efficient serialization
|
||||
and deserialization of the compiler's intermediate representation while remaining
|
||||
readable and debuggable by tools and developers.
|
||||
|
||||
The ``nim ic`` Switch
|
||||
=====================
|
||||
|
||||
The ``nim ic`` command initiates incremental compilation for a project.
|
||||
It automatically manages the build process by:
|
||||
|
||||
1. Parsing all source files into ``.nif`` format (using the ``nifler`` tool)
|
||||
2. Performing semantic analysis on modified modules
|
||||
3. Generating code only for modules with changes or dependencies on changed modules
|
||||
4. Generating a build file (in NIFMake format) that orchestrates the compilation
|
||||
5. Executing the build file through ``nifmake``
|
||||
|
||||
Prerequisites
|
||||
-------------
|
||||
|
||||
- **nifler** - Tool for parsing Nim source files into NIF format
|
||||
- **nifmake** - Build orchestration tool that follows dependencies
|
||||
|
||||
If these tools are not available, ``nim ic`` will display instructions on how to
|
||||
obtain them.
|
||||
|
||||
Key Modules for IC Logic
|
||||
=========================
|
||||
|
||||
The primary modules in the compiler that handle incremental compilation logic are:
|
||||
|
||||
- **deps.nim** - Dependency analysis and build file generation. Contains the
|
||||
``commandIc`` procedure which is the main entry point for the ``nim ic`` command.
|
||||
This module orchestrates the incremental compilation process, handling NIF generation
|
||||
and build file creation.
|
||||
|
||||
- **ic.nim** - Core incremental compilation module handling the main IC logic,
|
||||
module caching, and NIF serialization/deserialization.
|
||||
|
||||
Additionally, various utility modules in the ``compiler/ic/`` directory support
|
||||
the IC infrastructure for handling NIF data structures, line information mapping,
|
||||
and state replay for pragmas and VM-specific compilations.
|
||||
|
||||
Caching and Consistency
|
||||
=======================
|
||||
|
||||
The IC system ensures correctness through several mechanisms:
|
||||
|
||||
- **Dependency Tracking** - Every module's dependencies are recorded and their
|
||||
checksums stored in the NIF file
|
||||
|
||||
- **Configuration Hashing** - The compiler configuration (options, GC mode, backend)
|
||||
is hashed and stored, invalidating caches when configuration changes
|
||||
|
||||
- **Atomic Operations** - By construction, either a `.nif` file is completely
|
||||
read or completely written, preventing partial/inconsistent updates
|
||||
|
||||
- **No Global State** - Each module's IC cache is independent, avoiding
|
||||
complex global state synchronization issues
|
||||
|
||||
**Code, Logic & Debugging**
|
||||
===========================
|
||||
|
||||
This section focuses on the compiler-side code paths, the logic you will
|
||||
inspect while debugging IC, and a pragmatic manual workflow for bug hunting
|
||||
using local invocations such as ``nim m --nimcache:nifcache``.
|
||||
|
||||
Core places to inspect
|
||||
- **`compiler/deps.nim`**: generates the NIF-based build file and implements
|
||||
``commandIc`` (entry point for ``nim ic``). Look for how build rules are
|
||||
emitted (calls to the NIF builder) and how inputs/outputs are wired.
|
||||
- **`compiler/modulegraphs.nim`** and **`compiler/pipelines.nim`**:
|
||||
dependency graph and compilation pipeline integration — useful when a module
|
||||
is rebuilt unexpectedly.
|
||||
|
||||
Understanding the NIF text
|
||||
- NIF files are human-readable; open the per-module ``.nif`` files in
|
||||
``nifcache/`` to inspect parsed ASTs, dependency lists and interface tables.
|
||||
- Because NIF uses textual nodes and byte offsets, tools can quickly seek to
|
||||
positions in the file — but for debugging you usually only need to read the
|
||||
file top-to-bottom.
|
||||
|
||||
Manual bug-hunting workflow
|
||||
- Prepare a clean nimcache directory (relative to your project):
|
||||
|
||||
```bash
|
||||
mkdir -p nifcache
|
||||
```
|
||||
|
||||
- Parse/semantic-check a single module and write NIF/sem artifacts:
|
||||
|
||||
```bash
|
||||
nim m --nimcache:nifcache path/to/module.nim
|
||||
```
|
||||
|
||||
- ``nim m`` runs the compiler up to the semantic checking stage for the
|
||||
specified module and emits intermediate cache files into ``nifcache/``.
|
||||
- Use this to reproduce and isolate failures in the semantic stage.
|
||||
|
||||
- Inspect the generated files for that module under ``nifcache/`` (look for
|
||||
``.nif``, sem/parsed artifacts). Because NIF is text-based you can open and
|
||||
grep it directly:
|
||||
|
||||
```bash
|
||||
sed -n '1,200p' nifcache/ModuleName.nif
|
||||
grep -n "someSymbol" -n nifcache/ModuleName.nif
|
||||
```
|
||||
|
||||
- To reproduce a full incremental compilation of the project, generate the
|
||||
build file and run it (``nim ic`` automates this). To debug an individual
|
||||
build step, run the command that the build file would execute manually
|
||||
(for example, the semantic step uses ``nim m``; code generation uses ``nim nifc``).
|
||||
|
||||
- Force a cache invalidation for a single module by removing its NIF/sem
|
||||
artifact and re-running the semantic step:
|
||||
|
||||
```bash
|
||||
rm nifcache/ModuleName.nif
|
||||
nim m --nimcache:nifcache path/to/ModuleName.nim
|
||||
```
|
||||
|
||||
- When investigating incorrect replayed state (pragmas, `{.compile: ...}`):
|
||||
inspect the replay actions in ``compiler/ic/replayer.nim`` and open the
|
||||
module's NIF to find the ``toReplay``/action entries that will be executed
|
||||
during reload.
|
||||
|
||||
Tips for efficient debugging
|
||||
- Use ``--path:...`` flags when invoking ``nim m`` to emulate the exact
|
||||
search paths used in your project, e.g. ``--path:lib --path:vendor``.
|
||||
- Compare two successive ``.nif`` files with ``diff`` to see what changed and
|
||||
why a module was rebuilt.
|
||||
|
||||
Where to change behavior
|
||||
- Cache invalidation decisions and build-rule emission are implemented in
|
||||
``compiler/deps.nim``. When investigating surprising
|
||||
rebuilds, instrument those modules to log the footprint/hash/comparison
|
||||
outcome.
|
||||
|
||||
See also
|
||||
========
|
||||
|
||||
- `nif-spec` - NIF format specification (text format and node grammar):
|
||||
[nifspec/doc/nif-spec.md](../nifspec/doc/nif-spec.md)
|
||||
6
koch.nim
6
koch.nim
@@ -16,7 +16,7 @@ const
|
||||
ChecksumsStableCommit = "0b8e46379c5bc1bf73d8b3011908389c60fb9b98" # 2.0.1
|
||||
SatStableCommit = "faf1617f44d7632ee9601ebc13887644925dcc01"
|
||||
|
||||
NimonyStableCommit = "fc8baa61b9911caf4666685a5f5ed41b9c04f6f8" # unversioned \
|
||||
NimonyStableCommit = "deb9b50c573fb55e071825ab55385e293b7216d5" # unversioned \
|
||||
# Note that Nimony uses Nim as a git submodule but we don't want to install
|
||||
# Nimony's dependency to Nim as we are Nim. So a `git clone` without --recursive
|
||||
# is **required** here.
|
||||
@@ -558,9 +558,7 @@ proc icTest(args: string) =
|
||||
for fragment in content.split("#!EDIT!#"):
|
||||
let file = inp.replace(".nim", "_temp.nim")
|
||||
writeFile(file, fragment)
|
||||
var cmd = nimExe & " cpp --ic:legacy -d:nimIcIntegrityChecks --listcmd "
|
||||
if i == 0:
|
||||
cmd.add "-f "
|
||||
var cmd = nimExe & " ic --hint:Conf:off --warnings:off "
|
||||
cmd.add quoteShell(file)
|
||||
exec(cmd)
|
||||
inc i
|
||||
|
||||
@@ -237,7 +237,7 @@ proc clearInstCache(graph: ModuleGraph, projectFileIdx: FileIndex) =
|
||||
for tbl in mitems(graph.attachedOps):
|
||||
var attachedOpsToDelete = newSeq[ItemId]()
|
||||
for id in tbl.keys:
|
||||
if id.module == projectFileIdx.int and sfOverridden in resolveAttachedOp(graph, tbl[id]).flags:
|
||||
if id.module == projectFileIdx.int and sfOverridden in tbl[id].flags:
|
||||
attachedOpsToDelete.add id
|
||||
for id in attachedOpsToDelete:
|
||||
tbl.del id
|
||||
|
||||
Reference in New Issue
Block a user