Mirror of https://github.com/nim-lang/Nim.git (synced 2025-12-28 08:54:53 +00:00)
IC: next steps (#16632)
* removed dead code
* beginnings of a rodfile reader
* IC: record global VM state changes and pragma state changes
* IC: replay pragmas and VM state changes
* implemented rod load file simulation for easier, extensive testing
* critical bugfix
* IC: stress test logic; should also help with recursive module dependencies; WIP
* IC: loading from .rod files begins to work reliably
* removed ugly hacks
* yet another silly mistake
@@ -221,6 +221,8 @@ type
    nkBreakState,         # special break statement for easier code generation
    nkFuncDef,            # a func
    nkTupleConstr         # a tuple constructor
    nkModuleRef           # for .rod file support: A (moduleId, itemId) pair
    nkReplayAction        # for .rod file support: A replay action

  TNodeKinds* = set[TNodeKind]

@@ -1139,6 +1141,9 @@ proc add*(father, son: Indexable) =
  if isNil(father.sons): father.sons = @[]
  father.sons.add(son)

proc addAllowNil*(father, son: Indexable) {.inline.} =
  father.sons.add(son)

template `[]`*(n: Indexable, i: int): Indexable = n.sons[i]
template `[]=`*(n: Indexable, i: int; x: Indexable) = n.sons[i] = x
@@ -1,421 +0,0 @@
#
#
#           The Nim Compiler
#        (c) Copyright 2015 Andreas Rumpf
#
#    See the file "copying.txt", included in this
#    distribution, for details about the copyright.
#

## This module implements the canonalization for the various caching mechanisms.

import strutils, db_sqlite, md5

var db: DbConn

# We *hash* the relevant information into 128 bit hashes. This should be good
# enough to prevent any collisions.

type
  TUid = distinct MD5Digest

# For name mangling we encode these hashes via a variant of base64 (called
# 'base64a') and prepend the *primary* identifier to ease the debugging pain.
# So a signature like:
#
#   proc gABI(c: PCtx; n: PNode; opc: TOpcode; a, b: TRegister; imm: BiggestInt)
#
# is mangled into:
#   gABI_MTdmOWY5MTQ1MDcyNGQ3ZA
#
# This is a good compromise between correctness and brevity. ;-)

const
  cb64 = [
    "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N",
    "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z",
    "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n",
    "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z",
    "0", "1", "2", "3", "4", "5", "6", "7", "8", "9",
    "_A", "_B"]

proc toBase64a(s: cstring, len: int): string =
  ## encodes `s` into base64 representation. After `lineLen` characters, a
  ## `newline` is added.
  result = newStringOfCap(((len + 2) div 3) * 4)
  var i = 0
  while i < s.len - 2:
    let a = ord(s[i])
    let b = ord(s[i+1])
    let c = ord(s[i+2])
    result.add cb64[a shr 2]
    result.add cb64[((a and 3) shl 4) or ((b and 0xF0) shr 4)]
    result.add cb64[((b and 0x0F) shl 2) or ((c and 0xC0) shr 6)]
    result.add cb64[c and 0x3F]
    inc(i, 3)
  if i < s.len-1:
    let a = ord(s[i])
    let b = ord(s[i+1])
    result.add cb64[a shr 2]
    result.add cb64[((a and 3) shl 4) or ((b and 0xF0) shr 4)]
    result.add cb64[((b and 0x0F) shl 2)]
  elif i < s.len:
    let a = ord(s[i])
    result.add cb64[a shr 2]
    result.add cb64[(a and 3) shl 4]

proc toBase64a(u: TUid): string = toBase64a(cast[cstring](u), sizeof(u))
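To make the mangling scheme described in the comments above concrete, here is a small, self-contained sketch (not part of the deleted module): it hashes a signature string with MD5 and encodes the 128-bit digest with the same base64a packing, prepending the primary identifier. The `mangle` helper is made up for illustration, and the digest shown in the comment above is not re-derived here.

```nim
# illustrative sketch only; assumes the same cb64 packing as the proc above
import std/md5

const cb64 = [
  "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N",
  "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z",
  "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n",
  "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z",
  "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "_A", "_B"]

proc toBase64a(s: string): string =
  ## same bit packing as above, but over a plain string
  result = newStringOfCap(((s.len + 2) div 3) * 4)
  var i = 0
  while i < s.len - 2:
    let a = ord(s[i]); let b = ord(s[i+1]); let c = ord(s[i+2])
    result.add cb64[a shr 2]
    result.add cb64[((a and 3) shl 4) or ((b and 0xF0) shr 4)]
    result.add cb64[((b and 0x0F) shl 2) or ((c and 0xC0) shr 6)]
    result.add cb64[c and 0x3F]
    inc(i, 3)
  if i < s.len - 1:
    let a = ord(s[i]); let b = ord(s[i+1])
    result.add cb64[a shr 2]
    result.add cb64[((a and 3) shl 4) or ((b and 0xF0) shr 4)]
    result.add cb64[(b and 0x0F) shl 2]
  elif i < s.len:
    let a = ord(s[i])
    result.add cb64[a shr 2]
    result.add cb64[(a and 3) shl 4]

proc mangle(name, signature: string): string =
  ## hypothetical helper: primary identifier, '_', then the encoded 128-bit hash
  let digest = toMD5(signature)
  var raw = newString(16)
  for i in 0 ..< 16: raw[i] = char(digest[i])
  name & "_" & toBase64a(raw)

echo mangle("gABI",
  "proc gABI(c: PCtx; n: PNode; opc: TOpcode; a, b: TRegister; imm: BiggestInt)")
```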
proc `&=`(c: var MD5Context, s: string) = md5Update(c, s, s.len)

proc hashSym(c: var MD5Context, s: PSym) =
  if sfAnon in s.flags or s.kind == skGenericParam:
    c &= ":anon"
  else:
    var it = s.owner
    while it != nil:
      hashSym(c, it)
      c &= "."
      it = s.owner
    c &= s.name.s

proc hashTree(c: var MD5Context, n: PNode) =
  if n == nil:
    c &= "\255"
    return
  var k = n.kind
  md5Update(c, cast[cstring](addr(k)), 1)
  # we really must not hash line information. 'n.typ' is debatable but
  # shouldn't be necessary for now and avoids potential infinite recursions.
  case n.kind
  of nkEmpty, nkNilLit, nkType: discard
  of nkIdent:
    c &= n.ident.s
  of nkSym:
    hashSym(c, n.sym)
  of nkCharLit..nkUInt64Lit:
    var v = n.intVal
    md5Update(c, cast[cstring](addr(v)), sizeof(v))
  of nkFloatLit..nkFloat64Lit:
    var v = n.floatVal
    md5Update(c, cast[cstring](addr(v)), sizeof(v))
  of nkStrLit..nkTripleStrLit:
    c &= n.strVal
  else:
    for i in 0..<n.len: hashTree(c, n[i])

proc hashType(c: var MD5Context, t: PType) =
  # modelled after 'typeToString'
  if t == nil:
    c &= "\254"
    return

  var k = t.kind
  md5Update(c, cast[cstring](addr(k)), 1)

  if t.sym != nil and sfAnon notin t.sym.flags:
    # t.n for literals, but not for e.g. objects!
    if t.kind in {tyFloat, tyInt}: c.hashNode(t.n)
    c.hashSym(t.sym)

  case t.kind
  of tyGenericBody, tyGenericInst, tyGenericInvocation:
    for i in 0..<t.len-ord(t.kind != tyGenericInvocation):
      c.hashType t[i]
  of tyUserTypeClass:
    internalAssert t.sym != nil and t.sym.owner != nil
    c &= t.sym.owner.name.s
  of tyUserTypeClassInst:
    let body = t.base
    c.hashSym body.sym
    for i in 1..<t.len-1:
      c.hashType t[i]
  of tyFromExpr:
    c.hashTree(t.n)
  of tyArray:
    c.hashTree(t[0].n)
    c.hashType(t[1])
  of tyTuple:
    if t.n != nil:
      assert(t.n.len == t.len)
      for i in 0..<t.n.len:
        assert(t.n[i].kind == nkSym)
        c &= t.n[i].sym.name.s
        c &= ":"
        c.hashType(t[i])
        c &= ","
    else:
      for i in 0..<t.len: c.hashType t[i]
  of tyRange:
    c.hashTree(t.n)
    c.hashType(t[0])
  of tyProc:
    c &= (if tfIterator in t.flags: "iterator " else: "proc ")
    for i in 0..<t.len: c.hashType(t[i])
    md5Update(c, cast[cstring](addr(t.callConv)), 1)

    if tfNoSideEffect in t.flags: c &= ".noSideEffect"
    if tfThread in t.flags: c &= ".thread"
  else:
    for i in 0..<t.len: c.hashType(t[i])
  if tfNotNil in t.flags: c &= "not nil"

proc canonConst(n: PNode): TUid =
  var c: MD5Context
  md5Init(c)
  c.hashTree(n)
  c.hashType(n.typ)
  md5Final(c, MD5Digest(result))

proc canonSym(s: PSym): TUid =
  var c: MD5Context
  md5Init(c)
  c.hashSym(s)
  md5Final(c, MD5Digest(result))
proc pushType(w: PRodWriter, t: PType) =
  # check so that the stack does not grow too large:
  if iiTableGet(w.index.tab, t.id) == InvalidKey:
    w.tstack.add(t)

proc pushSym(w: PRodWriter, s: PSym) =
  # check so that the stack does not grow too large:
  if iiTableGet(w.index.tab, s.id) == InvalidKey:
    w.sstack.add(s)

proc encodeNode(w: PRodWriter, fInfo: TLineInfo, n: PNode,
                result: var string) =
  if n == nil:
    # nil nodes have to be stored too:
    result.add("()")
    return
  result.add('(')
  encodeVInt(ord(n.kind), result)
  # we do not write comments for now
  # Line information takes easily 20% or more of the filesize! Therefore we
  # omit line information if it is the same as the father's line information:
  if fInfo.fileIndex != n.info.fileIndex:
    result.add('?')
    encodeVInt(n.info.col, result)
    result.add(',')
    encodeVInt(n.info.line, result)
    result.add(',')
    encodeVInt(fileIdx(w, toFilename(n.info)), result)
  elif fInfo.line != n.info.line:
    result.add('?')
    encodeVInt(n.info.col, result)
    result.add(',')
    encodeVInt(n.info.line, result)
  elif fInfo.col != n.info.col:
    result.add('?')
    encodeVInt(n.info.col, result)
  var f = n.flags * PersistentNodeFlags
  if f != {}:
    result.add('$')
    encodeVInt(cast[int32](f), result)
  if n.typ != nil:
    result.add('^')
    encodeVInt(n.typ.id, result)
    pushType(w, n.typ)
  case n.kind
  of nkCharLit..nkInt64Lit:
    if n.intVal != 0:
      result.add('!')
      encodeVBiggestInt(n.intVal, result)
  of nkFloatLit..nkFloat64Lit:
    if n.floatVal != 0.0:
      result.add('!')
      encodeStr($n.floatVal, result)
  of nkStrLit..nkTripleStrLit:
    if n.strVal != "":
      result.add('!')
      encodeStr(n.strVal, result)
  of nkIdent:
    result.add('!')
    encodeStr(n.ident.s, result)
  of nkSym:
    result.add('!')
    encodeVInt(n.sym.id, result)
    pushSym(w, n.sym)
  else:
    for i in 0..<n.len:
      encodeNode(w, n.info, n[i], result)
  result.add(')')

proc encodeLoc(w: PRodWriter, loc: TLoc, result: var string) =
  var oldLen = result.len
  result.add('<')
  if loc.k != low(loc.k): encodeVInt(ord(loc.k), result)
  if loc.s != low(loc.s):
    result.add('*')
    encodeVInt(ord(loc.s), result)
  if loc.flags != {}:
    result.add('$')
    encodeVInt(cast[int32](loc.flags), result)
  if loc.t != nil:
    result.add('^')
    encodeVInt(cast[int32](loc.t.id), result)
    pushType(w, loc.t)
  if loc.r != nil:
    result.add('!')
    encodeStr($loc.r, result)
  if loc.a != 0:
    result.add('?')
    encodeVInt(loc.a, result)
  if oldLen + 1 == result.len:
    # no data was necessary, so remove the '<' again:
    setLen(result, oldLen)
  else:
    result.add('>')

proc encodeType(w: PRodWriter, t: PType, result: var string) =
  if t == nil:
    # nil nodes have to be stored too:
    result.add("[]")
    return
  # we need no surrounding [] here because the type is in a line of its own
  if t.kind == tyForward: internalError("encodeType: tyForward")
  # for the new rodfile viewer we use a preceding [ so that the data section
  # can easily be disambiguated:
  result.add('[')
  encodeVInt(ord(t.kind), result)
  result.add('+')
  encodeVInt(t.id, result)
  if t.n != nil:
    encodeNode(w, unknownLineInfo, t.n, result)
  if t.flags != {}:
    result.add('$')
    encodeVInt(cast[int32](t.flags), result)
  if t.callConv != low(t.callConv):
    result.add('?')
    encodeVInt(ord(t.callConv), result)
  if t.owner != nil:
    result.add('*')
    encodeVInt(t.owner.id, result)
    pushSym(w, t.owner)
  if t.sym != nil:
    result.add('&')
    encodeVInt(t.sym.id, result)
    pushSym(w, t.sym)
  if t.size != - 1:
    result.add('/')
    encodeVBiggestInt(t.size, result)
  if t.align != - 1:
    result.add('=')
    encodeVInt(t.align, result)
  encodeLoc(w, t.loc, result)
  for i in 0..<t.len:
    if t[i] == nil:
      result.add("^()")
    else:
      result.add('^')
      encodeVInt(t[i].id, result)
      pushType(w, t[i])

proc encodeLib(w: PRodWriter, lib: PLib, info: TLineInfo, result: var string) =
  result.add('|')
  encodeVInt(ord(lib.kind), result)
  result.add('|')
  encodeStr($lib.name, result)
  result.add('|')
  encodeNode(w, info, lib.path, result)

proc encodeSym(w: PRodWriter, s: PSym, result: var string) =
  if s == nil:
    # nil nodes have to be stored too:
    result.add("{}")
    return
  # we need no surrounding {} here because the symbol is in a line of its own
  encodeVInt(ord(s.kind), result)
  result.add('+')
  encodeVInt(s.id, result)
  result.add('&')
  encodeStr(s.name.s, result)
  if s.typ != nil:
    result.add('^')
    encodeVInt(s.typ.id, result)
    pushType(w, s.typ)
  result.add('?')
  if s.info.col != -1'i16: encodeVInt(s.info.col, result)
  result.add(',')
  if s.info.line != -1'i16: encodeVInt(s.info.line, result)
  result.add(',')
  encodeVInt(fileIdx(w, toFilename(s.info)), result)
  if s.owner != nil:
    result.add('*')
    encodeVInt(s.owner.id, result)
    pushSym(w, s.owner)
  if s.flags != {}:
    result.add('$')
    encodeVInt(cast[int32](s.flags), result)
  if s.magic != mNone:
    result.add('@')
    encodeVInt(ord(s.magic), result)
  if s.options != w.options:
    result.add('!')
    encodeVInt(cast[int32](s.options), result)
  if s.position != 0:
    result.add('%')
    encodeVInt(s.position, result)
  if s.offset != - 1:
    result.add('`')
    encodeVInt(s.offset, result)
  encodeLoc(w, s.loc, result)
  if s.annex != nil: encodeLib(w, s.annex, s.info, result)
  if s.constraint != nil:
    result.add('#')
    encodeNode(w, unknownLineInfo, s.constraint, result)
  # lazy loading will soon reload the ast lazily, so the ast needs to be
  # the last entry of a symbol:
  if s.ast != nil:
    # we used to attempt to save space here by only storing a dummy AST if
    # it is not necessary, but Nim's heavy compile-time evaluation features
    # make that unfeasible nowadays:
    encodeNode(w, s.info, s.ast, result)
proc createDb() =
  db.exec(sql"""
    create table if not exists Module(
      id integer primary key,
      name varchar(256) not null,
      fullpath varchar(256) not null,
      interfHash varchar(256) not null,
      fullHash varchar(256) not null,

      created timestamp not null default (DATETIME('now'))
    );""")

  db.exec(sql"""
    create table if not exists Backend(
      id integer primary key,
      strongdeps varchar(max) not null,
      weakdeps varchar(max) not null,
      header varchar(max) not null,
      code varchar(max) not null
    )

    create table if not exists Symbol(
      id integer primary key,
      module integer not null,
      backend integer not null,
      name varchar(max) not null,
      data varchar(max) not null,
      created timestamp not null default (DATETIME('now')),

      foreign key (module) references Module(id),
      foreign key (backend) references Backend(id)
    );""")

  db.exec(sql"""
    create table if not exists Type(
      id integer primary key,
      module integer not null,
      name varchar(max) not null,
      data varchar(max) not null,
      created timestamp not null default (DATETIME('now')),

      foreign key (module) references module(id)
    );""")
@@ -418,7 +418,7 @@ proc parseCommand*(command: string): Command =
  of "gendepend": cmdGendepend
  of "dump": cmdDump
  of "parse": cmdParse
  of "scan": cmdScan
  of "rod": cmdRod
  of "secret": cmdInteractive
  of "nop", "help": cmdNop
  of "jsonscript": cmdJsonscript

@@ -800,8 +800,7 @@ proc processSwitch*(switch, arg: string, pass: TCmdLinePass, info: TLineInfo;
  of "help", "h":
    expectNoArg(conf, switch, arg, pass, info)
    helpOnError(conf, pass)
  of "symbolfiles": discard "ignore for backwards compat"
  of "incremental", "ic":
  of "symbolfiles", "incremental", "ic":
    if pass in {passCmd2, passPP}:
      case arg.normalize
      of "on": conf.symbolFiles = v2Sf

@@ -809,6 +808,7 @@ proc processSwitch*(switch, arg: string, pass: TCmdLinePass, info: TLineInfo;
      of "writeonly": conf.symbolFiles = writeOnlySf
      of "readonly": conf.symbolFiles = readOnlySf
      of "v2": conf.symbolFiles = v2Sf
      of "stress": conf.symbolFiles = stressTest
      else: localError(conf, info, "invalid option for --incremental: " & arg)
  of "skipcfg":
    processOnOffSwitchG(conf, {optSkipSystemConfigFile}, arg, pass, info)
@@ -33,9 +33,28 @@ are rod-file specific too.

Global state
------------

Global persistent state will be kept in a project specific `.rod` file.
There is no global state.

Rod File Format
---------------

It's a simple binary file format. `rodfiles.nim` contains some details.


Backend
-------

Nim programmers have come to enjoy whole-program dead code elimination
by default. Since this is a "whole program" optimization, it does break
modularity. However, thanks to the packed AST representation we can perform
this global analysis without having to unpack anything. This is basically
a mark&sweep GC algorithm:

- Start with the top level statements. Every symbol that is referenced
  from a top level statement is not "dead" and needs to be compiled by
  the backend.
- Every symbol referenced from a referenced symbol also has to be
  compiled.

Caching logic: only if the set of alive symbols differs from the
last run does the module's backend code have to be regenerated.
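A minimal, stand-alone model of the marking step described above, assuming a plain table of symbol references instead of the packed AST (the names `SymId`, `aliveSymbols` and `refsOf` are invented for the sketch):

```nim
import std/[sets, tables]

type SymId = int

proc aliveSymbols(topLevelRefs: seq[SymId];
                  refsOf: Table[SymId, seq[SymId]]): HashSet[SymId] =
  ## mark phase: everything reachable from the top level is not "dead"
  result = initHashSet[SymId]()
  var stack = topLevelRefs
  while stack.len > 0:
    let s = stack.pop()
    if not result.containsOrIncl(s):
      for dep in refsOf.getOrDefault(s):
        stack.add dep

when isMainModule:
  let refs = {1: @[2, 3], 2: @[3], 4: @[5]}.toTable
  # symbols 4 and 5 are never referenced from the top level -> dead code
  assert aliveSymbols(@[1], refs) == [1, 2, 3].toHashSet
```

Comparing this alive set against the one stored from the previous run gives exactly the caching decision described above.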
@@ -16,9 +16,6 @@ import std / [hashes, tables, strtabs, md5]
import bitabs
import ".." / [ast, options]

const
  nkModuleRef* = nkNone # pair of (ModuleId, SymId)

type
  SymId* = distinct int32
  ModuleId* = distinct int32

@@ -310,7 +307,7 @@ template typ*(n: NodePos): PackedItemId =
template flags*(n: NodePos): TNodeFlags =
  tree.nodes[n.int].flags

proc span(tree: PackedTree; pos: int): int {.inline.} =
proc span*(tree: PackedTree; pos: int): int {.inline.} =
  if isAtom(tree, pos): 1 else: tree.nodes[pos].operand

proc sons2*(tree: PackedTree; n: NodePos): (NodePos, NodePos) =

@@ -471,68 +468,3 @@ when false:
      dest.add nkStrLit, msg, n.info
      copyTree(dest, tree, n)
      patch dest, patchPos
proc hash*(table: StringTableRef): Hash =
  ## XXX: really should be introduced into strtabs...
  var h: Hash = 0
  for pair in pairs table:
    h = h !& hash(pair)
  result = !$h

proc hash*(config: ConfigRef): Hash =
  ## XXX: vet and/or extend this
  var h: Hash = 0
  h = h !& hash(config.selectedGC)
  h = h !& hash(config.features)
  h = h !& hash(config.legacyFeatures)
  h = h !& hash(config.configVars)
  h = h !& hash(config.symbols)
  result = !$h

# XXX: lazy hashes for now
type
  LazyHashes = PackedSym or PackedType or PackedLib or
               PackedLineInfo or PackedTree or PackedNode

proc hash*(sh: Shared): Hash
proc hash*(s: LazyHashes): Hash
proc hash*(s: seq[LazyHashes]): Hash

proc hash*(s: LazyHashes): Hash =
  var h: Hash = 0
  for k, v in fieldPairs(s):
    h = h !& hash((k, v))
  result = !$h

proc hash*(s: seq[LazyHashes]): Hash =
  ## critically, we need to hash the indices alongside their values
  var h: Hash = 0
  for i, n in pairs s:
    h = h !& hash((i, n))
  result = !$h

proc hash*(sh: Shared): Hash =
  ## might want to edit this...
  # XXX: these have too many references
  when false:
    var h: Hash = 0
    h = h !& hash(sh.syms)
    h = h !& hash(sh.types)
    h = h !& hash(sh.strings)
    h = h !& hash(sh.integers)
    h = h !& hash(sh.floats)
    h = h !& hash(sh.config)
    result = !$h

proc hash*(m: Module): Hash =
  var h: Hash = 0
  h = h !& hash(m.name)
  h = h !& hash(m.ast)
  result = !$h

template safeItemId*(x: typed; f: untyped): ItemId =
  ## yield a valid ItemId value for the field of a nillable type
  if x.isNil:
    nilItemId
  else:
    x.`f`
compiler/ic/replayer.nim (new file, 89 lines)
@@ -0,0 +1,89 @@
#
#
#           The Nim Compiler
#        (c) Copyright 2020 Andreas Rumpf
#
#    See the file "copying.txt", included in this
#    distribution, for details about the copyright.
#

## Module that contains code to replay global VM state changes and pragma
## state like ``{.compile: "foo.c".}``. For IC (= Incremental compilation)
## support.

import ".." / [ast, modulegraphs, trees, extccomp, btrees,
  msgs, lineinfos, pathutils, options]

import tables

proc replayStateChanges*(module: PSym; g: ModuleGraph) =
  let list = module.ast
  assert list != nil
  assert list.kind == nkStmtList
  for n in list:
    assert n.kind == nkReplayAction
    # Fortunately only a tiny subset of the available pragmas need to
    # be replayed here. This is always a subset of ``pragmas.stmtPragmas``.
    if n.len >= 2:
      internalAssert g.config, n[0].kind == nkStrLit and n[1].kind == nkStrLit
      case n[0].strVal
      of "hint": message(g.config, n.info, hintUser, n[1].strVal)
      of "warning": message(g.config, n.info, warnUser, n[1].strVal)
      of "error": localError(g.config, n.info, errUser, n[1].strVal)
      of "compile":
        internalAssert g.config, n.len == 4 and n[2].kind == nkStrLit
        let cname = AbsoluteFile n[1].strVal
        var cf = Cfile(nimname: splitFile(cname).name, cname: cname,
                       obj: AbsoluteFile n[2].strVal,
                       flags: {CfileFlag.External},
                       customArgs: n[3].strVal)
        extccomp.addExternalFileToCompile(g.config, cf)
      of "link":
        extccomp.addExternalFileToLink(g.config, AbsoluteFile n[1].strVal)
      of "passl":
        extccomp.addLinkOption(g.config, n[1].strVal)
      of "passc":
        extccomp.addCompileOption(g.config, n[1].strVal)
      of "localpassc":
        extccomp.addLocalCompileOption(g.config, n[1].strVal, toFullPathConsiderDirty(g.config, module.info.fileIndex))
      of "cppdefine":
        options.cppDefine(g.config, n[1].strVal)
      of "inc":
        let destKey = n[1].strVal
        let by = n[2].intVal
        let v = getOrDefault(g.cacheCounters, destKey)
        g.cacheCounters[destKey] = v+by
      of "put":
        let destKey = n[1].strVal
        let key = n[2].strVal
        let val = n[3]
        if not contains(g.cacheTables, destKey):
          g.cacheTables[destKey] = initBTree[string, PNode]()
        if not contains(g.cacheTables[destKey], key):
          g.cacheTables[destKey].add(key, val)
        else:
          internalError(g.config, n.info, "key already exists: " & key)
      of "incl":
        let destKey = n[1].strVal
        let val = n[2]
        if not contains(g.cacheSeqs, destKey):
          g.cacheSeqs[destKey] = newTree(nkStmtList, val)
        else:
          block search:
            for existing in g.cacheSeqs[destKey]:
              if exprStructuralEquivalent(existing, val, strictSymEquality=true):
                break search
            g.cacheSeqs[destKey].add val
      of "add":
        let destKey = n[1].strVal
        let val = n[2]
        if not contains(g.cacheSeqs, destKey):
          g.cacheSeqs[destKey] = newTree(nkStmtList, val)
        else:
          g.cacheSeqs[destKey].add val
      else:
        internalAssert g.config, false

    # of nkMethodDef:
    #   methodDef(g, n[namePos].sym, fromCache=true)
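As a rough illustration of what gets replayed, the sketch below models just the "inc" and "put" actions with plain tables; the `ReplayAction` type and `replay` proc are invented for the example, while the real code above operates on `nkReplayAction` nodes and the module graph's `cacheCounters`/`cacheTables`.

```nim
# stand-alone model of replaying recorded counter/table mutations
import std/[tables, strutils]

type
  ReplayAction = object
    kind: string        # "inc", "put", "passl", ...
    args: seq[string]

proc replay(actions: seq[ReplayAction];
            counters: var Table[string, int];
            caches: var Table[string, Table[string, string]]) =
  for a in actions:
    case a.kind
    of "inc":
      # bump a macrocache-style counter by the recorded amount
      counters[a.args[0]] = counters.getOrDefault(a.args[0]) + parseInt(a.args[1])
    of "put":
      # insert a key/value pair into a named cache table
      caches.mgetOrPut(a.args[0], initTable[string, string]())[a.args[1]] = a.args[2]
    else:
      discard "other actions forward to extccomp/options, as in the real module"

when isMainModule:
  var counters: Table[string, int]
  var caches: Table[string, Table[string, string]]
  # the counter name below is arbitrary, chosen only for the example
  replay(@[ReplayAction(kind: "inc", args: @["myCounter", "1"])],
         counters, caches)
  assert counters["myCounter"] == 1
```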
@@ -26,13 +26,14 @@ type
    methodsSection
    pureEnumsSection
    macroUsagesSection
    toReplaySection
    topLevelSection
    bodiesSection
    symsSection
    typesSection

  RodFileError* = enum
    ok, tooBig, ioFailure, wrongHeader, wrongSection, configMismatch,
    ok, tooBig, cannotOpen, ioFailure, wrongHeader, wrongSection, configMismatch,
    includeFileChanged

  RodFile* = object

@@ -146,10 +147,10 @@ proc loadSection*(f: var RodFile; expected: RodSection) =

proc create*(filename: string): RodFile =
  if not open(result.f, filename, fmWrite):
    setError result, ioFailure
    setError result, cannotOpen

proc close*(f: var RodFile) = close(f.f)

proc open*(filename: string): RodFile =
  if not open(result.f, filename, fmRead):
    setError result, ioFailure
    setError result, cannotOpen
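The new `cannotOpen` value keeps "file could not be opened" distinct from later read/write failures. Below is a stand-alone model of the error-latching style this API uses, where operations record the first error and callers check `err` once at the end; `openRod` and `RodFileModel` are invented names for the sketch, not the real reader.

```nim
type
  RodFileError = enum
    ok, tooBig, cannotOpen, ioFailure, wrongHeader, wrongSection,
    configMismatch, includeFileChanged
  RodFileModel = object
    f: File
    err: RodFileError

proc setError(f: var RodFileModel; err: RodFileError) {.inline.} =
  if f.err == ok: f.err = err   # keep only the first error

proc openRod(filename: string): RodFileModel =
  if not open(result.f, filename, fmRead):
    setError result, cannotOpen

var f = openRod("does_not_exist.rod")
if f.err != ok:
  echo "cannot load rod file: ", f.err
```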
@@ -26,9 +26,9 @@ type
    definedSymbols: string
    includes: seq[(LitId, string)] # first entry is the module filename itself
    imports: seq[LitId] # the modules this module depends on
    toReplay: PackedTree # pragmas and VM specific state to replay.
    topLevel*: PackedTree # top level statements
    bodies*: PackedTree # other trees. Referenced from typ.n and sym.ast by their position.
    hidden*: PackedTree # instantiated generics and other trees not directly in the source code.
    #producedGenerics*: Table[GenericKey, SymId]
    exports*: seq[(LitId, int32)]
    reexports*: seq[(LitId, PackedItemId)]

@@ -39,7 +39,7 @@ type
    cfg: PackedConfig

  PackedEncoder* = object
    m: PackedModule
    #m*: PackedModule
    thisModule*: int32
    lastFile*: FileIndex # remember the last lookup entry.
    lastLit*: LitId

@@ -64,12 +64,12 @@ proc definedSymbolsAsString(config: ConfigRef): string =
    result.add ' '
    result.add d

proc rememberConfig(c: var PackedEncoder; config: ConfigRef; pc: PackedConfig) =
  c.m.definedSymbols = definedSymbolsAsString(config)
proc rememberConfig(c: var PackedEncoder; m: var PackedModule; config: ConfigRef; pc: PackedConfig) =
  m.definedSymbols = definedSymbolsAsString(config)
  #template rem(x) =
  #  c.m.cfg.x = config.x
  #primConfigFields rem
  c.m.cfg = pc
  m.cfg = pc
proc configIdentical(m: PackedModule; config: ConfigRef): bool =
  result = m.definedSymbols == definedSymbolsAsString(config)

@@ -93,7 +93,7 @@ proc hashFileCached(conf: ConfigRef; fileIdx: FileIndex): string =
    result = $secureHashFile(fullpath)
    msgs.setHash(conf, fileIdx, result)

proc toLitId(x: FileIndex; c: var PackedEncoder): LitId =
proc toLitId(x: FileIndex; c: var PackedEncoder; m: var PackedModule): LitId =
  ## store a file index as a literal
  if x == c.lastFile:
    result = c.lastLit

@@ -101,7 +101,7 @@ proc toLitId(x: FileIndex; c: var PackedEncoder): LitId =
    result = c.filenames.getOrDefault(x)
    if result == LitId(0):
      let p = msgs.toFullPath(c.config, x)
      result = getOrIncl(c.m.sh.strings, p)
      result = getOrIncl(m.sh.strings, p)
      c.filenames[x] = result
    c.lastFile = x
    c.lastLit = result

@@ -116,13 +116,13 @@ proc includesIdentical(m: var PackedModule; config: ConfigRef): bool =
      return false
  result = true

proc initEncoder*(c: var PackedEncoder; m: PSym; config: ConfigRef; pc: PackedConfig) =
proc initEncoder*(c: var PackedEncoder; m: var PackedModule; moduleSym: PSym; config: ConfigRef; pc: PackedConfig) =
  ## setup a context for serializing to packed ast
  c.m.sh = Shared()
  c.thisModule = m.itemId.module
  m.sh = Shared()
  c.thisModule = moduleSym.itemId.module
  c.config = config
  c.m.bodies = newTreeFrom(c.m.topLevel)
  c.m.hidden = newTreeFrom(c.m.topLevel)
  m.bodies = newTreeFrom(m.topLevel)
  m.toReplay = newTreeFrom(m.topLevel)

  let thisNimFile = FileIndex c.thisModule
  var h = msgs.getHash(config, thisNimFile)

@@ -132,90 +132,91 @@ proc initEncoder*(c: var PackedEncoder; m: PSym; config: ConfigRef; pc: PackedCo
    # For NimScript compiler API support the main Nim file might be from a stream.
    h = $secureHashFile(fullpath)
    msgs.setHash(config, thisNimFile, h)
  c.m.includes.add((toLitId(thisNimFile, c), h)) # the module itself
  m.includes.add((toLitId(thisNimFile, c, m), h)) # the module itself

  rememberConfig(c, config, pc)
  rememberConfig(c, m, config, pc)

proc addIncludeFileDep*(c: var PackedEncoder; f: FileIndex) =
  c.m.includes.add((toLitId(f, c), hashFileCached(c.config, f)))
proc addIncludeFileDep*(c: var PackedEncoder; m: var PackedModule; f: FileIndex) =
  m.includes.add((toLitId(f, c, m), hashFileCached(c.config, f)))

proc addImportFileDep*(c: var PackedEncoder; f: FileIndex) =
  c.m.imports.add toLitId(f, c)
proc addImportFileDep*(c: var PackedEncoder; m: var PackedModule; f: FileIndex) =
  m.imports.add toLitId(f, c, m)

proc addExported*(c: var PackedEncoder; s: PSym) =
  let nameId = getOrIncl(c.m.sh.strings, s.name.s)
  c.m.exports.add((nameId, s.itemId.item))
proc addExported*(c: var PackedEncoder; m: var PackedModule; s: PSym) =
  let nameId = getOrIncl(m.sh.strings, s.name.s)
  m.exports.add((nameId, s.itemId.item))

proc addConverter*(c: var PackedEncoder; s: PSym) =
  let nameId = getOrIncl(c.m.sh.strings, s.name.s)
  c.m.converters.add((nameId, s.itemId.item))
proc addConverter*(c: var PackedEncoder; m: var PackedModule; s: PSym) =
  let nameId = getOrIncl(m.sh.strings, s.name.s)
  m.converters.add((nameId, s.itemId.item))

proc addTrmacro*(c: var PackedEncoder; s: PSym) =
  let nameId = getOrIncl(c.m.sh.strings, s.name.s)
  c.m.trmacros.add((nameId, s.itemId.item))
proc addTrmacro*(c: var PackedEncoder; m: var PackedModule; s: PSym) =
  let nameId = getOrIncl(m.sh.strings, s.name.s)
  m.trmacros.add((nameId, s.itemId.item))

proc addPureEnum*(c: var PackedEncoder; s: PSym) =
  let nameId = getOrIncl(c.m.sh.strings, s.name.s)
proc addPureEnum*(c: var PackedEncoder; m: var PackedModule; s: PSym) =
  let nameId = getOrIncl(m.sh.strings, s.name.s)
  assert s.kind == skType
  c.m.pureEnums.add((nameId, s.itemId.item))
  m.pureEnums.add((nameId, s.itemId.item))

proc addMethod*(c: var PackedEncoder; s: PSym) =
  let nameId = getOrIncl(c.m.sh.strings, s.name.s)
proc addMethod*(c: var PackedEncoder; m: var PackedModule; s: PSym) =
  let nameId = getOrIncl(m.sh.strings, s.name.s)
  discard "to do"
  # c.m.methods.add((nameId, s.itemId.item))

proc addReexport*(c: var PackedEncoder; s: PSym) =
  let nameId = getOrIncl(c.m.sh.strings, s.name.s)
  c.m.reexports.add((nameId, PackedItemId(module: toLitId(s.itemId.module.FileIndex, c),
                                          item: s.itemId.item)))
proc addReexport*(c: var PackedEncoder; m: var PackedModule; s: PSym) =
  let nameId = getOrIncl(m.sh.strings, s.name.s)
  m.reexports.add((nameId, PackedItemId(module: toLitId(s.itemId.module.FileIndex, c, m),
                                        item: s.itemId.item)))

proc addCompilerProc*(c: var PackedEncoder; s: PSym) =
  let nameId = getOrIncl(c.m.sh.strings, s.name.s)
  c.m.compilerProcs.add((nameId, s.itemId.item))
proc addCompilerProc*(c: var PackedEncoder; m: var PackedModule; s: PSym) =
  let nameId = getOrIncl(m.sh.strings, s.name.s)
  m.compilerProcs.add((nameId, s.itemId.item))
proc toPackedNode*(n: PNode; ir: var PackedTree; c: var PackedEncoder)
proc toPackedSym*(s: PSym; c: var PackedEncoder): PackedItemId
proc toPackedType(t: PType; c: var PackedEncoder): PackedItemId
proc toPackedNode*(n: PNode; ir: var PackedTree; c: var PackedEncoder; m: var PackedModule)
proc toPackedSym*(s: PSym; c: var PackedEncoder; m: var PackedModule): PackedItemId
proc toPackedType(t: PType; c: var PackedEncoder; m: var PackedModule): PackedItemId

proc flush(c: var PackedEncoder) =
proc flush(c: var PackedEncoder; m: var PackedModule) =
  ## serialize any pending types or symbols from the context
  while true:
    if c.pendingTypes.len > 0:
      discard toPackedType(c.pendingTypes.pop, c)
      discard toPackedType(c.pendingTypes.pop, c, m)
    elif c.pendingSyms.len > 0:
      discard toPackedSym(c.pendingSyms.pop, c)
      discard toPackedSym(c.pendingSyms.pop, c, m)
    else:
      break

proc toLitId(x: string; c: var PackedEncoder): LitId =
proc toLitId(x: string; m: var PackedModule): LitId =
  ## store a string as a literal
  result = getOrIncl(c.m.sh.strings, x)
  result = getOrIncl(m.sh.strings, x)

proc toLitId(x: BiggestInt; c: var PackedEncoder): LitId =
proc toLitId(x: BiggestInt; m: var PackedModule): LitId =
  ## store an integer as a literal
  result = getOrIncl(c.m.sh.integers, x)
  result = getOrIncl(m.sh.integers, x)

proc toPackedInfo(x: TLineInfo; c: var PackedEncoder): PackedLineInfo =
  PackedLineInfo(line: x.line, col: x.col, file: toLitId(x.fileIndex, c))
proc toPackedInfo(x: TLineInfo; c: var PackedEncoder; m: var PackedModule): PackedLineInfo =
  PackedLineInfo(line: x.line, col: x.col, file: toLitId(x.fileIndex, c, m))

proc safeItemId(s: PSym; c: var PackedEncoder): PackedItemId {.inline.} =
proc safeItemId(s: PSym; c: var PackedEncoder; m: var PackedModule): PackedItemId {.inline.} =
  ## given a symbol, produce an ItemId with the correct properties
  ## for local or remote symbols, packing the symbol as necessary
  if s == nil:
  if s == nil or s.kind == skPackage:
    result = nilItemId
  elif s.itemId.module == c.thisModule:
    result = PackedItemId(module: LitId(0), item: s.itemId.item)
  #elif s.itemId.module == c.thisModule:
  #  result = PackedItemId(module: LitId(0), item: s.itemId.item)
  else:
    result = PackedItemId(module: toLitId(s.itemId.module.FileIndex, c),
    assert int(s.itemId.module) >= 0
    result = PackedItemId(module: toLitId(s.itemId.module.FileIndex, c, m),
                          item: s.itemId.item)

proc addModuleRef(n: PNode; ir: var PackedTree; c: var PackedEncoder) =
proc addModuleRef(n: PNode; ir: var PackedTree; c: var PackedEncoder; m: var PackedModule) =
  ## add a remote symbol reference to the tree
  let info = n.info.toPackedInfo(c)
  ir.nodes.add PackedNode(kind: nkModuleRef, operand: 2.int32, # 2 kids...
                          typeId: toPackedType(n.typ, c), info: info)
  let info = n.info.toPackedInfo(c, m)
  ir.nodes.add PackedNode(kind: nkModuleRef, operand: 3.int32, # spans 3 nodes in total
                          typeId: toPackedType(n.typ, c, m), info: info)
  ir.nodes.add PackedNode(kind: nkInt32Lit, info: info,
                          operand: toLitId(n.sym.itemId.module.FileIndex, c).int32)
                          operand: toLitId(n.sym.itemId.module.FileIndex, c, m).int32)
  ir.nodes.add PackedNode(kind: nkInt32Lit, info: info,
                          operand: n.sym.itemId.item)
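The operand change above (2 to 3) reflects that, for non-atom nodes, `operand` stores the span in nodes including the node itself, so an `nkModuleRef` with two `nkInt32Lit` children occupies three slots. A toy model of span-based child lookup, not the real `PackedTree` API:

```nim
type
  ToyKind = enum tkInt, tkModuleRef
  ToyNode = object
    kind: ToyKind
    operand: int32      # literal value for atoms, span for subtrees

proc span(nodes: seq[ToyNode]; pos: int): int =
  if nodes[pos].kind == tkInt: 1 else: int(nodes[pos].operand)

proc sons2(nodes: seq[ToyNode]; pos: int): (int, int) =
  let first = pos + 1
  (first, first + span(nodes, first))

var tree = @[
  ToyNode(kind: tkModuleRef, operand: 3),  # spans itself + 2 children
  ToyNode(kind: tkInt, operand: 7),        # module LitId
  ToyNode(kind: tkInt, operand: 42)]       # item id
let (a, b) = sons2(tree, 0)
assert tree[a].operand == 7 and tree[b].operand == 42
```

With `operand: 2` the second child would be computed one slot too early, which is exactly what the fixed decoder relies on when it reads `nkModuleRef` via `sons2`.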
@@ -234,24 +235,25 @@ proc addMissing(c: var PackedEncoder; p: PType) =
template storeNode(dest, src, field) =
  var nodeId: NodeId
  if src.field != nil:
    nodeId = getNodeId(c.m.bodies)
    toPackedNode(src.field, c.m.bodies, c)
    nodeId = getNodeId(m.bodies)
    toPackedNode(src.field, m.bodies, c, m)
  else:
    nodeId = emptyNodeId
  dest.field = nodeId

proc toPackedType(t: PType; c: var PackedEncoder): PackedItemId =
proc toPackedType(t: PType; c: var PackedEncoder; m: var PackedModule): PackedItemId =
  ## serialize a ptype
  if t.isNil: return nilItemId

  if t.uniqueId.module != c.thisModule:
    # XXX Assert here that it already was serialized in the foreign module!
    # it is a foreign type:
    return PackedItemId(module: toLitId(t.uniqueId.module.FileIndex, c), item: t.uniqueId.item)
    assert t.uniqueId.module >= 0
    return PackedItemId(module: toLitId(t.uniqueId.module.FileIndex, c, m), item: t.uniqueId.item)

  if not c.typeMarker.containsOrIncl(t.uniqueId.item):
    if t.uniqueId.item >= c.m.sh.types.len:
      setLen c.m.sh.types, t.uniqueId.item+1
    if t.uniqueId.item >= m.sh.types.len:
      setLen m.sh.types, t.uniqueId.item+1

    var p = PackedType(kind: t.kind, flags: t.flags, callConv: t.callConv,
                       size: t.size, align: t.align, nonUniqueId: t.itemId.item,

@@ -260,140 +262,148 @@ proc toPackedType(t: PType; c: var PackedEncoder): PackedItemId =

    for op, s in pairs t.attachedOps:
      c.addMissing s
      p.attachedOps[op] = s.safeItemId(c)
      p.attachedOps[op] = s.safeItemId(c, m)

    p.typeInst = t.typeInst.toPackedType(c)
    p.typeInst = t.typeInst.toPackedType(c, m)
    for kid in items t.sons:
      p.types.add kid.toPackedType(c)
      p.types.add kid.toPackedType(c, m)
    for i, s in items t.methods:
      c.addMissing s
      p.methods.add (i, s.safeItemId(c))
      p.methods.add (i, s.safeItemId(c, m))
    c.addMissing t.sym
    p.sym = t.sym.safeItemId(c)
    p.sym = t.sym.safeItemId(c, m)
    c.addMissing t.owner
    p.owner = t.owner.safeItemId(c)
    p.owner = t.owner.safeItemId(c, m)

    # fill the reserved slot, nothing else:
    c.m.sh.types[t.uniqueId.item] = p
    m.sh.types[t.uniqueId.item] = p

  result = PackedItemId(module: LitId(0), item: t.uniqueId.item)
  assert t.itemId.module >= 0
  result = PackedItemId(module: toLitId(t.itemId.module.FileIndex, c, m), item: t.uniqueId.item)
proc toPackedLib(l: PLib; c: var PackedEncoder): PackedLib =
proc toPackedLib(l: PLib; c: var PackedEncoder; m: var PackedModule): PackedLib =
  ## the plib hangs off the psym via the .annex field
  if l.isNil: return
  result.kind = l.kind
  result.generated = l.generated
  result.isOverriden = l.isOverriden
  result.name = toLitId($l.name, c)
  result.name = toLitId($l.name, m)
  storeNode(result, l, path)

proc toPackedSym*(s: PSym; c: var PackedEncoder): PackedItemId =
proc toPackedSym*(s: PSym; c: var PackedEncoder; m: var PackedModule): PackedItemId =
  ## serialize a psym
  if s.isNil: return nilItemId

  assert s.itemId.module >= 0

  if s.itemId.module != c.thisModule:
    # XXX Assert here that it already was serialized in the foreign module!
    # it is a foreign symbol:
    return PackedItemId(module: toLitId(s.itemId.module.FileIndex, c), item: s.itemId.item)
    assert s.itemId.module >= 0
    return PackedItemId(module: toLitId(s.itemId.module.FileIndex, c, m), item: s.itemId.item)

  if not c.symMarker.containsOrIncl(s.itemId.item):
    if s.itemId.item >= c.m.sh.syms.len:
      setLen c.m.sh.syms, s.itemId.item+1
    if s.itemId.item >= m.sh.syms.len:
      setLen m.sh.syms, s.itemId.item+1

    var p = PackedSym(kind: s.kind, flags: s.flags, info: s.info.toPackedInfo(c), magic: s.magic,
    var p = PackedSym(kind: s.kind, flags: s.flags, info: s.info.toPackedInfo(c, m), magic: s.magic,
                      position: s.position, offset: s.offset, options: s.options,
                      name: s.name.s.toLitId(c))
                      name: s.name.s.toLitId(m))

    storeNode(p, s, ast)
    storeNode(p, s, constraint)

    if s.kind in {skLet, skVar, skField, skForVar}:
      c.addMissing s.guard
      p.guard = s.guard.safeItemId(c)
      p.guard = s.guard.safeItemId(c, m)
      p.bitsize = s.bitsize
      p.alignment = s.alignment

    p.externalName = toLitId(if s.loc.r.isNil: "" else: $s.loc.r, c)
    p.externalName = toLitId(if s.loc.r.isNil: "" else: $s.loc.r, m)
    c.addMissing s.typ
    p.typ = s.typ.toPackedType(c)
    p.typ = s.typ.toPackedType(c, m)
    c.addMissing s.owner
    p.owner = s.owner.safeItemId(c)
    p.annex = toPackedLib(s.annex, c)
    p.owner = s.owner.safeItemId(c, m)
    p.annex = toPackedLib(s.annex, c, m)
    when hasFFI:
      p.cname = toLitId(s.cname, c)
      p.cname = toLitId(s.cname, m)

    # fill the reserved slot, nothing else:
    c.m.sh.syms[s.itemId.item] = p
    m.sh.syms[s.itemId.item] = p

  result = PackedItemId(module: LitId(0), item: s.itemId.item)
  assert s.itemId.module >= 0
  result = PackedItemId(module: toLitId(s.itemId.module.FileIndex, c, m), item: s.itemId.item)
proc toSymNode(n: PNode; ir: var PackedTree; c: var PackedEncoder) =
proc toSymNode(n: PNode; ir: var PackedTree; c: var PackedEncoder; m: var PackedModule) =
  ## store a local or remote psym reference in the tree
  assert n.kind == nkSym
  template s: PSym = n.sym
  let id = s.toPackedSym(c).item
  let id = s.toPackedSym(c, m).item
  if s.itemId.module == c.thisModule:
    # it is a symbol that belongs to the module we're currently
    # packing:
    ir.addSym(id, toPackedInfo(n.info, c))
    ir.addSym(id, toPackedInfo(n.info, c, m))
  else:
    # store it as an external module reference:
    addModuleRef(n, ir, c)
    addModuleRef(n, ir, c, m)

proc toPackedNode*(n: PNode; ir: var PackedTree; c: var PackedEncoder) =
proc toPackedNode*(n: PNode; ir: var PackedTree; c: var PackedEncoder; m: var PackedModule) =
  ## serialize a node into the tree
  if n.isNil: return
  let info = toPackedInfo(n.info, c)
  let info = toPackedInfo(n.info, c, m)
  case n.kind
  of nkNone, nkEmpty, nkNilLit, nkType:
    ir.nodes.add PackedNode(kind: n.kind, flags: n.flags, operand: 0,
                            typeId: toPackedType(n.typ, c), info: info)
                            typeId: toPackedType(n.typ, c, m), info: info)
  of nkIdent:
    ir.nodes.add PackedNode(kind: n.kind, flags: n.flags,
                            operand: int32 getOrIncl(c.m.sh.strings, n.ident.s),
                            typeId: toPackedType(n.typ, c), info: info)
                            operand: int32 getOrIncl(m.sh.strings, n.ident.s),
                            typeId: toPackedType(n.typ, c, m), info: info)
  of nkSym:
    toSymNode(n, ir, c)
    toSymNode(n, ir, c, m)
  of directIntLit:
    ir.nodes.add PackedNode(kind: n.kind, flags: n.flags,
                            operand: int32(n.intVal),
                            typeId: toPackedType(n.typ, c), info: info)
                            typeId: toPackedType(n.typ, c, m), info: info)
  of externIntLit:
    ir.nodes.add PackedNode(kind: n.kind, flags: n.flags,
                            operand: int32 getOrIncl(c.m.sh.integers, n.intVal),
                            typeId: toPackedType(n.typ, c), info: info)
                            operand: int32 getOrIncl(m.sh.integers, n.intVal),
                            typeId: toPackedType(n.typ, c, m), info: info)
  of nkStrLit..nkTripleStrLit:
    ir.nodes.add PackedNode(kind: n.kind, flags: n.flags,
                            operand: int32 getOrIncl(c.m.sh.strings, n.strVal),
                            typeId: toPackedType(n.typ, c), info: info)
                            operand: int32 getOrIncl(m.sh.strings, n.strVal),
                            typeId: toPackedType(n.typ, c, m), info: info)
  of nkFloatLit..nkFloat128Lit:
    ir.nodes.add PackedNode(kind: n.kind, flags: n.flags,
                            operand: int32 getOrIncl(c.m.sh.floats, n.floatVal),
                            typeId: toPackedType(n.typ, c), info: info)
                            operand: int32 getOrIncl(m.sh.floats, n.floatVal),
                            typeId: toPackedType(n.typ, c, m), info: info)
  else:
    let patchPos = ir.prepare(n.kind, n.flags,
                              toPackedType(n.typ, c), info)
                              toPackedType(n.typ, c, m), info)
    for i in 0..<n.len:
      toPackedNode(n[i], ir, c)
      toPackedNode(n[i], ir, c, m)
    ir.patch patchPos

  when false:
    ir.flush c # flush any pending types and symbols

proc toPackedNodeIgnoreProcDefs*(n: PNode, encoder: var PackedEncoder) =
proc addPragmaComputation*(c: var PackedEncoder; m: var PackedModule; n: PNode) =
  toPackedNode(n, m.toReplay, c, m)

proc toPackedNodeIgnoreProcDefs*(n: PNode, encoder: var PackedEncoder; m: var PackedModule) =
  case n.kind
  of routineDefs:
    # we serialize n[namePos].sym instead
    if n[namePos].kind == nkSym:
      discard toPackedSym(n[namePos].sym, encoder)
      discard toPackedSym(n[namePos].sym, encoder, m)
    else:
      toPackedNode(n, encoder.m.topLevel, encoder)
      toPackedNode(n, m.topLevel, encoder, m)
  else:
    toPackedNode(n, encoder.m.topLevel, encoder)
    toPackedNode(n, m.topLevel, encoder, m)

proc toPackedNodeTopLevel*(n: PNode, encoder: var PackedEncoder) =
  toPackedNodeIgnoreProcDefs(n, encoder)
  flush encoder
proc toPackedNodeTopLevel*(n: PNode, encoder: var PackedEncoder; m: var PackedModule) =
  toPackedNodeIgnoreProcDefs(n, encoder, m)
  flush encoder, m

proc storePrim*(f: var RodFile; x: PackedType) =
  for y in fields(x):
@@ -456,6 +466,7 @@ proc loadRodFile*(filename: AbsoluteFile; m: var PackedModule; config: ConfigRef
  loadSeqSection pureEnumsSection, m.pureEnums
  loadSeqSection macroUsagesSection, m.macroUsages

  loadSeqSection toReplaySection, m.toReplay.nodes
  loadSeqSection topLevelSection, m.topLevel.nodes
  loadSeqSection bodiesSection, m.bodies.nodes
  loadSeqSection symsSection, m.sh.syms

@@ -470,14 +481,14 @@ proc storeError(err: RodFileError; filename: AbsoluteFile) =
  echo "Error: ", $err, "; couldn't write to ", filename.string
  removeFile(filename.string)

proc saveRodFile*(filename: AbsoluteFile; encoder: var PackedEncoder) =
proc saveRodFile*(filename: AbsoluteFile; encoder: var PackedEncoder; m: var PackedModule) =
  #rememberConfig(encoder, encoder.config)

  var f = rodfiles.create(filename.string)
  f.storeHeader()
  f.storeSection configSection
  f.storePrim encoder.m.definedSymbols
  f.storePrim encoder.m.cfg
  f.storePrim m.definedSymbols
  f.storePrim m.cfg

  template storeSeqSection(section, data) {.dirty.} =
    f.storeSection section

@@ -487,33 +498,34 @@ proc saveRodFile*(filename: AbsoluteFile; encoder: var PackedEncoder) =
    f.storeSection section
    f.store data

  storeTabSection stringsSection, encoder.m.sh.strings
  storeTabSection stringsSection, m.sh.strings

  storeSeqSection checkSumsSection, encoder.m.includes
  storeSeqSection checkSumsSection, m.includes

  storeSeqSection depsSection, encoder.m.imports
  storeSeqSection depsSection, m.imports

  storeTabSection integersSection, encoder.m.sh.integers
  storeTabSection floatsSection, encoder.m.sh.floats
  storeTabSection integersSection, m.sh.integers
  storeTabSection floatsSection, m.sh.floats

  storeSeqSection exportsSection, encoder.m.exports
  storeSeqSection exportsSection, m.exports

  storeSeqSection reexportsSection, encoder.m.reexports
  storeSeqSection reexportsSection, m.reexports

  storeSeqSection compilerProcsSection, encoder.m.compilerProcs
  storeSeqSection compilerProcsSection, m.compilerProcs

  storeSeqSection trmacrosSection, encoder.m.trmacros
  storeSeqSection convertersSection, encoder.m.converters
  storeSeqSection methodsSection, encoder.m.methods
  storeSeqSection pureEnumsSection, encoder.m.pureEnums
  storeSeqSection macroUsagesSection, encoder.m.macroUsages
  storeSeqSection trmacrosSection, m.trmacros
  storeSeqSection convertersSection, m.converters
  storeSeqSection methodsSection, m.methods
  storeSeqSection pureEnumsSection, m.pureEnums
  storeSeqSection macroUsagesSection, m.macroUsages

  storeSeqSection topLevelSection, encoder.m.topLevel.nodes
  storeSeqSection toReplaySection, m.toReplay.nodes
  storeSeqSection topLevelSection, m.topLevel.nodes

  storeSeqSection bodiesSection, encoder.m.bodies.nodes
  storeSeqSection symsSection, encoder.m.sh.syms
  storeSeqSection bodiesSection, m.bodies.nodes
  storeSeqSection symsSection, m.sh.syms

  storeSeqSection typesSection, encoder.m.sh.types
  storeSeqSection typesSection, m.sh.types
  close(f)
  if f.err != ok:
    storeError(f.err, filename)
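Note that the new `toReplaySection` is written here in exactly the position where `loadRodFile` reads it: writer and reader must agree on section order. A simplified stand-alone model of that invariant follows; the real format also stores a header and binary payloads, so this is only a sketch of the ordering rule.

```nim
import std/streams

type Section = enum
  configSection, stringsSection, toReplaySection, topLevelSection

proc storeSection(s: Stream; sec: Section) =
  s.write int32(sec)            # a section tag written in sequence

proc loadSection(s: Stream; expected: Section): bool =
  ## the analogue of `wrongSection`: fails when the tags get out of sync
  s.readInt32() == int32(expected)

var buf = newStringStream()
for sec in low(Section)..high(Section): storeSection(buf, sec)
buf.setPosition(0)
for sec in low(Section)..high(Section):
  doAssert loadSection(buf, sec)
```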
@@ -528,7 +540,7 @@ proc saveRodFile*(filename: AbsoluteFile; encoder: var PackedEncoder) =

type
  PackedDecoder* = object
    thisModule*: int32
    lastModule*: int
    lastLit*: LitId
    lastFile*: FileIndex # remember the last lookup entry.
    config*: ConfigRef

@@ -537,6 +549,7 @@ type
type
  ModuleStatus* = enum
    undefined,
    storing,
    loading,
    loaded,
    outdated

@@ -544,7 +557,7 @@ type
  LoadedModule* = object
    status*: ModuleStatus
    symsInit, typesInit: bool
    fromDisk: PackedModule
    fromDisk*: PackedModule
    syms: seq[PSym] # indexed by itemId
    types: seq[PType]
    module*: PSym # the one true module symbol.

@@ -552,90 +565,91 @@ type

  PackedModuleGraph* = seq[LoadedModule] # indexed by FileIndex
proc loadType(c: var PackedDecoder; g: var PackedModuleGraph; t: PackedItemId): PType
proc loadSym(c: var PackedDecoder; g: var PackedModuleGraph; s: PackedItemId): PSym
proc loadType(c: var PackedDecoder; g: var PackedModuleGraph; thisModule: int; t: PackedItemId): PType
proc loadSym(c: var PackedDecoder; g: var PackedModuleGraph; thisModule: int; s: PackedItemId): PSym

proc toFileIndexCached(c: var PackedDecoder; g: var PackedModuleGraph; f: LitId): FileIndex =
  if c.lastLit == f:
proc toFileIndexCached(c: var PackedDecoder; g: var PackedModuleGraph; thisModule: int; f: LitId): FileIndex =
  if c.lastLit == f and c.lastModule == thisModule:
    result = c.lastFile
  else:
    result = toFileIndex(f, g[c.thisModule].fromDisk, c.config)
    result = toFileIndex(f, g[thisModule].fromDisk, c.config)
    c.lastModule = thisModule
    c.lastLit = f
    c.lastFile = result

proc translateLineInfo(c: var PackedDecoder; g: var PackedModuleGraph;
proc translateLineInfo(c: var PackedDecoder; g: var PackedModuleGraph; thisModule: int;
                       x: PackedLineInfo): TLineInfo =
  assert g[c.thisModule].status == loaded
  assert g[thisModule].status in {loaded, storing}
  result = TLineInfo(line: x.line, col: x.col,
                     fileIndex: toFileIndexCached(c, g, x.file))
                     fileIndex: toFileIndexCached(c, g, thisModule, x.file))

proc loadNodes(c: var PackedDecoder; g: var PackedModuleGraph;
proc loadNodes(c: var PackedDecoder; g: var PackedModuleGraph; thisModule: int;
               tree: PackedTree; n: NodePos): PNode =
  let k = n.kind
  result = newNodeIT(k, translateLineInfo(c, g, n.info),
                     loadType(c, g, n.typ))
  result = newNodeIT(k, translateLineInfo(c, g, thisModule, n.info),
                     loadType(c, g, thisModule, n.typ))
  result.flags = n.flags

  case k
  of nkEmpty, nkNilLit, nkType:
    discard
  of nkIdent:
    result.ident = getIdent(c.cache, g[c.thisModule].fromDisk.sh.strings[n.litId])
    result.ident = getIdent(c.cache, g[thisModule].fromDisk.sh.strings[n.litId])
  of nkSym:
    result.sym = loadSym(c, g, PackedItemId(module: LitId(0), item: tree.nodes[n.int].operand))
    result.sym = loadSym(c, g, thisModule, PackedItemId(module: LitId(0), item: tree.nodes[n.int].operand))
  of directIntLit:
    result.intVal = tree.nodes[n.int].operand
  of externIntLit:
    result.intVal = g[c.thisModule].fromDisk.sh.integers[n.litId]
    result.intVal = g[thisModule].fromDisk.sh.integers[n.litId]
  of nkStrLit..nkTripleStrLit:
    result.strVal = g[c.thisModule].fromDisk.sh.strings[n.litId]
    result.strVal = g[thisModule].fromDisk.sh.strings[n.litId]
  of nkFloatLit..nkFloat128Lit:
    result.floatVal = g[c.thisModule].fromDisk.sh.floats[n.litId]
    result.floatVal = g[thisModule].fromDisk.sh.floats[n.litId]
  of nkModuleRef:
    let (n1, n2) = sons2(tree, n)
    assert n1.kind == nkInt32Lit
    assert n2.kind == nkInt32Lit
    transitionNoneToSym(result)
    result.sym = loadSym(c, g, PackedItemId(module: n1.litId, item: tree.nodes[n2.int].operand))
    result.sym = loadSym(c, g, thisModule, PackedItemId(module: n1.litId, item: tree.nodes[n2.int].operand))
  else:
    for n0 in sonsReadonly(tree, n):
      result.add loadNodes(c, g, tree, n0)
      result.add loadNodes(c, g, thisModule, tree, n0)

proc loadProcHeader(c: var PackedDecoder; g: var PackedModuleGraph;
proc loadProcHeader(c: var PackedDecoder; g: var PackedModuleGraph; thisModule: int;
                    tree: PackedTree; n: NodePos): PNode =
  # do not load the body of the proc. This will be done later in
  # getProcBody, if required.
  let k = n.kind
  result = newNodeIT(k, translateLineInfo(c, g, n.info),
                     loadType(c, g, n.typ))
  result = newNodeIT(k, translateLineInfo(c, g, thisModule, n.info),
                     loadType(c, g, thisModule, n.typ))
  result.flags = n.flags
  assert k in {nkProcDef, nkMethodDef, nkIteratorDef, nkFuncDef, nkConverterDef}
  var i = 0
  for n0 in sonsReadonly(tree, n):
    if i != bodyPos:
      result.add loadNodes(c, g, tree, n0)
      result.add loadNodes(c, g, thisModule, tree, n0)
    else:
      result.add nil
      result.addAllowNil nil
    inc i

proc loadProcBody(c: var PackedDecoder; g: var PackedModuleGraph;
proc loadProcBody(c: var PackedDecoder; g: var PackedModuleGraph; thisModule: int;
                  tree: PackedTree; n: NodePos): PNode =
  var i = 0
  for n0 in sonsReadonly(tree, n):
    if i == bodyPos:
      result = loadNodes(c, g, tree, n0)
      result = loadNodes(c, g, thisModule, tree, n0)
    inc i
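A small sketch of the lazy-body idea behind `loadProcHeader`/`loadProcBody`: the header is materialized with a nil placeholder at `bodyPos`, and the body is decoded only when it is actually requested. Plain closures stand in for the packed tree here, and the `LazyProc`/`getProcBody` names are illustrative only.

```nim
type
  LazyProc = object
    header: seq[string]          # everything except the body
    loadBody: proc (): string    # decodes the body on demand
    body: string
    bodyLoaded: bool

proc getProcBody(p: var LazyProc): string =
  if not p.bodyLoaded:
    p.body = p.loadBody()        # the expensive decode happens here, once
    p.bodyLoaded = true
  p.body

var p = LazyProc(header: @["proc f(x: int): int"],
                 loadBody: proc (): string = "result = x + 1")
doAssert getProcBody(p) == "result = x + 1"
```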
proc moduleIndex*(c: var PackedDecoder; g: var PackedModuleGraph;
proc moduleIndex*(c: var PackedDecoder; g: var PackedModuleGraph; thisModule: int;
                  s: PackedItemId): int32 {.inline.} =
  result = if s.module == LitId(0): c.thisModule
           else: toFileIndexCached(c, g, s.module).int32
  result = if s.module == LitId(0): thisModule.int32
           else: toFileIndexCached(c, g, thisModule, s.module).int32

proc symHeaderFromPacked(c: var PackedDecoder; g: var PackedModuleGraph;
                         s: PackedSym; si, item: int32): PSym =
  result = PSym(itemId: ItemId(module: si, item: item),
    kind: s.kind, magic: s.magic, flags: s.flags,
    info: translateLineInfo(c, g, s.info),
    info: translateLineInfo(c, g, si, s.info),
    options: s.options,
    position: s.position,
    name: getIdent(c.cache, g[si].fromDisk.sh.strings[s.name])
@@ -643,11 +657,11 @@ proc symHeaderFromPacked(c: var PackedDecoder; g: var PackedModuleGraph;

template loadAstBody(p, field) =
  if p.field != emptyNodeId:
    result.field = loadNodes(c, g, g[si].fromDisk.bodies, NodePos p.field)
    result.field = loadNodes(c, g, si, g[si].fromDisk.bodies, NodePos p.field)

template loadAstBodyLazy(p, field) =
  if p.field != emptyNodeId:
    result.field = loadProcHeader(c, g, g[si].fromDisk.bodies, NodePos p.field)
    result.field = loadProcHeader(c, g, si, g[si].fromDisk.bodies, NodePos p.field)

proc loadLib(c: var PackedDecoder; g: var PackedModuleGraph;
             si, item: int32; l: PackedLib): PLib =
@@ -661,7 +675,7 @@ proc loadLib(c: var PackedDecoder; g: var PackedModuleGraph;

proc symBodyFromPacked(c: var PackedDecoder; g: var PackedModuleGraph;
                       s: PackedSym; si, item: int32; result: PSym) =
  result.typ = loadType(c, g, s.typ)
  result.typ = loadType(c, g, si, s.typ)
  loadAstBody(s, constraint)
  if result.kind in {skProc, skFunc, skIterator, skConverter, skMethod}:
    loadAstBodyLazy(s, ast)
@@ -672,20 +686,20 @@ proc symBodyFromPacked(c: var PackedDecoder; g: var PackedModuleGraph;
    result.cname = g[si].fromDisk.sh.strings[s.cname]

  if s.kind in {skLet, skVar, skField, skForVar}:
    result.guard = loadSym(c, g, s.guard)
    result.guard = loadSym(c, g, si, s.guard)
    result.bitsize = s.bitsize
    result.alignment = s.alignment
  result.owner = loadSym(c, g, s.owner)
  result.owner = loadSym(c, g, si, s.owner)
  let externalName = g[si].fromDisk.sh.strings[s.externalName]
  if externalName != "":
    result.loc.r = rope externalName

proc loadSym(c: var PackedDecoder; g: var PackedModuleGraph; s: PackedItemId): PSym =
proc loadSym(c: var PackedDecoder; g: var PackedModuleGraph; thisModule: int; s: PackedItemId): PSym =
  if s == nilItemId:
    result = nil
  else:
    let si = moduleIndex(c, g, s)
    assert g[si].status == loaded
    let si = moduleIndex(c, g, thisModule, s)
    assert g[si].status in {loaded, storing}
    if not g[si].symsInit:
      g[si].symsInit = true
      setLen g[si].syms, g[si].fromDisk.sh.syms.len
@@ -714,23 +728,23 @@ proc typeHeaderFromPacked(c: var PackedDecoder; g: var PackedModuleGraph;

proc typeBodyFromPacked(c: var PackedDecoder; g: var PackedModuleGraph;
                        t: PackedType; si, item: int32; result: PType) =
  result.sym = loadSym(c, g, t.sym)
  result.owner = loadSym(c, g, t.owner)
  result.sym = loadSym(c, g, si, t.sym)
  result.owner = loadSym(c, g, si, t.owner)
  for op, item in pairs t.attachedOps:
    result.attachedOps[op] = loadSym(c, g, item)
  result.typeInst = loadType(c, g, t.typeInst)
    result.attachedOps[op] = loadSym(c, g, si, item)
  result.typeInst = loadType(c, g, si, t.typeInst)
  for son in items t.types:
    result.sons.add loadType(c, g, son)
    result.sons.add loadType(c, g, si, son)
  loadAstBody(t, n)
  for gen, id in items t.methods:
    result.methods.add((gen, loadSym(c, g, id)))
    result.methods.add((gen, loadSym(c, g, si, id)))

proc loadType(c: var PackedDecoder; g: var PackedModuleGraph; t: PackedItemId): PType =
proc loadType(c: var PackedDecoder; g: var PackedModuleGraph; thisModule: int; t: PackedItemId): PType =
  if t == nilItemId:
    result = nil
  else:
    let si = moduleIndex(c, g, t)
    assert g[si].status == loaded
    let si = moduleIndex(c, g, thisModule, t)
    assert g[si].status in {loaded, storing}
    if not g[si].typesInit:
      g[si].typesInit = true
      setLen g[si].types, g[si].fromDisk.sh.types.len
@@ -744,7 +758,8 @@ proc loadType(c: var PackedDecoder; g: var PackedModuleGraph; t: PackedItemId):
    else:
      result = g[si].types[t.item]

proc setupLookupTables(m: var LoadedModule; conf: ConfigRef; cache: IdentCache; fileIdx: FileIndex) =
proc setupLookupTables(g: var PackedModuleGraph; conf: ConfigRef; cache: IdentCache;
                       fileIdx: FileIndex; m: var LoadedModule) =
  m.iface = initTable[PIdent, seq[PackedItemId]]()
  for e in m.fromDisk.exports:
    let nameLit = e[0]
@@ -758,7 +773,24 @@ proc setupLookupTables(m: var LoadedModule; conf: ConfigRef; cache: IdentCache;
  # mechanism, which we do in order to assign each module a persistent ID.
  m.module = PSym(kind: skModule, itemId: ItemId(module: int32(fileIdx), item: 0'i32),
                  name: getIdent(cache, splitFile(filename).name),
                  info: newLineInfo(fileIdx, 1, 1))
                  info: newLineInfo(fileIdx, 1, 1),
                  position: int(fileIdx))

proc loadToReplayNodes(g: var PackedModuleGraph; conf: ConfigRef; cache: IdentCache;
                       fileIdx: FileIndex; m: var LoadedModule) =
  m.module.ast = newNode(nkStmtList)
  if m.fromDisk.toReplay.len > 0:
    var decoder = PackedDecoder(
      lastModule: int32(-1),
      lastLit: LitId(0),
      lastFile: FileIndex(-1),
      config: conf,
      cache: cache)
    var p = 0
    while p < m.fromDisk.toReplay.len:
      m.module.ast.add loadNodes(decoder, g, int(fileIdx), m.fromDisk.toReplay, NodePos p)
      let s = span(m.fromDisk.toReplay, p)
      inc p, s
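
Editorial aside, not part of the commit: the toReplay stream decoded above carries the side effects a module performed while it was originally compiled, e.g. pragma state recorded via recordPragma and macrocache mutations recorded in macrocache.nim, both patched later in this diff. A hypothetical user module whose compilation would leave such replay nodes behind; the flag name and cache key are made up.

# Sketch: compile-time side effects that must be replayed when this module
# is satisfied from its .rod file instead of being recompiled.
{.passC: "-DUSE_FOO".}            # global backend state, recorded via recordPragma
import macrocache
const ids = CacheCounter"mypkg.ids"
static:
  inc ids                         # global VM state change, recorded via recordInc
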
proc needsRecompile(g: var PackedModuleGraph; conf: ConfigRef; cache: IdentCache;
                    fileIdx: FileIndex): bool =
@@ -783,7 +815,7 @@ proc needsRecompile(g: var PackedModuleGraph; conf: ConfigRef; cache: IdentCache
          result = true

      if not result:
        setupLookupTables(g[m], conf, cache, fileIdx)
        setupLookupTables(g, conf, cache, fileIdx, g[m])
      g[m].status = if result: outdated else: loaded
    else:
      loadError(err, rod)
@@ -791,7 +823,7 @@ proc needsRecompile(g: var PackedModuleGraph; conf: ConfigRef; cache: IdentCache
      result = true
  of loading, loaded:
    result = false
  of outdated:
  of outdated, storing:
    result = true

proc moduleFromRodFile*(g: var PackedModuleGraph; conf: ConfigRef; cache: IdentCache;
@@ -802,10 +834,11 @@ proc moduleFromRodFile*(g: var PackedModuleGraph; conf: ConfigRef; cache: IdentC
  else:
    result = g[int fileIdx].module
    assert result != nil
    loadToReplayNodes(g, conf, cache, fileIdx, g[int fileIdx])

template setupDecoder() {.dirty.} =
  var decoder = PackedDecoder(
    thisModule: int32(module),
    lastModule: int32(-1),
    lastLit: LitId(0),
    lastFile: FileIndex(-1),
    config: config,
@@ -815,55 +848,70 @@ proc loadProcBody*(config: ConfigRef, cache: IdentCache;
                   g: var PackedModuleGraph; s: PSym): PNode =
  let mId = s.itemId.module
  var decoder = PackedDecoder(
    thisModule: mId,
    lastModule: int32(-1),
    lastLit: LitId(0),
    lastFile: FileIndex(-1),
    config: config,
    cache: cache)
  let pos = g[mId].fromDisk.sh.syms[s.itemId.item].ast
  assert pos != emptyNodeId
  result = loadProcBody(decoder, g, g[mId].fromDisk.bodies, NodePos pos)
  result = loadProcBody(decoder, g, mId, g[mId].fromDisk.bodies, NodePos pos)

proc simulateLoadedModule*(g: var PackedModuleGraph; conf: ConfigRef; cache: IdentCache;
                           moduleSym: PSym; m: PackedModule) =
  # For now only used for heavy debugging. In the future we could use this to reduce the
  # compiler's memory consumption.
  let idx = moduleSym.position
  assert g[idx].status in {storing}
  g[idx].status = loaded
  assert g[idx].module == moduleSym
  setupLookupTables(g, conf, cache, FileIndex(idx), g[idx])
  loadToReplayNodes(g, conf, cache, FileIndex(idx), g[idx])

# ---------------- symbol table handling ----------------

type
  RodIter* = object
    decoder: PackedDecoder
    values: seq[PackedItemId]
    i: int
    i, module: int

proc initRodIter*(it: var RodIter; config: ConfigRef, cache: IdentCache;
                  g: var PackedModuleGraph; module: FileIndex;
                  name: PIdent): PSym =
  it.decoder = PackedDecoder(
    thisModule: int32(module),
    lastModule: int32(-1),
    lastLit: LitId(0),
    lastFile: FileIndex(-1),
    config: config,
    cache: cache)
  it.values = g[int module].iface.getOrDefault(name)
  it.i = 0
  it.module = int(module)
  if it.i < it.values.len:
    result = loadSym(it.decoder, g, it.values[it.i])
    result = loadSym(it.decoder, g, int(module), it.values[it.i])
    inc it.i

proc initRodIterAllSyms*(it: var RodIter; config: ConfigRef, cache: IdentCache;
                         g: var PackedModuleGraph; module: FileIndex): PSym =
  it.decoder = PackedDecoder(
    thisModule: int32(module),
    lastModule: int32(-1),
    lastLit: LitId(0),
    lastFile: FileIndex(-1),
    config: config,
    cache: cache)
  it.values = @[]
  it.module = int(module)
  for v in g[int module].iface.values:
    it.values.add v
  it.i = 0
  if it.i < it.values.len:
    result = loadSym(it.decoder, g, it.values[it.i])
    result = loadSym(it.decoder, g, int(module), it.values[it.i])
    inc it.i

proc nextRodIter*(it: var RodIter; g: var PackedModuleGraph): PSym =
  if it.i < it.values.len:
    result = loadSym(it.decoder, g, it.values[it.i])
    result = loadSym(it.decoder, g, it.module, it.values[it.i])
    inc it.i
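
Editorial aside, not part of the commit: a sketch of how the RodIter API above is meant to be driven. `config`, `cache`, `g`, `module` and `name` are the usual decoder inputs; `handle` is a placeholder, not a compiler API.

# Sketch: walk every cached interface symbol registered under `name`.
var it: RodIter
var s = initRodIter(it, config, cache, g, module, name)
while s != nil:
  handle(s)              # placeholder for whatever the caller does with the symbol
  s = nextRodIter(it, g)
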
iterator interfaceSymbols*(config: ConfigRef, cache: IdentCache;
@@ -872,7 +920,7 @@ iterator interfaceSymbols*(config: ConfigRef, cache: IdentCache;
  setupDecoder()
  let values = g[int module].iface.getOrDefault(name)
  for pid in values:
    let s = loadSym(decoder, g, pid)
    let s = loadSym(decoder, g, int(module), pid)
    assert s != nil
    yield s

@@ -881,5 +929,28 @@ proc interfaceSymbol*(config: ConfigRef, cache: IdentCache;
                      name: PIdent): PSym =
  setupDecoder()
  let values = g[int module].iface.getOrDefault(name)
  result = loadSym(decoder, g, values[0])
  result = loadSym(decoder, g, int(module), values[0])

# ------------------------- .rod file viewer ---------------------------------

proc rodViewer*(rodfile: AbsoluteFile; config: ConfigRef, cache: IdentCache) =
  var m: PackedModule
  if loadRodFile(rodfile, m, config) != ok:
    echo "Error: could not load: ", rodfile.string
    quit 1

  when true:
    echo "exports:"
    for ex in m.exports:
      echo " ", m.sh.strings[ex[0]]
      assert ex[0] == m.sh.syms[ex[1]].name
      # ex[1] int32

    echo "reexports:"
    for ex in m.reexports:
      echo " ", m.sh.strings[ex[0]]
    # reexports*: seq[(LitId, PackedItemId)]
  echo "symbols: ", m.sh.syms.len, " types: ", m.sh.types.len,
    " top level nodes: ", m.topLevel.nodes.len, " other nodes: ", m.bodies.nodes.len,
    " strings: ", m.sh.strings.len, " integers: ", m.sh.integers.len,
    " floats: ", m.sh.floats.len
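
Editorial aside, not part of the commit: together with the cmdRod branch added to main.nim further down, the viewer gives a quick way to inspect a .rod file while debugging. Invoked programmatically it would look roughly like this; the constructors are the usual compiler entry points and the file name is made up.

# Sketch: dump the exports, reexports and node/string counts of a .rod file.
let conf = newConfigRef()
let cache = newIdentCache()
rodViewer(AbsoluteFile"nimcache/foo.rod", conf, cache)
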
@@ -279,3 +279,4 @@ proc initMsgConfig*(): MsgConfig =
  result.filenameToIndexTbl = initTable[string, FileIndex]()
  result.fileInfos = @[]
  result.errorOutputs = {eStdOut, eStdErr}
  result.filenameToIndexTbl["???"] = FileIndex(-1)

@@ -9,71 +9,36 @@

## This module implements helpers for the macro cache.

import lineinfos, ast, modulegraphs, vmdef
import lineinfos, ast, vmdef

proc append(c: PCtx; n: PNode) =
  c.vmstateDiff.add((c.module, n))

proc recordInc*(c: PCtx; info: TLineInfo; key: string; by: BiggestInt) =
  var recorded = newNodeI(nkCommentStmt, info)
  var recorded = newNodeI(nkReplayAction, info)
  recorded.add newStrNode("inc", info)
  recorded.add newStrNode(key, info)
  recorded.add newIntNode(nkIntLit, by)
  c.graph.recordStmt(c.graph, c.module, recorded)
  c.append(recorded)

proc recordPut*(c: PCtx; info: TLineInfo; key: string; k: string; val: PNode) =
  var recorded = newNodeI(nkCommentStmt, info)
  var recorded = newNodeI(nkReplayAction, info)
  recorded.add newStrNode("put", info)
  recorded.add newStrNode(key, info)
  recorded.add newStrNode(k, info)
  recorded.add copyTree(val)
  c.graph.recordStmt(c.graph, c.module, recorded)
  c.append(recorded)

proc recordAdd*(c: PCtx; info: TLineInfo; key: string; val: PNode) =
  var recorded = newNodeI(nkCommentStmt, info)
  var recorded = newNodeI(nkReplayAction, info)
  recorded.add newStrNode("add", info)
  recorded.add newStrNode(key, info)
  recorded.add copyTree(val)
  c.graph.recordStmt(c.graph, c.module, recorded)
  c.append(recorded)

proc recordIncl*(c: PCtx; info: TLineInfo; key: string; val: PNode) =
  var recorded = newNodeI(nkCommentStmt, info)
  var recorded = newNodeI(nkReplayAction, info)
  recorded.add newStrNode("incl", info)
  recorded.add newStrNode(key, info)
  recorded.add copyTree(val)
  c.graph.recordStmt(c.graph, c.module, recorded)

when false:
  proc genCall3(g: ModuleGraph; m: TMagic; s: string; a, b, c: PNode): PNode =
    newTree(nkStaticStmt, newTree(nkCall, createMagic(g, s, m).newSymNode, a, b, c))

  proc genCall2(g: ModuleGraph; m: TMagic; s: string; a, b: PNode): PNode =
    newTree(nkStaticStmt, newTree(nkCall, createMagic(g, s, m).newSymNode, a, b))

  template nodeFrom(s: string): PNode =
    var res = newStrNode(s, info)
    res.typ = getSysType(g, info, tyString)
    res

  template nodeFrom(i: BiggestInt): PNode =
    var res = newIntNode(i, info)
    res.typ = getSysType(g, info, tyInt)
    res

  template nodeFrom(n: PNode): PNode = copyTree(n)

  template record(call) =
    g.recordStmt(g, c.module, call)

  proc recordInc*(c: PCtx; info: TLineInfo; key: string; by: BiggestInt) =
    let g = c.graph
    record genCall2(mNccInc, "inc", nodeFrom key, nodeFrom by)

  proc recordPut*(c: PCtx; info: TLineInfo; key: string; k: string; val: PNode) =
    let g = c.graph
    record genCall3(mNctPut, "[]=", nodeFrom key, nodeFrom k, nodeFrom val)

  proc recordAdd*(c: PCtx; info: TLineInfo; key: string; val: PNode) =
    let g = c.graph
    record genCall2(mNcsAdd, "add", nodeFrom key, nodeFrom val)

  proc recordIncl*(c: PCtx; info: TLineInfo; key: string; val: PNode) =
    let g = c.graph
    record genCall2(mNcsIncl, "incl", nodeFrom key, nodeFrom val)
  c.append(recorded)
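
Editorial aside, not part of the commit: the nkReplayAction nodes recorded above use a simple positional layout: the action name first, then the key, then the payload. The real consumer is ic/replayer.nim, imported by modules.nim below but not shown in this excerpt; a hypothetical dispatcher could replay them roughly like so, assuming the usual compiler imports (ast, modulegraphs, std/tables).

# Sketch only, not the actual replayer: apply one recorded macrocache action
# to the module graph's shared 'macrocache' state.
proc replayOne(g: ModuleGraph; n: PNode) =
  case n[0].strVal
  of "inc":
    g.cacheCounters.mgetOrPut(n[1].strVal, 0) += n[2].intVal
  of "add":
    g.cacheSeqs.mgetOrPut(n[1].strVal, newNode(nkStmtList)).add n[2]
  else:
    discard # "put" and "incl" would be handled analogously
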
@@ -21,6 +21,8 @@ import
  modules,
  modulegraphs, tables, lineinfos, pathutils, vmprofiler

from ic / to_packed_ast import rodViewer

when not defined(leanCompiler):
  import jsgen, docgen, docgen2

@@ -158,6 +160,10 @@ proc commandScan(cache: IdentCache, config: ConfigRef) =
  else:
    rawMessage(config, errGenerated, "cannot open file: " & f.string)

proc commandView(graph: ModuleGraph) =
  let f = toAbsolute(mainCommandArg(graph.config), AbsoluteDir getCurrentDir()).addFileExt(RodExt)
  rodViewer(f, graph.config, graph.cache)

const
  PrintRopeCacheStats = false

@@ -311,10 +317,10 @@ proc mainCommand*(graph: ModuleGraph) =
  of cmdParse:
    wantMainModule(conf)
    discard parseFile(conf.projectMainIdx, cache, conf)
  of cmdScan:
  of cmdRod:
    wantMainModule(conf)
    commandScan(cache, conf)
    msgWriteln(conf, "Beware: Indentation tokens depend on the parser's state!")
    commandView(graph)
    #msgWriteln(conf, "Beware: Indentation tokens depend on the parser's state!")
  of cmdInteractive: commandInteractive(graph)
  of cmdNimscript:
    if conf.projectIsCmd or conf.projectIsStdin: discard

@@ -29,13 +29,14 @@ type

  ModuleGraph* = ref object
    ifaces*: seq[Iface] ## indexed by int32 fileIdx
    packed: PackedModuleGraph
    packed*: PackedModuleGraph
    startupPackedConfig*: PackedConfig
    packageSyms*: TStrTable
    deps*: IntSet # the dependency graph or potentially its transitive closure.
    importDeps*: Table[FileIndex, seq[FileIndex]] # explicit import module dependencies
    suggestMode*: bool # whether we are in nimsuggest mode or not.
    invalidTransitiveClosure: bool
    systemModuleComplete*: bool
    inclToMod*: Table[FileIndex, FileIndex] # mapping of include file to the
                                            # first module that included it
    importStack*: seq[FileIndex] # The current import stack. Used for detecting recursive
@@ -61,7 +62,6 @@ type
    symBodyHashes*: Table[int, SigHash] # symId to digest mapping
    importModuleCallback*: proc (graph: ModuleGraph; m: PSym, fileIdx: FileIndex): PSym {.nimcall.}
    includeFileCallback*: proc (graph: ModuleGraph; m: PSym, fileIdx: FileIndex): PNode {.nimcall.}
    recordStmt*: proc (graph: ModuleGraph; m: PSym; n: PNode) {.nimcall.}
    cacheSeqs*: Table[string, PNode] # state that is shared to support the 'macrocache' API
    cacheCounters*: Table[string, BiggestInt]
    cacheTables*: Table[string, BTree[string, PNode]]
@@ -128,6 +128,11 @@ template semtab*(m: PSym; g: ModuleGraph): TStrTable =
proc cachedModule(g: ModuleGraph; m: PSym): bool {.inline.} =
  m.position < g.packed.len and g.packed[m.position].status == loaded

proc simulateCachedModule*(g: ModuleGraph; moduleSym: PSym; m: PackedModule) =
  when false:
    echo "simulating ", moduleSym.name.s, " ", moduleSym.position
  simulateLoadedModule(g.packed, g.config, g.cache, moduleSym, m)

type
  ModuleIter* = object
    fromRod: bool
@@ -231,6 +236,10 @@ proc registerModule*(g: ModuleGraph; m: PSym) =

  if m.position >= g.ifaces.len:
    setLen(g.ifaces, m.position + 1)

  if m.position >= g.packed.len:
    setLen(g.packed, m.position + 1)

  g.ifaces[m.position] = Iface(module: m, converters: @[], patterns: @[])
  initStrTable(g.ifaces[m.position].interf)

@@ -253,8 +262,6 @@ proc newModuleGraph*(cache: IdentCache; config: ConfigRef): ModuleGraph =
  result.opNot = createMagic(result, "not", mNot)
  result.opContains = createMagic(result, "contains", mInSet)
  result.emptyNode = newNode(nkEmpty)
  result.recordStmt = proc (graph: ModuleGraph; m: PSym; n: PNode) {.nimcall.} =
    discard
  result.cacheSeqs = initTable[string, PNode]()
  result.cacheCounters = initTable[string, BiggestInt]()
  result.cacheTables = initTable[string, BTree[string, PNode]]()
@@ -334,14 +341,14 @@ proc isDirty*(g: ModuleGraph; m: PSym): bool =

proc getBody*(g: ModuleGraph; s: PSym): PNode {.inline.} =
  result = s.ast[bodyPos]
  if result == nil and g.config.symbolFiles in {readOnlySf, v2Sf}:
  if result == nil and g.config.symbolFiles in {readOnlySf, v2Sf, stressTest}:
    result = loadProcBody(g.config, g.cache, g.packed, s)
    s.ast[bodyPos] = result
    assert result != nil

proc moduleFromRodFile*(g: ModuleGraph; fileIdx: FileIndex): PSym =
  ## Returns 'nil' if the module needs to be recompiled.
  if g.config.symbolFiles in {readOnlySf, v2Sf}:
  if g.config.symbolFiles in {readOnlySf, v2Sf, stressTest}:
    result = moduleFromRodFile(g.packed, g.config, g.cache, fileIdx)

proc configComplete*(g: ModuleGraph) =

@@ -14,6 +14,8 @@ import
  idents, lexer, passes, syntaxes, llstream, modulegraphs,
  lineinfos, pathutils, tables

import ic / replayer

proc resetSystemArtifacts*(g: ModuleGraph) =
  magicsys.resetSysTypes(g)

@@ -108,7 +110,7 @@ proc compileModule*(graph: ModuleGraph; fileIdx: FileIndex; flags: TSymFlags): P
      processModuleAux()
    else:
      partialInitModule(result, graph, fileIdx, filename)
      # XXX replay the pragmas here!
      replayStateChanges(result, graph)
  elif graph.isDirty(result):
    result.flags.excl sfDirty
    # reset module fields:

@@ -124,7 +124,7 @@ type
    cmdTcc # run the project via TCC backend
    cmdCheck # semantic checking for whole project
    cmdParse # parse a single file (for debugging)
    cmdScan # scan a single file (for debugging)
    cmdRod # .rod to some text representation (for debugging)
    cmdIdeTools # ide tools (e.g. nimsuggest)
    cmdNimscript # evaluate nimscript
    cmdDoc0
@@ -190,7 +190,7 @@ type
    ## are not anymore.

  SymbolFilesOption* = enum
    disabledSf, writeOnlySf, readOnlySf, v2Sf
    disabledSf, writeOnlySf, readOnlySf, v2Sf, stressTest

  TSystemCC* = enum
    ccNone, ccGcc, ccNintendoSwitch, ccLLVM_Gcc, ccCLang, ccBcc, ccVcc,

@@ -108,7 +108,8 @@ proc prepareConfigNotes(graph: ModuleGraph; module: PSym) =
    graph.config.notes = graph.config.foreignPackageNotes

proc moduleHasChanged*(graph: ModuleGraph; module: PSym): bool {.inline.} =
  result = module.id >= 0 or isDefined(graph.config, "nimBackendAssumesChange")
  result = true
  #module.id >= 0 or isDefined(graph.config, "nimBackendAssumesChange")

proc partOfStdlib(x: PSym): bool =
  var it = x.owner

@@ -96,10 +96,10 @@ proc pragma*(c: PContext, sym: PSym, n: PNode, validPragmas: TSpecialWords;
             isStatement: bool = false)

proc recordPragma(c: PContext; n: PNode; args: varargs[string]) =
  var recorded = newNodeI(nkCommentStmt, n.info)
  var recorded = newNodeI(nkReplayAction, n.info)
  for i in 0..args.high:
    recorded.add newStrNode(args[i], n.info)
  c.graph.recordStmt(c.graph, c.module, recorded)
  addPragmaComputation(c, recorded)

const
  errStringLiteralExpected = "string literal expected"
@@ -705,7 +705,7 @@ proc markCompilerProc(c: PContext; s: PSym) =
  incl(s.flags, sfUsed)
  registerCompilerProc(c.graph, s)
  if c.config.symbolFiles != disabledSf:
    addCompilerProc(c.encoder, s)
    addCompilerProc(c.encoder, c.packedRepr, s)

proc deprecatedStmt(c: PContext; outerPragma: PNode) =
  let pragma = outerPragma[1]

@@ -268,15 +268,25 @@ proc newContext*(graph: ModuleGraph; module: PSym): PContext =
  result.typesWithOps = @[]
  result.features = graph.config.features
  if graph.config.symbolFiles != disabledSf:
    initEncoder result.encoder, module, graph.config, graph.startupPackedConfig
    let id = module.position
    assert graph.packed[id].status in {undefined, outdated}
    graph.packed[id].status = storing
    graph.packed[id].module = module
    initEncoder result.encoder, graph.packed[id].fromDisk, module, graph.config, graph.startupPackedConfig

template packedRepr*(c): untyped = c.graph.packed[c.module.position].fromDisk

proc addIncludeFileDep*(c: PContext; f: FileIndex) =
  if c.config.symbolFiles != disabledSf:
    addIncludeFileDep(c.encoder, f)
    addIncludeFileDep(c.encoder, c.packedRepr, f)

proc addImportFileDep*(c: PContext; f: FileIndex) =
  if c.config.symbolFiles != disabledSf:
    addImportFileDep(c.encoder, f)
    addImportFileDep(c.encoder, c.packedRepr, f)

proc addPragmaComputation*(c: PContext; n: PNode) =
  if c.config.symbolFiles != disabledSf:
    addPragmaComputation(c.encoder, c.packedRepr, n)

proc inclSym(sq: var seq[PSym], s: PSym) =
  for i in 0..<sq.len:
@@ -287,28 +297,28 @@ proc addConverter*(c: PContext, conv: PSym) =
  inclSym(c.converters, conv)
  inclSym(c.graph.ifaces[c.module.position].converters, conv)
  if c.config.symbolFiles != disabledSf:
    addConverter(c.encoder, conv)
    addConverter(c.encoder, c.packedRepr, conv)

proc addPureEnum*(c: PContext, e: PSym) =
  inclSym(c.graph.ifaces[c.module.position].pureEnums, e)
  if c.config.symbolFiles != disabledSf:
    addPureEnum(c.encoder, e)
    addPureEnum(c.encoder, c.packedRepr, e)

proc addPattern*(c: PContext, p: PSym) =
  inclSym(c.patterns, p)
  inclSym(c.graph.ifaces[c.module.position].patterns, p)
  if c.config.symbolFiles != disabledSf:
    addTrmacro(c.encoder, p)
    addTrmacro(c.encoder, c.packedRepr, p)

proc exportSym*(c: PContext; s: PSym) =
  strTableAdd(c.module.semtab(c.graph), s)
  if c.config.symbolFiles != disabledSf:
    addExported(c.encoder, s)
    addExported(c.encoder, c.packedRepr, s)

proc reexportSym*(c: PContext; s: PSym) =
  strTableAdd(c.module.semtab(c.graph), s)
  if c.config.symbolFiles != disabledSf:
    addReexport(c.encoder, s)
    addReexport(c.encoder, c.packedRepr, s)

proc newLib*(kind: TLibKind): PLib =
  new(result)
@@ -499,8 +509,23 @@ template addExport*(c: PContext; s: PSym) =

proc storeRodNode*(c: PContext, n: PNode) =
  if c.config.symbolFiles != disabledSf:
    toPackedNodeTopLevel(n, c.encoder)
    toPackedNodeTopLevel(n, c.encoder, c.packedRepr)

proc saveRodFile*(c: PContext) =
  if c.config.symbolFiles != disabledSf:
    saveRodFile(toRodFile(c.config, AbsoluteFile toFullPath(c.config, FileIndex c.module.position)), c.encoder)
    for (m, n) in PCtx(c.graph.vm).vmstateDiff:
      if m == c.module:
        addPragmaComputation(c, n)
    if sfSystemModule in c.module.flags:
      c.graph.systemModuleComplete = true
    if c.config.symbolFiles != stressTest:
      # For stress testing we seek to reload the symbols from memory. This
      # way much of the logic is tested but the test is reproducible as it does
      # not depend on the hard disk contents!
      saveRodFile(toRodFile(c.config, AbsoluteFile toFullPath(c.config, FileIndex c.module.position)),
                  c.encoder, c.packedRepr)
    else:
      # debug code, but maybe a good idea for production? Could reduce the compiler's
      # memory consumption considerably at the cost of more loads from disk.
      simulateCachedModule(c.graph, c.module, c.packedRepr)
      c.graph.packed[c.module.position].status = loaded

@@ -265,6 +265,7 @@ type
    oldErrorCount*: int
    profiler*: Profiler
    templInstCounter*: ref int # gives every template instantiation a unique ID, needed here for getAst
    vmstateDiff*: seq[(PSym, PNode)] # we remember the "diff" to global state here (feature for IC)

  PStackFrame* = ref TStackFrame
  TStackFrame* = object