refactoring: move DB model to incremental.nim
compiler/btrees.nim (new file, 186 lines)
@@ -0,0 +1,186 @@
#
#
#           The Nim Compiler
#        (c) Copyright 2018 Andreas Rumpf
#
#    See the file "copying.txt", included in this
#    distribution, for details about the copyright.
#

## BTree implementation with few features, but good enough for the
## Nim compiler's needs.

const
  M = 512    # max children per B-tree node = M-1
             # (must be even and greater than 2)
  Mhalf = M div 2

type
  Node[Key, Val] = ref object
    entries: int
    keys: array[M, Key]
    case isInternal: bool
    of false:
      vals: array[M, Val]
    of true:
      links: array[M, Node[Key, Val]]
  BTree*[Key, Val] = object
    root: Node[Key, Val]
    height: int
    entries: int      ## number of key-value pairs

proc initBTree*[Key, Val](): BTree[Key, Val] =
  BTree[Key, Val](root: Node[Key, Val](entries: 0, isInternal: false))

template less(a, b): bool = cmp(a, b) < 0
template eq(a, b): bool = cmp(a, b) == 0

proc getOrDefault*[Key, Val](b: BTree[Key, Val], key: Key): Val =
  var x = b.root
  while x.isInternal:
    for j in 0 ..< x.entries:
      if j+1 == x.entries or less(key, x.keys[j+1]):
        x = x.links[j]
        break
  assert(not x.isInternal)
  for j in 0 ..< x.entries:
    if eq(key, x.keys[j]): return x.vals[j]

proc copyHalf[Key, Val](h, result: Node[Key, Val]) =
  for j in 0 ..< Mhalf:
    result.keys[j] = h.keys[Mhalf + j]
  if h.isInternal:
    for j in 0 ..< Mhalf:
      result.links[j] = h.links[Mhalf + j]
  else:
    for j in 0 ..< Mhalf:
      shallowCopy(result.vals[j], h.vals[Mhalf + j])

proc split[Key, Val](h: Node[Key, Val]): Node[Key, Val] =
  ## split node in half
  result = Node[Key, Val](entries: Mhalf, isInternal: h.isInternal)
  h.entries = Mhalf
  copyHalf(h, result)

proc insert[Key, Val](h: Node[Key, Val], key: Key, val: Val): Node[Key, Val] =
  #var t = Entry(key: key, val: val, next: nil)
  var newKey = key
  var j = 0
  if not h.isInternal:
    while j < h.entries:
      if less(key, h.keys[j]): break
      inc j
    for i in countdown(h.entries, j+1):
      shallowCopy(h.vals[i], h.vals[i-1])
    h.vals[j] = val
  else:
    var newLink: Node[Key, Val] = nil
    while j < h.entries:
      if j+1 == h.entries or less(key, h.keys[j+1]):
        let u = insert(h.links[j], key, val)
        inc j
        if u == nil: return nil
        newKey = u.keys[0]
        newLink = u
        break
      inc j
    for i in countdown(h.entries, j+1):
      h.links[i] = h.links[i-1]
    h.links[j] = newLink

  for i in countdown(h.entries, j+1):
    h.keys[i] = h.keys[i-1]
  h.keys[j] = newKey
  inc h.entries
  return if h.entries < M: nil else: split(h)

proc add*[Key, Val](b: var BTree[Key, Val]; key: Key; val: Val) =
  let u = insert(b.root, key, val)
  inc b.entries
  if u == nil: return

  # need to split root
  let t = Node[Key, Val](entries: 2, isInternal: true)
  t.keys[0] = b.root.keys[0]
  t.links[0] = b.root
  t.keys[1] = u.keys[0]
  t.links[1] = u
  b.root = t
  inc b.height

proc toString[Key, Val](h: Node[Key, Val], indent: string; result: var string) =
  if not h.isInternal:
    for j in 0..<h.entries:
      result.add(indent)
      result.add($h.keys[j] & " " & $h.vals[j] & "\n")
  else:
    for j in 0..<h.entries:
      if j > 0: result.add(indent & "(" & $h.keys[j] & ")\n")
      toString(h.links[j], indent & "   ", result)

proc `$`[Key, Val](b: BTree[Key, Val]): string =
  result = ""
  toString(b.root, "", result)

when isMainModule:

  import random, tables

  proc main =
    var st = initBTree[string, string]()
    st.add("www.cs.princeton.edu", "abc")
    st.add("www.princeton.edu", "128.112.128.15")
    st.add("www.yale.edu", "130.132.143.21")
    st.add("www.simpsons.com", "209.052.165.60")
    st.add("www.apple.com", "17.112.152.32")
    st.add("www.amazon.com", "207.171.182.16")
    st.add("www.ebay.com", "66.135.192.87")
    st.add("www.cnn.com", "64.236.16.20")
    st.add("www.google.com", "216.239.41.99")
    st.add("www.nytimes.com", "199.239.136.200")
    st.add("www.microsoft.com", "207.126.99.140")
    st.add("www.dell.com", "143.166.224.230")
    st.add("www.slashdot.org", "66.35.250.151")
    st.add("www.espn.com", "199.181.135.201")
    st.add("www.weather.com", "63.111.66.11")
    st.add("www.yahoo.com", "216.109.118.65")

    assert st.getOrDefault("www.cs.princeton.edu") == "abc"
    assert st.getOrDefault("www.harvardsucks.com") == nil

    assert st.getOrDefault("www.simpsons.com") == "209.052.165.60"
    assert st.getOrDefault("www.apple.com") == "17.112.152.32"
    assert st.getOrDefault("www.ebay.com") == "66.135.192.87"
    assert st.getOrDefault("www.dell.com") == "143.166.224.230"
    assert(st.entries == 16)

    when false:
      var b2 = initBTree[string, string]()
      const iters = 10_000
      for i in 1..iters:
        b2.add($i, $(iters - i))
      for i in 1..iters:
        let x = b2.getOrDefault($i)
        if x != $(iters - i):
          echo "got ", x, ", but expected ", iters - i
      echo b2.entries
      echo b2.height

    when true:
      var b2 = initBTree[int, string]()
      var t2 = initTable[int, string]()
      const iters = 100_000
      for i in 1..iters:
        let x = rand(high(int))
        if not t2.hasKey(x):
          doAssert b2.getOrDefault(x).len == 0, " what, tree has this element " & $x
          t2[x] = $x
          b2.add(x, $x)

      doAssert b2.entries == t2.len
      echo "unique entries ", b2.entries
      for k, v in t2:
        doAssert $k == v
        doAssert b2.getOrDefault(k) == $k

  main()

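Aside (not part of the commit): the exported surface of btrees.nim is just initBTree, add and getOrDefault, with keys ordered via cmp. A minimal, hypothetical usage sketch follows; the variable names are invented, and a missing key yields the value type's default, which is why the test above checks `.len == 0`:

  import btrees

  var ids = initBTree[int, string]()
  ids.add(3, "three")
  ids.add(1, "one")
  doAssert ids.getOrDefault(1) == "one"
  doAssert ids.getOrDefault(2).len == 0   # missing key: default value

In this commit, the Reader in incremental.nim (next file) keeps a BTree[int, PSym] and a BTree[int, PType] keyed by the ids stored in the database.
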
compiler/incremental.nim (new file, 188 lines)
@@ -0,0 +1,188 @@
#
#
#           The Nim Compiler
#        (c) Copyright 2018 Andreas Rumpf
#
#    See the file "copying.txt", included in this
#    distribution, for details about the copyright.
#

## Basic type definitions the module graph needs in order to support
## incremental compilations.

const nimIncremental* = defined(nimIncremental)

import options, lineinfos

when nimIncremental:
  import ast, intsets, btrees, db_sqlite

  type
    Writer* = object
      sstack*: seq[PSym]          # a stack of symbols to process
      tstack*: seq[PType]         # a stack of types to process
      tmarks*, smarks*: IntSet
      forwardedSyms*: seq[PSym]

    Reader* = object
      syms*: BTree[int, PSym]
      types*: BTree[int, PType]

    IncrementalCtx* = object
      db*: DbConn
      w*: Writer
      r*: Reader

  proc init*(incr: var IncrementalCtx) =
    incr.w.sstack = @[]
    incr.w.tstack = @[]
    incr.w.tmarks = initIntSet()
    incr.w.smarks = initIntSet()
    incr.w.forwardedSyms = @[]
    incr.r.syms = initBTree[int, PSym]()
    incr.r.types = initBTree[int, PType]()


  proc hashFileCached*(conf: ConfigRef; fileIdx: FileIndex; fullpath: string): string =
    result = msgs.getHash(fileIdx)
    if result.len == 0:
      result = $secureHashFile(fullpath)
      msgs.setHash(fileIdx, result)


  proc toDbFileId*(fileIdx: int32): int =
    if fileIdx == -1: return -1
    let fullpath = fileIdx.toFullPath
    let row = db.getRow(sql"select id, fullhash from filenames where fullpath = ?",
      fullpath)
    let id = row[0]
    let fullhash = hashFileCached(fileIdx, fullpath)
    if id.len == 0:
      result = int db.insertID(sql"insert into filenames(fullpath, fullhash) values (?, ?)",
        fullpath, fullhash)
    else:
      if row[1] != fullhash:
        db.exec(sql"update filenames set fullhash = ? where fullpath = ?", fullhash, fullpath)
      result = parseInt(id)

  proc fromDbFileId*(incr: var IncrementalCtx; conf: ConfigRef; dbId: int): FileIndex =
    if dbId == -1: return -1
    let fullpath = db.getValue(sql"select fullpath from filenames where id = ?", dbId)
    doAssert fullpath.len > 0, "cannot find file name for DB ID " & $dbId
    result = fileInfoIdx(conf, fullpath)


  proc addModuleDep*(incr: var IncrementalCtx; conf: ConfigRef;
                     module, fileIdx: FileIndex;
                     isIncludeFile: bool) =
    if conf.symbolFiles != v2Sf: return

    let a = toDbFileId(incr, conf, module)
    let b = toDbFileId(incr, conf, fileIdx)

    incr.db.exec(sql"insert into deps(module, dependency, isIncludeFile) values (?, ?, ?)",
      a, b, ord(isIncludeFile))

  # --------------- Database model ---------------------------------------------

  proc createDb*(db: DbConn) =
    db.exec(sql"""
      create table if not exists controlblock(
        idgen integer not null
      );
    """)

    db.exec(sql"""
      create table if not exists filenames(
        id integer primary key,
        fullpath varchar(8000) not null,
        fullHash varchar(256) not null
      );
    """)
    db.exec sql"create index if not exists FilenameIx on filenames(fullpath);"

    db.exec(sql"""
      create table if not exists modules(
        id integer primary key,
        fullpath varchar(8000) not null,
        interfHash varchar(256) not null,
        fullHash varchar(256) not null,

        created timestamp not null default (DATETIME('now'))
      );""")
    db.exec(sql"""create unique index if not exists SymNameIx on modules(fullpath);""")

    db.exec(sql"""
      create table if not exists deps(
        id integer primary key,
        module integer not null,
        dependency integer not null,
        isIncludeFile integer not null,
        foreign key (module) references filenames(id),
        foreign key (dependency) references filenames(id)
      );""")
    db.exec(sql"""create index if not exists DepsIx on deps(module);""")

    db.exec(sql"""
      create table if not exists types(
        id integer primary key,
        nimid integer not null,
        module integer not null,
        data blob not null,
        foreign key (module) references module(id)
      );
    """)
    db.exec sql"create index TypeByModuleIdx on types(module);"
    db.exec sql"create index TypeByNimIdIdx on types(nimid);"

    db.exec(sql"""
      create table if not exists syms(
        id integer primary key,
        nimid integer not null,
        module integer not null,
        name varchar(256) not null,
        data blob not null,
        exported int not null,
        foreign key (module) references module(id)
      );
    """)
    db.exec sql"create index if not exists SymNameIx on syms(name);"
    db.exec sql"create index SymByNameAndModuleIdx on syms(name, module);"
    db.exec sql"create index SymByModuleIdx on syms(module);"
    db.exec sql"create index SymByNimIdIdx on syms(nimid);"


    db.exec(sql"""
      create table if not exists toplevelstmts(
        id integer primary key,
        position integer not null,
        module integer not null,
        data blob not null,
        foreign key (module) references module(id)
      );
    """)
    db.exec sql"create index TopLevelStmtByModuleIdx on toplevelstmts(module);"
    db.exec sql"create index TopLevelStmtByPositionIdx on toplevelstmts(position);"

    db.exec(sql"""
      create table if not exists statics(
        id integer primary key,
        module integer not null,
        data blob not null,
        foreign key (module) references module(id)
      );
    """)
    db.exec sql"create index StaticsByModuleIdx on toplevelstmts(module);"
    db.exec sql"insert into controlblock(idgen) values (0)"


else:
  type
    IncrementalCtx* = object

  template init*(incr: IncrementalCtx) = discard

  template addModuleDep*(incr: var IncrementalCtx; conf: ConfigRef;
                         module, fileIdx: FileIndex;
                         isIncludeFile: bool) =
    discard

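Aside (not part of the commit): a hypothetical wiring sketch, assuming -d:nimIncremental, of how a caller could open the rodfiles database and apply the schema above. openModuleCache is an invented name; getNimcacheDir, v2Sf, the "rodfiles.db" file name and the open() call mirror what setupModuleCache in rodimpl.nim uses further down in this diff.

  import os, db_sqlite, options, incremental

  proc openModuleCache(incr: var IncrementalCtx; conf: ConfigRef) =
    ## Hypothetical helper: open (or create) the cache DB and prepare `incr`.
    if conf.symbolFiles != v2Sf: return
    let dbfile = getNimcacheDir(conf) / "rodfiles.db"
    let fresh = not fileExists(dbfile)
    incr.db = open(connection=dbfile, user="nim", password="", database="nim")
    if fresh: createDb(incr.db)   # create the schema on first use
    init(incr)                    # reset the Writer/Reader state
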
@@ -157,7 +157,7 @@ const
 proc mainCommand*(graph: ModuleGraph; cache: IdentCache) =
   let conf = graph.config
 
-  setupModuleCache()
+  setupModuleCache(graph)
   # In "nim serve" scenario, each command must reset the registered passes
   clearPasses(graph)
   conf.lastCmdTime = epochTime()

@@ -9,7 +9,7 @@
 
 ## This module implements the module graph data structure. The module graph
 ## represents a complete Nim project. Single modules can either be kept in RAM
-## or stored in a ROD file. The ROD file mechanism is not yet integrated here.
+## or stored in a Sqlite database.
 ##
 ## The caching of modules is critical for 'nimsuggest' and is tricky to get
 ## right. If module E is being edited, we need autocompletion (and type

@@ -25,7 +25,8 @@
 ## - Its dependent module stays the same.
 ##
 
-import ast, intsets, tables, options, rod, lineinfos, hashes, idents
+import ast, intsets, tables, options, lineinfos, hashes, idents,
+  incremental
 
 type
   ModuleGraph* = ref object

@@ -54,6 +55,7 @@ type
     intTypeCache*: array[-5..64, PType]
     opContains*, opNot*: PSym
     emptyNode*: PNode
+    incr*: IncrementalCtx
 
 proc hash*(x: FileIndex): Hash {.borrow.}
 

@@ -82,6 +84,7 @@ proc newModuleGraph*(cache: IdentCache; config: ConfigRef): ModuleGraph =
   result.opNot = createMagic(result, "not", mNot)
   result.opContains = createMagic(result, "contains", mInSet)
   result.emptyNode = newNode(nkEmpty)
+  init(result.incr)
 
 proc resetAllModules*(g: ModuleGraph) =
   initStrTable(packageSyms)

@@ -103,7 +106,7 @@ proc dependsOn(a, b: int): int {.inline.} = (a shl 15) + b
 
 proc addDep*(g: ModuleGraph; m: PSym, dep: FileIndex) =
   assert m.position == m.info.fileIndex.int32
-  addModuleDep(m.info.fileIndex, dep, isIncludeFile = false)
+  addModuleDep(g.incr, g.config, m.info.fileIndex, dep, isIncludeFile = false)
   if suggestMode:
     deps.incl m.position.dependsOn(dep.int)
     # we compute the transitive closure later when quering the graph lazily.

@@ -111,7 +114,7 @@ proc addDep*(g: ModuleGraph; m: PSym, dep: FileIndex) =
     #invalidTransitiveClosure = true
 
 proc addIncludeDep*(g: ModuleGraph; module, includeFile: FileIndex) =
-  addModuleDep(module, includeFile, isIncludeFile = true)
+  addModuleDep(g.incr, g.config, module, includeFile, isIncludeFile = true)
   discard hasKeyOrPut(inclToMod, includeFile, module)
 
 proc parentModule*(g: ModuleGraph; fileIdx: FileIndex): FileIndex =

@@ -75,7 +75,7 @@ proc compileModule*(graph: ModuleGraph; fileIdx: FileIndex; cache: IdentCache, f
       return
     else:
       discard
-  result.id = getModuleId(fileIdx, toFullPath(graph.config, fileIdx))
+  result.id = getModuleId(graph, fileIdx, toFullPath(graph.config, fileIdx))
   discard processModule(graph, result,
     if sfMainModule in flags and graph.config.projectIsStdin: stdin.llStreamOpen else: nil,
     rd, cache)

@@ -107,8 +107,8 @@ proc importModule*(graph: ModuleGraph; s: PSym, fileIdx: FileIndex;
   # localError(result.info, errAttemptToRedefine, result.name.s)
   # restore the notes for outer module:
   graph.config.notes =
-      if s.owner.id == graph.config.mainPackageId: graph.config.mainPackageNotes
-      else: graph.config.foreignPackageNotes
+    if s.owner.id == graph.config.mainPackageId: graph.config.mainPackageNotes
+    else: graph.config.foreignPackageNotes
 
 proc includeModule*(graph: ModuleGraph; s: PSym, fileIdx: FileIndex;
                     cache: IdentCache): PNode {.procvar.} =

@@ -180,7 +180,7 @@ proc processModule*(graph: ModuleGraph; module: PSym, stream: PLLStream,
     var stmtIndex = 0
     var doContinue = true
     while doContinue:
-      let n = loadNode(module, stmtIndex)
+      let n = loadNode(graph, module, stmtIndex)
       if n == nil or graph.stopCompile(): break
       #if n.kind == nkImportStmt:
       #  echo "yes and it's ", n

@@ -9,18 +9,21 @@
 
 ## This module implements the canonalization for the various caching mechanisms.
 
-import ast, idgen, lineinfos, msgs
+import ast, idgen, lineinfos, msgs, incremental, modulegraphs
 
-when not defined(nimSymbolfiles):
-  template setupModuleCache* = discard
-  template storeNode*(module: PSym; n: PNode) = discard
-  template loadNode*(module: PSym; index: var int): PNode = PNode(nil)
+when not nimIncremental:
+  template setupModuleCache*(g: ModuleGraph) = discard
+  template storeNode*(g: ModuleGraph; module: PSym; n: PNode) = discard
+  template loadNode*(g: ModuleGraph; module: PSym; index: var int): PNode = PNode(nil)
 
-  template getModuleId*(fileIdx: FileIndex; fullpath: string): int = getID()
+  template getModuleId*(g: ModuleGraph; fileIdx: FileIndex; fullpath: string): int = getID()
 
-  template addModuleDep*(module, fileIdx: FileIndex; isIncludeFile: bool) = discard
+  template addModuleDep*(g: ModuleGraph; module, fileIdx: FileIndex; isIncludeFile: bool) = discard
 
-  template storeRemaining*(module: PSym) = discard
+  template storeRemaining*(g: ModuleGraph; module: PSym) = discard
 
 else:
   include rodimpl
+
+  # idea for testing all this logic: *Always* load the AST from the DB, whether
+  # we already have it in RAM or not!

@@ -22,15 +22,10 @@ import strutils, os, intsets, tables, ropes, db_sqlite, msgs, options, types,
 ## - Depencency computation should use signature hashes in order to
 ##   avoid recompiling dependent modules.
 
-var db: DbConn
+template db(): DbConn = g.incr.db
 
-proc hashFileCached(fileIdx: int32; fullpath: string): string =
-  result = msgs.getHash(fileIdx)
-  if result.len == 0:
-    result = $secureHashFile(fullpath)
-    msgs.setHash(fileIdx, result)
-
-proc needsRecompile(fileIdx: int32; fullpath: string; cycleCheck: var IntSet): bool =
+proc needsRecompile(g: ModuleGraph; fileIdx: int32; fullpath: string;
+                    cycleCheck: var IntSet): bool =
   let root = db.getRow(sql"select id, fullhash from filenames where fullpath = ?",
     fullpath)
   if root[0].len == 0: return true

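Aside (not part of the commit): the `db` template above replaces the old global `var db: DbConn` with an expression that expands at every call site and binds whatever `g: ModuleGraph` is visible there, which is why each proc in this file now gains a `g` parameter. A tiny illustration with an invented helper (parseInt and getValue come from strutils and db_sqlite, both already imported by this file):

  template db(): DbConn = g.incr.db

  proc countModules(g: ModuleGraph): int =
    # `db` expands to `g.incr.db` because a `g` is in scope here
    parseInt db.getValue(sql"select count(*) from modules")
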
@@ -43,11 +38,11 @@ proc needsRecompile(fileIdx: int32; fullpath: string; cycleCheck: var IntSet): b
   for row in db.fastRows(sql"select fullpath from filenames where id in (select dependency from deps where module = ?)",
       root[0]):
     let dep = row[0]
-    if needsRecompile(dep.fileInfoIdx, dep, cycleCheck):
+    if needsRecompile(g, dep.fileInfoIdx, dep, cycleCheck):
       return true
   return false
 
-proc getModuleId*(fileIdx: int32; fullpath: string): int =
+proc getModuleId*(g: ModuleGraph; fileIdx: int32; fullpath: string): int =
   if gSymbolFiles != v2Sf: return getID()
   let module = db.getRow(
     sql"select id, fullHash from modules where fullpath = ?", fullpath)

@@ -103,27 +98,6 @@ proc pushSym(w: PRodWriter, s: PSym) =
   if not containsOrIncl(w.smarks, s.id):
     w.sstack.add(s)
 
-proc toDbFileId(fileIdx: int32): int =
-  if fileIdx == -1: return -1
-  let fullpath = fileIdx.toFullPath
-  let row = db.getRow(sql"select id, fullhash from filenames where fullpath = ?",
-    fullpath)
-  let id = row[0]
-  let fullhash = hashFileCached(fileIdx, fullpath)
-  if id.len == 0:
-    result = int db.insertID(sql"insert into filenames(fullpath, fullhash) values (?, ?)",
-      fullpath, fullhash)
-  else:
-    if row[1] != fullhash:
-      db.exec(sql"update filenames set fullhash = ? where fullpath = ?", fullhash, fullpath)
-    result = parseInt(id)
-
-proc fromDbFileId(dbId: int): int32 =
-  if dbId == -1: return -1
-  let fullpath = db.getValue(sql"select fullpath from filenames where id = ?", dbId)
-  doAssert fullpath.len > 0, "cannot find file name for DB ID " & $dbId
-  result = fileInfoIdx(fullpath)
-
 proc encodeNode(w: PRodWriter, fInfo: TLineInfo, n: PNode,
                 result: var string) =
   if n == nil:

@@ -395,7 +369,7 @@ proc storeType(w: PRodWriter; t: PType) =
 
 var w = initRodWriter(nil)
 
-proc storeNode*(module: PSym; n: PNode) =
+proc storeNode*(g: ModuleGraph; module: PSym; n: PNode) =
   if gSymbolFiles != v2Sf: return
   w.module = module
   var buf = newStringOfCap(160)

@@ -421,7 +395,7 @@ proc storeNode*(module: PSym; n: PNode) =
       break
     inc i
 
-proc storeRemaining*(module: PSym) =
+proc storeRemaining*(g: ModuleGraph; module: PSym) =
   if gSymbolFiles != v2Sf: return
   w.module = module
   for s in w.forwardedSyms:

@@ -437,7 +411,7 @@ type
     #tstack: seq[(PType, ptr PType)] # a stack of types to process
 
     #tmarks, smarks: IntSet
-    syms: Table[int, PSym] ## XXX make this more efficients
+    syms: Table[int, PSym] ## XXX make this more efficient
     types: Table[int, PType]
     cache: IdentCache
 

@@ -813,7 +787,7 @@ proc loadModuleSymTab(r; module: PSym) =
   if sfSystemModule in module.flags:
     magicsys.systemModule = module
 
-proc loadNode*(module: PSym; index: int): PNode =
+proc loadNode*(g: ModuleGraph; module: PSym; index: int): PNode =
   assert gSymbolFiles == v2Sf
   if index == 0:
     loadModuleSymTab(gr, module)

@@ -828,110 +802,9 @@ proc loadNode*(module: PSym; index: int): PNode =
   gr.module = module
   result = decodeNode(gr, b, module.info)
 
-proc addModuleDep*(module, fileIdx: int32; isIncludeFile: bool) =
+proc setupModuleCache*(g: ModuleGraph) =
   if gSymbolFiles != v2Sf: return
-
-  let a = toDbFileId(module)
-  let b = toDbFileId(fileIdx)
-
-  db.exec(sql"insert into deps(module, dependency, isIncludeFile) values (?, ?, ?)",
-    a, b, ord(isIncludeFile))
-
-# --------------- Database model ---------------------------------------------
-
-proc createDb() =
-  db.exec(sql"""
-    create table if not exists controlblock(
-      idgen integer not null
-    );
-  """)
-
-  db.exec(sql"""
-    create table if not exists filenames(
-      id integer primary key,
-      fullpath varchar(8000) not null,
-      fullHash varchar(256) not null
-    );
-  """)
-  db.exec sql"create index if not exists FilenameIx on filenames(fullpath);"
-
-  db.exec(sql"""
-    create table if not exists modules(
-      id integer primary key,
-      fullpath varchar(8000) not null,
-      interfHash varchar(256) not null,
-      fullHash varchar(256) not null,
-
-      created timestamp not null default (DATETIME('now'))
-    );""")
-  db.exec(sql"""create unique index if not exists SymNameIx on modules(fullpath);""")
-
-  db.exec(sql"""
-    create table if not exists deps(
-      id integer primary key,
-      module integer not null,
-      dependency integer not null,
-      isIncludeFile integer not null,
-      foreign key (module) references filenames(id),
-      foreign key (dependency) references filenames(id)
-    );""")
-  db.exec(sql"""create index if not exists DepsIx on deps(module);""")
-
-  db.exec(sql"""
-    create table if not exists types(
-      id integer primary key,
-      nimid integer not null,
-      module integer not null,
-      data blob not null,
-      foreign key (module) references module(id)
-    );
-  """)
-  db.exec sql"create index TypeByModuleIdx on types(module);"
-  db.exec sql"create index TypeByNimIdIdx on types(nimid);"
-
-  db.exec(sql"""
-    create table if not exists syms(
-      id integer primary key,
-      nimid integer not null,
-      module integer not null,
-      name varchar(256) not null,
-      data blob not null,
-      exported int not null,
-      foreign key (module) references module(id)
-    );
-  """)
-  db.exec sql"create index if not exists SymNameIx on syms(name);"
-  db.exec sql"create index SymByNameAndModuleIdx on syms(name, module);"
-  db.exec sql"create index SymByModuleIdx on syms(module);"
-  db.exec sql"create index SymByNimIdIdx on syms(nimid);"
-
-
-  db.exec(sql"""
-    create table if not exists toplevelstmts(
-      id integer primary key,
-      position integer not null,
-      module integer not null,
-      data blob not null,
-      foreign key (module) references module(id)
-    );
-  """)
-  db.exec sql"create index TopLevelStmtByModuleIdx on toplevelstmts(module);"
-  db.exec sql"create index TopLevelStmtByPositionIdx on toplevelstmts(position);"
-
-  db.exec(sql"""
-    create table if not exists statics(
-      id integer primary key,
-      module integer not null,
-      data blob not null,
-      foreign key (module) references module(id)
-    );
-  """)
-  db.exec sql"create index StaticsByModuleIdx on toplevelstmts(module);"
-  db.exec sql"insert into controlblock(idgen) values (0)"
-
-proc setupModuleCache* =
-  if gSymbolFiles != v2Sf: return
-  let dbfile = getNimcacheDir() / "rodfiles.db"
+  let dbfile = getNimcacheDir(g.config) / "rodfiles.db"
   if not fileExists(dbfile):
     db = open(connection=dbfile, user="nim", password="",
       database="nim")

@@ -622,12 +622,6 @@ proc process(c: PPassContext, n: PNode): PNode =
       # Note: The check for ``s.typ.kind = tyEnum`` is wrong for enum
       # type aliasing! Otherwise the same enum symbol would be included
      # several times!
-      #
-      # if (a.sons[2] <> nil) and (a.sons[2].kind = nkEnumTy) then begin
-      #   a := s.typ.n;
-      #   for j := 0 to sonsLen(a)-1 do
-      #     addInterfaceSym(w, a.sons[j].sym);
-      # end
     of nkImportStmt:
       for i in countup(0, sonsLen(n) - 1):
         addModDep(w, getModuleName(w.config, n.sons[i]), n.info)

@@ -597,7 +597,7 @@ proc myProcess(context: PPassContext, n: PNode): PNode =
   else:
     result = newNodeI(nkEmpty, n.info)
   #if c.config.cmd == cmdIdeTools: findSuggest(c, n)
-  rod.storeNode(c.module, result)
+  rod.storeNode(c.graph, c.module, result)
 
 proc testExamples(c: PContext) =
   let inp = toFullPath(c.config, c.module.info)

@@ -627,7 +627,7 @@ proc myClose(graph: ModuleGraph; context: PPassContext, n: PNode): PNode =
     replayMethodDefs(graph, c.rd)
   popOwner(c)
   popProcCon(c)
-  storeRemaining(c.module)
+  storeRemaining(c.graph, c.module)
   if c.runnableExamples != nil: testExamples(c)
 
 const semPass* = makePass(myOpen, myOpenCached, myProcess, myClose,