lexer, parser cleanup; boehm gc for mac os x
@@ -83,7 +83,6 @@ type
base10, # base10 is listed as the first element,
# so that it is the correct default value
base2, base8, base16
PToken* = ref TToken
TToken* = object # a Nimrod token
tokType*: TTokType # the type of the token
indent*: int # the indentation; only valid if tokType = tkIndent
@@ -94,9 +93,7 @@ type
# or float literals
literal*: string # the parsed (string) literal; and
# documentation comments are here too
next*: PToken # next token; can be used for arbitrary look-ahead

PLexer* = ref TLexer
TLexer* = object of TBaseLexer
filename*: string
indentStack*: seq[int] # the indentation stack
@@ -118,13 +115,10 @@ proc rawGetTok*(L: var TLexer, tok: var TToken)
proc getColumn*(L: TLexer): int
proc getLineInfo*(L: TLexer): TLineInfo
proc closeLexer*(lex: var TLexer)
proc PrintTok*(tok: PToken)
proc tokToStr*(tok: PToken): string

proc PrintTok*(tok: TToken)
proc tokToStr*(tok: TToken): string

proc lexMessage*(L: TLexer, msg: TMsgKind, arg = "")
# the Pascal scanner uses this too:
proc fillToken*(L: var TToken)
# implementation

proc isKeyword(kind: TTokType): bool =
result = (kind >= tokKeywordLow) and (kind <= tokKeywordHigh)
@@ -157,7 +151,7 @@ proc findIdent(L: TLexer, indent: int): bool =
if L.indentStack[i] == indent:
return true

proc tokToStr(tok: PToken): string =
proc tokToStr*(tok: TToken): string =
case tok.tokType
of tkIntLit..tkInt64Lit: result = $tok.iNumber
of tkFloatLit..tkFloat64Lit: result = $tok.fNumber
@@ -171,21 +165,30 @@ proc tokToStr(tok: PToken): string =
InternalError("tokToStr")
result = ""

proc PrintTok(tok: PToken) =
proc PrintTok*(tok: TToken) =
write(stdout, TokTypeToStr[tok.tokType])
write(stdout, " ")
writeln(stdout, tokToStr(tok))

var dummyIdent: PIdent

proc fillToken(L: var TToken) =
proc initToken*(L: var TToken) =
L.TokType = tkInvalid
L.iNumber = 0
L.Indent = 0
L.literal = ""
L.fNumber = 0.0
L.base = base10
L.ident = dummyIdent # this prevents many bugs!
L.ident = dummyIdent

proc fillToken(L: var TToken) =
L.TokType = tkInvalid
L.iNumber = 0
L.Indent = 0
setLen(L.literal, 0)
L.fNumber = 0.0
L.base = base10
L.ident = dummyIdent

proc openLexer(lex: var TLexer, filename: string, inputstream: PLLStream) =
openBaseLexer(lex, inputstream)
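
The hunks above turn the token into a plain TToken value object handled through var parameters: initToken gives a token a fresh literal string, while fillToken keeps the already allocated buffer and merely truncates it with setLen. A minimal standalone sketch of that split, with toy types rather than the compiler's own module (modern Nim standard library assumed):

type
  TokKind = enum tkInvalid, tkSymbol, tkIntLit
  Token = object
    kind: TokKind
    literal: string
    iNumber: int

proc initToken(t: var Token) =
  # first-time initialisation: give the token a fresh literal string
  t.kind = tkInvalid
  t.iNumber = 0
  t.literal = ""

proc fillToken(t: var Token) =
  # per-token reset: keep the already allocated buffer, just empty it
  t.kind = tkInvalid
  t.iNumber = 0
  setLen(t.literal, 0)

var tok: Token          # a value on the stack; no new(tok) required
initToken(tok)
tok.literal.add("ident")
fillToken(tok)
assert tok.literal.len == 0
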
@@ -165,11 +165,11 @@ proc CommandScan(filename: string) =
if stream != nil:
var
L: TLexer
tok: PToken
new(tok)
tok: TToken
initToken(tok)
openLexer(L, f, stream)
while true:
rawGetTok(L, tok[])
rawGetTok(L, tok)
PrintTok(tok)
if tok.tokType == tkEof: break
CloseLexer(L)
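
CommandScan now keeps the token on the stack and lets rawGetTok overwrite it through a var parameter, so new(tok) and the tok[] dereferences disappear. A toy version of the same driver loop, with an invented word-list lexer standing in for the real one:

type
  TokKind = enum tkWord, tkEof
  Tok = object
    kind: TokKind
    literal: string

proc rawGetTok(words: seq[string], pos: var int, tok: var Tok) =
  # overwrite the caller's token in place, as the real lexer does
  if pos < words.len:
    tok = Tok(kind: tkWord, literal: words[pos])
    inc pos
  else:
    tok = Tok(kind: tkEof, literal: "[EOF]")

var
  pos = 0
  tok: Tok                      # no new(tok), no tok[] anywhere
let words = @["var", "x", "=", "4"]
while true:
  rawGetTok(words, pos, tok)
  if tok.kind == tkEof: break
  echo tok.kind, " ", tok.literal
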
@@ -16,13 +16,13 @@ import
# ---------------- configuration file parser -----------------------------
# we use Nimrod's scanner here to save space and work

proc ppGetTok(L: var TLexer, tok: PToken) =
proc ppGetTok(L: var TLexer, tok: var TToken) =
# simple filter
rawGetTok(L, tok[])
while tok.tokType in {tkInd, tkSad, tkDed, tkComment}: rawGetTok(L, tok[])
rawGetTok(L, tok)
while tok.tokType in {tkInd, tkSad, tkDed, tkComment}: rawGetTok(L, tok)

proc parseExpr(L: var TLexer, tok: PToken): bool
proc parseAtom(L: var TLexer, tok: PToken): bool =
proc parseExpr(L: var TLexer, tok: var TToken): bool
proc parseAtom(L: var TLexer, tok: var TToken): bool =
if tok.tokType == tkParLe:
ppGetTok(L, tok)
result = parseExpr(L, tok)
@@ -35,21 +35,21 @@ proc parseAtom(L: var TLexer, tok: PToken): bool =
result = isDefined(tok.ident)
ppGetTok(L, tok)

proc parseAndExpr(L: var TLexer, tok: PToken): bool =
proc parseAndExpr(L: var TLexer, tok: var TToken): bool =
result = parseAtom(L, tok)
while tok.ident.id == ord(wAnd):
ppGetTok(L, tok) # skip "and"
var b = parseAtom(L, tok)
result = result and b

proc parseExpr(L: var TLexer, tok: PToken): bool =
proc parseExpr(L: var TLexer, tok: var TToken): bool =
result = parseAndExpr(L, tok)
while tok.ident.id == ord(wOr):
ppGetTok(L, tok) # skip "or"
var b = parseAndExpr(L, tok)
result = result or b

proc EvalppIf(L: var TLexer, tok: PToken): bool =
proc EvalppIf(L: var TLexer, tok: var TToken): bool =
ppGetTok(L, tok) # skip 'if' or 'elif'
result = parseExpr(L, tok)
if tok.tokType == tkColon: ppGetTok(L, tok)
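
parseAtom, parseAndExpr and parseExpr above form a small recursive-descent evaluator for @if conditions: an atom is a defined symbol or a parenthesised expression, and binds tighter than or, and both chain left to right. A standalone sketch of the same grammar over plain string tokens; the token representation is invented, isDefined is replaced by a HashSet lookup, and well-formed input is assumed:

import sets, strutils

proc parseExpr(toks: seq[string], i: var int, defs: HashSet[string]): bool

proc parseAtom(toks: seq[string], i: var int, defs: HashSet[string]): bool =
  if toks[i] == "(":
    inc i
    result = parseExpr(toks, i, defs)
    if i < toks.len and toks[i] == ")": inc i
  else:
    result = toks[i] in defs     # stands in for isDefined(tok.ident)
    inc i

proc parseAndExpr(toks: seq[string], i: var int, defs: HashSet[string]): bool =
  result = parseAtom(toks, i, defs)
  while i < toks.len and toks[i] == "and":
    inc i                        # skip "and"
    let b = parseAtom(toks, i, defs)
    result = result and b

proc parseExpr(toks: seq[string], i: var int, defs: HashSet[string]): bool =
  result = parseAndExpr(toks, i, defs)
  while i < toks.len and toks[i] == "or":
    inc i                        # skip "or"
    let b = parseAndExpr(toks, i, defs)
    result = result or b

var i = 0
let toks = "windows or ( posix and gc )".splitWhitespace
echo parseExpr(toks, i, toHashSet(["posix", "gc"]))   # true
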
@@ -57,7 +57,7 @@ proc EvalppIf(L: var TLexer, tok: PToken): bool =

var condStack: seq[bool] = @[]

proc doEnd(L: var TLexer, tok: PToken) =
proc doEnd(L: var TLexer, tok: var TToken) =
if high(condStack) < 0: lexMessage(L, errTokenExpected, "@if")
ppGetTok(L, tok) # skip 'end'
setlen(condStack, high(condStack))
@@ -66,20 +66,20 @@ type
TJumpDest = enum
jdEndif, jdElseEndif

proc jumpToDirective(L: var TLexer, tok: PToken, dest: TJumpDest)
proc doElse(L: var TLexer, tok: PToken) =
proc jumpToDirective(L: var TLexer, tok: var TToken, dest: TJumpDest)
proc doElse(L: var TLexer, tok: var TToken) =
if high(condStack) < 0: lexMessage(L, errTokenExpected, "@if")
ppGetTok(L, tok)
if tok.tokType == tkColon: ppGetTok(L, tok)
if condStack[high(condStack)]: jumpToDirective(L, tok, jdEndif)

proc doElif(L: var TLexer, tok: PToken) =
proc doElif(L: var TLexer, tok: var TToken) =
if high(condStack) < 0: lexMessage(L, errTokenExpected, "@if")
var res = EvalppIf(L, tok)
if condStack[high(condStack)] or not res: jumpToDirective(L, tok, jdElseEndif)
else: condStack[high(condStack)] = true

proc jumpToDirective(L: var TLexer, tok: PToken, dest: TJumpDest) =
proc jumpToDirective(L: var TLexer, tok: var TToken, dest: TJumpDest) =
var nestedIfs = 0
while True:
if (tok.ident != nil) and (tok.ident.s == "@"):
@@ -108,7 +108,7 @@ proc jumpToDirective(L: var TLexer, tok: PToken, dest: TJumpDest) =
else:
ppGetTok(L, tok)

proc parseDirective(L: var TLexer, tok: PToken) =
proc parseDirective(L: var TLexer, tok: var TToken) =
ppGetTok(L, tok) # skip @
case whichKeyword(tok.ident)
of wIf:
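
doEnd, doElse, doElif and jumpToDirective maintain a stack of booleans recording which branch of each nested @if has already fired. The toy filter below illustrates that stack discipline with its own simplified frame type and directive syntax; it is a sketch of the idea, not the compiler's algorithm, which instead skips inactive regions by jumping over tokens:

import sets, strutils

type Frame = object
  parentActive: bool   # was the enclosing region active when @if appeared?
  taken: bool          # has a branch at this level already fired?
  active: bool         # is the current branch emitting lines?

proc preprocess(text: string, defines: HashSet[string]): seq[string] =
  # assumes well-formed input: every @elif/@else/@end has a matching @if
  var stack: seq[Frame] = @[]
  template curActive(): bool = (stack.len == 0 or stack[^1].active)
  for raw in text.splitLines:
    let line = raw.strip
    if line.startsWith("@if "):
      let cond = line[4..^1].strip in defines
      stack.add(Frame(parentActive: curActive(), taken: cond,
                      active: curActive() and cond))
    elif line.startsWith("@elif "):
      let cond = line[6..^1].strip in defines
      let take = not stack[^1].taken and cond
      stack[^1].active = stack[^1].parentActive and take
      if take: stack[^1].taken = true
    elif line == "@else":
      stack[^1].active = stack[^1].parentActive and not stack[^1].taken
      stack[^1].taken = true
    elif line == "@end":
      discard stack.pop()
    elif line.len > 0 and curActive():
      result.add(raw)

echo preprocess("""
@if release
opt = speed
@else
opt = none
@end
""", toHashSet(["release"]))      # --> @["opt = speed"]
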
@@ -143,16 +143,16 @@ proc parseDirective(L: var TLexer, tok: PToken) =
ppGetTok(L, tok)
else: lexMessage(L, errInvalidDirectiveX, tokToStr(tok))

proc confTok(L: var TLexer, tok: PToken) =
proc confTok(L: var TLexer, tok: var TToken) =
ppGetTok(L, tok)
while tok.ident != nil and tok.ident.s == "@":
parseDirective(L, tok) # else: give the token to the parser

proc checkSymbol(L: TLexer, tok: PToken) =
proc checkSymbol(L: TLexer, tok: TToken) =
if tok.tokType notin {tkSymbol..pred(tkIntLit), tkStrLit..tkTripleStrLit}:
lexMessage(L, errIdentifierExpected, tokToStr(tok))

proc parseAssignment(L: var TLexer, tok: PToken) =
proc parseAssignment(L: var TLexer, tok: var TToken) =
if tok.ident.id == getIdent("-").id or tok.ident.id == getIdent("--").id:
confTok(L, tok) # skip unnecessary prefix
var info = getLineInfo(L) # save for later in case of an error
@@ -192,9 +192,9 @@ proc parseAssignment(L: var TLexer, tok: PToken) =
proc readConfigFile(filename: string) =
var
L: TLexer
tok: PToken
tok: TToken
stream: PLLStream
new(tok)
initToken(tok)
stream = LLStreamOpen(filename, fmRead)
if stream != nil:
openLexer(L, filename, stream)

@@ -17,11 +17,11 @@
import
llstream, lexer, idents, strutils, ast, msgs

type
type
TParser*{.final.} = object # a TParser object represents a module that
# is being parsed
lex*: PLexer # the lexer that is used for parsing
tok*: PToken # the current token
lex*: TLexer # the lexer that is used for parsing
tok*: TToken # the current token

proc ParseAll*(p: var TParser): PNode
@@ -32,8 +32,8 @@ proc parseTopLevelStmt*(p: var TParser): PNode
# emptyNode if end of stream.

# helpers for the other parsers
proc getPrecedence*(tok: PToken): int
proc isOperator*(tok: PToken): bool
proc getPrecedence*(tok: TToken): int
proc isOperator*(tok: TToken): bool
proc getTok*(p: var TParser)
proc parMessage*(p: TParser, msg: TMsgKind, arg: string = "")
proc skipComment*(p: var TParser, node: PNode)
@@ -54,23 +54,22 @@ proc setBaseFlags*(n: PNode, base: TNumericalBase)
proc parseSymbol*(p: var TParser): PNode
# implementation

proc initParser(p: var TParser) =
new(p.lex)
new(p.tok)

proc getTok(p: var TParser) =
rawGetTok(p.lex[], p.tok[])
rawGetTok(p.lex, p.tok)

proc OpenParser(p: var TParser, filename: string, inputStream: PLLStream) =
initParser(p)
OpenLexer(p.lex[], filename, inputstream)
initToken(p.tok)
OpenLexer(p.lex, filename, inputstream)
getTok(p) # read the first token

proc CloseParser(p: var TParser) =
CloseLexer(p.lex[])
CloseLexer(p.lex)

proc parMessage(p: TParser, msg: TMsgKind, arg: string = "") =
lexMessage(p.lex[], msg, arg)
lexMessage(p.lex, msg, arg)

proc parMessage(p: TParser, msg: TMsgKind, tok: TToken) =
lexMessage(p.lex, msg, tokToStr(tok))

proc skipComment(p: var TParser, node: PNode) =
if p.tok.tokType == tkComment:
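
With TLexer and TToken embedded by value, initParser no longer allocates anything and every p.lex[] dereference becomes a plain field access. A brief sketch with toy types that shows the shape of the change, not the real parser API:

type
  Lexer = object
    data: seq[string]
    pos: int
  Tok = object
    literal: string
  Parser = object
    lex: Lexer        # embedded by value, not a ref
    tok: Tok

proc rawGetTok(L: var Lexer, tok: var Tok) =
  if L.pos < L.data.len:
    tok.literal = L.data[L.pos]
    inc L.pos
  else:
    tok.literal = ""

proc openParser(p: var Parser, data: seq[string]) =
  p.lex = Lexer(data: data, pos: 0)   # plain assignment replaces new(p.lex)
  p.tok = Tok(literal: "")
  rawGetTok(p.lex, p.tok)             # read the first token

proc getTok(p: var Parser) = rawGetTok(p.lex, p.tok)

var p: Parser
openParser(p, @["proc", "foo", "="])
assert p.tok.literal == "proc"
getTok(p)
assert p.tok.literal == "foo"
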
@@ -93,18 +92,18 @@ proc optInd(p: var TParser, n: PNode) =

proc expectIdentOrKeyw(p: TParser) =
if p.tok.tokType != tkSymbol and not isKeyword(p.tok.tokType):
lexMessage(p.lex[], errIdentifierExpected, tokToStr(p.tok))
lexMessage(p.lex, errIdentifierExpected, tokToStr(p.tok))

proc ExpectIdent(p: TParser) =
if p.tok.tokType != tkSymbol:
lexMessage(p.lex[], errIdentifierExpected, tokToStr(p.tok))
lexMessage(p.lex, errIdentifierExpected, tokToStr(p.tok))

proc Eat(p: var TParser, TokType: TTokType) =
if p.tok.TokType == TokType: getTok(p)
else: lexMessage(p.lex[], errTokenExpected, TokTypeToStr[tokType])
else: lexMessage(p.lex, errTokenExpected, TokTypeToStr[tokType])

proc parLineInfo(p: TParser): TLineInfo =
result = getLineInfo(p.lex[])
result = getLineInfo(p.lex)

proc indAndComment(p: var TParser, n: PNode) =
if p.tok.tokType == tkInd:
@@ -116,7 +115,7 @@ proc indAndComment(p: var TParser, n: PNode) =
skipComment(p, n)

proc newNodeP(kind: TNodeKind, p: TParser): PNode =
result = newNodeI(kind, getLineInfo(p.lex[]))
result = newNodeI(kind, getLineInfo(p.lex))

proc newIntNodeP(kind: TNodeKind, intVal: BiggestInt, p: TParser): PNode =
result = newNodeP(kind, p)
@@ -140,10 +139,10 @@ proc parseStmt(p: var TParser): PNode
proc parseTypeDesc(p: var TParser): PNode
proc parseParamList(p: var TParser): PNode

proc IsLeftAssociative(tok: PToken): bool {.inline.} =
proc IsLeftAssociative(tok: TToken): bool {.inline.} =
result = tok.tokType != tkOpr or tok.ident.s[0] != '^'

proc getPrecedence(tok: PToken): int =
proc getPrecedence(tok: TToken): int =
case tok.tokType
of tkOpr:
case tok.ident.s[0]
@@ -161,7 +160,7 @@ proc getPrecedence(tok: PToken): int =
of tkOr, tkXor: result = 2
else: result = - 10

proc isOperator(tok: PToken): bool =
proc isOperator(tok: TToken): bool =
result = getPrecedence(tok) >= 0

proc parseSymbol(p: var TParser): PNode =
@@ -192,11 +191,12 @@ proc parseSymbol(p: var TParser): PNode =
add(result, newIdentNodeP(getIdent(tokToStr(p.tok)), p))
getTok(p)
else:
if result.len == 0: parMessage(p, errIdentifierExpected, tokToStr(p.tok))
if result.len == 0:
parMessage(p, errIdentifierExpected, p.tok)
break
eat(p, tkAccent)
else:
parMessage(p, errIdentifierExpected, tokToStr(p.tok))
parMessage(p, errIdentifierExpected, p.tok)
result = ast.emptyNode

proc indexExpr(p: var TParser): PNode =
@@ -228,8 +228,7 @@ proc exprColonEqExpr(p: var TParser, kind: TNodeKind, tok: TTokType): PNode =
else:
result = a

proc exprList(p: var TParser, endTok: TTokType,
result: PNode) =
proc exprList(p: var TParser, endTok: TTokType, result: PNode) =
getTok(p)
optInd(p, result)
while (p.tok.tokType != endTok) and (p.tok.tokType != tkEof):
@@ -413,8 +412,8 @@ proc identOrLiteral(p: var TParser): PNode =
result = parseCast(p)
of tkAddr:
result = parseAddr(p)
else:
parMessage(p, errExprExpected, tokToStr(p.tok))
else:
parMessage(p, errExprExpected, p.tok)
getTok(p) # we must consume a token here to prevent endless loops!
result = ast.emptyNode

@@ -459,28 +458,25 @@ proc primary(p: var TParser): PNode =
result = indexExprList(p, result)
else: break

proc lowestExprAux(p: var TParser, v: var PNode, limit: int): PToken =
v = primary(p) # expand while operators have priorities higher than 'limit'
var op = p.tok
var opPrec = getPrecedence(op)
proc lowestExprAux(p: var TParser, limit: int): PNode =
result = primary(p)
# expand while operators have priorities higher than 'limit'
var opPrec = getPrecedence(p.tok)
while opPrec >= limit:
var leftAssoc = ord(IsLeftAssociative(op))
var node = newNodeP(nkInfix, p)
var opNode = newIdentNodeP(op.ident, p) # skip operator:
var leftAssoc = ord(IsLeftAssociative(p.tok))
var a = newNodeP(nkInfix, p)
var opNode = newIdentNodeP(p.tok.ident, p) # skip operator:
getTok(p)
optInd(p, opNode) # read sub-expression with higher priority
var v2: PNode
var nextop = lowestExprAux(p, v2, opPrec + leftAssoc)
addSon(node, opNode)
addSon(node, v)
addSon(node, v2)
v = node
op = nextop
opPrec = getPrecedence(nextop)
result = op # return first untreated operator
var b = lowestExprAux(p, opPrec + leftAssoc)
addSon(a, opNode)
addSon(a, result)
addSon(a, b)
result = a
opPrec = getPrecedence(p.tok)

proc lowestExpr(p: var TParser): PNode =
discard lowestExprAux(p, result, - 1)
result = lowestExprAux(p, -1)

proc parseIfExpr(p: var TParser): PNode =
result = newNodeP(nkIfExpr, p)
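
The rewritten lowestExprAux is a precedence-climbing loop: parse a primary, then, while the next operator binds at least as strongly as limit, recurse with opPrec + leftAssoc, so a left-associative operator excludes its own level and the right-associative '^' includes it. The standalone sketch below evaluates numbers directly instead of building PNode trees; the tokenizer, precedence values and helper names are illustrative only:

import math, strutils

var
  toks: seq[string]
  pos = 0

proc peek(): string =
  if pos < toks.len: toks[pos] else: ""

proc prec(op: string): int =
  # illustrative values; the real table lives in getPrecedence above
  case op
  of "^": 10
  of "*", "/": 9
  of "+", "-": 8
  else: -10            # not an operator: terminates the loop

proc leftAssoc(op: string): int =
  if op == "^": 0 else: 1        # '^' is the right-associative case

proc applyOp(op: string, x, y: float): float =
  case op
  of "+": x + y
  of "-": x - y
  of "*": x * y
  of "/": x / y
  else: pow(x, y)                # "^"

proc primary(): float =
  result = parseFloat(peek())
  inc pos

proc lowestExprAux(limit: int): float =
  result = primary()
  # expand while operators have priorities higher than 'limit'
  var opPrec = prec(peek())
  while opPrec >= limit:
    let op = peek()
    inc pos                                        # skip the operator
    let b = lowestExprAux(opPrec + leftAssoc(op))  # higher-priority subexpr
    result = applyOp(op, result, b)
    opPrec = prec(peek())

proc evalExpr(src: string): float =
  toks = src.splitWhitespace
  pos = 0
  result = lowestExprAux(-1)

echo evalExpr("1 + 2 * 3")        # 7.0
echo evalExpr("2 ^ 3 ^ 2")        # 512.0: right-associativity via opPrec + 0
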
@@ -510,7 +506,7 @@ proc parsePragma(p: var TParser): PNode =
getTok(p)
optInd(p, a)
optPar(p)
if (p.tok.tokType == tkCurlyDotRi) or (p.tok.tokType == tkCurlyRi): getTok(p)
if p.tok.tokType in {tkCurlyDotRi, tkCurlyRi}: getTok(p)
else: parMessage(p, errTokenExpected, ".}")

proc identVis(p: var TParser): PNode =
@@ -560,7 +556,7 @@ proc parseIdentColonEquals(p: var TParser, flags: TDeclaredIdentFlags): PNode =
else:
addSon(result, ast.emptyNode)
if (p.tok.tokType != tkEquals) and not (withBothOptional in flags):
parMessage(p, errColonOrEqualsExpected, tokToStr(p.tok))
parMessage(p, errColonOrEqualsExpected, p.tok)
if p.tok.tokType == tkEquals:
getTok(p)
optInd(p, result)
@@ -751,7 +747,7 @@ proc parseImportOrIncludeStmt(p: var TParser, kind: TNodeKind): PNode =
a = newStrNodeP(nkTripleStrLit, p.tok.literal, p)
getTok(p)
else:
parMessage(p, errIdentifierExpected, tokToStr(p.tok))
parMessage(p, errIdentifierExpected, p.tok)
break
addSon(result, a)
if p.tok.tokType != tkComma: break
@@ -776,7 +772,7 @@ proc parseFromStmt(p: var TParser): PNode =
a = newStrNodeP(nkTripleStrLit, p.tok.literal, p)
getTok(p)
else:
parMessage(p, errIdentifierExpected, tokToStr(p.tok))
parMessage(p, errIdentifierExpected, p.tok)
return
addSon(result, a) #optInd(p, a);
eat(p, tkImport)
@@ -788,7 +784,7 @@ proc parseFromStmt(p: var TParser): PNode =
of tkSymbol, tkAccent:
a = parseSymbol(p)
else:
parMessage(p, errIdentifierExpected, tokToStr(p.tok))
parMessage(p, errIdentifierExpected, p.tok)
break
addSon(result, a)
if p.tok.tokType != tkComma: break
@@ -1060,7 +1056,7 @@ proc parseSection(p: var TParser, kind: TNodeKind,
skipComment(p, result)
case p.tok.tokType
of tkInd:
pushInd(p.lex[], p.tok.indent)
pushInd(p.lex, p.tok.indent)
getTok(p)
skipComment(p, result)
while true:
@@ -1081,13 +1077,13 @@ proc parseSection(p: var TParser, kind: TNodeKind,
skipComment(p, a)
addSon(result, a)
else:
parMessage(p, errIdentifierExpected, tokToStr(p.tok))
parMessage(p, errIdentifierExpected, p.tok)
break
popInd(p.lex[])
popInd(p.lex)
of tkSymbol, tkAccent, tkParLe:
# tkParLe is allowed for ``var (x, y) = ...`` tuple parsing
addSon(result, defparser(p))
else: parMessage(p, errIdentifierExpected, tokToStr(p.tok))
else: parMessage(p, errIdentifierExpected, p.tok)

proc parseConstant(p: var TParser): PNode =
result = newNodeP(nkConstDef, p)
@@ -1188,7 +1184,7 @@ proc parseObjectPart(p: var TParser): PNode =
case p.tok.tokType
of tkInd:
result = newNodeP(nkRecList, p)
pushInd(p.lex[], p.tok.indent)
pushInd(p.lex, p.tok.indent)
getTok(p)
skipComment(p, result)
while true:
@@ -1203,9 +1199,9 @@ proc parseObjectPart(p: var TParser): PNode =
of tkEof:
break
else:
parMessage(p, errIdentifierExpected, tokToStr(p.tok))
parMessage(p, errIdentifierExpected, p.tok)
break
popInd(p.lex[])
popInd(p.lex)
of tkWhen:
result = parseObjectWhen(p)
of tkCase:
@@ -1322,7 +1318,7 @@ proc complexOrSimpleStmt(p: var TParser): PNode =
proc parseStmt(p: var TParser): PNode =
if p.tok.tokType == tkInd:
result = newNodeP(nkStmtList, p)
pushInd(p.lex[], p.tok.indent)
pushInd(p.lex, p.tok.indent)
getTok(p)
while true:
case p.tok.tokType
@@ -1335,7 +1331,7 @@ proc parseStmt(p: var TParser): PNode =
var a = complexOrSimpleStmt(p)
if a.kind == nkEmpty: break
addSon(result, a)
popInd(p.lex[] )
popInd(p.lex)
else:
# the case statement is only needed for better error messages:
case p.tok.tokType
@@ -1345,7 +1341,7 @@ proc parseStmt(p: var TParser): PNode =
result = ast.emptyNode
else:
result = simpleStmt(p)
if result.kind == nkEmpty: parMessage(p, errExprExpected, tokToStr(p.tok))
if result.kind == nkEmpty: parMessage(p, errExprExpected, p.tok)
if p.tok.tokType == tkSad: getTok(p)

proc parseAll(p: var TParser): PNode =
@@ -1357,7 +1353,7 @@ proc parseAll(p: var TParser): PNode =
of tkEof: break
else:
var a = complexOrSimpleStmt(p)
if a.kind == nkEmpty: parMessage(p, errExprExpected, tokToStr(p.tok))
if a.kind == nkEmpty: parMessage(p, errExprExpected, p.tok)
addSon(result, a)

proc parseTopLevelStmt(p: var TParser): PNode =
@@ -1371,5 +1367,5 @@ proc parseTopLevelStmt(p: var TParser): PNode =
of tkEof: break
else:
result = complexOrSimpleStmt(p)
if result.kind == nkEmpty: parMessage(p, errExprExpected, tokToStr(p.tok))
break
if result.kind == nkEmpty: parMessage(p, errExprExpected, p.tok)
break

@@ -82,7 +82,7 @@ gcc.options.debug = "-g"
@end
@else:
@if not release:
gcc.options.always = "-w"
gcc.options.always = "-w -O1"
@else:
gcc.options.always = "-w"
@end

@@ -65,6 +65,10 @@ proc raiseOutOfMem() {.noreturn.} =
when defined(boehmgc):
when defined(windows):
const boehmLib = "boehmgc.dll"
elif defined(macosx):
const boehmLib = "libgc.dylib"

proc boehmGCinit {.importc: "GC_init", dynlib: boehmLib.}
else:
const boehmLib = "/usr/lib/libgc.so.1"

@@ -93,7 +97,8 @@ when defined(boehmgc):
proc dealloc(p: Pointer) =
boehmDealloc(p)

proc initGC() = nil
proc initGC() =
when defined(macosx): boehmGCinit()

#boehmGCincremental()
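
The garbage-collector hunks pick the Boehm shared library per platform and call GC_init on Mac OS X (the new macosx branch in initGC). A minimal sketch of the same dynlib binding: the library paths are taken from the diff, only GC_init, GC_malloc and GC_free are bound, and the snippet can only run on a machine where that library is actually installed at the given path:

when defined(windows):
  const boehmLib = "boehmgc.dll"
elif defined(macosx):
  const boehmLib = "libgc.dylib"
else:
  const boehmLib = "/usr/lib/libgc.so.1"

proc boehmGCinit() {.importc: "GC_init", dynlib: boehmLib.}
proc boehmAlloc(size: int): pointer {.importc: "GC_malloc", dynlib: boehmLib.}
proc boehmDealloc(p: pointer) {.importc: "GC_free", dynlib: boehmLib.}

proc initGC() =
  # mirror the diff: only Mac OS X gets the explicit GC_init call here
  when defined(macosx): boehmGCinit()

initGC()
let p = boehmAlloc(128)
boehmDealloc(p)
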