diff --git a/lib/pure/htmlparser.nim b/lib/pure/htmlparser.nim
index be61bb0d86..4b305cfd63 100644
--- a/lib/pure/htmlparser.nim
+++ b/lib/pure/htmlparser.nim
@@ -52,7 +52,7 @@ import strutils, streams, parsexml, xmltree, unicode, strtabs
 
 type
-  HtmlTag* = enum ## list of all supported HTML tags; order will always be
+  HtmlTag* = enum ## list of all supported HTML tags; order will always be
                    ## alphabetically
     tagUnknown,    ## unknown HTML element
     tagA,          ## the HTML ``a`` element
@@ -1945,7 +1945,8 @@ proc untilElementEnd(x: var XmlParser, result: XmlNode,
       adderr(expected(x, result))
       # this seems to do better match error corrections in browsers:
       while x.kind in {xmlElementEnd, xmlWhitespace}:
-        if x.kind == xmlElementEnd and cmpIgnoreCase(x.elemName, result.tag) == 0:
+        if x.kind == xmlElementEnd and cmpIgnoreCase(x.elemName,
+            result.tag) == 0:
           break
         next(x)
       next(x)
diff --git a/lib/pure/json.nim b/lib/pure/json.nim
index 530408b9fc..4dad325bc7 100644
--- a/lib/pure/json.nim
+++ b/lib/pure/json.nim
@@ -406,7 +406,7 @@ macro `%*`*(x: untyped): untyped =
   ## `%` for every element.
   result = toJson(x)
 
-proc `==`* (a, b: JsonNode): bool =
+proc `==`*(a, b: JsonNode): bool =
   ## Check two nodes for equality
   if a.isNil:
     if b.isNil: return true
@@ -428,13 +428,13 @@ proc `==`* (a, b: JsonNode): bool =
     of JArray:
       result = a.elems == b.elems
     of JObject:
-     # we cannot use OrderedTable's equality here as
-     # the order does not matter for equality here.
-     if a.fields.len != b.fields.len: return false
-     for key, val in a.fields:
-       if not b.fields.hasKey(key): return false
-       if b.fields[key] != val: return false
-     result = true
+      # we cannot use OrderedTable's equality here as
+      # the order does not matter for equality here.
+      if a.fields.len != b.fields.len: return false
+      for key, val in a.fields:
+        if not b.fields.hasKey(key): return false
+        if b.fields[key] != val: return false
+      result = true
 
 proc hash*(n: OrderedTable[string, JsonNode]): Hash {.noSideEffect.}
 
@@ -502,7 +502,8 @@ proc contains*(node: JsonNode, val: JsonNode): bool =
   assert(node.kind == JArray)
   find(node.elems, val) >= 0
 
-proc existsKey*(node: JsonNode, key: string): bool {.deprecated: "use 'hasKey' instead".} =
+proc existsKey*(node: JsonNode, key: string): bool {.
+    deprecated: "use 'hasKey' instead".} =
   node.hasKey(key)
 
 proc `{}`*(node: JsonNode, keys: varargs[string]): JsonNode =
@@ -538,7 +539,8 @@ proc getOrDefault*(node: JsonNode, key: string): JsonNode =
   if not isNil(node) and node.kind == JObject:
     result = node.fields.getOrDefault(key)
 
-template simpleGetOrDefault*{`{}`(node, [key])}(node: JsonNode, key: string): JsonNode = node.getOrDefault(key)
+template simpleGetOrDefault*{`{}`(node, [key])}(node: JsonNode,
+    key: string): JsonNode = node.getOrDefault(key)
 
 proc `{}=`*(node: JsonNode, keys: varargs[string], value: JsonNode) =
   ## Traverses the node and tries to set the value at the given location
@@ -691,7 +693,7 @@ proc pretty*(node: JsonNode, indent = 2): string =
   ## Similar to prettyprint in Python.
   runnableExamples:
     let j = %* {"name": "Isaac", "books": ["Robot Dreams"],
-                "details": {"age":35, "pi":3.1415}}
+                "details": {"age": 35, "pi": 3.1415}}
     doAssert pretty(j) == """
 {
   "name": "Isaac",
@@ -721,14 +723,14 @@ proc toUgly*(result: var string, node: JsonNode) =
     result.add "["
     for child in node.elems:
       if comma: result.add ","
-      else: comma = true
+      else: comma = true
       result.toUgly child
     result.add "]"
   of JObject:
     result.add "{"
     for key, value in pairs(node.fields):
       if comma: result.add ","
-      else: comma = true
+      else: comma = true
       key.escapeJson(result)
       result.add ":"
       result.toUgly value
@@ -1331,7 +1333,8 @@ proc createConstructor(typeSym, jsonNode: NimNode): NimNode =
         (
           var map = `tableInit`[`tableKeyType`, `tableValueType`]();
           verifyJsonKind(`jsonNode`, {JObject}, astToStr(`jsonNode`));
-          for `forLoopKey` in keys(`jsonNode`.fields): map[`forLoopKey`] = `constructorNode`;
+          for `forLoopKey` in keys(`jsonNode`.fields): map[
+            `forLoopKey`] = `constructorNode`;
           map
         )
     of "ref":
@@ -1374,7 +1377,8 @@ proc createConstructor(typeSym, jsonNode: NimNode): NimNode =
       (
         var list: `typeSym`;
         verifyJsonKind(`jsonNode`, {JArray}, astToStr(`jsonNode`));
-        for `forLoopI` in 0 ..< `jsonNode`.len: list[`forLoopI`] =`constructorNode`;
+        for `forLoopI` in 0 ..< `jsonNode`.len: list[
+          `forLoopI`] = `constructorNode`;
         list
       )
   of "tuple":
@@ -1640,11 +1644,11 @@ when isMainModule:
   except:
     doAssert(false, "IndexError thrown for valid index")
 
-  doAssert(testJson{"b"}.getStr()=="asd", "Couldn't fetch a singly nested key with {}")
+  doAssert(testJson{"b"}.getStr() == "asd", "Couldn't fetch a singly nested key with {}")
   doAssert(isNil(testJson{"nonexistent"}), "Non-existent keys should return nil")
   doAssert(isNil(testJson{"a", "b"}), "Indexing through a list should return nil")
   doAssert(isNil(testJson{"a", "b"}), "Indexing through a list should return nil")
-  doAssert(testJson{"a"}==parseJson"[1, 2, 3, 4]", "Didn't return a non-JObject when there was one to be found")
+  doAssert(testJson{"a"} == parseJson"[1, 2, 3, 4]", "Didn't return a non-JObject when there was one to be found")
   doAssert(isNil(parseJson("[1, 2, 3]"){"foo"}), "Indexing directly into a list should return nil")
 
   # Generator:
@@ -1669,10 +1673,10 @@ when isMainModule:
   const hisAge = 31
 
   var j3 = %*
-    [ { "name": "John"
+    [ {"name": "John"
       , "age": herAge
       }
-    , { "name": "Susan"
+    , {"name": "Susan"
       , "age": hisAge
       }
     ]
@@ -1708,7 +1712,8 @@ when isMainModule:
   except IndexError: doAssert(true)
 
   var parsed2 = parseFile("tests/testdata/jsontest2.json")
-  doAssert(parsed2{"repository", "description"}.str=="IRC Library for Haskell", "Couldn't fetch via multiply nested key using {}")
+  doAssert(parsed2{"repository", "description"}.str ==
+      "IRC Library for Haskell", "Couldn't fetch via multiply nested key using {}")
 
   doAssert escapeJsonUnquoted("\10Foo🎃barÄ") == "\\nFoo🎃barÄ"
   doAssert escapeJsonUnquoted("\0\7\20") == "\\u0000\\u0007\\u0014" # for #7887
@@ -1752,15 +1757,15 @@ when isMainModule:
   # Generate constructors for range[T] types
   block:
     type
-      Q1 = range[0'u8 .. 50'u8]
+      Q1 = range[0'u8 .. 50'u8]
       Q2 = range[0'u16 .. 50'u16]
       Q3 = range[0'u32 .. 50'u32]
-      Q4 = range[0'i8 .. 50'i8]
+      Q4 = range[0'i8 .. 50'i8]
       Q5 = range[0'i16 .. 50'i16]
       Q6 = range[0'i32 .. 50'i32]
       Q7 = range[0'f32 .. 50'f32]
       Q8 = range[0'f64 .. 50'f64]
-      Q9 = range[0 .. 50]
+      Q9 = range[0 .. 50]
 
       X = object
         m1: Q1
diff --git a/lib/pure/lexbase.nim b/lib/pure/lexbase.nim
index 0ef4a147a1..8bc96c82cc 100644
--- a/lib/pure/lexbase.nim
+++ b/lib/pure/lexbase.nim
@@ -15,7 +15,7 @@ import strutils, streams
 
 const
-  EndOfFile* = '\0' ## end of file marker
+  EndOfFile* = '\0' ## end of file marker
   NewLines* = {'\c', '\L'}
 
 # Buffer handling:
@@ -27,13 +27,13 @@ const
 type
   BaseLexer* = object of RootObj ## the base lexer. Inherit your lexer from
                                  ## this object.
-    bufpos*: int     ## the current position within the buffer
-    buf*: string     ## the buffer itself
-    input: Stream    ## the input stream
-    lineNumber*: int ## the current line number
+    bufpos*: int     ## the current position within the buffer
+    buf*: string     ## the buffer itself
+    input: Stream    ## the input stream
+    lineNumber*: int ## the current line number
     sentinel: int
-    lineStart: int   # index of last line start in buffer
-    offsetBase*: int # use ``offsetBase + bufpos`` to get the offset
+    lineStart: int   # index of last line start in buffer
+    offsetBase*: int # use ``offsetBase + bufpos`` to get the offset
     refillChars: set[char]
 
 proc close*(L: var BaseLexer) =
@@ -65,11 +65,11 @@ proc fillBuffer(L: var BaseLexer) =
   charsRead = L.input.readDataStr(L.buf, toCopy ..< toCopy + L.sentinel + 1)
   s = toCopy + charsRead
   if charsRead < L.sentinel + 1:
-    L.buf[s] = EndOfFile # set end marker
+    L.buf[s] = EndOfFile # set end marker
     L.sentinel = s
   else:
     # compute sentinel:
-    dec(s) # BUGFIX (valgrind)
+    dec(s) # BUGFIX (valgrind)
     while true:
       assert(s < L.buf.len)
       while s >= 0 and L.buf[s] notin L.refillChars: dec(s)
@@ -92,7 +92,7 @@ proc fillBuffer(L: var BaseLexer) =
 proc fillBaseLexer(L: var BaseLexer, pos: int): int =
   assert(pos <= L.sentinel)
   if pos < L.sentinel:
-    result = pos + 1 # nothing to do
+    result = pos + 1 # nothing to do
   else:
     fillBuffer(L)
     L.offsetBase += pos
@@ -142,7 +142,7 @@ proc open*(L: var BaseLexer, input: Stream, bufLen: int = 8192;
   L.buf = newString(bufLen)
   L.sentinel = bufLen - 1
   L.lineStart = 0
-  L.lineNumber = 1 # lines start at 1
+  L.lineNumber = 1 # lines start at 1
   fillBuffer(L)
   skipUtf8Bom(L)
diff --git a/lib/pure/parsecfg.nim b/lib/pure/parsecfg.nim
index 0fa6668863..4fd5647f67 100644
--- a/lib/pure/parsecfg.nim
+++ b/lib/pure/parsecfg.nim
@@ -115,34 +115,34 @@ include "system/inclrtl"
 
 type
   CfgEventKind* = enum ## enumeration of all events that may occur when parsing
-    cfgEof,            ## end of file reached
-    cfgSectionStart,   ## a ``[section]`` has been parsed
-    cfgKeyValuePair,   ## a ``key=value`` pair has been detected
-    cfgOption,         ## a ``--key=value`` command line option
-    cfgError           ## an error occurred during parsing
+    cfgEof,            ## end of file reached
+    cfgSectionStart,   ## a ``[section]`` has been parsed
+    cfgKeyValuePair,   ## a ``key=value`` pair has been detected
+    cfgOption,         ## a ``--key=value`` command line option
+    cfgError           ## an error occurred during parsing
 
   CfgEvent* = object of RootObj ## describes a parsing event
     case kind*: CfgEventKind    ## the kind of the event
     of cfgEof: nil
     of cfgSectionStart:
-      section*: string ## `section` contains the name of the
-                       ## parsed section start (syntax: ``[section]``)
+      section*: string ## `section` contains the name of the
+                       ## parsed section start (syntax: ``[section]``)
     of cfgKeyValuePair, cfgOption:
-      key*, value*: string ## contains the (key, value) pair if an option
-                           ## of the form ``--key: value`` or an ordinary
-                           ## ``key= value`` pair has been parsed.
-                           ## ``value==""`` if it was not specified in the
-                           ## configuration file.
-    of cfgError:       ## the parser encountered an error: `msg`
-      msg*: string     ## contains the error message. No exceptions
-                       ## are thrown if a parse error occurs.
+      key*, value*: string ## contains the (key, value) pair if an option
+                           ## of the form ``--key: value`` or an ordinary
+                           ## ``key= value`` pair has been parsed.
+                           ## ``value==""`` if it was not specified in the
+                           ## configuration file.
+    of cfgError:       ## the parser encountered an error: `msg`
+      msg*: string     ## contains the error message. No exceptions
+                       ## are thrown if a parse error occurs.
 
   TokKind = enum
     tkInvalid, tkEof, tkSymbol, tkEquals, tkColon, tkBracketLe, tkBracketRi,
     tkDashDash
-  Token = object    # a token
-    kind: TokKind   # the type of the token
-    literal: string # the parsed (string) literal
+  Token = object    # a token
+    kind: TokKind   # the type of the token
+    literal: string # the parsed (string) literal
 
   CfgParser* = object of BaseLexer ## the parser object.
     tok: Token
@@ -203,7 +203,7 @@ proc handleDecChars(c: var CfgParser, xi: var int) =
     inc(c.bufpos)
 
 proc getEscapedChar(c: var CfgParser, tok: var Token) =
-  inc(c.bufpos) # skip '\'
+  inc(c.bufpos) # skip '\'
   case c.buf[c.bufpos]
   of 'n', 'N':
     add(tok.literal, "\n")
@@ -258,11 +258,11 @@ proc handleCRLF(c: var CfgParser, pos: int): int =
   else: result = pos
 
 proc getString(c: var CfgParser, tok: var Token, rawMode: bool) =
-  var pos = c.bufpos + 1 # skip "
+  var pos = c.bufpos + 1 # skip "
   tok.kind = tkSymbol
   if (c.buf[pos] == '"') and (c.buf[pos + 1] == '"'):
     # long string literal:
-    inc(pos, 2) # skip ""
+    inc(pos, 2) # skip ""
     # skip leading newline:
     pos = handleCRLF(c, pos)
     while true:
@@ -280,13 +280,13 @@ proc getString(c: var CfgParser, tok: var Token, rawMode: bool) =
       else:
         add(tok.literal, c.buf[pos])
         inc(pos)
-    c.bufpos = pos + 3 # skip the three """
+    c.bufpos = pos + 3 # skip the three """
   else:
     # ordinary string literal
     while true:
       var ch = c.buf[pos]
       if ch == '"':
-        inc(pos) # skip '"'
+        inc(pos) # skip '"'
         break
       if ch in {'\c', '\L', lexbase.EndOfFile}:
         tok.kind = tkInvalid
@@ -320,7 +320,7 @@ proc skip(c: var CfgParser) =
     of '\c', '\L':
       pos = handleCRLF(c, pos)
     else:
-      break # EndOfFile also leaves the loop
+      break # EndOfFile also leaves the loop
   c.bufpos = pos
 
 proc rawGetTok(c: var CfgParser, tok: var Token) =
@@ -370,13 +370,13 @@ proc errorStr*(c: CfgParser, msg: string): string {.rtl, extern: "npc$1".} =
   ## returns a properly formatted error message containing current line and
   ## column information.
   result = `%`("$1($2, $3) Error: $4",
-    [c.filename, $getLine(c), $getColumn(c), msg])
+    [c.filename, $getLine(c), $getColumn(c), msg])
 
 proc warningStr*(c: CfgParser, msg: string): string {.rtl, extern: "npc$1".} =
   ## returns a properly formatted warning message containing current line and
   ## column information.
   result = `%`("$1($2, $3) Warning: $4",
-    [c.filename, $getLine(c), $getColumn(c), msg])
+    [c.filename, $getLine(c), $getColumn(c), msg])
 
 proc ignoreMsg*(c: CfgParser, e: CfgEvent): string {.rtl, extern: "npc$1".} =
   ## returns a properly formatted warning message containing that
diff --git a/lib/pure/parsecsv.nim b/lib/pure/parsecsv.nim
index 91c8780785..741ce33b8b 100644
--- a/lib/pure/parsecsv.nim
+++ b/lib/pure/parsecsv.nim
@@ -71,12 +71,12 @@ import
 type
   CsvRow* = seq[string] ## A row in a CSV file.
   CsvParser* = object of BaseLexer ## The parser object.
-    ##
-    ## It consists of two public fields:
-    ## * `row` is the current row
-    ## * `headers` are the columns that are defined in the csv file
-    ##   (read using `readHeaderRow <#readHeaderRow,CsvParser>`_).
-    ##   Used with `rowEntry <#rowEntry,CsvParser,string>`_).
+    ##
+    ## It consists of two public fields:
+    ## * `row` is the current row
+    ## * `headers` are the columns that are defined in the csv file
+    ##   (read using `readHeaderRow <#readHeaderRow,CsvParser>`_).
+    ##   Used with `rowEntry <#rowEntry,CsvParser,string>`_).
     row*: CsvRow
     filename: string
     sep, quote, esc: char
diff --git a/lib/pure/parsejson.nim b/lib/pure/parsejson.nim
index 9893e434ee..0d7d7093ec 100644
--- a/lib/pure/parsejson.nim
+++ b/lib/pure/parsejson.nim
@@ -15,21 +15,21 @@ import strutils, lexbase, streams, unicode
 
 type
-  JsonEventKind* = enum ## enumeration of all events that may occur when parsing
-    jsonError,          ## an error occurred during parsing
-    jsonEof,            ## end of file reached
-    jsonString,         ## a string literal
-    jsonInt,            ## an integer literal
-    jsonFloat,          ## a float literal
-    jsonTrue,           ## the value ``true``
-    jsonFalse,          ## the value ``false``
-    jsonNull,           ## the value ``null``
-    jsonObjectStart,    ## start of an object: the ``{`` token
-    jsonObjectEnd,      ## end of an object: the ``}`` token
-    jsonArrayStart,     ## start of an array: the ``[`` token
-    jsonArrayEnd        ## start of an array: the ``]`` token
+  JsonEventKind* = enum ## enumeration of all events that may occur when parsing
+    jsonError,          ## an error occurred during parsing
+    jsonEof,            ## end of file reached
+    jsonString,         ## a string literal
+    jsonInt,            ## an integer literal
+    jsonFloat,          ## a float literal
+    jsonTrue,           ## the value ``true``
+    jsonFalse,          ## the value ``false``
+    jsonNull,           ## the value ``null``
+    jsonObjectStart,    ## start of an object: the ``{`` token
+    jsonObjectEnd,      ## end of an object: the ``}`` token
+    jsonArrayStart,     ## start of an array: the ``[`` token
+    jsonArrayEnd        ## start of an array: the ``]`` token
 
-  TokKind* = enum # must be synchronized with TJsonEventKind!
+  TokKind* = enum # must be synchronized with TJsonEventKind!
     tkError,
     tkEof,
     tkString,
@@ -45,18 +45,18 @@ type
     tkColon,
     tkComma
 
-  JsonError* = enum       ## enumeration that lists all errors that can occur
-    errNone,              ## no error
-    errInvalidToken,      ## invalid token
-    errStringExpected,    ## string expected
-    errColonExpected,     ## ``:`` expected
-    errCommaExpected,     ## ``,`` expected
-    errBracketRiExpected, ## ``]`` expected
-    errCurlyRiExpected,   ## ``}`` expected
-    errQuoteExpected,     ## ``"`` or ``'`` expected
-    errEOC_Expected,      ## ``*/`` expected
-    errEofExpected,       ## EOF expected
-    errExprExpected       ## expr expected
+  JsonError* = enum       ## enumeration that lists all errors that can occur
+    errNone,              ## no error
+    errInvalidToken,      ## invalid token
+    errStringExpected,    ## string expected
+    errColonExpected,     ## ``:`` expected
+    errCommaExpected,     ## ``,`` expected
+    errBracketRiExpected, ## ``]`` expected
+    errCurlyRiExpected,   ## ``}`` expected
+    errQuoteExpected,     ## ``"`` or ``'`` expected
+    errEOC_Expected,      ## ``*/`` expected
+    errEofExpected,       ## EOF expected
+    errExprExpected       ## expr expected
 
   ParserState = enum
     stateEof, stateStart, stateObject, stateArray, stateExpectArrayComma,
diff --git a/lib/pure/parseopt.nim b/lib/pure/parseopt.nim
index 545f9f00aa..23978c964c 100644
--- a/lib/pure/parseopt.nim
+++ b/lib/pure/parseopt.nim
@@ -155,11 +155,11 @@ import os, strutils
 
 type
-  CmdLineKind* = enum ## The detected command line token.
-    cmdEnd,           ## End of command line reached
-    cmdArgument,      ## An argument such as a filename
-    cmdLongOption,    ## A long option such as --option
-    cmdShortOption    ## A short option such as -c
+  CmdLineKind* = enum ## The detected command line token.
+    cmdEnd,           ## End of command line reached
+    cmdArgument,      ## An argument such as a filename
+    cmdLongOption,    ## A long option such as --option
+    cmdShortOption    ## A short option such as -c
   OptParser* = object of RootObj ## \
     ## Implementation of the command line parser.
     ##
@@ -172,10 +172,10 @@ type
     longNoVal: seq[string]
    cmds: seq[string]
     idx: int
-    kind*: CmdLineKind        ## The detected command line token
-    key*, val*: TaintedString ## Key and value pair; the key is the option
-                              ## or the argument, and the value is not "" if
-                              ## the option was given a value
+    kind*: CmdLineKind        ## The detected command line token
+    key*, val*: TaintedString ## Key and value pair; the key is the option
+                              ## or the argument, and the value is not "" if
+                              ## the option was given a value
 
 proc parseWord(s: string, i: int, w: var string,
                delim: set[char] = {'\t', ' '}): int =
@@ -197,7 +197,7 @@ when declared(os.paramCount):
   # we cannot provide this for NimRtl creation on Posix, because we can't
   # access the command line arguments then!
 
-  proc initOptParser*(cmdline = "", shortNoVal: set[char]={},
+  proc initOptParser*(cmdline = "", shortNoVal: set[char] = {},
                       longNoVal: seq[string] = @[];
                       allowWhitespaceAfterColon = true): OptParser =
     ## Initializes the command line parser.
@@ -235,7 +235,7 @@ when declared(os.paramCount):
     result.key = TaintedString""
     result.val = TaintedString""
 
-  proc initOptParser*(cmdline: seq[TaintedString], shortNoVal: set[char]={},
+  proc initOptParser*(cmdline: seq[TaintedString], shortNoVal: set[char] = {},
                       longNoVal: seq[string] = @[];
                       allowWhitespaceAfterColon = true): OptParser =
     ## Initializes the command line parser.
@@ -345,7 +345,8 @@ proc next*(p: var OptParser) {.rtl, extern: "npo$1".} =
       inc(i)
       while i < p.cmds[p.idx].len and p.cmds[p.idx][i] in {'\t', ' '}: inc(i)
       # if we're at the end, use the next command line option:
-      if i >= p.cmds[p.idx].len and p.idx < p.cmds.len and p.allowWhitespaceAfterColon:
+      if i >= p.cmds[p.idx].len and p.idx < p.cmds.len and
+          p.allowWhitespaceAfterColon:
        inc p.idx
        i = 0
    if p.idx < p.cmds.len:
@@ -403,7 +404,8 @@ proc remainingArgs*(p: OptParser): seq[TaintedString] {.rtl, extern: "npo$1".} =
   result = @[]
   for i in p.idx..