docgen understands and ignores *when false*
@@ -57,6 +57,7 @@ hint[LineTooLong]=off
@if unix:
  @if not bsd:
    gcc.options.linker = "-ldl"
    tcc.options.linker = "-ldl"
  @end
@end

@@ -2542,10 +2542,10 @@ cannot be inherited from.

Pure pragma
-----------
The `pure`:idx: pragma serves two completely different purposes:

1) To mark a procedure that Nimrod should not generate any exit statements like
1. To mark a procedure that Nimrod should not generate any exit statements like
   ``return result;`` in the generated code. This is useful for procs that only
   consist of an assembler statement.
2) To mark an object type so that its type field should be omitted. This is
2. To mark an object type so that its type field should be omitted. This is
   necessary for binary compatibility with other compiled languages.

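A minimal sketch of the second use (illustrative only, not from the patch; the first use concerns procs whose body is a single assembler statement and is not sketched here):

.. code-block:: Nimrod
  type
    TCPoint {.pure, final.} = object
      # ``pure`` omits the hidden type field, so the layout stays
      # binary compatible with a C ``struct { int x; int y; }``
      x, y: cint
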
@@ -2795,7 +2795,7 @@ a dynamic library. The pragma then has no argument and has to be used in
conjunction with the ``exportc`` pragma:

.. code-block:: Nimrod
  proc exportme(): int {.cdecl, export, dynlib.}
  proc exportme(): int {.cdecl, exportc, dynlib.}

This is only useful if the program is compiled as a dynamic library via the
``--app:lib`` command line option.

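For context, a hedged end-to-end sketch (the file name and return value are made up; the compile command follows from the ``--app:lib`` option mentioned above):

.. code-block:: Nimrod
  # mylib.nim -- build with: nimrod c --app:lib mylib.nim
  proc exportme(): int {.cdecl, exportc, dynlib.} =
    result = 42
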
@@ -98,6 +98,11 @@ macro meaning
``\i``         ignore case for matching; use this at the start of the PEG
``\y``         ignore style for matching; use this at the start of the PEG
``\ident``     a standard ASCII identifier: ``[a-zA-Z_][a-zA-Z_0-9]*``
``\letter``    any Unicode letter
``\upper``     any Unicode uppercase letter
``\lower``     any Unicode lowercase letter
``\title``     any Unicode title letter
``\white``     any Unicode whitespace character
============== ============================================================

A backslash followed by a letter is a built-in macro, otherwise it

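The new Unicode built-ins can be exercised like this (adapted from the asserts this commit adds to the pegs self-test):

.. code-block:: Nimrod
  import pegs

  assert match("eine übersicht und außerdem", peg"(\letter \white*)+")
  assert match("EINE ÜBERSICHT UND AUSSERDEM", peg"(\upper \white*)+")
  assert(not match("456678", peg"(\letter)+"))
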
@@ -75,9 +75,9 @@ Inheritance is done with the ``object of`` syntax. Multiple inheritance is
currently not supported. If an object type has no suitable ancestor, ``TObject``
can be used as its ancestor, but this is only a convention.

**Note**: Aggregation (*has-a* relation) is often preferable to inheritance
**Note**: Composition (*has-a* relation) is often preferable to inheritance
(*is-a* relation) for simple code reuse. Since objects are value types in
Nimrod, aggregation is as efficient as inheritance.
Nimrod, composition is as efficient as inheritance.

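A small sketch of the distinction, in the tutorial's style (the ``TCourse`` type is my own illustration):

.. code-block:: Nimrod
  type
    TPerson = object of TObject   # is-a: inheritance
      name: string
    TStudent = object of TPerson  # a student is a person
      id: int
    TCourse = object              # has-a: composition
      teacher: TPerson            # stored by value; no extra indirection
      title: string
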
Mutually recursive types
@@ -487,7 +487,7 @@ The example shows a generic binary tree. Depending on context, the brackets are
used either to introduce type parameters or to instantiate a generic proc,
iterator or type. As the example shows, generics work with overloading: the
best match of ``add`` is used. The built-in ``add`` procedure for sequences
is not hidden and used in the ``preorder`` iterator.
is not hidden and is used in the ``preorder`` iterator.

Templates
245 examples/httpserver2.nim (new file)
@@ -0,0 +1,245 @@
|
||||
import strutils, os, osproc, strtabs, streams, sockets
|
||||
|
||||
const
|
||||
wwwNL* = "\r\L"
|
||||
ServerSig = "Server: httpserver.nim/1.0.0" & wwwNL
|
||||
|
||||
type
|
||||
TRequestMethod = enum reqGet, reqPost
|
||||
TServer* = object ## contains the current server state
|
||||
socket: TSocket
|
||||
job: seq[TJob]
|
||||
TJob* = object
|
||||
client: TSocket
|
||||
process: PProcess
|
||||
|
||||
# --------------- output messages --------------------------------------------
|
||||
|
||||
proc sendTextContentType(client: TSocket) =
|
||||
send(client, "Content-type: text/html" & wwwNL)
|
||||
send(client, wwwNL)
|
||||
|
||||
proc badRequest(client: TSocket) =
|
||||
# Inform the client that a request it has made has a problem.
|
||||
send(client, "HTTP/1.0 400 BAD REQUEST" & wwwNL)
|
||||
sendTextContentType(client)
|
||||
send(client, "<p>Your browser sent a bad request, " &
|
||||
"such as a POST without a Content-Length.</p>" & wwwNL)
|
||||
|
||||
|
||||
proc cannotExec(client: TSocket) =
|
||||
send(client, "HTTP/1.0 500 Internal Server Error" & wwwNL)
|
||||
sendTextContentType(client)
|
||||
send(client, "<P>Error prohibited CGI execution.</p>" & wwwNL)
|
||||
|
||||
|
||||
proc headers(client: TSocket, filename: string) =
|
||||
# XXX could use filename to determine file type
|
||||
send(client, "HTTP/1.0 200 OK" & wwwNL)
|
||||
send(client, ServerSig)
|
||||
sendTextContentType(client)
|
||||
|
||||
proc notFound(client: TSocket, path: string) =
|
||||
send(client, "HTTP/1.0 404 NOT FOUND" & wwwNL)
|
||||
send(client, ServerSig)
|
||||
sendTextContentType(client)
|
||||
send(client, "<html><title>Not Found</title>" & wwwNL)
|
||||
send(client, "<body><p>The server could not fulfill" & wwwNL)
|
||||
send(client, "your request because the resource <b>" & path & "</b>" & wwwNL)
|
||||
send(client, "is unavailable or nonexistent.</p>" & wwwNL)
|
||||
send(client, "</body></html>" & wwwNL)
|
||||
|
||||
|
||||
proc unimplemented(client: TSocket) =
|
||||
send(client, "HTTP/1.0 501 Method Not Implemented" & wwwNL)
|
||||
send(client, ServerSig)
|
||||
sendTextContentType(client)
|
||||
send(client, "<html><head><title>Method Not Implemented" &
|
||||
"</title></head>" &
|
||||
"<body><p>HTTP request method not supported.</p>" &
|
||||
"</body></HTML>" & wwwNL)
|
||||
|
||||
|
||||
# ----------------- file serving ---------------------------------------------
|
||||
|
||||
proc discardHeaders(client: TSocket) = skip(client)
|
||||
|
||||
proc serveFile(client: TSocket, filename: string) =
|
||||
discardHeaders(client)
|
||||
|
||||
var f: TFile
|
||||
if open(f, filename):
|
||||
headers(client, filename)
|
||||
const bufSize = 8000 # != 8K might be good for memory manager
|
||||
var buf = alloc(bufsize)
|
||||
while True:
|
||||
var bytesread = readBuffer(f, buf, bufsize)
|
||||
if bytesread > 0:
|
||||
var byteswritten = send(client, buf, bytesread)
|
||||
if bytesread != bytesWritten:
|
||||
dealloc(buf)
|
||||
close(f)
|
||||
OSError()
|
||||
if bytesread != bufSize: break
|
||||
dealloc(buf)
|
||||
close(f)
|
||||
client.close()
|
||||
else:
|
||||
notFound(client, filename)
|
||||
|
||||
# ------------------ CGI execution -------------------------------------------
|
||||
|
||||
proc executeCgi(server: var TServer, client: TSocket, path, query: string,
|
||||
meth: TRequestMethod) =
|
||||
var env = newStringTable(modeCaseInsensitive)
|
||||
var contentLength = -1
|
||||
case meth
|
||||
of reqGet:
|
||||
discardHeaders(client)
|
||||
|
||||
env["REQUEST_METHOD"] = "GET"
|
||||
env["QUERY_STRING"] = query
|
||||
of reqPost:
|
||||
var buf = ""
|
||||
var dataAvail = true
|
||||
while dataAvail:
|
||||
dataAvail = recvLine(client, buf)
|
||||
if buf.len == 0:
|
||||
break
|
||||
var L = toLower(buf)
|
||||
if L.startsWith("content-length:"):
|
||||
var i = len("content-length:")
|
||||
while L[i] in Whitespace: inc(i)
|
||||
contentLength = parseInt(copy(L, i))
|
||||
|
||||
if contentLength < 0:
|
||||
badRequest(client)
|
||||
return
|
||||
|
||||
env["REQUEST_METHOD"] = "POST"
|
||||
env["CONTENT_LENGTH"] = $contentLength
|
||||
|
||||
send(client, "HTTP/1.0 200 OK" & wwwNL)
|
||||
|
||||
var process = startProcess(command=path, env=env)
|
||||
|
||||
var job: TJob
|
||||
job.process = process
|
||||
job.client = client
|
||||
server.job.add(job)
|
||||
|
||||
if meth == reqPost:
|
||||
# get from client and post to CGI program:
|
||||
var buf = alloc(contentLength)
|
||||
if recv(client, buf, contentLength) != contentLength:
|
||||
dealloc(buf)
|
||||
OSError()
|
||||
var inp = process.inputStream
|
||||
inp.writeData(inp, buf, contentLength)
|
||||
dealloc(buf)
|
||||
|
||||
proc animate(server: var TServer) =
|
||||
# checks list of jobs, removes finished ones (pretty sloppy by seq copying)
|
||||
var active_jobs: seq[TJob] = @[]
|
||||
for i in 0..server.job.len-1:
|
||||
var job = server.job[i]
|
||||
if running(job.process):
|
||||
active_jobs.add(job)
|
||||
else:
|
||||
# read process output stream and send it to client
|
||||
var outp = job.process.outputStream
|
||||
while true:
|
||||
var line = outp.readstr(1024)
|
||||
if line.len == 0:
|
||||
break
|
||||
else:
|
||||
try:
|
||||
send(job.client, line)
|
||||
except:
|
||||
echo("send failed, client diconnected")
|
||||
close(job.client)
|
||||
|
||||
server.job = active_jobs
|
||||
|
||||
# --------------- Server Setup -----------------------------------------------
|
||||
|
||||
proc acceptRequest(server: var TServer, client: TSocket) =
|
||||
var cgi = false
|
||||
var query = ""
|
||||
var buf = ""
|
||||
discard recvLine(client, buf)
|
||||
var path = ""
|
||||
var data = buf.split()
|
||||
var meth = reqGet
|
||||
var q = find(data[1], '?')
|
||||
|
||||
# extract path
|
||||
if q >= 0:
|
||||
# strip "?..." from path, this may be found in both POST and GET
|
||||
path = data[1].copy(0, q-1)
|
||||
else:
|
||||
path = data[1]
|
||||
# path starts with "/", by adding "." in front of it we serve files from cwd
|
||||
path = "." & path
|
||||
|
||||
echo("accept: " & path)
|
||||
|
||||
if cmpIgnoreCase(data[0], "GET") == 0:
|
||||
if q >= 0:
|
||||
cgi = true
|
||||
query = data[1].copy(q+1)
|
||||
elif cmpIgnoreCase(data[0], "POST") == 0:
|
||||
cgi = true
|
||||
meth = reqPost
|
||||
else:
|
||||
unimplemented(client)
|
||||
|
||||
if path[path.len-1] == '/' or existsDir(path):
|
||||
path = path / "index.html"
|
||||
|
||||
if not ExistsFile(path):
|
||||
discardHeaders(client)
|
||||
notFound(client, path)
|
||||
client.close()
|
||||
else:
|
||||
when defined(Windows):
|
||||
var ext = splitFile(path).ext.toLower
|
||||
if ext == ".exe" or ext == ".cgi":
|
||||
# XXX: extract interpreter information here?
|
||||
cgi = true
|
||||
else:
|
||||
if {fpUserExec, fpGroupExec, fpOthersExec} * path.getFilePermissions != {}:
|
||||
cgi = true
|
||||
if not cgi:
|
||||
serveFile(client, path)
|
||||
else:
|
||||
executeCgi(server, client, path, query, meth)
|
||||
|
||||
|
||||
|
||||
when isMainModule:
|
||||
var port = 80
|
||||
|
||||
var server: TServer
|
||||
server.job = @[]
|
||||
server.socket = socket(AF_INET)
|
||||
if server.socket == InvalidSocket: OSError()
|
||||
server.socket.bindAddr(port=TPort(port))
|
||||
listen(server.socket)
|
||||
echo("server up on port " & $port)
|
||||
|
||||
while true:
|
||||
# check for new connection & handle it
|
||||
var list: seq[TSocket] = @[server.socket]
|
||||
if select(list, 10) > 0:
|
||||
var client = accept(server.socket)
|
||||
try:
|
||||
acceptRequest(server, client)
|
||||
except:
|
||||
echo("failed to accept client request")
|
||||
|
||||
# polling events
|
||||
animate(server)
|
||||
# some slack for CPU
|
||||
sleep(10)
|
||||
server.socket.close()
|
||||
@@ -22,6 +22,10 @@ __TINYC__
#ifndef NIMBASE_H
#define NIMBASE_H

#if defined(__GNUC__)
#  define _GNU_SOURCE 1
#endif

#if !defined(__TINYC__)
#  include <math.h>
#else
@@ -29,7 +33,6 @@ __TINYC__
#  define GCC_MAJOR 4
#  define __GNUC_MINOR__ 4
#  define __GNUC_PATCHLEVEL__ 5 */

#  define __DECLSPEC_SUPPORTED 1
#endif

@@ -7,7 +7,7 @@
# distribution, for details about the copyright.
#

## This module implements helper procs for CGI applictions. Example:
## This module implements helper procs for CGI applications. Example:
##
## .. code-block:: Nimrod
##
@@ -29,7 +29,7 @@
## writeln(stdout, "your password: " & myData["password"])
## writeln(stdout, "</body></html>")

import strutils, os, strtabs
import strutils, os, strtabs, cookies

proc URLencode*(s: string): string =
  ## Encodes a value to be HTTP safe: This means that characters in the set
@@ -355,32 +355,16 @@ proc setCookie*(name, value: string) =
|
||||
write(stdout, "Set-Cookie: ", name, "=", value, "\n")
|
||||
|
||||
var
|
||||
cookies: PStringTable = nil
|
||||
|
||||
proc parseCookies(s: string): PStringTable =
|
||||
result = newStringTable(modeCaseInsensitive)
|
||||
var i = 0
|
||||
while true:
|
||||
while s[i] == ' ' or s[i] == '\t': inc(i)
|
||||
var keystart = i
|
||||
while s[i] != '=' and s[i] != '\0': inc(i)
|
||||
var keyend = i-1
|
||||
if s[i] == '\0': break
|
||||
inc(i) # skip '='
|
||||
var valstart = i
|
||||
while s[i] != ';' and s[i] != '\0': inc(i)
|
||||
result[copy(s, keystart, keyend)] = copy(s, valstart, i-1)
|
||||
if s[i] == '\0': break
|
||||
inc(i) # skip ';'
|
||||
gcookies: PStringTable = nil
|
||||
|
||||
proc getCookie*(name: string): string =
|
||||
## Gets a cookie. If no cookie of `name` exists, "" is returned.
|
||||
if cookies == nil: cookies = parseCookies(getHttpCookie())
|
||||
result = cookies[name]
|
||||
if gcookies == nil: gcookies = parseCookies(getHttpCookie())
|
||||
result = gcookies[name]
|
||||
|
||||
proc existsCookie*(name: string): bool =
|
||||
## Checks if a cookie of `name` exists.
|
||||
if cookies == nil: cookies = parseCookies(getHttpCookie())
|
||||
result = hasKey(cookies, name)
|
||||
if gcookies == nil: gcookies = parseCookies(getHttpCookie())
|
||||
result = hasKey(gcookies, name)
|
||||
|
||||
|
||||
|
||||
30 lib/pure/cookies.nim (new file)
@@ -0,0 +1,30 @@
|
||||
#
|
||||
#
|
||||
# Nimrod's Runtime Library
|
||||
# (c) Copyright 2010 Andreas Rumpf
|
||||
#
|
||||
# See the file "copying.txt", included in this
|
||||
# distribution, for details about the copyright.
|
||||
#
|
||||
|
||||
## This module implements helper procs for parsing Cookies.
|
||||
|
||||
import strtabs
|
||||
|
||||
proc parseCookies*(s: string): PStringTable =
|
||||
## parses cookies into a string table.
|
||||
result = newStringTable(modeCaseInsensitive)
|
||||
var i = 0
|
||||
while true:
|
||||
while s[i] == ' ' or s[i] == '\t': inc(i)
|
||||
var keystart = i
|
||||
while s[i] != '=' and s[i] != '\0': inc(i)
|
||||
var keyend = i-1
|
||||
if s[i] == '\0': break
|
||||
inc(i) # skip '='
|
||||
var valstart = i
|
||||
while s[i] != ';' and s[i] != '\0': inc(i)
|
||||
result[copy(s, keystart, keyend)] = copy(s, valstart, i-1)
|
||||
if s[i] == '\0': break
|
||||
inc(i) # skip ';'
|
||||
|
||||
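A minimal usage sketch for the new ``cookies`` module (my own example, not part of the patch):

.. code-block:: Nimrod
  import cookies, strtabs

  var c = parseCookies("userid=al; theme=dark")
  assert c["theme"] == "dark"
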
@@ -136,9 +136,12 @@ proc executeCgi(client: TSocket, path, query: string, meth: TRequestMethod) =
|
||||
if meth == reqPost:
|
||||
# get from client and post to CGI program:
|
||||
var buf = alloc(contentLength)
|
||||
if recv(client, buf, contentLength) != contentLength: OSError()
|
||||
if recv(client, buf, contentLength) != contentLength:
|
||||
dealloc(buf)
|
||||
OSError()
|
||||
var inp = process.inputStream
|
||||
inp.writeData(inp, buf, contentLength)
|
||||
dealloc(buf)
|
||||
|
||||
var outp = process.outputStream
|
||||
while running(process) or not outp.atEnd(outp):
|
||||
@@ -153,10 +156,21 @@ proc acceptRequest(client: TSocket) =
|
||||
var query = ""
|
||||
var buf = ""
|
||||
discard recvLine(client, buf)
|
||||
var path = ""
|
||||
var data = buf.split()
|
||||
var meth = reqGet
|
||||
|
||||
var q = find(data[1], '?')
|
||||
|
||||
# extract path
|
||||
if q >= 0:
|
||||
# strip "?..." from path, this may be found in both POST and GET
|
||||
path = "." & data[1].copy(0, q-1)
|
||||
else:
|
||||
path = "." & data[1]
|
||||
# path starts with "/", by adding "." in front of it we serve files from cwd
|
||||
|
||||
if cmpIgnoreCase(data[0], "GET") == 0:
|
||||
var q = find(data[1], '?')
|
||||
if q >= 0:
|
||||
cgi = true
|
||||
query = data[1].copy(q+1)
|
||||
@@ -166,7 +180,6 @@ proc acceptRequest(client: TSocket) =
|
||||
else:
|
||||
unimplemented(client)
|
||||
|
||||
var path = data[1]
|
||||
if path[path.len-1] == '/' or existsDir(path):
|
||||
path = path / "index.html"
|
||||
|
||||
@@ -221,7 +234,7 @@ proc next*(s: var TServer) =
|
||||
var buf = ""
|
||||
discard recvLine(s.client, buf)
|
||||
var data = buf.split()
|
||||
if cmpIgnoreCase(data[0], "GET") == 0:
|
||||
if cmpIgnoreCase(data[0], "GET") == 0 or cmpIgnoreCase(data[0], "POST") == 0:
|
||||
var q = find(data[1], '?')
|
||||
if q >= 0:
|
||||
s.query = data[1].copy(q+1)
|
||||
|
||||
@@ -37,6 +37,11 @@ type
|
||||
pkAny, ## any character (.)
|
||||
pkAnyRune, ## any Unicode character (_)
|
||||
pkNewLine, ## CR-LF, LF, CR
|
||||
pkLetter, ## Unicode letter
|
||||
pkLower, ## Unicode lower case letter
|
||||
pkUpper, ## Unicode upper case letter
|
||||
pkTitle, ## Unicode title character
|
||||
pkWhitespace, ## Unicode whitespace character
|
||||
pkTerminal,
|
||||
pkTerminalIgnoreCase,
|
||||
pkTerminalIgnoreStyle,
|
||||
@@ -71,7 +76,7 @@ type
|
||||
rule: TNode ## the rule that the symbol refers to
|
||||
TNode {.final.} = object
|
||||
case kind: TPegKind
|
||||
of pkEmpty, pkAny, pkAnyRune, pkGreedyAny, pkNewLine: nil
|
||||
of pkEmpty..pkWhitespace: nil
|
||||
of pkTerminal, pkTerminalIgnoreCase, pkTerminalIgnoreStyle: term: string
|
||||
of pkChar, pkGreedyRepChar: ch: char
|
||||
of pkCharChoice, pkGreedyRepSet: charChoice: ref set[char]
|
||||
@@ -196,6 +201,7 @@ proc `@`*(a: TPeg): TPeg {.nosideEffect, rtl, extern: "npegsSearch".} =
|
||||
|
||||
proc `@@`*(a: TPeg): TPeg {.noSideEffect, rtl,
|
||||
extern: "npgegsCapturedSearch".} =
|
||||
## constructs a "captured search" for the PEG `a`
|
||||
result.kind = pkCapturedSearch
|
||||
result.sons = @[a]
|
||||
|
||||
@@ -237,6 +243,27 @@ proc newLine*: TPeg {.inline.} =
|
||||
## constructs the PEG `newline`:idx: (``\n``)
|
||||
result.kind = pkNewline
|
||||
|
||||
proc UnicodeLetter*: TPeg {.inline.} =
|
||||
## constructs the PEG ``\letter`` which matches any Unicode letter.
|
||||
result.kind = pkLetter
|
||||
|
||||
proc UnicodeLower*: TPeg {.inline.} =
|
||||
## constructs the PEG ``\lower`` which matches any Unicode lowercase letter.
|
||||
result.kind = pkLower
|
||||
|
||||
proc UnicodeUpper*: TPeg {.inline.} =
|
||||
## constructs the PEG ``\upper`` which matches any Unicode uppercase letter.
|
||||
result.kind = pkUpper
|
||||
|
||||
proc UnicodeTitle*: TPeg {.inline.} =
|
||||
## constructs the PEG ``\title`` which matches any Unicode title letter.
|
||||
result.kind = pkTitle
|
||||
|
||||
proc UnicodeWhitespace*: TPeg {.inline.} =
|
||||
## constructs the PEG ``\white`` which matches any Unicode
|
||||
## whitespace character.
|
||||
result.kind = pkWhitespace
|
||||
|
||||
proc capture*(a: TPeg): TPeg {.nosideEffect, rtl, extern: "npegsCapture".} =
|
||||
## constructs a capture with the PEG `a`
|
||||
result.kind = pkCapture
|
||||
@@ -267,8 +294,8 @@ proc spaceCost(n: TPeg): int =
|
||||
case n.kind
|
||||
of pkEmpty: nil
|
||||
of pkTerminal, pkTerminalIgnoreCase, pkTerminalIgnoreStyle, pkChar,
|
||||
pkGreedyRepChar, pkCharChoice, pkGreedyRepSet, pkAny, pkAnyRune,
|
||||
pkNewLine, pkGreedyAny:
|
||||
pkGreedyRepChar, pkCharChoice, pkGreedyRepSet,
|
||||
pkAny..pkWhitespace, pkGreedyAny:
|
||||
result = 1
|
||||
of pkNonTerminal:
|
||||
# we cannot inline a rule with a non-terminal
|
||||
@@ -379,6 +406,12 @@ proc toStrAux(r: TPeg, res: var string) =
|
||||
of pkEmpty: add(res, "()")
|
||||
of pkAny: add(res, '.')
|
||||
of pkAnyRune: add(res, '_')
|
||||
of pkLetter: add(res, "\\letter")
|
||||
of pkLower: add(res, "\\lower")
|
||||
of pkUpper: add(res, "\\upper")
|
||||
of pkTitle: add(res, "\\title")
|
||||
of pkWhitespace: add(res, "\\white")
|
||||
|
||||
of pkNewline: add(res, "\\n")
|
||||
of pkTerminal: add(res, singleQuoteEsc(r.term))
|
||||
of pkTerminalIgnoreCase:
|
||||
@@ -460,10 +493,15 @@ proc `$` *(r: TPeg): string {.nosideEffect, rtl, extern: "npegsToString".} =
|
||||
# --------------------- core engine -------------------------------------------
|
||||
|
||||
type
|
||||
TMatchClosure {.final.} = object
|
||||
TCaptures* {.final.} = object ## contains the captured substrings.
|
||||
matches: array[0..maxSubpatterns-1, tuple[first, last: int]]
|
||||
ml: int
|
||||
|
||||
proc bounds*(c: TCaptures,
|
||||
i: range[0..maxSubpatterns-1]): tuple[first, last: int] =
|
||||
## returns the bounds ``[first..last]`` of the `i`'th capture.
|
||||
result = c.matches[i]
|
||||
|
||||
when not useUnicode:
|
||||
type
|
||||
TRune = char
|
||||
@@ -472,9 +510,17 @@ when not useUnicode:
|
||||
inc(i)
|
||||
template runeLenAt(s, i: expr): expr = 1
|
||||
|
||||
proc m(s: string, p: TPeg, start: int, c: var TMatchClosure): int =
|
||||
## this implements a simple PEG interpreter. Thanks to superoperators it
|
||||
## has competitive performance nevertheless.
|
||||
proc isAlpha(a: char): bool {.inline.} = return a in {'a'..'z','A'..'Z'}
|
||||
proc isUpper(a: char): bool {.inline.} = return a in {'A'..'Z'}
|
||||
proc isLower(a: char): bool {.inline.} = return a in {'a'..'z'}
|
||||
proc isTitle(a: char): bool {.inline.} = return false
|
||||
proc isWhiteSpace(a: char): bool {.inline.} = return a in {' ', '\9'..'\13'}
|
||||
|
||||
proc rawMatch*(s: string, p: TPeg, start: int, c: var TCaptures): int {.
|
||||
nosideEffect, rtl, extern: "npegs$1".} =
|
||||
## low-level matching proc that implements the PEG interpreter. Use this
|
||||
## for maximum efficiency (every other PEG operation ends up calling this
|
||||
## proc).
|
||||
## Returns -1 if it does not match, else the length of the match
|
||||
case p.kind
|
||||
of pkEmpty: result = 0 # match of length 0
|
||||
@@ -486,6 +532,51 @@ proc m(s: string, p: TPeg, start: int, c: var TMatchClosure): int =
|
||||
result = runeLenAt(s, start)
|
||||
else:
|
||||
result = -1
|
||||
of pkLetter:
|
||||
if s[start] != '\0':
|
||||
var a: TRune
|
||||
result = start
|
||||
fastRuneAt(s, result, a)
|
||||
if isAlpha(a): dec(result, start)
|
||||
else: result = -1
|
||||
else:
|
||||
result = -1
|
||||
of pkLower:
|
||||
if s[start] != '\0':
|
||||
var a: TRune
|
||||
result = start
|
||||
fastRuneAt(s, result, a)
|
||||
if isLower(a): dec(result, start)
|
||||
else: result = -1
|
||||
else:
|
||||
result = -1
|
||||
of pkUpper:
|
||||
if s[start] != '\0':
|
||||
var a: TRune
|
||||
result = start
|
||||
fastRuneAt(s, result, a)
|
||||
if isUpper(a): dec(result, start)
|
||||
else: result = -1
|
||||
else:
|
||||
result = -1
|
||||
of pkTitle:
|
||||
if s[start] != '\0':
|
||||
var a: TRune
|
||||
result = start
|
||||
fastRuneAt(s, result, a)
|
||||
if isTitle(a): dec(result, start)
|
||||
else: result = -1
|
||||
else:
|
||||
result = -1
|
||||
of pkWhitespace:
|
||||
if s[start] != '\0':
|
||||
var a: TRune
|
||||
result = start
|
||||
fastRuneAt(s, result, a)
|
||||
if isWhitespace(a): dec(result, start)
|
||||
else: result = -1
|
||||
else:
|
||||
result = -1
|
||||
of pkGreedyAny:
|
||||
result = len(s) - start
|
||||
of pkNewLine:
|
||||
@@ -537,14 +628,14 @@ proc m(s: string, p: TPeg, start: int, c: var TMatchClosure): int =
|
||||
of pkNonTerminal:
|
||||
var oldMl = c.ml
|
||||
when false: echo "enter: ", p.nt.name
|
||||
result = m(s, p.nt.rule, start, c)
|
||||
result = rawMatch(s, p.nt.rule, start, c)
|
||||
when false: echo "leave: ", p.nt.name
|
||||
if result < 0: c.ml = oldMl
|
||||
of pkSequence:
|
||||
var oldMl = c.ml
|
||||
result = 0
|
||||
for i in 0..high(p.sons):
|
||||
var x = m(s, p.sons[i], start+result, c)
|
||||
var x = rawMatch(s, p.sons[i], start+result, c)
|
||||
if x < 0:
|
||||
c.ml = oldMl
|
||||
result = -1
|
||||
@@ -553,14 +644,14 @@ proc m(s: string, p: TPeg, start: int, c: var TMatchClosure): int =
|
||||
of pkOrderedChoice:
|
||||
var oldMl = c.ml
|
||||
for i in 0..high(p.sons):
|
||||
result = m(s, p.sons[i], start, c)
|
||||
result = rawMatch(s, p.sons[i], start, c)
|
||||
if result >= 0: break
|
||||
c.ml = oldMl
|
||||
of pkSearch:
|
||||
var oldMl = c.ml
|
||||
result = 0
|
||||
while start+result < s.len:
|
||||
var x = m(s, p.sons[0], start+result, c)
|
||||
var x = rawMatch(s, p.sons[0], start+result, c)
|
||||
if x >= 0:
|
||||
inc(result, x)
|
||||
return
|
||||
@@ -572,7 +663,7 @@ proc m(s: string, p: TPeg, start: int, c: var TMatchClosure): int =
|
||||
inc(c.ml)
|
||||
result = 0
|
||||
while start+result < s.len:
|
||||
var x = m(s, p.sons[0], start+result, c)
|
||||
var x = rawMatch(s, p.sons[0], start+result, c)
|
||||
if x >= 0:
|
||||
if idx < maxSubpatterns:
|
||||
c.matches[idx] = (start, start+result-1)
|
||||
@@ -585,7 +676,7 @@ proc m(s: string, p: TPeg, start: int, c: var TMatchClosure): int =
|
||||
of pkGreedyRep:
|
||||
result = 0
|
||||
while true:
|
||||
var x = m(s, p.sons[0], start+result, c)
|
||||
var x = rawMatch(s, p.sons[0], start+result, c)
|
||||
# if x == 0, we have an endless loop; so the correct behaviour would be
|
||||
# not to break. But endless loops can be easily introduced:
|
||||
# ``(comment / \w*)*`` is such an example. Breaking for x == 0 does the
|
||||
@@ -600,15 +691,15 @@ proc m(s: string, p: TPeg, start: int, c: var TMatchClosure): int =
|
||||
result = 0
|
||||
while contains(p.charChoice^, s[start+result]): inc(result)
|
||||
of pkOption:
|
||||
result = max(0, m(s, p.sons[0], start, c))
|
||||
result = max(0, rawMatch(s, p.sons[0], start, c))
|
||||
of pkAndPredicate:
|
||||
var oldMl = c.ml
|
||||
result = m(s, p.sons[0], start, c)
|
||||
result = rawMatch(s, p.sons[0], start, c)
|
||||
if result >= 0: result = 0 # do not consume anything
|
||||
else: c.ml = oldMl
|
||||
of pkNotPredicate:
|
||||
var oldMl = c.ml
|
||||
result = m(s, p.sons[0], start, c)
|
||||
result = rawMatch(s, p.sons[0], start, c)
|
||||
if result < 0: result = 0
|
||||
else:
|
||||
c.ml = oldMl
|
||||
@@ -616,7 +707,7 @@ proc m(s: string, p: TPeg, start: int, c: var TMatchClosure): int =
|
||||
of pkCapture:
|
||||
var idx = c.ml # reserve a slot for the subpattern
|
||||
inc(c.ml)
|
||||
result = m(s, p.sons[0], start, c)
|
||||
result = rawMatch(s, p.sons[0], start, c)
|
||||
if result >= 0:
|
||||
if idx < maxSubpatterns:
|
||||
c.matches[idx] = (start, start+result-1)
|
||||
@@ -629,7 +720,7 @@ proc m(s: string, p: TPeg, start: int, c: var TMatchClosure): int =
|
||||
var n: TPeg
|
||||
n.kind = succ(pkTerminal, ord(p.kind)-ord(pkBackRef))
|
||||
n.term = s.copy(a, b)
|
||||
result = m(s, n, start, c)
|
||||
result = rawMatch(s, n, start, c)
|
||||
of pkRule, pkList: assert false
|
||||
|
||||
proc match*(s: string, pattern: TPeg, matches: var openarray[string],
|
||||
@@ -638,8 +729,8 @@ proc match*(s: string, pattern: TPeg, matches: var openarray[string],
|
||||
## the captured substrings in the array ``matches``. If it does not
|
||||
## match, nothing is written into ``matches`` and ``false`` is
|
||||
## returned.
|
||||
var c: TMatchClosure
|
||||
result = m(s, pattern, start, c) == len(s) -start
|
||||
var c: TCaptures
|
||||
result = rawMatch(s, pattern, start, c) == len(s) -start
|
||||
if result:
|
||||
for i in 0..c.ml-1:
|
||||
matches[i] = copy(s, c.matches[i][0], c.matches[i][1])
|
||||
@@ -647,8 +738,8 @@ proc match*(s: string, pattern: TPeg, matches: var openarray[string],
|
||||
proc match*(s: string, pattern: TPeg,
|
||||
start = 0): bool {.nosideEffect, rtl, extern: "npegs$1".} =
|
||||
## returns ``true`` if ``s`` matches the ``pattern`` beginning from ``start``.
|
||||
var c: TMatchClosure
|
||||
result = m(s, pattern, start, c) == len(s)-start
|
||||
var c: TCaptures
|
||||
result = rawMatch(s, pattern, start, c) == len(s)-start
|
||||
|
||||
proc matchLen*(s: string, pattern: TPeg, matches: var openarray[string],
|
||||
start = 0): int {.nosideEffect, rtl, extern: "npegs$1Capture".} =
|
||||
@@ -656,8 +747,8 @@ proc matchLen*(s: string, pattern: TPeg, matches: var openarray[string],
|
||||
## if there is no match, -1 is returned. Note that a match length
|
||||
## of zero can happen. It's possible that a suffix of `s` remains
|
||||
## that does not belong to the match.
|
||||
var c: TMatchClosure
|
||||
result = m(s, pattern, start, c)
|
||||
var c: TCaptures
|
||||
result = rawMatch(s, pattern, start, c)
|
||||
if result >= 0:
|
||||
for i in 0..c.ml-1:
|
||||
matches[i] = copy(s, c.matches[i][0], c.matches[i][1])
|
||||
@@ -668,8 +759,8 @@ proc matchLen*(s: string, pattern: TPeg,
|
||||
## if there is no match, -1 is returned. Note that a match length
|
||||
## of zero can happen. It's possible that a suffix of `s` remains
|
||||
## that does not belong to the match.
|
||||
var c: TMatchClosure
|
||||
result = m(s, pattern, start, c)
|
||||
var c: TCaptures
|
||||
result = rawMatch(s, pattern, start, c)
|
||||
|
||||
proc find*(s: string, pattern: TPeg, matches: var openarray[string],
|
||||
start = 0): int {.nosideEffect, rtl, extern: "npegs$1Capture".} =
|
||||
@@ -681,6 +772,18 @@ proc find*(s: string, pattern: TPeg, matches: var openarray[string],
|
||||
return -1
|
||||
# could also use the pattern here: (!P .)* P
|
||||
|
||||
proc findBounds*(s: string, pattern: TPeg, matches: var openarray[string],
|
||||
start = 0): tuple[first, last: int] {.
|
||||
nosideEffect, rtl, extern: "npegs$1Capture".} =
|
||||
## returns the starting position and end position of ``pattern`` in ``s``
|
||||
## and the captured
|
||||
## substrings in the array ``matches``. If it does not match, nothing
|
||||
## is written into ``matches`` and (-1,0) is returned.
|
||||
for i in start .. s.len-1:
|
||||
var L = matchLen(s, pattern, matches, i)
|
||||
if L >= 0: return (i, i+L-1)
|
||||
return (-1, 0)
|
||||
|
||||
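A usage sketch for the new ``findBounds`` (illustrative; the input string is made up):

.. code-block:: Nimrod
  import pegs

  var caps: array[0..0, string]
  var b = findBounds("released in 2010!", peg"{\d+}", caps)
  assert b.first == 12 and b.last == 15   # bounds of "2010"
  assert caps[0] == "2010"                # the captured digits
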
proc find*(s: string, pattern: TPeg,
|
||||
start = 0): int {.nosideEffect, rtl, extern: "npegs$1".} =
|
||||
## returns the starting position of ``pattern`` in ``s``. If it does not
|
||||
@@ -1351,6 +1454,11 @@ proc primary(p: var TPegParser): TPeg =
|
||||
of "a": result = charset({'a'..'z', 'A'..'Z'})
|
||||
of "A": result = charset({'\1'..'\xff'} - {'a'..'z', 'A'..'Z'})
|
||||
of "ident": result = pegs.ident
|
||||
of "letter": result = UnicodeLetter()
|
||||
of "upper": result = UnicodeUpper()
|
||||
of "lower": result = UnicodeLower()
|
||||
of "title": result = UnicodeTitle()
|
||||
of "white": result = UnicodeWhitespace()
|
||||
else: pegError(p, "unknown built-in: " & p.tok.literal)
|
||||
getTok(p)
|
||||
of tkEscaped:
|
||||
@@ -1439,9 +1547,12 @@ proc rawParse(p: var TPegParser): TPeg =
|
||||
elif ntUsed notin nt.flags and i > 0:
|
||||
pegError(p, "unused rule: " & nt.name, nt.line, nt.col)
|
||||
|
||||
proc parsePeg*(input: string, filename = "pattern", line = 1, col = 0): TPeg =
|
||||
proc parsePeg*(pattern: string, filename = "pattern", line = 1, col = 0): TPeg =
|
||||
## constructs a TPeg object from `pattern`. `filename`, `line`, `col` are
|
||||
## used for error messages, but they only provide start offsets. `parsePeg`
|
||||
## keeps track of line and column numbers within `pattern`.
|
||||
var p: TPegParser
|
||||
init(TPegLexer(p), input, filename, line, col)
|
||||
init(TPegLexer(p), pattern, filename, line, col)
|
||||
p.tok.kind = tkInvalid
|
||||
p.tok.modifier = modNone
|
||||
p.tok.literal = ""
|
||||
@@ -1505,9 +1616,9 @@ when isMainModule:
|
||||
expr.rule = sequence(capture(ident), *sequence(
|
||||
nonterminal(ws), term('+'), nonterminal(ws), nonterminal(expr)))
|
||||
|
||||
var c: TMatchClosure
|
||||
var c: TCaptures
|
||||
var s = "a+b + c +d+e+f"
|
||||
assert m(s, expr.rule, 0, c) == len(s)
|
||||
assert rawMatch(s, expr.rule, 0, c) == len(s)
|
||||
var a = ""
|
||||
for i in 0..c.ml-1:
|
||||
a.add(copy(s, c.matches[i][0], c.matches[i][1]))
|
||||
@@ -1559,4 +1670,10 @@ when isMainModule:
|
||||
else:
|
||||
assert false
|
||||
|
||||
|
||||
assert match("eine übersicht und außerdem", peg"(\letter \white*)+")
|
||||
# ß is not a lower cased letter?!
|
||||
assert match("eine übersicht und auerdem", peg"(\lower \white*)+")
|
||||
assert match("EINE ÜBERSICHT UND AUSSERDEM", peg"(\upper \white*)+")
|
||||
assert(not match("456678", peg"(\letter)+"))
|
||||
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
|
||||
## This module implements a simple portable type-safe sockets layer.
|
||||
|
||||
import os
|
||||
import os, parseutils
|
||||
|
||||
when defined(Windows):
|
||||
import winlean
|
||||
@@ -146,18 +146,66 @@ proc listen*(socket: TSocket, attempts = 5) =
|
||||
## listens to socket.
|
||||
if listen(cint(socket), cint(attempts)) < 0'i32: OSError()
|
||||
|
||||
proc bindAddr*(socket: TSocket, port = TPort(0)) =
|
||||
## binds a port number to a socket.
|
||||
proc invalidIp4(s: string) {.noreturn, noinline.} =
|
||||
raise newException(EInvalidValue, "invalid ip4 address: " & s)
|
||||
|
||||
proc parseIp4*(s: string): int32 =
|
||||
## parses an IP version 4 in dotted decimal form like "a.b.c.d".
|
||||
## Raises EInvalidValue in case of an error.
|
||||
var a, b, c, d: int
|
||||
var i = 0
|
||||
var j = parseInt(s, a, i)
|
||||
if j <= 0: invalidIp4(s)
|
||||
inc(i, j)
|
||||
if s[i] == '.': inc(i)
|
||||
else: invalidIp4(s)
|
||||
j = parseInt(s, b, i)
|
||||
if j <= 0: invalidIp4(s)
|
||||
inc(i, j)
|
||||
if s[i] == '.': inc(i)
|
||||
else: invalidIp4(s)
|
||||
j = parseInt(s, c, i)
|
||||
if j <= 0: invalidIp4(s)
|
||||
inc(i, j)
|
||||
if s[i] == '.': inc(i)
|
||||
else: invalidIp4(s)
|
||||
j = parseInt(s, d, i)
|
||||
if j <= 0: invalidIp4(s)
|
||||
inc(i, j)
|
||||
if s[i] != '\0': invalidIp4(s)
|
||||
result = int32(a shl 24 or b shl 16 or c shl 8 or d)
|
||||
|
||||
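A quick sanity sketch for ``parseIp4`` (my own example):

.. code-block:: Nimrod
  import sockets

  assert parseIp4("127.0.0.1") == 0x7F000001'i32
  # malformed input such as "10.0.0" raises EInvalidValue
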
proc bindAddr*(socket: TSocket, port = TPort(0), address = "") =
|
||||
## binds an address/port number to a socket.
|
||||
## Use address string in dotted decimal form like "a.b.c.d"
|
||||
## or leave "" for any address.
|
||||
var name: Tsockaddr_in
|
||||
when defined(Windows):
|
||||
name.sin_family = int16(ord(AF_INET))
|
||||
else:
|
||||
name.sin_family = posix.AF_INET
|
||||
name.sin_port = sockets.htons(int16(port))
|
||||
name.sin_addr.s_addr = sockets.htonl(INADDR_ANY)
|
||||
if address == "":
|
||||
name.sin_addr.s_addr = sockets.htonl(INADDR_ANY)
|
||||
else:
|
||||
name.sin_addr.s_addr = parseIp4(address)
|
||||
if bindSocket(cint(socket), cast[ptr TSockAddr](addr(name)),
|
||||
sizeof(name)) < 0'i32:
|
||||
OSError()
|
||||
|
||||
when false:
|
||||
proc bindAddr*(socket: TSocket, port = TPort(0)) =
|
||||
## binds a port number to a socket.
|
||||
var name: Tsockaddr_in
|
||||
when defined(Windows):
|
||||
name.sin_family = int16(ord(AF_INET))
|
||||
else:
|
||||
name.sin_family = posix.AF_INET
|
||||
name.sin_port = sockets.htons(int16(port))
|
||||
name.sin_addr.s_addr = sockets.htonl(INADDR_ANY)
|
||||
if bindSocket(cint(socket), cast[ptr TSockAddr](addr(name)),
|
||||
sizeof(name)) < 0'i32:
|
||||
OSError()
|
||||
|
||||
proc getSockName*(socket: TSocket): TPort =
|
||||
## returns the socket's associated port number.
|
||||
@@ -409,6 +457,33 @@ proc send*(socket: TSocket, data: string) =
|
||||
## sends data to a socket.
|
||||
if send(socket, cstring(data), data.len) != data.len: OSError()
|
||||
|
||||
when defined(Windows):
|
||||
const
|
||||
SOCKET_ERROR = -1
|
||||
IOCPARM_MASK = 127
|
||||
IOC_IN = int(-2147483648)
|
||||
FIONBIO = int(IOC_IN or ((sizeof(int) and IOCPARM_MASK) shl 16) or
|
||||
(102 shl 8) or 126)
|
||||
|
||||
proc ioctlsocket(s: TWinSocket, cmd: clong,
|
||||
argptr: ptr clong): cint {.
|
||||
stdcall, importc:"ioctlsocket", dynlib: "ws2_32.dll".}
|
||||
|
||||
proc setBlocking*(s: TSocket, blocking: bool) =
|
||||
## sets blocking mode on socket
|
||||
when defined(Windows):
|
||||
var mode = clong(ord(not blocking)) # 1 for non-blocking, 0 for blocking
|
||||
if SOCKET_ERROR == ioctlsocket(TWinSocket(s), FIONBIO, addr(mode)):
|
||||
OSError()
|
||||
else: # BSD sockets
|
||||
var x: int = fcntl(cint(s), F_GETFL, 0)
|
||||
if x == -1:
|
||||
OSError()
|
||||
else:
|
||||
var mode = if blocking: x and not O_NONBLOCK else: x or O_NONBLOCK
|
||||
if fcntl(cint(s), F_SETFL, mode) == -1:
|
||||
OSError()
|
||||
|
||||
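And ``setBlocking`` in context (sketch):

.. code-block:: Nimrod
  import sockets

  var s = socket(AF_INET)
  s.setBlocking(false)   # switch the socket to non-blocking I/O
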
when defined(Windows):
|
||||
var wsa: TWSADATA
|
||||
if WSAStartup(0x0101'i16, wsa) != 0: OSError()
|
||||
|
||||
@@ -62,10 +62,7 @@ proc popCurrentException {.compilerRtl, inl.} =
|
||||
# some platforms have native support for stack traces:
|
||||
const
|
||||
nativeStackTrace = (defined(macosx) or defined(linux)) and
|
||||
not nimrodStackTrace and false
|
||||
|
||||
# `nativeStackTrace` does not work for me --> deactivated for now. Maybe for
|
||||
# the next release version.
|
||||
not nimrodStackTrace
|
||||
|
||||
when nativeStacktrace:
|
||||
type
|
||||
|
||||
@@ -756,6 +756,9 @@ proc renderRstToOut(d: PDoc, n: PRstNode): PRope =
  of rnTitle: d.meta[metaTitle] = renderRstToOut(d, n.sons[0])
  else: InternalError("renderRstToOut")

proc checkForFalse(n: PNode): bool =
  result = n.kind == nkIdent and IdentEq(n.ident, "false")

proc generateDoc(d: PDoc, n: PNode) =
  if n == nil: return
  case n.kind
@@ -782,7 +785,8 @@ proc generateDoc(d: PDoc, n: PNode) =
    for i in countup(0, sonsLen(n) - 1): generateDoc(d, n.sons[i])
  of nkWhenStmt:
    # generate documentation for the first branch only:
    generateDoc(d, lastSon(n.sons[0]))
    if not checkForFalse(n.sons[0].sons[0]):
      generateDoc(d, lastSon(n.sons[0]))
  else: nil

proc genSection(d: PDoc, kind: TSymKind) =

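Sketched effect of this change (illustration only): a declaration hidden behind ``when false`` no longer appears in the generated documentation, where previously the first branch of a ``when`` statement was always documented.

.. code-block:: Nimrod
  when false:
    proc legacyApi*(x: int): int =
      ## docgen now ignores this dead branch instead of documenting it
      result = x
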
637 tests/gc/talloc.nim (new executable file)
@@ -0,0 +1,637 @@
|
||||
#
|
||||
#
|
||||
# Nimrod's Runtime Library
|
||||
# (c) Copyright 2010 Andreas Rumpf
|
||||
#
|
||||
# See the file "copying.txt", included in this
|
||||
# distribution, for details about the copyright.
|
||||
#
|
||||
|
||||
# Low level allocator for Nimrod. Has been designed to support the GC.
|
||||
# TODO:
|
||||
# - eliminate "used" field
|
||||
# - make searching for block O(1)
|
||||
|
||||
const
|
||||
debugGC = false # we wish to debug the GC...
|
||||
logGC = false
|
||||
traceGC = false # extensive debugging
|
||||
reallyDealloc = true # for debugging purposes this can be set to false
|
||||
cycleGC = true # (de)activate the cycle GC
|
||||
stressGC = false
|
||||
reallyOsDealloc = true
|
||||
coalescRight = true
|
||||
coalescLeft = true
|
||||
overwriteFree = false
|
||||
|
||||
# Page size of the system; in most cases 4096 bytes. For exotic OS or
|
||||
# CPU this needs to be changed:
|
||||
const
|
||||
PageShift = 12
|
||||
PageSize = 1 shl PageShift
|
||||
PageMask = PageSize-1
|
||||
|
||||
MemAlign = 8 # also minimal allocatable memory block
|
||||
|
||||
BitsPerPage = PageSize div MemAlign
|
||||
UnitsPerPage = BitsPerPage div (sizeof(int)*8)
|
||||
# how many ints do we need to describe a page:
|
||||
# on 32 bit systems this is only 16 (!)
|
||||
|
||||
TrunkShift = 9
|
||||
BitsPerTrunk = 1 shl TrunkShift # needs to be power of 2 and divisible by 64
|
||||
TrunkMask = BitsPerTrunk - 1
|
||||
IntsPerTrunk = BitsPerTrunk div (sizeof(int)*8)
|
||||
IntShift = 5 + ord(sizeof(int) == 8) # 5 or 6, depending on int width
|
||||
IntMask = 1 shl IntShift - 1
|
||||
|
||||
proc raiseOutOfMem() {.noreturn.} =
|
||||
quit("out of memory")
|
||||
|
||||
# ------------ platform specific chunk allocation code -----------------------
|
||||
|
||||
when defined(posix):
|
||||
const
|
||||
PROT_READ = 1 # page can be read
|
||||
PROT_WRITE = 2 # page can be written
|
||||
MAP_PRIVATE = 2 # Changes are private
|
||||
|
||||
when defined(linux) or defined(aix):
|
||||
const MAP_ANONYMOUS = 0x20 # don't use a file
|
||||
elif defined(macosx) or defined(bsd):
|
||||
const MAP_ANONYMOUS = 0x1000
|
||||
elif defined(solaris):
|
||||
const MAP_ANONYMOUS = 0x100
|
||||
else:
|
||||
{.error: "Port memory manager to your platform".}
|
||||
|
||||
proc mmap(adr: pointer, len: int, prot, flags, fildes: cint,
|
||||
off: int): pointer {.header: "<sys/mman.h>".}
|
||||
|
||||
proc munmap(adr: pointer, len: int) {.header: "<sys/mman.h>".}
|
||||
|
||||
proc osAllocPages(size: int): pointer {.inline.} =
|
||||
result = mmap(nil, size, PROT_READ or PROT_WRITE,
|
||||
MAP_PRIVATE or MAP_ANONYMOUS, -1, 0)
|
||||
if result == nil or result == cast[pointer](-1):
|
||||
raiseOutOfMem()
|
||||
|
||||
proc osDeallocPages(p: pointer, size: int) {.inline} =
|
||||
when reallyOsDealloc: munmap(p, size)
|
||||
|
||||
elif defined(windows):
|
||||
const
|
||||
MEM_RESERVE = 0x2000
|
||||
MEM_COMMIT = 0x1000
|
||||
MEM_TOP_DOWN = 0x100000
|
||||
PAGE_READWRITE = 0x04
|
||||
|
||||
MEM_DECOMMIT = 0x4000
|
||||
MEM_RELEASE = 0x8000
|
||||
|
||||
proc VirtualAlloc(lpAddress: pointer, dwSize: int, flAllocationType,
|
||||
flProtect: int32): pointer {.
|
||||
header: "<windows.h>", stdcall.}
|
||||
|
||||
proc VirtualFree(lpAddress: pointer, dwSize: int,
|
||||
dwFreeType: int32) {.header: "<windows.h>", stdcall.}
|
||||
|
||||
proc osAllocPages(size: int): pointer {.inline.} =
|
||||
result = VirtualAlloc(nil, size, MEM_RESERVE or MEM_COMMIT,
|
||||
PAGE_READWRITE)
|
||||
if result == nil: raiseOutOfMem()
|
||||
|
||||
proc osDeallocPages(p: pointer, size: int) {.inline.} =
|
||||
# according to Microsoft, 0 is the only correct value here:
|
||||
when reallyOsDealloc: VirtualFree(p, 0, MEM_RELEASE)
|
||||
|
||||
else:
|
||||
{.error: "Port memory manager to your platform".}
|
||||
|
||||
# --------------------- end of non-portable code -----------------------------
|
||||
|
||||
# We manage *chunks* of memory. Each chunk is a multiple of the page size.
|
||||
# Each chunk starts at an address that is divisible by the page size. Chunks
|
||||
# that are bigger than ``ChunkOsReturn`` are returned back to the operating
|
||||
# system immediately.
|
||||
|
||||
const
|
||||
ChunkOsReturn = 256 * PageSize
|
||||
InitialMemoryRequest = ChunkOsReturn div 2 # < ChunkOsReturn!
|
||||
SmallChunkSize = PageSize
|
||||
|
||||
type
|
||||
PTrunk = ptr TTrunk
|
||||
TTrunk {.final.} = object
|
||||
next: PTrunk # all nodes are connected with this pointer
|
||||
key: int # start address at bit 0
|
||||
bits: array[0..IntsPerTrunk-1, int] # a bit vector
|
||||
|
||||
TTrunkBuckets = array[0..1023, PTrunk]
|
||||
TIntSet {.final.} = object
|
||||
data: TTrunkBuckets
|
||||
|
||||
type
|
||||
TAlignType = biggestFloat
|
||||
TFreeCell {.final, pure.} = object
|
||||
next: ptr TFreeCell # next free cell in chunk (overlaid with refcount)
|
||||
zeroField: int # 0 means cell is not used (overlaid with typ field)
|
||||
# 1 means cell is manually managed pointer
|
||||
|
||||
PChunk = ptr TBaseChunk
|
||||
PBigChunk = ptr TBigChunk
|
||||
PSmallChunk = ptr TSmallChunk
|
||||
TBaseChunk {.pure.} = object
|
||||
prevSize: int # size of previous chunk; for coalescing
|
||||
size: int # if < PageSize it is a small chunk
|
||||
used: bool # later will be optimized into prevSize...
|
||||
|
||||
TSmallChunk = object of TBaseChunk
|
||||
next, prev: PSmallChunk # chunks of the same size
|
||||
freeList: ptr TFreeCell
|
||||
free: int # how many bytes remain
|
||||
acc: int # accumulator for small object allocation
|
||||
data: TAlignType # start of usable memory
|
||||
|
||||
TBigChunk = object of TBaseChunk # not necessarily > PageSize!
|
||||
next: PBigChunk # chunks of the same (or bigger) size
|
||||
prev: PBigChunk
|
||||
align: int
|
||||
data: TAlignType # start of usable memory
|
||||
|
||||
template smallChunkOverhead(): expr = sizeof(TSmallChunk)-sizeof(TAlignType)
|
||||
template bigChunkOverhead(): expr = sizeof(TBigChunk)-sizeof(TAlignType)
|
||||
|
||||
proc roundup(x, v: int): int {.inline.} =
|
||||
result = (x + (v-1)) and not (v-1)
|
||||
assert(result >= x)
|
||||
#return ((-x) and (v-1)) +% x
|
||||
|
||||
assert(roundup(14, PageSize) == PageSize)
|
||||
assert(roundup(15, 8) == 16)
|
||||
assert(roundup(65, 8) == 72)
|
||||
|
||||
# ------------- chunk table ---------------------------------------------------
|
||||
# We use a PtrSet of chunk starts and a table[Page, chunksize] for chunk
|
||||
# endings of big chunks. This is needed by the merging operation. The only
|
||||
# remaining operation is best-fit for big chunks. Since there is a size-limit
|
||||
# for big chunks (because greater than the limit means they are returned back
|
||||
# to the OS), a fixed size array can be used.
|
||||
|
||||
type
|
||||
PLLChunk = ptr TLLChunk
|
||||
TLLChunk {.pure.} = object ## *low-level* chunk
|
||||
size: int # remaining size
|
||||
acc: int # accumulator
|
||||
|
||||
TAllocator {.final, pure.} = object
|
||||
llmem: PLLChunk
|
||||
currMem, maxMem, freeMem: int # memory sizes (allocated from OS)
|
||||
freeSmallChunks: array[0..SmallChunkSize div MemAlign-1, PSmallChunk]
|
||||
freeChunksList: PBigChunk # XXX make this a datastructure with O(1) access
|
||||
chunkStarts: TIntSet
|
||||
|
||||
proc incCurrMem(a: var TAllocator, bytes: int) {.inline.} =
|
||||
inc(a.currMem, bytes)
|
||||
|
||||
proc decCurrMem(a: var TAllocator, bytes: int) {.inline.} =
|
||||
a.maxMem = max(a.maxMem, a.currMem)
|
||||
dec(a.currMem, bytes)
|
||||
|
||||
proc getMaxMem(a: var TAllocator): int =
|
||||
# Since we update maxPagesCount only when freeing pages,
|
||||
# maxPagesCount may not be up to date. Thus we use the
|
||||
# maximum of these both values here:
|
||||
return max(a.currMem, a.maxMem)
|
||||
|
||||
var
|
||||
allocator: TAllocator
|
||||
|
||||
proc llAlloc(a: var TAllocator, size: int): pointer =
|
||||
# *low-level* alloc for the memory managers data structures. Deallocation
|
||||
# is never done.
|
||||
if a.llmem == nil or size > a.llmem.size:
|
||||
var request = roundup(size+sizeof(TLLChunk), PageSize)
|
||||
a.llmem = cast[PLLChunk](osAllocPages(request))
|
||||
incCurrMem(a, request)
|
||||
a.llmem.size = request - sizeof(TLLChunk)
|
||||
a.llmem.acc = sizeof(TLLChunk)
|
||||
result = cast[pointer](cast[TAddress](a.llmem) + a.llmem.acc)
|
||||
dec(a.llmem.size, size)
|
||||
inc(a.llmem.acc, size)
|
||||
zeroMem(result, size)
|
||||
|
||||
proc IntSetGet(t: TIntSet, key: int): PTrunk =
|
||||
var it = t.data[key and high(t.data)]
|
||||
while it != nil:
|
||||
if it.key == key: return it
|
||||
it = it.next
|
||||
result = nil
|
||||
|
||||
proc IntSetPut(t: var TIntSet, key: int): PTrunk =
|
||||
result = IntSetGet(t, key)
|
||||
if result == nil:
|
||||
result = cast[PTrunk](llAlloc(allocator, sizeof(result^)))
|
||||
result.next = t.data[key and high(t.data)]
|
||||
t.data[key and high(t.data)] = result
|
||||
result.key = key
|
||||
|
||||
proc Contains(s: TIntSet, key: int): bool =
|
||||
var t = IntSetGet(s, key shr TrunkShift)
|
||||
if t != nil:
|
||||
var u = key and TrunkMask
|
||||
result = (t.bits[u shr IntShift] and (1 shl (u and IntMask))) != 0
|
||||
else:
|
||||
result = false
|
||||
|
||||
proc Incl(s: var TIntSet, key: int) =
|
||||
var t = IntSetPut(s, key shr TrunkShift)
|
||||
var u = key and TrunkMask
|
||||
t.bits[u shr IntShift] = t.bits[u shr IntShift] or (1 shl (u and IntMask))
|
||||
|
||||
proc Excl(s: var TIntSet, key: int) =
|
||||
var t = IntSetGet(s, key shr TrunkShift)
|
||||
if t != nil:
|
||||
var u = key and TrunkMask
|
||||
t.bits[u shr IntShift] = t.bits[u shr IntShift] and not
|
||||
(1 shl (u and IntMask))
|
||||
|
||||
proc ContainsOrIncl(s: var TIntSet, key: int): bool =
|
||||
var t = IntSetGet(s, key shr TrunkShift)
|
||||
if t != nil:
|
||||
var u = key and TrunkMask
|
||||
result = (t.bits[u shr IntShift] and (1 shl (u and IntMask))) != 0
|
||||
if not result:
|
||||
t.bits[u shr IntShift] = t.bits[u shr IntShift] or
|
||||
(1 shl (u and IntMask))
|
||||
else:
|
||||
Incl(s, key)
|
||||
result = false
|
||||
|
||||
# ------------- chunk management ----------------------------------------------
|
||||
proc pageIndex(c: PChunk): int {.inline.} =
|
||||
result = cast[TAddress](c) shr PageShift
|
||||
|
||||
proc pageIndex(p: pointer): int {.inline.} =
|
||||
result = cast[TAddress](p) shr PageShift
|
||||
|
||||
proc pageAddr(p: pointer): PChunk {.inline.} =
|
||||
result = cast[PChunk](cast[TAddress](p) and not PageMask)
|
||||
assert(Contains(allocator.chunkStarts, pageIndex(result)))
|
||||
|
||||
var lastSize = PageSize
|
||||
|
||||
proc requestOsChunks(a: var TAllocator, size: int): PBigChunk =
|
||||
incCurrMem(a, size)
|
||||
inc(a.freeMem, size)
|
||||
result = cast[PBigChunk](osAllocPages(size))
|
||||
assert((cast[TAddress](result) and PageMask) == 0)
|
||||
#zeroMem(result, size)
|
||||
result.next = nil
|
||||
result.prev = nil
|
||||
result.used = false
|
||||
result.size = size
|
||||
# update next.prevSize:
|
||||
var nxt = cast[TAddress](result) +% size
|
||||
assert((nxt and PageMask) == 0)
|
||||
var next = cast[PChunk](nxt)
|
||||
if pageIndex(next) in a.chunkStarts:
|
||||
#echo("Next already allocated!")
|
||||
next.prevSize = size
|
||||
# set result.prevSize:
|
||||
var prv = cast[TAddress](result) -% lastSize
|
||||
assert((nxt and PageMask) == 0)
|
||||
var prev = cast[PChunk](prv)
|
||||
if pageIndex(prev) in a.chunkStarts and prev.size == lastSize:
|
||||
#echo("Prev already allocated!")
|
||||
result.prevSize = lastSize
|
||||
else:
|
||||
result.prevSize = 0 # unknown
|
||||
lastSize = size # for next request
|
||||
|
||||
proc freeOsChunks(a: var TAllocator, p: pointer, size: int) =
|
||||
# update next.prevSize:
|
||||
var c = cast[PChunk](p)
|
||||
var nxt = cast[TAddress](p) +% c.size
|
||||
assert((nxt and PageMask) == 0)
|
||||
var next = cast[PChunk](nxt)
|
||||
if pageIndex(next) in a.chunkStarts:
|
||||
next.prevSize = 0 # XXX used
|
||||
excl(a.chunkStarts, pageIndex(p))
|
||||
osDeallocPages(p, size)
|
||||
decCurrMem(a, size)
|
||||
dec(a.freeMem, size)
|
||||
#c_fprintf(c_stdout, "[Alloc] back to OS: %ld\n", size)
|
||||
|
||||
proc isAccessible(p: pointer): bool {.inline.} =
|
||||
result = Contains(allocator.chunkStarts, pageIndex(p))
|
||||
|
||||
proc contains[T](list, x: T): bool =
|
||||
var it = list
|
||||
while it != nil:
|
||||
if it == x: return true
|
||||
it = it.next
|
||||
|
||||
when false:
|
||||
proc writeFreeList(a: TAllocator) =
|
||||
var it = a.freeChunksList
|
||||
c_fprintf(c_stdout, "freeChunksList: %p\n", it)
|
||||
while it != nil:
|
||||
c_fprintf(c_stdout, "it: %p, next: %p, prev: %p\n",
|
||||
it, it.next, it.prev)
|
||||
it = it.next
|
||||
|
||||
proc ListAdd[T](head: var T, c: T) {.inline.} =
|
||||
assert(c notin head)
|
||||
assert c.prev == nil
|
||||
assert c.next == nil
|
||||
c.next = head
|
||||
if head != nil:
|
||||
assert head.prev == nil
|
||||
head.prev = c
|
||||
head = c
|
||||
|
||||
proc ListRemove[T](head: var T, c: T) {.inline.} =
|
||||
assert(c in head)
|
||||
if c == head:
|
||||
head = c.next
|
||||
assert c.prev == nil
|
||||
if head != nil: head.prev = nil
|
||||
else:
|
||||
assert c.prev != nil
|
||||
c.prev.next = c.next
|
||||
if c.next != nil: c.next.prev = c.prev
|
||||
c.next = nil
|
||||
c.prev = nil
|
||||
|
||||
proc isSmallChunk(c: PChunk): bool {.inline.} =
|
||||
return c.size <= SmallChunkSize-smallChunkOverhead()
|
||||
#return c.size < SmallChunkSize
|
||||
|
||||
proc chunkUnused(c: PChunk): bool {.inline.} =
|
||||
result = not c.used
|
||||
|
||||
proc updatePrevSize(a: var TAllocator, c: PBigChunk,
|
||||
prevSize: int) {.inline.} =
|
||||
var ri = cast[PChunk](cast[TAddress](c) +% c.size)
|
||||
assert((cast[TAddress](ri) and PageMask) == 0)
|
||||
if isAccessible(ri):
|
||||
ri.prevSize = prevSize
|
||||
|
||||
proc freeBigChunk(a: var TAllocator, c: PBigChunk) =
|
||||
var c = c
|
||||
assert(c.size >= PageSize)
|
||||
inc(a.freeMem, c.size)
|
||||
when coalescRight:
|
||||
var ri = cast[PChunk](cast[TAddress](c) +% c.size)
|
||||
assert((cast[TAddress](ri) and PageMask) == 0)
|
||||
if isAccessible(ri) and chunkUnused(ri):
|
||||
assert(not isSmallChunk(ri))
|
||||
if not isSmallChunk(ri):
|
||||
ListRemove(a.freeChunksList, cast[PBigChunk](ri))
|
||||
inc(c.size, ri.size)
|
||||
excl(a.chunkStarts, pageIndex(ri))
|
||||
when coalescLeft:
|
||||
if c.prevSize != 0:
|
||||
var le = cast[PChunk](cast[TAddress](c) -% c.prevSize)
|
||||
assert((cast[TAddress](le) and PageMask) == 0)
|
||||
if isAccessible(le) and chunkUnused(le):
|
||||
assert(not isSmallChunk(le))
|
||||
if not isSmallChunk(le):
|
||||
ListRemove(a.freeChunksList, cast[PBigChunk](le))
|
||||
inc(le.size, c.size)
|
||||
excl(a.chunkStarts, pageIndex(c))
|
||||
c = cast[PBigChunk](le)
|
||||
|
||||
if c.size < ChunkOsReturn:
|
||||
incl(a.chunkStarts, pageIndex(c))
|
||||
updatePrevSize(a, c, c.size)
|
||||
ListAdd(a.freeChunksList, c)
|
||||
c.used = false
|
||||
else:
|
||||
freeOsChunks(a, c, c.size)
|
||||
|
||||
proc splitChunk(a: var TAllocator, c: PBigChunk, size: int) =
|
||||
var rest = cast[PBigChunk](cast[TAddress](c) +% size)
|
||||
assert(rest notin a.freeChunksList)
|
||||
# c_fprintf(c_stdout, "to add: %p\n", rest)
|
||||
# writeFreeList(allocator)
|
||||
# assert false
|
||||
rest.size = c.size - size
|
||||
rest.used = false
|
||||
rest.next = nil
|
||||
rest.prev = nil
|
||||
rest.prevSize = size
|
||||
updatePrevSize(a, c, rest.size)
|
||||
c.size = size
|
||||
incl(a.chunkStarts, pageIndex(rest))
|
||||
ListAdd(a.freeChunksList, rest)
|
||||
|
||||
proc getBigChunk(a: var TAllocator, size: int): PBigChunk =
|
||||
# use first fit for now:
|
||||
assert((size and PageMask) == 0)
|
||||
assert(size > 0)
|
||||
result = a.freeChunksList
|
||||
block search:
|
||||
while result != nil:
|
||||
#if not chunkUnused(result):
|
||||
# c_fprintf(c_stdout, "%lld\n", int(result.used))
|
||||
assert chunkUnused(result)
|
||||
if result.size == size:
|
||||
ListRemove(a.freeChunksList, result)
|
||||
break search
|
||||
elif result.size > size:
|
||||
#c_fprintf(c_stdout, "res size: %lld; size: %lld\n", result.size, size)
|
||||
ListRemove(a.freeChunksList, result)
|
||||
splitChunk(a, result, size)
|
||||
break search
|
||||
result = result.next
|
||||
assert result != a.freeChunksList
|
||||
if size < InitialMemoryRequest:
|
||||
result = requestOsChunks(a, InitialMemoryRequest)
|
||||
splitChunk(a, result, size)
|
||||
else:
|
||||
result = requestOsChunks(a, size)
|
||||
result.prevSize = 0 # XXX why is this needed?
|
||||
result.used = true
|
||||
incl(a.chunkStarts, pageIndex(result))
|
||||
dec(a.freeMem, size)
|
||||
|
||||
proc getSmallChunk(a: var TAllocator): PSmallChunk =
|
||||
var res = getBigChunk(a, PageSize)
|
||||
assert res.prev == nil
|
||||
assert res.next == nil
|
||||
result = cast[PSmallChunk](res)
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
proc getCellSize(p: pointer): int {.inline.} =
|
||||
var c = pageAddr(p)
|
||||
result = c.size
|
||||
|
||||
proc rawAlloc(a: var TAllocator, requestedSize: int): pointer =
|
||||
assert(roundup(65, 8) == 72)
|
||||
assert requestedSize >= sizeof(TFreeCell)
|
||||
var size = roundup(requestedSize, MemAlign)
|
||||
#c_fprintf(c_stdout, "alloc; size: %ld; %ld\n", requestedSize, size)
|
||||
if size <= SmallChunkSize-smallChunkOverhead():
|
||||
# allocate a small block: for small chunks, we use only its next pointer
|
||||
var s = size div MemAlign
|
||||
var c = a.freeSmallChunks[s]
|
||||
if c == nil:
|
||||
c = getSmallChunk(a)
|
||||
c.freeList = nil
|
||||
assert c.size == PageSize
|
||||
c.size = size
|
||||
c.acc = size
|
||||
c.free = SmallChunkSize - smallChunkOverhead() - size
|
||||
c.next = nil
|
||||
c.prev = nil
|
||||
ListAdd(a.freeSmallChunks[s], c)
|
||||
result = addr(c.data)
|
||||
assert((cast[TAddress](result) and (MemAlign-1)) == 0)
|
||||
else:
|
||||
assert c.next != c
|
||||
#if c.size != size:
|
||||
# c_fprintf(c_stdout, "csize: %lld; size %lld\n", c.size, size)
|
||||
assert c.size == size
|
||||
if c.freeList == nil:
|
||||
assert(c.acc + smallChunkOverhead() + size <= SmallChunkSize)
|
||||
result = cast[pointer](cast[TAddress](addr(c.data)) +% c.acc)
|
||||
inc(c.acc, size)
|
||||
else:
|
||||
result = c.freeList
|
||||
assert(c.freeList.zeroField == 0)
|
||||
c.freeList = c.freeList.next
|
||||
dec(c.free, size)
|
||||
assert((cast[TAddress](result) and (MemAlign-1)) == 0)
|
||||
if c.free < size:
|
||||
ListRemove(a.freeSmallChunks[s], c)
|
||||
else:
|
||||
size = roundup(requestedSize+bigChunkOverhead(), PageSize)
|
||||
# allocate a large block
|
||||
var c = getBigChunk(a, size)
|
||||
assert c.prev == nil
|
||||
assert c.next == nil
|
||||
assert c.size == size
|
||||
result = addr(c.data)
|
||||
assert((cast[TAddress](result) and (MemAlign-1)) == 0)
|
||||
assert(isAccessible(result))
|
||||
|
||||
proc rawDealloc(a: var TAllocator, p: pointer) =
|
||||
var c = pageAddr(p)
|
||||
if isSmallChunk(c):
|
||||
# `p` is within a small chunk:
|
||||
var c = cast[PSmallChunk](c)
|
||||
var s = c.size
|
||||
var f = cast[ptr TFreeCell](p)
|
||||
#echo("setting to nil: ", $cast[TAddress](addr(f.zeroField)))
|
||||
assert(f.zeroField != 0)
|
||||
f.zeroField = 0
|
||||
f.next = c.freeList
|
||||
c.freeList = f
|
||||
when overwriteFree:
|
||||
# set to 0xff to check for usage after free bugs:
|
||||
c_memset(cast[pointer](cast[int](p) +% sizeof(TFreeCell)), -1'i32,
|
||||
s -% sizeof(TFreeCell))
|
||||
# check if it is not in the freeSmallChunks[s] list:
|
||||
if c.free < s:
|
||||
assert c notin a.freeSmallChunks[s div memAlign]
|
||||
# add it to the freeSmallChunks[s] array:
|
||||
ListAdd(a.freeSmallChunks[s div memAlign], c)
|
||||
inc(c.free, s)
|
||||
else:
|
||||
inc(c.free, s)
|
||||
if c.free == SmallChunkSize-smallChunkOverhead():
|
||||
ListRemove(a.freeSmallChunks[s div memAlign], c)
|
||||
c.size = SmallChunkSize
|
||||
freeBigChunk(a, cast[PBigChunk](c))
|
||||
else:
|
||||
# set to 0xff to check for usage after free bugs:
|
||||
when overwriteFree: c_memset(p, -1'i32, c.size -% bigChunkOverhead())
|
||||
# free big chunk
|
||||
freeBigChunk(a, cast[PBigChunk](c))
|
||||
|
||||
proc isAllocatedPtr(a: TAllocator, p: pointer): bool =
|
||||
if isAccessible(p):
|
||||
var c = pageAddr(p)
|
||||
if not chunkUnused(c):
|
||||
if isSmallChunk(c):
|
||||
var c = cast[PSmallChunk](c)
|
||||
var offset = (cast[TAddress](p) and PageMask) -% smallChunkOverhead()
|
||||
result = (c.acc >% offset) and (offset %% c.size == 0) and
|
||||
(cast[ptr TFreeCell](p).zeroField >% 1)
|
||||
else:
|
||||
var c = cast[PBigChunk](c)
|
||||
result = p == addr(c.data) and cast[ptr TFreeCell](p).zeroField >% 1
|
||||
|
||||
# ---------------------- interface to programs -------------------------------
|
||||
|
||||
when true:
|
||||
proc alloc(size: int): pointer =
|
||||
result = rawAlloc(allocator, size+sizeof(TFreeCell))
|
||||
cast[ptr TFreeCell](result).zeroField = 2 # mark it as used
|
||||
#assert(not isAllocatedPtr(allocator, result))
|
||||
result = cast[pointer](cast[TAddress](result) +% sizeof(TFreeCell))
|
||||
|
||||
proc alloc0(size: int): pointer =
|
||||
result = talloc.alloc(size)
|
||||
zeroMem(result, size)
|
||||
|
||||
proc dealloc(p: pointer) =
|
||||
var x = cast[pointer](cast[TAddress](p) -% sizeof(TFreeCell))
|
||||
assert(cast[ptr TFreeCell](x).zeroField == 2)
|
||||
rawDealloc(allocator, x)
|
||||
assert(not isAllocatedPtr(allocator, x))
|
||||
|
||||
proc isAllocatedPtr(p: pointer): bool =
|
||||
var x = cast[pointer](cast[TAddress](p) -% sizeof(TFreeCell))
|
||||
result = isAllocatedPtr(allocator, x)
|
||||
|
||||
proc ptrSize(p: pointer): int =
|
||||
var x = cast[pointer](cast[TAddress](p) -% sizeof(TFreeCell))
|
||||
result = pageAddr(x).size - sizeof(TFreeCell)
|
||||
|
||||
proc realloc(p: pointer, newsize: int): pointer =
|
||||
if newsize > 0:
|
||||
result = talloc.alloc(newsize)
|
||||
if p != nil:
|
||||
copyMem(result, p, ptrSize(p))
|
||||
talloc.dealloc(p)
|
||||
elif p != nil:
|
||||
talloc.dealloc(p)
|
||||
|
||||
proc countFreeMem(): int =
|
||||
# only used for assertions
|
||||
var it = allocator.freeChunksList
|
||||
while it != nil:
|
||||
inc(result, it.size)
|
||||
it = it.next
|
||||
|
||||
proc getFreeMem(): int =
|
||||
result = allocator.freeMem
|
||||
#assert(result == countFreeMem())
|
||||
|
||||
proc getTotalMem(): int = return allocator.currMem
|
||||
proc getOccupiedMem(): int = return talloc.getTotalMem() - talloc.getFreeMem()
|
||||
|
||||
when isMainModule:
|
||||
const iterations = 4000_000
|
||||
incl(allocator.chunkStarts, 11)
|
||||
assert 11 in allocator.chunkStarts
|
||||
excl(allocator.chunkStarts, 11)
|
||||
assert 11 notin allocator.chunkStarts
|
||||
var p: array [1..iterations, pointer]
|
||||
for i in 4..70:
|
||||
var x = i * 8
|
||||
for j in 1.. iterations:
|
||||
p[j] = talloc.Alloc(x)
|
||||
for j in 1..iterations:
|
||||
assert isAllocatedPtr(p[j])
|
||||
echo($i, " used memory: ", $(allocator.currMem))
|
||||
for j in countdown(iterations, 1):
|
||||
#echo("j: ", $j)
|
||||
talloc.dealloc(p[j])
|
||||
assert(not isAllocatedPtr(allocator, p[j]))
|
||||
echo($i, " after freeing: ", $(allocator.currMem))
|
||||
|
||||
@@ -7,11 +7,13 @@
|
||||
# define TCC_TARGET_I386
|
||||
# define CONFIG_TCCDIR "."
|
||||
#elif defined(__i386__)
|
||||
# define CONFIG_USE_LIBGCC
|
||||
# define TCC_TARGET_I386
|
||||
# define CONFIG_TCCDIR "/usr/local/lib/tcc"
|
||||
# define GCC_MAJOR 4
|
||||
# define HOST_I386 1
|
||||
#else
|
||||
# define CONFIG_USE_LIBGCC
|
||||
# define TCC_TARGET_X86_64
|
||||
# define CONFIG_TCCDIR "/usr/local/lib/tcc"
|
||||
# define GCC_MAJOR 4
|
||||
|
||||
@@ -84,6 +84,7 @@ proc initConfigData(c: var TConfigData) =
|
||||
c.vars = newStringTable(modeStyleInsensitive)
|
||||
|
||||
proc skipRoot(f: string): string =
|
||||
# "abc/def/xyz" --> "def/xyz"
|
||||
var i = 0
|
||||
result = ""
|
||||
for component in split(f, {dirsep, altsep}):
|
||||
|
||||
@@ -39,7 +39,7 @@ proc destroy(widget: PWidget, data: pgpointer){.cdecl.} =
|
||||
proc FileOpenClicked(menuitem: PMenuItem, userdata: pgpointer) {.cdecl.} =
|
||||
var path = ChooseFileToOpen(w)
|
||||
if path != "":
|
||||
var file: string = readFile(path)
|
||||
var file = readFile(path)
|
||||
if file != nil:
|
||||
set_text(InputTextBuffer, file, len(file))
|
||||
else:
|
||||
|
||||
@@ -5,6 +5,8 @@ News
2010-XX-XX Version 0.8.12 released
==================================

Version 0.8.12 has been released! Get it `here <download.html>`_.

Bugfixes
--------

@@ -21,7 +23,9 @@ Additions

- Added ``re.findAll``, ``pegs.findAll``.
- Added ``os.findExe``.
- The Pegs module supports a *captured search loop operator* ``{@}``.
- Pegs support a *captured search loop operator* ``{@}``.
- Pegs support new built-ins: ``\letter``, ``\upper``, ``\lower``,
  ``\title``, ``\white``.


2010-10-20 Version 0.8.10 released
@@ -95,9 +99,6 @@ Additions
2010-03-14 Version 0.8.8 released
=================================

Version 0.8.8 has been released! Get it `here <download.html>`_.


Bugfixes
--------
- The Posix version of ``os.copyFile`` has better error handling.