Merge branch 'master' into parser-experiments

This commit is contained in:
gingerBill
2020-12-06 00:49:48 +00:00
committed by GitHub
86 changed files with 14317 additions and 5037 deletions

View File

@@ -1,6 +1,7 @@
name: Nightly
on:
workflow_dispatch:
schedule:
- cron: 0 20 * * *
@@ -50,7 +51,7 @@ jobs:
- name: (Linux) Download LLVM
run: sudo apt-get install llvm
- name: build odin
run: make release
run: make nightly
- name: Odin run
run: ./odin run examples/demo/demo.odin
- name: Copy artifacts
@@ -76,7 +77,7 @@ jobs:
TMP_PATH=$(xcrun --show-sdk-path)/user/include
echo "CPATH=$TMP_PATH" >> $GITHUB_ENV
- name: build odin
run: make release
run: make nightly
- name: Odin run
run: ./odin run examples/demo/demo.odin
- name: Copy artifacts
@@ -96,10 +97,18 @@ jobs:
needs: [build_windows, build_macos, build_ubuntu]
steps:
- uses: actions/checkout@v1
- uses: actions/setup-python@v2
with:
python-version: '3.x'
- name: Install B2 CLI
shell: bash
run: sudo pip install --upgrade b2
run: |
python -m pip install --upgrade pip
pip install --upgrade b2
- name: Display Python version
run: python -c "import sys; print(sys.version)"
- name: Download Windows artifacts
uses: actions/download-artifact@v1

View File

@@ -22,7 +22,7 @@ release:
$(CC) src/main.cpp $(DISABLED_WARNINGS) $(CFLAGS) -O3 -march=native $(LDFLAGS) -o odin
nightly:
$(CC) src/main.cpp $(DISABLED_WARNINGS) $(CFLAGS) -DNIGHTLY -O3 -march=native $(LDFLAGS) -o odin
$(CC) src/main.cpp $(DISABLED_WARNINGS) $(CFLAGS) -DNIGHTLY -O3 $(LDFLAGS) -o odin

View File

@@ -30,7 +30,7 @@ The proposal process is the process for reviewing a proposal and reaching a deci
* Accept proposal
* Decline proposal
After the proposal is accepted or declined, implementation of the proprosal proceeds in the same way as any other contribution to the project.
After the proposal is accepted or declined, implementation of the proposal proceeds in the same way as any other contribution to the project.
## Design Documents

View File

@@ -60,7 +60,7 @@ main :: proc() {
#### [Getting Started](https://odin-lang.org/docs/install)
Instructions for downloading and install the Odin compiler and libraries.
Instructions for downloading and installing the Odin compiler and libraries.
### Learning Odin

31
build-m1.sh Executable file
View File

@@ -0,0 +1,31 @@
#!/usr/bin/env bash
release_mode=$1
warnings_to_disable="-std=c++11 -Wno-switch -Wno-pointer-sign -Wno-tautological-constant-out-of-range-compare -Wno-tautological-compare -Wno-macro-redefined"
libraries="-pthread -ldl -lm -lstdc++"
other_args="-DLLVM_BACKEND_SUPPORT -DUSE_NEW_LLVM_ABI_SYSTEM"
compiler="clang"
if [ -z "$release_mode" ]; then release_mode="0"; fi
if [ "$release_mode" -eq "0" ]; then
other_args="${other_args} -g"
fi
if [ "$release_mode" -eq "1" ]; then
other_args="${other_args} -O3 -march=native"
fi
if [[ "$(uname)" == "Darwin" ]]; then
# Set compiler to clang on MacOS
# MacOS provides a symlink to clang called gcc, but it's nice to be explicit here.
compiler="clang"
other_args="${other_args} -liconv"
elif [[ "$(uname)" == "FreeBSD" ]]; then
compiler="clang"
fi
${compiler} src/main.cpp ${warnings_to_disable} ${libraries} ${other_args} -o odin \
&& ./odin run examples/demo/demo.odin -llvm-api

View File

@@ -0,0 +1,68 @@
package bufio
import "core:io"
// Read_Writer stores pointers to a Reader and a Writer
Read_Writer :: struct {
r: ^Reader,
w: ^Writer,
}
read_writer_init :: proc(rw: ^Read_Writer, r: ^Reader, w: ^Writer) {
rw.r, rw.w = r, w;
}
read_writer_to_stream :: proc(rw: ^Read_Writer) -> (s: io.Stream) {
s.stream_data = rw;
s.stream_vtable = _read_writer_vtable;
return;
}
@(private)
_read_writer_vtable := &io.Stream_VTable{
impl_read = proc(s: io.Stream, p: []byte) -> (n: int, err: io.Error) {
b := (^Read_Writer)(s.stream_data).r;
return reader_read(b, p);
},
impl_read_byte = proc(s: io.Stream) -> (c: byte, err: io.Error) {
b := (^Read_Writer)(s.stream_data).r;
return reader_read_byte(b);
},
impl_unread_byte = proc(s: io.Stream) -> io.Error {
b := (^Read_Writer)(s.stream_data).r;
return reader_unread_byte(b);
},
impl_read_rune = proc(s: io.Stream) -> (r: rune, size: int, err: io.Error) {
b := (^Read_Writer)(s.stream_data).r;
return reader_read_rune(b);
},
impl_unread_rune = proc(s: io.Stream) -> io.Error {
b := (^Read_Writer)(s.stream_data).r;
return reader_unread_rune(b);
},
impl_write_to = proc(s: io.Stream, w: io.Writer) -> (n: i64, err: io.Error) {
b := (^Read_Writer)(s.stream_data).r;
return reader_write_to(b, w);
},
impl_flush = proc(s: io.Stream) -> io.Error {
b := (^Read_Writer)(s.stream_data).w;
return writer_flush(b);
},
impl_write = proc(s: io.Stream, p: []byte) -> (n: int, err: io.Error) {
b := (^Read_Writer)(s.stream_data).w;
return writer_write(b, p);
},
impl_write_byte = proc(s: io.Stream, c: byte) -> io.Error {
b := (^Read_Writer)(s.stream_data).w;
return writer_write_byte(b, c);
},
impl_write_rune = proc(s: io.Stream, r: rune) -> (int, io.Error) {
b := (^Read_Writer)(s.stream_data).w;
return writer_write_rune(b, r);
},
impl_read_from = proc(s: io.Stream, r: io.Reader) -> (n: i64, err: io.Error) {
b := (^Read_Writer)(s.stream_data).w;
return writer_read_from(b, r);
},
};

474
core/bufio/reader.odin Normal file
View File

@@ -0,0 +1,474 @@
package bufio
import "core:io"
import "core:mem"
import "core:unicode/utf8"
import "core:bytes"
// Reader is a buffered wrapper for an io.Reader
Reader :: struct {
buf: []byte,
buf_allocator: mem.Allocator,
rd: io.Reader, // reader
r, w: int, // read and write positions for buf
err: io.Error,
last_byte: int, // last byte read, invalid is -1
last_rune_size: int, // size of last rune read, invalid is -1
}
DEFAULT_BUF_SIZE :: 4096;
@(private)
MIN_READ_BUFFER_SIZE :: 16;
@(private)
MAX_CONSECUTIVE_EMPTY_READS :: 128;
reader_init :: proc(b: ^Reader, rd: io.Reader, size: int = DEFAULT_BUF_SIZE, allocator := context.allocator) {
size := size;
size = max(size, MIN_READ_BUFFER_SIZE);
reader_reset(b, rd);
b.buf_allocator = allocator;
b.buf = make([]byte, size, allocator);
}
reader_init_with_buf :: proc(b: ^Reader, rd: io.Reader, buf: []byte) {
reader_reset(b, rd);
b.buf_allocator = {};
b.buf = buf;
}
// reader_destroy destroys the underlying buffer with its associated allocator IFF that allocator has been set
reader_destroy :: proc(b: ^Reader) {
delete(b.buf, b.buf_allocator);
b^ = {};
}
// reader_size returns the capacity of the underlying buffer in bytes.
reader_size :: proc(b: ^Reader) -> int {
	capacity := len(b.buf);
	return capacity;
}
reader_reset :: proc(b: ^Reader, r: io.Reader) {
b.rd = r;
b.r, b.w = 0, 0;
b.err = nil;
b.last_byte = -1;
b.last_rune_size = -1;
}
@(private)
// _reader_read_new_chunk compacts the buffer and performs one read from the
// underlying reader.
// - Unread bytes [b.r, b.w) are slid to the front so the tail is free space.
// - Returns .Buffer_Full if the buffer has no free space even after compaction.
// - Retries zero-byte reads up to MAX_CONSECUTIVE_EMPTY_READS times; persistent
//   lack of progress records .No_Progress in b.err.
// - NOTE: a read error from the underlying reader is stashed in b.err (to be
//   surfaced later via _reader_consume_err) and nil is returned here; only
//   .Buffer_Full and .Negative_Read are returned directly.
_reader_read_new_chunk :: proc(b: ^Reader) -> io.Error {
if b.r > 0 {
// Slide unread data to the start of the buffer.
copy(b.buf, b.buf[b.r:b.w]);
b.w -= b.r;
b.r = 0;
}
if b.w >= len(b.buf) {
return .Buffer_Full;
}
// read new data, and try a limited number of times
for i := MAX_CONSECUTIVE_EMPTY_READS; i > 0; i -= 1 {
n, err := io.read(b.rd, b.buf[b.w:]);
if n < 0 {
return .Negative_Read;
}
b.w += n;
if err != nil {
// Stash the error for a later call to report; this fill still "succeeded".
b.err = err;
return nil;
}
if n > 0 {
return nil;
}
}
b.err = .No_Progress;
return nil;
}
@(private)
_reader_consume_err :: proc(b: ^Reader) -> io.Error {
err := b.err;
b.err = nil;
return err;
}
// reader_peek returns the next n bytes without advancing the reader
// The bytes stop being valid on the next read call
// If reader_peek returns fewer than n bytes, it also returns an error
// explaining why the read is short
// The error will be .Buffer_Full if n is larger than the internal buffer size
// reader_peek returns the next n buffered bytes without advancing b.r.
// Fills the buffer as needed; a short result carries the explaining error
// (.Buffer_Full when n exceeds the buffer size or the buffer filled first).
reader_peek :: proc(b: ^Reader, n: int) -> (data: []byte, err: io.Error) {
n := n;
if n < 0 {
return nil, .Negative_Count;
}
// Peeking invalidates unread_byte/unread_rune.
b.last_byte = -1;
b.last_rune_size = -1;
// Fill until n bytes are buffered, the buffer is full, or an error is stashed.
for b.w-b.r < n && b.w-b.r < len(b.buf) && b.err == nil {
if fill_err := _reader_read_new_chunk(b); fill_err != nil {
return nil, fill_err;
}
}
if n > len(b.buf) {
// Can never satisfy the request; return everything buffered.
return b.buf[b.r : b.w], .Buffer_Full;
}
if available := b.w - b.r; available < n {
// Short peek: clamp n and report why (stashed error, else buffer full).
n = available;
err = _reader_consume_err(b);
if err == nil {
err = .Buffer_Full;
}
}
return b.buf[b.r : b.r+n], err;
}
// reader_buffered returns the number of bytes that can be read from the current buffer
reader_buffered :: proc(b: ^Reader) -> int {
return b.w - b.r;
}
// reader_discard skips the next n bytes, and returns the number of bytes that were discarded
reader_discard :: proc(b: ^Reader, n: int) -> (discarded: int, err: io.Error) {
if n < 0 {
return 0, .Negative_Count;
}
if n == 0 {
return;
}
remaining := n;
for {
skip := reader_buffered(b);
if skip == 0 {
if fill_err := _reader_read_new_chunk(b); fill_err != nil {
return 0, fill_err;
}
skip = reader_buffered(b);
}
skip = min(skip, remaining);
b.r += skip;
remaining -= skip;
if remaining == 0 {
return n, nil;
}
if b.err != nil {
return n - remaining, _reader_consume_err(b);
}
}
return;
}
// reader_read reads data into p
// The bytes are taken from at most one read on the underlying Reader, which means n may be less than len(p)
// reader_read reads data into p
// The bytes are taken from at most one read on the underlying Reader, which means n may be less than len(p)
reader_read :: proc(b: ^Reader, p: []byte) -> (n: int, err: io.Error) {
n = len(p);
if n == 0 {
// Zero-length read: report a stashed error only if nothing is buffered.
if reader_buffered(b) > 0 {
return 0, nil;
}
return 0, _reader_consume_err(b);
}
if b.r == b.w {
// Buffer empty: surface any stashed error first.
if b.err != nil {
return 0, _reader_consume_err(b);
}
if len(p) >= len(b.buf) {
// Large read: bypass the buffer and read directly into p.
n, b.err = io.read(b.rd, p);
if n < 0 {
return 0, .Negative_Read;
}
if n > 0 {
b.last_byte = int(p[n-1]);
b.last_rune_size = -1;
}
return n, _reader_consume_err(b);
}
// One buffered read; do not use _reader_read_new_chunk (no retry loop needed here).
b.r, b.w = 0, 0;
n, b.err = io.read(b.rd, b.buf);
if n < 0 {
return 0, .Negative_Read;
}
if n == 0 {
return 0, _reader_consume_err(b);
}
b.w += n;
}
// Copy as much as possible from the buffer; err is nil even if short.
n = copy(p, b.buf[b.r:b.w]);
b.r += n;
b.last_byte = int(b.buf[b.r-1]);
b.last_rune_size = -1;
return n, nil;
}
// reader_read_byte reads and returns a single byte
// If no byte is available, it returns an error
reader_read_byte :: proc(b: ^Reader) -> (byte, io.Error) {
b.last_rune_size = -1;
for b.r == b.w {
if b.err != nil {
return 0, _reader_consume_err(b);
}
if err := _reader_read_new_chunk(b); err != nil {
return 0, err;
}
}
c := b.buf[b.r];
b.r += 1;
b.last_byte = int(c);
return c, nil;
}
// reader_unread_byte unreads the last byte. Only the most recently read byte can be unread
// reader_unread_byte unreads the last byte. Only the most recently read byte can be unread
reader_unread_byte :: proc(b: ^Reader) -> io.Error {
// Invalid if no byte was read, or if a fill compacted the buffer to position 0
// after the last read (b.r == 0 with data present means the slot is occupied).
if b.last_byte < 0 || b.r == 0 && b.w > 0 {
return .Invalid_Unread;
}
if b.r > 0 {
b.r -= 1;
} else {
// b.r == 0 && b.w == 0
// Buffer is empty: materialize a one-byte window holding the unread byte.
b.w = 1;
}
b.buf[b.r] = byte(b.last_byte);
b.last_byte = -1;
b.last_rune_size = -1;
return nil;
}
// reader_read_rune reads a single UTF-8 encoded unicode character
// and returns the rune and its size in bytes
// If the encoded rune is invalid, it consumes one byte and returns utf8.RUNE_ERROR (U+FFFD) with a size of 1
// reader_read_rune reads a single UTF-8 encoded unicode character and returns
// the rune and its size in bytes. If the encoded rune is invalid,
// utf8.decode_rune yields the error rune with size 1, consuming one byte.
reader_read_rune :: proc(b: ^Reader) -> (r: rune, size: int, err: io.Error) {
	// Fill until a complete rune is buffered, an error is stashed, or the
	// buffer is full.
	// FIX(review): the original last condition was `b.w-b.w < len(b.buf)`,
	// which is always true (0 < len); it must compare the buffered byte
	// count `b.w-b.r` against the buffer size, as reader_peek does.
	for b.r+utf8.UTF_MAX > b.w &&
	    !utf8.full_rune(b.buf[b.r:b.w]) &&
	    b.err == nil &&
	    b.w-b.r < len(b.buf) {
		if err = _reader_read_new_chunk(b); err != nil {
			return;
		}
	}
	b.last_rune_size = -1;
	if b.r == b.w {
		// Nothing buffered: surface the stashed error (if any).
		err = _reader_consume_err(b);
		return;
	}
	// Fast path for ASCII; otherwise decode a multi-byte rune.
	r, size = rune(b.buf[b.r]), 1;
	if r >= utf8.RUNE_SELF {
		r, size = utf8.decode_rune(b.buf[b.r : b.w]);
	}
	b.r += size;
	b.last_byte = int(b.buf[b.r-1]);
	b.last_rune_size = size;
	return;
}
// reader_unread_rune unreads the last rune. Only the most recently read rune can be unread
reader_unread_rune :: proc(b: ^Reader) -> io.Error {
if b.last_rune_size < 0 || b.r < b.last_rune_size {
return .Invalid_Unread;
}
b.r -= b.last_rune_size;
b.last_byte = -1;
b.last_rune_size = -1;
return nil;
}
// reader_write_to drains the Reader into w, supporting io.Writer_To.
// It first flushes the currently buffered bytes, then tries to delegate to
// the underlying reader's write_to or the writer's read_from; failing that,
// it pumps data through the buffer until the source is exhausted.
reader_write_to :: proc(b: ^Reader, w: io.Writer) -> (n: i64, err: io.Error) {
// write_buf writes the buffered span [b.r, b.w) to w and advances b.r.
write_buf :: proc(b: ^Reader, w: io.Writer) -> (i64, io.Error) {
n, err := io.write(w, b.buf[b.r:b.w]);
if n < 0 {
return 0, .Negative_Write;
}
b.r += n;
return i64(n), err;
}
n, err = write_buf(b, w);
if err != nil {
return;
}
m: i64;
// Prefer zero-copy delegation when either endpoint supports it.
if nr, cerr := io.to_writer_to(b.rd); cerr == nil {
m, err = io.write_to(nr, w);
n += m;
return n, err;
}
if nw, cerr := io.to_reader_from(w); cerr == nil {
m, err = io.read_from(nw, b.rd);
n += m;
return n, err;
}
if b.w-b.r < len(b.buf) {
if err = _reader_read_new_chunk(b); err != nil {
return;
}
}
// Pump: write what is buffered, then refill, until the buffer stays empty.
for b.r < b.w {
m, err = write_buf(b, w);
n += m;
if err != nil {
return;
}
if err = _reader_read_new_chunk(b); err != nil {
return;
}
}
// EOF is the normal end of the drain, not an error.
if b.err == .EOF {
b.err = nil;
}
err = _reader_consume_err(b);
return;
}
// reader_to_stream converts a Reader into an io.Stream
reader_to_stream :: proc(b: ^Reader) -> (s: io.Stream) {
s.stream_data = b;
s.stream_vtable = _reader_vtable;
return;
}
@(private)
_reader_vtable := &io.Stream_VTable{
impl_destroy = proc(s: io.Stream) -> io.Error {
b := (^Reader)(s.stream_data);
reader_destroy(b);
return nil;
},
impl_read = proc(s: io.Stream, p: []byte) -> (n: int, err: io.Error) {
b := (^Reader)(s.stream_data);
return reader_read(b, p);
},
impl_read_byte = proc(s: io.Stream) -> (c: byte, err: io.Error) {
b := (^Reader)(s.stream_data);
return reader_read_byte(b);
},
impl_unread_byte = proc(s: io.Stream) -> io.Error {
b := (^Reader)(s.stream_data);
return reader_unread_byte(b);
},
impl_read_rune = proc(s: io.Stream) -> (r: rune, size: int, err: io.Error) {
b := (^Reader)(s.stream_data);
return reader_read_rune(b);
},
impl_unread_rune = proc(s: io.Stream) -> io.Error {
b := (^Reader)(s.stream_data);
return reader_unread_rune(b);
},
impl_write_to = proc(s: io.Stream, w: io.Writer) -> (n: i64, err: io.Error) {
b := (^Reader)(s.stream_data);
return reader_write_to(b, w);
},
};
//
// Utility procedures
//
// reader_read_slice reads until the first occurrence of delim from the reader
// It returns a slice pointing at the bytes in the buffer
// The bytes stop being valid at the next read
// If reader_read_slice encounters an error before finding a delimiter
// reader_read_slice fails with error .Buffer_Full if the buffer fills without a delim
// Because the data returned from reader_read_slice will be overwritten on the
// next IO operation, reader_read_bytes or reader_read_string is usually preferred
//
// reader_read_slice returns err != nil if and only if line does not end in delim
//
reader_read_slice :: proc(b: ^Reader, delim: byte) -> (line: []byte, err: io.Error) {
// s is the length of the already-searched prefix, so each iteration only
// scans bytes newly added by the last fill.
s := 0;
for {
if i := bytes.index_byte(b.buf[b.r+s : b.w], delim); i >= 0 {
// Found: include the delimiter in the returned slice.
i += s;
line = b.buf[b.r:][:i+1];
b.r += i + 1;
break;
}
if b.err != nil {
// Stashed read error: hand back everything buffered with the error.
line = b.buf[b.r : b.w];
b.r = b.w;
err = _reader_consume_err(b);
break;
}
if reader_buffered(b) >= len(b.buf) {
// Buffer is full with no delimiter in sight.
b.r = b.w;
line = b.buf;
err = .Buffer_Full;
break;
}
s = b.w - b.r;
if err = _reader_read_new_chunk(b); err != nil {
break;
}
}
// Record the last returned byte so unread_byte remains valid.
if i := len(line)-1; i >= 0 {
b.last_byte = int(line[i]);
b.last_rune_size = -1;
}
return;
}
// reader_read_bytes reads until the first occurrence of delim from the Reader
// It returns an allocated slice containing the data up to and including the delimiter
reader_read_bytes :: proc(b: ^Reader, delim: byte, allocator := context.allocator) -> (buf: []byte, err: io.Error) {
full: [dynamic]byte;
full.allocator = allocator;
frag: []byte;
for {
e: io.Error;
frag, e = reader_read_slice(b, delim);
if e == nil {
break;
}
if e != .Buffer_Full {
err = e;
break;
}
append(&full, ..frag);
}
append(&full, ..frag);
return full[:], err;
}
// reader_read_string reads until the first occurrence of delim from the Reader
// It returns an allocated string containing the data up to and including the delimiter
reader_read_string :: proc(b: ^Reader, delim: byte, allocator := context.allocator) -> (string, io.Error) {
buf, err := reader_read_bytes(b, delim, allocator);
return string(buf), err;
}

255
core/bufio/writer.odin Normal file
View File

@@ -0,0 +1,255 @@
package bufio
import "core:io"
import "core:mem"
import "core:unicode/utf8"
// import "core:bytes"
// Writer is a buffered wrapper for an io.Writer
Writer :: struct {
buf: []byte,
buf_allocator: mem.Allocator,
wr: io.Writer,
n: int,
err: io.Error,
}
writer_init :: proc(b: ^Writer, wr: io.Writer, size: int = DEFAULT_BUF_SIZE, allocator := context.allocator) {
size := size;
size = max(size, MIN_READ_BUFFER_SIZE);
writer_reset(b, wr);
b.buf_allocator = allocator;
b.buf = make([]byte, size, allocator);
}
writer_init_with_buf :: proc(b: ^Writer, wr: io.Writer, buf: []byte) {
writer_reset(b, wr);
b.buf_allocator = {};
b.buf = buf;
}
// writer_destroy destroys the underlying buffer with its associated allocator IFF that allocator has been set
writer_destroy :: proc(b: ^Writer) {
delete(b.buf, b.buf_allocator);
b^ = {};
}
// writer_size returns the size of underlying buffer in bytes
writer_size :: proc(b: ^Writer) -> int {
return len(b.buf);
}
writer_reset :: proc(b: ^Writer, w: io.Writer) {
b.wr = w;
b.n = 0;
b.err = nil;
}
// writer_flush writes any buffered data into the underlying io.Writer
// writer_flush writes any buffered data into the underlying io.Writer
writer_flush :: proc(b: ^Writer) -> io.Error {
// A previously recorded error makes the writer sticky-failed.
if b.err != nil {
return b.err;
}
if b.n == 0 {
return nil;
}
n, err := io.write(b.wr, b.buf[0:b.n]);
if n < b.n && err == nil {
err = .Short_Write;
}
if err != nil {
if n > 0 && n < b.n {
// Partial write: slide the unwritten tail to the front so the
// remaining bytes are retried on the next flush.
copy(b.buf[:b.n-n], b.buf[n : b.n]);
}
b.n -= n;
// Record the error; subsequent writes will fail fast.
b.err = err;
return err;
}
b.n = 0;
return nil;
}
// writer_available returns how many bytes are unused in the buffer
writer_available :: proc(b: ^Writer) -> int {
return len(b.buf) - b.n;
}
// writer_buffered returns the number of bytes that have been written into the current buffer
writer_buffered :: proc(b: ^Writer) -> int {
return b.n;
}
// writer_write writes the contents of p into the buffer
// It returns the number of bytes written
// If n < len(p), it will return an error explaining why the write is short
// writer_write writes the contents of p into the buffer
// It returns the number of bytes written
// If n < len(p), it will return an error explaining why the write is short
writer_write :: proc(b: ^Writer, p: []byte) -> (n: int, err: io.Error) {
p := p;
// While p cannot fit in the free space, either write it straight through
// (buffer empty) or top up the buffer and flush.
for len(p) > writer_available(b) && b.err == nil {
m: int;
if writer_buffered(b) == 0 {
// Large write with empty buffer: bypass buffering entirely.
m, b.err = io.write(b.wr, p);
} else {
m = copy(b.buf[b.n:], p);
b.n += m;
// Flush errors are recorded in b.err and checked after the loop.
writer_flush(b);
}
n += m;
p = p[m:];
}
if b.err != nil {
return n, b.err;
}
// Remainder fits: just buffer it.
m := copy(b.buf[b.n:], p);
b.n += m;
m += n;
return m, nil;
}
// writer_write_byte writes a single byte
writer_write_byte :: proc(b: ^Writer, c: byte) -> io.Error {
if b.err != nil {
return b.err;
}
if writer_available(b) <= 0 && writer_flush(b) != nil {
return b.err;
}
b.buf[b.n] = c;
b.n += 1;
return nil;
}
// writer_write_rune writes a single unicode code point, and returns the number of bytes written with any error
writer_write_rune :: proc(b: ^Writer, r: rune) -> (size: int, err: io.Error) {
if r < utf8.RUNE_SELF {
err = writer_write_byte(b, byte(r));
size = 0 if err != nil else 1;
return;
}
if b.err != nil {
return 0, b.err;
}
buf: [4]u8;
n := writer_available(b);
if n < utf8.UTF_MAX {
writer_flush(b);
if b.err != nil {
return 0, b.err;
}
n = writer_available(b);
if n < utf8.UTF_MAX {
// this only happens if the buffer is very small
w: int;
buf, w = utf8.encode_rune(r);
return writer_write(b, buf[:w]);
}
}
buf, size = utf8.encode_rune(r);
copy(b.buf[b.n:], buf[:size]);
b.n += size;
return;
}
// writer_write writes a string into the buffer
// It returns the number of bytes written
// If n < len(p), it will return an error explaining why the write is short
writer_write_string :: proc(b: ^Writer, s: string) -> (int, io.Error) {
return writer_write(b, transmute([]byte)s);
}
// writer_read_from is to support io.Reader_From types
// If the underlying writer supports io.read_from, and b has no buffered data yet,
// this procedure calls the underlying read_from implementation without buffering
// writer_read_from drains r into the writer, supporting io.Reader_From.
// If the underlying writer implements read_from and nothing is buffered yet,
// it delegates directly without buffering; otherwise it reads into the buffer
// and flushes as it fills. Returns the total bytes consumed from r.
writer_read_from :: proc(b: ^Writer, r: io.Reader) -> (n: i64, err: io.Error) {
	if b.err != nil {
		return 0, b.err;
	}
	if writer_buffered(b) == 0 {
		// FIX(review): the original tested `cerr != nil`, i.e. it delegated
		// only when the conversion FAILED and then called read_from on an
		// invalid value. The check must be `cerr == nil`, matching the
		// `to_writer_to`/`to_reader_from` checks in reader_write_to.
		if w, cerr := io.to_reader_from(b.wr); cerr == nil {
			n, err = io.read_from(w, r);
			b.err = err;
			return;
		}
	}
	for {
		if writer_available(b) == 0 {
			if ferr := writer_flush(b); ferr != nil {
				return n, ferr;
			}
		}
		m: int;
		nr := 0;
		// Tolerate a bounded number of zero-byte reads before giving up.
		for nr < MAX_CONSECUTIVE_EMPTY_READS {
			m, err = io.read(r, b.buf[b.n:]);
			if m != 0 || err != nil {
				break;
			}
			nr += 1;
		}
		if nr == MAX_CONSECUTIVE_EMPTY_READS {
			return n, .No_Progress;
		}
		b.n += m;
		n += i64(m);
		if err != nil {
			break;
		}
	}
	if err == .EOF {
		// EOF is the normal termination of the drain, not an error.
		if writer_available(b) == 0 {
			err = writer_flush(b);
		} else {
			err = nil;
		}
	}
	return;
}
// writer_to_stream converts a Writer into an io.Stream
writer_to_stream :: proc(b: ^Writer) -> (s: io.Stream) {
s.stream_data = b;
s.stream_vtable = _writer_vtable;
return;
}
@(private)
_writer_vtable := &io.Stream_VTable{
impl_destroy = proc(s: io.Stream) -> io.Error {
b := (^Writer)(s.stream_data);
writer_destroy(b);
return nil;
},
impl_flush = proc(s: io.Stream) -> io.Error {
b := (^Writer)(s.stream_data);
return writer_flush(b);
},
impl_write = proc(s: io.Stream, p: []byte) -> (n: int, err: io.Error) {
b := (^Writer)(s.stream_data);
return writer_write(b, p);
},
impl_write_byte = proc(s: io.Stream, c: byte) -> io.Error {
b := (^Writer)(s.stream_data);
return writer_write_byte(b, c);
},
impl_write_rune = proc(s: io.Stream, r: rune) -> (int, io.Error) {
b := (^Writer)(s.stream_data);
return writer_write_rune(b, r);
},
impl_read_from = proc(s: io.Stream, r: io.Reader) -> (n: i64, err: io.Error) {
b := (^Writer)(s.stream_data);
return writer_read_from(b, r);
},
};

335
core/bytes/buffer.odin Normal file
View File

@@ -0,0 +1,335 @@
package bytes
import "core:io"
import "core:unicode/utf8"
MIN_READ :: 512;
@(private)
SMALL_BUFFER_SIZE :: 64;
Buffer :: struct {
buf: [dynamic]byte,
off: int,
last_read: Read_Op,
}
@(private)
Read_Op :: enum i8 {
Read = -1,
Invalid = 0,
Read_Rune1 = 1,
Read_Rune2 = 2,
Read_Rune3 = 3,
Read_Rune4 = 4,
}
buffer_init :: proc(b: ^Buffer, buf: []byte) {
resize(&b.buf, len(buf));
copy(b.buf[:], buf);
}
buffer_init_string :: proc(b: ^Buffer, s: string) {
resize(&b.buf, len(s));
copy(b.buf[:], s);
}
buffer_destroy :: proc(b: ^Buffer) {
delete(b.buf);
buffer_reset(b);
}
buffer_to_bytes :: proc(b: ^Buffer) -> []byte {
return b.buf[b.off:];
}
buffer_to_string :: proc(b: ^Buffer) -> string {
if b == nil {
return "<nil>";
}
return string(b.buf[b.off:]);
}
buffer_is_empty :: proc(b: ^Buffer) -> bool {
return len(b.buf) <= b.off;
}
buffer_length :: proc(b: ^Buffer) -> int {
return len(b.buf) - b.off;
}
buffer_capacity :: proc(b: ^Buffer) -> int {
return cap(b.buf);
}
buffer_reset :: proc(b: ^Buffer) {
clear(&b.buf);
b.off = 0;
b.last_read = .Invalid;
}
buffer_truncate :: proc(b: ^Buffer, n: int) {
if n == 0 {
buffer_reset(b);
return;
}
b.last_read = .Invalid;
if n < 0 || n > buffer_length(b) {
panic("bytes.truncate: truncation out of range");
}
resize(&b.buf, b.off+n);
}
@(private)
_buffer_try_grow :: proc(b: ^Buffer, n: int) -> (int, bool) {
if l := len(b.buf); n <= cap(b.buf)-l {
resize(&b.buf, l+n);
return l, true;
}
return 0, false;
}
@(private)
// _buffer_grow guarantees space for n more bytes and returns the index at
// which those bytes should be written. It may compact (slide unread data to
// the front), allocate a small initial buffer, or reallocate; it panics if
// the required capacity would overflow int.
_buffer_grow :: proc(b: ^Buffer, n: int) -> int {
m := buffer_length(b);
// Buffer fully consumed: recycle it so off restarts at 0.
if m == 0 && b.off != 0 {
buffer_reset(b);
}
if i, ok := _buffer_try_grow(b, n); ok {
return i;
}
if b.buf == nil && n <= SMALL_BUFFER_SIZE {
b.buf = make([dynamic]byte, n, SMALL_BUFFER_SIZE);
return 0;
}
c := cap(b.buf);
if n <= c/2 - m {
// Enough room if we slide the unread data to the front; avoids realloc.
copy(b.buf[:], b.buf[b.off:]);
} else if c > max(int) - c - n {
// 2*c + n would overflow int.
panic("bytes.Buffer: too large");
} else {
// Not enough space anywhere: grow capacity, then compact.
resize(&b.buf, 2*c + n);
copy(b.buf[:], b.buf[b.off:]);
}
b.off = 0;
resize(&b.buf, m+n);
return m;
}
buffer_grow :: proc(b: ^Buffer, n: int) {
if n < 0 {
panic("bytes.buffer_grow: negative count");
}
m := _buffer_grow(b, n);
resize(&b.buf, m);
}
buffer_write :: proc(b: ^Buffer, p: []byte) -> (n: int, err: io.Error) {
b.last_read = .Invalid;
m, ok := _buffer_try_grow(b, len(p));
if !ok {
m = _buffer_grow(b, len(p));
}
return copy(b.buf[m:], p), nil;
}
buffer_write_string :: proc(b: ^Buffer, s: string) -> (n: int, err: io.Error) {
b.last_read = .Invalid;
m, ok := _buffer_try_grow(b, len(s));
if !ok {
m = _buffer_grow(b, len(s));
}
return copy(b.buf[m:], s), nil;
}
buffer_write_byte :: proc(b: ^Buffer, c: byte) -> io.Error {
b.last_read = .Invalid;
m, ok := _buffer_try_grow(b, 1);
if !ok {
m = _buffer_grow(b, 1);
}
b.buf[m] = c;
return nil;
}
buffer_write_rune :: proc(b: ^Buffer, r: rune) -> (n: int, err: io.Error) {
if r < utf8.RUNE_SELF {
buffer_write_byte(b, byte(r));
return 1, nil;
}
b.last_read = .Invalid;
m, ok := _buffer_try_grow(b, utf8.UTF_MAX);
if !ok {
m = _buffer_grow(b, utf8.UTF_MAX);
}
res: [4]byte;
res, n = utf8.encode_rune(r);
copy(b.buf[m:][:utf8.UTF_MAX], res[:n]);
resize(&b.buf, m+n);
return;
}
buffer_next :: proc(b: ^Buffer, n: int) -> []byte {
n := n;
b.last_read = .Invalid;
m := buffer_length(b);
if n > m {
n = m;
}
data := b.buf[b.off : b.off + n];
b.off += n;
if n > 0 {
b.last_read = .Read;
}
return data;
}
buffer_read :: proc(b: ^Buffer, p: []byte) -> (n: int, err: io.Error) {
b.last_read = .Invalid;
if buffer_is_empty(b) {
buffer_reset(b);
if len(p) == 0 {
return 0, nil;
}
return 0, .EOF;
}
n = copy(p, b.buf[b.off:]);
b.off += n;
if n > 0 {
b.last_read = .Read;
}
return;
}
buffer_read_byte :: proc(b: ^Buffer) -> (byte, io.Error) {
if buffer_is_empty(b) {
buffer_reset(b);
return 0, .EOF;
}
c := b.buf[b.off];
b.off += 1;
b.last_read = .Read;
return c, nil;
}
buffer_read_rune :: proc(b: ^Buffer) -> (r: rune, size: int, err: io.Error) {
if buffer_is_empty(b) {
buffer_reset(b);
return 0, 0, .EOF;
}
c := b.buf[b.off];
if c < utf8.RUNE_SELF {
b.off += 1;
b.last_read = .Read_Rune1;
return rune(c), 1, nil;
}
r, size = utf8.decode_rune(b.buf[b.off:]);
b.off += size;
b.last_read = Read_Op(i8(size));
return;
}
buffer_unread_byte :: proc(b: ^Buffer) -> io.Error {
if b.last_read == .Invalid {
return .Invalid_Unread;
}
b.last_read = .Invalid;
if b.off > 0 {
b.off -= 1;
}
return nil;
}
// buffer_unread_rune unreads the last rune returned by buffer_read_rune.
// last_read encodes the byte size of that rune (.Read_Rune1..4 => 1..4);
// .Read (-1) and .Invalid (0) mean the previous operation was not read_rune.
buffer_unread_rune :: proc(b: ^Buffer) -> io.Error {
if b.last_read <= .Invalid {
// Rejects .Invalid (0) and .Read (-1): nothing rune-shaped to unread.
return .Invalid_Unread;
}
if b.off >= int(b.last_read) {
// Step back by the recorded rune size.
b.off -= int(i8(b.last_read));
}
b.last_read = .Invalid;
return nil;
}
buffer_read_bytes :: proc(b: ^Buffer, delim: byte) -> (line: []byte, err: io.Error) {
i := index_byte(b.buf[b.off:], delim);
end := b.off + i + 1;
if i < 0 {
end = len(b.buf);
err = .EOF;
}
line = b.buf[b.off:end];
b.off = end;
b.last_read = .Read;
return;
}
buffer_read_string :: proc(b: ^Buffer, delim: byte) -> (line: string, err: io.Error) {
slice: []byte;
slice, err = buffer_read_bytes(b, delim);
return string(slice), err;
}
buffer_to_stream :: proc(b: ^Buffer) -> (s: io.Stream) {
s.stream_data = b;
s.stream_vtable = _buffer_vtable;
return;
}
@(private)
_buffer_vtable := &io.Stream_VTable{
impl_size = proc(s: io.Stream) -> i64 {
b := (^Buffer)(s.stream_data);
return i64(buffer_capacity(b));
},
impl_read = proc(s: io.Stream, p: []byte) -> (n: int, err: io.Error) {
b := (^Buffer)(s.stream_data);
return buffer_read(b, p);
},
impl_read_byte = proc(s: io.Stream) -> (byte, io.Error) {
b := (^Buffer)(s.stream_data);
return buffer_read_byte(b);
},
impl_read_rune = proc(s: io.Stream) -> (r: rune, size: int, err: io.Error) {
b := (^Buffer)(s.stream_data);
return buffer_read_rune(b);
},
impl_write = proc(s: io.Stream, p: []byte) -> (n: int, err: io.Error) {
b := (^Buffer)(s.stream_data);
return buffer_write(b, p);
},
impl_write_byte = proc(s: io.Stream, c: byte) -> io.Error {
b := (^Buffer)(s.stream_data);
return buffer_write_byte(b, c);
},
impl_write_rune = proc(s: io.Stream, r: rune) -> (int, io.Error) {
b := (^Buffer)(s.stream_data);
return buffer_write_rune(b, r);
},
impl_unread_byte = proc(s: io.Stream) -> io.Error {
b := (^Buffer)(s.stream_data);
return buffer_unread_byte(b);
},
impl_unread_rune = proc(s: io.Stream) -> io.Error {
b := (^Buffer)(s.stream_data);
return buffer_unread_rune(b);
},
impl_destroy = proc(s: io.Stream) -> io.Error {
b := (^Buffer)(s.stream_data);
buffer_destroy(b);
return nil;
},
// TODO(bill): write_to and read_from
// impl_write_to = nil,
// impl_read_from = nil,
};

177
core/bytes/reader.odin Normal file
View File

@@ -0,0 +1,177 @@
package bytes
import "core:io"
import "core:unicode/utf8"
Reader :: struct {
s: []byte, // read-only buffer
i: i64, // current reading index
prev_rune: int, // previous reading index of rune or < 0
}
reader_init :: proc(r: ^Reader, s: []byte) {
r.s = s;
r.i = 0;
r.prev_rune = -1;
}
reader_to_stream :: proc(r: ^Reader) -> (s: io.Stream) {
s.stream_data = r;
s.stream_vtable = _reader_vtable;
return;
}
reader_length :: proc(r: ^Reader) -> int {
if r.i >= i64(len(r.s)) {
return 0;
}
return int(i64(len(r.s)) - r.i);
}
reader_size :: proc(r: ^Reader) -> i64 {
return i64(len(r.s));
}
reader_read :: proc(r: ^Reader, p: []byte) -> (n: int, err: io.Error) {
if r.i >= i64(len(r.s)) {
return 0, .EOF;
}
r.prev_rune = -1;
n = copy(p, r.s[r.i:]);
r.i += i64(n);
return;
}
reader_read_at :: proc(r: ^Reader, p: []byte, off: i64) -> (n: int, err: io.Error) {
if off < 0 {
return 0, .Invalid_Offset;
}
if off >= i64(len(r.s)) {
return 0, .EOF;
}
n = copy(p, r.s[off:]);
if n < len(p) {
err = .EOF;
}
return;
}
reader_read_byte :: proc(r: ^Reader) -> (byte, io.Error) {
r.prev_rune = -1;
if r.i >= i64(len(r.s)) {
return 0, .EOF;
}
b := r.s[r.i];
r.i += 1;
return b, nil;
}
reader_unread_byte :: proc(r: ^Reader) -> io.Error {
if r.i <= 0 {
return .Invalid_Unread;
}
r.prev_rune = -1;
r.i -= 1;
return nil;
}
reader_read_rune :: proc(r: ^Reader) -> (ch: rune, size: int, err: io.Error) {
if r.i >= i64(len(r.s)) {
r.prev_rune = -1;
return 0, 0, .EOF;
}
r.prev_rune = int(r.i);
if c := r.s[r.i]; c < utf8.RUNE_SELF {
r.i += 1;
return rune(c), 1, nil;
}
ch, size = utf8.decode_rune(r.s[r.i:]);
r.i += i64(size);
return;
}
reader_unread_rune :: proc(r: ^Reader) -> io.Error {
if r.i <= 0 {
return .Invalid_Unread;
}
if r.prev_rune < 0 {
return .Invalid_Unread;
}
r.i = i64(r.prev_rune);
r.prev_rune = -1;
return nil;
}
// reader_seek repositions the read index relative to whence and returns the
// resulting absolute offset. Negative results are rejected as .Invalid_Offset.
reader_seek :: proc(r: ^Reader, offset: i64, whence: io.Seek_From) -> (i64, io.Error) {
	r.prev_rune = -1;
	base: i64;
	switch whence {
	case .Start:
		base = 0;
	case .Current:
		base = r.i;
	case .End:
		base = i64(len(r.s));
	case:
		return 0, .Invalid_Whence;
	}
	abs := base + offset;
	if abs < 0 {
		return 0, .Invalid_Offset;
	}
	r.i = abs;
	return abs, nil;
}
reader_write_to :: proc(r: ^Reader, w: io.Writer) -> (n: i64, err: io.Error) {
r.prev_rune = -1;
if r.i >= i64(len(r.s)) {
return 0, nil;
}
s := r.s[r.i:];
m: int;
m, err = io.write(w, s);
if m > len(s) {
panic("bytes.Reader.write_to: invalid io.write_string count");
}
r.i += i64(m);
n = i64(m);
if m != len(s) && err == nil {
err = .Short_Write;
}
return;
}
@(private)
_reader_vtable := &io.Stream_VTable{
impl_size = proc(s: io.Stream) -> i64 {
r := (^Reader)(s.stream_data);
return reader_size(r);
},
impl_read = proc(s: io.Stream, p: []byte) -> (n: int, err: io.Error) {
r := (^Reader)(s.stream_data);
return reader_read(r, p);
},
impl_read_at = proc(s: io.Stream, p: []byte, off: i64) -> (n: int, err: io.Error) {
r := (^Reader)(s.stream_data);
return reader_read_at(r, p, off);
},
impl_read_byte = proc(s: io.Stream) -> (byte, io.Error) {
r := (^Reader)(s.stream_data);
return reader_read_byte(r);
},
impl_unread_byte = proc(s: io.Stream) -> io.Error {
r := (^Reader)(s.stream_data);
return reader_unread_byte(r);
},
impl_read_rune = proc(s: io.Stream) -> (ch: rune, size: int, err: io.Error) {
r := (^Reader)(s.stream_data);
return reader_read_rune(r);
},
impl_unread_rune = proc(s: io.Stream) -> io.Error {
r := (^Reader)(s.stream_data);
return reader_unread_rune(r);
},
impl_seek = proc(s: io.Stream, offset: i64, whence: io.Seek_From) -> (i64, io.Error) {
r := (^Reader)(s.stream_data);
return reader_seek(r, offset, whence);
},
impl_write_to = proc(s: io.Stream, w: io.Writer) -> (n: i64, err: io.Error) {
r := (^Reader)(s.stream_data);
return reader_write_to(r, w);
},
};

1032
core/bytes/strings.odin Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -1,3 +1,3 @@
package dynlib
Library :: opaque rawptr;
Library :: #opaque rawptr;

View File

@@ -14,7 +14,8 @@ Marshal_Error :: enum {
}
marshal :: proc(v: any, allocator := context.allocator) -> ([]byte, Marshal_Error) {
b := strings.make_builder(allocator);
b: strings.Builder;
strings.init_builder(&b, allocator);
err := marshal_arg(&b, v);
@@ -129,7 +130,7 @@ marshal_arg :: proc(b: ^strings.Builder, v: any) -> Marshal_Error {
case b32: val = bool(b);
case b64: val = bool(b);
}
write_string(b, val ? "true" : "false");
write_string_builder(b, val ? "true" : "false");
case Type_Info_Any:
return .Unsupported_Type;
@@ -208,14 +209,12 @@ marshal_arg :: proc(b: ^strings.Builder, v: any) -> Marshal_Error {
if i > 0 { write_string(b, ", "); }
data := uintptr(entries.data) + uintptr(i*entry_size);
header := cast(^Map_Entry_Header)data;
marshal_arg(b, any{rawptr(&header.key.key.val), info.key.id});
key := rawptr(data + entry_type.offsets[2]);
value := rawptr(data + entry_type.offsets[3]);
marshal_arg(b, any{key, info.key.id});
write_string(b, ": ");
value := data + entry_type.offsets[2];
marshal_arg(b, any{rawptr(value), info.value.id});
marshal_arg(b, any{value, info.value.id});
}
}
write_byte(b, '}');

File diff suppressed because it is too large Load Diff

View File

@@ -1,4 +1,5 @@
// This is purely for documentation
//+ignore
package intrinsics
// Types
@@ -114,7 +115,7 @@ type_is_ordered_numeric :: proc($T: typeid) -> bool ---
type_is_indexable :: proc($T: typeid) -> bool ---
type_is_sliceable :: proc($T: typeid) -> bool ---
type_is_comparable :: proc($T: typeid) -> bool ---
type_is_simple_compare :: proc($T: typeid) -> bool --- // easily compared using memcmp
type_is_simple_compare :: proc($T: typeid) -> bool --- // easily compared using memcmp (== and !=)
type_is_dereferenceable :: proc($T: typeid) -> bool ---
type_is_valid_map_key :: proc($T: typeid) -> bool ---
@@ -152,3 +153,6 @@ type_polymorphic_record_parameter_value :: proc($T: typeid, index: int) -> $V --
type_field_index_of :: proc($T: typeid, $name: string) -> uintptr ---
type_equal_proc :: proc($T: typeid) -> (equal: proc "contextless" (rawptr, rawptr) -> bool) ---
type_hasher_proc :: proc($T: typeid) -> (hasher: proc "contextless" (data: rawptr, seed: uintptr) -> uintptr) ---

200
core/io/conv.odin Normal file
View File

@@ -0,0 +1,200 @@
package io
// Result of converting a Stream into a more specific capability wrapper.
Conversion_Error :: enum {
	None,
	Missing_Procedure,  // the stream's v-table lacks a required procedure
	Fallback_Possible,  // the exact procedure is absent but io can emulate it via a generic one
}
// Views s as a Reader; err is .Missing_Procedure when the stream cannot read.
to_reader :: proc(s: Stream) -> (r: Reader, err: Conversion_Error) {
	r.stream = s;
	usable := s.stream_vtable != nil && s.impl_read != nil;
	if !usable {
		err = .Missing_Procedure;
	}
	return;
}
// Views s as a Writer; err is .Missing_Procedure when the stream cannot write.
to_writer :: proc(s: Stream) -> (w: Writer, err: Conversion_Error) {
	w.stream = s;
	usable := s.stream_vtable != nil && s.impl_write != nil;
	if !usable {
		err = .Missing_Procedure;
	}
	return;
}
// Views s as a Closer; err is .Missing_Procedure when no close procedure exists.
to_closer :: proc(s: Stream) -> (c: Closer, err: Conversion_Error) {
	c.stream = s;
	usable := s.stream_vtable != nil && s.impl_close != nil;
	if !usable {
		err = .Missing_Procedure;
	}
	return;
}
// Views s as a Flusher; err is .Missing_Procedure when no flush procedure exists.
to_flusher :: proc(s: Stream) -> (f: Flusher, err: Conversion_Error) {
	f.stream = s;
	usable := s.stream_vtable != nil && s.impl_flush != nil;
	if !usable {
		err = .Missing_Procedure;
	}
	return;
}
// Views s as a Seeker; err is .Missing_Procedure when the stream cannot seek.
to_seeker :: proc(s: Stream) -> (seeker: Seeker, err: Conversion_Error) {
	seeker.stream = s;
	usable := s.stream_vtable != nil && s.impl_seek != nil;
	if !usable {
		err = .Missing_Procedure;
	}
	return;
}
// Views s as a Read_Writer; requires both read and write support.
to_read_writer :: proc(s: Stream) -> (r: Read_Writer, err: Conversion_Error) {
	r.stream = s;
	usable := s.stream_vtable != nil && s.impl_read != nil && s.impl_write != nil;
	if !usable {
		err = .Missing_Procedure;
	}
	return;
}
// Views s as a Read_Closer; requires both read and close support.
to_read_closer :: proc(s: Stream) -> (r: Read_Closer, err: Conversion_Error) {
	r.stream = s;
	usable := s.stream_vtable != nil && s.impl_read != nil && s.impl_close != nil;
	if !usable {
		err = .Missing_Procedure;
	}
	return;
}
// Views s as a Read_Write_Closer; requires read, write, and close support.
to_read_write_closer :: proc(s: Stream) -> (r: Read_Write_Closer, err: Conversion_Error) {
	r.stream = s;
	usable := s.stream_vtable != nil && s.impl_read != nil && s.impl_write != nil && s.impl_close != nil;
	if !usable {
		err = .Missing_Procedure;
	}
	return;
}
// Views s as a Read_Write_Seeker; requires read, write, and seek support.
to_read_write_seeker :: proc(s: Stream) -> (r: Read_Write_Seeker, err: Conversion_Error) {
	r.stream = s;
	usable := s.stream_vtable != nil && s.impl_read != nil && s.impl_write != nil && s.impl_seek != nil;
	if !usable {
		err = .Missing_Procedure;
	}
	return;
}
// Views s as a Write_Flusher; requires both write and flush support.
to_write_flusher :: proc(s: Stream) -> (w: Write_Flusher, err: Conversion_Error) {
	w.stream = s;
	usable := s.stream_vtable != nil && s.impl_write != nil && s.impl_flush != nil;
	if !usable {
		err = .Missing_Procedure;
	}
	return;
}
// Views s as a Write_Flush_Closer; requires write, flush, and close support.
to_write_flush_closer :: proc(s: Stream) -> (w: Write_Flush_Closer, err: Conversion_Error) {
	w.stream = s;
	usable := s.stream_vtable != nil && s.impl_write != nil && s.impl_flush != nil && s.impl_close != nil;
	if !usable {
		err = .Missing_Procedure;
	}
	return;
}
// Views s as a Reader_At; err is .Missing_Procedure when read_at is absent.
to_reader_at :: proc(s: Stream) -> (r: Reader_At, err: Conversion_Error) {
	r.stream = s;
	usable := s.stream_vtable != nil && s.impl_read_at != nil;
	if !usable {
		err = .Missing_Procedure;
	}
	return;
}
// Views s as a Writer_At; err is .Missing_Procedure when write_at is absent.
to_writer_at :: proc(s: Stream) -> (w: Writer_At, err: Conversion_Error) {
	w.stream = s;
	usable := s.stream_vtable != nil && s.impl_write_at != nil;
	if !usable {
		err = .Missing_Procedure;
	}
	return;
}
// Views s as a Reader_From; err is .Missing_Procedure when read_from is absent.
to_reader_from :: proc(s: Stream) -> (r: Reader_From, err: Conversion_Error) {
	r.stream = s;
	usable := s.stream_vtable != nil && s.impl_read_from != nil;
	if !usable {
		err = .Missing_Procedure;
	}
	return;
}
// Views s as a Writer_To; err is .Missing_Procedure when write_to is absent.
to_writer_to :: proc(s: Stream) -> (w: Writer_To, err: Conversion_Error) {
	w.stream = s;
	usable := s.stream_vtable != nil && s.impl_write_to != nil;
	if !usable {
		err = .Missing_Procedure;
	}
	return;
}
// Views s as a Write_Closer; requires both write and close support.
to_write_closer :: proc(s: Stream) -> (w: Write_Closer, err: Conversion_Error) {
	w.stream = s;
	usable := s.stream_vtable != nil && s.impl_write != nil && s.impl_close != nil;
	if !usable {
		err = .Missing_Procedure;
	}
	return;
}
// Views s as a Write_Seeker; requires both write and seek support.
to_write_seeker :: proc(s: Stream) -> (w: Write_Seeker, err: Conversion_Error) {
	w.stream = s;
	usable := s.stream_vtable != nil && s.impl_write != nil && s.impl_seek != nil;
	if !usable {
		err = .Missing_Procedure;
	}
	return;
}
// Views s as a Byte_Reader.
// .None when impl_read_byte exists, .Fallback_Possible when only the
// generic impl_read does, .Missing_Procedure otherwise.
to_byte_reader :: proc(s: Stream) -> (b: Byte_Reader, err: Conversion_Error) {
	b.stream = s;
	switch {
	case s.stream_vtable == nil:
		err = .Missing_Procedure;
	case s.impl_read_byte != nil:
		err = .None;
	case s.impl_read != nil:
		err = .Fallback_Possible;
	case:
		err = .Missing_Procedure;
	}
	return;
}
// Views s as a Byte_Scanner.
// Requires impl_unread_byte plus either impl_read_byte (.None) or the
// generic impl_read (.Fallback_Possible); anything else is .Missing_Procedure.
to_byte_scanner :: proc(s: Stream) -> (b: Byte_Scanner, err: Conversion_Error) {
	b.stream = s;
	switch {
	case s.stream_vtable == nil:
		// BUG FIX: a nil v-table previously fell through the outer `if`
		// and reported .None; to_rune_scanner already handles this case.
		err = .Missing_Procedure;
	case s.impl_unread_byte == nil:
		err = .Missing_Procedure;
	case s.impl_read_byte != nil:
		err = .None;
	case s.impl_read != nil:
		err = .Fallback_Possible;
	case:
		err = .Missing_Procedure;
	}
	return;
}
// Views s as a Byte_Writer.
// .None when impl_write_byte exists, .Fallback_Possible when only the
// generic impl_write does, .Missing_Procedure otherwise.
to_byte_writer :: proc(s: Stream) -> (b: Byte_Writer, err: Conversion_Error) {
	b.stream = s;
	switch {
	case s.stream_vtable == nil:
		err = .Missing_Procedure;
	case s.impl_write_byte != nil:
		err = .None;
	case s.impl_write != nil:
		err = .Fallback_Possible;
	case:
		err = .Missing_Procedure;
	}
	return;
}
// Views s as a Rune_Reader.
// .None when impl_read_rune exists, .Fallback_Possible when only the
// generic impl_read does, .Missing_Procedure otherwise.
to_rune_reader :: proc(s: Stream) -> (r: Rune_Reader, err: Conversion_Error) {
	r.stream = s;
	switch {
	case s.stream_vtable == nil:
		err = .Missing_Procedure;
	case s.impl_read_rune != nil:
		err = .None;
	case s.impl_read != nil:
		err = .Fallback_Possible;
	case:
		err = .Missing_Procedure;
	}
	return;
}
// Views s as a Rune_Scanner.
// Requires impl_unread_rune plus either impl_read_rune (.None) or the
// generic impl_read (.Fallback_Possible); anything else is .Missing_Procedure.
to_rune_scanner :: proc(s: Stream) -> (r: Rune_Scanner, err: Conversion_Error) {
	r.stream = s;
	switch {
	case s.stream_vtable == nil:
		err = .Missing_Procedure;
	case s.impl_unread_rune == nil:
		err = .Missing_Procedure;
	case s.impl_read_rune != nil:
		err = .None;
	case s.impl_read != nil:
		err = .Fallback_Possible;
	case:
		err = .Missing_Procedure;
	}
	return;
}

504
core/io/io.odin Normal file
View File

@@ -0,0 +1,504 @@
package io
import "intrinsics"
import "core:runtime"
import "core:unicode/utf8"
// Reference point for offsets passed to `seek`.
Seek_From :: enum {
	Start   = 0, // seek relative to the origin of the file
	Current = 1, // seek relative to the current offset
	End     = 2, // seek relative to the end
}
Error :: enum i32 {
	// No Error
	None = 0,
	// EOF is the error returned by `read` when no more input is available
	EOF,
	// Unexpected_EOF means that EOF was encountered in the middle of reading a fixed-sized block of data
	Unexpected_EOF,
	// Short_Write means that a write accepted fewer bytes than requested but failed to return an explicit error
	Short_Write,
	// Short_Buffer means that a read required a longer buffer than was provided
	Short_Buffer,
	// No_Progress is returned by some implementations of `io.Reader` when many calls
	// to `read` have failed to return any data or error.
	// This is usually a sign of a broken `io.Reader` implementation
	No_Progress,
	// Invalid_Whence means the `whence` value passed to a seek was not a valid Seek_From
	Invalid_Whence,
	// Invalid_Offset means a seek produced a position before the start of the stream
	Invalid_Offset,
	Invalid_Unread,
	Negative_Read,
	Negative_Write,
	Negative_Count,
	Buffer_Full,
	// Empty is returned when a procedure has not been implemented for an io.Stream
	Empty = -1,
}
// Procedure signatures that Stream_VTable entries may implement.
// Each receives the stream itself (via `using`) so implementations can
// reach `stream_data` directly.
Close_Proc :: proc(using s: Stream) -> Error;
Flush_Proc :: proc(using s: Stream) -> Error;
Seek_Proc :: proc(using s: Stream, offset: i64, whence: Seek_From) -> (n: i64, err: Error);
Size_Proc :: proc(using s: Stream) -> i64;
Read_Proc :: proc(using s: Stream, p: []byte) -> (n: int, err: Error);
Read_At_Proc :: proc(using s: Stream, p: []byte, off: i64) -> (n: int, err: Error);
Read_From_Proc :: proc(using s: Stream, r: Reader) -> (n: i64, err: Error);
Read_Byte_Proc :: proc(using s: Stream) -> (byte, Error);
Read_Rune_Proc :: proc(using s: Stream) -> (ch: rune, size: int, err: Error);
Unread_Byte_Proc :: proc(using s: Stream) -> Error;
Unread_Rune_Proc :: proc(using s: Stream) -> Error;
Write_Proc :: proc(using s: Stream, p: []byte) -> (n: int, err: Error);
Write_At_Proc :: proc(using s: Stream, p: []byte, off: i64) -> (n: int, err: Error);
Write_To_Proc :: proc(using s: Stream, w: Writer) -> (n: i64, err: Error);
Write_Byte_Proc :: proc(using s: Stream, c: byte) -> Error;
Write_Rune_Proc :: proc(using s: Stream, r: rune) -> (size: int, err: Error);
Destroy_Proc :: proc(using s: Stream) -> Error;
// A stream is a v-table of optional procedures plus an opaque data pointer
// that is handed back to every impl_* procedure.
Stream :: struct {
	using stream_vtable: ^Stream_VTable,
	stream_data: rawptr, // implementation-specific state
}
// Every entry is optional; the package-level procedures (read, write, …)
// check for nil and either emulate the operation or return .Empty.
Stream_VTable :: struct {
	impl_close: Close_Proc,
	impl_flush: Flush_Proc,
	impl_seek: Seek_Proc,
	impl_size: Size_Proc,
	impl_read: Read_Proc,
	impl_read_at: Read_At_Proc,
	impl_read_byte: Read_Byte_Proc,
	impl_read_rune: Read_Rune_Proc,
	impl_write_to: Write_To_Proc,
	impl_write: Write_Proc,
	impl_write_at: Write_At_Proc,
	impl_write_byte: Write_Byte_Proc,
	impl_write_rune: Write_Rune_Proc,
	impl_read_from: Read_From_Proc,
	impl_unread_byte: Unread_Byte_Proc,
	impl_unread_rune: Unread_Rune_Proc,
	impl_destroy: Destroy_Proc,
}
// Capability wrappers: each is structurally just a Stream, typed to
// document which v-table entries the holder expects to be usable.
Reader :: struct {using stream: Stream};
Writer :: struct {using stream: Stream};
Closer :: struct {using stream: Stream};
Flusher :: struct {using stream: Stream};
Seeker :: struct {using stream: Stream};
Read_Writer :: struct {using stream: Stream};
Read_Closer :: struct {using stream: Stream};
Read_Write_Closer :: struct {using stream: Stream};
Read_Write_Seeker :: struct {using stream: Stream};
Write_Closer :: struct {using stream: Stream};
Write_Seeker :: struct {using stream: Stream};
Write_Flusher :: struct {using stream: Stream};
Write_Flush_Closer :: struct {using stream: Stream};
Reader_At :: struct {using stream: Stream};
Writer_At :: struct {using stream: Stream};
Reader_From :: struct {using stream: Stream};
Writer_To :: struct {using stream: Stream};
Byte_Reader :: struct {using stream: Stream};
Byte_Scanner :: struct {using stream: Stream};
Byte_Writer :: struct {using stream: Stream};
Rune_Reader :: struct {using stream: Stream};
Rune_Scanner :: struct {using stream: Stream};
// Releases the stream: closes it (best effort), then runs impl_destroy
// when one is provided. Returns .Empty when neither reported anything.
// NOTE(review): when impl_destroy exists, any error from `close` is
// discarded in favour of impl_destroy's result — confirm this is intended.
destroy :: proc(s: Stream) -> Error {
	close_err := close({s});
	if s.stream_vtable != nil && s.impl_destroy != nil {
		return s->impl_destroy();
	}
	if close_err != .None {
		return close_err;
	}
	return .Empty;
}
// Reads up to len(p) bytes into p; a stream without read support yields .Empty.
read :: proc(s: Reader, p: []byte) -> (n: int, err: Error) {
	if s.stream_vtable == nil || s.impl_read == nil {
		return 0, .Empty;
	}
	return s->impl_read(p);
}
// Writes the bytes of p; a stream without write support yields .Empty.
write :: proc(s: Writer, p: []byte) -> (n: int, err: Error) {
	if s.stream_vtable == nil || s.impl_write == nil {
		return 0, .Empty;
	}
	return s->impl_write(p);
}
// Moves the stream position by offset relative to whence and returns the
// new position; a stream without seek support yields .Empty.
seek :: proc(s: Seeker, offset: i64, whence: Seek_From) -> (n: i64, err: Error) {
	if s.stream_vtable == nil || s.impl_seek == nil {
		return 0, .Empty;
	}
	return s->impl_seek(offset, whence);
}
// Closes the stream. Closing a stream with no close implementation is a
// successful no-op (returns .None rather than .Empty).
close :: proc(s: Closer) -> Error {
	if s.stream_vtable == nil || s.impl_close == nil {
		return .None;
	}
	return s->impl_close();
}
// Flushes the stream. Flushing a stream with no flush implementation is a
// successful no-op (returns .None rather than .Empty).
flush :: proc(s: Flusher) -> Error {
	if s.stream_vtable == nil || s.impl_flush == nil {
		return .None;
	}
	return s->impl_flush();
}
// Returns the total size of the stream in bytes.
// Prefers impl_size; otherwise measures by seeking to the end and then
// restoring the original position. Returns 0 when neither is possible or
// when any seek fails.
size :: proc(s: Stream) -> i64 {
	if s.stream_vtable == nil {
		return 0;
	}
	if s.impl_size != nil {
		return s->impl_size();
	}
	if s.impl_seek == nil {
		return 0;
	}
	orig, err1 := s->impl_seek(0, .Current);
	if err1 != nil {
		return 0;
	}
	end, err2 := s->impl_seek(0, .End);
	if err2 != nil {
		return 0;
	}
	if _, err3 := s->impl_seek(orig, .Start); err3 != nil {
		return 0;
	}
	return end;
}
// Reads into p from absolute stream offset `offset` (offsets are absolute:
// the Section_Reader in util.odin passes base-adjusted positions here).
// Uses impl_read_at when available; otherwise falls back to seek + read,
// restoring the stream's original position afterwards.
read_at :: proc(r: Reader_At, p: []byte, offset: i64) -> (n: int, err: Error) {
	if r.stream_vtable == nil {
		return 0, .Empty;
	}
	if r.impl_read_at != nil {
		return r->impl_read_at(p, offset);
	}
	if r.impl_seek == nil || r.impl_read == nil {
		return 0, .Empty;
	}
	// BUG FIX: the fallback previously seeked `offset` bytes relative to the
	// CURRENT position (not from the start), and then "restored" to that same
	// shifted position, clobbering the stream's read cursor.
	current_offset: i64;
	current_offset, err = r->impl_seek(0, .Current);
	if err != nil {
		return 0, err;
	}
	if _, err = r->impl_seek(offset, .Start); err != nil {
		return 0, err;
	}
	n, err = r->impl_read(p);
	if err != nil {
		return;
	}
	_, err = r->impl_seek(current_offset, .Start);
	return;
}
// Writes p at absolute stream offset `offset`.
// Uses impl_write_at when available; otherwise falls back to seek + write,
// restoring the stream's original position afterwards.
write_at :: proc(w: Writer_At, p: []byte, offset: i64) -> (n: int, err: Error) {
	if w.stream_vtable == nil {
		return 0, .Empty;
	}
	if w.impl_write_at != nil {
		return w->impl_write_at(p, offset);
	}
	if w.impl_seek == nil || w.impl_write == nil {
		return 0, .Empty;
	}
	// BUG FIX (mirrors read_at): seek from .Start rather than .Current, and
	// restore the ORIGINAL position, not the position after the seek.
	current_offset: i64;
	current_offset, err = w->impl_seek(0, .Current);
	if err != nil {
		return 0, err;
	}
	if _, err = w->impl_seek(offset, .Start); err != nil {
		return 0, err;
	}
	defer w->impl_seek(current_offset, .Start);
	return w->impl_write(p);
}
// Transfers data from r into w using r's impl_write_to, if implemented;
// otherwise reports .Empty.
write_to :: proc(r: Writer_To, w: Writer) -> (n: i64, err: Error) {
	if r.stream_vtable == nil || w.stream_vtable == nil {
		return 0, .Empty;
	}
	if r.impl_write_to == nil {
		return 0, .Empty;
	}
	return r->impl_write_to(w);
}
// Transfers data from r into w using w's impl_read_from, if implemented;
// otherwise reports .Empty.
read_from :: proc(w: Reader_From, r: Reader) -> (n: i64, err: Error) {
	if r.stream_vtable == nil || w.stream_vtable == nil {
		return 0, .Empty;
	}
	// BUG FIX: the guard previously tested r.impl_read_from (the source
	// reader) while calling w->impl_read_from, risking a nil procedure call.
	if w.impl_read_from != nil {
		return w->impl_read_from(r);
	}
	return 0, .Empty;
}
// Reads a single byte, preferring impl_read_byte and falling back to a
// one-byte slice read through the generic impl_read.
read_byte :: proc(r: Byte_Reader) -> (byte, Error) {
	switch {
	case r.stream_vtable == nil:
		return 0, .Empty;
	case r.impl_read_byte != nil:
		return r->impl_read_byte();
	case r.impl_read == nil:
		return 0, .Empty;
	}
	buf: [1]byte;
	_, err := r->impl_read(buf[:]);
	return buf[0], err;
}
// Overload set: write a single byte through either a Byte_Writer or a Writer.
write_byte :: proc{
	write_byte_to_byte_writer,
	write_byte_to_writer,
};
// Byte_Writer entry point for the write_byte overload set.
write_byte_to_byte_writer :: proc(w: Byte_Writer, c: byte) -> Error {
	return _write_byte(auto_cast w, c);
}
// Writer entry point for the write_byte overload set; the underlying Stream
// is re-viewed as a Byte_Writer for the shared implementation.
write_byte_to_writer :: proc(w: Writer, c: byte) -> Error {
	return _write_byte(auto_cast w, c);
}
// Shared implementation for the write_byte overloads: prefers
// impl_write_byte, falling back to a one-byte slice write.
@(private)
_write_byte :: proc(w: Byte_Writer, c: byte) -> Error {
	switch {
	case w.stream_vtable == nil:
		return .Empty;
	case w.impl_write_byte != nil:
		return w->impl_write_byte(c);
	case w.impl_write == nil:
		return .Empty;
	}
	buf := [1]byte{c};
	_, err := w->impl_write(buf[:]);
	return err;
}
// Reads one UTF-8 encoded rune from br.
// Uses impl_read_rune when available; otherwise decodes byte-by-byte
// through the generic impl_read using utf8's acceptance tables.
read_rune :: proc(br: Rune_Reader) -> (ch: rune, size: int, err: Error) {
	if br.stream_vtable == nil {
		return 0, 0, .Empty;
	}
	if br.impl_read_rune != nil {
		return br->impl_read_rune();
	}
	if br.impl_read == nil {
		return 0, 0, .Empty;
	}
	b: [utf8.UTF_MAX]byte;
	_, err = br->impl_read(b[:1]);
	s0 := b[0];
	ch = rune(s0);
	size = 1;
	if err != nil {
		return;
	}
	if ch < utf8.RUNE_SELF {
		// ASCII fast path: a single byte is the whole rune.
		return;
	}
	x := utf8.accept_sizes[s0];
	if x >= 0xf0 {
		// Presumably an invalid leading byte per utf8.accept_sizes —
		// the mask trick yields RUNE_ERROR while keeping size == 1.
		mask := rune(x) << 31 >> 31;
		ch = ch &~ mask | utf8.RUNE_ERROR&mask;
		return;
	}
	// Low bits of the table entry encode the total sequence length;
	// read the remaining continuation bytes.
	sz := int(x&7);
	n: int;
	n, err = br->impl_read(b[1:sz]);
	if err != nil || n+1 < sz {
		// Truncated multi-byte sequence.
		ch = utf8.RUNE_ERROR;
		return;
	}
	ch, size = utf8.decode_rune(b[:sz]);
	return;
}
// Pushes the most recently read byte back onto the stream, when supported.
unread_byte :: proc(s: Byte_Scanner) -> Error {
	if s.stream_vtable == nil || s.impl_unread_byte == nil {
		return .Empty;
	}
	return s->impl_unread_byte();
}
// Pushes the most recently read rune back onto the stream, when supported.
unread_rune :: proc(s: Rune_Scanner) -> Error {
	if s.stream_vtable == nil || s.impl_unread_rune == nil {
		return .Empty;
	}
	return s->impl_unread_rune();
}
// Writes the raw bytes of str to s; returns bytes written and any error.
write_string :: proc(s: Writer, str: string) -> (n: int, err: Error) {
	return write(s, transmute([]byte)str);
}
// Writes r as UTF-8, preferring impl_write_rune, then a single-byte write
// for ASCII, and finally a generic write of the encoded bytes.
write_rune :: proc(s: Writer, r: rune) -> (size: int, err: Error) {
	if s.stream_vtable != nil && s.impl_write_rune != nil {
		return s->impl_write_rune(r);
	}
	if r < utf8.RUNE_SELF {
		// ASCII fast path: no encoding needed.
		if err = write_byte(s, byte(r)); err == nil {
			size = 1;
		}
		return;
	}
	buf, w := utf8.encode_rune(r);
	return write(s, buf[:w]);
}
// Reads exactly len(buf) bytes into buf; see read_at_least for error rules.
read_full :: proc(r: Reader, buf: []byte) -> (n: int, err: Error) {
	return read_at_least(r, buf, len(buf));
}
// Reads from r into buf until at least `min` bytes have been read or an
// error occurs. Returns .Short_Buffer when buf cannot hold `min` bytes,
// and .Unexpected_EOF when some — but fewer than `min` — bytes were read
// before EOF.
read_at_least :: proc(r: Reader, buf: []byte, min: int) -> (n: int, err: Error) {
	if len(buf) < min {
		return 0, .Short_Buffer;
	}
	for n < min && err == nil {
		nn: int;
		nn, err = read(r, buf[n:]);
		// BUG FIX: previously `n += n`, which doubles the running total
		// (so it stays at 0 forever) instead of adding the bytes just read.
		n += nn;
	}
	if n >= min {
		err = nil;
	} else if n > 0 && err == .EOF {
		err = .Unexpected_EOF;
	}
	return;
}
// copy copies from src to dst till either EOF is reached on src or an error occurs
// It returns the number of bytes copied and the first error that occurred whilst copying, if any.
// When neither stream offers a direct-transfer procedure, a temporary
// stack buffer is used; see copy_buffer to supply your own.
copy :: proc(dst: Writer, src: Reader) -> (written: i64, err: Error) {
	return _copy_buffer(dst, src, nil);
}
// copy_buffer is identical to copy but stages the transfer through `buf`
// when a staging buffer is required. A nil buf means one is allocated via
// `intrinsics.alloca`; a non-nil, zero-length buf is a caller error and panics.
copy_buffer :: proc(dst: Writer, src: Reader, buf: []byte) -> (written: i64, err: Error) {
	if buf != nil && len(buf) == 0 {
		panic("empty buffer in io.copy_buffer");
	}
	return _copy_buffer(dst, src, buf);
}
// copy_n copies n bytes (or till an error) from src to dst.
// It returns the number of bytes copied and the first error that occurred whilst copying, if any.
// On return, written == n IFF err == nil
copy_n :: proc(dst: Writer, src: Reader, n: i64) -> (written: i64, err: Error) {
	// The limiter lives on this procedure's stack; it only needs to
	// outlive the copy call below.
	nsrc := inline_limited_reader(&Limited_Reader{}, src, n);
	written, err = copy(dst, nsrc);
	if written == n {
		return n, nil;
	}
	if written < n && err == nil {
		// src stopped early and must have been an EOF
		err = .EOF;
	}
	return;
}
// Core transfer loop shared by copy/copy_buffer/copy_n.
// Fast paths: src's write_to, or dst's read_from, when implemented.
// Otherwise repeatedly reads into `buf` (stack-allocated when nil) and
// writes out until src reports EOF or either side errors.
@(private)
_copy_buffer :: proc(dst: Writer, src: Reader, buf: []byte) -> (written: i64, err: Error) {
	if dst.stream_vtable == nil || src.stream_vtable == nil {
		return 0, .Empty;
	}
	if src.impl_write_to != nil {
		return src->impl_write_to(dst);
	}
	// BUG FIX: this dispatch previously tested src.impl_read_from while
	// calling dst->impl_read_from, which could invoke a nil procedure.
	if dst.impl_read_from != nil {
		return dst->impl_read_from(src);
	}
	buf := buf;
	if buf == nil {
		DEFAULT_SIZE :: 4 * 1024;
		size := DEFAULT_SIZE;
		if src.stream_vtable == _limited_reader_vtable {
			// Don't allocate more scratch space than the limiter will yield.
			l := (^Limited_Reader)(src.stream_data);
			if i64(size) > l.n {
				if l.n < 1 {
					size = 1;
				} else {
					size = int(l.n);
				}
			}
		}
		// NOTE(bill): alloca is fine here
		buf = transmute([]byte)runtime.Raw_Slice{intrinsics.alloca(size, 2*align_of(rawptr)), size};
	}
	for {
		nr, er := read(src, buf);
		if nr > 0 {
			nw, ew := write(dst, buf[0:nr]);
			if nw > 0 {
				written += i64(nw);
			}
			if ew != nil {
				err = ew;
				break;
			}
			if nr != nw {
				// The writer accepted fewer bytes than it was given.
				err = .Short_Write;
				break;
			}
		}
		if er != nil {
			// EOF on the source ends the copy without being an error.
			if er != .EOF {
				err = er;
			}
			break;
		}
	}
	return;
}

113
core/io/multi.odin Normal file
View File

@@ -0,0 +1,113 @@
package io
import "core:runtime"
// A reader that concatenates a sequence of readers, consumed in order.
@(private)
Multi_Reader :: struct {
	readers: [dynamic]Reader, // remaining readers; the front one is active
}
@(private)
_multi_reader_vtable := &Stream_VTable{
	impl_read = proc(s: Stream, p: []byte) -> (n: int, err: Error) {
		mr := (^Multi_Reader)(s.stream_data);
		for len(mr.readers) > 0 {
			r := mr.readers[0];
			n, err = read(r, p);
			if err == .EOF {
				// Current reader exhausted: drop it and advance to the next.
				ordered_remove(&mr.readers, 0);
			}
			if n > 0 || err != .EOF {
				if err == .EOF && len(mr.readers) > 0 {
					// Don't return EOF yet, more readers remain
					err = nil;
				}
				return;
			}
		}
		return 0, .EOF;
	},
	impl_destroy = proc(s: Stream) -> Error {
		mr := (^Multi_Reader)(s.stream_data);
		// mr and its array were allocated from the same allocator in multi_reader.
		context.allocator = mr.readers.allocator;
		delete(mr.readers);
		free(mr);
		return .None;
	},
};
// Returns a Reader that reads from each of `readers` in sequence.
// Nested multi-readers are flattened into a single list.
// Release the internal list with io.destroy when done.
multi_reader :: proc(readers: ..Reader, allocator := context.allocator) -> (r: Reader) {
	context.allocator = allocator;
	mr := new(Multi_Reader);
	all := make([dynamic]Reader, 0, len(readers));
	for child in readers {
		if child.stream_vtable == _multi_reader_vtable {
			// Flatten: splice in the nested multi-reader's remaining readers.
			nested := (^Multi_Reader)(child.stream_data);
			append(&all, ..nested.readers[:]);
		} else {
			append(&all, child);
		}
	}
	mr.readers = all;
	r.stream_vtable = _multi_reader_vtable;
	r.stream_data = mr;
	return;
}
// A writer that duplicates every write to all of its writers.
@(private)
Multi_Writer :: struct {
	writers: []Writer,
	allocator: runtime.Allocator, // allocator that owns this struct and its slice
}
@(private)
_multi_writer_vtable := &Stream_VTable{
	impl_write = proc(s: Stream, p: []byte) -> (n: int, err: Error) {
		mw := (^Multi_Writer)(s.stream_data);
		for w in mw.writers {
			n, err = write(w, p);
			if err != nil {
				return;
			}
			if n != len(p) {
				// A partial write by any destination aborts the whole write.
				err = .Short_Write;
				return;
			}
		}
		return len(p), nil;
	},
	impl_destroy = proc(s: Stream) -> Error {
		mw := (^Multi_Writer)(s.stream_data);
		context.allocator = mw.allocator;
		delete(mw.writers);
		free(mw);
		return .None;
	},
};
// Returns a Writer that duplicates every write to all of `writers`.
// Nested multi-writers are flattened. Release with io.destroy when done.
multi_writer :: proc(writers: ..Writer, allocator := context.allocator) -> (out: Writer) {
	context.allocator = allocator;
	mw := new(Multi_Writer);
	mw.allocator = allocator;
	all := make([dynamic]Writer, 0, len(writers));
	for child in writers {
		if child.stream_vtable == _multi_writer_vtable {
			// Flatten: splice in the nested multi-writer's destinations.
			nested := (^Multi_Writer)(child.stream_data);
			append(&all, ..nested.writers);
		} else {
			append(&all, child);
		}
	}
	mw.writers = all[:];
	out.stream_vtable = _multi_writer_vtable;
	out.stream_data = mw;
	return;
}

192
core/io/util.odin Normal file
View File

@@ -0,0 +1,192 @@
package io
import "core:runtime"
import "core:strconv"
// Writes i to w as unsigned text in the given base (default decimal).
write_u64 :: proc(w: Writer, i: u64, base: int = 10) -> (n: int, err: Error) {
	buf: [32]byte;
	text := strconv.append_bits(buf[:], u64(i), base, false, 64, strconv.digits, nil);
	return write_string(w, text);
}
// Writes i to w as signed text in the given base (default decimal).
write_i64 :: proc(w: Writer, i: i64, base: int = 10) -> (n: int, err: Error) {
	buf: [32]byte;
	text := strconv.append_bits(buf[:], u64(i), base, true, 64, strconv.digits, nil);
	return write_string(w, text);
}
// Word-sized convenience over write_u64.
write_uint :: proc(w: Writer, i: uint, base: int = 10) -> (n: int, err: Error) {
	return write_u64(w, u64(i), base);
}
// Word-sized convenience over write_i64.
write_int :: proc(w: Writer, i: int, base: int = 10) -> (n: int, err: Error) {
	return write_i64(w, i64(i), base);
}
// A reader that mirrors everything it reads from `r` into `w`.
@(private)
Tee_Reader :: struct {
	r: Reader,
	w: Writer,
	allocator: runtime.Allocator, // allocator the Tee_Reader itself came from
}
@(private)
_tee_reader_vtable := &Stream_VTable{
	impl_read = proc(s: Stream, p: []byte) -> (n: int, err: Error) {
		t := (^Tee_Reader)(s.stream_data);
		n, err = read(t.r, p);
		if n > 0 {
			// Mirror the bytes just read; a write failure is surfaced
			// as the read's error.
			if wn, werr := write(t.w, p[:n]); werr != nil {
				return wn, werr;
			}
		}
		return;
	},
	impl_destroy = proc(s: Stream) -> Error {
		t := (^Tee_Reader)(s.stream_data);
		allocator := t.allocator;
		free(t, allocator);
		return .None;
	},
};
// tee_reader returns a Reader that writes to 'w' what it reads from 'r'
// All reads from 'r' performed through it are matched with a corresponding write to 'w'
// There is no internal buffering done
// The write must complete before the read completes
// Any error encountered whilst writing is reported as a 'read' error
// Call io.destroy on the result when done with it
tee_reader :: proc(r: Reader, w: Writer, allocator := context.allocator) -> (out: Reader) {
	t := new(Tee_Reader, allocator);
	t.r, t.w = r, w;
	t.allocator = allocator;
	out.stream_data = t;
	out.stream_vtable = _tee_reader_vtable;
	return;
}
// A Limited_Reader reads from r but limits the amount of data returned to just n bytes.
// Each call to read updates n to reflect the new amount remaining.
// read returns EOF when n <= 0 or when the underlying r returns EOF.
Limited_Reader :: struct {
	r: Reader, // underlying reader
	n: i64,    // max_bytes remaining
}
@(private)
_limited_reader_vtable := &Stream_VTable{
	impl_read = proc(s: Stream, p: []byte) -> (n: int, err: Error) {
		l := (^Limited_Reader)(s.stream_data);
		if l.n <= 0 {
			return 0, .EOF;
		}
		p := p;
		if i64(len(p)) > l.n {
			// Clamp the destination so we never read past the budget.
			p = p[0:l.n];
		}
		n, err = read(l.r, p);
		l.n -= i64(n);
		return;
	},
};
// Heap-allocates a Limited_Reader capping r at n bytes;
// pair with limited_reader_to_reader to use it as a Reader.
new_limited_reader :: proc(r: Reader, n: i64) -> ^Limited_Reader {
	l := new(Limited_Reader);
	l.r, l.n = r, n;
	return l;
}
// Wraps an initialized Limited_Reader as an io.Reader.
limited_reader_to_reader :: proc(l: ^Limited_Reader) -> (r: Reader) {
	r.stream_data = l;
	r.stream_vtable = _limited_reader_vtable;
	return;
}
// Initializes caller-provided storage `l` as an n-byte limiter over r and
// returns it viewed as a Reader; avoids a heap allocation for transient
// limiters (see copy_n).
@(private="package")
inline_limited_reader :: proc(l: ^Limited_Reader, r: Reader, n: i64) -> Reader {
	l.r = r;
	l.n = n;
	return limited_reader_to_reader(l);
}
// Section_Reader implements read, seek, and read_at on a section of an underlying Reader_At
Section_Reader :: struct {
	r: Reader_At, // underlying reader
	base: i64,    // absolute offset where the section begins
	off: i64,     // current absolute read position
	limit: i64,   // absolute offset one past the end of the section
}
// Initializes s to read the n-byte section of r beginning at absolute
// offset `off`.
init_section_reader :: proc(s: ^Section_Reader, r: Reader_At, off: i64, n: i64) {
	s.r = r;
	// BUG FIX: `base` was never initialized, so the v-table's seek and
	// read_at arithmetic (which offsets everything by s.base) treated
	// absolute offset 0 — not `off` — as the start of the section.
	s.base = off;
	s.off = off;
	s.limit = off + n;
	return;
}
// Exposes an initialized Section_Reader as an io.Stream.
section_reader_to_stream :: proc(s: ^Section_Reader) -> (out: Stream) {
	out.stream_vtable = _section_reader_vtable;
	out.stream_data = s;
	return;
}
// V-table for Section_Reader: sequential reads advance `off`, positional
// reads and seeks are translated by `base`, and everything is clamped to
// `limit`.
@(private)
_section_reader_vtable := &Stream_VTable{
	impl_read = proc(stream: Stream, p: []byte) -> (n: int, err: Error) {
		s := (^Section_Reader)(stream.stream_data);
		if s.off >= s.limit {
			return 0, .EOF;
		}
		p := p;
		if max := s.limit - s.off; i64(len(p)) > max {
			// Clamp so the read cannot cross the end of the section.
			p = p[0:max];
		}
		n, err = read_at(s.r, p, s.off);
		s.off += i64(n);
		return;
	},
	impl_read_at = proc(stream: Stream, p: []byte, off: i64) -> (n: int, err: Error) {
		s := (^Section_Reader)(stream.stream_data);
		p, off := p, off;
		// `off` here is relative to the section start.
		if off < 0 || off >= s.limit - s.base {
			return 0, .EOF;
		}
		off += s.base;
		if max := s.limit - off; i64(len(p)) > max {
			// Clamped read: report EOF to flag that the request was truncated.
			p = p[0:max];
			n, err = read_at(s.r, p, off);
			if err == nil {
				err = .EOF;
			}
			return;
		}
		return read_at(s.r, p, off);
	},
	impl_seek = proc(stream: Stream, offset: i64, whence: Seek_From) -> (n: i64, err: Error) {
		s := (^Section_Reader)(stream.stream_data);
		offset := offset;
		switch whence {
		case:
			return 0, .Invalid_Whence;
		case .Start:
			offset += s.base;
		case .Current:
			offset += s.off;
		case .End:
			offset += s.limit;
		}
		if offset < s.base {
			return 0, .Invalid_Offset;
		}
		s.off = offset;
		// Report the new position relative to the section start.
		n = offset - s.base;
		return;
	},
	impl_size = proc(stream: Stream) -> i64 {
		s := (^Section_Reader)(stream.stream_data);
		return s.limit - s.base;
	},
};

View File

@@ -593,6 +593,30 @@ is_inf :: proc{is_inf_f32, is_inf_f64};
inf_f32 :: proc(sign: int) -> f32 {
return f32(inf_f64(sign));
}
inf_f64 :: proc(sign: int) -> f64 {
v: u64;
if sign >= 0 {
v = 0x7ff00000_00000000;
} else {
v = 0xfff00000_00000000;
}
return transmute(f64)v;
}
nan_f32 :: proc() -> f32 {
return f32(nan_f64());
}
nan_f64 :: proc() -> f64 {
v: u64 = 0x7ff80000_00000001;
return transmute(f64)v;
}
is_power_of_two :: proc(x: int) -> bool {
return x > 0 && (x & (x-1)) == 0;
}

View File

@@ -146,6 +146,7 @@ Paren_Expr :: struct {
Selector_Expr :: struct {
using node: Expr,
expr: ^Expr,
op: tokenizer.Token,
field: ^Ident,
}
@@ -154,6 +155,13 @@ Implicit_Selector_Expr :: struct {
field: ^Ident,
}
Selector_Call_Expr :: struct {
using node: Expr,
expr: ^Expr,
call: ^Call_Expr,
modified_call: bool,
}
Index_Expr :: struct {
using node: Expr,
expr: ^Expr,
@@ -206,9 +214,9 @@ Ternary_Expr :: struct {
Ternary_If_Expr :: struct {
using node: Expr,
x: ^Expr,
x: ^Expr,
op1: tokenizer.Token,
cond: ^Expr,
cond: ^Expr,
op2: tokenizer.Token,
y: ^Expr,
}
@@ -217,7 +225,7 @@ Ternary_When_Expr :: struct {
using node: Expr,
x: ^Expr,
op1: tokenizer.Token,
cond: ^Expr,
cond: ^Expr,
op2: tokenizer.Token,
y: ^Expr,
}
@@ -561,7 +569,6 @@ Distinct_Type :: struct {
Opaque_Type :: struct {
using node: Expr,
tok: tokenizer.Token_Kind,
type: ^Expr,
}

View File

@@ -190,6 +190,50 @@ peek_token_kind :: proc(p: ^Parser, kind: tokenizer.Token_Kind, lookahead := 0)
return;
}
peek_token :: proc(p: ^Parser, lookahead := 0) -> (tok: tokenizer.Token) {
prev_parser := p^;
defer p^ = prev_parser;
p.tok.err = nil;
for i := 0; i <= lookahead; i += 1 {
advance_token(p);
}
tok = p.curr_tok;
return;
}
skip_possible_newline :: proc(p: ^Parser) -> bool {
if .Insert_Semicolon not_in p.tok.flags {
return false;
}
prev := p.curr_tok;
if tokenizer.is_newline(prev) {
advance_token(p);
return true;
}
return false;
}
skip_possible_newline_for_literal :: proc(p: ^Parser) -> bool {
if .Insert_Semicolon not_in p.tok.flags {
return false;
}
curr_pos := p.curr_tok.pos;
if tokenizer.is_newline(p.curr_tok) {
next := peek_token(p);
if curr_pos.line+1 >= next.pos.line {
#partial switch next.kind {
case .Open_Brace, .Else, .Where:
advance_token(p);
return true;
}
}
}
return false;
}
next_token0 :: proc(p: ^Parser) -> bool {
p.curr_tok = tokenizer.scan(&p.tok);
@@ -280,7 +324,7 @@ expect_token :: proc(p: ^Parser, kind: tokenizer.Token_Kind) -> tokenizer.Token
prev := p.curr_tok;
if prev.kind != kind {
e := tokenizer.to_string(kind);
g := tokenizer.to_string(prev.kind);
g := tokenizer.token_to_string(prev);
error(p, prev.pos, "expected '%s', got '%s'", e, g);
}
advance_token(p);
@@ -291,7 +335,7 @@ expect_token_after :: proc(p: ^Parser, kind: tokenizer.Token_Kind, msg: string)
prev := p.curr_tok;
if prev.kind != kind {
e := tokenizer.to_string(kind);
g := tokenizer.to_string(prev.kind);
g := tokenizer.token_to_string(prev);
error(p, prev.pos, "expected '%s' after %s, got '%s'", e, msg, g);
}
advance_token(p);
@@ -300,8 +344,10 @@ expect_token_after :: proc(p: ^Parser, kind: tokenizer.Token_Kind, msg: string)
expect_operator :: proc(p: ^Parser) -> tokenizer.Token {
prev := p.curr_tok;
if !tokenizer.is_operator(prev.kind) {
g := tokenizer.to_string(prev.kind);
if prev.kind == .If || prev.kind == .When {
// okay
} else if !tokenizer.is_operator(prev.kind) {
g := tokenizer.token_to_string(prev);
error(p, prev.pos, "expected an operator, got '%s'", g);
}
advance_token(p);
@@ -398,7 +444,16 @@ expect_semicolon :: proc(p: ^Parser, node: ^ast.Node) -> bool {
}
if node != nil {
if prev.pos.line != p.curr_tok.pos.line {
if .Insert_Semicolon in p.tok.flags {
#partial switch p.curr_tok.kind {
case .Close_Brace, .Close_Paren, .Else, .EOF:
return true;
}
if is_semicolon_optional_for_node(p, node) {
return true;
}
} else if prev.pos.line != p.curr_tok.pos.line {
if is_semicolon_optional_for_node(p, node) {
return true;
}
@@ -418,7 +473,7 @@ expect_semicolon :: proc(p: ^Parser, node: ^ast.Node) -> bool {
}
}
error(p, prev.pos, "expected ';', got %s", tokenizer.to_string(p.curr_tok.kind));
error(p, prev.pos, "expected ';', got %s", tokenizer.token_to_string(p.curr_tok));
return false;
}
@@ -491,6 +546,7 @@ parse_when_stmt :: proc(p: ^Parser) -> ^ast.When_Stmt {
body = convert_stmt_to_body(p, parse_stmt(p));
} else {
body = parse_block_stmt(p, true);
skip_possible_newline_for_literal(p);
}
if allow_token(p, .Else) {
@@ -566,6 +622,7 @@ parse_if_stmt :: proc(p: ^Parser) -> ^ast.If_Stmt {
body = convert_stmt_to_body(p, parse_stmt(p));
} else {
body = parse_block_stmt(p, false);
skip_possible_newline_for_literal(p);
}
if allow_token(p, .Else) {
@@ -627,6 +684,7 @@ parse_for_stmt :: proc(p: ^Parser) -> ^ast.Stmt {
body = convert_stmt_to_body(p, parse_stmt(p));
} else {
body = parse_body(p);
skip_possible_newline_for_literal(p);
}
range_stmt := ast.new(ast.Range_Stmt, tok.pos, body.end);
@@ -661,6 +719,7 @@ parse_for_stmt :: proc(p: ^Parser) -> ^ast.Stmt {
body = convert_stmt_to_body(p, parse_stmt(p));
} else {
body = parse_body(p);
skip_possible_newline_for_literal(p);
}
@@ -838,6 +897,8 @@ parse_attribute :: proc(p: ^Parser, tok: tokenizer.Token, open_kind, close_kind:
attribute.elems = elems[:];
attribute.close = close.pos;
skip_possible_newline(p);
decl := parse_stmt(p);
switch d in &decl.derived {
case ast.Value_Decl:
@@ -1026,10 +1087,11 @@ parse_stmt :: proc(p: ^Parser) -> ^ast.Stmt {
body = convert_stmt_to_body(p, parse_stmt(p));
} else {
body = parse_block_stmt(p, false);
skip_possible_newline_for_literal(p);
}
if bad_stmt {
return ast.new(ast.Bad_Stmt, inline_tok.pos, end_pos(p.prev_tok));
return ast.new(ast.Bad_Stmt, inline_tok.pos, end_pos(p.prev_tok));
}
range_stmt := ast.new(ast.Inline_Range_Stmt, inline_tok.pos, body.end);
@@ -1204,7 +1266,7 @@ parse_stmt :: proc(p: ^Parser) -> ^ast.Stmt {
}
tok := advance_token(p);
error(p, tok.pos, "expected a statement, got %s", tokenizer.to_string(tok.kind));
error(p, tok.pos, "expected a statement, got %s", tokenizer.token_to_string(tok));
s := ast.new(ast.Bad_Stmt, tok.pos, end_pos(tok));
return s;
}
@@ -1957,13 +2019,6 @@ parse_operand :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
bl.tok = tok;
return bl;
case .Size_Of, .Align_Of, .Offset_Of:
tok := advance_token(p);
expr := ast.new(ast.Implicit, tok.pos, end_pos(tok));
expr.tok = tok;
return parse_call_expr(p, expr);
case .Open_Brace:
if !lhs {
return parse_literal_value(p, nil);
@@ -1992,15 +2047,22 @@ parse_operand :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
case .Opaque:
tok := advance_token(p);
warn(p, tok.pos, "opaque is deprecated in favour of #opaque");
type := parse_type(p);
ot := ast.new(ast.Opaque_Type, tok.pos, type.end);
ot.tok = tok.kind;
ot.type = type;
return ot;
case .Hash:
tok := expect_token(p, .Hash);
name := expect_token(p, .Ident);
switch name.text {
case "opaque":
type := parse_type(p);
ot := ast.new(ast.Opaque_Type, tok.pos, type.end);
ot.type = type;
return ot;
case "type":
type := parse_type(p);
hp := ast.new(ast.Helper_Type, tok.pos, type.end);
@@ -2156,7 +2218,10 @@ parse_operand :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
where_token: tokenizer.Token;
where_clauses: []^ast.Expr;
if (p.curr_tok.kind == .Where) {
skip_possible_newline_for_literal(p);
if p.curr_tok.kind == .Where {
where_token = expect_token(p, .Where);
prev_level := p.expr_level;
p.expr_level = -1;
@@ -2225,25 +2290,6 @@ parse_operand :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
ti.specialization = nil;
return ti;
case .Type_Of:
tok := advance_token(p);
i := ast.new(ast.Implicit, tok.pos, end_pos(tok));
i.tok = tok;
type: ^ast.Expr = parse_call_expr(p, i);
for p.curr_tok.kind == .Period {
period := advance_token(p);
field := parse_ident(p);
sel := ast.new(ast.Selector_Expr, period.pos, field.end);
sel.expr = type;
sel.field = field;
type = sel;
}
return type;
case .Pointer:
tok := expect_token(p, .Pointer);
elem := parse_type(p);
@@ -2351,12 +2397,15 @@ parse_operand :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
where_token: tokenizer.Token;
where_clauses: []^ast.Expr;
if (p.curr_tok.kind == .Where) {
skip_possible_newline_for_literal(p);
if p.curr_tok.kind == .Where {
where_token = expect_token(p, .Where);
prev_level := p.expr_level;
where_prev_level := p.expr_level;
p.expr_level = -1;
where_clauses = parse_rhs_expr_list(p);
p.expr_level = prev_level;
p.expr_level = where_prev_level;
}
expect_token(p, .Open_Brace);
@@ -2414,12 +2463,15 @@ parse_operand :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
where_token: tokenizer.Token;
where_clauses: []^ast.Expr;
if (p.curr_tok.kind == .Where) {
skip_possible_newline_for_literal(p);
if p.curr_tok.kind == .Where {
where_token = expect_token(p, .Where);
prev_level := p.expr_level;
where_prev_level := p.expr_level;
p.expr_level = -1;
where_clauses = parse_rhs_expr_list(p);
p.expr_level = prev_level;
p.expr_level = where_prev_level;
}
variants: [dynamic]^ast.Expr;
@@ -2628,7 +2680,7 @@ parse_literal_value :: proc(p: ^Parser, type: ^ast.Expr) -> ^ast.Comp_Lit {
return lit;
}
parse_call_expr :: proc(p: ^Parser, operand: ^ast.Expr) -> ^ast.Call_Expr {
parse_call_expr :: proc(p: ^Parser, operand: ^ast.Expr) -> ^ast.Expr {
args: [dynamic]^ast.Expr;
ellipsis: tokenizer.Token;
@@ -2686,6 +2738,14 @@ parse_call_expr :: proc(p: ^Parser, operand: ^ast.Expr) -> ^ast.Call_Expr {
ce.ellipsis = ellipsis;
ce.close = close.pos;
o := ast.unparen_expr(operand);
if se, ok := o.derived.(ast.Selector_Expr); ok && se.op.kind == .Arrow_Right {
sce := ast.new(ast.Selector_Call_Expr, ce.pos, ce.end);
sce.expr = o;
sce.call = ce;
return sce;
}
return ce;
}
@@ -2739,7 +2799,7 @@ parse_atom_expr :: proc(p: ^Parser, value: ^ast.Expr, lhs: bool) -> (operand: ^a
case .Colon:
interval = advance_token(p);
is_slice_op = true;
if (p.curr_tok.kind != .Close_Bracket && p.curr_tok.kind != .EOF) {
if p.curr_tok.kind != .Close_Bracket && p.curr_tok.kind != .EOF {
indicies[1] = parse_expr(p, false);
}
}
@@ -2776,6 +2836,7 @@ parse_atom_expr :: proc(p: ^Parser, value: ^ast.Expr, lhs: bool) -> (operand: ^a
sel := ast.new(ast.Selector_Expr, operand.pos, field.end);
sel.expr = operand;
sel.op = tok;
sel.field = field;
operand = sel;
@@ -2811,6 +2872,24 @@ parse_atom_expr :: proc(p: ^Parser, value: ^ast.Expr, lhs: bool) -> (operand: ^a
operand = ast.new(ast.Bad_Expr, operand.pos, end_pos(tok));
}
case .Arrow_Right:
tok := expect_token(p, .Arrow_Right);
#partial switch p.curr_tok.kind {
case .Ident:
field := parse_ident(p);
sel := ast.new(ast.Selector_Expr, operand.pos, field.end);
sel.expr = operand;
sel.op = tok;
sel.field = field;
operand = sel;
case:
error(p, p.curr_tok.pos, "expected a selector");
advance_token(p);
operand = ast.new(ast.Bad_Expr, operand.pos, end_pos(tok));
}
case .Pointer:
op := expect_token(p, .Pointer);
deref := ast.new(ast.Deref_Expr, operand.pos, end_pos(op));

View File

@@ -133,7 +133,6 @@ Token_Kind :: enum u32 {
Defer,
Return,
Proc,
Macro,
Struct,
Union,
Enum,
@@ -150,11 +149,6 @@ Token_Kind :: enum u32 {
Inline,
No_Inline,
Context,
Size_Of,
Align_Of,
Offset_Of,
Type_Of,
Const,
B_Keyword_End,
COUNT,
@@ -268,7 +262,6 @@ tokens := [Token_Kind.COUNT]string {
"defer",
"return",
"proc",
"macro",
"struct",
"union",
"enum",
@@ -285,16 +278,24 @@ tokens := [Token_Kind.COUNT]string {
"inline",
"no_inline",
"context",
"size_of",
"align_of",
"offset_of",
"type_of",
"const",
"",
};
custom_keyword_tokens: []string;
// is_newline reports whether tok is an automatically inserted semicolon
// that came from a newline (kind == .Semicolon with literal text "\n").
is_newline :: proc(tok: Token) -> bool {
	return tok.kind == .Semicolon && tok.text == "\n";
}

// token_to_string returns a human-readable name for tok, rendering an
// auto-inserted newline-semicolon as "newline" so error messages read
// naturally; all other tokens fall back to to_string on their kind.
token_to_string :: proc(tok: Token) -> string {
	if is_newline(tok) {
		return "newline";
	}
	return to_string(tok.kind);
}
to_string :: proc(kind: Token_Kind) -> string {
if Token_Kind.Invalid <= kind && kind < Token_Kind.COUNT {
return tokens[kind];

View File

@@ -1,22 +1,31 @@
package odin_tokenizer
import "core:fmt"
import "core:unicode"
import "core:unicode/utf8"
Error_Handler :: #type proc(pos: Pos, fmt: string, args: ..any);
Flag :: enum {
Insert_Semicolon,
}
Flags :: distinct bit_set[Flag; u32];
Tokenizer :: struct {
// Immutable data
path: string,
src: []byte,
err: Error_Handler,
flags: Flags,
// Tokenizing state
ch: rune,
offset: int,
read_offset: int,
line_offset: int,
line_count: int,
insert_semicolon: bool,
// Mutable data
error_count: int,
@@ -105,11 +114,18 @@ peek_byte :: proc(t: ^Tokenizer, offset := 0) -> byte {
}
skip_whitespace :: proc(t: ^Tokenizer) {
for t.ch == ' ' ||
t.ch == '\t' ||
t.ch == '\n' ||
t.ch == '\r' {
advance_rune(t);
for {
switch t.ch {
case ' ', '\t', '\r':
advance_rune(t);
case '\n':
if t.insert_semicolon {
return;
}
advance_rune(t);
case:
return;
}
}
}
@@ -122,12 +138,13 @@ is_letter :: proc(r: rune) -> bool {
return true;
}
}
// TODO(bill): Add unicode lookup tables
return false;
return unicode.is_letter(r);
}
is_digit :: proc(r: rune) -> bool {
// TODO(bill): Add unicode lookup tables
return '0' <= r && r <= '9';
if '0' <= r && r <= '9' {
return true;
}
return unicode.is_digit(r);
}
@@ -491,6 +508,8 @@ scan :: proc(t: ^Tokenizer) -> Token {
lit: string;
pos := offset_to_pos(t, offset);
insert_semicolon := false;
switch ch := t.ch; true {
case is_letter(ch):
lit = scan_identifier(t);
@@ -509,24 +528,39 @@ scan :: proc(t: ^Tokenizer) -> Token {
break check_keyword;
}
}
if kind == .Ident && lit == "notin" {
kind = .Not_In;
#partial switch kind {
case .Ident, .Context, .Typeid, .Break, .Continue, .Fallthrough, .Return:
insert_semicolon = true;
}
}
case '0' <= ch && ch <= '9':
insert_semicolon = true;
kind, lit = scan_number(t, false);
case:
advance_rune(t);
switch ch {
case -1:
kind = .EOF;
if t.insert_semicolon {
t.insert_semicolon = false;
kind = .Semicolon;
lit = "\n";
}
case '\n':
t.insert_semicolon = false;
kind = .Semicolon;
lit = "\n";
case '"':
insert_semicolon = true;
kind = .String;
lit = scan_string(t);
case '\'':
insert_semicolon = true;
kind = .Rune;
lit = scan_rune(t);
case '`':
insert_semicolon = true;
kind = .String;
lit = scan_raw_string(t);
case '=':
@@ -540,10 +574,13 @@ scan :: proc(t: ^Tokenizer) -> Token {
case '#':
kind = .Hash;
if t.ch == '!' {
insert_semicolon = t.insert_semicolon;
kind = .Comment;
lit = scan_comment(t);
}
case '?': kind = .Question;
case '?':
insert_semicolon = true;
kind = .Question;
case '@': kind = .At;
case '$': kind = .Dollar;
case '^': kind = .Pointer;
@@ -562,6 +599,7 @@ scan :: proc(t: ^Tokenizer) -> Token {
case '*': kind = switch2(t, .Mul, .Mul_Eq);
case '/':
if t.ch == '/' || t.ch == '*' {
insert_semicolon = t.insert_semicolon;
kind = .Comment;
lit = scan_comment(t);
} else {
@@ -604,11 +642,17 @@ scan :: proc(t: ^Tokenizer) -> Token {
case ',': kind = .Comma;
case ';': kind = .Semicolon;
case '(': kind = .Open_Paren;
case ')': kind = .Close_Paren;
case ')':
insert_semicolon = true;
kind = .Close_Paren;
case '[': kind = .Open_Bracket;
case ']': kind = .Close_Bracket;
case ']':
insert_semicolon = true;
kind = .Close_Bracket;
case '{': kind = .Open_Brace;
case '}': kind = .Close_Brace;
case '}':
insert_semicolon = true;
kind = .Close_Brace;
case '\\': kind = .Back_Slash;
@@ -616,10 +660,15 @@ scan :: proc(t: ^Tokenizer) -> Token {
if ch != utf8.RUNE_BOM {
error(t, t.offset, "illegal character '%r': %d", ch, ch);
}
insert_semicolon = t.insert_semicolon; // preserve insert_semicolon info
kind = .Invalid;
}
}
if .Insert_Semicolon in t.flags {
t.insert_semicolon = insert_semicolon;
}
if lit == "" {
lit = string(t.src[offset : t.offset]);
}

69
core/os/stream.odin Normal file
View File

@@ -0,0 +1,69 @@
package os
import "core:io"
// stream_from_handle wraps an os.Handle in an io.Stream backed by
// _file_stream_vtable, packing the handle value into stream_data so the
// vtable callbacks can recover it later.
stream_from_handle :: proc(fd: Handle) -> io.Stream {
	return io.Stream{
		stream_data   = rawptr(uintptr(fd)),
		stream_vtable = _file_stream_vtable,
	};
}
// Shared io.Stream_VTable used by stream_from_handle: each callback
// recovers the os.Handle that was packed into stream_data and forwards
// to the matching os procedure.
@(private)
_file_stream_vtable := &io.Stream_VTable{
	impl_read = proc(s: io.Stream, p: []byte) -> (n: int, err: io.Error) {
		fd := Handle(uintptr(s.stream_data));
		os_err: Errno;
		n, os_err = read(fd, p);
		// NOTE(review): os_err is discarded and err stays its zero value — confirm intended.
		return;
	},
	impl_read_at = proc(s: io.Stream, p: []byte, offset: i64) -> (n: int, err: io.Error) {
		// Positional reads are only wired up on Windows here; elsewhere
		// this returns the zero values untouched.
		when ODIN_OS == "windows" {
			fd := Handle(uintptr(s.stream_data));
			os_err: Errno;
			n, os_err = read_at(fd, p, offset);
		}
		return;
	},
	impl_write = proc(s: io.Stream, p: []byte) -> (n: int, err: io.Error) {
		fd := Handle(uintptr(s.stream_data));
		os_err: Errno;
		n, os_err = write(fd, p);
		return;
	},
	impl_write_at = proc(s: io.Stream, p: []byte, offset: i64) -> (n: int, err: io.Error) {
		// Windows-only, mirroring impl_read_at.
		when ODIN_OS == "windows" {
			fd := Handle(uintptr(s.stream_data));
			os_err: Errno;
			n, os_err = write_at(fd, p, offset);
			_ = os_err;
		}
		return;
	},
	impl_seek = proc(s: io.Stream, offset: i64, whence: io.Seek_From) -> (i64, io.Error) {
		fd := Handle(uintptr(s.stream_data));
		n, os_err := seek(fd, offset, int(whence));
		_ = os_err; // seek errors are swallowed; always reports success
		return n, nil;
	},
	impl_size = proc(s: io.Stream) -> i64 {
		fd := Handle(uintptr(s.stream_data));
		sz, _ := file_size(fd);
		return sz;
	},
	impl_flush = proc(s: io.Stream) -> io.Error {
		when ODIN_OS == "windows" {
			fd := Handle(uintptr(s.stream_data));
			flush(fd);
		} else {
			// TODO(bill): other operating systems
		}
		return nil;
	},
	impl_close = proc(s: io.Stream) -> io.Error {
		fd := Handle(uintptr(s.stream_data));
		close(fd);
		return nil;
	},
};

View File

@@ -8,7 +8,8 @@ SEPARATOR :: '\\';
SEPARATOR_STRING :: `\`;
LIST_SEPARATOR :: ';';
reserved_names := []string{
@(private)
reserved_names := [?]string{
"CON", "PRN", "AUX", "NUL",
"COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9",
"LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9",

View File

@@ -28,8 +28,6 @@ Match_Error :: enum {
// match requires that the pattern matches the entirety of the name, not just a substring
// The only possible error returned is .Syntax_Error
//
// NOTE(bill): This is effectively the shell pattern matching system found
//
match :: proc(pattern, name: string) -> (matched: bool, err: Match_Error) {
pattern, name := pattern, name;
pattern_loop: for len(pattern) > 0 {

View File

@@ -1206,3 +1206,82 @@ as_raw_data :: proc(a: any) -> (value: rawptr, valid: bool) {
return;
}
/*
not_equal :: proc(a, b: any) -> bool {
return !equal(a, b);
}
equal :: proc(a, b: any) -> bool {
if a == nil && b == nil {
return true;
}
if a.id != b.id {
return false;
}
if a.data == b.data {
return true;
}
t := type_info_of(a.id);
if .Comparable not_in t.flags {
return false;
}
if t.size == 0 {
return true;
}
if .Simple_Compare in t.flags {
return mem.compare_byte_ptrs((^byte)(a.data), (^byte)(b.data), t.size) == 0;
}
t = runtime.type_info_core(t);
#partial switch v in t.variant {
case Type_Info_String:
if v.is_cstring {
x := string((^cstring)(a.data)^);
y := string((^cstring)(b.data)^);
return x == y;
} else {
x := (^string)(a.data)^;
y := (^string)(b.data)^;
return x == y;
}
case Type_Info_Array:
for i in 0..<v.count {
x := rawptr(uintptr(a.data) + uintptr(v.elem_size*i));
y := rawptr(uintptr(b.data) + uintptr(v.elem_size*i));
if !equal(any{x, v.elem.id}, any{y, v.elem.id}) {
return false;
}
}
case Type_Info_Enumerated_Array:
for i in 0..<v.count {
x := rawptr(uintptr(a.data) + uintptr(v.elem_size*i));
y := rawptr(uintptr(b.data) + uintptr(v.elem_size*i));
if !equal(any{x, v.elem.id}, any{y, v.elem.id}) {
return false;
}
}
case Type_Info_Struct:
if v.equal != nil {
return v.equal(a.data, b.data);
} else {
for offset, i in v.offsets {
x := rawptr(uintptr(a.data) + offset);
y := rawptr(uintptr(b.data) + offset);
id := v.types[i].id;
if !equal(any{x, id}, any{y, id}) {
return false;
}
}
}
}
return true;
}
*/

View File

@@ -1,5 +1,6 @@
package reflect
import "core:io"
import "core:strings"
are_types_identical :: proc(a, b: ^Type_Info) -> bool {
@@ -218,6 +219,14 @@ is_unsigned :: proc(info: ^Type_Info) -> bool {
return false;
}
// is_byte reports whether info's base type is a one-byte integer
// (i.e. u8/byte-sized). Returns false for nil.
is_byte :: proc(info: ^Type_Info) -> bool {
	if info == nil { return false; }
	#partial switch i in type_info_base(info).variant {
	case Type_Info_Integer: return info.size == 1;
	}
	return false;
}
is_integer :: proc(info: ^Type_Info) -> bool {
if info == nil { return false; }
@@ -352,258 +361,278 @@ is_relative_slice :: proc(info: ^Type_Info) -> bool {
write_typeid :: proc(buf: ^strings.Builder, id: typeid) {
write_typeid_builder :: proc(buf: ^strings.Builder, id: typeid) {
write_type(buf, type_info_of(id));
}
write_typeid_writer :: proc(writer: io.Writer, id: typeid) {
write_type(writer, type_info_of(id));
}
write_type :: proc(buf: ^strings.Builder, ti: ^Type_Info) {
write_typeid :: proc{
write_typeid_builder,
write_typeid_writer,
};
write_type :: proc{
write_type_builder,
write_type_writer,
};
write_type_builder :: proc(buf: ^strings.Builder, ti: ^Type_Info) -> int {
return write_type_writer(strings.to_writer(buf), ti);
}
write_type_writer :: proc(w: io.Writer, ti: ^Type_Info) -> (n: int) {
using strings;
if ti == nil {
write_string(buf, "nil");
return;
return write_string(w, "nil");
}
_n1 :: proc(err: io.Error) -> int { return 1 if err == nil else 0; };
_n2 :: proc(n: int, _: io.Error) -> int { return n; };
_n :: proc{_n1, _n2};
switch info in ti.variant {
case Type_Info_Named:
write_string(buf, info.name);
return write_string(w, info.name);
case Type_Info_Integer:
switch ti.id {
case int: write_string(buf, "int");
case uint: write_string(buf, "uint");
case uintptr: write_string(buf, "uintptr");
case int: return write_string(w, "int");
case uint: return write_string(w, "uint");
case uintptr: return write_string(w, "uintptr");
case:
write_byte(buf, 'i' if info.signed else 'u');
write_i64(buf, i64(8*ti.size), 10);
n += _n(io.write_byte(w, 'i' if info.signed else 'u'));
n += _n(io.write_i64(w, i64(8*ti.size), 10));
switch info.endianness {
case .Platform: // Okay
case .Little: write_string(buf, "le");
case .Big: write_string(buf, "be");
case .Little: n += write_string(w, "le");
case .Big: n += write_string(w, "be");
}
}
case Type_Info_Rune:
write_string(buf, "rune");
n += _n(io.write_string(w, "rune"));
case Type_Info_Float:
write_byte(buf, 'f');
write_i64(buf, i64(8*ti.size), 10);
n += _n(io.write_byte(w, 'f'));
n += _n(io.write_i64(w, i64(8*ti.size), 10));
switch info.endianness {
case .Platform: // Okay
case .Little: write_string(buf, "le");
case .Big: write_string(buf, "be");
case .Little: n += write_string(w, "le");
case .Big: n += write_string(w, "be");
}
case Type_Info_Complex:
write_string(buf, "complex");
write_i64(buf, i64(8*ti.size), 10);
n += _n(io.write_string(w, "complex"));
n += _n(io.write_i64(w, i64(8*ti.size), 10));
case Type_Info_Quaternion:
write_string(buf, "quaternion");
write_i64(buf, i64(8*ti.size), 10);
n += _n(io.write_string(w, "quaternion"));
n += _n(io.write_i64(w, i64(8*ti.size), 10));
case Type_Info_String:
if info.is_cstring {
write_string(buf, "cstring");
n += write_string(w, "cstring");
} else {
write_string(buf, "string");
n += write_string(w, "string");
}
case Type_Info_Boolean:
switch ti.id {
case bool: write_string(buf, "bool");
case bool: n += write_string(w, "bool");
case:
write_byte(buf, 'b');
write_i64(buf, i64(8*ti.size), 10);
n += _n(io.write_byte(w, 'b'));
n += _n(io.write_i64(w, i64(8*ti.size), 10));
}
case Type_Info_Any:
write_string(buf, "any");
n += write_string(w, "any");
case Type_Info_Type_Id:
write_string(buf, "typeid");
n += write_string(w, "typeid");
case Type_Info_Pointer:
if info.elem == nil {
write_string(buf, "rawptr");
write_string(w, "rawptr");
} else {
write_string(buf, "^");
write_type(buf, info.elem);
write_string(w, "^");
write_type(w, info.elem);
}
case Type_Info_Procedure:
write_string(buf, "proc");
n += write_string(w, "proc");
if info.params == nil {
write_string(buf, "()");
n += write_string(w, "()");
} else {
t := info.params.variant.(Type_Info_Tuple);
write_string(buf, "(");
n += write_string(w, "(");
for t, i in t.types {
if i > 0 {
write_string(buf, ", ");
n += write_string(w, ", ");
}
write_type(buf, t);
n += write_type(w, t);
}
write_string(buf, ")");
n += write_string(w, ")");
}
if info.results != nil {
write_string(buf, " -> ");
write_type(buf, info.results);
n += write_string(w, " -> ");
n += write_type(w, info.results);
}
case Type_Info_Tuple:
count := len(info.names);
if count != 1 { write_string(buf, "("); }
if count != 1 { n += write_string(w, "("); }
for name, i in info.names {
if i > 0 { write_string(buf, ", "); }
if i > 0 { n += write_string(w, ", "); }
t := info.types[i];
if len(name) > 0 {
write_string(buf, name);
write_string(buf, ": ");
n += write_string(w, name);
n += write_string(w, ": ");
}
write_type(buf, t);
n += write_type(w, t);
}
if count != 1 { write_string(buf, ")"); }
if count != 1 { n += write_string(w, ")"); }
case Type_Info_Array:
write_string(buf, "[");
write_i64(buf, i64(info.count), 10);
write_string(buf, "]");
write_type(buf, info.elem);
n += _n(io.write_string(w, "["));
n += _n(io.write_i64(w, i64(info.count), 10));
n += _n(io.write_string(w, "]"));
n += write_type(w, info.elem);
case Type_Info_Enumerated_Array:
write_string(buf, "[");
write_type(buf, info.index);
write_string(buf, "]");
write_type(buf, info.elem);
n += write_string(w, "[");
n += write_type(w, info.index);
n += write_string(w, "]");
n += write_type(w, info.elem);
case Type_Info_Dynamic_Array:
write_string(buf, "[dynamic]");
write_type(buf, info.elem);
n += _n(io.write_string(w, "[dynamic]"));
n += write_type(w, info.elem);
case Type_Info_Slice:
write_string(buf, "[]");
write_type(buf, info.elem);
n += _n(io.write_string(w, "[]"));
n += write_type(w, info.elem);
case Type_Info_Map:
write_string(buf, "map[");
write_type(buf, info.key);
write_byte(buf, ']');
write_type(buf, info.value);
n += _n(io.write_string(w, "map["));
n += write_type(w, info.key);
n += _n(io.write_byte(w, ']'));
n += write_type(w, info.value);
case Type_Info_Struct:
switch info.soa_kind {
case .None: // Ignore
case .Fixed:
write_string(buf, "#soa[");
write_i64(buf, i64(info.soa_len));
write_byte(buf, ']');
write_type(buf, info.soa_base_type);
n += _n(io.write_string(w, "#soa["));
n += _n(io.write_i64(w, i64(info.soa_len)));
n += _n(io.write_byte(w, ']'));
n += write_type(w, info.soa_base_type);
return;
case .Slice:
write_string(buf, "#soa[]");
write_type(buf, info.soa_base_type);
n += _n(io.write_string(w, "#soa[]"));
n += write_type(w, info.soa_base_type);
return;
case .Dynamic:
write_string(buf, "#soa[dynamic]");
write_type(buf, info.soa_base_type);
n += _n(io.write_string(w, "#soa[dynamic]"));
n += write_type(w, info.soa_base_type);
return;
}
write_string(buf, "struct ");
if info.is_packed { write_string(buf, "#packed "); }
if info.is_raw_union { write_string(buf, "#raw_union "); }
n += write_string(w, "struct ");
if info.is_packed { n += write_string(w, "#packed "); }
if info.is_raw_union { n += write_string(w, "#raw_union "); }
if info.custom_align {
write_string(buf, "#align ");
write_i64(buf, i64(ti.align), 10);
write_byte(buf, ' ');
n += _n(io.write_string(w, "#align "));
n += _n(io.write_i64(w, i64(ti.align), 10));
n += _n(io.write_byte(w, ' '));
}
write_byte(buf, '{');
n += _n(io.write_byte(w, '{'));
for name, i in info.names {
if i > 0 { write_string(buf, ", "); }
write_string(buf, name);
write_string(buf, ": ");
write_type(buf, info.types[i]);
if i > 0 { n += write_string(w, ", "); }
n += _n(io.write_string(w, name));
n += _n(io.write_string(w, ": "));
n += write_type(w, info.types[i]);
}
write_byte(buf, '}');
n += _n(io.write_byte(w, '}'));
case Type_Info_Union:
write_string(buf, "union ");
n += write_string(w, "union ");
if info.custom_align {
write_string(buf, "#align ");
write_i64(buf, i64(ti.align), 10);
write_byte(buf, ' ');
n += write_string(w, "#align ");
n += _n(io.write_i64(w, i64(ti.align), 10));
n += _n(io.write_byte(w, ' '));
}
write_byte(buf, '{');
n += _n(io.write_byte(w, '{'));
for variant, i in info.variants {
if i > 0 { write_string(buf, ", "); }
write_type(buf, variant);
if i > 0 { n += write_string(w, ", "); }
n += write_type(w, variant);
}
write_byte(buf, '}');
n += _n(io.write_byte(w, '}'));
case Type_Info_Enum:
write_string(buf, "enum ");
write_type(buf, info.base);
write_string(buf, " {");
n += write_string(w, "enum ");
n += write_type(w, info.base);
n += write_string(w, " {");
for name, i in info.names {
if i > 0 { write_string(buf, ", "); }
write_string(buf, name);
if i > 0 { n += write_string(w, ", "); }
n += write_string(w, name);
}
write_byte(buf, '}');
n += _n(io.write_byte(w, '}'));
case Type_Info_Bit_Field:
write_string(buf, "bit_field ");
n += write_string(w, "bit_field ");
if ti.align != 1 {
write_string(buf, "#align ");
write_i64(buf, i64(ti.align), 10);
write_byte(buf, ' ');
n += write_string(w, "#align ");
n += _n(io.write_i64(w, i64(ti.align), 10));
n += _n(io.write_byte(w, ' '));
}
write_string(buf, " {");
n += write_string(w, " {");
for name, i in info.names {
if i > 0 { write_string(buf, ", "); }
write_string(buf, name);
write_string(buf, ": ");
write_i64(buf, i64(info.bits[i]), 10);
if i > 0 { n += write_string(w, ", "); }
n += write_string(w, name);
n += write_string(w, ": ");
n += _n(io.write_i64(w, i64(info.bits[i]), 10));
}
write_byte(buf, '}');
n += _n(io.write_byte(w, '}'));
case Type_Info_Bit_Set:
write_string(buf, "bit_set[");
n += write_string(w, "bit_set[");
switch {
case is_enum(info.elem):
write_type(buf, info.elem);
n += write_type(w, info.elem);
case is_rune(info.elem):
write_encoded_rune(buf, rune(info.lower));
write_string(buf, "..");
write_encoded_rune(buf, rune(info.upper));
n += write_encoded_rune(w, rune(info.lower));
n += write_string(w, "..");
n += write_encoded_rune(w, rune(info.upper));
case:
write_i64(buf, info.lower, 10);
write_string(buf, "..");
write_i64(buf, info.upper, 10);
n += _n(io.write_i64(w, info.lower, 10));
n += write_string(w, "..");
n += _n(io.write_i64(w, info.upper, 10));
}
if info.underlying != nil {
write_string(buf, "; ");
write_type(buf, info.underlying);
n += write_string(w, "; ");
n += write_type(w, info.underlying);
}
write_byte(buf, ']');
n += _n(io.write_byte(w, ']'));
case Type_Info_Opaque:
write_string(buf, "opaque ");
write_type(buf, info.elem);
n += write_string(w, "#opaque ");
n += write_type(w, info.elem);
case Type_Info_Simd_Vector:
if info.is_x86_mmx {
write_string(buf, "intrinsics.x86_mmx");
n += write_string(w, "intrinsics.x86_mmx");
} else {
write_string(buf, "#simd[");
write_i64(buf, i64(info.count));
write_byte(buf, ']');
write_type(buf, info.elem);
n += write_string(w, "#simd[");
n += _n(io.write_i64(w, i64(info.count)));
n += _n(io.write_byte(w, ']'));
n += write_type(w, info.elem);
}
case Type_Info_Relative_Pointer:
write_string(buf, "#relative(");
write_type(buf, info.base_integer);
write_string(buf, ") ");
write_type(buf, info.pointer);
n += write_string(w, "#relative(");
n += write_type(w, info.base_integer);
n += write_string(w, ") ");
n += write_type(w, info.pointer);
case Type_Info_Relative_Slice:
write_string(buf, "#relative(");
write_type(buf, info.base_integer);
write_string(buf, ") ");
write_type(buf, info.slice);
n += write_string(w, "#relative(");
n += write_type(w, info.base_integer);
n += write_string(w, ") ");
n += write_type(w, info.slice);
}
return;
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,838 @@
package runtime
// Maybe is a built-in optional type: either a value of type T or nil.
@builtin
Maybe :: union(T: typeid) #maybe {T};

// Per-thread storage for the default temporary allocator.
@thread_local global_default_temp_allocator_data: Default_Temp_Allocator;

// init_global_temporary_allocator initialises this thread's default
// temporary allocator with a buffer of `size` bytes. backup_allocator is
// forwarded to default_temp_allocator_init — presumably used when the
// primary buffer is exhausted; confirm against its implementation.
@builtin
init_global_temporary_allocator :: proc(size: int, backup_allocator := context.allocator) {
	default_temp_allocator_init(&global_default_temp_allocator_data, size, backup_allocator);
}
// copy_slice copies elements from src into dst, stopping at the shorter
// of the two lengths, and returns the number of elements copied.
@builtin
copy_slice :: proc "contextless" (dst, src: $T/[]$E) -> int {
	count := min(len(dst), len(src));
	if count < 0 {
		count = 0;
	}
	if count != 0 {
		mem_copy(raw_data(dst), raw_data(src), count*size_of(E));
	}
	return count;
}
// copy_from_string copies bytes from src into dst, stopping at the
// shorter of the two lengths, and returns the number of bytes copied.
@builtin
copy_from_string :: proc "contextless" (dst: $T/[]$E/u8, src: $S/string) -> int {
	count := min(len(dst), len(src));
	if count < 0 {
		count = 0;
	}
	if count != 0 {
		mem_copy(raw_data(dst), raw_data(src), count);
	}
	return count;
}
// copy is the overload set covering slice→slice and string→byte-slice copies.
@builtin
copy :: proc{copy_slice, copy_from_string};
// unordered_remove removes the element at index in O(1) by moving the
// last element into its slot; element order is NOT preserved.
// Reports a bounds-check error when index is out of range.
@builtin
unordered_remove :: proc(array: ^$D/[dynamic]$T, index: int, loc := #caller_location) {
	bounds_check_error_loc(loc, index, len(array));
	n := len(array)-1;
	if index != n {
		array[index] = array[n];
	}
	pop(array);
}

// ordered_remove removes the element at index by shifting all later
// elements left one slot; order is preserved at O(n) cost.
// Reports a bounds-check error when index is out of range.
@builtin
ordered_remove :: proc(array: ^$D/[dynamic]$T, index: int, loc := #caller_location) {
	bounds_check_error_loc(loc, index, len(array));
	if index+1 < len(array) {
		copy(array[index:], array[index+1:]);
	}
	pop(array);
}

// remove_range removes the half-open range [lo, hi) from the array,
// shifting any trailing elements down; order is preserved.
@builtin
remove_range :: proc(array: ^$D/[dynamic]$T, lo, hi: int, loc := #caller_location) {
	slice_expr_error_lo_hi_loc(loc, lo, hi, len(array));
	n := max(hi-lo, 0);
	if n > 0 {
		if hi != len(array) {
			copy(array[lo:], array[hi:]);
		}
		(^Raw_Dynamic_Array)(array).len -= n;
	}
}
// pop removes and returns the last element; asserts that the array is
// non-empty.
@builtin
pop :: proc(array: ^$T/[dynamic]$E, loc := #caller_location) -> (res: E) #no_bounds_check {
	assert(len(array) > 0, "", loc);
	res = array[len(array)-1];
	(^Raw_Dynamic_Array)(array).len -= 1;
	return res;
}

// pop_safe is like pop but returns ok=false instead of asserting when
// the array is empty.
@builtin
pop_safe :: proc(array: ^$T/[dynamic]$E) -> (res: E, ok: bool) #no_bounds_check {
	if len(array) == 0 {
		return;
	}
	res, ok = array[len(array)-1], true;
	(^Raw_Dynamic_Array)(array).len -= 1;
	return;
}

// pop_front removes and returns the first element, shifting the rest
// down one slot (O(n)); asserts that the array is non-empty.
@builtin
pop_front :: proc(array: ^$T/[dynamic]$E, loc := #caller_location) -> (res: E) #no_bounds_check {
	assert(len(array) > 0, "", loc);
	res = array[0];
	if len(array) > 1 {
		copy(array[0:], array[1:]);
	}
	(^Raw_Dynamic_Array)(array).len -= 1;
	return res;
}

// pop_front_safe is like pop_front but returns ok=false instead of
// asserting when the array is empty.
@builtin
pop_front_safe :: proc(array: ^$T/[dynamic]$E) -> (res: E, ok: bool) #no_bounds_check {
	if len(array) == 0 {
		return;
	}
	res, ok = array[0], true;
	if len(array) > 1 {
		copy(array[0:], array[1:]);
	}
	(^Raw_Dynamic_Array)(array).len -= 1;
	return;
}
// Overload sets grouping the container built-ins under common names.
@builtin
clear :: proc{clear_dynamic_array, clear_map};
@builtin
reserve :: proc{reserve_dynamic_array, reserve_map};
@builtin
resize :: proc{resize_dynamic_array};
@builtin
free :: proc{mem_free};
@builtin
free_all :: proc{mem_free_all};
// delete_string frees str's backing memory with the given allocator.
@builtin
delete_string :: proc(str: string, allocator := context.allocator, loc := #caller_location) {
	mem_free(raw_data(str), allocator, loc);
}
// delete_cstring frees str's backing memory with the given allocator.
@builtin
delete_cstring :: proc(str: cstring, allocator := context.allocator, loc := #caller_location) {
	mem_free((^byte)(str), allocator, loc);
}
// delete_dynamic_array frees the array's backing memory using the
// allocator stored in the dynamic array itself.
@builtin
delete_dynamic_array :: proc(array: $T/[dynamic]$E, loc := #caller_location) {
	mem_free(raw_data(array), array.allocator, loc);
}
// delete_slice frees the slice's backing memory with the given allocator.
@builtin
delete_slice :: proc(array: $T/[]$E, allocator := context.allocator, loc := #caller_location) {
	mem_free(raw_data(array), allocator, loc);
}
// delete_map frees both backing arrays of the map: the hash array and
// the entry array.
// NOTE(review): hashes are freed via delete_slice's default allocator
// (context.allocator) while entries use raw.entries.allocator — confirm
// these are always the same allocator.
@builtin
delete_map :: proc(m: $T/map[$K]$V, loc := #caller_location) {
	raw := transmute(Raw_Map)m;
	delete_slice(raw.hashes);
	mem_free(raw.entries.data, raw.entries.allocator, loc);
}
// delete is the general deallocation overload set for the above.
@builtin
delete :: proc{
	delete_string,
	delete_cstring,
	delete_dynamic_array,
	delete_slice,
	delete_map,
};
// The new built-in procedure allocates memory. The first argument is a type,
// not a value; the value returned is a pointer to a newly allocated,
// zero-initialised value of that type, using the specified allocator
// (default: context.allocator). Returns nil when allocation fails.
@builtin
new :: inline proc($T: typeid, allocator := context.allocator, loc := #caller_location) -> ^T {
	ptr := (^T)(mem_alloc(size_of(T), align_of(T), allocator, loc));
	if ptr != nil { ptr^ = T{}; }
	return ptr;
}

// new_clone allocates a new value of type T and initialises it with a
// copy of data. Returns nil when allocation fails.
@builtin
new_clone :: inline proc(data: $T, allocator := context.allocator, loc := #caller_location) -> ^T {
	ptr := (^T)(mem_alloc(size_of(T), align_of(T), allocator, loc));
	if ptr != nil { ptr^ = data; }
	return ptr;
}
// make_aligned allocates a slice of len elements of type E with the given
// alignment. Returns nil on allocation failure (unless E is zero-sized).
make_aligned :: proc($T: typeid/[]$E, auto_cast len: int, alignment: int, allocator := context.allocator, loc := #caller_location) -> T {
	make_slice_error_loc(loc, len);
	data := mem_alloc(size_of(E)*len, alignment, allocator, loc);
	if data == nil && size_of(E) != 0 {
		return nil;
	}
	// mem_zero(data, size_of(E)*len);
	s := Raw_Slice{data, len};
	return transmute(T)s;
}
// make_slice allocates a slice of len elements with E's natural alignment.
@builtin
make_slice :: inline proc($T: typeid/[]$E, auto_cast len: int, allocator := context.allocator, loc := #caller_location) -> T {
	return make_aligned(T, len, align_of(E), allocator, loc);
}
// make_dynamic_array creates an empty dynamic array with capacity 16.
@builtin
make_dynamic_array :: proc($T: typeid/[dynamic]$E, allocator := context.allocator, loc := #caller_location) -> T {
	return make_dynamic_array_len_cap(T, 0, 16, allocator, loc);
}
// make_dynamic_array_len creates a dynamic array whose length and
// capacity are both len.
@builtin
make_dynamic_array_len :: proc($T: typeid/[dynamic]$E, auto_cast len: int, allocator := context.allocator, loc := #caller_location) -> T {
	return make_dynamic_array_len_cap(T, len, len, allocator, loc);
}
// make_dynamic_array_len_cap creates a dynamic array with the given
// length and capacity; on allocation failure len and cap are forced to 0.
@builtin
make_dynamic_array_len_cap :: proc($T: typeid/[dynamic]$E, auto_cast len: int, auto_cast cap: int, allocator := context.allocator, loc := #caller_location) -> T {
	make_dynamic_array_error_loc(loc, len, cap);
	data := mem_alloc(size_of(E)*cap, align_of(E), allocator, loc);
	s := Raw_Dynamic_Array{data, len, cap, allocator};
	if data == nil && size_of(E) != 0 {
		s.len, s.cap = 0, 0;
	}
	// mem_zero(data, size_of(E)*cap);
	return transmute(T)s;
}
// make_map creates a map with space reserved for cap entries (default 16).
@builtin
make_map :: proc($T: typeid/map[$K]$E, auto_cast cap: int = 16, allocator := context.allocator, loc := #caller_location) -> T {
	make_map_expr_error_loc(loc, cap);
	context.allocator = allocator;
	m: T;
	reserve_map(&m, cap);
	return m;
}
// The make built-in procedure allocates and initializes a value of type
// slice, dynamic array, or map (only). Similar to new, the first argument
// is a type, not a value. Unlike new, make's return type is the same as
// the type of its argument, not a pointer to it.
// Make uses the specified allocator (default: context.allocator).
@builtin
make :: proc{
	make_slice,
	make_dynamic_array,
	make_dynamic_array_len,
	make_dynamic_array_len_cap,
	make_map,
};
// clear_map removes all entries from m without freeing its memory:
// the entry array's length is reset and every hash slot is set to -1.
// No-op when m is nil.
@builtin
clear_map :: inline proc "contextless" (m: ^$T/map[$K]$V) {
	if m == nil {
		return;
	}
	raw_map := (^Raw_Map)(m);
	entries := (^Raw_Dynamic_Array)(&raw_map.entries);
	entries.len = 0;
	for _, i in raw_map.hashes {
		raw_map.hashes[i] = -1;
	}
}
// reserve_map grows m's backing storage to hold at least capacity
// entries. No-op when m is nil.
@builtin
reserve_map :: proc(m: ^$T/map[$K]$V, capacity: int) {
	if m != nil {
		__dynamic_map_reserve(__get_map_header(m), capacity);
	}
}
// The delete_key built-in procedure deletes the element with the specified key (m[key]) from the map.
// If m is nil, or there is no such element, this procedure is a no-op.
@builtin
delete_key :: proc(m: ^$T/map[$K]$V, key: K) {
	if m != nil {
		key := key;
		__dynamic_map_delete_key(__get_map_header(m), __get_map_hash(&key));
	}
}
// append_elem appends a single element to the dynamic array, growing
// capacity geometrically (2*cap + max(8, 1)) when full. No-op when array
// is nil; if growth fails, the element is silently not appended.
@builtin
append_elem :: proc(array: ^$T/[dynamic]$E, arg: E, loc := #caller_location) {
	if array == nil {
		return;
	}
	arg_len := 1;
	if cap(array) < len(array)+arg_len {
		cap := 2 * cap(array) + max(8, arg_len);
		_ = reserve(array, cap, loc);
	}
	// reserve may have failed: clamp to the space actually available.
	arg_len = min(cap(array)-len(array), arg_len);
	if arg_len > 0 {
		a := (^Raw_Dynamic_Array)(array);
		if size_of(E) != 0 {
			data := (^E)(a.data);
			assert(data != nil);
			val := arg;
			mem_copy(ptr_offset(data, a.len), &val, size_of(E));
		}
		a.len += arg_len;
	}
}
// append_elems appends all of args to the dynamic array, growing
// capacity geometrically (2*cap + max(8, len(args))) when needed.
// No-op when array is nil or args is empty; if growth fails, only as
// many elements as fit are appended.
@builtin
append_elems :: proc(array: ^$T/[dynamic]$E, args: ..E, loc := #caller_location) {
	if array == nil {
		return;
	}
	arg_len := len(args);
	if arg_len <= 0 {
		return;
	}
	if cap(array) < len(array)+arg_len {
		cap := 2 * cap(array) + max(8, arg_len);
		_ = reserve(array, cap, loc);
	}
	// reserve may have failed: clamp to the space actually available.
	arg_len = min(cap(array)-len(array), arg_len);
	if arg_len > 0 {
		a := (^Raw_Dynamic_Array)(array);
		if size_of(E) != 0 {
			data := (^E)(a.data);
			assert(data != nil);
			mem_copy(ptr_offset(data, a.len), &args[0], size_of(E) * arg_len);
		}
		a.len += arg_len;
	}
}
// The append_string built-in procedure appends a string to the end of a [dynamic]u8 like type.
// A string is byte-compatible with []E here, so the work is delegated to append_elems.
@builtin
append_elem_string :: proc(array: ^$T/[dynamic]$E, arg: $A/string, loc := #caller_location) {
	bytes := transmute([]E)arg;
	append_elems(array=array, args=bytes, loc=loc);
}
// Grows the backing storage of an #soa dynamic array so each field array can
// hold at least `capacity` elements. All field arrays share one allocation,
// laid out back to back. Returns false only when the new allocation fails.
@builtin
reserve_soa :: proc(array: ^$T/#soa[dynamic]$E, capacity: int, loc := #caller_location) -> bool {
	if array == nil {
		return false;
	}
	old_cap := cap(array);
	if capacity <= old_cap {
		return true;
	}
	// First use sets the allocator, matching plain dynamic-array behaviour.
	if array.allocator.procedure == nil {
		array.allocator = context.allocator;
	}
	assert(array.allocator.procedure != nil);
	ti := type_info_of(typeid_of(T));
	ti = type_info_base(ti);
	si := &ti.variant.(Type_Info_Struct);
	// The SOA struct is N field pointers followed by len, cap, and the
	// allocator; hence the data-field count is the offset count minus 3.
	field_count := uintptr(len(si.offsets) - 3);
	if field_count == 0 {
		return true;
	}
	// `cap` lives in the pointer-sized slot after the N field pointers and len.
	cap_ptr := cast(^int)rawptr(uintptr(array) + (field_count + 1)*size_of(rawptr));
	assert(cap_ptr^ == old_cap);
	// First pass: compute old/new total sizes, honouring per-field alignment.
	old_size := 0;
	new_size := 0;
	max_align := 0;
	for i in 0..<field_count {
		type := si.types[i].variant.(Type_Info_Pointer).elem;
		max_align = max(max_align, type.align);
		old_size = align_forward_int(old_size, type.align);
		new_size = align_forward_int(new_size, type.align);
		old_size += type.size * old_cap;
		new_size += type.size * capacity;
	}
	old_size = align_forward_int(old_size, max_align);
	new_size = align_forward_int(new_size, max_align);
	old_data := (^rawptr)(array)^;
	new_data := array.allocator.procedure(
		array.allocator.data, .Alloc, new_size, max_align,
		nil, old_size, 0, loc,
	);
	if new_data == nil {
		return false;
	}
	cap_ptr^ = capacity;
	// Second pass: copy each field array into its new slot and update the
	// per-field pointer stored at the front of the SOA struct.
	old_offset := 0;
	new_offset := 0;
	for i in 0..<field_count {
		type := si.types[i].variant.(Type_Info_Pointer).elem;
		max_align = max(max_align, type.align);
		old_offset = align_forward_int(old_offset, type.align);
		new_offset = align_forward_int(new_offset, type.align);
		new_data_elem := rawptr(uintptr(new_data) + uintptr(new_offset));
		old_data_elem := rawptr(uintptr(old_data) + uintptr(old_offset));
		mem_copy(new_data_elem, old_data_elem, type.size * old_cap);
		(^rawptr)(uintptr(array) + i*size_of(rawptr))^ = new_data_elem;
		old_offset += type.size * old_cap;
		new_offset += type.size * capacity;
	}
	// Release the old combined allocation.
	array.allocator.procedure(
		array.allocator.data, .Free, 0, max_align,
		old_data, old_size, 0, loc,
	);
	return true;
}
// Appends one element to an #soa dynamic array, scattering each struct field
// of `arg` into its corresponding per-field array.
@builtin
append_soa_elem :: proc(array: ^$T/#soa[dynamic]$E, arg: E, loc := #caller_location) {
	if array == nil {
		return;
	}
	arg_len := 1;
	if cap(array) <= len(array)+arg_len {
		cap := 2 * cap(array) + max(8, arg_len);
		_ = reserve_soa(array, cap, loc);
	}
	// If the reservation failed, clamp to the space actually available.
	arg_len = min(cap(array)-len(array), arg_len);
	if arg_len > 0 {
		ti := type_info_of(typeid_of(T));
		ti = type_info_base(ti);
		si := &ti.variant.(Type_Info_Struct);
		// N field pointers + len + cap + allocator => data fields = offsets - 3.
		field_count := uintptr(len(si.offsets) - 3);
		if field_count == 0 {
			return;
		}
		// All field arrays live in one allocation; `data` is its base
		// (the first field pointer).
		data := (^rawptr)(array)^;
		// `len` is stored immediately after the N field pointers.
		len_ptr := cast(^int)rawptr(uintptr(array) + (field_count + 0)*size_of(rawptr));
		soa_offset := 0;
		item_offset := 0;
		arg_copy := arg;
		arg_ptr := &arg_copy;
		max_align := 0;
		for i in 0..<field_count {
			type := si.types[i].variant.(Type_Info_Pointer).elem;
			max_align = max(max_align, type.align);
			soa_offset = align_forward_int(soa_offset, type.align);
			item_offset = align_forward_int(item_offset, type.align);
			// Copy field i of the argument to the end of field i's array.
			dst := rawptr(uintptr(data) + uintptr(soa_offset) + uintptr(type.size * len_ptr^));
			src := rawptr(uintptr(arg_ptr) + uintptr(item_offset));
			mem_copy(dst, src, type.size);
			soa_offset += type.size * cap(array);
			item_offset += type.size;
		}
		len_ptr^ += arg_len;
	}
}
// Appends any number of elements to an #soa dynamic array, scattering each
// struct field of every argument into its corresponding per-field array.
@builtin
append_soa_elems :: proc(array: ^$T/#soa[dynamic]$E, args: ..E, loc := #caller_location) {
	if array == nil {
		return;
	}
	arg_len := len(args);
	if arg_len == 0 {
		return;
	}
	if cap(array) <= len(array)+arg_len {
		cap := 2 * cap(array) + max(8, arg_len);
		_ = reserve_soa(array, cap, loc);
	}
	// If the reservation failed, clamp to the space actually available.
	arg_len = min(cap(array)-len(array), arg_len);
	if arg_len > 0 {
		ti := type_info_of(typeid_of(T));
		ti = type_info_base(ti);
		si := &ti.variant.(Type_Info_Struct);
		// N field pointers + len + cap + allocator => data fields = offsets - 3.
		field_count := uintptr(len(si.offsets) - 3);
		if field_count == 0 {
			return;
		}
		// All field arrays live in one allocation; `data` is its base.
		data := (^rawptr)(array)^;
		// `len` is stored immediately after the N field pointers.
		len_ptr := cast(^int)rawptr(uintptr(array) + (field_count + 0)*size_of(rawptr));
		soa_offset := 0;
		item_offset := 0;
		args_ptr := &args[0];
		max_align := 0;
		for i in 0..<field_count {
			type := si.types[i].variant.(Type_Info_Pointer).elem;
			max_align = max(max_align, type.align);
			soa_offset = align_forward_int(soa_offset, type.align);
			item_offset = align_forward_int(item_offset, type.align);
			dst := uintptr(data) + uintptr(soa_offset) + uintptr(type.size * len_ptr^);
			src := uintptr(args_ptr) + uintptr(item_offset);
			// Strided copy: pull field i out of each AOS argument in turn.
			for j in 0..<arg_len {
				d := rawptr(dst + uintptr(j*type.size));
				s := rawptr(src + uintptr(j*size_of(E)));
				mem_copy(d, s, type.size);
			}
			soa_offset += type.size * cap(array);
			item_offset += type.size;
		}
		len_ptr^ += arg_len;
	}
}
// The append_string built-in procedure appends multiple strings to the end of a [dynamic]u8 like type.
// Each string's bytes are appended in argument order.
@builtin
append_string :: proc(array: ^$T/[dynamic]$E/u8, args: ..string, loc := #caller_location) {
	for s in args {
		append_elems(array=array, args=transmute([]E)(s), loc=loc);
	}
}
// The append built-in procedure appends elements to the end of a dynamic array.
// Overload resolution dispatches on the argument types.
@builtin append :: proc{append_elem, append_elems, append_elem_string};
// The append_soa built-in procedure appends elements to the end of an #soa dynamic array
@builtin append_soa :: proc{append_soa_elem, append_soa_elems};
// Appends a single uninitialised/zeroed slot to the end of a dynamic array.
// A nil array is a no-op.
@builtin
append_nothing :: proc(array: ^$T/[dynamic]$E, loc := #caller_location) {
	if array == nil {
		return;
	}
	// Fix: forward `loc` so allocation failures inside resize report the
	// original caller's location rather than this procedure's.
	resize(array, len(array)+1, loc);
}
// Inserts one element at `index`, shifting later elements up by one.
// Returns true on success. `index` is not bounds-checked against len(array)
// (#no_bounds_check), so the caller must pass a valid position.
@builtin
insert_at_elem :: proc(array: ^$T/[dynamic]$E, index: int, arg: E, loc := #caller_location) -> (ok: bool) #no_bounds_check {
	if array == nil {
		return;
	}
	n := len(array);
	m :: 1;
	resize(array, n+m, loc);
	// Only proceed if resize actually made room (it can fail to allocate).
	if n+m <= len(array) {
		when size_of(E) != 0 {
			copy(array[index+m:], array[index:]);
			array[index] = arg;
		}
		ok = true;
	}
	return;
}
// Inserts multiple elements at `index`, shifting later elements up by
// len(args). Returns true on success (an empty `args` succeeds trivially).
// `index` is not bounds-checked (#no_bounds_check).
@builtin
insert_at_elems :: proc(array: ^$T/[dynamic]$E, index: int, args: ..E, loc := #caller_location) -> (ok: bool) #no_bounds_check {
	if array == nil {
		return;
	}
	if len(args) == 0 {
		ok = true;
		return;
	}
	n := len(array);
	m := len(args);
	resize(array, n+m, loc);
	// Only proceed if resize actually made room (it can fail to allocate).
	if n+m <= len(array) {
		when size_of(E) != 0 {
			copy(array[index+m:], array[index:]);
			copy(array[index:], args);
		}
		ok = true;
	}
	return;
}
// Inserts the bytes of `arg` at `index` in a [dynamic]u8-like array, shifting
// later elements up by len(arg). Returns true on success (an empty string
// succeeds trivially). `index` is not bounds-checked (#no_bounds_check).
@builtin
insert_at_elem_string :: proc(array: ^$T/[dynamic]$E/u8, index: int, arg: string, loc := #caller_location) -> (ok: bool) #no_bounds_check {
	if array == nil {
		return;
	}
	// Fix: the body referenced an undefined `args`; view the string's bytes
	// as []E first, mirroring append_elem_string.
	args := transmute([]E)arg;
	if len(args) == 0 {
		ok = true;
		return;
	}
	n := len(array);
	m := len(args);
	resize(array, n+m, loc);
	// Only proceed if resize actually made room (it can fail to allocate).
	if n+m <= len(array) {
		copy(array[index+m:], array[index:]);
		copy(array[index:], args);
		ok = true;
	}
	return;
}
// The insert_at built-in procedure inserts element(s) into a dynamic array at a given index.
@builtin insert_at :: proc{insert_at_elem, insert_at_elems, insert_at_elem_string};
// Resets a dynamic array's length to zero without freeing its storage.
@builtin
clear_dynamic_array :: inline proc "contextless" (array: ^$T/[dynamic]$E) {
	if array == nil {
		return;
	}
	(^Raw_Dynamic_Array)(array).len = 0;
}
// Ensures the dynamic array can hold at least `capacity` elements.
// Returns true on success, including when no growth was needed.
@builtin
reserve_dynamic_array :: proc(array: ^$T/[dynamic]$E, capacity: int, loc := #caller_location) -> bool {
	if array == nil {
		return false;
	}
	a := (^Raw_Dynamic_Array)(array);
	if capacity <= a.cap {
		return true;
	}
	// First use sets the allocator, as callers of append/reserve expect.
	if a.allocator.procedure == nil {
		a.allocator = context.allocator;
	}
	assert(a.allocator.procedure != nil);
	old_size := a.cap * size_of(E);
	new_size := capacity * size_of(E);
	allocator := a.allocator;
	new_data := allocator.procedure(
		allocator.data, .Resize, new_size, align_of(E),
		a.data, old_size, 0, loc,
	);
	// Fix: a nil result is only a failure for non-zero-size elements; this
	// matches __dynamic_array_reserve, which treats elem_size == 0 as success.
	if new_data == nil && size_of(E) != 0 {
		return false;
	}
	a.data = new_data;
	a.cap = capacity;
	return true;
}
// Sets the dynamic array's length to `length`, growing the backing storage
// if needed. Returns true on success.
@builtin
resize_dynamic_array :: proc(array: ^$T/[dynamic]$E, length: int, loc := #caller_location) -> bool {
	if array == nil {
		return false;
	}
	a := (^Raw_Dynamic_Array)(array);
	if length <= a.cap {
		// Enough capacity already: just adjust the length (negative clamps to 0).
		a.len = max(length, 0);
		return true;
	}
	// First use sets the allocator, as callers of append/resize expect.
	if a.allocator.procedure == nil {
		a.allocator = context.allocator;
	}
	assert(a.allocator.procedure != nil);
	old_size := a.cap * size_of(E);
	new_size := length * size_of(E);
	allocator := a.allocator;
	new_data := allocator.procedure(
		allocator.data, .Resize, new_size, align_of(E),
		a.data, old_size, 0, loc,
	);
	// Fix: a nil result is only a failure for non-zero-size elements; this
	// matches __dynamic_array_reserve, which treats elem_size == 0 as success.
	if new_data == nil && size_of(E) != 0 {
		return false;
	}
	a.data = new_data;
	a.len = length;
	a.cap = length;
	return true;
}
// Adds `elem` to the bit_set in place and returns the updated set.
@builtin
incl_elem :: inline proc(s: ^$S/bit_set[$E; $U], elem: E) -> S {
	updated := s^ | {elem};
	s^ = updated;
	return updated;
}
// Adds every element of `elems` to the bit_set in place; returns the result.
@builtin
incl_elems :: inline proc(s: ^$S/bit_set[$E; $U], elems: ..E) -> S {
	for e in elems {
		s^ = s^ | {e};
	}
	return s^;
}
// Unions `other` into the bit_set in place and returns the result.
@builtin
incl_bit_set :: inline proc(s: ^$S/bit_set[$E; $U], other: S) -> S {
	s^ = s^ | other;
	return s^;
}
// Removes `elem` from the bit_set in place and returns the updated set.
@builtin
excl_elem :: inline proc(s: ^$S/bit_set[$E; $U], elem: E) -> S {
	updated := s^ &~ {elem};
	s^ = updated;
	return updated;
}
// Removes every element of `elems` from the bit_set in place; returns the result.
@builtin
excl_elems :: inline proc(s: ^$S/bit_set[$E; $U], elems: ..E) -> S {
	for e in elems {
		s^ = s^ &~ {e};
	}
	return s^;
}
// Subtracts `other` from the bit_set in place and returns the result.
@builtin
excl_bit_set :: inline proc(s: ^$S/bit_set[$E; $U], other: S) -> S {
	s^ = s^ &~ other;
	return s^;
}
// incl adds element(s)/another set to a bit_set in place; excl removes them.
@builtin incl :: proc{incl_elem, incl_elems, incl_bit_set};
@builtin excl :: proc{excl_elem, excl_elems, excl_bit_set};
// Returns the cardinality (number of set elements) of a bit_set, using the
// LLVM population-count intrinsic matching the set's underlying integer size.
@builtin
card :: proc(s: $S/bit_set[$E; $U]) -> int {
	when size_of(S) == 1 {
		foreign { @(link_name="llvm.ctpop.i8") count_ones :: proc(i: u8) -> u8 --- }
		return int(count_ones(transmute(u8)s));
	} else when size_of(S) == 2 {
		foreign { @(link_name="llvm.ctpop.i16") count_ones :: proc(i: u16) -> u16 --- }
		return int(count_ones(transmute(u16)s));
	} else when size_of(S) == 4 {
		foreign { @(link_name="llvm.ctpop.i32") count_ones :: proc(i: u32) -> u32 --- }
		return int(count_ones(transmute(u32)s));
	} else when size_of(S) == 8 {
		foreign { @(link_name="llvm.ctpop.i64") count_ones :: proc(i: u64) -> u64 --- }
		return int(count_ones(transmute(u64)s));
	} else when size_of(S) == 16 {
		foreign { @(link_name="llvm.ctpop.i128") count_ones :: proc(i: u128) -> u128 --- }
		return int(count_ones(transmute(u128)s));
	} else {
		// Compile-time error for unsupported backing sizes.
		#panic("Unhandled card bit_set size");
	}
}
// Returns a pointer to the first element of a fixed array (identical address
// to the array pointer itself).
@builtin
raw_array_data :: proc "contextless" (a: $P/^($T/[$N]$E)) -> ^E {
	return (^E)(a);
}
// Returns the data pointer of a slice (nil for a nil slice).
@builtin
raw_slice_data :: proc "contextless" (s: $S/[]$E) -> ^E {
	ptr := (transmute(Raw_Slice)s).data;
	return (^E)(ptr);
}
// Returns the data pointer of a dynamic array (nil if nothing is allocated).
@builtin
raw_dynamic_array_data :: proc "contextless" (s: $S/[dynamic]$E) -> ^E {
	ptr := (transmute(Raw_Dynamic_Array)s).data;
	return (^E)(ptr);
}
// Returns the byte pointer underlying a string.
@builtin
raw_string_data :: proc "contextless" (s: $S/string) -> ^u8 {
	return (transmute(Raw_String)s).data;
}
// raw_data returns the underlying data pointer of an array, slice,
// dynamic array, or string.
@builtin
raw_data :: proc{raw_array_data, raw_slice_data, raw_dynamic_array_data, raw_string_data};
// Asserts that `condition` holds; on failure, reports `message` through
// context.assertion_failure_proc (falling back to the default handler).
// Compiled away entirely when ODIN_DISABLE_ASSERT is set.
@builtin
@(disabled=ODIN_DISABLE_ASSERT)
assert :: proc(condition: bool, message := "", loc := #caller_location) {
	if !condition {
		// NOTE(review): the failure path lives in an immediately-called nested
		// proc, presumably to keep the inlined happy path small -- confirm.
		proc(message: string, loc: Source_Code_Location) {
			p := context.assertion_failure_proc;
			if p == nil {
				p = default_assertion_failure_proc;
			}
			p("runtime assertion", message, loc);
		}(message, loc);
	}
}
// Unconditionally reports a fatal error through context.assertion_failure_proc
// (or the default handler) and never returns (-> !).
// Disabled, like assert, when ODIN_DISABLE_ASSERT is set.
@builtin
@(disabled=ODIN_DISABLE_ASSERT)
panic :: proc(message: string, loc := #caller_location) -> ! {
	p := context.assertion_failure_proc;
	if p == nil {
		p = default_assertion_failure_proc;
	}
	p("panic", message, loc);
}
// Marks a code path as not yet implemented; reports through
// context.assertion_failure_proc and never returns (-> !).
@builtin
@(disabled=ODIN_DISABLE_ASSERT)
unimplemented :: proc(message := "", loc := #caller_location) -> ! {
	p := context.assertion_failure_proc;
	if p == nil {
		p = default_assertion_failure_proc;
	}
	p("not yet implemented", message, loc);
}
// Marks a code path that should never execute; reports an internal error
// through context.assertion_failure_proc and never returns (-> !).
@builtin
@(disabled=ODIN_DISABLE_ASSERT)
unreachable :: proc(message := "", loc := #caller_location) -> ! {
	p := context.assertion_failure_proc;
	if p == nil {
		p = default_assertion_failure_proc;
	}
	if message != "" {
		p("internal error", message, loc);
	} else {
		// Default message when the caller supplied none.
		p("internal error", "entered unreachable code", loc);
	}
}

View File

@@ -0,0 +1,100 @@
package runtime
// Initialises a Raw_Dynamic_Array in place with context.allocator and, when
// cap > 0, reserves `cap` elements and sets the length to `len`.
__dynamic_array_make :: proc(array_: rawptr, elem_size, elem_align: int, len, cap: int, loc := #caller_location) {
	array := (^Raw_Dynamic_Array)(array_);
	array.allocator = context.allocator;
	assert(array.allocator.procedure != nil);
	if cap > 0 {
		// Fix: only set the length when the reservation actually succeeded;
		// otherwise len could exceed cap with a nil data pointer.
		if __dynamic_array_reserve(array_, elem_size, elem_align, cap, loc) {
			array.len = len;
		}
	}
}
// Grows a Raw_Dynamic_Array's backing storage to hold at least `cap` elements.
// Returns true on success or when no growth was needed.
__dynamic_array_reserve :: proc(array_: rawptr, elem_size, elem_align: int, cap: int, loc := #caller_location) -> bool {
	array := (^Raw_Dynamic_Array)(array_);
	// NOTE(tetra, 2020-01-26): We set the allocator before earlying-out below, because user code is usually written
	// assuming that appending/reserving will set the allocator, if it is not already set.
	if array.allocator.procedure == nil {
		array.allocator = context.allocator;
	}
	assert(array.allocator.procedure != nil);
	if cap <= array.cap {
		return true;
	}
	old_size := array.cap * elem_size;
	new_size := cap * elem_size;
	allocator := array.allocator;
	new_data := allocator.procedure(allocator.data, .Resize, new_size, elem_align, array.data, old_size, 0, loc);
	// Zero-size elements never need storage, so a nil result still succeeds.
	if new_data != nil || elem_size == 0 {
		array.data = new_data;
		array.cap = cap;
		return true;
	}
	return false;
}
// Resize is reserve-to-`len` followed by a length update on success.
__dynamic_array_resize :: proc(array_: rawptr, elem_size, elem_align: int, len: int, loc := #caller_location) -> bool {
	if !__dynamic_array_reserve(array_, elem_size, elem_align, len, loc) {
		return false;
	}
	(^Raw_Dynamic_Array)(array_).len = len;
	return true;
}
// Appends `item_count` elements from `items` to a Raw_Dynamic_Array and
// returns the new length (the old length if nothing was appended).
__dynamic_array_append :: proc(array_: rawptr, elem_size, elem_align: int,
                               items: rawptr, item_count: int, loc := #caller_location) -> int {
	array := (^Raw_Dynamic_Array)(array_);
	if items == nil {
		return 0;
	}
	if item_count <= 0 {
		return 0;
	}
	ok := true;
	if array.cap <= array.len+item_count {
		// Growth policy: double the capacity plus at least 8 or the item count.
		cap := 2 * array.cap + max(8, item_count);
		ok = __dynamic_array_reserve(array, elem_size, elem_align, cap, loc);
	}
	// TODO(bill): Better error handling for failed reservation
	if !ok {
		return array.len;
	}
	assert(array.data != nil);
	data := uintptr(array.data) + uintptr(elem_size*array.len);
	mem_copy(rawptr(data), items, elem_size * item_count);
	array.len += item_count;
	return array.len;
}
// Appends a single zeroed element to a Raw_Dynamic_Array and returns the new
// length (the old length if the reservation failed).
__dynamic_array_append_nothing :: proc(array_: rawptr, elem_size, elem_align: int, loc := #caller_location) -> int {
	array := (^Raw_Dynamic_Array)(array_);
	ok := true;
	if array.cap <= array.len+1 {
		cap := 2 * array.cap + max(8, 1);
		ok = __dynamic_array_reserve(array, elem_size, elem_align, cap, loc);
	}
	// TODO(bill): Better error handling for failed reservation
	if !ok {
		return array.len;
	}
	assert(array.data != nil);
	// Zero the new slot rather than copying from anywhere.
	data := uintptr(array.data) + uintptr(elem_size*array.len);
	mem_zero(rawptr(data), elem_size);
	array.len += 1;
	return array.len;
}

View File

@@ -0,0 +1,394 @@
package runtime
import "intrinsics"
_ :: intrinsics;
// Initial number of entries/buckets reserved on a map's first insertion.
INITIAL_MAP_CAP :: 16;
// Temporary data structure for comparing hashes and keys
Map_Hash :: struct {
	hash: uintptr,   // hashed key value
	key_ptr: rawptr, // address of Map_Entry_Header.key
}
// Hashes the key pointed to by `k` with K's compiler-generated hasher
// (seed 0) and records the key's address for later equality checks.
__get_map_hash :: proc "contextless" (k: ^$K) -> (map_hash: Map_Hash) {
	hasher := intrinsics.type_hasher_proc(K);
	map_hash.key_ptr = k;
	map_hash.hash = hasher(k, 0);
	return;
}
// Builds a Map_Hash from an existing entry: reuses the stored hash and points
// at the key embedded in the entry (at h.key_offset).
__get_map_hash_from_entry :: proc "contextless" (h: Map_Header, entry: ^Map_Entry_Header) -> (hash: Map_Hash) {
	hash.hash = entry.hash;
	hash.key_ptr = rawptr(uintptr(entry) + h.key_offset);
	return;
}
// Result of a map lookup; all indices are -1 when not applicable.
Map_Find_Result :: struct {
	hash_index: int,  // bucket index in m.hashes
	entry_prev: int,  // previous entry in the collision chain (-1 if head)
	entry_index: int, // matching entry index (-1 if not found)
}
// Header of one map entry; the typed key and value follow it in memory
// (see the Entry struct in __get_map_header).
Map_Entry_Header :: struct {
	hash: uintptr,
	next: int, // next entry index in this bucket's collision chain (-1 = end)
	/*
	key: Key_Value,
	value: Value_Type,
	*/
}
// Type-erased description of a concrete map[K]V: the raw map plus the layout
// and comparison proc needed to manipulate entries generically.
Map_Header :: struct {
	m: ^Raw_Map,
	equal: Equal_Proc,
	entry_size: int,
	entry_align: int,
	key_offset: uintptr,
	key_size: int,
	value_offset: uintptr,
	value_size: int,
}
// FNV-1a 64-bit offset basis.
INITIAL_HASH_SEED :: 0xcbf29ce484222325;
// 64-bit FNV-1a hash over `data`, starting from `seed`.
_fnv64a :: proc "contextless" (data: []byte, seed: u64 = INITIAL_HASH_SEED) -> u64 {
	h: u64 = seed;
	for b in data {
		h = (h ~ u64(b)) * 0x100000001b3; // xor byte, multiply by FNV prime
	}
	return h;
}
// Hashes a byte slice with 64-bit FNV-1a, truncated to uintptr.
default_hash :: inline proc "contextless" (data: []byte) -> uintptr {
	return uintptr(_fnv64a(data));
}
// Hashes the bytes of a string.
default_hash_string :: inline proc "contextless" (s: string) -> uintptr {
	return default_hash(transmute([]byte)(s));
}
// Hashes `size` raw bytes starting at `data`.
default_hash_ptr :: inline proc "contextless" (data: rawptr, size: int) -> uintptr {
	s := Raw_Slice{data, size};
	return default_hash(transmute([]byte)(s));
}
// FNV-1a over a compile-time-known byte count N (<= 16); `inline for` fully
// unrolls the loop. `seed` is added to the FNV offset basis.
@(private)
_default_hasher_const :: inline proc "contextless" (data: rawptr, seed: uintptr, $N: uint) -> uintptr where N <= 16 {
	h := u64(seed) + 0xcbf29ce484222325;
	p := uintptr(data);
	inline for _ in 0..<N {
		b := u64((^byte)(p)^);
		h = (h ~ b) * 0x100000001b3;
		p += 1;
	}
	return uintptr(h);
}
// FNV-1a over N runtime-known bytes starting at `data`; `seed` is added to
// the FNV offset basis.
default_hasher_n :: inline proc "contextless" (data: rawptr, seed: uintptr, N: int) -> uintptr {
	hash := u64(seed) + 0xcbf29ce484222325;
	base := uintptr(data);
	for i in 0..<N {
		byte_value := u64((^byte)(base + uintptr(i))^);
		hash = (hash ~ byte_value) * 0x100000001b3;
	}
	return uintptr(hash);
}
// NOTE(bill): There are loads of predefined ones to improve optimizations for small types
// Each forwards to _default_hasher_const with a compile-time byte count so
// the FNV loop is fully unrolled.
default_hasher1  :: proc "contextless" (data: rawptr, seed: uintptr) -> uintptr { return inline _default_hasher_const(data, seed, 1);  }
default_hasher2  :: proc "contextless" (data: rawptr, seed: uintptr) -> uintptr { return inline _default_hasher_const(data, seed, 2);  }
default_hasher3  :: proc "contextless" (data: rawptr, seed: uintptr) -> uintptr { return inline _default_hasher_const(data, seed, 3);  }
default_hasher4  :: proc "contextless" (data: rawptr, seed: uintptr) -> uintptr { return inline _default_hasher_const(data, seed, 4);  }
default_hasher5  :: proc "contextless" (data: rawptr, seed: uintptr) -> uintptr { return inline _default_hasher_const(data, seed, 5);  }
default_hasher6  :: proc "contextless" (data: rawptr, seed: uintptr) -> uintptr { return inline _default_hasher_const(data, seed, 6);  }
default_hasher7  :: proc "contextless" (data: rawptr, seed: uintptr) -> uintptr { return inline _default_hasher_const(data, seed, 7);  }
default_hasher8  :: proc "contextless" (data: rawptr, seed: uintptr) -> uintptr { return inline _default_hasher_const(data, seed, 8);  }
default_hasher9  :: proc "contextless" (data: rawptr, seed: uintptr) -> uintptr { return inline _default_hasher_const(data, seed, 9);  }
default_hasher10 :: proc "contextless" (data: rawptr, seed: uintptr) -> uintptr { return inline _default_hasher_const(data, seed, 10); }
default_hasher11 :: proc "contextless" (data: rawptr, seed: uintptr) -> uintptr { return inline _default_hasher_const(data, seed, 11); }
default_hasher12 :: proc "contextless" (data: rawptr, seed: uintptr) -> uintptr { return inline _default_hasher_const(data, seed, 12); }
default_hasher13 :: proc "contextless" (data: rawptr, seed: uintptr) -> uintptr { return inline _default_hasher_const(data, seed, 13); }
default_hasher14 :: proc "contextless" (data: rawptr, seed: uintptr) -> uintptr { return inline _default_hasher_const(data, seed, 14); }
default_hasher15 :: proc "contextless" (data: rawptr, seed: uintptr) -> uintptr { return inline _default_hasher_const(data, seed, 15); }
default_hasher16 :: proc "contextless" (data: rawptr, seed: uintptr) -> uintptr { return inline _default_hasher_const(data, seed, 16); }
// FNV-1a over the bytes of a string/slice; `data` points at the slice header.
default_hasher_string :: proc "contextless" (data: rawptr, seed: uintptr) -> uintptr {
	h := u64(seed) + 0xcbf29ce484222325;
	str := (^[]byte)(data)^;
	for b in str {
		h = (h ~ u64(b)) * 0x100000001b3;
	}
	return uintptr(h);
}
// FNV-1a over a NUL-terminated C string (terminator excluded);
// `data` points at the cstring's pointer value.
default_hasher_cstring :: proc "contextless" (data: rawptr, seed: uintptr) -> uintptr {
	h := u64(seed) + 0xcbf29ce484222325;
	ptr := (^uintptr)(data)^;
	for (^byte)(ptr)^ != 0 {
		b := (^byte)(ptr)^;
		h = (h ~ u64(b)) * 0x100000001b3;
		ptr += 1;
	}
	return uintptr(h);
}
// Hashes a Source_Code_Location by combining the FNV hash of the file path
// with the line and column (each folded in via the FNV prime).
source_code_location_hash :: proc(s: Source_Code_Location) -> uintptr {
	hash := _fnv64a(transmute([]byte)s.file_path);
	hash = hash ~ (u64(s.line) * 0x100000001b3);
	hash = hash ~ (u64(s.column) * 0x100000001b3);
	return uintptr(hash);
}
// Builds the type-erased Map_Header (raw map pointer, layout, and equality
// proc) for a typed ^map[K]V.
__get_map_header :: proc "contextless" (m: ^$T/map[$K]$V) -> Map_Header {
	header := Map_Header{m = (^Raw_Map)(m)};
	// Mirrors the in-memory layout of one entry for this K/V pair.
	Entry :: struct {
		hash: uintptr,
		next: int,
		key: K,
		value: V,
	};
	header.equal = intrinsics.type_equal_proc(K);
	header.entry_size = int(size_of(Entry));
	header.entry_align = int(align_of(Entry));
	header.key_offset = uintptr(offset_of(Entry, key));
	header.key_size = int(size_of(K));
	header.value_offset = uintptr(offset_of(Entry, value));
	header.value_size = int(size_of(V));
	return header;
}
// Grows a slice to hold `new_count` elements using `allocator`; shrinking
// requests are a successful no-op. Returns false if reallocation fails.
__slice_resize :: proc(array_: ^$T/[]$E, new_count: int, allocator: Allocator, loc := #caller_location) -> bool {
	array := (^Raw_Slice)(array_);
	if new_count < array.len {
		return true;
	}
	assert(allocator.procedure != nil);
	// Fix: sizes and alignment must be based on the element type E, not on T,
	// which is the slice type itself (a pointer+length header).
	old_size := array.len*size_of(E);
	new_size := new_count*size_of(E);
	new_data := mem_resize(array.data, old_size, new_size, align_of(E), allocator, loc);
	if new_data == nil {
		return false;
	}
	array.data = new_data;
	array.len = new_count;
	return true;
}
// Reserves room for `cap` entries and `cap` hash buckets; any newly added
// buckets are initialised to -1 (empty).
__dynamic_map_reserve :: proc(using header: Map_Header, cap: int, loc := #caller_location) {
	__dynamic_array_reserve(&m.entries, entry_size, entry_align, cap, loc);
	old_len := len(m.hashes);
	__slice_resize(&m.hashes, cap, m.entries.allocator, loc);
	for i in old_len..<len(m.hashes) {
		m.hashes[i] = -1;
	}
}
// Rebuilds the map with `new_count` hash buckets (at least twice the current
// entry count), re-inserting every existing entry into a fresh Raw_Map, then
// frees the old storage and installs the new one.
__dynamic_map_rehash :: proc(using header: Map_Header, new_count: int, loc := #caller_location) #no_bounds_check {
	new_header: Map_Header = header;
	nm := Raw_Map{};
	nm.entries.allocator = m.entries.allocator;
	new_header.m = &nm;
	// Perform the allocations below under the map's own allocator when set.
	c := context;
	if m.entries.allocator.procedure != nil {
		c.allocator = m.entries.allocator;
	}
	context = c;
	new_count := new_count;
	new_count = max(new_count, 2*m.entries.len);
	__dynamic_array_reserve(&nm.entries, entry_size, entry_align, m.entries.len, loc);
	__slice_resize(&nm.hashes, new_count, m.entries.allocator, loc);
	for i in 0 ..< new_count {
		nm.hashes[i] = -1; // -1 marks an empty bucket
	}
	for i in 0 ..< m.entries.len {
		if len(nm.hashes) == 0 {
			__dynamic_map_grow(new_header, loc);
		}
		entry_header := __dynamic_map_get_entry(header, i);
		entry_hash := __get_map_hash_from_entry(header, entry_header);
		// Find the insertion point in the new map and link the copied entry
		// into the corresponding collision chain.
		fr := __dynamic_map_find(new_header, entry_hash);
		j := __dynamic_map_add_entry(new_header, entry_hash, loc);
		if fr.entry_prev < 0 {
			nm.hashes[fr.hash_index] = j;
		} else {
			e := __dynamic_map_get_entry(new_header, fr.entry_prev);
			e.next = j;
		}
		e := __dynamic_map_get_entry(new_header, j);
		__dynamic_map_copy_entry(header, e, entry_header);
		e.next = fr.entry_index;
		if __dynamic_map_full(new_header) {
			__dynamic_map_grow(new_header, loc);
		}
	}
	// Release the old map's storage and swap in the rebuilt one.
	delete(m.hashes, m.entries.allocator, loc);
	free(m.entries.data, m.entries.allocator, loc);
	header.m^ = nm;
}
// Returns a pointer to the value stored for `hash`, or nil when absent.
__dynamic_map_get :: proc(h: Map_Header, hash: Map_Hash) -> rawptr {
	fr := __dynamic_map_find(h, hash);
	if fr.entry_index < 0 {
		return nil;
	}
	entry := uintptr(__dynamic_map_get_entry(h, fr.entry_index));
	return rawptr(entry + h.value_offset);
}
// Inserts or updates the entry for `hash` with the bytes at `value`, growing
// the map once the load-factor threshold is exceeded.
__dynamic_map_set :: proc(h: Map_Header, hash: Map_Hash, value: rawptr, loc := #caller_location) #no_bounds_check {
	index: int;
	assert(value != nil);
	// Lazily initialise bucket storage on the first insertion.
	if len(h.m.hashes) == 0 {
		__dynamic_map_reserve(h, INITIAL_MAP_CAP, loc);
		__dynamic_map_grow(h, loc);
	}
	fr := __dynamic_map_find(h, hash);
	if fr.entry_index >= 0 {
		// Key already present: overwrite in place.
		index = fr.entry_index;
	} else {
		// New key: append an entry and link it into the collision chain.
		index = __dynamic_map_add_entry(h, hash, loc);
		if fr.entry_prev >= 0 {
			entry := __dynamic_map_get_entry(h, fr.entry_prev);
			entry.next = index;
		} else {
			h.m.hashes[fr.hash_index] = index;
		}
	}
	{
		// Write the hash, key bytes, and value bytes into the entry.
		e := __dynamic_map_get_entry(h, index);
		e.hash = hash.hash;
		key := rawptr(uintptr(e) + h.key_offset);
		mem_copy(key, hash.key_ptr, h.key_size);
		val := rawptr(uintptr(e) + h.value_offset);
		mem_copy(val, value, h.value_size);
	}
	if __dynamic_map_full(h) {
		__dynamic_map_grow(h, loc);
	}
}
// Grows the map to roughly 4x its entry capacity and rehashes everything.
__dynamic_map_grow :: proc(using h: Map_Header, loc := #caller_location) {
	// TODO(bill): Determine an efficient growing rate
	new_count := max(4*m.entries.cap + 7, INITIAL_MAP_CAP);
	__dynamic_map_rehash(h, new_count, loc);
}
// Load-factor check: the map counts as full once its entry capacity reaches
// three quarters of the bucket count.
__dynamic_map_full :: inline proc "contextless" (using h: Map_Header) -> bool {
	return m.entries.cap >= int(0.75 * f64(len(m.hashes)));
}
// Two map keys match only when their hashes agree AND the keys themselves
// compare equal (hash equality alone can be a collision).
__dynamic_map_hash_equal :: proc "contextless" (h: Map_Header, a, b: Map_Hash) -> bool {
	return a.hash == b.hash && h.equal(a.key_ptr, b.key_ptr);
}
// Looks up `hash` in the map. Returns the bucket index, the index of the
// previous entry in the bucket's collision chain (-1 if the match is the
// chain head), and the matching entry index (-1 when not found).
__dynamic_map_find :: proc(using h: Map_Header, hash: Map_Hash) -> Map_Find_Result #no_bounds_check {
	fr := Map_Find_Result{-1, -1, -1};
	if n := uintptr(len(m.hashes)); n > 0 {
		fr.hash_index = int(hash.hash % n);
		fr.entry_index = m.hashes[fr.hash_index];
		// Walk the collision chain until a real key match or the chain ends.
		for fr.entry_index >= 0 {
			entry := __dynamic_map_get_entry(h, fr.entry_index);
			entry_hash := __get_map_hash_from_entry(h, entry);
			if __dynamic_map_hash_equal(h, entry_hash, hash) {
				return fr;
			}
			fr.entry_prev = fr.entry_index;
			fr.entry_index = entry.next;
		}
	}
	return fr;
}
// Appends a fresh entry for `hash` to the entry array and returns its index
// (the previous length). The new entry gets the hash and key copied in and
// its chain link terminated (next = -1); the value bytes are left zeroed.
__dynamic_map_add_entry :: proc(using h: Map_Header, hash: Map_Hash, loc := #caller_location) -> int {
	prev := m.entries.len;
	c := __dynamic_array_append_nothing(&m.entries, entry_size, entry_align, loc);
	if c != prev {
		end := __dynamic_map_get_entry(h, c-1);
		end.hash = hash.hash;
		mem_copy(rawptr(uintptr(end) + key_offset), hash.key_ptr, key_size);
		end.next = -1;
	}
	return prev;
}
// Removes the entry matching `hash` if one exists; otherwise a no-op.
__dynamic_map_delete_key :: proc(using h: Map_Header, hash: Map_Hash) {
	if fr := __dynamic_map_find(h, hash); fr.entry_index >= 0 {
		__dynamic_map_erase(h, fr);
	}
}
// Returns a pointer to the entry header at `index` in the packed entry array.
__dynamic_map_get_entry :: proc(using h: Map_Header, index: int) -> ^Map_Entry_Header {
	assert(0 <= index && index < m.entries.len);
	return (^Map_Entry_Header)(uintptr(m.entries.data) + uintptr(index*entry_size));
}
// Copies one whole entry (header, key, and value bytes) over another.
__dynamic_map_copy_entry :: proc "contextless" (h: Map_Header, new, old: ^Map_Entry_Header) {
	mem_copy(new, old, h.entry_size);
}
// Removes the entry described by `fr`: unlinks it from its collision chain,
// then swap-removes it with the last entry and repairs the chain link that
// pointed at the moved entry.
__dynamic_map_erase :: proc(using h: Map_Header, fr: Map_Find_Result) #no_bounds_check {
	// Unlink the entry from its bucket's chain.
	if fr.entry_prev < 0 {
		m.hashes[fr.hash_index] = __dynamic_map_get_entry(h, fr.entry_index).next;
	} else {
		prev := __dynamic_map_get_entry(h, fr.entry_prev);
		curr := __dynamic_map_get_entry(h, fr.entry_index);
		prev.next = curr.next;
	}
	if (fr.entry_index == m.entries.len-1) {
		// NOTE(bill): No need to do anything else, just pop
	} else {
		// Move the last entry into the freed slot, then fix whichever link
		// (chain entry or bucket head) referenced the moved entry.
		old := __dynamic_map_get_entry(h, fr.entry_index);
		end := __dynamic_map_get_entry(h, m.entries.len-1);
		__dynamic_map_copy_entry(h, old, end);
		old_hash := __get_map_hash_from_entry(h, old);
		if last := __dynamic_map_find(h, old_hash); last.entry_prev >= 0 {
			last_entry := __dynamic_map_get_entry(h, last.entry_prev);
			last_entry.next = fr.entry_index;
		} else {
			m.hashes[last.hash_index] = fr.entry_index;
		}
	}
	m.entries.len -= 1;
}

View File

@@ -23,7 +23,7 @@ bounds_check_error :: proc "contextless" (file: string, line, column: int, index
}
handle_error :: proc "contextless" (file: string, line, column: int, index, count: int) {
context = default_context();
print_caller_location(Source_Code_Location{file, line, column, "", 0});
print_caller_location(Source_Code_Location{file, line, column, ""});
print_string(" Index ");
print_i64(i64(index));
print_string(" is out of bounds range 0:");
@@ -36,7 +36,7 @@ bounds_check_error :: proc "contextless" (file: string, line, column: int, index
slice_handle_error :: proc "contextless" (file: string, line, column: int, lo, hi: int, len: int) -> ! {
context = default_context();
print_caller_location(Source_Code_Location{file, line, column, "", 0});
print_caller_location(Source_Code_Location{file, line, column, ""});
print_string(" Invalid slice indices: ");
print_i64(i64(lo));
print_string(":");
@@ -67,7 +67,7 @@ dynamic_array_expr_error :: proc "contextless" (file: string, line, column: int,
}
handle_error :: proc "contextless" (file: string, line, column: int, low, high, max: int) {
context = default_context();
print_caller_location(Source_Code_Location{file, line, column, "", 0});
print_caller_location(Source_Code_Location{file, line, column, ""});
print_string(" Invalid dynamic array values: ");
print_i64(i64(low));
print_string(":");
@@ -87,7 +87,7 @@ type_assertion_check :: proc "contextless" (ok: bool, file: string, line, column
}
handle_error :: proc "contextless" (file: string, line, column: int, from, to: typeid) {
context = default_context();
print_caller_location(Source_Code_Location{file, line, column, "", 0});
print_caller_location(Source_Code_Location{file, line, column, ""});
print_string(" Invalid type assertion from ");
print_typeid(from);
print_string(" to ");
@@ -98,6 +98,59 @@ type_assertion_check :: proc "contextless" (ok: bool, file: string, line, column
handle_error(file, line, column, from, to);
}
type_assertion_check2 :: proc "contextless" (ok: bool, file: string, line, column: int, from, to: typeid, from_data: rawptr) {
if ok {
return;
}
variant_type :: proc "contextless" (id: typeid, data: rawptr) -> typeid {
if id == nil || data == nil {
return id;
}
ti := type_info_base(type_info_of(id));
#partial switch v in ti.variant {
case Type_Info_Any:
return (^any)(data).id;
case Type_Info_Union:
tag_ptr := uintptr(data) + v.tag_offset;
idx := 0;
switch v.tag_type.size {
case 1: idx = int((^u8)(tag_ptr)^) - 1;
case 2: idx = int((^u16)(tag_ptr)^) - 1;
case 4: idx = int((^u32)(tag_ptr)^) - 1;
case 8: idx = int((^u64)(tag_ptr)^) - 1;
case 16: idx = int((^u128)(tag_ptr)^) - 1;
}
if idx < 0 {
return nil;
} else if idx < len(v.variants) {
return v.variants[idx].id;
}
}
return id;
}
handle_error :: proc "contextless" (file: string, line, column: int, from, to: typeid, from_data: rawptr) {
context = default_context();
actual := variant_type(from, from_data);
print_caller_location(Source_Code_Location{file, line, column, ""});
print_string(" Invalid type assertion from ");
print_typeid(from);
print_string(" to ");
print_typeid(to);
if actual != from {
print_string(", actual type: ");
print_typeid(actual);
}
print_byte('\n');
type_assertion_trap();
}
handle_error(file, line, column, from, to, from_data);
}
make_slice_error_loc :: inline proc "contextless" (loc := #caller_location, len: int) {
if 0 <= len {
return;

View File

@@ -93,18 +93,18 @@ mem_copy :: proc "contextless" (dst, src: rawptr, len: int) -> rawptr {
when ODIN_USE_LLVM_API {
when size_of(rawptr) == 8 {
@(link_name="llvm.memmove.p0i8.p0i8.i64")
llvm_memmove :: proc(dst, src: rawptr, len: int, is_volatile: bool = false) ---;
llvm_memmove :: proc "none" (dst, src: rawptr, len: int, is_volatile: bool = false) ---;
} else {
@(link_name="llvm.memmove.p0i8.p0i8.i32")
llvm_memmove :: proc(dst, src: rawptr, len: int, is_volatile: bool = false) ---;
llvm_memmove :: proc "none" (dst, src: rawptr, len: int, is_volatile: bool = false) ---;
}
} else {
when size_of(rawptr) == 8 {
@(link_name="llvm.memmove.p0i8.p0i8.i64")
llvm_memmove :: proc(dst, src: rawptr, len: int, align: i32 = 1, is_volatile: bool = false) ---;
llvm_memmove :: proc "none" (dst, src: rawptr, len: int, align: i32 = 1, is_volatile: bool = false) ---;
} else {
@(link_name="llvm.memmove.p0i8.p0i8.i32")
llvm_memmove :: proc(dst, src: rawptr, len: int, align: i32 = 1, is_volatile: bool = false) ---;
llvm_memmove :: proc "none" (dst, src: rawptr, len: int, align: i32 = 1, is_volatile: bool = false) ---;
}
}
}
@@ -121,18 +121,18 @@ mem_copy_non_overlapping :: proc "contextless" (dst, src: rawptr, len: int) -> r
when ODIN_USE_LLVM_API {
when size_of(rawptr) == 8 {
@(link_name="llvm.memcpy.p0i8.p0i8.i64")
llvm_memcpy :: proc(dst, src: rawptr, len: int, is_volatile: bool = false) ---;
llvm_memcpy :: proc "none" (dst, src: rawptr, len: int, is_volatile: bool = false) ---;
} else {
@(link_name="llvm.memcpy.p0i8.p0i8.i32")
llvm_memcpy :: proc(dst, src: rawptr, len: int, is_volatile: bool = false) ---;
llvm_memcpy :: proc "none" (dst, src: rawptr, len: int, is_volatile: bool = false) ---;
}
} else {
when size_of(rawptr) == 8 {
@(link_name="llvm.memcpy.p0i8.p0i8.i64")
llvm_memcpy :: proc(dst, src: rawptr, len: int, align: i32 = 1, is_volatile: bool = false) ---;
llvm_memcpy :: proc "none" (dst, src: rawptr, len: int, align: i32 = 1, is_volatile: bool = false) ---;
} else {
@(link_name="llvm.memcpy.p0i8.p0i8.i32")
llvm_memcpy :: proc(dst, src: rawptr, len: int, align: i32 = 1, is_volatile: bool = false) ---;
llvm_memcpy :: proc "none" (dst, src: rawptr, len: int, align: i32 = 1, is_volatile: bool = false) ---;
}
}
}
@@ -180,9 +180,16 @@ mem_resize :: inline proc(ptr: rawptr, old_size, new_size: int, alignment: int =
}
return allocator.procedure(allocator.data, .Resize, new_size, alignment, ptr, old_size, 0, loc);
}
memory_equal :: proc "contextless" (a, b: rawptr, n: int) -> bool {
return memory_compare(a, b, n) == 0;
}
memory_compare :: proc "contextless" (a, b: rawptr, n: int) -> int #no_bounds_check {
switch {
case a == b: return 0;
case a == nil: return -1;
case b == nil: return +1;
}
x := uintptr(a);
y := uintptr(b);
n := uintptr(n);
@@ -389,45 +396,45 @@ string_decode_rune :: inline proc "contextless" (s: string) -> (rune, int) {
return rune(s0&MASK4)<<18 | rune(b1&MASKX)<<12 | rune(b2&MASKX)<<6 | rune(b3&MASKX), 4;
}
@(default_calling_convention = "c")
@(default_calling_convention = "none")
foreign {
@(link_name="llvm.sqrt.f32") _sqrt_f32 :: proc(x: f32) -> f32 ---
@(link_name="llvm.sqrt.f64") _sqrt_f64 :: proc(x: f64) -> f64 ---
}
abs_f32 :: inline proc "contextless" (x: f32) -> f32 {
foreign {
@(link_name="llvm.fabs.f32") _abs :: proc "c" (x: f32) -> f32 ---
@(link_name="llvm.fabs.f32") _abs :: proc "none" (x: f32) -> f32 ---
}
return _abs(x);
}
abs_f64 :: inline proc "contextless" (x: f64) -> f64 {
foreign {
@(link_name="llvm.fabs.f64") _abs :: proc "c" (x: f64) -> f64 ---
@(link_name="llvm.fabs.f64") _abs :: proc "none" (x: f64) -> f64 ---
}
return _abs(x);
}
min_f32 :: proc(a, b: f32) -> f32 {
foreign {
@(link_name="llvm.minnum.f32") _min :: proc "c" (a, b: f32) -> f32 ---
@(link_name="llvm.minnum.f32") _min :: proc "none" (a, b: f32) -> f32 ---
}
return _min(a, b);
}
min_f64 :: proc(a, b: f64) -> f64 {
foreign {
@(link_name="llvm.minnum.f64") _min :: proc "c" (a, b: f64) -> f64 ---
@(link_name="llvm.minnum.f64") _min :: proc "none" (a, b: f64) -> f64 ---
}
return _min(a, b);
}
max_f32 :: proc(a, b: f32) -> f32 {
foreign {
@(link_name="llvm.maxnum.f32") _max :: proc "c" (a, b: f32) -> f32 ---
@(link_name="llvm.maxnum.f32") _max :: proc "none" (a, b: f32) -> f32 ---
}
return _max(a, b);
}
max_f64 :: proc(a, b: f64) -> f64 {
foreign {
@(link_name="llvm.maxnum.f64") _max :: proc "c" (a, b: f64) -> f64 ---
@(link_name="llvm.maxnum.f64") _max :: proc "none" (a, b: f64) -> f64 ---
}
return _max(a, b);
}

View File

@@ -0,0 +1,135 @@
package runtime
// 128-bit integer runtime support routines (compiler-rt style), exported
// under the standard __*ti3 symbol names that LLVM lowers 128-bit division
// and remainder operations to.

// Unsigned 128-bit remainder: returns a % b.
@(link_name="__umodti3")
umodti3 :: proc "c" (a, b: u128) -> u128 {
	r: u128 = ---; // deliberately uninitialized; filled in by udivmod128
	_ = udivmod128(a, b, &r);
	return r;
}

// Unsigned 128-bit divide with remainder: returns a / b and stores a % b
// through rem (rem may be nil when the remainder is not wanted).
@(link_name="__udivmodti4")
udivmodti4 :: proc "c" (a, b: u128, rem: ^u128) -> u128 {
	return udivmod128(a, b, rem);
}

// Unsigned 128-bit division: returns a / b.
@(link_name="__udivti3")
udivti3 :: proc "c" (a, b: u128) -> u128 {
	return udivmodti4(a, b, nil);
}

// Signed 128-bit remainder: returns a % b with the sign of a.
@(link_name="__modti3")
modti3 :: proc "c" (a, b: i128) -> i128 {
	s_a := a >> (128 - 1); // all-ones when a < 0, else zero (arithmetic shift)
	s_b := b >> (128 - 1); // all-ones when b < 0, else zero
	an := (a ~ s_a) - s_a; // |a| via two's-complement conditional negate
	bn := (b ~ s_b) - s_b; // |b|

	r: u128 = ---;
	_ = udivmod128(transmute(u128)an, transmute(u128)bn, &r);
	return (transmute(i128)r ~ s_a) - s_a; // re-apply the sign of a
}

// Signed 128-bit divide with remainder.
// NOTE(review): this forwards the raw bit patterns to the unsigned routine
// without sign normalization — TODO confirm against compiler-rt's
// __divmodti4, which negates operands and fixes up the results.
@(link_name="__divmodti4")
divmodti4 :: proc "c" (a, b: i128, rem: ^i128) -> i128 {
	u := udivmod128(transmute(u128)a, transmute(u128)b, cast(^u128)rem);
	return transmute(i128)u;
}

// Signed 128-bit division: returns a / b.
// NOTE(review): like divmodti4 above, no sign handling is visible here —
// verify the backend only emits this for already-normalized operands.
@(link_name="__divti3")
divti3 :: proc "c" (a, b: i128) -> i128 {
	u := udivmodti4(transmute(u128)a, transmute(u128)b, nil);
	return transmute(i128)u;
}
// __fixdfti: convert an f64 (passed as its raw u64 bit pattern) to i128,
// truncating toward zero and saturating to max(i128)/min(i128) on overflow.
// Values with |x| < 1 convert to 0; NaN handling follows the same path as
// large exponents (saturates), matching compiler-rt's fixdfti.
@(link_name="__fixdfti")
fixdfti :: proc(a: u64) -> i128 {
	significandBits :: 52;
	typeWidth       :: (size_of(u64)*8);
	exponentBits    :: (typeWidth - significandBits - 1);
	maxExponent     :: ((1 << exponentBits) - 1);
	exponentBias    :: (maxExponent >> 1);

	implicitBit     :: (u64(1) << significandBits);
	significandMask :: (implicitBit - 1);
	signBit         :: (u64(1) << (significandBits + exponentBits));
	absMask         :: (signBit - 1);
	exponentMask    :: (absMask ~ significandMask);

	// Break a into sign, exponent, significand.
	aRep := a;
	aAbs := aRep & absMask;
	sign := i128(-1 if aRep & signBit != 0 else 1);
	// BUG FIX: the unbiased exponent must be a signed integer. It was
	// previously computed as u64, so the `exponent < 0` check below could
	// never fire; magnitudes below 1.0 wrapped around to a huge unsigned
	// value and incorrectly saturated instead of returning 0.
	exponent    := i64(aAbs >> significandBits) - exponentBias;
	significand := u64((aAbs & significandMask) | implicitBit);

	// If the exponent is negative, |value| < 1, so the result is zero.
	if exponent < 0 {
		return 0;
	}

	// If the value is too large for the integer type, saturate.
	if exponent >= size_of(i128) * 8 {
		return max(i128) if sign == 1 else min(i128);
	}

	// If 0 <= exponent < significandBits, right shift to get the result.
	// Otherwise, shift left.
	if exponent < significandBits {
		return sign * i128(significand >> u64(significandBits - exponent));
	} else {
		return sign * (i128(significand) << u64(exponent - significandBits));
	}
}
@(default_calling_convention = "none")
foreign {
@(link_name="llvm.ctlz.i128") _clz_i128 :: proc(x: i128, is_zero_undef := false) -> i128 ---
}
// __floattidf: convert an i128 to the nearest f64, rounding to nearest with
// ties-to-even, following compiler-rt's floattidf algorithm.
@(link_name="__floattidf")
floattidf :: proc(a: i128) -> f64 {
	DBL_MANT_DIG :: 53;
	if a == 0 {
		return 0.0;
	}
	a := a;
	N :: size_of(i128) * 8;
	s := a >> (N-1);        // all-ones when a < 0, else zero
	a = (a ~ s) - s;        // |a|
	sd := N - _clz_i128(a); // number of significant digits
	e := u32(sd - 1);       // exponent
	if sd > DBL_MANT_DIG {
		// Too many bits to represent exactly: shift down to
		// DBL_MANT_DIG+2 bits, preserving a sticky bit, then round to
		// nearest even.
		switch sd {
		case DBL_MANT_DIG + 1:
			a <<= 1;
		case DBL_MANT_DIG + 2:
			// okay
		case:
			a = i128(u128(a) >> u128(sd - (DBL_MANT_DIG+2))) |
			    i128(u128(a) & (~u128(0) >> u128(N + DBL_MANT_DIG+2 - sd)) != 0);
		};

		a |= i128((a & 4) != 0); // fold lowest kept bit into sticky
		a += 1;                  // round
		a >>= 2;                 // drop round/sticky bits

		// Rounding may have carried into an extra bit; renormalize.
		if a & (1 << DBL_MANT_DIG) != 0 {
			a >>= 1;
			e += 1;
		}
	} else {
		a <<= u128(DBL_MANT_DIG - sd);
	}

	// Assemble the IEEE-754 binary64 words. NOTE(review): the [2]u32
	// transmute assumes a little-endian target (fb[0] = low word) —
	// confirm for any big-endian port.
	fb: [2]u32;
	// BUG FIX: the high-mantissa extraction previously read `u32(a) >> 32`,
	// which truncates to 32 bits BEFORE shifting and always yields 0; the
	// shift must happen on the wide value first.
	fb[1] = (u32(s) & 0x80000000)       | // sign
	        ((e + 1023) << 20)          | // exponent
	        (u32(a >> 32) & 0x000FFFFF);  // mantissa-high
	// BUG FIX: the low mantissa word belongs in fb[0]; the original wrote
	// it to fb[1], clobbering the sign/exponent/high-mantissa word just
	// assembled above.
	fb[0] = u32(a); // mantissa-low
	return transmute(f64)fb;
}

View File

@@ -2,134 +2,134 @@ package runtime
@(link_name="__umodti3")
umodti3 :: proc "c" (a, b: u128) -> u128 {
r: u128 = ---;
_ = udivmod128(a, b, &r);
return r;
r: u128 = ---;
_ = udivmod128(a, b, &r);
return r;
}
@(link_name="__udivmodti4")
udivmodti4 :: proc "c" (a, b: u128, rem: ^u128) -> u128 {
return udivmod128(a, b, rem);
return udivmod128(a, b, rem);
}
@(link_name="__udivti3")
udivti3 :: proc "c" (a, b: u128) -> u128 {
return udivmodti4(a, b, nil);
return udivmodti4(a, b, nil);
}
@(link_name="__modti3")
modti3 :: proc "c" (a, b: i128) -> i128 {
s_a := a >> (128 - 1);
s_b := b >> (128 - 1);
an := (a ~ s_a) - s_a;
bn := (b ~ s_b) - s_b;
s_a := a >> (128 - 1);
s_b := b >> (128 - 1);
an := (a ~ s_a) - s_a;
bn := (b ~ s_b) - s_b;
r: u128 = ---;
_ = udivmod128(transmute(u128)an, transmute(u128)bn, &r);
return (transmute(i128)r ~ s_a) - s_a;
r: u128 = ---;
_ = udivmod128(transmute(u128)an, transmute(u128)bn, &r);
return (transmute(i128)r ~ s_a) - s_a;
}
@(link_name="__divmodti4")
divmodti4 :: proc "c" (a, b: i128, rem: ^i128) -> i128 {
u := udivmod128(transmute(u128)a, transmute(u128)b, cast(^u128)rem);
return transmute(i128)u;
u := udivmod128(transmute(u128)a, transmute(u128)b, cast(^u128)rem);
return transmute(i128)u;
}
@(link_name="__divti3")
divti3 :: proc "c" (a, b: i128) -> i128 {
u := udivmodti4(transmute(u128)a, transmute(u128)b, nil);
return transmute(i128)u;
u := udivmodti4(transmute(u128)a, transmute(u128)b, nil);
return transmute(i128)u;
}
@(link_name="__fixdfti")
fixdfti :: proc(a: u64) -> i128 {
significandBits :: 52;
typeWidth :: (size_of(u64)*8);
exponentBits :: (typeWidth - significandBits - 1);
maxExponent :: ((1 << exponentBits) - 1);
exponentBias :: (maxExponent >> 1);
significandBits :: 52;
typeWidth :: (size_of(u64)*8);
exponentBits :: (typeWidth - significandBits - 1);
maxExponent :: ((1 << exponentBits) - 1);
exponentBias :: (maxExponent >> 1);
implicitBit :: (u64(1) << significandBits);
significandMask :: (implicitBit - 1);
signBit :: (u64(1) << (significandBits + exponentBits));
absMask :: (signBit - 1);
exponentMask :: (absMask ~ significandMask);
implicitBit :: (u64(1) << significandBits);
significandMask :: (implicitBit - 1);
signBit :: (u64(1) << (significandBits + exponentBits));
absMask :: (signBit - 1);
exponentMask :: (absMask ~ significandMask);
// Break a into sign, exponent, significand
aRep := a;
aAbs := aRep & absMask;
sign := i128(-1 if aRep & signBit != 0 else 1);
exponent := u64((aAbs >> significandBits) - exponentBias);
significand := u64((aAbs & significandMask) | implicitBit);
// Break a into sign, exponent, significand
aRep := a;
aAbs := aRep & absMask;
sign := i128(-1 if aRep & signBit != 0 else 1);
exponent := u64((aAbs >> significandBits) - exponentBias);
significand := u64((aAbs & significandMask) | implicitBit);
// If exponent is negative, the result is zero.
if exponent < 0 {
return 0;
}
// If exponent is negative, the result is zero.
if exponent < 0 {
return 0;
}
// If the value is too large for the integer type, saturate.
if exponent >= size_of(i128) * 8 {
return max(i128) if sign == 1 else min(i128);
}
// If the value is too large for the integer type, saturate.
if exponent >= size_of(i128) * 8 {
return max(i128) if sign == 1 else min(i128);
}
// If 0 <= exponent < significandBits, right shift to get the result.
// Otherwise, shift left.
if exponent < significandBits {
return sign * i128(significand >> (significandBits - exponent));
} else {
return sign * (i128(significand) << (exponent - significandBits));
}
// If 0 <= exponent < significandBits, right shift to get the result.
// Otherwise, shift left.
if exponent < significandBits {
return sign * i128(significand >> (significandBits - exponent));
} else {
return sign * (i128(significand) << (exponent - significandBits));
}
}
@(default_calling_convention = "none")
foreign {
@(link_name="llvm.ctlz.i128") _clz_i128 :: proc(x: i128, is_zero_undef := false) -> i128 ---
@(link_name="llvm.ctlz.i128") _clz_i128 :: proc(x: i128, is_zero_undef := false) -> i128 ---
}
@(link_name="__floattidf")
floattidf :: proc(a: i128) -> f64 {
DBL_MANT_DIG :: 53;
if a == 0 {
return 0.0;
}
a := a;
N :: size_of(i128) * 8;
s := a >> (N-1);
a = (a ~ s) - s;
sd: = N - _clz_i128(a); // number of significant digits
e := u32(sd - 1); // exponent
if sd > DBL_MANT_DIG {
switch sd {
case DBL_MANT_DIG + 1:
a <<= 1;
case DBL_MANT_DIG + 2:
// okay
case:
a = i128(u128(a) >> u128(sd - (DBL_MANT_DIG+2))) |
i128(u128(a) & (~u128(0) >> u128(N + DBL_MANT_DIG+2 - sd)) != 0);
};
DBL_MANT_DIG :: 53;
if a == 0 {
return 0.0;
}
a := a;
N :: size_of(i128) * 8;
s := a >> (N-1);
a = (a ~ s) - s;
sd: = N - _clz_i128(a); // number of significant digits
e := u32(sd - 1); // exponent
if sd > DBL_MANT_DIG {
switch sd {
case DBL_MANT_DIG + 1:
a <<= 1;
case DBL_MANT_DIG + 2:
// okay
case:
a = i128(u128(a) >> u128(sd - (DBL_MANT_DIG+2))) |
i128(u128(a) & (~u128(0) >> u128(N + DBL_MANT_DIG+2 - sd)) != 0);
};
a |= i128((a & 4) != 0);
a += 1;
a >>= 2;
a |= i128((a & 4) != 0);
a += 1;
a >>= 2;
if a & (1 << DBL_MANT_DIG) != 0 {
a >>= 1;
e += 1;
}
} else {
a <<= u128(DBL_MANT_DIG - sd);
}
fb: [2]u32;
fb[1] = (u32(s) & 0x80000000) | // sign
((e + 1023) << 20) | // exponent
((u32(a) >> 32) & 0x000FFFFF); // mantissa-high
fb[1] = u32(a); // mantissa-low
return transmute(f64)fb;
if a & (1 << DBL_MANT_DIG) != 0 {
a >>= 1;
e += 1;
}
} else {
a <<= u128(DBL_MANT_DIG - sd);
}
fb: [2]u32;
fb[1] = (u32(s) & 0x80000000) | // sign
((e + 1023) << 20) | // exponent
((u32(a) >> 32) & 0x000FFFFF); // mantissa-high
fb[1] = u32(a); // mantissa-low
return transmute(f64)fb;
}

View File

@@ -350,7 +350,7 @@ print_type :: proc "contextless" (ti: ^Type_Info) {
print_byte(']');
case Type_Info_Opaque:
print_string("opaque ");
print_string("#opaque ");
print_type(info.elem);
case Type_Info_Simd_Vector:

View File

@@ -2,15 +2,14 @@ package runtime
foreign import kernel32 "system:Kernel32.lib"
windows_trap_array_bounds :: proc "contextless" () -> ! {
DWORD :: u32;
ULONG_PTR :: uint;
@(private)
foreign kernel32 {
RaiseException :: proc "stdcall" (dwExceptionCode, dwExceptionFlags, nNumberOfArguments: u32, lpArguments: ^uint) -> ! ---
}
windows_trap_array_bounds :: proc "contextless" () -> ! {
EXCEPTION_ARRAY_BOUNDS_EXCEEDED :: 0xC000008C;
foreign kernel32 {
RaiseException :: proc "stdcall" (dwExceptionCode, dwExceptionFlags, nNumberOfArguments: DWORD, lpArguments: ^ULONG_PTR) -> ! ---
}
RaiseException(EXCEPTION_ARRAY_BOUNDS_EXCEEDED, 0, 0, nil);
}

View File

@@ -216,7 +216,7 @@ split_last :: proc(array: $T/[]$E) -> (rest: T, last: E) {
first :: proc(array: $T/[]$E) -> E {
return array[0];
}
last :: proc(array: $T/[]$E) -> ^E {
last :: proc(array: $T/[]$E) -> E {
return array[len(array)-1];
}
@@ -252,3 +252,44 @@ get_ptr :: proc(array: $T/[]$E, index: int) -> (value: ^E, ok: bool) {
as_ptr :: proc(array: $T/[]$E) -> ^E {
return raw_data(array);
}
mapper :: proc(s: $S/[]$U, f: proc(U) -> $V, allocator := context.allocator) -> []V {
r := make([]V, len(s), allocator);
for v, i in s {
r[i] = f(v);
}
return r;
}
reduce :: proc(s: $S/[]$U, initializer: $V, f: proc(V, U) -> V) -> V {
r := initializer;
for v in s {
r = f(r, v);
}
return r;
}
filter :: proc(s: $S/[]$U, f: proc(U) -> bool, allocator := context.allocator) -> S {
r := make([dynamic]S, 0, 0, allocator);
for v in s {
if f(v) {
append(&r, v);
}
}
return r[:];
}
dot_product :: proc(a, b: $S/[]$T) -> T
where intrinsics.type_is_numeric(T) {
if len(a) != len(b) {
panic("slice.dot_product: slices of unequal length");
}
r: T;
#no_bounds_check for _, i in a {
r += a[i] * b[i];
}
return r;
}

View File

@@ -54,17 +54,17 @@ reverse_sort :: proc(data: $T/[]$E) where ORD(E) {
// TODO(bill): Should `sort_by_key` exist or is `sort_by` more than enough?
sort_by_key :: proc(data: $T/[]$E, key: proc(E) -> $K) where ORD(K) {
context.user_ptr = rawptr(key);
context._internal = rawptr(key);
sort_by(data, proc(i, j: E) -> bool {
k := (proc(E) -> K)(context.user_ptr);
k := (proc(E) -> K)(context._internal);
return k(i) < k(j);
});
}
reverse_sort_by_key :: proc(data: $T/[]$E, key: proc(E) -> $K) where ORD(K) {
context.user_ptr = rawptr(key);
context._internal = rawptr(key);
sort_by(data, proc(i, j: E) -> bool {
k := (proc(E) -> K)(context.user_ptr);
k := (proc(E) -> K)(context._internal);
return k(j) < k(i);
});
}

View File

@@ -3,15 +3,12 @@ package strings
import "core:mem"
import "core:unicode/utf8"
import "core:strconv"
import "core:io"
Builder_Flush_Proc :: #type proc(b: ^Builder) -> (do_reset: bool);
Builder :: struct {
buf: [dynamic]byte,
// The custom flush procedure allows for the ability to flush the buffer, i.e. write to file
flush_proc: Builder_Flush_Proc,
flush_data: rawptr,
}
make_builder_none :: proc(allocator := context.allocator) -> Builder {
@@ -32,6 +29,61 @@ make_builder :: proc{
make_builder_len_cap,
};
init_builder_none :: proc(b: ^Builder, allocator := context.allocator) {
b.buf = make([dynamic]byte, allocator);
}
init_builder_len :: proc(b: ^Builder, len: int, allocator := context.allocator) {
b.buf = make([dynamic]byte, len, allocator);
}
init_builder_len_cap :: proc(b: ^Builder, len, cap: int, allocator := context.allocator) {
b.buf = make([dynamic]byte, len, cap, allocator);
}
init_builder :: proc{
init_builder_none,
init_builder_len,
init_builder_len_cap,
};
@(private)
_builder_stream_vtable := &io.Stream_VTable{
impl_write = proc(s: io.Stream, p: []byte) -> (n: int, err: io.Error) {
b := (^Builder)(s.stream_data);
n = write_bytes(b, p);
if len(b.buf) == cap(b.buf) {
err = .EOF;
}
return;
},
impl_write_byte = proc(s: io.Stream, c: byte) -> io.Error {
b := (^Builder)(s.stream_data);
_ = write_byte(b, c);
if len(b.buf) == cap(b.buf) {
return .EOF;
}
return nil;
},
impl_size = proc(s: io.Stream) -> i64 {
b := (^Builder)(s.stream_data);
return i64(len(b.buf));
},
impl_destroy = proc(s: io.Stream) -> io.Error {
b := (^Builder)(s.stream_data);
delete(b.buf);
return .None;
},
};
to_stream :: proc(b: ^Builder) -> io.Stream {
return io.Stream{stream_vtable=_builder_stream_vtable, stream_data=b};
}
to_writer :: proc(b: ^Builder) -> io.Writer {
w, _ := io.to_writer(to_stream(b));
return w;
}
@@ -48,24 +100,6 @@ reset_builder :: proc(b: ^Builder) {
clear(&b.buf);
}
flush_builder :: proc(b: ^Builder) -> (was_reset: bool) {
if b.flush_proc != nil {
was_reset = b.flush_proc(b);
if was_reset {
reset_builder(b);
}
}
return;
}
flush_builder_check_space :: proc(b: ^Builder, required: int) -> (was_reset: bool) {
if n := max(cap(b.buf) - len(b.buf), 0); n < required {
was_reset = flush_builder(b);
}
return;
}
builder_from_slice :: proc(backing: []byte) -> Builder {
s := transmute(mem.Raw_Slice)backing;
@@ -94,7 +128,6 @@ builder_space :: proc(b: Builder) -> int {
}
write_byte :: proc(b: ^Builder, x: byte) -> (n: int) {
flush_builder_check_space(b, 1);
if builder_space(b^) > 0 {
append(&b.buf, x);
n += 1;
@@ -105,7 +138,6 @@ write_byte :: proc(b: ^Builder, x: byte) -> (n: int) {
write_bytes :: proc(b: ^Builder, x: []byte) -> (n: int) {
x := x;
for len(x) != 0 {
flush_builder_check_space(b, len(x));
space := builder_space(b^);
if space == 0 {
break; // No need to append
@@ -121,20 +153,56 @@ write_bytes :: proc(b: ^Builder, x: []byte) -> (n: int) {
return;
}
write_rune :: proc(b: ^Builder, r: rune) -> int {
if r < utf8.RUNE_SELF {
return write_byte(b, byte(r));
write_rune_builder :: proc(b: ^Builder, r: rune) -> (int, io.Error) {
return io.write_rune(to_writer(b), r);
}
write_quoted_rune_builder :: proc(b: ^Builder, r: rune) -> (n: int) {
return write_quoted_rune(to_writer(b), r);
}
@(private)
_write_byte :: proc(w: io.Writer, c: byte) -> int {
err := io.write_byte(w, c);
return 1 if err == nil else 0;
}
write_quoted_rune :: proc(w: io.Writer, r: rune) -> (n: int) {
quote := byte('\'');
n += _write_byte(w, quote);
buf, width := utf8.encode_rune(r);
if width == 1 && r == utf8.RUNE_ERROR {
n += _write_byte(w, '\\');
n += _write_byte(w, 'x');
n += _write_byte(w, DIGITS_LOWER[buf[0]>>4]);
n += _write_byte(w, DIGITS_LOWER[buf[0]&0xf]);
} else {
n += write_escaped_rune(w, r, quote);
}
s, n := utf8.encode_rune(r);
write_bytes(b, s[:n]);
return n;
n += _write_byte(w, quote);
return;
}
write_string :: proc(b: ^Builder, s: string) -> (n: int) {
return write_bytes(b, transmute([]byte)s);
write_string :: proc{
write_string_builder,
write_string_writer,
};
write_string_builder :: proc(b: ^Builder, s: string) -> (n: int) {
return write_string_writer(to_writer(b), s);
}
write_string_writer :: proc(w: io.Writer, s: string) -> (n: int) {
n, _ = io.write(w, transmute([]byte)s);
return;
}
pop_byte :: proc(b: ^Builder) -> (r: byte) {
if len(b.buf) == 0 {
return 0;
@@ -156,8 +224,17 @@ pop_rune :: proc(b: ^Builder) -> (r: rune, width: int) {
@(private, static)
DIGITS_LOWER := "0123456789abcdefx";
write_quoted_string :: proc(b: ^Builder, str: string, quote: byte = '"') -> (n: int) {
n += write_byte(b, quote);
write_quoted_string :: proc{
write_quoted_string_builder,
write_quoted_string_writer,
};
write_quoted_string_builder :: proc(b: ^Builder, str: string, quote: byte = '"') -> (n: int) {
return write_quoted_string_writer(to_writer(b), str, quote);
}
write_quoted_string_writer :: proc(w: io.Writer, str: string, quote: byte = '"') -> (n: int) {
n += _write_byte(w, quote);
for width, s := 0, str; len(s) > 0; s = s[width:] {
r := rune(s[0]);
width = 1;
@@ -165,57 +242,75 @@ write_quoted_string :: proc(b: ^Builder, str: string, quote: byte = '"') -> (n:
r, width = utf8.decode_rune_in_string(s);
}
if width == 1 && r == utf8.RUNE_ERROR {
n += write_byte(b, '\\');
n += write_byte(b, 'x');
n += write_byte(b, DIGITS_LOWER[s[0]>>4]);
n += write_byte(b, DIGITS_LOWER[s[0]&0xf]);
n += _write_byte(w, '\\');
n += _write_byte(w, 'x');
n += _write_byte(w, DIGITS_LOWER[s[0]>>4]);
n += _write_byte(w, DIGITS_LOWER[s[0]&0xf]);
continue;
}
n += write_escaped_rune(b, r, quote);
n += write_escaped_rune(w, r, quote);
}
n += write_byte(b, quote);
n += _write_byte(w, quote);
return;
}
write_encoded_rune :: proc{
write_encoded_rune_builder,
write_encoded_rune_writer,
};
write_encoded_rune :: proc(b: ^Builder, r: rune, write_quote := true) -> (n: int) {
write_encoded_rune_builder :: proc(b: ^Builder, r: rune, write_quote := true) -> (n: int) {
return write_encoded_rune_writer(to_writer(b), r, write_quote);
}
write_encoded_rune_writer :: proc(w: io.Writer, r: rune, write_quote := true) -> (n: int) {
if write_quote {
n += write_byte(b, '\'');
n += _write_byte(w, '\'');
}
switch r {
case '\a': n += write_string(b, `\a"`);
case '\b': n += write_string(b, `\b"`);
case '\e': n += write_string(b, `\e"`);
case '\f': n += write_string(b, `\f"`);
case '\n': n += write_string(b, `\n"`);
case '\r': n += write_string(b, `\r"`);
case '\t': n += write_string(b, `\t"`);
case '\v': n += write_string(b, `\v"`);
case '\a': n += write_string(w, `\a"`);
case '\b': n += write_string(w, `\b"`);
case '\e': n += write_string(w, `\e"`);
case '\f': n += write_string(w, `\f"`);
case '\n': n += write_string(w, `\n"`);
case '\r': n += write_string(w, `\r"`);
case '\t': n += write_string(w, `\t"`);
case '\v': n += write_string(w, `\v"`);
case:
if r < 32 {
n += write_string(b, `\x`);
n += write_string(w, `\x`);
buf: [2]byte;
s := strconv.append_bits(buf[:], u64(r), 16, true, 64, strconv.digits, nil);
switch len(s) {
case 0: n += write_string(b, "00");
case 1: n += write_byte(b, '0');
case 2: n += write_string(b, s);
case 0: n += write_string(w, "00");
case 1: n += _write_byte(w, '0');
case 2: n += write_string(w, s);
}
} else {
n += write_rune(b, r);
rn, _ := io.write_rune(w, r);
n += rn;
}
}
if write_quote {
n += write_byte(b, '\'');
n += _write_byte(w, '\'');
}
return;
}
write_escaped_rune :: proc(b: ^Builder, r: rune, quote: byte, html_safe := false) -> (n: int) {
write_escaped_rune :: proc{
write_escaped_rune_builder,
write_escaped_rune_writer,
};
write_escaped_rune_builder :: proc(b: ^Builder, r: rune, quote: byte, html_safe := false) -> (n: int) {
return write_escaped_rune_writer(to_writer(b), r, quote, html_safe);
}
write_escaped_rune_writer :: proc(w: io.Writer, r: rune, quote: byte, html_safe := false) -> (n: int) {
is_printable :: proc(r: rune) -> bool {
if r <= 0xff {
switch r {
@@ -233,54 +328,54 @@ write_escaped_rune :: proc(b: ^Builder, r: rune, quote: byte, html_safe := false
if html_safe {
switch r {
case '<', '>', '&':
n += write_byte(b, '\\');
n += write_byte(b, 'u');
n += _write_byte(w, '\\');
n += _write_byte(w, 'u');
for s := 12; s >= 0; s -= 4 {
n += write_byte(b, DIGITS_LOWER[r>>uint(s) & 0xf]);
n += _write_byte(w, DIGITS_LOWER[r>>uint(s) & 0xf]);
}
return;
}
}
if r == rune(quote) || r == '\\' {
n += write_byte(b, '\\');
n += write_byte(b, byte(r));
n += _write_byte(w, '\\');
n += _write_byte(w, byte(r));
return;
} else if is_printable(r) {
n += write_encoded_rune(b, r, false);
n += write_encoded_rune(w, r, false);
return;
}
switch r {
case '\a': n += write_string(b, `\a`);
case '\b': n += write_string(b, `\b`);
case '\e': n += write_string(b, `\e`);
case '\f': n += write_string(b, `\f`);
case '\n': n += write_string(b, `\n`);
case '\r': n += write_string(b, `\r`);
case '\t': n += write_string(b, `\t`);
case '\v': n += write_string(b, `\v`);
case '\a': n += write_string(w, `\a`);
case '\b': n += write_string(w, `\b`);
case '\e': n += write_string(w, `\e`);
case '\f': n += write_string(w, `\f`);
case '\n': n += write_string(w, `\n`);
case '\r': n += write_string(w, `\r`);
case '\t': n += write_string(w, `\t`);
case '\v': n += write_string(w, `\v`);
case:
switch c := r; {
case c < ' ':
n += write_byte(b, '\\');
n += write_byte(b, 'x');
n += write_byte(b, DIGITS_LOWER[byte(c)>>4]);
n += write_byte(b, DIGITS_LOWER[byte(c)&0xf]);
n += _write_byte(w, '\\');
n += _write_byte(w, 'x');
n += _write_byte(w, DIGITS_LOWER[byte(c)>>4]);
n += _write_byte(w, DIGITS_LOWER[byte(c)&0xf]);
case c > utf8.MAX_RUNE:
c = 0xfffd;
fallthrough;
case c < 0x10000:
n += write_byte(b, '\\');
n += write_byte(b, 'u');
n += _write_byte(w, '\\');
n += _write_byte(w, 'u');
for s := 12; s >= 0; s -= 4 {
n += write_byte(b, DIGITS_LOWER[c>>uint(s) & 0xf]);
n += _write_byte(w, DIGITS_LOWER[c>>uint(s) & 0xf]);
}
case:
n += write_byte(b, '\\');
n += write_byte(b, 'U');
n += _write_byte(w, '\\');
n += _write_byte(w, 'U');
for s := 28; s >= 0; s -= 4 {
n += write_byte(b, DIGITS_LOWER[c>>uint(s) & 0xf]);
n += _write_byte(w, DIGITS_LOWER[c>>uint(s) & 0xf]);
}
}
}

View File

@@ -0,0 +1,269 @@
package strings
import "core:io"
import "core:unicode"
import "core:unicode/utf8"
// to_valid_utf8 returns a copy of s in which each run of invalid UTF-8 bytes
// is replaced by the replacement string; adjacent invalid bytes collapse into
// a single replacement. If s is already valid UTF-8 a plain clone is returned.
to_valid_utf8 :: proc(s, replacement: string, allocator := context.allocator) -> string {
	if len(s) == 0 {
		return "";
	}

	b: Builder;
	init_builder(&b, 0, 0, allocator);

	s := s;
	// First pass: scan for the first genuinely invalid sequence, allocating
	// only when one is found.
	for c, i in s {
		if c != utf8.RUNE_ERROR {
			continue;
		}

		_, w := utf8.decode_rune_in_string(s[i:]);
		if w == 1 {
			// width 1 means a real decode error (a literal U+FFFD in
			// the input decodes with width 3). Copy the valid prefix
			// and fall through to the repair loop below.
			grow_builder(&b, len(s) + len(replacement));
			write_string(&b, s[:i]);
			s = s[i:];
			break;
		}
	}

	// The builder never grew, so no invalid bytes were found.
	if builder_cap(b) == 0 {
		return clone(s, allocator);
	}

	// Second pass: copy runes, emitting one replacement per run of invalid
	// bytes.
	invalid := false; // true while inside a run of invalid bytes
	for i := 0; i < len(s); /**/ {
		c := s[i];
		if c < utf8.RUNE_SELF {
			// ASCII fast path.
			i += 1;
			invalid = false;
			write_byte(&b, c);
			continue;
		}

		_, w := utf8.decode_rune_in_string(s[i:]);
		if w == 1 {
			i += 1;
			if !invalid {
				invalid = true;
				write_string(&b, replacement);
			}
			continue;
		}
		invalid = false;
		write_string(&b, s[i:][:w]);
		i += w;
	}
	return to_string(b);
}
// to_lower returns a newly allocated copy of s with every rune lower-cased.
to_lower :: proc(s: string, allocator := context.allocator) -> string {
	res: Builder;
	init_builder(&res, 0, len(s), allocator);
	for r in s do write_rune_builder(&res, unicode.to_lower(r));
	return to_string(res);
}

// to_upper returns a newly allocated copy of s with every rune upper-cased.
to_upper :: proc(s: string, allocator := context.allocator) -> string {
	res: Builder;
	init_builder(&res, 0, len(s), allocator);
	for r in s do write_rune_builder(&res, unicode.to_upper(r));
	return to_string(res);
}
// is_delimiter reports whether c separates words for the case-conversion
// procedures: '-', '_', or whitespace.
is_delimiter :: proc(c: rune) -> bool {
	switch c {
	case '-', '_':
		return true;
	}
	return is_space(c);
}

// is_separator reports whether r is a non-identifier rune. For ASCII this is
// anything other than a letter, digit, or underscore; for non-ASCII runes
// only whitespace is currently treated as a separator.
is_separator :: proc(r: rune) -> bool {
	if r > 0x7f {
		// TODO(bill): unicode categories
		// if unicode.is_letter(r) || unicode.is_digit(r) {
		// 	return false;
		// }
		return unicode.is_space(r);
	}
	switch r {
	case '0'..'9', 'a'..'z', 'A'..'Z', '_':
		return false;
	}
	return true;
}
// string_case_iterator drives the case-conversion procedures: it walks the
// runes of s and invokes callback once per rune with a one-rune window of
// context (prev, curr, next). The final rune is delivered with next == 0,
// and prev/curr are 0 until two runes have been consumed.
string_case_iterator :: proc(w: io.Writer, s: string, callback: proc(w: io.Writer, prev, curr, next: rune)) {
	prev, curr: rune;
	for next in s {
		if curr == 0 {
			// Still priming the window; nothing to emit yet.
			prev = curr;
			curr = next;
			continue;
		}

		callback(w, prev, curr, next);

		prev = curr;
		curr = next;
	}

	// Flush the last rune (next = 0 signals end of string).
	if len(s) > 0 {
		callback(w, prev, curr, 0);
	}
}
// to_camel_case converts s to lowerCamelCase: delimiters ('-', '_', space)
// are dropped, the rune after a delimiter is upper-cased, and other runes
// keep their case after a lowercase rune but are lower-cased otherwise.
to_lower_camel_case :: to_camel_case;
to_camel_case :: proc(s: string, allocator := context.allocator) -> string {
	s := s;
	s = trim_space(s);
	b: Builder;
	init_builder(&b, 0, len(s), allocator);
	w := to_writer(&b);

	string_case_iterator(w, s, proc(w: io.Writer, prev, curr, next: rune) {
		if !is_delimiter(curr) {
			if is_delimiter(prev) {
				// First rune of a new word: capitalize.
				io.write_rune(w, unicode.to_upper(curr));
			} else if unicode.is_lower(prev) {
				io.write_rune(w, curr);
			} else {
				io.write_rune(w, unicode.to_lower(curr));
			}
		}
	});

	return to_string(b);
}

// to_pascal_case converts s to UpperCamelCase (PascalCase); identical to
// to_camel_case except the very first word is also capitalized (prev == 0).
to_upper_camel_case :: to_pascal_case;
to_pascal_case :: proc(s: string, allocator := context.allocator) -> string {
	s := s;
	s = trim_space(s);
	b: Builder;
	init_builder(&b, 0, len(s), allocator);
	w := to_writer(&b);

	string_case_iterator(w, s, proc(w: io.Writer, prev, curr, next: rune) {
		if !is_delimiter(curr) {
			if is_delimiter(prev) || prev == 0 {
				// Start of a word (including the first): capitalize.
				io.write_rune(w, unicode.to_upper(curr));
			} else if unicode.is_lower(prev) {
				io.write_rune(w, curr);
			} else {
				io.write_rune(w, unicode.to_lower(curr));
			}
		}
	});

	return to_string(b);
}
// to_delimiter_case converts s into words joined by the given delimiter rune,
// with every output rune forced to upper case (all_upper_case = true) or
// lower case. Word boundaries are existing delimiters and lower→upper case
// transitions (so "HTMLParser" splits as "HTML" / "Parser"); runs of input
// delimiters collapse into one output delimiter.
to_delimiter_case :: proc(s: string, delimiter: rune, all_upper_case: bool, allocator := context.allocator) -> string {
	s := s;
	s = trim_space(s);
	b: Builder;
	init_builder(&b, 0, len(s), allocator);
	w := to_writer(&b);

	adjust_case := unicode.to_upper if all_upper_case else unicode.to_lower;

	prev, curr: rune;

	for next in s {
		if is_delimiter(curr) {
			// Collapse consecutive delimiters into a single one.
			if !is_delimiter(prev) {
				io.write_rune(w, delimiter);
			}
		} else if unicode.is_upper(curr) {
			// Case transition marks a new word: aB or ABc.
			if unicode.is_lower(prev) || (unicode.is_upper(prev) && unicode.is_lower(next)) {
				io.write_rune(w, delimiter);
			}
			io.write_rune(w, adjust_case(curr));
		} else if curr != 0 { // curr == 0 only while the window is priming
			io.write_rune(w, adjust_case(curr));
		}

		prev = curr;
		curr = next;
	}

	// Flush the final rune, inserting a delimiter on a trailing case change.
	if len(s) > 0 {
		if unicode.is_upper(curr) && unicode.is_lower(prev) && prev != 0 {
			io.write_rune(w, delimiter);
		}
		io.write_rune(w, adjust_case(curr));
	}

	return to_string(b);
}

// to_snake_case converts s to snake_case.
to_snake_case :: proc(s: string, allocator := context.allocator) -> string {
	return to_delimiter_case(s, '_', false, allocator);
}

// to_upper_snake_case converts s to SCREAMING_SNAKE_CASE.
to_screaming_snake_case :: to_upper_snake_case;
to_upper_snake_case :: proc(s: string, allocator := context.allocator) -> string {
	return to_delimiter_case(s, '_', true, allocator);
}

// to_kebab_case converts s to kebab-case.
to_kebab_case :: proc(s: string, allocator := context.allocator) -> string {
	return to_delimiter_case(s, '-', false, allocator);
}

// to_upper_case converts s to SCREAMING-KEBAB-CASE.
// NOTE(review): despite the name this is the '-'-delimited variant, not a
// plain to_upper — the name is kept for compatibility.
to_upper_case :: proc(s: string, allocator := context.allocator) -> string {
	return to_delimiter_case(s, '-', true, allocator);
}
// to_ada_case converts s to Ada_Case: words joined by '_', each word starting
// with an uppercase rune followed by lowercase. Word boundaries are existing
// delimiters and lower→upper case transitions, as in to_delimiter_case.
to_ada_case :: proc(s: string, allocator := context.allocator) -> string {
	delimiter :: '_';

	s := s;
	s = trim_space(s);
	b: Builder;
	init_builder(&b, 0, len(s), allocator);
	w := to_writer(&b);

	prev, curr: rune;

	for next in s {
		if is_delimiter(curr) {
			// Collapse consecutive delimiters into a single '_'.
			if !is_delimiter(prev) {
				io.write_rune(w, delimiter);
			}
		} else if unicode.is_upper(curr) {
			// Case transition marks a new word: aB or ABc.
			if unicode.is_lower(prev) || (unicode.is_upper(prev) && unicode.is_lower(next)) {
				io.write_rune(w, delimiter);
			}
			io.write_rune(w, unicode.to_upper(curr));
		} else if curr != 0 { // curr == 0 only while the window is priming
			io.write_rune(w, unicode.to_lower(curr));
		}

		prev = curr;
		curr = next;
	}

	// Flush the final rune; a trailing lower→upper transition starts a new
	// one-rune word.
	if len(s) > 0 {
		if unicode.is_upper(curr) && unicode.is_lower(prev) && prev != 0 {
			io.write_rune(w, delimiter);
			io.write_rune(w, unicode.to_upper(curr));
		} else {
			io.write_rune(w, unicode.to_lower(curr));
		}
	}
	return to_string(b);
}

177
core/strings/reader.odin Normal file
View File

@@ -0,0 +1,177 @@
package strings
import "core:io"
import "core:unicode/utf8"
// Reader implements io reading (bytes, runes, seek, write-to) over an
// immutable string, mirroring Go's strings.Reader.
Reader :: struct {
	s:         string, // read-only buffer
	i:         i64,    // current reading index
	prev_rune: int,    // previous reading index of rune or < 0
}

// reader_init resets r to read from the beginning of s.
reader_init :: proc(r: ^Reader, s: string) {
	r.s = s;
	r.i = 0;
	r.prev_rune = -1;
}

// reader_to_stream wraps r in a generic io.Stream backed by _reader_vtable.
// The stream borrows r; r must outlive the stream.
reader_to_stream :: proc(r: ^Reader) -> (s: io.Stream) {
	s.stream_data = r;
	s.stream_vtable = _reader_vtable;
	return;
}

// reader_length returns the number of bytes that remain unread.
reader_length :: proc(r: ^Reader) -> int {
	if r.i >= i64(len(r.s)) {
		return 0;
	}
	return int(i64(len(r.s)) - r.i);
}

// reader_size returns the total length of the underlying string,
// independent of the current read position.
reader_size :: proc(r: ^Reader) -> i64 {
	return i64(len(r.s));
}
// reader_read copies bytes from the current position into p and advances
// the position by the number copied. Returns .EOF once exhausted.
reader_read :: proc(r: ^Reader, p: []byte) -> (n: int, err: io.Error) {
	remaining := i64(len(r.s)) - r.i;
	if remaining <= 0 {
		return 0, .EOF;
	}
	r.prev_rune = -1; // byte-level read invalidates rune unread
	n = copy(p, r.s[r.i:]);
	r.i += i64(n);
	return n, nil;
}

// reader_read_at copies bytes starting at absolute offset off into p without
// moving the reader's position. Reports .EOF when fewer than len(p) bytes
// were available at off (or none at all).
reader_read_at :: proc(r: ^Reader, p: []byte, off: i64) -> (n: int, err: io.Error) {
	switch {
	case off < 0:
		return 0, .Invalid_Offset;
	case off >= i64(len(r.s)):
		return 0, .EOF;
	}

	n = copy(p, r.s[off:]);
	if n < len(p) {
		err = .EOF;
	}
	return;
}
// reader_read_byte returns the next byte, advancing the position by one.
// Returns .EOF when exhausted.
reader_read_byte :: proc(r: ^Reader) -> (byte, io.Error) {
	r.prev_rune = -1; // byte-level read invalidates rune unread
	if r.i >= i64(len(r.s)) {
		return 0, .EOF;
	}
	b := r.s[r.i];
	r.i += 1;
	return b, nil;
}

// reader_unread_byte steps the position back one byte. Fails with
// .Invalid_Unread at the start of the string.
reader_unread_byte :: proc(r: ^Reader) -> io.Error {
	if r.i <= 0 {
		return .Invalid_Unread;
	}
	r.prev_rune = -1;
	r.i -= 1;
	return nil;
}

// reader_read_rune decodes and returns the next UTF-8 rune and its byte
// width, advancing past it. Records the rune's start so that a single
// reader_unread_rune can follow.
reader_read_rune :: proc(r: ^Reader) -> (ch: rune, size: int, err: io.Error) {
	if r.i >= i64(len(r.s)) {
		r.prev_rune = -1;
		return 0, 0, .EOF;
	}
	r.prev_rune = int(r.i);
	if c := r.s[r.i]; c < utf8.RUNE_SELF {
		// ASCII fast path: one byte, no decoding needed.
		r.i += 1;
		return rune(c), 1, nil;
	}
	ch, size = utf8.decode_rune_in_string(r.s[r.i:]);
	r.i += i64(size);
	return;
}

// reader_unread_rune steps back over the rune most recently returned by
// reader_read_rune. Only valid immediately after a successful read_rune;
// any other operation in between clears prev_rune and makes this fail.
reader_unread_rune :: proc(r: ^Reader) -> io.Error {
	if r.i <= 0 {
		return .Invalid_Unread;
	}
	if r.prev_rune < 0 {
		return .Invalid_Unread;
	}
	r.i = i64(r.prev_rune);
	r.prev_rune = -1;
	return nil;
}
// reader_seek sets the position for the next read, interpreted relative to
// whence, and returns the new absolute offset. Seeking past the end is
// permitted (subsequent reads report .EOF); seeking before the start is an
// error.
reader_seek :: proc(r: ^Reader, offset: i64, whence: io.Seek_From) -> (i64, io.Error) {
	r.prev_rune = -1; // seeking invalidates rune unread

	base: i64;
	switch whence {
	case .Start:   base = 0;
	case .Current: base = r.i;
	case .End:     base = i64(len(r.s));
	case:          return 0, .Invalid_Whence;
	}

	pos := base + offset;
	if pos < 0 {
		return 0, .Invalid_Offset;
	}
	r.i = pos;
	return pos, nil;
}
// reader_write_to writes the unread remainder of the string to w, advancing
// the reader past everything written. Returns the number of bytes written;
// an incomplete write without an explicit error reports .Short_Write.
reader_write_to :: proc(r: ^Reader, w: io.Writer) -> (n: i64, err: io.Error) {
	r.prev_rune = -1;
	if r.i >= i64(len(r.s)) {
		return 0, nil; // nothing left to write is not an error
	}
	s := r.s[r.i:];
	m: int;
	m, err = io.write_string(w, s);
	if m > len(s) {
		// A writer must never claim more bytes than it was given.
		// BUG FIX: the panic message previously said "bytes.Reader" — a
		// copy-paste from the bytes package; this is strings.Reader.
		panic("strings.Reader.write_to: invalid io.write_string count");
	}
	r.i += i64(m);
	n = i64(m);
	if m != len(s) && err == nil {
		err = .Short_Write;
	}
	return;
}
// _reader_vtable adapts the reader_* procedures to the generic io.Stream
// interface; each entry simply recovers the ^Reader from stream_data and
// forwards. Shared by every stream created via reader_to_stream.
@(private)
_reader_vtable := &io.Stream_VTable{
	impl_size = proc(s: io.Stream) -> i64 {
		r := (^Reader)(s.stream_data);
		return reader_size(r);
	},
	impl_read = proc(s: io.Stream, p: []byte) -> (n: int, err: io.Error) {
		r := (^Reader)(s.stream_data);
		return reader_read(r, p);
	},
	impl_read_at = proc(s: io.Stream, p: []byte, off: i64) -> (n: int, err: io.Error) {
		r := (^Reader)(s.stream_data);
		return reader_read_at(r, p, off);
	},
	impl_read_byte = proc(s: io.Stream) -> (byte, io.Error) {
		r := (^Reader)(s.stream_data);
		return reader_read_byte(r);
	},
	impl_unread_byte = proc(s: io.Stream) -> io.Error {
		r := (^Reader)(s.stream_data);
		return reader_unread_byte(r);
	},
	impl_read_rune = proc(s: io.Stream) -> (ch: rune, size: int, err: io.Error) {
		r := (^Reader)(s.stream_data);
		return reader_read_rune(r);
	},
	impl_unread_rune = proc(s: io.Stream) -> io.Error {
		r := (^Reader)(s.stream_data);
		return reader_unread_rune(r);
	},
	impl_seek = proc(s: io.Stream, offset: i64, whence: io.Seek_From) -> (i64, io.Error) {
		r := (^Reader)(s.stream_data);
		return reader_seek(r, offset, whence);
	},
	impl_write_to = proc(s: io.Stream, w: io.Writer) -> (n: i64, err: io.Error) {
		r := (^Reader)(s.stream_data);
		return reader_write_to(r, w);
	},
};

View File

@@ -1,5 +1,6 @@
package strings
import "core:io"
import "core:mem"
import "core:unicode"
import "core:unicode/utf8"
@@ -225,7 +226,7 @@ index_byte :: proc(s: string, c: byte) -> int {
return -1;
}
// Returns i1 if c is not present
// Returns -1 if c is not present
last_index_byte :: proc(s: string, c: byte) -> int {
for i := len(s)-1; i >= 0; i -= 1 {
if s[i] == c {
@@ -467,10 +468,12 @@ replace :: proc(s, old, new: string, n: int, allocator := context.allocator) ->
return;
}
@(private) _ascii_space := [256]u8{'\t' = 1, '\n' = 1, '\v' = 1, '\f' = 1, '\r' = 1, ' ' = 1};
is_ascii_space :: proc(r: rune) -> bool {
switch r {
case '\t', '\n', '\v', '\f', '\r', ' ':
return true;
if r < utf8.RUNE_SELF {
return _ascii_space[u8(r)] != 0;
}
return false;
}
@@ -757,7 +760,8 @@ split_multi :: proc(s: string, substrs: []string, skip_empty := false, allocator
// Adjacent invalid bytes are only replaced once
scrub :: proc(s: string, replacement: string, allocator := context.allocator) -> string {
str := s;
b := make_builder(0, len(str), allocator);
b: Builder;
init_builder(&b, 0, len(s), allocator);
has_error := false;
cursor := 0;
@@ -787,207 +791,6 @@ scrub :: proc(s: string, replacement: string, allocator := context.allocator) ->
}
// to_lower returns a newly allocated copy of s in which every rune has been
// mapped through unicode.to_lower.
to_lower :: proc(s: string, allocator := context.allocator) -> string {
	b := make_builder(0, len(s), allocator);
	for r in s {
		write_rune(&b, unicode.to_lower(r));
	}
	return to_string(b);
}
// to_upper returns a newly allocated copy of s in which every rune has been
// mapped through unicode.to_upper.
to_upper :: proc(s: string, allocator := context.allocator) -> string {
	b := make_builder(0, len(s), allocator);
	for r in s {
		write_rune(&b, unicode.to_upper(r));
	}
	return to_string(b);
}
// is_delimiter reports whether c acts as a word delimiter for the
// case-conversion procedures below: '-', '_', or whitespace.
is_delimiter :: proc(c: rune) -> bool {
	return c == '-' || c == '_' || is_space(c);
}
// is_separator reports whether r separates "words".
// For ASCII: anything that is not a letter, digit, or underscore.
// For non-ASCII runes only whitespace is treated as a separator for now;
// full Unicode letter/digit categories are still TODO (see below).
is_separator :: proc(r: rune) -> bool {
	if r <= 0x7f {
		switch r {
		case '0'..'9': return false;
		case 'a'..'z': return false;
		case 'A'..'Z': return false;
		case '_': return false;
		}
		return true;
	}

	// TODO(bill): unicode categories
	// if unicode.is_letter(r) || unicode.is_digit(r) {
	// 	return false;
	// }

	return unicode.is_space(r);
}
// string_case_iterator drives the case-conversion procedures: it invokes
// `callback` once per rune of s with that rune's predecessor and successor
// (prev/next), so the callback can decide on word boundaries.
// At the start prev is 0; after the loop one final call is made with
// next == 0 so the last rune of s is also visited.
string_case_iterator :: proc(b: ^Builder, s: string, callback: proc(b: ^Builder, prev, curr, next: rune)) {
	prev, curr: rune;
	for next in s {
		if curr == 0 {
			// First iteration only primes prev/curr; no callback yet.
			prev = curr;
			curr = next;
			continue;
		}

		callback(b, prev, curr, next);

		prev = curr;
		curr = next;
	}

	if len(s) > 0 {
		// Flush the final rune (next == 0 marks end-of-string).
		callback(b, prev, curr, 0);
	}
}
// to_camel_case converts s to lowerCamelCase: delimiters ('-', '_',
// whitespace) are removed and the rune following a delimiter is upper-cased;
// runs of upper-case letters are otherwise lowered. Leading/trailing
// whitespace is trimmed first.
to_lower_camel_case :: to_camel_case;

to_camel_case :: proc(s: string, allocator := context.allocator) -> string {
	s := s;
	s = trim_space(s);
	b := make_builder(0, len(s), allocator);

	string_case_iterator(&b, s, proc(b: ^Builder, prev, curr, next: rune) {
		if !is_delimiter(curr) {
			if is_delimiter(prev) {
				// Start of a new word: capitalize.
				write_rune(b, unicode.to_upper(curr));
			} else if unicode.is_lower(prev) {
				// Mid-word: keep as-is.
				write_rune(b, curr);
			} else {
				// After upper-case or at start: lower-case.
				write_rune(b, unicode.to_lower(curr));
			}
		}
	});

	return to_string(b);
}
// to_pascal_case converts s to UpperCamelCase (PascalCase). Identical to
// to_camel_case except the very first rune is also capitalized
// (the `prev == 0` start-of-string check below).
to_upper_camel_case :: to_pascal_case;

to_pascal_case :: proc(s: string, allocator := context.allocator) -> string {
	s := s;
	s = trim_space(s);
	b := make_builder(0, len(s), allocator);

	string_case_iterator(&b, s, proc(b: ^Builder, prev, curr, next: rune) {
		if !is_delimiter(curr) {
			if is_delimiter(prev) || prev == 0 {
				// Word start (including the first rune of s): capitalize.
				write_rune(b, unicode.to_upper(curr));
			} else if unicode.is_lower(prev) {
				write_rune(b, curr);
			} else {
				write_rune(b, unicode.to_lower(curr));
			}
		}
	});

	return to_string(b);
}
// to_delimiter_case converts s to a delimiter-separated form: runs of
// existing delimiters collapse to a single `delimiter`, and a `delimiter` is
// inserted at camel-case word boundaries (lower→Upper, or the last capital of
// an acronym followed by a lower-case rune, e.g. "HTMLFile" -> "html_file").
// All letters are forced to upper case when `all_upper_case` is true,
// otherwise to lower case. Leading/trailing whitespace is trimmed first.
to_delimiter_case :: proc(s: string, delimiter: rune, all_upper_case: bool, allocator := context.allocator) -> string {
	s := s;
	s = trim_space(s);
	b := make_builder(0, len(s), allocator);

	adjust_case := unicode.to_upper if all_upper_case else unicode.to_lower;

	prev, curr: rune;

	for next in s {
		if is_delimiter(curr) {
			if !is_delimiter(prev) {
				// Collapse a run of delimiters to a single one.
				write_rune(&b, delimiter);
			}
		} else if unicode.is_upper(curr) {
			if unicode.is_lower(prev) || (unicode.is_upper(prev) && unicode.is_lower(next)) {
				// camelCase boundary or end of an acronym run.
				write_rune(&b, delimiter);
			}
			write_rune(&b, adjust_case(curr));
		} else if curr != 0 {
			write_rune(&b, adjust_case(curr));
		}

		prev = curr;
		curr = next;
	}

	if len(s) > 0 {
		// Flush the final rune, inserting a delimiter if it starts a new word.
		if unicode.is_upper(curr) && unicode.is_lower(prev) && prev != 0 {
			write_rune(&b, delimiter);
		}
		write_rune(&b, adjust_case(curr));
	}

	return to_string(b);
}
// to_snake_case converts s to lower snake_case.
to_snake_case :: proc(s: string, allocator := context.allocator) -> string {
	return to_delimiter_case(s, '_', false, allocator);
}

to_screaming_snake_case :: to_upper_snake_case;

// to_upper_snake_case converts s to SCREAMING_SNAKE_CASE.
to_upper_snake_case :: proc(s: string, allocator := context.allocator) -> string {
	return to_delimiter_case(s, '_', true, allocator);
}

// to_kebab_case converts s to lower kebab-case.
to_kebab_case :: proc(s: string, allocator := context.allocator) -> string {
	return to_delimiter_case(s, '-', false, allocator);
}

// to_upper_case converts s to UPPER-KEBAB-CASE (a.k.a. TRAIN-CASE).
to_upper_case :: proc(s: string, allocator := context.allocator) -> string {
	return to_delimiter_case(s, '-', true, allocator);
}
// to_ada_case converts s to Ada_Case: words separated by '_' with each word's
// first letter upper-cased and the rest lower-cased. Word boundaries are the
// same as in to_delimiter_case (existing delimiters and camel-case
// transitions); the main difference is the per-word capitalization.
to_ada_case :: proc(s: string, allocator := context.allocator) -> string {
	delimiter :: '_';

	s := s;
	s = trim_space(s);
	b := make_builder(0, len(s), allocator);

	prev, curr: rune;

	for next in s {
		if is_delimiter(curr) {
			if !is_delimiter(prev) {
				// Collapse a run of delimiters to a single '_'.
				write_rune(&b, delimiter);
			}
		} else if unicode.is_upper(curr) {
			if unicode.is_lower(prev) || (unicode.is_upper(prev) && unicode.is_lower(next)) {
				// camelCase boundary or end of an acronym run.
				write_rune(&b, delimiter);
			}
			// Word-initial capitals are preserved as upper case.
			write_rune(&b, unicode.to_upper(curr));
		} else if curr != 0 {
			write_rune(&b, unicode.to_lower(curr));
		}

		prev = curr;
		curr = next;
	}

	if len(s) > 0 {
		// Flush the final rune with the same boundary rules.
		if unicode.is_upper(curr) && unicode.is_lower(prev) && prev != 0 {
			write_rune(&b, delimiter);
			write_rune(&b, unicode.to_upper(curr));
		} else {
			write_rune(&b, unicode.to_lower(curr));
		}
	}

	return to_string(b);
}
reverse :: proc(s: string, allocator := context.allocator) -> string {
str := s;
n := len(str);
@@ -1013,7 +816,9 @@ expand_tabs :: proc(s: string, tab_size: int, allocator := context.allocator) ->
return "";
}
b := make_builder(allocator);
b: Builder;
init_builder(&b, allocator);
writer := to_writer(&b);
str := s;
column: int;
@@ -1024,7 +829,7 @@ expand_tabs :: proc(s: string, tab_size: int, allocator := context.allocator) ->
expand := tab_size - column%tab_size;
for i := 0; i < expand; i += 1 {
write_byte(&b, ' ');
io.write_byte(writer, ' ');
}
column += expand;
@@ -1035,7 +840,7 @@ expand_tabs :: proc(s: string, tab_size: int, allocator := context.allocator) ->
column += w;
}
write_rune(&b, r);
io.write_rune(writer, r);
}
str = str[w:];
@@ -1070,12 +875,15 @@ centre_justify :: proc(str: string, length: int, pad: string, allocator := conte
remains := length-1;
pad_len := rune_count(pad);
b := make_builder(allocator);
b: Builder;
init_builder(&b, allocator);
grow_builder(&b, len(str) + (remains/pad_len + 1)*len(pad));
write_pad_string(&b, pad, pad_len, remains/2);
write_string(&b, str);
write_pad_string(&b, pad, pad_len, (remains+1)/2);
w := to_writer(&b);
write_pad_string(w, pad, pad_len, remains/2);
io.write_string(w, str);
write_pad_string(w, pad, pad_len, (remains+1)/2);
return to_string(b);
}
@@ -1090,11 +898,14 @@ left_justify :: proc(str: string, length: int, pad: string, allocator := context
remains := length-1;
pad_len := rune_count(pad);
b := make_builder(allocator);
b: Builder;
init_builder(&b, allocator);
grow_builder(&b, len(str) + (remains/pad_len + 1)*len(pad));
write_string(&b, str);
write_pad_string(&b, pad, pad_len, remains);
w := to_writer(&b);
io.write_string(w, str);
write_pad_string(w, pad, pad_len, remains);
return to_string(b);
}
@@ -1109,86 +920,121 @@ right_justify :: proc(str: string, length: int, pad: string, allocator := contex
remains := length-1;
pad_len := rune_count(pad);
b := make_builder(allocator);
b: Builder;
init_builder(&b, allocator);
grow_builder(&b, len(str) + (remains/pad_len + 1)*len(pad));
write_pad_string(&b, pad, pad_len, remains);
write_string(&b, str);
w := to_writer(&b);
write_pad_string(w, pad, pad_len, remains);
io.write_string(w, str);
return to_string(b);
}
// to_valid_utf8 returns a copy of s with every run of invalid UTF-8 bytes
// replaced by `replacement` (adjacent invalid bytes are replaced only once).
// If s is already valid UTF-8, a plain clone of s is returned.
to_valid_utf8 :: proc(s, replacement: string, allocator := context.allocator) -> string {
	if len(s) == 0 {
		return "";
	}

	b := make_builder_len_cap(0, 0, allocator);

	s := s;
	// First pass: scan for the first invalid sequence. decode_rune returning
	// RUNE_ERROR with width 1 marks a genuinely invalid byte (as opposed to a
	// literal U+FFFD encoded in the input). Only then is the builder grown and
	// the valid prefix copied; otherwise the builder stays empty.
	for c, i in s {
		if c != utf8.RUNE_ERROR {
			continue;
		}

		_, w := utf8.decode_rune_in_string(s[i:]);
		if w == 1 {
			grow_builder(&b, len(s) + len(replacement));
			write_string(&b, s[:i]);
			s = s[i:];
			break;
		}
	}

	// Builder capacity 0 means the first pass found nothing invalid.
	if builder_cap(b) == 0 {
		return clone(s, allocator);
	}

	// Second pass over the remainder: copy valid sequences through, emitting
	// `replacement` once per run of invalid bytes (tracked by `invalid`).
	invalid := false; // previous byte was invalid
	for i := 0; i < len(s); /**/ {
		c := s[i];
		if c < utf8.RUNE_SELF {
			i += 1;
			invalid = false;
			write_byte(&b, c);
			continue;
		}

		_, w := utf8.decode_rune_in_string(s[i:]);
		if w == 1 {
			i += 1;
			if !invalid {
				invalid = true;
				write_string(&b, replacement);
			}
			continue;
		}

		invalid = false;
		write_string(&b, s[i:][:w]);
		i += w;
	}
	return to_string(b);
}
@private
write_pad_string :: proc(b: ^Builder, pad: string, pad_len, remains: int) {
write_pad_string :: proc(w: io.Writer, pad: string, pad_len, remains: int) {
repeats := remains / pad_len;
for i := 0; i < repeats; i += 1 {
write_string(b, pad);
io.write_string(w, pad);
}
n := remains % pad_len;
p := pad;
for i := 0; i < n; i += 1 {
r, w := utf8.decode_rune_in_string(p);
write_rune(b, r);
p = p[w:];
r, width := utf8.decode_rune_in_string(p);
io.write_rune(w, r);
p = p[width:];
}
}
// fields splits the string s around each instance of one or more consecutive white space character, defined by unicode.is_space
// returning a slice of substrings of s or an empty slice if s only contains white space
fields :: proc(s: string, allocator := context.allocator) -> []string #no_bounds_check {
	n := 0;
	was_space := 1;   // 1 while the previous byte was whitespace (start counts as whitespace)
	set_bits := u8(0); // OR of all bytes; >= 0x80 means s contains non-ASCII

	// First pass (assuming ASCII): count fields by counting
	// whitespace -> non-whitespace transitions, and accumulate set_bits to
	// detect whether the ASCII fast path is actually applicable.
	for i in 0..<len(s) {
		r := s[i];
		set_bits |= r;
		is_space := int(_ascii_space[r]);
		// was_space & ~is_space is 1 exactly at a space->non-space transition.
		n += was_space & ~is_space;
		was_space = is_space;
	}

	if set_bits >= utf8.RUNE_SELF {
		// Non-ASCII input: fall back to the rune-wise Unicode version.
		return fields_proc(s, unicode.is_space, allocator);
	}

	if n == 0 {
		return nil;
	}

	// Second pass: slice out the n fields counted above.
	a := make([]string, n, allocator);
	na := 0;
	field_start := 0;
	i := 0;
	// Skip leading whitespace.
	for i < len(s) && _ascii_space[s[i]] != 0 {
		i += 1;
	}
	field_start = i;
	for i < len(s) {
		if _ascii_space[s[i]] == 0 {
			i += 1;
			continue;
		}
		a[na] = s[field_start : i];
		na += 1;
		i += 1;
		// Skip the whitespace run between fields.
		for i < len(s) && _ascii_space[s[i]] != 0 {
			i += 1;
		}
		field_start = i;
	}
	if field_start < len(s) {
		// Final field runs to end-of-string.
		a[na] = s[field_start:];
	}
	return a;
}
// fields_proc splits the string s at each run of unicode code points `ch` satisfying f(ch)
// returns a slice of substrings of s
// If all code points in s satisfy f(ch) or string is empty, an empty slice is returned
//
// fields_proc makes no guarantee about the order in which it calls f(ch)
// it assumes that `f` always returns the same value for a given ch
fields_proc :: proc(s: string, f: proc(rune) -> bool, allocator := context.allocator) -> []string #no_bounds_check {
	substrings := make([dynamic]string, 0, 32, allocator);

	start, end := -1, -1; // start < 0 means "not currently inside a field"
	for r, offset in s {
		end = offset;
		if f(r) {
			if start >= 0 {
				append(&substrings, s[start : end]);
				// -1 could be used, but just speed it up through bitwise not
				// gotta love 2's complement
				start = ~start;
			}
		} else {
			if start < 0 {
				start = end;
			}
		}
	}

	// BUG FIX: a field that reaches end-of-string must run to len(s).
	// `end` holds the byte offset of the *start* of the last rune, so the
	// previous `s[start : end]` silently dropped the final rune of the last
	// field (e.g. splitting "ab" on spaces returned {"a"}).
	if start >= 0 {
		append(&substrings, s[start : len(s)]);
	}

	return substrings[:];
}

View File

@@ -2,60 +2,89 @@ package sync
import "core:mem"
import "core:time"
import "core:intrinsics"
import "intrinsics"
import "core:math/rand"
_, _ :: time, rand;
Channel_Direction :: enum i8 {
Both = 0,
Send = +1,
Recv = -1,
}
Channel :: struct(T: typeid) {
Channel :: struct(T: typeid, Direction := Channel_Direction.Both) {
using _internal: ^Raw_Channel,
}
channel_init :: proc(ch: ^$C/Channel($T), cap := 0, allocator := context.allocator) {
channel_init :: proc(ch: ^$C/Channel($T, $D), cap := 0, allocator := context.allocator) {
context.allocator = allocator;
ch._internal = raw_channel_create(size_of(T), align_of(T), cap);
return;
}
channel_make :: proc($T: typeid, cap := 0, allocator := context.allocator) -> (ch: Channel(T)) {
channel_make :: proc($T: typeid, cap := 0, allocator := context.allocator) -> (ch: Channel(T, .Both)) {
context.allocator = allocator;
ch._internal = raw_channel_create(size_of(T), align_of(T), cap);
return;
}
channel_destroy :: proc(ch: $C/Channel($T)) {
channel_make_send :: proc($T: typeid, cap := 0, allocator := context.allocator) -> (ch: Channel(T, .Send)) {
context.allocator = allocator;
ch._internal = raw_channel_create(size_of(T), align_of(T), cap);
return;
}
channel_make_recv :: proc($T: typeid, cap := 0, allocator := context.allocator) -> (ch: Channel(T, .Recv)) {
context.allocator = allocator;
ch._internal = raw_channel_create(size_of(T), align_of(T), cap);
return;
}
channel_destroy :: proc(ch: $C/Channel($T, $D)) {
raw_channel_destroy(ch._internal);
}
channel_len :: proc(ch: $C/Channel($T)) -> int {
return ch._internal.len;
channel_as_send :: proc(ch: $C/Channel($T, .Both)) -> (res: Channel(T, .Send)) {
res._internal = ch._internal;
return;
}
channel_cap :: proc(ch: $C/Channel($T)) -> int {
return ch._internal.cap;
channel_as_recv :: proc(ch: $C/Channel($T, .Both)) -> (res: Channel(T, .Recv)) {
res._internal = ch._internal;
return;
}
channel_send :: proc(ch: $C/Channel($T), msg: T, loc := #caller_location) {
channel_len :: proc(ch: $C/Channel($T, $D)) -> int {
return ch._internal.len if ch._internal != nil else 0;
}
channel_cap :: proc(ch: $C/Channel($T, $D)) -> int {
return ch._internal.cap if ch._internal != nil else 0;
}
channel_send :: proc(ch: $C/Channel($T, $D), msg: T, loc := #caller_location) where D >= .Both {
msg := msg;
_ = raw_channel_send_impl(ch._internal, &msg, /*block*/true, loc);
}
channel_try_send :: proc(ch: $C/Channel($T), msg: T, loc := #caller_location) -> bool {
channel_try_send :: proc(ch: $C/Channel($T, $D), msg: T, loc := #caller_location) -> bool where D >= .Both {
msg := msg;
return raw_channel_send_impl(ch._internal, &msg, /*block*/false, loc);
}
channel_recv :: proc(ch: $C/Channel($T), loc := #caller_location) -> (msg: T) {
channel_recv :: proc(ch: $C/Channel($T, $D), loc := #caller_location) -> (msg: T) where D <= .Both {
c := ch._internal;
if c == nil {
panic(message="cannot recv message; channel is nil", loc=loc);
}
mutex_lock(&c.mutex);
raw_channel_recv_impl(c, &msg, loc);
mutex_unlock(&c.mutex);
return;
}
channel_try_recv :: proc(ch: $C/Channel($T), loc := #caller_location) -> (msg: T, ok: bool) {
channel_try_recv :: proc(ch: $C/Channel($T, $D), loc := #caller_location) -> (msg: T, ok: bool) where D <= .Both {
c := ch._internal;
if mutex_try_lock(&c.mutex) {
if c != nil && mutex_try_lock(&c.mutex) {
if c.len > 0 {
raw_channel_recv_impl(c, &msg, loc);
ok = true;
@@ -64,7 +93,7 @@ channel_try_recv :: proc(ch: $C/Channel($T), loc := #caller_location) -> (msg: T
}
return;
}
channel_try_recv_ptr :: proc(ch: $C/Channel($T), msg: ^T, loc := #caller_location) -> (ok: bool) {
channel_try_recv_ptr :: proc(ch: $C/Channel($T, $D), msg: ^T, loc := #caller_location) -> (ok: bool) where D <= .Both {
res: T;
res, ok = channel_try_recv(ch, loc);
if ok && msg != nil {
@@ -74,32 +103,32 @@ channel_try_recv_ptr :: proc(ch: $C/Channel($T), msg: ^T, loc := #caller_locatio
}
channel_is_nil :: proc(ch: $C/Channel($T)) -> bool {
channel_is_nil :: proc(ch: $C/Channel($T, $D)) -> bool {
return ch._internal == nil;
}
channel_is_open :: proc(ch: $C/Channel($T)) -> bool {
channel_is_open :: proc(ch: $C/Channel($T, $D)) -> bool {
c := ch._internal;
return c != nil && !c.closed;
}
channel_eq :: proc(a, b: $C/Channel($T)) -> bool {
channel_eq :: proc(a, b: $C/Channel($T, $D)) -> bool {
return a._internal == b._internal;
}
channel_ne :: proc(a, b: $C/Channel($T)) -> bool {
channel_ne :: proc(a, b: $C/Channel($T, $D)) -> bool {
return a._internal != b._internal;
}
channel_can_send :: proc(ch: $C/Channel($T)) -> (ok: bool) {
channel_can_send :: proc(ch: $C/Channel($T, $D)) -> (ok: bool) where D >= .Both {
return raw_channel_can_send(ch._internal);
}
channel_can_recv :: proc(ch: $C/Channel($T)) -> (ok: bool) {
channel_can_recv :: proc(ch: $C/Channel($T, $D)) -> (ok: bool) where D <= .Both {
return raw_channel_can_recv(ch._internal);
}
channel_peek :: proc(ch: $C/Channel($T)) -> int {
channel_peek :: proc(ch: $C/Channel($T, $D)) -> int {
c := ch._internal;
if c == nil {
return -1;
@@ -111,12 +140,12 @@ channel_peek :: proc(ch: $C/Channel($T)) -> int {
}
channel_close :: proc(ch: $C/Channel($T), loc := #caller_location) {
channel_close :: proc(ch: $C/Channel($T, $D), loc := #caller_location) {
raw_channel_close(ch._internal, loc);
}
channel_iterator :: proc(ch: $C/Channel($T)) -> (msg: T, ok: bool) {
channel_iterator :: proc(ch: $C/Channel($T, $D)) -> (msg: T, ok: bool) where D <= .Both {
c := ch._internal;
if c == nil {
return;
@@ -127,12 +156,12 @@ channel_iterator :: proc(ch: $C/Channel($T)) -> (msg: T, ok: bool) {
}
return;
}
channel_drain :: proc(ch: $C/Channel($T)) {
channel_drain :: proc(ch: $C/Channel($T, $D)) where D >= .Both {
raw_channel_drain(ch._internal);
}
channel_move :: proc(dst, src: $C/Channel($T)) {
channel_move :: proc(dst: $C1/Channel($T, $D1) src: $C2/Channel(T, $D2)) where D1 <= .Both, D2 >= .Both {
for msg in channel_iterator(src) {
channel_send(dst, msg);
}
@@ -258,18 +287,19 @@ raw_channel_send_impl :: proc(c: ^Raw_Channel, msg: rawptr, block: bool, loc :=
for c.len >= c.cap {
condition_wait_for(&c.cond);
}
} else if c.len > 0 {
} else if c.len > 0 { // TODO(bill): determine correct behaviour
if !block {
return false;
}
condition_wait_for(&c.cond);
} else if c.len == 0 && !block {
return false;
}
send(c, msg);
condition_signal(&c.cond);
raw_channel_wait_queue_signal(c.recvq);
return true;
}
@@ -509,7 +539,7 @@ select_recv :: proc(channels: ..^Raw_Channel) -> (index: int) {
return;
}
select_recv_msg :: proc(channels: ..$C/Channel($T)) -> (msg: T, index: int) {
select_recv_msg :: proc(channels: ..$C/Channel($T, $D)) -> (msg: T, index: int) {
switch len(channels) {
case 0:
panic("sync: select with no channels");
@@ -535,7 +565,7 @@ select_recv_msg :: proc(channels: ..$C/Channel($T)) -> (msg: T, index: int) {
q.state = &state;
raw_channel_wait_queue_insert(&c.recvq, q);
}
raw_channel_wait_queue_wait_on(&state);
raw_channel_wait_queue_wait_on(&state, SELECT_MAX_TIMEOUT);
for c, i in channels {
q := &queues[i];
raw_channel_wait_queue_remove(&c.recvq, q);
@@ -560,7 +590,7 @@ select_recv_msg :: proc(channels: ..$C/Channel($T)) -> (msg: T, index: int) {
return;
}
select_send_msg :: proc(msg: $T, channels: ..$C/Channel(T)) -> (index: int) {
select_send_msg :: proc(msg: $T, channels: ..$C/Channel(T, $D)) -> (index: int) {
switch len(channels) {
case 0:
panic("sync: select with no channels");
@@ -589,7 +619,7 @@ select_send_msg :: proc(msg: $T, channels: ..$C/Channel(T)) -> (index: int) {
q.state = &state;
raw_channel_wait_queue_insert(&c.recvq, q);
}
raw_channel_wait_queue_wait_on(&state);
raw_channel_wait_queue_wait_on(&state, SELECT_MAX_TIMEOUT);
for c, i in channels {
q := &queues[i];
raw_channel_wait_queue_remove(&c.recvq, q);
@@ -781,16 +811,15 @@ select_try_send :: proc(channels: ..^Raw_Channel) -> (index: int) #no_bounds_che
return;
}
select_try_recv_msg :: proc(channels: ..$C/Channel($T)) -> (msg: T, index: int) {
select_try_recv_msg :: proc(channels: ..$C/Channel($T, $D)) -> (msg: T, index: int) {
switch len(channels) {
case 0:
index = 0;
index = -1;
return;
case 1:
if c := channels[0]; channel_can_recv(c) {
ok: bool;
if msg, ok = channel_try_recv(channels[0]); ok {
index = 0;
msg = channel_recv(c);
return;
}
return;
}
@@ -820,16 +849,14 @@ select_try_recv_msg :: proc(channels: ..$C/Channel($T)) -> (msg: T, index: int)
return;
}
select_try_send_msg :: proc(msg: $T, channels: ..$C/Channel(T)) -> (index: int) {
select_try_send_msg :: proc(msg: $T, channels: ..$C/Channel(T, $D)) -> (index: int) {
index = -1;
switch len(channels) {
case 0:
index = 0;
return;
case 1:
if c := channels[0]; channel_can_send(c) {
if channel_try_send(channels[0], msg) {
index = 0;
channel_send(c, msg);
return;
}
return;
}

35
core/sys/cpu/cpu.odin Normal file
View File

@@ -0,0 +1,35 @@
// Package sys_cpu exposes runtime-detected CPU features, modelled after
// Go's golang.org/x/sys/cpu package. Call init() once; afterwards the
// feature booleans in `x86` reflect the host processor.
package sys_cpu

#assert(ODIN_USE_LLVM_API);

// Cache_Line_Pad pads adjacent globals onto separate cache lines to avoid
// false sharing; _cache_line_size is defined per architecture.
Cache_Line_Pad :: struct {_: [_cache_line_size]byte};

// initialized is set by _init(); it records that feature detection has run.
initialized: bool;

// x86 holds the detected x86/amd64 feature flags; populated by init().
x86: struct {
	_: Cache_Line_Pad,
	has_aes:       bool, // AES hardware implementation (AES NI)
	has_adx:       bool, // Multi-precision add-carry instruction extensions
	has_avx:       bool, // Advanced vector extension
	has_avx2:      bool, // Advanced vector extension 2
	has_bmi1:      bool, // Bit manipulation instruction set 1
	has_bmi2:      bool, // Bit manipulation instruction set 2
	has_erms:      bool, // Enhanced REP for MOVSB and STOSB
	has_fma:       bool, // Fused-multiply-add instructions
	has_os_xsave:  bool, // OS supports XSAVE/XRESTOR for saving/restoring XMM registers.
	has_pclmulqdq: bool, // PCLMULQDQ instruction - most often used for AES-GCM
	has_popcnt:    bool, // Hamming weight instruction POPCNT.
	has_rdrand:    bool, // RDRAND instruction (on-chip random number generator)
	has_rdseed:    bool, // RDSEED instruction (on-chip random number generator)
	has_sse2:      bool, // Streaming SIMD extension 2 (always available on amd64)
	has_sse3:      bool, // Streaming SIMD extension 3
	has_ssse3:     bool, // Supplemental streaming SIMD extension 3
	has_sse41:     bool, // Streaming SIMD extension 4 and 4.1
	has_sse42:     bool, // Streaming SIMD extension 4 and 4.2
	_: Cache_Line_Pad,
};

// init runs the architecture-specific feature detection (_init).
init :: proc() {
	_init();
}

67
core/sys/cpu/cpu_x86.odin Normal file
View File

@@ -0,0 +1,67 @@
//+build 386, amd64
package sys_cpu
_cache_line_size :: 64;
// cpuid executes the CPUID instruction with EAX=ax and ECX=cx and returns
// the resulting EAX/EBX/ECX/EDX registers via inline assembly.
// NOTE(review): the second return value is named `ebc` but holds EBX
// (constraint "={bx}") — presumably a typo for `ebx`; verify before renaming.
cpuid :: proc(ax, cx: u32) -> (eax, ebc, ecx, edx: u32) {
	return expand_to_tuple(asm(u32, u32) -> struct{eax, ebc, ecx, edx: u32} {
		"cpuid",
		"={ax},={bx},={cx},={dx},{ax},{cx}",
	}(ax, cx));
}
// xgetbv executes XGETBV with ECX=0, returning the low (EAX) and high (EDX)
// halves of the XCR0 extended control register; used to check whether the OS
// saves/restores the SSE and AVX register state.
xgetbv :: proc() -> (eax, edx: u32) {
	return expand_to_tuple(asm(u32) -> struct{eax, edx: u32} {
		"xgetbv",
		"={ax},={dx},{cx}",
	}(0));
}
// _init performs x86 CPU feature detection via CPUID and fills in the
// feature booleans of the package-level `x86` struct.
_init :: proc() {
	// is_set reports whether bit `bit` of `value` is set.
	// BUG FIX: the previous body was `hwc&value != 0`, which AND-ed the bit
	// *index* with the register instead of testing that bit — e.g.
	// is_set(26, edx1) tested bits 1, 3 and 4 of EDX rather than bit 26
	// (SSE2), so nearly every feature flag was wrong.
	is_set :: proc(bit: u32, value: u32) -> bool {
		return value & (1 << bit) != 0;
	}

	initialized = true;

	// Leaf 0: EAX holds the highest supported standard CPUID leaf.
	max_id, _, _, _ := cpuid(0, 0);
	if max_id < 1 {
		return;
	}

	// Leaf 1: feature bits in ECX/EDX.
	_, _, ecx1, edx1 := cpuid(1, 0);

	x86.has_sse2      = is_set(26, edx1);

	x86.has_sse3      = is_set(0, ecx1);
	x86.has_pclmulqdq = is_set(1, ecx1);
	x86.has_ssse3     = is_set(9, ecx1);
	x86.has_fma       = is_set(12, ecx1);
	x86.has_sse41     = is_set(19, ecx1);
	x86.has_sse42     = is_set(20, ecx1);
	x86.has_popcnt    = is_set(23, ecx1);
	x86.has_aes       = is_set(25, ecx1);
	x86.has_os_xsave  = is_set(27, ecx1);
	x86.has_rdrand    = is_set(30, ecx1);

	// AVX additionally requires the OS to save/restore XMM (XCR0 bit 1)
	// and YMM (XCR0 bit 2) state.
	os_supports_avx := false;
	if x86.has_os_xsave {
		eax, _ := xgetbv();
		os_supports_avx = is_set(1, eax) && is_set(2, eax);
	}

	x86.has_avx = is_set(28, ecx1) && os_supports_avx;

	if max_id < 7 {
		return;
	}

	// Leaf 7 (sub-leaf 0): extended feature bits in EBX.
	_, ebx7, _, _ := cpuid(7, 0);
	x86.has_bmi1   = is_set(3, ebx7);
	x86.has_avx2   = is_set(5, ebx7) && os_supports_avx;
	x86.has_bmi2   = is_set(8, ebx7);
	x86.has_erms   = is_set(9, ebx7);
	x86.has_rdseed = is_set(18, ebx7);
	x86.has_adx    = is_set(19, ebx7);
}

View File

@@ -14,44 +14,44 @@ PTHREAD_ONCE_SIZE :: 8;
PTHREAD_RWLOCK_SIZE :: 192;
PTHREAD_RWLOCKATTR_SIZE :: 16;
pthread_t :: opaque u64;
pthread_t :: #opaque u64;
pthread_attr_t :: opaque struct #align 16 {
pthread_attr_t :: #opaque struct #align 16 {
sig: c.long,
_: [PTHREAD_ATTR_SIZE] c.char,
};
pthread_cond_t :: opaque struct #align 16 {
pthread_cond_t :: #opaque struct #align 16 {
sig: c.long,
_: [PTHREAD_COND_SIZE] c.char,
};
pthread_condattr_t :: opaque struct #align 16 {
pthread_condattr_t :: #opaque struct #align 16 {
sig: c.long,
_: [PTHREAD_CONDATTR_SIZE] c.char,
};
pthread_mutex_t :: opaque struct #align 16 {
pthread_mutex_t :: #opaque struct #align 16 {
sig: c.long,
_: [PTHREAD_MUTEX_SIZE] c.char,
};
pthread_mutexattr_t :: opaque struct #align 16 {
pthread_mutexattr_t :: #opaque struct #align 16 {
sig: c.long,
_: [PTHREAD_MUTEXATTR_SIZE] c.char,
};
pthread_once_t :: opaque struct #align 16 {
pthread_once_t :: #opaque struct #align 16 {
sig: c.long,
_: [PTHREAD_ONCE_SIZE] c.char,
};
pthread_rwlock_t :: opaque struct #align 16 {
pthread_rwlock_t :: #opaque struct #align 16 {
sig: c.long,
_: [PTHREAD_RWLOCK_SIZE] c.char,
};
pthread_rwlockattr_t :: opaque struct #align 16 {
pthread_rwlockattr_t :: #opaque struct #align 16 {
sig: c.long,
_: [PTHREAD_RWLOCKATTR_SIZE] c.char,
};

View File

@@ -26,32 +26,32 @@ when size_of(int) == 8 {
PTHREAD_BARRIER_T_SIZE :: 20;
}
pthread_cond_t :: opaque struct #align 16 {
pthread_cond_t :: #opaque struct #align 16 {
_: [PTHREAD_COND_T_SIZE] c.char,
};
pthread_mutex_t :: opaque struct #align 16 {
pthread_mutex_t :: #opaque struct #align 16 {
_: [PTHREAD_MUTEX_T_SIZE] c.char,
};
pthread_rwlock_t :: opaque struct #align 16 {
pthread_rwlock_t :: #opaque struct #align 16 {
_: [PTHREAD_RWLOCK_T_SIZE] c.char,
};
pthread_barrier_t :: opaque struct #align 16 {
pthread_barrier_t :: #opaque struct #align 16 {
_: [PTHREAD_BARRIER_T_SIZE] c.char,
};
pthread_attr_t :: opaque struct #align 16 {
pthread_attr_t :: #opaque struct #align 16 {
_: [PTHREAD_ATTR_T_SIZE] c.char,
};
pthread_condattr_t :: opaque struct #align 16 {
pthread_condattr_t :: #opaque struct #align 16 {
_: [PTHREAD_CONDATTR_T_SIZE] c.char,
};
pthread_mutexattr_t :: opaque struct #align 16 {
pthread_mutexattr_t :: #opaque struct #align 16 {
_: [PTHREAD_MUTEXATTR_T_SIZE] c.char,
};
pthread_rwlockattr_t :: opaque struct #align 16 {
pthread_rwlockattr_t :: #opaque struct #align 16 {
_: [PTHREAD_RWLOCKATTR_T_SIZE] c.char,
};
pthread_barrierattr_t :: opaque struct #align 16 {
pthread_barrierattr_t :: #opaque struct #align 16 {
_: [PTHREAD_BARRIERATTR_T_SIZE] c.char,
};

View File

@@ -33,32 +33,32 @@ when size_of(int) == 8 {
PTHREAD_BARRIER_T_SIZE :: 20;
}
pthread_cond_t :: opaque struct #align 16 {
pthread_cond_t :: #opaque struct #align 16 {
_: [PTHREAD_COND_T_SIZE] c.char,
};
pthread_mutex_t :: opaque struct #align 16 {
pthread_mutex_t :: #opaque struct #align 16 {
_: [PTHREAD_MUTEX_T_SIZE] c.char,
};
pthread_rwlock_t :: opaque struct #align 16 {
pthread_rwlock_t :: #opaque struct #align 16 {
_: [PTHREAD_RWLOCK_T_SIZE] c.char,
};
pthread_barrier_t :: opaque struct #align 16 {
pthread_barrier_t :: #opaque struct #align 16 {
_: [PTHREAD_BARRIER_T_SIZE] c.char,
};
pthread_attr_t :: opaque struct #align 16 {
pthread_attr_t :: #opaque struct #align 16 {
_: [PTHREAD_ATTR_T_SIZE] c.char,
};
pthread_condattr_t :: opaque struct #align 16 {
pthread_condattr_t :: #opaque struct #align 16 {
_: [PTHREAD_CONDATTR_T_SIZE] c.char,
};
pthread_mutexattr_t :: opaque struct #align 16 {
pthread_mutexattr_t :: #opaque struct #align 16 {
_: [PTHREAD_MUTEXATTR_T_SIZE] c.char,
};
pthread_rwlockattr_t :: opaque struct #align 16 {
pthread_rwlockattr_t :: #opaque struct #align 16 {
_: [PTHREAD_RWLOCKATTR_T_SIZE] c.char,
};
pthread_barrierattr_t :: opaque struct #align 16 {
pthread_barrierattr_t :: #opaque struct #align 16 {
_: [PTHREAD_BARRIERATTR_T_SIZE] c.char,
};

View File

@@ -8,12 +8,12 @@ foreign kernel32 {
@(link_name="CreateProcessA") create_process_a :: proc(application_name, command_line: cstring,
process_attributes, thread_attributes: ^Security_Attributes,
inherit_handle: Bool, creation_flags: u32, environment: rawptr,
current_direcotry: cstring, startup_info: ^Startup_Info,
current_directory: cstring, startup_info: ^Startup_Info,
process_information: ^Process_Information) -> Bool ---;
@(link_name="CreateProcessW") create_process_w :: proc(application_name, command_line: Wstring,
process_attributes, thread_attributes: ^Security_Attributes,
inherit_handle: Bool, creation_flags: u32, environment: rawptr,
current_direcotry: Wstring, startup_info: ^Startup_Info,
current_directory: Wstring, startup_info: ^Startup_Info,
process_information: ^Process_Information) -> Bool ---;
@(link_name="GetExitCodeProcess") get_exit_code_process :: proc(process: Handle, exit: ^u32) -> Bool ---;
@(link_name="ExitProcess") exit_process :: proc(exit_code: u32) ---;

View File

@@ -0,0 +1,585 @@
package text_scanner
import "core:fmt"
import "core:strings"
import "core:unicode"
import "core:unicode/utf8"
// Position describes a location within the scanned source.
Position :: struct {
	filename: string, // filename, if present
	offset: int, // byte offset, starting @ 0
	line: int, // line number, starting @ 1
	column: int, // column number, starting @ 1 (character count per line)
}

// position_is_valid reports whether pos refers to an actual source location
// (line numbers start at 1, so line 0 marks an invalid/unset position).
position_is_valid :: proc(pos: Position) -> bool {
	return pos.line > 0;
}

// position_to_string formats pos as "filename(line:column)", substituting
// "<input>" for an empty filename; an invalid position yields just the name.
// The result is allocated with `allocator` (temp allocator by default).
position_to_string :: proc(pos: Position, allocator := context.temp_allocator) -> string {
	s := pos.filename;
	if s == "" {
		s = "<input>";
	}
	context.allocator = allocator;
	if position_is_valid(pos) {
		return fmt.aprintf("%s(%d:%d)", s, pos.line, pos.column);
	} else {
		return strings.clone(s);
	}
}
// Token-class results returned by the scanner. Values are negative so they
// never collide with ordinary runes (which are returned as themselves).
EOF :: -1;
Ident :: -2;
Int :: -3;
Float :: -4;
Char :: -5;
String :: -6;
Raw_String :: -7;
Comment :: -8;

// Scan_Flag selects which token classes the scanner recognizes and whether
// comments are returned or skipped.
Scan_Flag :: enum u32 {
	Scan_Idents,
	Scan_Ints,
	Scan_C_Int_Prefixes,
	Scan_Floats,
	Scan_Chars,
	Scan_Strings,
	Scan_Raw_Strings,
	Scan_Comments,
	Skip_Comments,
}
Scan_Flags :: bit_set[Scan_Flag; u32];

// Preset flag sets for Odin-like and C-like token grammars.
Odin_Like_Tokens :: Scan_Flags{.Scan_Idents, .Scan_Ints, .Scan_Floats, .Scan_Chars, .Scan_Strings, .Scan_Raw_Strings, .Scan_Comments, .Skip_Comments};
C_Like_Tokens :: Scan_Flags{.Scan_Idents, .Scan_Ints, .Scan_C_Int_Prefixes, .Scan_Floats, .Scan_Chars, .Scan_Strings, .Scan_Raw_Strings, .Scan_Comments, .Skip_Comments};

// Whitespace bit masks: bit i is set when rune i (< 64) counts as whitespace.
Odin_Whitespace :: 1<<'\t' | 1<<'\n' | 1<<'\r' | 1<<' ';
C_Whitespace :: 1<<'\t' | 1<<'\n' | 1<<'\r' | 1<<'\v' | 1<<'\f' | 1<<' ';
// Scanner tokenizes a source string. Configure `flags`, `whitespace`,
// `error` and `is_ident_rune` after init() and before scanning.
Scanner :: struct {
	src: string, // source text being scanned

	src_pos: int, // byte offset of the next rune to read
	src_end: int, // NOTE(review): not referenced in the visible code — confirm its role

	tok_pos: int, // byte offset of the current token; -1 means no token
	tok_end: int, // byte offset just past the current token

	ch: rune, // most recently peeked rune; -2 = none read yet, EOF at end

	line: int, // current line (1-based)
	column: int, // current column on this line
	prev_line_len: int, // column count of the previous line (set on '\n')
	prev_char_len: int, // byte width of the last rune consumed

	error: proc(s: ^Scanner, msg: string), // optional error callback; default prints to stderr
	error_count: int, // number of errors reported so far

	flags: Scan_Flags, // which token classes to recognize
	whitespace: u64, // whitespace bit mask (see Odin_Whitespace)
	is_ident_rune: proc(ch: rune, i: int) -> bool, // optional identifier-rune predicate

	pos: Position, // position of the most recent token
}
// init resets s to scan `src` and returns s for call chaining.
// Defaults: Odin-like token flags, Odin whitespace set, line counting from 1.
init :: proc(s: ^Scanner, src: string, filename := "") -> ^Scanner {
	s^ = {};
	s.src = src;
	s.pos.filename = filename;
	s.tok_pos = -1; // no current token
	s.ch = -2; // no char read yet, not an EOF
	s.line = 1;
	s.flags = Odin_Like_Tokens;
	s.whitespace = Odin_Whitespace;
	return s;
}
// advance reads and returns the next rune from the source, updating
// src_pos/prev_char_len and the line/column bookkeeping. Returns EOF at the
// end of input. Invalid UTF-8 and NUL bytes are reported through error()
// but still consumed, so scanning can continue.
@(private)
advance :: proc(s: ^Scanner) -> rune {
	if s.src_pos >= len(s.src) {
		s.prev_char_len = 0;
		return EOF;
	}

	// Fast path: a single ASCII byte; fall back to full UTF-8 decoding.
	ch, width := rune(s.src[s.src_pos]), 1;
	if ch >= utf8.RUNE_SELF {
		ch, width = utf8.decode_rune_in_string(s.src[s.src_pos:]);
		if ch == utf8.RUNE_ERROR && width == 1 {
			// RUNE_ERROR with width 1 marks a genuinely invalid byte;
			// consume it and report, returning the replacement rune.
			s.src_pos += width;
			s.prev_char_len = width;
			s.column += 1;
			error(s, "invalid UTF-8 encoding");
			return ch;
		}
	}

	s.src_pos += width;
	s.prev_char_len = width;
	s.column += 1;

	switch ch {
	case 0:
		error(s, "invalid character NUL");
	case '\n':
		// Remember the finished line's length so position math can back up
		// over a newline; restart the column count.
		s.line += 1;
		s.prev_line_len = s.column;
		s.column = 0;
	}

	return ch;
}
// next consumes and returns the next rune of the source (EOF at the end).
// Unlike scan(), it performs no tokenization; it also invalidates the
// current token state (tok_pos) and token position (pos.line = 0).
next :: proc(s: ^Scanner) -> rune {
	s.tok_pos = -1; // no current token
	s.pos.line = 0; // invalidate token position
	ch := peek(s);
	if ch != EOF {
		s.ch = advance(s);
	}
	return ch;
}
// peek returns the next rune without consuming it (EOF at end of input).
// The first call performs the initial read and skips a leading byte-order
// mark (s.ch == -2 marks "nothing read yet").
peek :: proc(s: ^Scanner) -> rune {
	if s.ch == -2 {
		s.ch = advance(s);
		if s.ch == '\ufeff' { // Ignore BOM
			s.ch = advance(s);
		}
	}
	return s.ch;
}
// error reports a scanning error: it bumps error_count, then either invokes
// the user-supplied s.error callback or prints "file(line:column): msg" to
// stderr (falling back to "<input>" for an unnamed source).
error :: proc(s: ^Scanner, msg: string) {
	s.error_count += 1;
	if s.error != nil {
		s.error(s, msg);
		return;
	}
	// Prefer the current token's position; fall back to the scan position.
	p := s.pos;
	if !position_is_valid(p) {
		p = position(s);
	}
	// NOTE: `s` is deliberately re-declared here, shadowing the ^Scanner
	// parameter with the filename string for the remainder of the procedure.
	s := p.filename;
	if s == "" {
		s = "<input>";
	}
	if position_is_valid(p) {
		fmt.eprintf("%s(%d:%d): %s\n", s, p.line, p.column, msg);
	} else {
		fmt.eprintf("%s: %s\n", s, msg);
	}
}

// errorf formats its arguments with the temp allocator and reports the
// result through error().
errorf :: proc(s: ^Scanner, format: string, args: ..any) {
	error(s, fmt.tprintf(format, ..args));
}
// is_ident_rune reports whether ch may appear at index i of an identifier,
// delegating to the user-supplied predicate when one is set. The default
// accepts '_' and letters anywhere, digits only after the first rune
// (note: `&&` binds tighter than `||`, so `i > 0` applies to digits only).
@(private)
is_ident_rune :: proc(s: ^Scanner, ch: rune, i: int) -> bool {
	if s.is_ident_rune != nil {
		return s.is_ident_rune(ch, i);
	}
	return ch == '_' || unicode.is_letter(ch) || unicode.is_digit(ch) && i > 0;
}
// scan_identifier consumes the remaining runes of an identifier (the first
// rune has already been consumed by the caller) and returns the first rune
// that is not part of it.
@(private)
scan_identifier :: proc(s: ^Scanner) -> rune {
	ch := advance(s);
	for i := 1; is_ident_rune(s, ch, i); i += 1 {
		ch = advance(s);
	}
	return ch;
}
// lower maps an ASCII upper-case letter to lower case by setting bit 0x20
// (('a' - 'A') == 0x20); non-letters are returned with that bit set too,
// which is harmless for the digit checks below.
@(private) lower :: proc(ch: rune) -> rune { return ('a' - 'A') | ch; }
// is_decimal reports whether ch is an ASCII decimal digit.
@(private) is_decimal :: proc(ch: rune) -> bool { return '0' <= ch && ch <= '9'; }
// is_hex reports whether ch is an ASCII hexadecimal digit (either case).
@(private) is_hex :: proc(ch: rune) -> bool { return '0' <= ch && ch <= '9' || 'a' <= lower(ch) && lower(ch) <= 'f'; }
@(private)
// scan_number scans an integer or float literal and returns the token class
// (Int or Float) together with the first character after the literal.
// `ch` is the first character of the literal; `seen_dot` is true when the
// caller already consumed a leading '.', in which case only the fractional
// part remains. Supported prefixes: 0b binary, 0o octal, 0z dozenal,
// 0x hexadecimal, 0h hexadecimal float (or C-style 0/0b/0x octal defaults
// when .Scan_C_Int_Prefixes is set). '_' digit separators are accepted.
scan_number :: proc(s: ^Scanner, ch: rune, seen_dot: bool) -> (rune, rune) {
	// lit_name names the literal kind for error messages.
	lit_name :: proc(prefix: rune) -> string {
		switch prefix {
		case 'b': return "binary literal";
		case 'o': return "octal literal";
		case 'z': return "dozenal literal";
		case 'x': return "hexadecimal literal";
		}
		return "decimal literal";
	}
	// digits consumes a run of digits and '_' separators in the given base.
	// digsep bit 0 is set if at least one digit was seen, bit 1 if a '_'
	// separator was seen. For bases <= 10 every decimal digit is consumed;
	// the first digit >= base is recorded in invalid^ (reported later, and
	// only for Int tokens).
	digits :: proc(s: ^Scanner, ch0: rune, base: int, invalid: ^rune) -> (ch: rune, digsep: int) {
		ch = ch0;
		if base <= 10 {
			max := rune('0' + base);
			for is_decimal(ch) || ch == '_' {
				ds := 1;
				if ch == '_' {
					ds = 2;
				} else if ch >= max && invalid^ == 0 {
					invalid^ = ch;
				}
				digsep |= ds;
				ch = advance(s);
			}
		} else {
			for is_hex(ch) || ch == '_' {
				ds := 1;
				if ch == '_' {
					ds = 2;
				}
				digsep |= ds;
				ch = advance(s);
			}
		}
		return;
	}
	ch, seen_dot := ch, seen_dot;
	base := 10;
	prefix := rune(0);
	digsep := 0;
	invalid := rune(0);
	tok: rune;
	ds: int;
	if !seen_dot {
		tok = Int;
		if ch == '0' {
			// A leading zero: determine the base from the prefix character.
			ch = advance(s);
			p := lower(ch);
			if .Scan_C_Int_Prefixes in s.flags {
				// C-style: 0b binary, 0x hex, any other leading zero is octal.
				switch p {
				case 'b':
					ch = advance(s);
					base, prefix = 2, 'b';
				case 'x':
					ch = advance(s);
					base, prefix = 16, 'x';
				case:
					base, prefix = 8, 'o';
					digsep = 1; // Leading zero
				}
			} else {
				// Odin-style prefixes; a bare leading zero stays decimal.
				switch p {
				case 'b':
					ch = advance(s);
					base, prefix = 2, 'b';
				case 'o':
					ch = advance(s);
					base, prefix = 8, 'o';
				case 'd':
					ch = advance(s);
					base, prefix = 10, 'd';
				case 'z':
					ch = advance(s);
					base, prefix = 12, 'z';
				case 'h':
					// 0h: hexadecimal bit pattern of a float; shares the
					// base-16 setup of the 'x' case below.
					tok = Float;
					fallthrough;
				case 'x':
					ch = advance(s);
					base, prefix = 16, 'x';
				case:
					digsep = 1; // Leading zero
				}
			}
		}
		// Integer (mantissa) part.
		ch, ds = digits(s, ch, base, &invalid);
		digsep |= ds;
		if ch == '.' && .Scan_Floats in s.flags {
			ch = advance(s);
			seen_dot = true;
		}
	}
	if seen_dot {
		// Fractional part; only decimal and hexadecimal literals may have
		// a radix point.
		tok = Float;
		if prefix != 0 && prefix != 'x' {
			errorf(s, "invalid radix point in %s", lit_name(prefix));
		}
		ch, ds = digits(s, ch, base, &invalid);
		digsep |= ds;
	}
	if digsep&1 == 0 {
		errorf(s, "%s has no digits", lit_name(prefix));
	}
	// Exponent: 'e' for decimal mantissas, 'p' for hexadecimal ones.
	if e := lower(ch); (e == 'e' || e == 'p') && .Scan_Floats in s.flags {
		switch {
		case e == 'e' && prefix != 0:
			errorf(s, "%q exponent requires decimal mantissa", ch);
		case e == 'p' && prefix != 'x':
			errorf(s, "%q exponent requires hexadecimal mantissa", ch);
		}
		ch = advance(s);
		tok = Float;
		if ch == '+' || ch == '-' {
			ch = advance(s);
		}
		// The exponent itself is always decimal.
		ch, ds = digits(s, ch, 10, nil);
		digsep |= ds;
		if ds&1 == 0 {
			error(s, "exponent has no digits");
		}
	} else if prefix == 'x' && tok == Float {
		error(s, "hexadecimal mantissa requires a 'p' exponent");
	}
	if tok == Int && invalid != 0 {
		errorf(s, "invalid digit %q in %s", invalid, lit_name(prefix));
	}
	if digsep&2 != 0 {
		// A '_' separator was seen: record the token end now, excluding the
		// one-character look-ahead.
		s.tok_end = s.src_pos - s.prev_char_len;
	}
	return tok, ch;
}
@(private)
// scan_string consumes a string or character literal whose opening quote has
// already been consumed, up to and including the closing `quote`.
// It returns the number of characters (not bytes) in the literal, counting
// each escape sequence as one character; scan_char uses this to require
// exactly one. Unterminated literals (newline or EOF before the closing
// quote) and malformed escape sequences are reported via error.
scan_string :: proc(s: ^Scanner, quote: rune) -> (n: int) {
	// digit_val returns the numeric value of ch as a digit in bases up to
	// 16, or 16 if ch is not a valid hexadecimal digit.
	// BUG FIX: previously `case 'a'..'z': return int(v - 'a')`, which mapped
	// 'a' to 0 (instead of 10) and accepted 'g'..'p' as hexadecimal digits
	// and 'a'..'g' as octal digits.
	digit_val :: proc(ch: rune) -> int {
		switch v := lower(ch); v {
		case '0'..'9': return int(v - '0');
		case 'a'..'f': return int(v - 'a' + 10);
		}
		return 16;
	}
	// scan_digits consumes up to n digits of the given base, starting with
	// ch (which counts as the first digit), and reports an error if fewer
	// than n valid digits are present. Returns the first non-digit.
	scan_digits :: proc(s: ^Scanner, ch: rune, base, n: int) -> rune {
		ch, n := ch, n;
		for n > 0 && digit_val(ch) < base {
			ch = advance(s);
			n -= 1;
		}
		if n > 0 {
			error(s, "invalid char escape");
		}
		return ch;
	}
	ch := advance(s);
	for ch != quote {
		if ch == '\n' || ch < 0 {
			// BUG FIX: message previously read "literal no terminated".
			error(s, "literal not terminated");
			return;
		}
		if ch == '\\' {
			ch = advance(s);
			switch ch {
			case quote, 'a', 'b', 'e', 'f', 'n', 'r', 't', 'v', '\\':
				ch = advance(s);
			// BUG FIX: the first octal digit is part of the escape, so pass
			// `ch` rather than `advance(s)` — otherwise "\377" would demand
			// a fourth octal digit and report "invalid char escape".
			case '0'..'7': ch = scan_digits(s, ch, 8, 3);
			case 'x': ch = scan_digits(s, advance(s), 16, 2);
			case 'u': ch = scan_digits(s, advance(s), 16, 4);
			case 'U': ch = scan_digits(s, advance(s), 16, 8);
			case:
				error(s, "invalid char escape");
			}
		} else {
			ch = advance(s);
		}
		n += 1;
	}
	return;
}
@(private)
// scan_raw_string consumes a raw string literal whose opening back-quote has
// already been consumed, up to and including the closing back-quote.
// No escape processing takes place; EOF before the closer is an error.
scan_raw_string :: proc(s: ^Scanner) {
	for {
		ch := advance(s);
		if ch == '`' {
			return;
		}
		if ch < 0 {
			error(s, "literal not terminated");
			return;
		}
	}
}
@(private)
// scan_char consumes a character literal (opening quote already consumed)
// and reports an error unless it contains exactly one character.
scan_char :: proc(s: ^Scanner) {
	if count := scan_string(s, '\''); count != 1 {
		error(s, "invalid char literal");
	}
}
@(private)
// scan_comment consumes a comment; ch is the character after the opening
// '/' ('/' for a line comment, '*' for a block comment). It returns the
// first character following the comment (for a line comment the terminating
// newline is returned, not consumed). An unterminated block comment is
// reported via error.
scan_comment :: proc(s: ^Scanner, ch: rune) -> rune {
	ch := ch;
	if ch == '/' {
		// Line comment: consume until newline or EOF.
		ch = advance(s);
		for ch != '\n' && ch >= 0 {
			ch = advance(s);
		}
		return ch;
	}
	// Block /**/ comment: track the previous character to detect "*/".
	prev := rune(0);
	ch = advance(s);
	for ch >= 0 {
		if prev == '*' && ch == '/' {
			return advance(s);
		}
		prev = ch;
		ch = advance(s);
	}
	error(s, "comment not terminated");
	return ch;
}
// scan reads the next token or Unicode character from the source and
// returns it: one of the class sentinels (Ident, Int, Float, Char, String,
// Raw_String, Comment) when recognition of that class is enabled in
// s.flags, otherwise the character itself. Characters selected by the
// s.whitespace bit set are skipped first. Returns EOF at the end of the
// source; the token's text is available afterwards via token_text.
scan :: proc(s: ^Scanner) -> (tok: rune) {
	ch := peek(s);
	if ch == EOF {
		return ch;
	}
	// reset position
	s.tok_pos = -1;
	s.pos.line = 0;
	// The redo loop only repeats when a comment is skipped and token
	// recognition must restart after it.
	redo: for {
		// Skip whitespace: s.whitespace is a bit set indexed by rune value.
		for s.whitespace & (1<<uint(ch)) != 0 {
			ch = advance(s);
		}
		// ch has already been consumed by advance, so the token starts one
		// look-ahead character back.
		s.tok_pos = s.src_pos - s.prev_char_len;
		s.pos.offset = s.tok_pos;
		if s.column > 0 {
			s.pos.line = s.line;
			s.pos.column = s.column;
		} else {
			// previous character was newline
			s.pos.line = s.line - 1;
			s.pos.column = s.prev_line_len;
		}
		tok = ch;
		if is_ident_rune(s, ch, 0) {
			if .Scan_Idents in s.flags {
				tok = Ident;
				ch = scan_identifier(s);
			} else {
				ch = advance(s);
			}
		} else if is_decimal(ch) {
			// NOTE(review): `>=` on a bit_set is a superset test, so
			// numbers are only scanned when BOTH .Scan_Ints and
			// .Scan_Floats are enabled; the Go text/scanner equivalent
			// scans when either is set — confirm intent.
			if s.flags >= {.Scan_Ints, .Scan_Floats} {
				tok, ch = scan_number(s, ch, false);
			} else {
				ch = advance(s);
			}
		} else {
			switch ch {
			case EOF:
				// Leaves tok == EOF; this break exits the switch only.
				break;
			case '"':
				if .Scan_Strings in s.flags {
					scan_string(s, '"');
					tok = String;
				}
				ch = advance(s);
			case '\'':
				if .Scan_Chars in s.flags {
					scan_string(s, '\'');
					tok = Char;
				}
				ch = advance(s);
			case '`':
				if .Scan_Raw_Strings in s.flags {
					scan_raw_string(s);
					tok = Raw_String;
				}
				ch = advance(s);
			case '.':
				// May start a float literal such as ".5".
				ch = advance(s);
				if is_decimal(ch) && .Scan_Floats in s.flags {
					tok, ch = scan_number(s, ch, true);
				}
			case '/':
				ch = advance(s);
				if (ch == '/' || ch == '*') && .Scan_Comments in s.flags {
					if .Skip_Comments in s.flags {
						// Discard the comment and restart recognition.
						s.tok_pos = -1;
						ch = scan_comment(s, ch);
						continue redo;
					}
					ch = scan_comment(s, ch);
					tok = Comment;
				}
			case:
				ch = advance(s);
			}
		}
		break redo;
	}
	// Record the token end and stash the look-ahead character.
	s.tok_end = s.src_pos - s.prev_char_len;
	s.ch = ch;
	return tok;
}
// position returns the Position of the character immediately after the one
// most recently read. Unlike the raw s.pos it is always valid: before
// anything has been read it reports line 1, column 1.
position :: proc(s: ^Scanner) -> Position {
	pos: Position;
	pos.filename = s.pos.filename;
	pos.offset = s.src_pos - s.prev_char_len;
	if s.column > 0 {
		// Common case: somewhere inside the current line.
		pos.line = s.line;
		pos.column = s.column;
	} else if s.prev_line_len > 0 {
		// The last character read was a newline: report the end of the
		// previous line.
		pos.line = s.line - 1;
		pos.column = s.prev_line_len;
	} else {
		// Nothing has been read yet.
		pos.line = 1;
		pos.column = 1;
	}
	return pos;
}
// token_text returns the source text of the most recently scanned token, or
// "" when no token is available (e.g. after next or a skipped comment).
token_text :: proc(s: ^Scanner) -> string {
	if s.tok_pos >= 0 {
		return string(s.src[s.tok_pos:s.tok_end]);
	}
	return "";
}
// token_string returns a printable representation of a token returned by
// scan: a fixed name for the token-class sentinels, otherwise the quoted
// character itself. The result is allocated with the given allocator
// (temporary allocator by default).
token_string :: proc(tok: rune, allocator := context.temp_allocator) -> string {
	context.allocator = allocator;
	name: string;
	switch tok {
	case EOF:        name = "EOF";
	case Ident:      name = "Ident";
	case Int:        name = "Int";
	case Float:      name = "Float";
	case Char:       name = "Char";
	case String:     name = "String";
	case Raw_String: name = "Raw_String";
	case Comment:    name = "Comment";
	case:            return fmt.aprintf("%q", tok);
	}
	return strings.clone(name);
}

View File

@@ -2,17 +2,26 @@ package thread
import "core:runtime"
import "core:sync"
import "core:intrinsics"
import "core:mem"
import "intrinsics"
_ :: intrinsics;
Thread_Proc :: #type proc(^Thread);
MAX_USER_ARGUMENTS :: 8;
Thread :: struct {
using specific: Thread_Os_Specific,
procedure: Thread_Proc,
data: rawptr,
user_index: int,
using specific: Thread_Os_Specific,
procedure: Thread_Proc,
data: rawptr,
user_index: int,
user_args: [MAX_USER_ARGUMENTS]rawptr,
init_context: Maybe(runtime.Context),
creation_allocator: mem.Allocator,
}
#assert(size_of(Thread{}.user_index) == size_of(uintptr));
@@ -34,17 +43,108 @@ run :: proc(fn: proc(), init_context: Maybe(runtime.Context) = nil, priority :=
run_with_data :: proc(data: rawptr, fn: proc(data: rawptr), init_context: Maybe(runtime.Context) = nil, priority := Thread_Priority.Normal) {
thread_proc :: proc(t: ^Thread) {
fn := cast(proc(rawptr))t.data;
data := rawptr(uintptr(t.user_index));
assert(t.user_index >= 1);
data := t.user_args[0];
fn(data);
destroy(t);
}
t := create(thread_proc, priority);
t.data = rawptr(fn);
t.user_index = int(uintptr(data));
t.user_index = 1;
t.user_args = data;
t.init_context = init_context;
start(t);
}
run_with_poly_data :: proc(data: $T, fn: proc(data: T), init_context: Maybe(runtime.Context) = nil, priority := Thread_Priority.Normal)
where size_of(T) <= size_of(rawptr) {
thread_proc :: proc(t: ^Thread) {
fn := cast(proc(T))t.data;
assert(t.user_index >= 1);
data := (^T)(&t.user_args[0])^;
fn(data);
destroy(t);
}
t := create(thread_proc, priority);
t.data = rawptr(fn);
t.user_index = 1;
data := data;
mem.copy(&t.user_args[0], &data, size_of(data));
t.init_context = init_context;
start(t);
}
run_with_poly_data2 :: proc(arg1: $T1, arg2: $T2, fn: proc(T1, T2), init_context: Maybe(runtime.Context) = nil, priority := Thread_Priority.Normal)
where size_of(T1) <= size_of(rawptr),
size_of(T2) <= size_of(rawptr) {
thread_proc :: proc(t: ^Thread) {
fn := cast(proc(T1, T2))t.data;
assert(t.user_index >= 2);
arg1 := (^T1)(&t.user_args[0])^;
arg2 := (^T2)(&t.user_args[1])^;
fn(arg1, arg2);
destroy(t);
}
t := create(thread_proc, priority);
t.data = rawptr(fn);
t.user_index = 2;
arg1, arg2 := arg1, arg2;
mem.copy(&t.user_args[0], &arg1, size_of(arg1));
mem.copy(&t.user_args[1], &arg2, size_of(arg2));
t.init_context = init_context;
start(t);
}
run_with_poly_data3 :: proc(arg1: $T1, arg2: $T2, arg3: $T3, fn: proc(arg1: T1, arg2: T2, arg3: T3), init_context: Maybe(runtime.Context) = nil, priority := Thread_Priority.Normal)
where size_of(T1) <= size_of(rawptr),
size_of(T2) <= size_of(rawptr),
size_of(T3) <= size_of(rawptr) {
thread_proc :: proc(t: ^Thread) {
fn := cast(proc(T1, T2, T3))t.data;
assert(t.user_index >= 3);
arg1 := (^T1)(&t.user_args[0])^;
arg2 := (^T2)(&t.user_args[1])^;
arg3 := (^T3)(&t.user_args[2])^;
fn(arg1, arg2, arg3);
destroy(t);
}
t := create(thread_proc, priority);
t.data = rawptr(fn);
t.user_index = 3;
arg1, arg2, arg3 := arg1, arg2, arg3;
mem.copy(&t.user_args[0], &arg1, size_of(arg1));
mem.copy(&t.user_args[1], &arg2, size_of(arg2));
mem.copy(&t.user_args[2], &arg3, size_of(arg3));
t.init_context = init_context;
start(t);
}
run_with_poly_data4 :: proc(arg1: $T1, arg2: $T2, arg3: $T3, arg4: $T4, fn: proc(arg1: T1, arg2: T2, arg3: T3, arg4: T4), init_context: Maybe(runtime.Context) = nil, priority := Thread_Priority.Normal)
where size_of(T1) <= size_of(rawptr),
size_of(T2) <= size_of(rawptr),
size_of(T3) <= size_of(rawptr) {
thread_proc :: proc(t: ^Thread) {
fn := cast(proc(T1, T2, T3, T4))t.data;
assert(t.user_index >= 4);
arg1 := (^T1)(&t.user_args[0])^;
arg2 := (^T2)(&t.user_args[1])^;
arg3 := (^T3)(&t.user_args[2])^;
arg4 := (^T4)(&t.user_args[3])^;
fn(arg1, arg2, arg3, arg4);
destroy(t);
}
t := create(thread_proc, priority);
t.data = rawptr(fn);
t.user_index = 4;
arg1, arg2, arg3, arg4 := arg1, arg2, arg3, arg4;
mem.copy(&t.user_args[0], &arg1, size_of(arg1));
mem.copy(&t.user_args[1], &arg2, size_of(arg2));
mem.copy(&t.user_args[2], &arg3, size_of(arg3));
mem.copy(&t.user_args[3], &arg4, size_of(arg4));
t.init_context = init_context;
start(t);
}
create_and_start :: proc(fn: Thread_Proc, init_context: Maybe(runtime.Context) = nil, priority := Thread_Priority.Normal) -> ^Thread {
t := create(fn, priority);

View File

@@ -85,6 +85,7 @@ create :: proc(procedure: Thread_Proc, priority := Thread_Priority.Normal) -> ^T
if thread == nil {
return nil;
}
thread.creation_allocator = context.allocator;
// Set thread priority.
policy: i32;
@@ -106,7 +107,7 @@ create :: proc(procedure: Thread_Proc, priority := Thread_Priority.Normal) -> ^T
sync.mutex_init(&thread.start_mutex);
sync.condition_init(&thread.start_gate, &thread.start_mutex);
if unix.pthread_create(&thread.unix_thread, &attrs, __linux_thread_entry_proc, thread) != 0 {
free(thread);
free(thread, thread.creation_allocator);
return nil;
}
thread.procedure = procedure;
@@ -172,7 +173,7 @@ join_multiple :: proc(threads: ..^Thread) {
destroy :: proc(t: ^Thread) {
join(t);
t.unix_thread = {};
free(t);
free(t, t.creation_allocator);
}

View File

@@ -49,10 +49,14 @@ create :: proc(procedure: Thread_Proc, priority := Thread_Priority.Normal) -> ^T
thread := new(Thread);
if thread == nil {
return nil;
}
thread.creation_allocator = context.allocator;
win32_thread := win32.CreateThread(nil, 0, __windows_thread_entry_proc, thread, win32.CREATE_SUSPENDED, &win32_thread_id);
if win32_thread == nil {
free(thread);
free(thread, thread.creation_allocator);
return nil;
}
thread.procedure = procedure;
@@ -111,7 +115,7 @@ join_multiple :: proc(threads: ..^Thread) {
destroy :: proc(thread: ^Thread) {
join(thread);
free(thread);
free(thread, thread.creation_allocator);
}
terminate :: proc(using thread : ^Thread, exit_code: u32) {

File diff suppressed because it is too large Load Diff

1272
core/unicode/tables.odin Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -350,3 +350,44 @@ rune_size :: proc(r: rune) -> int {
}
return -1;
}
// full_rune reports if the bytes in b begin with a full utf-8 encoding of a rune or not
// An invalid encoding is considered a full rune since it will convert as an error rune of width 1 (RUNE_ERROR)
full_rune :: proc(b: []byte) -> bool {
n := len(b);
if n == 0 {
return false;
}
x := _first[b[0]];
if n >= int(x & 7) {
return true;
}
accept := accept_ranges[x>>4];
if n > 1 && (b[1] < accept.lo || accept.hi < b[1]) {
return true;
} else if n > 2 && (b[2] < LOCB || HICB < b[2]) {
return true;
}
return false;
}
// full_rune_in_string reports if the bytes in s begin with a full utf-8 encoding of a rune or not
// An invalid encoding is considered a full rune since it will convert as an error rune of width 1 (RUNE_ERROR)
full_rune_in_string :: proc(s: string) -> bool {
return full_rune(transmute([]byte)s);
}
_first := [256]u8{
0x00..0x7f = 0xf0, // ascii, size 1
0x80..0xc1 = 0xf1, // invalid, size 1
0xc2..0xdf = 0x02, // accept 1, size 2
0xe0 = 0x13, // accept 1, size 3
0xe1..0xec = 0x03, // accept 0, size 3
0xed = 0x23, // accept 2, size 3
0xee..0xef = 0x03, // accept 0, size 3
0xf0 = 0x34, // accept 3, size 4
0xf1..0xf3 = 0x04, // accept 0, size 4
0xf4 = 0x44, // accept 4, size 4
0xf5..0xff = 0xf1, // ascii, size 1
};

View File

@@ -75,7 +75,7 @@ the_basics :: proc() {
// Binary literals are prefixed with 0b, octal literals with 0o, and hexadecimal
// literals 0x. A leading zero does not produce an octal constant (unlike C).
// In Odin, if a number constant is possible to be represented by a type without
// In Odin, if a numeric constant can be represented by a type without
// precision loss, it will automatically convert to that type.
x: int = 1.0; // A float literal but it can be represented by an integer without precision loss
@@ -85,7 +85,7 @@ the_basics :: proc() {
y = 1; // `1` is an untyped integer literal which can implicitly convert to `int`
z: f64; // `z` is typed of type `f64` (64-bit floating point number)
z = 1; // `1` is an untyped integer literals which can be implicity conver to `f64`
z = 1; // `1` is an untyped integer literal which can be implicitly converted to `f64`
// No need for any suffixes or decimal places like in other languages
// CONSTANTS JUST WORK!!!
@@ -150,7 +150,7 @@ control_flow :: proc() {
i += 1;
}
// If the condition is omitted, this produces an infinite loop:
// If the condition is omitted, an infinite loop is produced:
for {
break;
}

View File

@@ -43,11 +43,96 @@ template <typename T> void array_set_capacity (Array<T> *array, isize capac
template <typename T> Array<T> array_slice (Array<T> const &array, isize lo, isize hi);
template <typename T> Array<T> array_clone (gbAllocator const &a, Array<T> const &array);
template <typename T> void array_ordered_remove (Array<T> *array, isize index);
template <typename T> void array_unordered_remove(Array<T> *array, isize index);
template <typename T> void array_copy(Array<T> *array, Array<T> const &data, isize offset);
template <typename T> void array_copy(Array<T> *array, Array<T> const &data, isize offset, isize count);
template <typename T> T *array_end_ptr(Array<T> *array);
// Slice<T>: a non-owning view over `count` contiguous elements of type T.
// The memory is owned elsewhere (an Array<T>, or the allocator passed to
// slice_make); a Slice itself never allocates or frees.
template <typename T>
struct Slice {
	T *data;
	isize count;

	// Bounds-checked element access; the check is compiled out when
	// NO_ARRAY_BOUNDS_CHECK is defined.
	T &operator[](isize index) {
		#if !defined(NO_ARRAY_BOUNDS_CHECK)
			GB_ASSERT_MSG(0 <= index && index < count, "Index %td is out of bounds ranges 0..<%td", index, count);
		#endif
		return data[index];
	}

	T const &operator[](isize index) const {
		#if !defined(NO_ARRAY_BOUNDS_CHECK)
			GB_ASSERT_MSG(0 <= index && index < count, "Index %td is out of bounds ranges 0..<%td", index, count);
		#endif
		return data[index];
	}
};
template <typename T> Slice<T> slice_from_array(Array<T> const &a);
// slice_make allocates a slice of `count` elements from the given allocator.
// The caller owns the resulting memory.
template <typename T>
Slice<T> slice_make(gbAllocator const &allocator, isize count) {
	Slice<T> s = {};
	s.data = gb_alloc_array(allocator, T, count);
	s.count = count;
	return s;
}
// slice_from_array views an Array's current elements. No copy is made, so
// the slice is invalidated if the array later reallocates.
template <typename T>
Slice<T> slice_from_array(Array<T> const &a) {
	return {a.data, a.count};
}
// slice_clone byte-wise copies a slice's elements into fresh memory from the
// given allocator and returns a slice over the copy.
template <typename T>
Slice<T> slice_clone(gbAllocator const &allocator, Slice<T> const &a) {
	T *data = cast(T *)gb_alloc_copy_align(allocator, a.data, a.count*gb_size_of(T), gb_align_of(T));
	return {data, a.count};
}
// slice_clone_from_array copies an Array's elements (via array_clone) into a
// freshly allocated buffer and returns a slice over that copy.
template <typename T>
Slice<T> slice_clone_from_array(gbAllocator const &allocator, Array<T> const &a) {
	auto c = array_clone(allocator, a);
	return {c.data, c.count};
}
// slice_copy copies all of data's elements into *slice starting at element
// `offset` (byte-wise move, so overlapping ranges are handled).
// NOTE(review): no bounds check — assumes offset + data.count <= slice->count.
template <typename T>
void slice_copy(Slice<T> *slice, Slice<T> const &data, isize offset) {
	gb_memmove(slice->data+offset, data.data, gb_size_of(T)*data.count);
}
// slice_copy (bounded overload): copies at most `count` of data's elements
// into *slice starting at element `offset`.
// NOTE(review): no bounds check against slice->count — confirm callers.
template <typename T>
void slice_copy(Slice<T> *slice, Slice<T> const &data, isize offset, isize count) {
	gb_memmove(slice->data+offset, data.data, gb_size_of(T)*gb_min(data.count, count));
}
// slice_ordered_remove removes the element at `index`, shifting the tail
// down one slot so the relative order of the remaining elements is kept.
// O(n) in the number of elements after index.
template <typename T>
void slice_ordered_remove(Slice<T> *array, isize index) {
	GB_ASSERT(0 <= index && index < array->count);

	isize bytes = gb_size_of(T) * (array->count-(index+1));
	gb_memmove(array->data+index, array->data+index+1, bytes);
	array->count -= 1;
}
// slice_unordered_remove removes the element at `index` by moving the last
// element into its slot. O(1), but the element order is not preserved.
template <typename T>
void slice_unordered_remove(Slice<T> *array, isize index) {
	GB_ASSERT(0 <= index && index < array->count);

	isize n = array->count-1;
	if (index != n) {
		gb_memmove(array->data+index, array->data+n, gb_size_of(T));
	}
	array->count -= 1;
}
template <typename T>
void array_copy(Array<T> *array, Array<T> const &data, isize offset) {

View File

@@ -104,6 +104,37 @@ enum BuildModeKind {
BuildMode_Assembly,
};
enum CommandKind : u32 {
Command_run = 1<<0,
Command_build = 1<<1,
Command_check = 1<<3,
Command_query = 1<<4,
Command_doc = 1<<5,
Command_version = 1<<6,
Command_test = 1<<7,
Command__does_check = Command_run|Command_build|Command_check|Command_query|Command_doc|Command_test,
Command__does_build = Command_run|Command_build|Command_test,
Command_all = ~(u32)0,
};
char const *odin_command_strings[32] = {
"run",
"build",
"check",
"query",
"doc",
"version",
};
enum CmdDocFlag : u32 {
CmdDocFlag_Short = 1<<0,
CmdDocFlag_AllPackages = 1<<1,
};
// This stores the information for the specify architecture of this build
struct BuildContext {
@@ -124,6 +155,7 @@ struct BuildContext {
i64 word_size; // Size of a pointer, must be >= 4
i64 max_align; // max alignment, must be >= 1 (and typically >= word_size)
CommandKind command_kind;
String command;
TargetMetrics metrics;
@@ -143,6 +175,8 @@ struct BuildContext {
bool generate_docs;
i32 optimization_level;
bool show_timings;
bool show_unused;
bool show_unused_with_location;
bool show_more_timings;
bool show_system_calls;
bool keep_temp_files;
@@ -151,6 +185,7 @@ struct BuildContext {
bool no_dynamic_literals;
bool no_output_files;
bool no_crt;
bool no_entry_point;
bool use_lld;
bool vet;
bool cross_compiling;
@@ -165,6 +200,9 @@ struct BuildContext {
bool ignore_microsoft_magic;
bool linker_map_file;
u32 cmd_doc_flags;
Array<String> extra_packages;
QueryDataSetSettings query_data_set_settings;
gbAffinity affinity;
@@ -297,6 +335,19 @@ bool is_excluded_target_filename(String name) {
String original_name = name;
name = remove_extension_from_path(name);
if (string_starts_with(name, str_lit("."))) {
// Ignore .*.odin files
return true;
}
String test_suffix = str_lit("_test");
if (build_context.command_kind != Command_test) {
if (string_ends_with(name, test_suffix) && name != test_suffix) {
// Ignore *_test.odin files
return true;
}
}
String str1 = {};
String str2 = {};
isize n = 0;

View File

@@ -113,7 +113,7 @@ Type *check_init_variable(CheckerContext *ctx, Entity *e, Operand *operand, Stri
return e->type;
}
void check_init_variables(CheckerContext *ctx, Entity **lhs, isize lhs_count, Array<Ast *> const &inits, String context_name) {
void check_init_variables(CheckerContext *ctx, Entity **lhs, isize lhs_count, Slice<Ast *> const &inits, String context_name) {
if ((lhs == nullptr || lhs_count == 0) && inits.count == 0) {
return;
}
@@ -121,8 +121,7 @@ void check_init_variables(CheckerContext *ctx, Entity **lhs, isize lhs_count, Ar
// NOTE(bill): If there is a bad syntax error, rhs > lhs which would mean there would need to be
// an extra allocation
auto operands = array_make<Operand>(ctx->allocator, 0, 2*lhs_count);
defer (array_free(&operands));
auto operands = array_make<Operand>(temporary_allocator(), 0, 2*lhs_count);
check_unpack_arguments(ctx, lhs, lhs_count, &operands, inits, true, false);
isize rhs_count = operands.count;
@@ -317,7 +316,6 @@ void check_type_decl(CheckerContext *ctx, Entity *e, Ast *init_expr, Type *def)
break;
default:
error(e->token, "Only struct types can have custom atom operations");
gb_free(heap_allocator(), ac.atom_op_table);
break;
}
}
@@ -638,7 +636,7 @@ String handle_link_name(CheckerContext *ctx, Token token, String link_name, Stri
error(token, "'link_name' and 'link_prefix' cannot be used together");
} else {
isize len = link_prefix.len + token.string.len;
u8 *name = gb_alloc_array(ctx->allocator, u8, len+1);
u8 *name = gb_alloc_array(permanent_allocator(), u8, len+1);
gb_memmove(name, &link_prefix[0], link_prefix.len);
gb_memmove(name+link_prefix.len, &token.string[0], token.string.len);
name[len] = 0;
@@ -975,7 +973,7 @@ void check_proc_group_decl(CheckerContext *ctx, Entity *pg_entity, DeclInfo *d)
ast_node(pg, ProcGroup, d->init_expr);
pge->entities = array_make<Entity*>(ctx->allocator, 0, pg->args.count);
pge->entities = array_make<Entity*>(permanent_allocator(), 0, pg->args.count);
// NOTE(bill): This must be set here to prevent cycles in checking if someone
// places the entity within itself
@@ -1009,11 +1007,10 @@ void check_proc_group_decl(CheckerContext *ctx, Entity *pg_entity, DeclInfo *d)
continue;
}
if (ptr_set_exists(&entity_set, e)) {
if (ptr_set_update(&entity_set, e)) {
error(arg, "Previous use of `%.*s` in procedure group", LIT(e->token.string));
continue;
}
ptr_set_add(&entity_set, e);
array_add(&pge->entities, e);
}

File diff suppressed because it is too large Load Diff

View File

@@ -15,7 +15,7 @@ bool is_divigering_stmt(Ast *stmt) {
return t->kind == Type_Proc && t->Proc.diverging;
}
void check_stmt_list(CheckerContext *ctx, Array<Ast *> const &stmts, u32 flags) {
void check_stmt_list(CheckerContext *ctx, Slice<Ast *> const &stmts, u32 flags) {
if (stmts.count == 0) {
return;
}
@@ -78,7 +78,7 @@ void check_stmt_list(CheckerContext *ctx, Array<Ast *> const &stmts, u32 flags)
}
}
bool check_is_terminating_list(Array<Ast *> const &stmts, String const &label) {
bool check_is_terminating_list(Slice<Ast *> const &stmts, String const &label) {
// Iterate backwards
for (isize n = stmts.count-1; n >= 0; n--) {
Ast *stmt = stmts[n];
@@ -96,7 +96,7 @@ bool check_is_terminating_list(Array<Ast *> const &stmts, String const &label) {
return false;
}
bool check_has_break_list(Array<Ast *> const &stmts, String const &label, bool implicit) {
bool check_has_break_list(Slice<Ast *> const &stmts, String const &label, bool implicit) {
for_array(i, stmts) {
Ast *stmt = stmts[i];
if (check_has_break(stmt, label, implicit)) {
@@ -641,8 +641,7 @@ void add_constant_switch_case(CheckerContext *ctx, Map<TypeAndToken> *seen, Oper
TypeAndToken *found = map_get(seen, key);
if (found != nullptr) {
isize count = multi_map_count(seen, key);
TypeAndToken *taps = gb_alloc_array(ctx->allocator, TypeAndToken, count);
defer (gb_free(ctx->allocator, taps));
TypeAndToken *taps = gb_alloc_array(temporary_allocator(), TypeAndToken, count);
multi_map_get_all(seen, key, taps);
for (isize i = 0; i < count; i++) {
@@ -859,8 +858,7 @@ void check_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags) {
token.pos = ast_token(ss->body).pos;
token.string = str_lit("true");
x.expr = gb_alloc_item(ctx->allocator, Ast);
x.expr->kind = Ast_Ident;
x.expr = alloc_ast_node(nullptr, Ast_Ident);
x.expr->Ident.token = token;
}
@@ -1025,8 +1023,7 @@ void check_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags) {
GB_ASSERT(is_type_enum(et));
auto fields = et->Enum.fields;
auto unhandled = array_make<Entity *>(ctx->allocator, 0, fields.count);
defer (array_free(&unhandled));
auto unhandled = array_make<Entity *>(temporary_allocator(), 0, fields.count);
for_array(i, fields) {
Entity *f = fields[i];
@@ -1265,8 +1262,7 @@ void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags) {
GB_ASSERT(is_type_union(ut));
auto variants = ut->Union.variants;
auto unhandled = array_make<Type *>(ctx->allocator, 0, variants.count);
defer (array_free(&unhandled));
auto unhandled = array_make<Type *>(temporary_allocator(), 0, variants.count);
for_array(i, variants) {
Type *t = variants[i];
@@ -1433,12 +1429,11 @@ void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) {
return;
}
// NOTE(bill): If there is a bad syntax error, rhs > lhs which would mean there would need to be
// an extra allocation
auto lhs_operands = array_make<Operand>(ctx->allocator, lhs_count);
auto rhs_operands = array_make<Operand>(ctx->allocator, 0, 2*lhs_count);
defer (array_free(&lhs_operands));
defer (array_free(&rhs_operands));
auto lhs_operands = array_make<Operand>(temporary_allocator(), lhs_count);
auto rhs_operands = array_make<Operand>(temporary_allocator(), 0, 2*lhs_count);
for_array(i, as->lhs) {
if (is_blank_ident(as->lhs[i])) {
@@ -1462,8 +1457,7 @@ void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) {
}
}
auto lhs_to_ignore = array_make<bool>(ctx->allocator, lhs_count);
defer (array_free(&lhs_to_ignore));
auto lhs_to_ignore = array_make<bool>(temporary_allocator(), lhs_count);
isize max = gb_min(lhs_count, rhs_count);
// NOTE(bill, 2020-05-02): This is an utter hack to get these custom atom operations working
@@ -1642,8 +1636,7 @@ void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) {
} else if (operands.count != result_count) {
error(node, "Expected %td return values, got %td", result_count, operands.count);
} else {
isize max_count = rs->results.count;
for (isize i = 0; i < max_count; i++) {
for (isize i = 0; i < result_count; i++) {
Entity *e = pt->results->Tuple.variables[i];
check_assignment(ctx, &operands[i], e->type, str_lit("return statement"));
}
@@ -1878,7 +1871,7 @@ void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) {
DeclInfo *d = decl_info_of_entity(e);
GB_ASSERT(d == nullptr);
add_entity(ctx->checker, ctx->scope, e->identifier, e);
d = make_decl_info(ctx->allocator, ctx->scope, ctx->decl);
d = make_decl_info(ctx->scope, ctx->decl);
add_entity_and_decl_info(ctx, e->identifier, e, d);
}
@@ -2036,7 +2029,7 @@ void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) {
case_ast_node(vd, ValueDecl, node);
if (vd->is_mutable) {
Entity **entities = gb_alloc_array(ctx->allocator, Entity *, vd->names.count);
Entity **entities = gb_alloc_array(permanent_allocator(), Entity *, vd->names.count);
isize entity_count = 0;
isize new_name_count = 0;

View File

@@ -1,3 +1,4 @@
ParameterValue handle_parameter_value(CheckerContext *ctx, Type *in_type, Type **out_type_, Ast *expr, bool allow_caller_location);
void populate_using_array_index(CheckerContext *ctx, Ast *node, AstField *field, Type *t, String name, i32 idx) {
t = base_type(t);
@@ -116,7 +117,7 @@ bool does_field_type_allow_using(Type *t) {
return false;
}
void check_struct_fields(CheckerContext *ctx, Ast *node, Array<Entity *> *fields, Array<String> *tags, Array<Ast *> const &params,
void check_struct_fields(CheckerContext *ctx, Ast *node, Array<Entity *> *fields, Array<String> *tags, Slice<Ast *> const &params,
isize init_field_capacity, Type *struct_type, String context) {
*fields = array_make<Entity *>(heap_allocator(), 0, init_field_capacity);
*tags = array_make<String>(heap_allocator(), 0, init_field_capacity);
@@ -388,7 +389,7 @@ void check_struct_type(CheckerContext *ctx, Type *struct_type, Ast *node, Array<
if (st->polymorphic_params != nullptr) {
ast_node(field_list, FieldList, st->polymorphic_params);
Array<Ast *> params = field_list->list;
Slice<Ast *> params = field_list->list;
if (params.count != 0) {
isize variable_count = 0;
for_array(i, params) {
@@ -399,7 +400,7 @@ void check_struct_type(CheckerContext *ctx, Type *struct_type, Ast *node, Array<
}
}
auto entities = array_make<Entity *>(ctx->allocator, 0, variable_count);
auto entities = array_make<Entity *>(permanent_allocator(), 0, variable_count);
for_array(i, params) {
Ast *param = params[i];
@@ -408,32 +409,50 @@ void check_struct_type(CheckerContext *ctx, Type *struct_type, Ast *node, Array<
}
ast_node(p, Field, param);
Ast *type_expr = p->type;
Ast *default_value = unparen_expr(p->default_value);
Type *type = nullptr;
bool is_type_param = false;
bool is_type_polymorphic_type = false;
if (type_expr == nullptr) {
if (type_expr == nullptr && default_value == nullptr) {
error(param, "Expected a type for this parameter");
continue;
}
if (type_expr->kind == Ast_Ellipsis) {
type_expr = type_expr->Ellipsis.expr;
error(param, "A polymorphic parameter cannot be variadic");
}
if (type_expr->kind == Ast_TypeidType) {
is_type_param = true;
Type *specialization = nullptr;
if (type_expr->TypeidType.specialization != nullptr) {
Ast *s = type_expr->TypeidType.specialization;
specialization = check_type(ctx, s);
if (type_expr != nullptr) {
if (type_expr->kind == Ast_Ellipsis) {
type_expr = type_expr->Ellipsis.expr;
error(param, "A polymorphic parameter cannot be variadic");
}
type = alloc_type_generic(ctx->scope, 0, str_lit(""), specialization);
} else {
type = check_type(ctx, type_expr);
if (is_type_polymorphic(type)) {
is_type_polymorphic_type = true;
if (type_expr->kind == Ast_TypeidType) {
is_type_param = true;
Type *specialization = nullptr;
if (type_expr->TypeidType.specialization != nullptr) {
Ast *s = type_expr->TypeidType.specialization;
specialization = check_type(ctx, s);
}
type = alloc_type_generic(ctx->scope, 0, str_lit(""), specialization);
} else {
type = check_type(ctx, type_expr);
if (is_type_polymorphic(type)) {
is_type_polymorphic_type = true;
}
}
}
ParameterValue param_value = {};
if (default_value != nullptr) {
Type *out_type = nullptr;
param_value = handle_parameter_value(ctx, type, &out_type, default_value, false);
if (type == nullptr && out_type != nullptr) {
type = out_type;
}
if (param_value.kind != ParameterValue_Constant && param_value.kind != ParameterValue_Nil) {
error(default_value, "Invalid parameter value");
param_value = {};
}
}
if (type == nullptr) {
error(params[i], "Invalid parameter type");
type = t_invalid;
@@ -471,7 +490,14 @@ void check_struct_type(CheckerContext *ctx, Type *struct_type, Ast *node, Array<
Token token = name->Ident.token;
if (poly_operands != nullptr) {
Operand operand = (*poly_operands)[entities.count];
Operand operand = {};
operand.type = t_invalid;
if (entities.count < poly_operands->count) {
operand = (*poly_operands)[entities.count];
} else if (param_value.kind != ParameterValue_Invalid) {
operand.mode = Addressing_Constant;
operand.value = param_value.value;
}
if (is_type_param) {
if (is_type_polymorphic(base_type(operand.type))) {
is_polymorphic = true;
@@ -486,6 +512,7 @@ void check_struct_type(CheckerContext *ctx, Type *struct_type, Ast *node, Array<
}
if (e == nullptr) {
e = alloc_entity_constant(scope, token, operand.type, operand.value);
e->Constant.param_value = param_value;
}
}
} else {
@@ -493,7 +520,8 @@ void check_struct_type(CheckerContext *ctx, Type *struct_type, Ast *node, Array<
e = alloc_entity_type_name(scope, token, type);
e->TypeName.is_type_alias = true;
} else {
e = alloc_entity_constant(scope, token, type, empty_exact_value);
e = alloc_entity_constant(scope, token, type, param_value.value);
e->Constant.param_value = param_value;
}
}
@@ -568,7 +596,7 @@ void check_union_type(CheckerContext *ctx, Type *union_type, Ast *node, Array<Op
Entity *using_index_expr = nullptr;
auto variants = array_make<Type *>(ctx->allocator, 0, variant_count);
auto variants = array_make<Type *>(permanent_allocator(), 0, variant_count);
union_type->Union.scope = ctx->scope;
@@ -579,7 +607,7 @@ void check_union_type(CheckerContext *ctx, Type *union_type, Ast *node, Array<Op
if (ut->polymorphic_params != nullptr) {
ast_node(field_list, FieldList, ut->polymorphic_params);
Array<Ast *> params = field_list->list;
Slice<Ast *> params = field_list->list;
if (params.count != 0) {
isize variable_count = 0;
for_array(i, params) {
@@ -590,7 +618,7 @@ void check_union_type(CheckerContext *ctx, Type *union_type, Ast *node, Array<Op
}
}
auto entities = array_make<Entity *>(ctx->allocator, 0, variable_count);
auto entities = array_make<Entity *>(permanent_allocator(), 0, variable_count);
for_array(i, params) {
Ast *param = params[i];
@@ -599,29 +627,45 @@ void check_union_type(CheckerContext *ctx, Type *union_type, Ast *node, Array<Op
}
ast_node(p, Field, param);
Ast *type_expr = p->type;
Ast *default_value = unparen_expr(p->default_value);
Type *type = nullptr;
bool is_type_param = false;
bool is_type_polymorphic_type = false;
if (type_expr == nullptr) {
if (type_expr == nullptr && default_value == nullptr) {
error(param, "Expected a type for this parameter");
continue;
}
if (type_expr->kind == Ast_Ellipsis) {
type_expr = type_expr->Ellipsis.expr;
error(param, "A polymorphic parameter cannot be variadic");
}
if (type_expr->kind == Ast_TypeidType) {
is_type_param = true;
Type *specialization = nullptr;
if (type_expr->TypeidType.specialization != nullptr) {
Ast *s = type_expr->TypeidType.specialization;
specialization = check_type(ctx, s);
if (type_expr != nullptr) {
if (type_expr->kind == Ast_Ellipsis) {
type_expr = type_expr->Ellipsis.expr;
error(param, "A polymorphic parameter cannot be variadic");
}
type = alloc_type_generic(ctx->scope, 0, str_lit(""), specialization);
} else {
type = check_type(ctx, type_expr);
if (is_type_polymorphic(type)) {
is_type_polymorphic_type = true;
if (type_expr->kind == Ast_TypeidType) {
is_type_param = true;
Type *specialization = nullptr;
if (type_expr->TypeidType.specialization != nullptr) {
Ast *s = type_expr->TypeidType.specialization;
specialization = check_type(ctx, s);
}
type = alloc_type_generic(ctx->scope, 0, str_lit(""), specialization);
} else {
type = check_type(ctx, type_expr);
if (is_type_polymorphic(type)) {
is_type_polymorphic_type = true;
}
}
}
ParameterValue param_value = {};
if (default_value != nullptr) {
Type *out_type = nullptr;
param_value = handle_parameter_value(ctx, type, &out_type, default_value, false);
if (type == nullptr && out_type != nullptr) {
type = out_type;
}
if (param_value.kind != ParameterValue_Constant && param_value.kind != ParameterValue_Nil) {
error(default_value, "Invalid parameter value");
param_value = {};
}
}
@@ -662,7 +706,14 @@ void check_union_type(CheckerContext *ctx, Type *union_type, Ast *node, Array<Op
Token token = name->Ident.token;
if (poly_operands != nullptr) {
Operand operand = (*poly_operands)[entities.count];
Operand operand = {};
operand.type = t_invalid;
if (entities.count < poly_operands->count) {
operand = (*poly_operands)[entities.count];
} else if (param_value.kind != ParameterValue_Invalid) {
operand.mode = Addressing_Constant;
operand.value = param_value.value;
}
if (is_type_param) {
GB_ASSERT(operand.mode == Addressing_Type ||
operand.mode == Addressing_Invalid);
@@ -675,6 +726,7 @@ void check_union_type(CheckerContext *ctx, Type *union_type, Ast *node, Array<Op
} else {
// GB_ASSERT(operand.mode == Addressing_Constant);
e = alloc_entity_constant(scope, token, operand.type, operand.value);
e->Constant.param_value = param_value;
}
} else {
if (is_type_param) {
@@ -682,6 +734,7 @@ void check_union_type(CheckerContext *ctx, Type *union_type, Ast *node, Array<Op
e->TypeName.is_type_alias = true;
} else {
e = alloc_entity_constant(scope, token, type, empty_exact_value);
e->Constant.param_value = param_value;
}
}
@@ -816,7 +869,7 @@ void check_enum_type(CheckerContext *ctx, Type *enum_type, Type *named_type, Ast
enum_type->Enum.base_type = base_type;
enum_type->Enum.scope = ctx->scope;
auto fields = array_make<Entity *>(ctx->allocator, 0, et->fields.count);
auto fields = array_make<Entity *>(permanent_allocator(), 0, et->fields.count);
Type *constant_type = enum_type;
if (named_type != nullptr) {
@@ -933,9 +986,9 @@ void check_bit_field_type(CheckerContext *ctx, Type *bit_field_type, Ast *node)
ast_node(bft, BitFieldType, node);
GB_ASSERT(is_type_bit_field(bit_field_type));
auto fields = array_make<Entity*>(ctx->allocator, 0, bft->fields.count);
auto sizes = array_make<u32> (ctx->allocator, 0, bft->fields.count);
auto offsets = array_make<u32> (ctx->allocator, 0, bft->fields.count);
auto fields = array_make<Entity*>(permanent_allocator(), 0, bft->fields.count);
auto sizes = array_make<u32> (permanent_allocator(), 0, bft->fields.count);
auto offsets = array_make<u32> (permanent_allocator(), 0, bft->fields.count);
scope_reserve(ctx->scope, bft->fields.count);
@@ -1337,7 +1390,7 @@ Type *determine_type_from_polymorphic(CheckerContext *ctx, Type *poly_type, Oper
if (is_polymorphic_type_assignable(ctx, poly_type, operand.type, false, modify_type)) {
if (show_error) {
set_procedure_abi_types(ctx->allocator, poly_type);
set_procedure_abi_types(poly_type);
}
return poly_type;
}
@@ -1463,7 +1516,7 @@ Type *check_get_params(CheckerContext *ctx, Scope *scope, Ast *_params, bool *is
bool success = true;
ast_node(field_list, FieldList, _params);
Array<Ast *> params = field_list->list;
Slice<Ast *> params = field_list->list;
if (params.count == 0) {
if (success_) *success_ = success;
@@ -1496,7 +1549,7 @@ Type *check_get_params(CheckerContext *ctx, Scope *scope, Ast *_params, bool *is
bool is_variadic = false;
isize variadic_index = -1;
bool is_c_vararg = false;
auto variables = array_make<Entity *>(ctx->allocator, 0, variable_count);
auto variables = array_make<Entity *>(permanent_allocator(), 0, variable_count);
for_array(i, params) {
Ast *param = params[i];
if (param->kind != Ast_Field) {
@@ -1822,7 +1875,7 @@ Type *check_get_results(CheckerContext *ctx, Scope *scope, Ast *_results) {
return nullptr;
}
ast_node(field_list, FieldList, _results);
Array<Ast *> results = field_list->list;
Slice<Ast *> results = field_list->list;
if (results.count == 0) {
return nullptr;
@@ -1838,7 +1891,7 @@ Type *check_get_results(CheckerContext *ctx, Scope *scope, Ast *_results) {
}
}
auto variables = array_make<Entity *>(ctx->allocator, 0, variable_count);
auto variables = array_make<Entity *>(permanent_allocator(), 0, variable_count);
for_array(i, results) {
ast_node(field, Field, results[i]);
Ast *default_value = unparen_expr(field->default_value);
@@ -2209,6 +2262,11 @@ Type *type_to_abi_compat_param_type(gbAllocator a, Type *original_type, ProcCall
return new_type;
}
if (is_type_proc(original_type)) {
// NOTE(bill): Force a cast to prevent a possible type cycle
return t_rawptr;
}
if (cc == ProcCC_None || cc == ProcCC_PureNone || cc == ProcCC_InlineAsm) {
return new_type;
}
@@ -2221,7 +2279,11 @@ Type *type_to_abi_compat_param_type(gbAllocator a, Type *original_type, ProcCall
return new_type;
}
if (build_context.ODIN_ARCH == "amd64") {
if (is_type_integer_128bit(original_type)) {
bool is_128 = is_type_integer_128bit(original_type);
if (!is_128 && is_type_bit_set(original_type) && type_size_of(original_type) == 16) {
// is_128 = true;
}
if (is_128) {
if (build_context.ODIN_OS == "windows") {
return alloc_type_simd_vector(2, t_u64);
} else {
@@ -2332,6 +2394,11 @@ Type *type_to_abi_compat_result_type(gbAllocator a, Type *original_type, ProcCal
return new_type;
}
if (is_type_proc(single_type)) {
// NOTE(bill): Force a cast to prevent a possible type cycle
return t_rawptr;
}
if (is_type_simd_vector(single_type)) {
return new_type;
}
@@ -2445,16 +2512,21 @@ bool abi_compat_return_by_pointer(gbAllocator a, ProcCallingConvention cc, Type
return false;
}
void set_procedure_abi_types(gbAllocator allocator, Type *type) {
void set_procedure_abi_types(Type *type) {
type = base_type(type);
if (type->kind != Type_Proc) {
return;
}
if (type->Proc.abi_types_set) {
if (type->Proc.abi_types_set || type->flags & TypeFlag_InProcessOfCheckingABI) {
return;
}
gbAllocator allocator = permanent_allocator();
u32 flags = type->flags;
type->flags |= TypeFlag_InProcessOfCheckingABI;
type->Proc.abi_compat_params = array_make<Type *>(allocator, cast(isize)type->Proc.param_count);
for (i32 i = 0; i < type->Proc.param_count; i++) {
Entity *e = type->Proc.params->Tuple.variables[i];
@@ -2466,7 +2538,7 @@ void set_procedure_abi_types(gbAllocator allocator, Type *type) {
case ProcCC_Odin:
case ProcCC_Contextless:
case ProcCC_Pure:
if (is_type_pointer(new_type) & !is_type_pointer(e->type)) {
if (is_type_pointer(new_type) && !is_type_pointer(e->type) && !is_type_proc(e->type)) {
e->flags |= EntityFlag_ImplicitReference;
}
break;
@@ -2474,7 +2546,7 @@ void set_procedure_abi_types(gbAllocator allocator, Type *type) {
if (build_context.ODIN_OS == "linux" ||
build_context.ODIN_OS == "darwin") {
if (is_type_pointer(new_type) & !is_type_pointer(e->type)) {
if (is_type_pointer(new_type) & !is_type_pointer(e->type) && !is_type_proc(e->type)) {
e->flags |= EntityFlag_ByVal;
}
}
@@ -2484,13 +2556,13 @@ void set_procedure_abi_types(gbAllocator allocator, Type *type) {
for (i32 i = 0; i < type->Proc.param_count; i++) {
Entity *e = type->Proc.params->Tuple.variables[i];
if (e->kind == Entity_Variable) {
set_procedure_abi_types(allocator, e->type);
set_procedure_abi_types(e->type);
}
}
for (i32 i = 0; i < type->Proc.result_count; i++) {
Entity *e = type->Proc.results->Tuple.variables[i];
if (e->kind == Entity_Variable) {
set_procedure_abi_types(allocator, e->type);
set_procedure_abi_types(e->type);
}
}
@@ -2499,6 +2571,7 @@ void set_procedure_abi_types(gbAllocator allocator, Type *type) {
type->Proc.return_by_pointer = abi_compat_return_by_pointer(allocator, type->Proc.calling_convention, type->Proc.abi_compat_result_type);
type->Proc.abi_types_set = true;
type->flags = flags;
}
// NOTE(bill): 'operands' is for generating non generic procedure type
@@ -2711,30 +2784,29 @@ void init_map_entry_type(Type *type) {
if (type->Map.entry_type != nullptr) return;
// NOTE(bill): The preload types may have not been set yet
GB_ASSERT(t_map_key != nullptr);
gbAllocator a = heap_allocator();
GB_ASSERT(t_map_hash != nullptr);
Type *entry_type = alloc_type_struct();
/*
struct {
hash: __MapKey;
next: int;
key: Key;
value: Value;
hash: runtime.Map_Hash,
next: int,
key: Key,
value: Value,
}
*/
Ast *dummy_node = alloc_ast_node(nullptr, Ast_Invalid);
Scope *s = create_scope(builtin_pkg->scope, a);
Scope *s = create_scope(builtin_pkg->scope);
auto fields = array_make<Entity *>(a, 0, 3);
array_add(&fields, alloc_entity_field(s, make_token_ident(str_lit("key")), t_map_key, false, 0, EntityState_Resolved));
array_add(&fields, alloc_entity_field(s, make_token_ident(str_lit("next")), t_int, false, 1, EntityState_Resolved));
array_add(&fields, alloc_entity_field(s, make_token_ident(str_lit("value")), type->Map.value, false, 2, EntityState_Resolved));
auto fields = array_make<Entity *>(permanent_allocator(), 0, 4);
array_add(&fields, alloc_entity_field(s, make_token_ident(str_lit("hash")), t_uintptr, false, cast(i32)fields.count, EntityState_Resolved));
array_add(&fields, alloc_entity_field(s, make_token_ident(str_lit("next")), t_int, false, cast(i32)fields.count, EntityState_Resolved));
array_add(&fields, alloc_entity_field(s, make_token_ident(str_lit("key")), type->Map.key, false, cast(i32)fields.count, EntityState_Resolved));
array_add(&fields, alloc_entity_field(s, make_token_ident(str_lit("value")), type->Map.value, false, cast(i32)fields.count, EntityState_Resolved));
entry_type->Struct.fields = fields;
// type_set_offsets(a, entry_type);
type->Map.entry_type = entry_type;
}
@@ -2757,15 +2829,14 @@ void init_map_internal_types(Type *type) {
entries: [dynamic]EntryType;
}
*/
gbAllocator a = heap_allocator();
Ast *dummy_node = alloc_ast_node(nullptr, Ast_Invalid);
Scope *s = create_scope(builtin_pkg->scope, a);
Scope *s = create_scope(builtin_pkg->scope);
Type *hashes_type = alloc_type_slice(t_int);
Type *entries_type = alloc_type_dynamic_array(type->Map.entry_type);
auto fields = array_make<Entity *>(a, 0, 2);
auto fields = array_make<Entity *>(permanent_allocator(), 0, 2);
array_add(&fields, alloc_entity_field(s, make_token_ident(str_lit("hashes")), hashes_type, false, 0, EntityState_Resolved));
array_add(&fields, alloc_entity_field(s, make_token_ident(str_lit("entries")), entries_type, false, 1, EntityState_Resolved));
@@ -2777,6 +2848,47 @@ void init_map_internal_types(Type *type) {
type->Map.lookup_result_type = make_optional_ok_type(value);
}
void add_map_key_type_dependencies(CheckerContext *ctx, Type *key) {
	// Registers the 'runtime' package hashing procedures that a map keyed on
	// 'key' will require, so they are kept in the minimum dependency set.
	// Recurses into aggregate keys (structs and array-like types) because
	// their element/field types may need their own specialized hashers.
	key = core_type(key);

	if (is_type_cstring(key)) {
		add_package_dependency(ctx, "runtime", "default_hasher_cstring");
	} else if (is_type_string(key)) {
		add_package_dependency(ctx, "runtime", "default_hasher_string");
	} else if (!is_type_polymorphic(key)) {
		if (!is_type_comparable(key)) {
			// Incomparable keys cannot be hashed; nothing to register here
			// (the invalid-key error is reported by the map type checker).
			return;
		}

		if (is_type_simple_compare(key)) {
			// Memcmp-comparable keys: sizes 1..16 map to specialized hashers
			// named "default_hasher<N>"; anything else falls back to the
			// generic byte-count hasher.
			i64 sz = type_size_of(key);
			if (1 <= sz && sz <= 16) {
				char buf[20] = {};
				gb_snprintf(buf, 20, "default_hasher%d", cast(i32)sz);
				add_package_dependency(ctx, "runtime", buf);
				return;
			} else {
				add_package_dependency(ctx, "runtime", "default_hasher_n");
				return;
			}
		}

		if (key->kind == Type_Struct) {
			add_package_dependency(ctx, "runtime", "default_hasher_n");
			// Field types may themselves require specialized hashers.
			for_array(i, key->Struct.fields) {
				Entity *field = key->Struct.fields[i];
				add_map_key_type_dependencies(ctx, field->type);
			}
		} else if (key->kind == Type_EnumeratedArray) {
			add_package_dependency(ctx, "runtime", "default_hasher_n");
			add_map_key_type_dependencies(ctx, key->EnumeratedArray.elem);
		} else if (key->kind == Type_Array) {
			add_package_dependency(ctx, "runtime", "default_hasher_n");
			add_map_key_type_dependencies(ctx, key->Array.elem);
		}
	}
}
void check_map_type(CheckerContext *ctx, Type *type, Ast *node) {
GB_ASSERT(type->kind == Type_Map);
ast_node(mt, MapType, node);
@@ -2793,16 +2905,16 @@ void check_map_type(CheckerContext *ctx, Type *type, Ast *node) {
gb_string_free(str);
}
}
if (type_size_of(key) == 0) {
gbString str = type_to_string(key);
error(node, "Invalid type of a key for a map of size 0, got '%s'", str);
gb_string_free(str);
}
type->Map.key = key;
type->Map.value = value;
if (is_type_string(key)) {
add_package_dependency(ctx, "runtime", "default_hash_string");
} else {
add_package_dependency(ctx, "runtime", "default_hash_ptr");
}
add_map_key_type_dependencies(ctx, key);
init_core_map_type(ctx->checker);
init_map_internal_types(type);
@@ -2833,7 +2945,7 @@ Type *make_soa_struct_fixed(CheckerContext *ctx, Ast *array_typ_expr, Ast *elem_
soa_struct->Struct.soa_elem = elem;
soa_struct->Struct.soa_count = count;
scope = create_scope(ctx->scope, ctx->allocator);
scope = create_scope(ctx->scope);
soa_struct->Struct.scope = scope;
String params_xyzw[4] = {
@@ -2866,7 +2978,7 @@ Type *make_soa_struct_fixed(CheckerContext *ctx, Ast *array_typ_expr, Ast *elem_
soa_struct->Struct.soa_elem = elem;
soa_struct->Struct.soa_count = count;
scope = create_scope(old_struct->Struct.scope->parent, ctx->allocator);
scope = create_scope(old_struct->Struct.scope->parent);
soa_struct->Struct.scope = scope;
for_array(i, old_struct->Struct.fields) {
@@ -2927,7 +3039,7 @@ Type *make_soa_struct_slice(CheckerContext *ctx, Ast *array_typ_expr, Ast *elem_
soa_struct->Struct.soa_count = 0;
soa_struct->Struct.is_polymorphic = true;
scope = create_scope(ctx->scope, ctx->allocator);
scope = create_scope(ctx->scope);
soa_struct->Struct.scope = scope;
} else if (is_type_array(elem)) {
Type *old_array = base_type(elem);
@@ -2941,7 +3053,7 @@ Type *make_soa_struct_slice(CheckerContext *ctx, Ast *array_typ_expr, Ast *elem_
soa_struct->Struct.soa_elem = elem;
soa_struct->Struct.soa_count = 0;
scope = create_scope(ctx->scope, ctx->allocator);
scope = create_scope(ctx->scope);
soa_struct->Struct.scope = scope;
String params_xyzw[4] = {
@@ -2977,7 +3089,7 @@ Type *make_soa_struct_slice(CheckerContext *ctx, Ast *array_typ_expr, Ast *elem_
soa_struct->Struct.soa_elem = elem;
soa_struct->Struct.soa_count = 0;
scope = create_scope(old_struct->Struct.scope->parent, ctx->allocator);
scope = create_scope(old_struct->Struct.scope->parent);
soa_struct->Struct.scope = scope;
for_array(i, old_struct->Struct.fields) {
@@ -3044,7 +3156,7 @@ Type *make_soa_struct_dynamic_array(CheckerContext *ctx, Ast *array_typ_expr, As
soa_struct->Struct.soa_count = 0;
soa_struct->Struct.is_polymorphic = true;
scope = create_scope(ctx->scope, ctx->allocator);
scope = create_scope(ctx->scope);
soa_struct->Struct.scope = scope;
} else if (is_type_array(elem)) {
Type *old_array = base_type(elem);
@@ -3058,7 +3170,7 @@ Type *make_soa_struct_dynamic_array(CheckerContext *ctx, Ast *array_typ_expr, As
soa_struct->Struct.soa_elem = elem;
soa_struct->Struct.soa_count = 0;
scope = create_scope(ctx->scope, ctx->allocator);
scope = create_scope(ctx->scope);
soa_struct->Struct.scope = scope;
String params_xyzw[4] = {
@@ -3093,7 +3205,7 @@ Type *make_soa_struct_dynamic_array(CheckerContext *ctx, Ast *array_typ_expr, As
soa_struct->Struct.soa_elem = elem;
soa_struct->Struct.soa_count = 0;
scope = create_scope(old_struct->Struct.scope->parent, ctx->allocator);
scope = create_scope(old_struct->Struct.scope->parent);
soa_struct->Struct.scope = scope;
for_array(i, old_struct->Struct.fields) {

View File

@@ -187,8 +187,8 @@ void init_decl_info(DeclInfo *d, Scope *scope, DeclInfo *parent) {
array_init (&d->labels, heap_allocator());
}
DeclInfo *make_decl_info(gbAllocator a, Scope *scope, DeclInfo *parent) {
DeclInfo *d = gb_alloc_item(a, DeclInfo);
DeclInfo *make_decl_info(Scope *scope, DeclInfo *parent) {
DeclInfo *d = gb_alloc_item(permanent_allocator(), DeclInfo);
init_decl_info(d, scope, parent);
return d;
}
@@ -219,8 +219,8 @@ bool decl_info_has_init(DeclInfo *d) {
Scope *create_scope(Scope *parent, gbAllocator allocator, isize init_elements_capacity=DEFAULT_SCOPE_CAPACITY) {
Scope *s = gb_alloc_item(allocator, Scope);
Scope *create_scope(Scope *parent, isize init_elements_capacity=DEFAULT_SCOPE_CAPACITY) {
Scope *s = gb_alloc_item(permanent_allocator(), Scope);
s->parent = parent;
string_map_init(&s->elements, heap_allocator(), init_elements_capacity);
ptr_set_init(&s->imported, heap_allocator(), 0);
@@ -244,7 +244,7 @@ Scope *create_scope_from_file(CheckerContext *c, AstFile *f) {
GB_ASSERT(f->pkg != nullptr);
GB_ASSERT(f->pkg->scope != nullptr);
Scope *s = create_scope(f->pkg->scope, c->allocator);
Scope *s = create_scope(f->pkg->scope);
array_reserve(&s->delayed_imports, f->imports.count);
array_reserve(&s->delayed_directives, f->directive_count);
@@ -264,7 +264,7 @@ Scope *create_scope_from_package(CheckerContext *c, AstPackage *pkg) {
decl_count += pkg->files[i]->decls.count;
}
isize init_elements_capacity = 2*decl_count;
Scope *s = create_scope(builtin_pkg->scope, c->allocator, init_elements_capacity);
Scope *s = create_scope(builtin_pkg->scope, init_elements_capacity);
s->flags |= ScopeFlag_Pkg;
s->pkg = pkg;
@@ -324,7 +324,7 @@ void check_open_scope(CheckerContext *c, Ast *node) {
GB_ASSERT(node->kind == Ast_Invalid ||
is_ast_stmt(node) ||
is_ast_type(node));
Scope *scope = create_scope(c->scope, c->allocator);
Scope *scope = create_scope(c->scope);
add_scope(c, node, scope);
switch (node->kind) {
case Ast_ProcType:
@@ -368,9 +368,14 @@ void scope_lookup_parent(Scope *scope, String const &name, Scope **scope_, Entit
if (e->kind == Entity_Label) {
continue;
}
if (e->kind == Entity_Variable &&
!(e->scope->flags&ScopeFlag_File)) {
continue;
if (e->kind == Entity_Variable) {
if (e->scope->flags&ScopeFlag_File) {
				// Global variables are fine to access
} else if (e->flags&EntityFlag_Static) {
// Allow static/thread_local variables to be referenced
} else {
continue;
}
}
}
@@ -690,31 +695,33 @@ void add_global_type_entity(String name, Type *type) {
void init_universal(void) {
BuildContext *bc = &build_context;
// NOTE(bill): No need to free these
gbAllocator a = heap_allocator();
// gbAllocator a = heap_allocator();
gbAllocator a = permanent_allocator();
builtin_pkg = gb_alloc_item(a, AstPackage);
builtin_pkg->name = str_lit("builtin");
builtin_pkg->kind = Package_Normal;
builtin_pkg->scope = create_scope(nullptr, a);
builtin_pkg->scope->flags |= ScopeFlag_Pkg | ScopeFlag_Global;
builtin_pkg->scope = create_scope(nullptr);
builtin_pkg->scope->flags |= ScopeFlag_Pkg | ScopeFlag_Global | ScopeFlag_Builtin;
builtin_pkg->scope->pkg = builtin_pkg;
intrinsics_pkg = gb_alloc_item(a, AstPackage);
intrinsics_pkg->name = str_lit("intrinsics");
intrinsics_pkg->kind = Package_Normal;
intrinsics_pkg->scope = create_scope(nullptr, a);
intrinsics_pkg->scope->flags |= ScopeFlag_Pkg | ScopeFlag_Global;
intrinsics_pkg->scope = create_scope(nullptr);
intrinsics_pkg->scope->flags |= ScopeFlag_Pkg | ScopeFlag_Global | ScopeFlag_Builtin;
intrinsics_pkg->scope->pkg = intrinsics_pkg;
config_pkg = gb_alloc_item(a, AstPackage);
config_pkg->name = str_lit("config");
config_pkg->kind = Package_Normal;
config_pkg->scope = create_scope(nullptr, a);
config_pkg->scope->flags |= ScopeFlag_Pkg | ScopeFlag_Global;
config_pkg->scope = create_scope(nullptr);
config_pkg->scope->flags |= ScopeFlag_Pkg | ScopeFlag_Global | ScopeFlag_Builtin;
config_pkg->scope->pkg = config_pkg;
@@ -724,6 +731,18 @@ void init_universal(void) {
}
add_global_type_entity(str_lit("byte"), &basic_types[Basic_u8]);
{
void set_procedure_abi_types(Type *type);
Type *equal_args[2] = {t_rawptr, t_rawptr};
t_equal_proc = alloc_type_proc_from_types(equal_args, 2, t_bool, false, ProcCC_Contextless);
set_procedure_abi_types(t_equal_proc);
Type *hasher_args[2] = {t_rawptr, t_uintptr};
t_hasher_proc = alloc_type_proc_from_types(hasher_args, 2, t_uintptr, false, ProcCC_Contextless);
set_procedure_abi_types(t_hasher_proc);
}
// Constants
add_global_constant(str_lit("true"), t_untyped_bool, exact_value_bool(true));
add_global_constant(str_lit("false"), t_untyped_bool, exact_value_bool(false));
@@ -742,6 +761,7 @@ void init_universal(void) {
add_global_constant(str_lit("ODIN_DEFAULT_TO_NIL_ALLOCATOR"), t_untyped_bool, exact_value_bool(bc->ODIN_DEFAULT_TO_NIL_ALLOCATOR));
add_global_constant(str_lit("ODIN_USE_LLVM_API"), t_untyped_bool, exact_value_bool(bc->use_llvm_api));
add_global_constant(str_lit("ODIN_NO_DYNAMIC_LITERALS"), t_untyped_bool, exact_value_bool(bc->no_dynamic_literals));
add_global_constant(str_lit("ODIN_TEST"), t_untyped_bool, exact_value_bool(bc->command_kind == Command_test));
// Builtin Procedures
@@ -837,6 +857,8 @@ void init_checker_info(CheckerInfo *i) {
array_init(&i->variable_init_order, a);
array_init(&i->required_foreign_imports_through_force, a);
array_init(&i->required_global_variables, a);
array_init(&i->testing_procedures, a, 0, 0);
i->allow_identifier_uses = build_context.query_data_set_settings.kind == QueryDataSet_GoToDefinitions;
if (i->allow_identifier_uses) {
@@ -870,7 +892,6 @@ CheckerContext make_checker_context(Checker *c) {
CheckerContext ctx = c->init_ctx;
ctx.checker = c;
ctx.info = &c->info;
ctx.allocator = c->allocator;
ctx.scope = builtin_pkg->scope;
ctx.pkg = builtin_pkg;
@@ -895,6 +916,7 @@ bool init_checker(Checker *c, Parser *parser) {
gbAllocator a = heap_allocator();
init_checker_info(&c->info);
c->info.checker = c;
array_init(&c->procs_to_check, a);
array_init(&c->procs_with_deferred_to_check, a);
@@ -904,8 +926,6 @@ bool init_checker(Checker *c, Parser *parser) {
isize total_token_count = c->parser->total_token_count;
isize arena_size = 2 * item_size * total_token_count;
c->allocator = heap_allocator();
c->init_ctx = make_checker_context(c);
return true;
}
@@ -1502,11 +1522,10 @@ void add_min_dep_type_info(Checker *c, Type *t) {
ti_index = type_info_index(&c->info, t, false);
}
GB_ASSERT(ti_index >= 0);
if (ptr_set_exists(set, ti_index)) {
if (ptr_set_update(set, ti_index)) {
// Type Already exists
return;
}
ptr_set_add(set, ti_index);
// Add nested types
if (t->kind == Type_Named) {
@@ -1680,8 +1699,6 @@ void add_dependency_to_set(Checker *c, Entity *entity) {
CheckerInfo *info = &c->info;
auto *set = &info->minimum_dependency_set;
String name = entity->token.string;
if (entity->type != nullptr &&
is_type_polymorphic(entity->type)) {
@@ -1691,12 +1708,10 @@ void add_dependency_to_set(Checker *c, Entity *entity) {
}
}
if (ptr_set_exists(set, entity)) {
if (ptr_set_update(set, entity)) {
return;
}
ptr_set_add(set, entity);
DeclInfo *decl = decl_info_of_entity(entity);
if (decl == nullptr) {
return;
@@ -1715,16 +1730,15 @@ void add_dependency_to_set(Checker *c, Entity *entity) {
if (fl != nullptr) {
GB_ASSERT_MSG(fl->kind == Entity_LibraryName &&
(fl->flags&EntityFlag_Used),
"%.*s", LIT(name));
"%.*s", LIT(entity->token.string));
add_dependency_to_set(c, fl);
}
}
if (e->kind == Entity_Variable && e->Variable.is_foreign) {
} else if (e->kind == Entity_Variable && e->Variable.is_foreign) {
Entity *fl = e->Variable.foreign_library;
if (fl != nullptr) {
GB_ASSERT_MSG(fl->kind == Entity_LibraryName &&
(fl->flags&EntityFlag_Used),
"%.*s", LIT(name));
"%.*s", LIT(entity->token.string));
add_dependency_to_set(c, fl);
}
}
@@ -1733,7 +1747,10 @@ void add_dependency_to_set(Checker *c, Entity *entity) {
void generate_minimum_dependency_set(Checker *c, Entity *start) {
ptr_set_init(&c->info.minimum_dependency_set, heap_allocator());
isize entity_count = c->info.entities.count;
isize min_dep_set_cap = next_pow2_isize(entity_count*4); // empirically determined factor
ptr_set_init(&c->info.minimum_dependency_set, heap_allocator(), min_dep_set_cap);
ptr_set_init(&c->info.minimum_dependency_type_info_set, heap_allocator());
String required_runtime_entities[] = {
@@ -1769,6 +1786,7 @@ void generate_minimum_dependency_set(Checker *c, Entity *start) {
str_lit("memcpy"),
str_lit("memmove"),
str_lit("memory_equal"),
str_lit("memory_compare"),
str_lit("memory_compare_zero"),
@@ -1842,7 +1860,68 @@ void generate_minimum_dependency_set(Checker *c, Entity *start) {
add_dependency_to_set(c, e);
}
add_dependency_to_set(c, start);
for_array(i, c->info.entities) {
Entity *e = c->info.entities[i];
switch (e->kind) {
case Entity_Variable:
if (e->Variable.is_export) {
add_dependency_to_set(c, e);
}
break;
case Entity_Procedure:
if (e->Procedure.is_export) {
add_dependency_to_set(c, e);
}
break;
}
}
if (build_context.command_kind == Command_test) {
AstPackage *pkg = c->info.init_package;
Scope *s = pkg->scope;
for_array(i, s->elements.entries) {
Entity *e = s->elements.entries[i].value;
if (e->kind != Entity_Procedure) {
continue;
}
if (e->file == nullptr || !e->file->is_test) {
continue;
}
String name = e->token.string;
String prefix = str_lit("test_");
if (!string_starts_with(name, prefix)) {
continue;
}
bool is_tester = false;
if (name != prefix) {
is_tester = true;
} else {
error(e->token, "Invalid testing procedure name: %.*s", LIT(name));
}
Type *t = base_type(e->type);
GB_ASSERT(t->kind == Type_Proc);
if (t->Proc.param_count == 0 && t->Proc.result_count == 0) {
// Good
} else {
gbString str = type_to_string(t);
error(e->token, "Testing procedures must have a signature type of proc(), got %s", str);
gb_string_free(str);
is_tester = false;
}
if (is_tester) {
add_dependency_to_set(c, e);
array_add(&c->info.testing_procedures, e);
}
}
} else {
add_dependency_to_set(c, start);
}
}
bool is_entity_a_dependency(Entity *e) {
@@ -1881,19 +1960,17 @@ void add_entity_dependency_from_procedure_parameters(Map<EntityGraphNode *> *M,
}
Array<EntityGraphNode *> generate_entity_dependency_graph(CheckerInfo *info) {
Array<EntityGraphNode *> generate_entity_dependency_graph(CheckerInfo *info, gbAllocator allocator) {
#define TIME_SECTION(str) do { if (build_context.show_more_timings) timings_start_section(&global_timings, str_lit(str)); } while (0)
gbAllocator a = heap_allocator();
Map<EntityGraphNode *> M = {}; // Key: Entity *
map_init(&M, a, info->entities.count);
map_init(&M, allocator, info->entities.count);
defer (map_destroy(&M));
for_array(i, info->entities) {
Entity *e = info->entities[i];
DeclInfo *d = e->decl_info;
if (is_entity_a_dependency(e)) {
EntityGraphNode *n = gb_alloc_item(a, EntityGraphNode);
EntityGraphNode *n = gb_alloc_item(allocator, EntityGraphNode);
n->entity = e;
map_set(&M, hash_pointer(e), n);
}
@@ -1928,7 +2005,7 @@ Array<EntityGraphNode *> generate_entity_dependency_graph(CheckerInfo *info) {
// This means that the entity graph node set will have to be thread safe
TIME_SECTION("generate_entity_dependency_graph: Calculate edges for graph M - Part 2");
auto G = array_make<EntityGraphNode *>(a, 0, M.entries.count);
auto G = array_make<EntityGraphNode *>(allocator, 0, M.entries.count);
for_array(i, M.entries) {
auto *entry = &M.entries[i];
@@ -1949,17 +2026,27 @@ Array<EntityGraphNode *> generate_entity_dependency_graph(CheckerInfo *info) {
EntityGraphNode *s = n->succ.entries[k].ptr;
// Ignore self-cycles
if (s != n) {
if (p->entity->kind == Entity_Procedure &&
s->entity->kind == Entity_Procedure) {
// NOTE(bill, 2020-11-15): Only care about variable initialization ordering
// TODO(bill): This is probably wrong!!!!
continue;
}
// IMPORTANT NOTE/TODO(bill, 2020-11-15): These three calls take the majority of the
// the time to process
entity_graph_node_set_add(&p->succ, s);
entity_graph_node_set_add(&s->pred, p);
// Remove edge to 'n'
entity_graph_node_set_remove(&s->pred, n);
}
}
// Remove edge to 'n'
entity_graph_node_set_remove(&p->succ, n);
}
}
} else {
} else if (e->kind == Entity_Variable) {
array_add(&G, n);
}
}
@@ -1972,6 +2059,28 @@ Array<EntityGraphNode *> generate_entity_dependency_graph(CheckerInfo *info) {
GB_ASSERT(n->dep_count >= 0);
}
// f64 succ_count = 0.0;
// f64 pred_count = 0.0;
// f64 succ_capacity = 0.0;
// f64 pred_capacity = 0.0;
// f64 succ_max = 0.0;
// f64 pred_max = 0.0;
// for_array(i, G) {
// EntityGraphNode *n = G[i];
// succ_count += n->succ.entries.count;
// pred_count += n->pred.entries.count;
// succ_capacity += n->succ.entries.capacity;
// pred_capacity += n->pred.entries.capacity;
// succ_max = gb_max(succ_max, n->succ.entries.capacity);
// pred_max = gb_max(pred_max, n->pred.entries.capacity);
// }
// f64 count = cast(f64)G.count;
// gb_printf_err(">>>count pred: %f succ: %f\n", pred_count/count, succ_count/count);
// gb_printf_err(">>>capacity pred: %f succ: %f\n", pred_capacity/count, succ_capacity/count);
// gb_printf_err(">>>max pred: %f succ: %f\n", pred_max, succ_max);
return G;
#undef TIME_SECTION
@@ -2088,9 +2197,9 @@ void init_core_type_info(Checker *c) {
t_type_info_enum_value = type_info_enum_value->type;
t_type_info_enum_value_ptr = alloc_type_pointer(t_type_info_enum_value);
GB_ASSERT(tis->fields.count == 4);
GB_ASSERT(tis->fields.count == 5);
Entity *type_info_variant = tis->fields[3];
Entity *type_info_variant = tis->fields[4];
Type *tiv_type = type_info_variant->type;
GB_ASSERT(is_type_union(tiv_type));
@@ -2186,14 +2295,14 @@ void init_core_source_code_location(Checker *c) {
}
void init_core_map_type(Checker *c) {
if (t_map_key == nullptr) {
Entity *e = find_core_entity(c, str_lit("Map_Key"));
if (t_map_hash == nullptr) {
Entity *e = find_core_entity(c, str_lit("Map_Hash"));
if (e->state == EntityState_Unresolved) {
auto ctx = c->init_ctx;
check_entity_decl(&ctx, e, nullptr, nullptr);
}
t_map_key = e->type;
GB_ASSERT(t_map_key != nullptr);
t_map_hash = e->type;
GB_ASSERT(t_map_hash != nullptr);
}
if (t_map_header == nullptr) {
@@ -2579,7 +2688,7 @@ DECL_ATTRIBUTE_PROC(type_decl_attribute) {
if (valid && build_context.use_llvm_api) {
if (ac->atom_op_table == nullptr) {
ac->atom_op_table = gb_alloc_item(heap_allocator(), TypeAtomOpTable);
ac->atom_op_table = gb_alloc_item(permanent_allocator(), TypeAtomOpTable);
}
ac->atom_op_table->op[TypeAtomOp_index_get] = e;
}
@@ -2638,7 +2747,7 @@ DECL_ATTRIBUTE_PROC(type_decl_attribute) {
if (valid && build_context.use_llvm_api) {
if (ac->atom_op_table == nullptr) {
ac->atom_op_table = gb_alloc_item(heap_allocator(), TypeAtomOpTable);
ac->atom_op_table = gb_alloc_item(permanent_allocator(), TypeAtomOpTable);
}
ac->atom_op_table->op[TypeAtomOp_index_set] = e;
}
@@ -2720,7 +2829,7 @@ DECL_ATTRIBUTE_PROC(type_decl_attribute) {
if (valid && build_context.use_llvm_api) {
if (ac->atom_op_table == nullptr) {
ac->atom_op_table = gb_alloc_item(heap_allocator(), TypeAtomOpTable);
ac->atom_op_table = gb_alloc_item(permanent_allocator(), TypeAtomOpTable);
}
ac->atom_op_table->op[TypeAtomOp_slice] = e;
}
@@ -2809,7 +2918,7 @@ void check_decl_attributes(CheckerContext *c, Array<Ast *> const &attributes, De
}
isize get_total_value_count(Array<Ast *> const &values) {
isize get_total_value_count(Slice<Ast *> const &values) {
isize count = 0;
for_array(i, values) {
Type *t = type_of_expr(values[i]);
@@ -2967,8 +3076,8 @@ void check_builtin_attributes(CheckerContext *ctx, Entity *e, Array<Ast *> *attr
}
void check_collect_value_decl(CheckerContext *c, Ast *decl) {
if (decl->been_handled) return;
decl->been_handled = true;
if (decl->state_flags & StateFlag_BeenHandled) return;
decl->state_flags |= StateFlag_BeenHandled;
ast_node(vd, ValueDecl, decl);
@@ -3023,7 +3132,7 @@ void check_collect_value_decl(CheckerContext *c, Ast *decl) {
} else {
entity_visibility_kind = kind;
}
array_unordered_remove(elems, j);
slice_unordered_remove(elems, j);
j -= 1;
}
}
@@ -3072,7 +3181,10 @@ void check_collect_value_decl(CheckerContext *c, Ast *decl) {
}
Ast *init_expr = value;
DeclInfo *d = make_decl_info(heap_allocator(), c->scope, c->decl);
DeclInfo *d = make_decl_info(c->scope, c->decl);
d->decl_node = decl;
d->comment = vd->comment;
d->docs = vd->docs;
d->entity = e;
d->type_expr = vd->type;
d->init_expr = init_expr;
@@ -3100,9 +3212,12 @@ void check_collect_value_decl(CheckerContext *c, Ast *decl) {
Token token = name->Ident.token;
Ast *fl = c->foreign_context.curr_library;
DeclInfo *d = make_decl_info(c->allocator, c->scope, c->decl);
Entity *e = nullptr;
DeclInfo *d = make_decl_info(c->scope, c->decl);
d->decl_node = decl;
d->comment = vd->comment;
d->docs = vd->docs;
d->attributes = vd->attributes;
d->type_expr = vd->type;
d->init_expr = init;
@@ -3186,8 +3301,8 @@ void check_collect_value_decl(CheckerContext *c, Ast *decl) {
}
void check_add_foreign_block_decl(CheckerContext *ctx, Ast *decl) {
if (decl->been_handled) return;
decl->been_handled = true;
if (decl->state_flags & StateFlag_BeenHandled) return;
decl->state_flags |= StateFlag_BeenHandled;
ast_node(fb, ForeignBlockDecl, decl);
Ast *foreign_library = fb->foreign_library;
@@ -3207,7 +3322,7 @@ void check_add_foreign_block_decl(CheckerContext *ctx, Ast *decl) {
}
// NOTE(bill): If file_scopes == nullptr, this will act like a local scope
void check_collect_entities(CheckerContext *c, Array<Ast *> const &nodes) {
void check_collect_entities(CheckerContext *c, Slice<Ast *> const &nodes) {
for_array(decl_index, nodes) {
Ast *decl = nodes[decl_index];
if (!is_ast_decl(decl) && !is_ast_when_stmt(decl)) {
@@ -3522,11 +3637,9 @@ struct ImportPathItem {
Array<ImportPathItem> find_import_path(Checker *c, AstPackage *start, AstPackage *end, PtrSet<AstPackage *> *visited) {
Array<ImportPathItem> empty_path = {};
if (ptr_set_exists(visited, start)) {
if (ptr_set_update(visited, start)) {
return empty_path;
}
ptr_set_add(visited, start);
String path = start->fullpath;
AstPackage **found = string_map_get(&c->info.packages, path);
@@ -3571,8 +3684,8 @@ Array<ImportPathItem> find_import_path(Checker *c, AstPackage *start, AstPackage
}
#endif
void check_add_import_decl(CheckerContext *ctx, Ast *decl) {
if (decl->been_handled) return;
decl->been_handled = true;
if (decl->state_flags & StateFlag_BeenHandled) return;
decl->state_flags |= StateFlag_BeenHandled;
ast_node(id, ImportDecl, decl);
Token token = id->relpath;
@@ -3612,10 +3725,8 @@ void check_add_import_decl(CheckerContext *ctx, Ast *decl) {
GB_ASSERT(scope->flags&ScopeFlag_Pkg);
if (ptr_set_exists(&parent_scope->imported, scope)) {
if (ptr_set_update(&parent_scope->imported, scope)) {
// error(token, "Multiple import of the same file within this scope");
} else {
ptr_set_add(&parent_scope->imported, scope);
}
String import_name = path_to_entity_name(id->import_name.string, id->fullpath, false);
@@ -3686,8 +3797,8 @@ DECL_ATTRIBUTE_PROC(foreign_import_decl_attribute) {
}
void check_add_foreign_import_decl(CheckerContext *ctx, Ast *decl) {
if (decl->been_handled) return;
decl->been_handled = true;
if (decl->state_flags & StateFlag_BeenHandled) return;
decl->state_flags |= StateFlag_BeenHandled;
ast_node(fl, ForeignImportDecl, decl);
@@ -3738,7 +3849,7 @@ void check_add_foreign_import_decl(CheckerContext *ctx, Ast *decl) {
}
}
bool collect_checked_packages_from_decl_list(Checker *c, Array<Ast *> const &decls) {
bool collect_checked_packages_from_decl_list(Checker *c, Slice<Ast *> const &decls) {
bool new_files = false;
for_array(i, decls) {
Ast *decl = decls[i];
@@ -3760,7 +3871,7 @@ bool collect_checked_packages_from_decl_list(Checker *c, Array<Ast *> const &dec
}
// Returns true if a new package is present
bool collect_file_decls(CheckerContext *ctx, Array<Ast *> const &decls);
bool collect_file_decls(CheckerContext *ctx, Slice<Ast *> const &decls);
bool collect_file_decls_from_when_stmt(CheckerContext *ctx, AstWhenStmt *ws);
bool collect_when_stmt_from_file(CheckerContext *ctx, AstWhenStmt *ws) {
@@ -3835,7 +3946,7 @@ bool collect_file_decls_from_when_stmt(CheckerContext *ctx, AstWhenStmt *ws) {
return false;
}
bool collect_file_decls(CheckerContext *ctx, Array<Ast *> const &decls) {
bool collect_file_decls(CheckerContext *ctx, Slice<Ast *> const &decls) {
GB_ASSERT(ctx->scope->flags&ScopeFlag_File);
if (collect_checked_packages_from_decl_list(ctx->checker, decls)) {
@@ -3968,10 +4079,9 @@ void check_import_entities(Checker *c) {
if (pkg == nullptr) {
continue;
}
if (ptr_set_exists(&emitted, pkg)) {
if (ptr_set_update(&emitted, pkg)) {
continue;
}
ptr_set_add(&emitted, pkg);
array_add(&package_order, n);
}
@@ -4162,7 +4272,7 @@ void calculate_global_init_order(Checker *c) {
CheckerInfo *info = &c->info;
TIME_SECTION("calculate_global_init_order: generate entity dependency graph");
Array<EntityGraphNode *> dep_graph = generate_entity_dependency_graph(info);
Array<EntityGraphNode *> dep_graph = generate_entity_dependency_graph(info, heap_allocator());
defer ({
for_array(i, dep_graph) {
entity_graph_node_destroy(dep_graph[i], heap_allocator());
@@ -4214,11 +4324,9 @@ void calculate_global_init_order(Checker *c) {
// if (!decl_info_has_init(d)) {
// continue;
// }
if (ptr_set_exists(&emitted, d)) {
if (ptr_set_update(&emitted, d)) {
continue;
}
ptr_set_add(&emitted, d);
array_add(&info->variable_init_order, d);
}
@@ -4299,10 +4407,11 @@ void check_parsed_files(Checker *c) {
for_array(i, c->parser->packages) {
AstPackage *p = c->parser->packages[i];
Scope *scope = create_scope_from_package(&c->init_ctx, p);
p->decl_info = make_decl_info(c->allocator, scope, c->init_ctx.decl);
p->decl_info = make_decl_info(scope, c->init_ctx.decl);
string_map_set(&c->info.packages, p->fullpath, p);
if (scope->flags&ScopeFlag_Init) {
c->info.init_package = p;
c->info.init_scope = scope;
}
if (p->kind == Package_Runtime) {
@@ -4572,7 +4681,7 @@ void check_parsed_files(Checker *c) {
TIME_SECTION("check entry point");
if (build_context.build_mode == BuildMode_Executable) {
if (build_context.build_mode == BuildMode_Executable && !build_context.no_entry_point && build_context.command_kind != Command_test) {
Scope *s = c->info.init_scope;
GB_ASSERT(s != nullptr);
GB_ASSERT(s->flags&ScopeFlag_Init);

View File

@@ -45,7 +45,7 @@ enum StmtFlag {
Stmt_TypeSwitch = 1<<4,
Stmt_CheckScopeDecls = 1<<5,
Stmt_CheckScopeDecls = 1<<5,
};
enum BuiltinProcPkg {
@@ -132,6 +132,7 @@ struct DeclInfo {
Entity *entity;
Ast * decl_node;
Ast * type_expr;
Ast * init_expr;
Array<Ast *> attributes;
@@ -140,6 +141,9 @@ struct DeclInfo {
bool is_using;
bool where_clauses_evaluated;
CommentGroup *comment;
CommentGroup *docs;
PtrSet<Entity *> deps;
PtrSet<Type *> type_info_deps;
Array<BlockLabel> labels;
@@ -160,12 +164,13 @@ struct ProcInfo {
enum ScopeFlag : i32 {
ScopeFlag_Pkg = 1<<1,
ScopeFlag_Global = 1<<2,
ScopeFlag_File = 1<<3,
ScopeFlag_Init = 1<<4,
ScopeFlag_Proc = 1<<5,
ScopeFlag_Type = 1<<6,
ScopeFlag_Pkg = 1<<1,
ScopeFlag_Builtin = 1<<2,
ScopeFlag_Global = 1<<3,
ScopeFlag_File = 1<<4,
ScopeFlag_Init = 1<<5,
ScopeFlag_Proc = 1<<6,
ScopeFlag_Type = 1<<7,
ScopeFlag_HasBeenImported = 1<<10, // This is only applicable to file scopes
@@ -247,8 +252,12 @@ struct AtomOpMapEntry {
};
struct CheckerContext;
// CheckerInfo stores all the symbol information for a type-checked program
struct CheckerInfo {
Checker *checker;
Map<ExprInfo> untyped; // Key: Ast * | Expression -> ExprInfo
// NOTE(bill): This needs to be a map and not on the Ast
// as it needs to be iterated across
@@ -268,6 +277,7 @@ struct CheckerInfo {
AstPackage * builtin_package;
AstPackage * runtime_package;
AstPackage * init_package;
Scope * init_scope;
Entity * entry_point;
PtrSet<Entity *> minimum_dependency_set;
@@ -278,6 +288,7 @@ struct CheckerInfo {
Map<AtomOpMapEntry> atom_op_map; // Key: Ast *
Array<Entity *> testing_procedures;
bool allow_identifier_uses;
Array<Ast *> identifier_uses; // only used by 'odin query'
@@ -301,7 +312,6 @@ struct CheckerContext {
ProcCallingConvention curr_proc_calling_convention;
bool in_proc_sig;
ForeignContext foreign_context;
gbAllocator allocator;
CheckerTypePath *type_path;
isize type_level; // TODO(bill): Actually handle correctly
@@ -317,6 +327,7 @@ struct CheckerContext {
bool no_polymorphic_errors;
bool hide_polymorphic_errors;
bool in_polymorphic_specialization;
bool allow_arrow_right_selector_expr;
Scope * polymorphic_scope;
Ast *assignment_lhs_hint;
@@ -331,14 +342,11 @@ struct Checker {
Array<Entity *> procs_with_deferred_to_check;
CheckerContext *curr_ctx;
gbAllocator allocator;
CheckerContext init_ctx;
};
gb_global AstPackage *builtin_pkg = nullptr;
gb_global AstPackage *intrinsics_pkg = nullptr;
gb_global AstPackage *config_pkg = nullptr;
@@ -387,7 +395,7 @@ void check_add_foreign_import_decl(CheckerContext *c, Ast *decl);
bool check_arity_match(CheckerContext *c, AstValueDecl *vd, bool is_global = false);
void check_collect_entities(CheckerContext *c, Array<Ast *> const &nodes);
void check_collect_entities(CheckerContext *c, Slice<Ast *> const &nodes);
void check_collect_entities_from_when_stmt(CheckerContext *c, AstWhenStmt *ws);
void check_delayed_file_import_entity(CheckerContext *c, Ast *decl);

View File

@@ -183,6 +183,9 @@ BuiltinProc__type_simple_boolean_end,
BuiltinProc_type_field_index_of,
BuiltinProc_type_equal_proc,
BuiltinProc_type_hasher_proc,
BuiltinProc__type_end,
@@ -367,5 +370,8 @@ gb_global BuiltinProc builtin_procs[BuiltinProc_COUNT] = {
{STR_LIT("type_field_index_of"), 2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
{STR_LIT("type_equal_proc"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics},
{STR_LIT("type_hasher_proc"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics},
{STR_LIT(""), 0, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
};

View File

@@ -56,6 +56,14 @@ gb_inline isize align_formula_isize(isize size, isize align) {
}
return size;
}
gb_inline void *align_formula_ptr(void *ptr, isize align) {
if (align > 0) {
uintptr result = (cast(uintptr)ptr) + align-1;
return (void *)(result - result%align);
}
return ptr;
}
GB_ALLOCATOR_PROC(heap_allocator_proc);
@@ -373,13 +381,16 @@ typedef struct Arena {
gbAllocator backing;
isize block_size;
gbMutex mutex;
isize total_used;
bool use_mutex;
} Arena;
#define ARENA_MIN_ALIGNMENT 16
#define ARENA_DEFAULT_BLOCK_SIZE (8*1024*1024)
gb_global Arena permanent_arena = {};
void arena_init(Arena *arena, gbAllocator backing, isize block_size=ARENA_DEFAULT_BLOCK_SIZE) {
arena->backing = backing;
arena->block_size = block_size;
@@ -388,8 +399,9 @@ void arena_init(Arena *arena, gbAllocator backing, isize block_size=ARENA_DEFAUL
}
void arena_grow(Arena *arena, isize min_size) {
// gb_mutex_lock(&arena->mutex);
// defer (gb_mutex_unlock(&arena->mutex));
if (arena->use_mutex) {
gb_mutex_lock(&arena->mutex);
}
isize size = gb_max(arena->block_size, min_size);
size = ALIGN_UP(size, ARENA_MIN_ALIGNMENT);
@@ -399,11 +411,16 @@ void arena_grow(Arena *arena, isize min_size) {
GB_ASSERT(arena->ptr == ALIGN_DOWN_PTR(arena->ptr, ARENA_MIN_ALIGNMENT));
arena->end = arena->ptr + size;
array_add(&arena->blocks, arena->ptr);
if (arena->use_mutex) {
gb_mutex_unlock(&arena->mutex);
}
}
void *arena_alloc(Arena *arena, isize size, isize alignment) {
// gb_mutex_lock(&arena->mutex);
// defer (gb_mutex_unlock(&arena->mutex));
if (arena->use_mutex) {
gb_mutex_lock(&arena->mutex);
}
arena->total_used += size;
@@ -419,12 +436,17 @@ void *arena_alloc(Arena *arena, isize size, isize alignment) {
GB_ASSERT(arena->ptr <= arena->end);
GB_ASSERT(ptr == ALIGN_DOWN_PTR(ptr, align));
// zero_size(ptr, size);
if (arena->use_mutex) {
gb_mutex_unlock(&arena->mutex);
}
return ptr;
}
void arena_free_all(Arena *arena) {
// gb_mutex_lock(&arena->mutex);
// defer (gb_mutex_unlock(&arena->mutex));
if (arena->use_mutex) {
gb_mutex_lock(&arena->mutex);
}
for_array(i, arena->blocks) {
gb_free(arena->backing, arena->blocks[i]);
@@ -432,8 +454,11 @@ void arena_free_all(Arena *arena) {
array_clear(&arena->blocks);
arena->ptr = nullptr;
arena->end = nullptr;
}
if (arena->use_mutex) {
gb_mutex_unlock(&arena->mutex);
}
}
@@ -460,7 +485,14 @@ GB_ALLOCATOR_PROC(arena_allocator_proc) {
// GB_PANIC("gbAllocation_Free not supported");
break;
case gbAllocation_Resize:
GB_PANIC("gbAllocation_Resize: not supported");
if (size == 0) {
ptr = nullptr;
} else if (size <= old_size) {
ptr = old_memory;
} else {
ptr = arena_alloc(arena, size, alignment);
gb_memmove(ptr, old_memory, old_size);
}
break;
case gbAllocation_FreeAll:
arena_free_all(arena);
@@ -471,6 +503,97 @@ GB_ALLOCATOR_PROC(arena_allocator_proc) {
}
gbAllocator permanent_allocator() {
return arena_allocator(&permanent_arena);
// return heap_allocator();
}
struct Temp_Allocator {
u8 *data;
isize len;
isize curr_offset;
gbAllocator backup_allocator;
Array<void *> leaked_allocations;
};
gb_global Temp_Allocator temporary_allocator_data = {};
void temp_allocator_init(Temp_Allocator *s, isize size) {
s->backup_allocator = heap_allocator();
s->data = cast(u8 *)gb_alloc_align(s->backup_allocator, size, 16);
s->curr_offset = 0;
s->leaked_allocations.allocator = s->backup_allocator;
}
void *temp_allocator_alloc(Temp_Allocator *s, isize size, isize alignment) {
size = align_formula_isize(size, alignment);
if (s->curr_offset+size <= s->len) {
u8 *start = s->data;
u8 *ptr = start + s->curr_offset;
ptr = cast(u8 *)align_formula_ptr(ptr, alignment);
// assume memory is zero
isize offset = ptr - start;
s->curr_offset = offset + size;
return ptr;
} else if (size <= s->len) {
u8 *start = s->data;
u8 *ptr = cast(u8 *)align_formula_ptr(start, alignment);
// assume memory is zero
isize offset = ptr - start;
s->curr_offset = offset + size;
return ptr;
}
void *ptr = gb_alloc_align(s->backup_allocator, size, alignment);
array_add(&s->leaked_allocations, ptr);
return ptr;
}
void temp_allocator_free_all(Temp_Allocator *s) {
s->curr_offset = 0;
for_array(i, s->leaked_allocations) {
gb_free(s->backup_allocator, s->leaked_allocations[i]);
}
array_clear(&s->leaked_allocations);
gb_zero_size(s->data, s->len);
}
GB_ALLOCATOR_PROC(temp_allocator_proc) {
void *ptr = nullptr;
Temp_Allocator *s = cast(Temp_Allocator *)allocator_data;
GB_ASSERT_NOT_NULL(s);
switch (type) {
case gbAllocation_Alloc:
return temp_allocator_alloc(s, size, alignment);
case gbAllocation_Free:
break;
case gbAllocation_Resize:
if (size == 0) {
ptr = nullptr;
} else if (size <= old_size) {
ptr = old_memory;
} else {
ptr = temp_allocator_alloc(s, size, alignment);
gb_memmove(ptr, old_memory, old_size);
}
break;
case gbAllocation_FreeAll:
temp_allocator_free_all(s);
break;
}
return ptr;
}
gbAllocator temporary_allocator() {
return {temp_allocator_proc, &temporary_allocator_data};
}

View File

@@ -1,22 +1,102 @@
// Generates Documentation
gbString expr_to_string(Ast *expression);
gb_global int print_entity_kind_ordering[Entity_Count] = {
/*Invalid*/ -1,
/*Constant*/ 0,
/*Variable*/ 1,
/*TypeName*/ 4,
/*Procedure*/ 2,
/*ProcGroup*/ 3,
/*Builtin*/ -1,
/*ImportName*/ -1,
/*LibraryName*/ -1,
/*Nil*/ -1,
/*Label*/ -1,
};
gb_global char const *print_entity_names[Entity_Count] = {
/*Invalid*/ "",
/*Constant*/ "constants",
/*Variable*/ "variables",
/*TypeName*/ "types",
/*Procedure*/ "procedures",
/*ProcGroup*/ "proc_group",
/*Builtin*/ "",
/*ImportName*/ "import names",
/*LibraryName*/ "library names",
/*Nil*/ "",
/*Label*/ "",
};
String alloc_comment_group_string(gbAllocator a, CommentGroup g) {
GB_COMPARE_PROC(cmp_entities_for_printing) {
GB_ASSERT(a != nullptr);
GB_ASSERT(b != nullptr);
Entity *x = *cast(Entity **)a;
Entity *y = *cast(Entity **)b;
int res = 0;
res = string_compare(x->pkg->name, y->pkg->name);
if (res != 0) {
return res;
}
int ox = print_entity_kind_ordering[x->kind];
int oy = print_entity_kind_ordering[y->kind];
res = ox - oy;
if (res != 0) {
return res;
}
res = string_compare(x->token.string, y->token.string);
return res;
}
GB_COMPARE_PROC(cmp_ast_package_by_name) {
GB_ASSERT(a != nullptr);
GB_ASSERT(b != nullptr);
AstPackage *x = *cast(AstPackage **)a;
AstPackage *y = *cast(AstPackage **)b;
return string_compare(x->name, y->name);
}
void print_doc_line(i32 indent, char const *fmt, ...) {
while (indent --> 0) {
gb_printf("\t");
}
va_list va;
va_start(va, fmt);
gb_printf_va(fmt, va);
va_end(va);
gb_printf("\n");
}
void print_doc_line_no_newline(i32 indent, char const *fmt, ...) {
while (indent --> 0) {
gb_printf("\t");
}
va_list va;
va_start(va, fmt);
gb_printf_va(fmt, va);
va_end(va);
}
bool print_doc_comment_group_string(i32 indent, CommentGroup *g) {
if (g == nullptr) {
return false;
}
isize len = 0;
for_array(i, g.list) {
String comment = g.list[i].string;
for_array(i, g->list) {
String comment = g->list[i].string;
len += comment.len;
len += 1; // for \n
}
if (len == 0) {
return make_string(nullptr, 0);
if (len <= g->list.count) {
return false;
}
u8 *text = gb_alloc_array(a, u8, len+1);
len = 0;
for_array(i, g.list) {
String comment = g.list[i].string;
isize count = 0;
for_array(i, g->list) {
String comment = g->list[i].string;
String original_comment = comment;
bool slash_slash = comment[1] == '/';
bool slash_star = comment[1] == '*';
if (comment[1] == '/') {
comment.text += 2;
comment.len -= 2;
@@ -24,84 +104,216 @@ String alloc_comment_group_string(gbAllocator a, CommentGroup g) {
comment.text += 2;
comment.len -= 4;
}
comment = string_trim_whitespace(comment);
gb_memmove(text+len, comment.text, comment.len);
len += comment.len;
text[len++] = '\n';
}
return make_string(text, len);
}
#if 0
void print_type_spec(Ast *spec) {
ast_node(ts, TypeSpec, spec);
GB_ASSERT(ts->name->kind == Ast_Ident);
String name = ts->name->Ident.string;
if (name.len == 0) {
return;
}
if (name[0] == '_') {
return;
}
gb_printf("type %.*s\n", LIT(name));
}
// Ignore the first space
if (comment.len > 0 && comment[0] == ' ') {
comment.text += 1;
comment.len -= 1;
}
void print_proc_decl(AstProcDecl *pd) {
GB_ASSERT(pd->name->kind == Ast_Ident);
String name = pd->name->Ident.string;
if (name.len == 0) {
return;
}
if (name[0] == '_') {
return;
}
String docs = alloc_comment_group_string(heap_allocator(), pd->docs);
defer (gb_free(heap_allocator(), docs.text));
if (docs.len > 0) {
gb_file_write(&gb__std_files[gbFileStandard_Output], docs.text, docs.len);
} else {
return;
}
ast_node(proc_type, ProcType, pd->type);
gbString params = expr_to_string(proc_type->params);
defer (gb_string_free(params));
gb_printf("proc %.*s(%s)", LIT(name), params);
if (proc_type->results != nullptr) {
ast_node(fl, FieldList, proc_type->results);
isize count = fl->list.count;
if (count > 0) {
gbString results = expr_to_string(proc_type->results);
defer (gb_string_free(results));
gb_printf(" -> ");
if (count != 1) {
gb_printf("(");
if (slash_slash) {
if (string_starts_with(comment, str_lit("+"))) {
continue;
}
gb_printf("%s", results);
if (count != 1) {
gb_printf(")");
if (string_starts_with(comment, str_lit("@("))) {
continue;
}
}
if (slash_slash) {
print_doc_line(indent, "%.*s", LIT(comment));
count += 1;
} else {
isize pos = 0;
for (; pos < comment.len; pos++) {
isize end = pos;
for (; end < comment.len; end++) {
if (comment[end] == '\n') {
break;
}
}
String line = substring(comment, pos, end);
pos = end+1;
String trimmed_line = string_trim_whitespace(line);
if (trimmed_line.len == 0) {
if (count == 0) {
continue;
}
}
/*
* Remove comments with
* styles
* like this
*/
if (string_starts_with(line, str_lit("* "))) {
line = substring(line, 2, line.len);
}
print_doc_line(indent, "%.*s", LIT(line));
count += 1;
}
}
}
gb_printf("\n\n");
}
#endif
void print_declaration(Ast *decl) {
if (count > 0) {
print_doc_line(0, "");
return true;
}
return false;
}
void generate_documentation(Parser *parser) {
// for_array(file_index, parser->files) {
// AstFile *file = parser->files[file_index];
// Tokenizer *tokenizer = &file->tokenizer;
// String fullpath = tokenizer->fullpath;
// gb_printf("%.*s\n", LIT(fullpath));
// for_array(decl_index, file->decls) {
// Ast *decl = file->decls[decl_index];
// print_declaration(decl);
// }
// }
void print_doc_expr(Ast *expr) {
gbString s = nullptr;
if (build_context.cmd_doc_flags & CmdDocFlag_Short) {
s = expr_to_string_shorthand(expr);
} else {
s = expr_to_string(expr);
}
gb_file_write(gb_file_get_standard(gbFileStandard_Output), s, gb_string_length(s));
gb_string_free(s);
}
void print_doc_package(CheckerInfo *info, AstPackage *pkg) {
if (pkg == nullptr) {
return;
}
print_doc_line(0, "package %.*s", LIT(pkg->name));
for_array(i, pkg->files) {
AstFile *f = pkg->files[i];
if (f->pkg_decl) {
GB_ASSERT(f->pkg_decl->kind == Ast_PackageDecl);
print_doc_comment_group_string(1, f->pkg_decl->PackageDecl.docs);
}
}
if (pkg->scope != nullptr) {
auto entities = array_make<Entity *>(heap_allocator(), 0, pkg->scope->elements.entries.count);
defer (array_free(&entities));
for_array(i, pkg->scope->elements.entries) {
Entity *e = pkg->scope->elements.entries[i].value;
switch (e->kind) {
case Entity_Invalid:
case Entity_Builtin:
case Entity_Nil:
case Entity_Label:
continue;
case Entity_Constant:
case Entity_Variable:
case Entity_TypeName:
case Entity_Procedure:
case Entity_ProcGroup:
case Entity_ImportName:
case Entity_LibraryName:
// Fine
break;
}
array_add(&entities, e);
}
gb_sort_array(entities.data, entities.count, cmp_entities_for_printing);
bool show_docs = (build_context.cmd_doc_flags & CmdDocFlag_Short) == 0;
EntityKind curr_entity_kind = Entity_Invalid;
for_array(i, entities) {
Entity *e = entities[i];
if (e->pkg != pkg) {
continue;
}
if (!is_entity_exported(e)) {
continue;
}
if (curr_entity_kind != e->kind) {
if (curr_entity_kind != Entity_Invalid) {
print_doc_line(0, "");
}
curr_entity_kind = e->kind;
print_doc_line(1, "%s", print_entity_names[e->kind]);
}
Ast *type_expr = nullptr;
Ast *init_expr = nullptr;
Ast *decl_node = nullptr;
CommentGroup *comment = nullptr;
CommentGroup *docs = nullptr;
if (e->decl_info != nullptr) {
type_expr = e->decl_info->type_expr;
init_expr = e->decl_info->init_expr;
decl_node = e->decl_info->decl_node;
comment = e->decl_info->comment;
docs = e->decl_info->docs;
}
GB_ASSERT(type_expr != nullptr || init_expr != nullptr);
print_doc_line_no_newline(2, "%.*s", LIT(e->token.string));
if (type_expr != nullptr) {
gbString t = expr_to_string(type_expr);
gb_printf(": %s ", t);
gb_string_free(t);
} else {
gb_printf(" :");
}
if (e->kind == Entity_Variable) {
if (init_expr != nullptr) {
gb_printf("= ");
print_doc_expr(init_expr);
}
} else {
gb_printf(": ");
print_doc_expr(init_expr);
}
gb_printf(";\n");
if (show_docs) {
print_doc_comment_group_string(3, docs);
}
}
print_doc_line(0, "");
}
if (pkg->fullpath.len != 0) {
print_doc_line(0, "");
print_doc_line(1, "fullpath:");
print_doc_line(2, "%.*s", LIT(pkg->fullpath));
print_doc_line(1, "files:");
for_array(i, pkg->files) {
AstFile *f = pkg->files[i];
String filename = remove_directory_from_path(f->fullpath);
print_doc_line(2, "%.*s", LIT(filename));
}
}
}
void generate_documentation(Checker *c) {
CheckerInfo *info = &c->info;
auto pkgs = array_make<AstPackage *>(permanent_allocator(), 0, info->packages.entries.count);
for_array(i, info->packages.entries) {
AstPackage *pkg = info->packages.entries[i].value;
if (build_context.cmd_doc_flags & CmdDocFlag_AllPackages) {
array_add(&pkgs, pkg);
} else {
if (pkg->kind == Package_Init) {
array_add(&pkgs, pkg);
} else if (pkg->is_extra) {
array_add(&pkgs, pkg);
}
}
}
gb_sort_array(pkgs.data, pkgs.count, cmp_ast_package_by_name);
for_array(i, pkgs) {
print_doc_package(info, pkgs[i]);
}
}

View File

@@ -120,6 +120,7 @@ struct Entity {
union {
struct {
ExactValue value;
ParameterValue param_value;
} Constant;
struct {
Ast *init_expr; // only used for some variables within procedure bodies
@@ -164,7 +165,7 @@ struct Entity {
Scope *scope;
} ImportName;
struct {
Array<String> paths;
Slice<String> paths;
String name;
} LibraryName;
i32 Nil;
@@ -219,7 +220,7 @@ bool entity_has_deferred_procedure(Entity *e) {
gb_global u64 global_entity_id = 0;
Entity *alloc_entity(EntityKind kind, Scope *scope, Token token, Type *type) {
gbAllocator a = heap_allocator();
gbAllocator a = permanent_allocator();
Entity *entity = gb_alloc_item(a, Entity);
entity->kind = kind;
entity->state = EntityState_Unresolved;
@@ -332,7 +333,7 @@ Entity *alloc_entity_import_name(Scope *scope, Token token, Type *type,
}
Entity *alloc_entity_library_name(Scope *scope, Token token, Type *type,
Array<String> paths, String name) {
Slice<String> paths, String name) {
Entity *entity = alloc_entity(Entity_LibraryName, scope, token, type);
entity->LibraryName.paths = paths;
entity->LibraryName.name = name;

View File

@@ -46,16 +46,16 @@ enum ExactValueKind {
struct ExactValue {
ExactValueKind kind;
union {
bool value_bool;
String value_string;
BigInt value_integer; // NOTE(bill): This must be an integer and not a pointer
f64 value_float;
i64 value_pointer;
Complex128 value_complex;
Quaternion256 value_quaternion;
Ast * value_compound;
Ast * value_procedure;
Type * value_typeid;
bool value_bool;
String value_string;
BigInt value_integer; // NOTE(bill): This must be an integer and not a pointer
f64 value_float;
i64 value_pointer;
Complex128 *value_complex;
Quaternion256 *value_quaternion;
Ast * value_compound;
Ast * value_procedure;
Type * value_typeid;
};
};
@@ -85,9 +85,9 @@ HashKey hash_exact_value(ExactValue v) {
case ExactValue_Pointer:
return hash_integer(v.value_pointer);
case ExactValue_Complex:
return hashing_proc(&v.value_complex, gb_size_of(Complex128));
return hashing_proc(v.value_complex, gb_size_of(Complex128));
case ExactValue_Quaternion:
return hashing_proc(&v.value_quaternion, gb_size_of(Quaternion256));
return hashing_proc(v.value_quaternion, gb_size_of(Quaternion256));
case ExactValue_Compound:
return hash_pointer(v.value_compound);
case ExactValue_Procedure:
@@ -139,17 +139,19 @@ ExactValue exact_value_float(f64 f) {
ExactValue exact_value_complex(f64 real, f64 imag) {
ExactValue result = {ExactValue_Complex};
result.value_complex.real = real;
result.value_complex.imag = imag;
result.value_complex = gb_alloc_item(permanent_allocator(), Complex128);
result.value_complex->real = real;
result.value_complex->imag = imag;
return result;
}
ExactValue exact_value_quaternion(f64 real, f64 imag, f64 jmag, f64 kmag) {
ExactValue result = {ExactValue_Quaternion};
result.value_quaternion.real = real;
result.value_quaternion.imag = imag;
result.value_quaternion.jmag = jmag;
result.value_quaternion.kmag = kmag;
result.value_quaternion = gb_alloc_item(permanent_allocator(), Quaternion256);
result.value_quaternion->real = real;
result.value_quaternion->imag = imag;
result.value_quaternion->jmag = jmag;
result.value_quaternion->kmag = kmag;
return result;
}
@@ -373,6 +375,7 @@ ExactValue exact_value_to_complex(ExactValue v) {
// return exact_value_complex(v.value_quaternion.real, v.value_quaternion.imag);
}
ExactValue r = {ExactValue_Invalid};
v.value_complex = gb_alloc_item(permanent_allocator(), Complex128);
return r;
}
ExactValue exact_value_to_quaternion(ExactValue v) {
@@ -382,11 +385,12 @@ ExactValue exact_value_to_quaternion(ExactValue v) {
case ExactValue_Float:
return exact_value_quaternion(v.value_float, 0, 0, 0);
case ExactValue_Complex:
return exact_value_quaternion(v.value_complex.real, v.value_complex.imag, 0, 0);
return exact_value_quaternion(v.value_complex->real, v.value_complex->imag, 0, 0);
case ExactValue_Quaternion:
return v;
}
ExactValue r = {ExactValue_Invalid};
v.value_quaternion = gb_alloc_item(permanent_allocator(), Quaternion256);
return r;
}
@@ -396,9 +400,9 @@ ExactValue exact_value_real(ExactValue v) {
case ExactValue_Float:
return v;
case ExactValue_Complex:
return exact_value_float(v.value_complex.real);
return exact_value_float(v.value_complex->real);
case ExactValue_Quaternion:
return exact_value_float(v.value_quaternion.real);
return exact_value_float(v.value_quaternion->real);
}
ExactValue r = {ExactValue_Invalid};
return r;
@@ -410,9 +414,9 @@ ExactValue exact_value_imag(ExactValue v) {
case ExactValue_Float:
return exact_value_i64(0);
case ExactValue_Complex:
return exact_value_float(v.value_complex.imag);
return exact_value_float(v.value_complex->imag);
case ExactValue_Quaternion:
return exact_value_float(v.value_quaternion.imag);
return exact_value_float(v.value_quaternion->imag);
}
ExactValue r = {ExactValue_Invalid};
return r;
@@ -425,7 +429,7 @@ ExactValue exact_value_jmag(ExactValue v) {
case ExactValue_Complex:
return exact_value_i64(0);
case ExactValue_Quaternion:
return exact_value_float(v.value_quaternion.jmag);
return exact_value_float(v.value_quaternion->jmag);
}
ExactValue r = {ExactValue_Invalid};
return r;
@@ -438,7 +442,7 @@ ExactValue exact_value_kmag(ExactValue v) {
case ExactValue_Complex:
return exact_value_i64(0);
case ExactValue_Quaternion:
return exact_value_float(v.value_quaternion.kmag);
return exact_value_float(v.value_quaternion->kmag);
}
ExactValue r = {ExactValue_Invalid};
return r;
@@ -532,15 +536,15 @@ ExactValue exact_unary_operator_value(TokenKind op, ExactValue v, i32 precision,
return i;
}
case ExactValue_Complex: {
f64 real = v.value_complex.real;
f64 imag = v.value_complex.imag;
f64 real = v.value_complex->real;
f64 imag = v.value_complex->imag;
return exact_value_complex(-real, -imag);
}
case ExactValue_Quaternion: {
f64 real = v.value_quaternion.real;
f64 imag = v.value_quaternion.imag;
f64 jmag = v.value_quaternion.jmag;
f64 kmag = v.value_quaternion.kmag;
f64 real = v.value_quaternion->real;
f64 imag = v.value_quaternion->imag;
f64 jmag = v.value_quaternion->jmag;
f64 kmag = v.value_quaternion->kmag;
return exact_value_quaternion(-real, -imag, -jmag, -kmag);
}
}
@@ -685,6 +689,8 @@ ExactValue exact_binary_operator_value(TokenKind op, ExactValue x, ExactValue y)
case Token_CmpOr: return exact_value_bool(x.value_bool || y.value_bool);
case Token_And: return exact_value_bool(x.value_bool & y.value_bool);
case Token_Or: return exact_value_bool(x.value_bool | y.value_bool);
case Token_AndNot: return exact_value_bool(x.value_bool & !y.value_bool);
case Token_Xor: return exact_value_bool((x.value_bool && !y.value_bool) || (!x.value_bool && y.value_bool));
default: goto error;
}
break;
@@ -730,10 +736,10 @@ ExactValue exact_binary_operator_value(TokenKind op, ExactValue x, ExactValue y)
case ExactValue_Complex: {
y = exact_value_to_complex(y);
f64 a = x.value_complex.real;
f64 b = x.value_complex.imag;
f64 c = y.value_complex.real;
f64 d = y.value_complex.imag;
f64 a = x.value_complex->real;
f64 b = x.value_complex->imag;
f64 c = y.value_complex->real;
f64 d = y.value_complex->imag;
f64 real = 0;
f64 imag = 0;
switch (op) {
@@ -763,14 +769,14 @@ ExactValue exact_binary_operator_value(TokenKind op, ExactValue x, ExactValue y)
case ExactValue_Quaternion: {
y = exact_value_to_quaternion(y);
f64 xr = x.value_quaternion.real;
f64 xi = x.value_quaternion.imag;
f64 xj = x.value_quaternion.jmag;
f64 xk = x.value_quaternion.kmag;
f64 yr = y.value_quaternion.real;
f64 yi = y.value_quaternion.imag;
f64 yj = y.value_quaternion.jmag;
f64 yk = y.value_quaternion.kmag;
f64 xr = x.value_quaternion->real;
f64 xi = x.value_quaternion->imag;
f64 xj = x.value_quaternion->jmag;
f64 xk = x.value_quaternion->kmag;
f64 yr = y.value_quaternion->real;
f64 yi = y.value_quaternion->imag;
f64 yj = y.value_quaternion->jmag;
f64 yk = y.value_quaternion->kmag;
f64 real = 0;
@@ -897,10 +903,10 @@ bool compare_exact_values(TokenKind op, ExactValue x, ExactValue y) {
}
case ExactValue_Complex: {
f64 a = x.value_complex.real;
f64 b = x.value_complex.imag;
f64 c = y.value_complex.real;
f64 d = y.value_complex.imag;
f64 a = x.value_complex->real;
f64 b = x.value_complex->imag;
f64 c = y.value_complex->real;
f64 d = y.value_complex->imag;
switch (op) {
case Token_CmpEq: return cmp_f64(a, c) == 0 && cmp_f64(b, d) == 0;
case Token_NotEq: return cmp_f64(a, c) != 0 || cmp_f64(b, d) != 0;
@@ -945,7 +951,7 @@ bool compare_exact_values(TokenKind op, ExactValue x, ExactValue y) {
Entity *strip_entity_wrapping(Ast *expr);
Entity *strip_entity_wrapping(Entity *e);
gbString write_expr_to_string(gbString str, Ast *node);
gbString write_expr_to_string(gbString str, Ast *node, bool shorthand);
gbString write_exact_value_to_string(gbString str, ExactValue const &v, isize string_limit=36) {
switch (v.kind) {
@@ -976,14 +982,16 @@ gbString write_exact_value_to_string(gbString str, ExactValue const &v, isize st
case ExactValue_Float:
return gb_string_append_fmt(str, "%f", v.value_float);
case ExactValue_Complex:
return gb_string_append_fmt(str, "%f+%fi", v.value_complex.real, v.value_complex.imag);
return gb_string_append_fmt(str, "%f+%fi", v.value_complex->real, v.value_complex->imag);
case ExactValue_Quaternion:
return gb_string_append_fmt(str, "%f+%fi+%fj+%fk", v.value_quaternion->real, v.value_quaternion->imag, v.value_quaternion->jmag, v.value_quaternion->kmag);
case ExactValue_Pointer:
return str;
case ExactValue_Compound:
return write_expr_to_string(str, v.value_compound);
return write_expr_to_string(str, v.value_compound, false);
case ExactValue_Procedure:
return write_expr_to_string(str, v.value_procedure);
return write_expr_to_string(str, v.value_procedure, false);
}
return str;
};

View File

@@ -157,7 +157,7 @@ extern "C" {
#endif
#endif
#if defined(_WIN64) || defined(__x86_64__) || defined(_M_X64) || defined(__64BIT__) || defined(__powerpc64__) || defined(__ppc64__)
#if defined(_WIN64) || defined(__x86_64__) || defined(_M_X64) || defined(__64BIT__) || defined(__powerpc64__) || defined(__ppc64__) || defined(__aarch64__)
#ifndef GB_ARCH_64_BIT
#define GB_ARCH_64_BIT 1
#endif
@@ -230,7 +230,7 @@ extern "C" {
#define GB_CACHE_LINE_SIZE 128
#endif
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__) || defined(_M_ARM) || defined(_M_ARM64)
#ifndef GB_CPU_ARM
#define GB_CPU_ARM 1
#endif
@@ -3702,6 +3702,12 @@ gb_inline void *gb_memcopy(void *dest, void const *source, isize n) {
void *dest_copy = dest;
__asm__ __volatile__("rep movsb" : "+D"(dest_copy), "+S"(source), "+c"(n) : : "memory");
#elif defined(GB_CPU_ARM)
u8 *s = cast(u8 *)source;
u8 *d = cast(u8 *)dest;
for (isize i = 0; i < n; i++) {
*d++ = *s++;
}
#else
u8 *d = cast(u8 *)dest;
u8 const *s = cast(u8 const *)source;
@@ -4438,6 +4444,76 @@ gb_inline i64 gb_atomic64_fetch_or(gbAtomic64 volatile *a, i64 operand) {
#endif
}
#elif defined(GB_CPU_ARM)
// ARM (32/64-bit) atomics implemented on top of the GCC/Clang __atomic builtins.
// All operations use __ATOMIC_SEQ_CST, matching the sequentially-consistent
// semantics of the x86 implementations above.
gb_inline i32 gb_atomic32_load (gbAtomic32 const volatile *a) {
return __atomic_load_n(&a->value, __ATOMIC_SEQ_CST);
}
gb_inline void gb_atomic32_store(gbAtomic32 volatile *a, i32 value) {
__atomic_store_n(&a->value, value, __ATOMIC_SEQ_CST);
}
// Returns the value observed in `a` before the exchange: equal to `expected`
// on success, and the actual (differing) value on failure.
gb_inline i32 gb_atomic32_compare_exchange(gbAtomic32 volatile *a, i32 expected, i32 desired) {
i32 expected_copy = expected;
// NOTE: weak CAS (4th argument `true`) — may fail spuriously; callers are
// expected to loop. The builtin writes the observed value into expected_copy.
auto result = __atomic_compare_exchange_n(&a->value, &expected_copy, desired, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
if (result) {
return expected;
} else {
return expected_copy;
}
}
gb_inline i32 gb_atomic32_exchanged(gbAtomic32 volatile *a, i32 desired) {
return __atomic_exchange_n(&a->value, desired, __ATOMIC_SEQ_CST);
}
// fetch_* return the value held before the operation was applied.
gb_inline i32 gb_atomic32_fetch_add(gbAtomic32 volatile *a, i32 operand) {
return __atomic_fetch_add(&a->value, operand, __ATOMIC_SEQ_CST);
}
gb_inline i32 gb_atomic32_fetch_and(gbAtomic32 volatile *a, i32 operand) {
return __atomic_fetch_and(&a->value, operand, __ATOMIC_SEQ_CST);
}
gb_inline i32 gb_atomic32_fetch_or(gbAtomic32 volatile *a, i32 operand) {
return __atomic_fetch_or(&a->value, operand, __ATOMIC_SEQ_CST);
}
// 64-bit variants mirror the 32-bit ones exactly.
// NOTE(review): on 32-bit ARM these 64-bit builtins may lower to libatomic
// calls rather than inline instructions — confirm link requirements.
gb_inline i64 gb_atomic64_load(gbAtomic64 const volatile *a) {
return __atomic_load_n(&a->value, __ATOMIC_SEQ_CST);
}
gb_inline void gb_atomic64_store(gbAtomic64 volatile *a, i64 value) {
__atomic_store_n(&a->value, value, __ATOMIC_SEQ_CST);
}
gb_inline i64 gb_atomic64_compare_exchange(gbAtomic64 volatile *a, i64 expected, i64 desired) {
i64 expected_copy = expected;
auto result = __atomic_compare_exchange_n(&a->value, &expected_copy, desired, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
if (result) {
return expected;
} else {
return expected_copy;
}
}
gb_inline i64 gb_atomic64_exchanged(gbAtomic64 volatile *a, i64 desired) {
return __atomic_exchange_n(&a->value, desired, __ATOMIC_SEQ_CST);
}
gb_inline i64 gb_atomic64_fetch_add(gbAtomic64 volatile *a, i64 operand) {
return __atomic_fetch_add(&a->value, operand, __ATOMIC_SEQ_CST);
}
gb_inline i64 gb_atomic64_fetch_and(gbAtomic64 volatile *a, i64 operand) {
return __atomic_fetch_and(&a->value, operand, __ATOMIC_SEQ_CST);
}
gb_inline i64 gb_atomic64_fetch_or(gbAtomic64 volatile *a, i64 operand) {
return __atomic_fetch_or(&a->value, operand, __ATOMIC_SEQ_CST);
}
#else
#error TODO(bill): Implement Atomics for this CPU
#endif
@@ -4563,7 +4639,11 @@ gb_inline void gb_yield_thread(void) {
#if defined(GB_SYSTEM_WINDOWS)
_mm_pause();
#elif defined(GB_SYSTEM_OSX)
#if defined(GB_CPU_X86)
__asm__ volatile ("" : : : "memory");
#elif defined(GB_CPU_ARM)
__asm__ volatile ("yield" : : : "memory");
#endif
#elif defined(GB_CPU_X86)
_mm_pause();
#else
@@ -4575,7 +4655,11 @@ gb_inline void gb_mfence(void) {
#if defined(GB_SYSTEM_WINDOWS)
_ReadWriteBarrier();
#elif defined(GB_SYSTEM_OSX)
#if defined(GB_CPU_X86)
__sync_synchronize();
#elif defined(GB_CPU_ARM)
__atomic_thread_fence(__ATOMIC_SEQ_CST);
#endif
#elif defined(GB_CPU_X86)
_mm_mfence();
#else
@@ -4587,7 +4671,12 @@ gb_inline void gb_sfence(void) {
#if defined(GB_SYSTEM_WINDOWS)
_WriteBarrier();
#elif defined(GB_SYSTEM_OSX)
#if defined(GB_CPU_X86)
__asm__ volatile ("" : : : "memory");
#elif defined(GB_CPU_ARM)
// TODO(bill): is this correct?
__atomic_thread_fence(__ATOMIC_SEQ_CST);
#endif
#elif defined(GB_CPU_X86)
_mm_sfence();
#else
@@ -5156,7 +5245,7 @@ b32 gb_affinity_set(gbAffinity *a, isize core, isize thread_index) {
index = core * a->threads_per_core + thread_index;
thread = pthread_self();
cpuset_t mn;
CPU_ZERO(&mn);
@@ -5202,7 +5291,7 @@ void gb_affinity_init(gbAffinity *a) {
for (;;) {
// The 'temporary char'. Everything goes into this char,
// so that we can check against EOF at the end of this loop.
char c;
int c;
#define AF__CHECK(letter) ((c = getc(cpu_info)) == letter)
if (AF__CHECK('c') && AF__CHECK('p') && AF__CHECK('u') && AF__CHECK(' ') &&
@@ -8808,6 +8897,14 @@ gb_inline gbDllProc gb_dll_proc_address(gbDllHandle dll, char const *proc_name)
return result;
}
#elif defined(__aarch64__)
// AArch64 substitute for x86 RDTSC: reads CNTVCT_EL0, the generic-timer
// virtual count register.
// NOTE(review): this counter ticks at the generic-timer frequency (CNTFRQ_EL0),
// not at CPU clock speed, so values are not directly comparable to x86 TSC
// cycle counts — confirm callers only use it for relative timing.
gb_inline u64 gb_rdtsc(void) {
int64_t virtual_timer_value;
asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
return virtual_timer_value;
}
#else
#error "gb_rdtsc not supported"
#endif
#if defined(GB_SYSTEM_WINDOWS)

View File

@@ -25,6 +25,9 @@ struct irModule {
Map<irDebugInfo *> debug_info; // Key: Unique pointer
Map<irValue *> anonymous_proc_lits; // Key: Ast *
Map<irValue *> equal_procs; // Key: Type *
Map<irValue *> hasher_procs; // Key: Type *
irDebugInfo * debug_compile_unit;
Array<irDebugInfo *> debug_location_stack;
@@ -161,6 +164,7 @@ struct irProcedure {
Ast * return_ptr_hint_ast;
bool return_ptr_hint_used;
bool ignore_dead_instr;
Array<irBranchBlocks> branch_blocks;
@@ -454,7 +458,6 @@ struct irValueSourceCodeLocation {
irValue *line;
irValue *column;
irValue *procedure;
u64 hash;
};
@@ -525,6 +528,11 @@ struct irAddr {
Type *ir_type(irValue *value);
irValue *ir_gen_anonymous_proc_lit(irModule *m, String prefix_name, Ast *expr, irProcedure *proc = nullptr);
void ir_begin_procedure_body(irProcedure *proc);
void ir_end_procedure_body(irProcedure *proc);
irValue *ir_get_equal_proc_for_type(irModule *m, Type *type);
irValue *ir_get_hasher_proc_for_type(irModule *m, Type *type);
irAddr ir_addr(irValue *addr) {
irAddr v = {irAddr_Default, addr};
@@ -1159,7 +1167,7 @@ irValue *ir_instr_atomic_cxchg(irProcedure *p, Type *type, irValue *address, irV
GB_ASSERT(type->Tuple.variables.count == 2);
Type *elem = type->Tuple.variables[0]->type;
// LEAK TODO(bill): LLVM returns {T, i1} whilst Odin does {T, bool}, fix this mapping hack
gbAllocator a = heap_allocator();
gbAllocator a = permanent_allocator();
Type *llvm_type = alloc_type_tuple();
array_init(&llvm_type->Tuple.variables, a, 0, 2);
array_add (&llvm_type->Tuple.variables, alloc_entity_field(nullptr, blank_token, elem, false, 0));
@@ -1799,7 +1807,7 @@ irValue *ir_add_local(irProcedure *proc, Entity *e, Ast *expr, bool zero_initial
if (zero_initialized) {
ir_emit_zero_init(proc, instr, expr);
}
set_procedure_abi_types(heap_allocator(), e->type);
set_procedure_abi_types(e->type);
// if (proc->module->generate_debug_info && expr != nullptr && proc->entity != nullptr) {
// if (proc->module->generate_debug_info && proc->entity != nullptr) {
@@ -2132,7 +2140,7 @@ irDebugInfo *ir_add_debug_info_field(irModule *module, irDebugInfo *scope, Entit
if (e->token.string.len == 0) {
// If no name available for field, use its field index as its name.
isize max_len = 8;
u8 *str = cast(u8 *)gb_alloc_array(heap_allocator(), u8, max_len);
u8 *str = cast(u8 *)gb_alloc_array(permanent_allocator(), u8, max_len);
isize len = gb_snprintf(cast(char *)str, 8, "%d", index);
di->DerivedType.name = make_string(str, len-1);
}
@@ -3282,7 +3290,7 @@ irValue *ir_emit_call(irProcedure *p, irValue *value, Array<irValue *> const &ar
context_ptr = ir_find_or_generate_context_ptr(p);
}
set_procedure_abi_types(heap_allocator(), pt);
set_procedure_abi_types(pt);
bool is_c_vararg = pt->Proc.c_vararg;
isize param_count = pt->Proc.param_count;
@@ -3293,7 +3301,7 @@ irValue *ir_emit_call(irProcedure *p, irValue *value, Array<irValue *> const &ar
GB_ASSERT_MSG(param_count == args.count, "%.*s %td == %td", LIT(p->entity->token.string), param_count, args.count);
}
auto processed_args = array_make<irValue *>(heap_allocator(), 0, args.count);
auto processed_args = array_make<irValue *>(permanent_allocator(), 0, args.count);
for (isize i = 0; i < param_count; i++) {
Entity *e = pt->Proc.params->Tuple.variables[i];
@@ -3416,7 +3424,7 @@ irValue *ir_emit_call(irProcedure *p, irValue *value, Array<irValue *> const &ar
case DeferredProcedure_in_out:
{
auto out_args = ir_value_to_array(p, result);
array_init(&result_as_args, heap_allocator(), in_args.count + out_args.count);
array_init(&result_as_args, permanent_allocator(), in_args.count + out_args.count);
array_copy(&result_as_args, in_args, 0);
array_copy(&result_as_args, out_args, in_args.count);
}
@@ -3587,65 +3595,69 @@ irValue *ir_gen_map_header(irProcedure *proc, irValue *map_val_ptr, Type *map_ty
irValue *m = ir_emit_conv(proc, map_val_ptr, type_deref(ir_type(gep0)));
ir_emit_store(proc, gep0, m);
ir_emit_store(proc, ir_emit_struct_ep(proc, h, 1), ir_const_bool(is_type_string(key_type)));
i64 entry_size = type_size_of (map_type->Map.entry_type);
i64 entry_align = type_align_of (map_type->Map.entry_type);
i64 value_offset = type_offset_of(map_type->Map.entry_type, 2);
i64 key_offset = type_offset_of(map_type->Map.entry_type, 2);
i64 key_size = type_size_of (map_type->Map.key);
i64 value_offset = type_offset_of(map_type->Map.entry_type, 3);
i64 value_size = type_size_of (map_type->Map.value);
ir_emit_store(proc, ir_emit_struct_ep(proc, h, 1), ir_get_equal_proc_for_type(proc->module, key_type));
ir_emit_store(proc, ir_emit_struct_ep(proc, h, 2), ir_const_int(entry_size));
ir_emit_store(proc, ir_emit_struct_ep(proc, h, 3), ir_const_int(entry_align));
ir_emit_store(proc, ir_emit_struct_ep(proc, h, 4), ir_const_uintptr(value_offset));
ir_emit_store(proc, ir_emit_struct_ep(proc, h, 5), ir_const_int(value_size));
ir_emit_store(proc, ir_emit_struct_ep(proc, h, 4), ir_const_uintptr(key_offset));
ir_emit_store(proc, ir_emit_struct_ep(proc, h, 5), ir_const_int(key_size));
ir_emit_store(proc, ir_emit_struct_ep(proc, h, 6), ir_const_uintptr(value_offset));
ir_emit_store(proc, ir_emit_struct_ep(proc, h, 7), ir_const_int(value_size));
return ir_emit_load(proc, h);
}
irValue *ir_gen_map_key(irProcedure *proc, irValue *key, Type *key_type) {
// Attempts to fold a map-key hash at compile time for a constant key.
// Returns an ir constant holding the FNV-1a hash when `key` is an
// irValue_Constant of a foldable type (currently only strings), otherwise
// nullptr so the caller falls back to the runtime hasher procedure.
// NOTE(review): the `m` parameter is currently unused here.
irValue *ir_const_hash(irModule *m, irValue *key, Type *key_type) {
irValue *hashed_key = nullptr;
if (key->kind == irValue_Constant) {
// FNV-1a 64-bit offset basis; overwritten below for supported types.
u64 hash = 0xcbf29ce484222325;
if (is_type_string(key_type)) {
GB_ASSERT(key->Constant.value.kind == ExactValue_String);
String s = key->Constant.value.value_string;
hash = fnv64a(s.text, s.len);
} else {
// Unsupported constant kind: signal "not foldable" to the caller.
return nullptr;
}
// TODO(bill): other const hash types
// The hash is stored as a uintptr, so truncate on 32-bit targets to keep
// the compile-time value consistent with the runtime hasher's result.
if (build_context.word_size == 4) {
hash &= 0xffffffffull;
}
hashed_key = ir_const_uintptr(hash);
}
return hashed_key;
}
irValue *ir_gen_map_hash(irProcedure *proc, irValue *key, Type *key_type) {
Type *hash_type = t_u64;
irValue *v = ir_add_local_generated(proc, t_map_key, true);
irValue *v = ir_add_local_generated(proc, t_map_hash, true);
Type *t = base_type(ir_type(key));
key = ir_emit_conv(proc, key, key_type);
if (is_type_string(t)) {
irValue *str = ir_emit_conv(proc, key, t_string);
irValue *hashed_str = nullptr;
irValue *key_ptr = ir_address_from_load_or_generate_local(proc, key);
key_ptr = ir_emit_conv(proc, key_ptr, t_rawptr);
if (str->kind == irValue_Constant) {
ExactValue ev = str->Constant.value;
GB_ASSERT(ev.kind == ExactValue_String);
u64 hs = fnv64a(ev.value_string.text, ev.value_string.len);
hashed_str = ir_value_constant(t_u64, exact_value_u64(hs));
} else {
auto args = array_make<irValue *>(ir_allocator(), 1);
args[0] = str;
hashed_str = ir_emit_runtime_call(proc, "default_hash_string", args);
}
ir_emit_store(proc, ir_emit_struct_ep(proc, v, 0), hashed_str);
irValue *key_data = ir_emit_struct_ep(proc, v, 1);
key_data = ir_emit_conv(proc, key_data, alloc_type_pointer(key_type));
ir_emit_store(proc, key_data, str);
} else {
i64 sz = type_size_of(t);
GB_ASSERT(sz <= 8);
if (sz != 0) {
auto args = array_make<irValue *>(ir_allocator(), 2);
args[0] = ir_address_from_load_or_generate_local(proc, key);
args[1] = ir_const_int(sz);
irValue *hash = ir_emit_runtime_call(proc, "default_hash_ptr", args);
irValue *hash_ptr = ir_emit_struct_ep(proc, v, 0);
irValue *key_data = ir_emit_struct_ep(proc, v, 1);
key_data = ir_emit_conv(proc, key_data, alloc_type_pointer(key_type));
ir_emit_store(proc, hash_ptr, hash);
ir_emit_store(proc, key_data, key);
}
irValue *hashed_key = ir_const_hash(proc->module, key, key_type);
if (hashed_key == nullptr) {
irValue *hasher = ir_get_hasher_proc_for_type(proc->module, key_type);
auto args = array_make<irValue *>(permanent_allocator(), 2);
args[0] = key_ptr;
args[1] = ir_value_constant(t_uintptr, exact_value_i64(0));
hashed_key = ir_emit_call(proc, hasher, args);
}
ir_emit_store(proc, ir_emit_struct_ep(proc, v, 0), hashed_key);
ir_emit_store(proc, ir_emit_struct_ep(proc, v, 1), key_ptr);
return ir_emit_load(proc, v);
}
@@ -3701,7 +3713,7 @@ irValue *ir_insert_dynamic_map_key_and_value(irProcedure *proc, irValue *addr, T
map_type = base_type(map_type);
irValue *h = ir_gen_map_header(proc, addr, map_type);
irValue *key = ir_gen_map_key(proc, map_key, map_type->Map.key);
irValue *key = ir_gen_map_hash(proc, map_key, map_type->Map.key);
irValue *v = ir_emit_conv(proc, map_value, map_type->Map.value);
irValue *ptr = ir_add_local_generated(proc, ir_type(v), false);
@@ -4058,7 +4070,7 @@ irValue *ir_addr_load(irProcedure *proc, irAddr const &addr) {
Type *map_type = base_type(addr.map_type);
irValue *v = ir_add_local_generated(proc, map_type->Map.lookup_result_type, true);
irValue *h = ir_gen_map_header(proc, addr.addr, map_type);
irValue *key = ir_gen_map_key(proc, addr.map_key, map_type->Map.key);
irValue *key = ir_gen_map_hash(proc, addr.map_key, map_type->Map.key);
auto args = array_make<irValue *>(ir_allocator(), 2);
args[0] = h;
@@ -4226,7 +4238,7 @@ irValue *ir_addr_get_ptr(irProcedure *proc, irAddr const &addr, bool allow_refer
if (allow_reference) {
Type *map_type = base_type(addr.map_type);
irValue *h = ir_gen_map_header(proc, addr.addr, map_type);
irValue *key = ir_gen_map_key(proc, addr.map_key, map_type->Map.key);
irValue *key = ir_gen_map_hash(proc, addr.map_key, map_type->Map.key);
auto args = array_make<irValue *>(ir_allocator(), 2);
args[0] = h;
@@ -4537,7 +4549,7 @@ irValue *ir_emit_arith(irProcedure *proc, TokenKind op, irValue *left, irValue *
Type *ft = base_complex_elem_type(t_left);
if (op == Token_Quo) {
auto args = array_make<irValue *>(heap_allocator(), 2);
auto args = array_make<irValue *>(permanent_allocator(), 2);
args[0] = left;
args[1] = right;
@@ -4615,7 +4627,7 @@ irValue *ir_emit_arith(irProcedure *proc, TokenKind op, irValue *left, irValue *
return ir_emit_load(proc, res);
} else if (op == Token_Mul) {
auto args = array_make<irValue *>(heap_allocator(), 2);
auto args = array_make<irValue *>(permanent_allocator(), 2);
args[0] = left;
args[1] = right;
@@ -4625,7 +4637,7 @@ irValue *ir_emit_arith(irProcedure *proc, TokenKind op, irValue *left, irValue *
default: GB_PANIC("Unknown float type"); break;
}
} else if (op == Token_Quo) {
auto args = array_make<irValue *>(heap_allocator(), 2);
auto args = array_make<irValue *>(permanent_allocator(), 2);
args[0] = left;
args[1] = right;
@@ -4828,7 +4840,7 @@ irValue *ir_emit_comp_against_nil(irProcedure *proc, TokenKind op_kind, irValue
irValue *invalid_typeid = ir_value_constant(t_typeid, exact_value_i64(0));
return ir_emit_comp(proc, op_kind, x, invalid_typeid);
} else if (is_type_bit_field(t)) {
auto args = array_make<irValue *>(heap_allocator(), 2);
auto args = array_make<irValue *>(permanent_allocator(), 2);
irValue *lhs = ir_address_from_load_or_generate_local(proc, x);
args[0] = ir_emit_conv(proc, lhs, t_rawptr);
args[1] = ir_const_int(type_size_of(t));
@@ -4848,7 +4860,7 @@ irValue *ir_emit_comp_against_nil(irProcedure *proc, TokenKind op_kind, irValue
return ir_emit_comp(proc, op_kind, cap, v_zero);
}
} else if (is_type_struct(t) && type_has_nil(t)) {
auto args = array_make<irValue *>(heap_allocator(), 2);
auto args = array_make<irValue *>(permanent_allocator(), 2);
irValue *lhs = ir_address_from_load_or_generate_local(proc, x);
args[0] = ir_emit_conv(proc, lhs, t_rawptr);
args[1] = ir_const_int(type_size_of(t));
@@ -4859,6 +4871,244 @@ irValue *ir_emit_comp_against_nil(irProcedure *proc, TokenKind op_kind, irValue
return nullptr;
}
// Returns (creating and caching on first use) a synthesized equality procedure
// for `type`, used e.g. for map keys and comparable structs. The generated
// procedure has signature t_equal_proc, takes two pointers, and returns bool.
// Generated procedures are memoized in m->equal_procs keyed by hash_type(type).
irValue *ir_get_equal_proc_for_type(irModule *m, Type *type) {
Type *original_type = type;
type = base_type(type);
Type *pt = alloc_type_pointer(type);
auto key = hash_type(type);
irValue **found = map_get(&m->equal_procs, key);
if (found) {
return *found;
}
// First request for this type: synthesize a uniquely-named procedure
// (__$equalN) and register it before emitting its body.
static u32 proc_index = 0;
char buf[16] = {};
isize n = gb_snprintf(buf, 16, "__$equal%u", ++proc_index);
char *str = gb_alloc_str_len(permanent_allocator(), buf, n-1);
String proc_name = make_string_c(str);
Ast *body = alloc_ast_node(nullptr, Ast_Invalid);
Entity *e = alloc_entity_procedure(nullptr, make_token_ident(proc_name), t_equal_proc, 0);
e->Procedure.link_name = proc_name;
irValue *p = ir_value_procedure(m, e, t_equal_proc, nullptr, body, proc_name);
// Cache before body emission so recursive lookups find this procedure.
map_set(&m->values, hash_entity(e), p);
string_map_set(&m->members, proc_name, p);
map_set(&m->equal_procs, key, p);
irProcedure *proc = &p->Proc;
proc->is_startup = true;
proc->ignore_dead_instr = true;
ir_begin_procedure_body(proc);
// ir_start_block(proc, proc->decl_block);
GB_ASSERT(proc->curr_block != nullptr);
// Parameters arrive as raw pointers; convert to ^type for typed access.
irValue *x = proc->params[0];
irValue *y = proc->params[1];
irValue *lhs = ir_emit_conv(proc, x, pt);
irValue *rhs = ir_emit_conv(proc, y, pt);
// Fast path: identical pointers are trivially equal.
irBlock *block_same_ptr = ir_new_block(proc, nullptr, "same_ptr");
irBlock *block_diff_ptr = ir_new_block(proc, nullptr, "diff_ptr");
irValue *same_ptr = ir_emit_comp(proc, Token_CmpEq, lhs, rhs);
ir_emit_if(proc, same_ptr, block_same_ptr, block_diff_ptr);
ir_start_block(proc, block_same_ptr);
ir_emit(proc, ir_instr_return(proc, ir_const_bool(true)));
ir_start_block(proc, block_diff_ptr);
if (type->kind == Type_Struct) {
// Structs: compare field by field, short-circuiting to `false` on the
// first mismatch. ir_emit_comp may itself recurse into nested equal procs.
type_set_offsets(type);
irBlock *done = ir_new_block(proc, nullptr, "done"); // NOTE(bill): Append later
irBlock *block_false = ir_new_block(proc, nullptr, "bfalse");
for_array(i, type->Struct.fields) {
irBlock *next_block = ir_new_block(proc, nullptr, "btrue");
irValue *pleft = ir_emit_struct_ep(proc, lhs, cast(i32)i);
irValue *pright = ir_emit_struct_ep(proc, rhs, cast(i32)i);
irValue *left = ir_emit_load(proc, pleft);
irValue *right = ir_emit_load(proc, pright);
irValue *ok = ir_emit_comp(proc, Token_CmpEq, left, right);
ir_emit_if(proc, ok, next_block, block_false);
// NOTE(review): this jump directly after ir_emit_if looks redundant
// (the conditional branch should terminate the block) — confirm it is
// intentionally harmless here.
ir_emit_jump(proc, next_block);
ir_start_block(proc, next_block);
}
ir_emit_jump(proc, done);
ir_start_block(proc, block_false);
ir_emit(proc, ir_instr_return(proc, ir_const_bool(false)));
ir_emit_jump(proc, done);
ir_start_block(proc, done);
ir_emit(proc, ir_instr_return(proc, ir_const_bool(true)));
} else {
// Non-struct types: a single scalar comparison of the pointed-to values.
irValue *left = ir_emit_load(proc, lhs);
irValue *right = ir_emit_load(proc, rhs);
irValue *ok = ir_emit_comp(proc, Token_CmpEq, left, right);
ok = ir_emit_conv(proc, ok, t_bool);
ir_emit(proc, ir_instr_return(proc, ok));
}
ir_end_procedure_body(proc);
return p;
}
// Emits a hash call for a type that is byte-wise comparable (simple-compare):
// sizes 1..16 dispatch to the specialized runtime default_hasher{1..16},
// anything else uses the length-parameterized default_hasher_n.
// `data` is a pointer to the value, `seed` the incoming hash seed; returns the
// emitted call's result value.
irValue *ir_simple_compare_hash(irProcedure *p, Type *type, irValue *data, irValue *seed) {
GB_ASSERT_MSG(is_type_simple_compare(type), "%s", type_to_string(type));
i64 sz = type_size_of(type);
if (1 <= sz && sz <= 16) {
// Build the specialized runtime hasher name, e.g. "default_hasher8".
char name[20] = {};
gb_snprintf(name, 20, "default_hasher%d", cast(i32)sz);
auto args = array_make<irValue *>(permanent_allocator(), 2);
args[0] = data;
args[1] = seed;
return ir_emit_runtime_call(p, name, args);
}
// Generic path: pass the byte count explicitly.
auto args = array_make<irValue *>(permanent_allocator(), 3);
args[0] = data;
args[1] = seed;
args[2] = ir_const_int(type_size_of(type));
return ir_emit_runtime_call(p, "default_hasher_n", args);
}
// Returns (creating and caching on first use) a synthesized hashing procedure
// for `type`, used for map keys. The generated procedure has signature
// t_hasher_proc: (data: rawptr, seed: uintptr) -> uintptr. Memoized in
// m->hasher_procs keyed by hash_type(type).
irValue *ir_get_hasher_proc_for_type(irModule *m, Type *type) {
Type *original_type = type;
type = core_type(type);
Type *pt = alloc_type_pointer(type);
GB_ASSERT(is_type_valid_for_keys(type));
auto key = hash_type(type);
irValue **found = map_get(&m->hasher_procs, key);
if (found) {
return *found;
}
// First request for this type: synthesize a uniquely-named procedure
// (__$hasherN) and register it before emitting its body.
static u32 proc_index = 0;
char buf[16] = {};
isize n = gb_snprintf(buf, 16, "__$hasher%u", ++proc_index);
char *str = gb_alloc_str_len(permanent_allocator(), buf, n-1);
String proc_name = make_string_c(str);
Ast *body = alloc_ast_node(nullptr, Ast_Invalid);
Entity *e = alloc_entity_procedure(nullptr, make_token_ident(proc_name), t_hasher_proc, 0);
e->Procedure.link_name = proc_name;
irValue *p = ir_value_procedure(m, e, t_hasher_proc, nullptr, body, proc_name);
// Cache before body emission so recursive lookups (nested field/element
// hashers) find this procedure instead of re-generating it.
map_set(&m->values, hash_entity(e), p);
string_map_set(&m->members, proc_name, p);
map_set(&m->hasher_procs, key, p);
irProcedure *proc = &p->Proc;
proc->is_startup = true;
proc->ignore_dead_instr = true;
ir_begin_procedure_body(proc);
defer (ir_end_procedure_body(proc));
// ir_start_block(proc, proc->decl_block);
GB_ASSERT(proc->curr_block != nullptr);
irValue *data = proc->params[0];
irValue *seed = proc->params[1];
// Byte-wise comparable types: hash raw bytes via the default hashers.
if (is_type_simple_compare(type)) {
irValue *res = ir_simple_compare_hash(proc, type, data, seed);
ir_emit(proc, ir_instr_return(proc, res));
return p;
}
if (is_type_cstring(type)) {
auto args = array_make<irValue *>(permanent_allocator(), 2);
args[0] = data;
args[1] = seed;
irValue *res = ir_emit_runtime_call(proc, "default_hasher_cstring", args);
ir_emit(proc, ir_instr_return(proc, res));
} else if (is_type_string(type)) {
auto args = array_make<irValue *>(permanent_allocator(), 2);
args[0] = data;
args[1] = seed;
irValue *res = ir_emit_runtime_call(proc, "default_hasher_string", args);
ir_emit(proc, ir_instr_return(proc, res));
} else if (type->kind == Type_Struct) {
// Structs: chain the seed through each field's hasher, using the field's
// byte offset to locate it within the struct.
type_set_offsets(type);
data = ir_emit_conv(proc, data, t_u8_ptr);
auto args = array_make<irValue *>(permanent_allocator(), 2);
for_array(i, type->Struct.fields) {
i64 offset = type->Struct.offsets[i];
Entity *field = type->Struct.fields[i];
irValue *field_hasher = ir_get_hasher_proc_for_type(m, field->type);
irValue *ptr = ir_emit_ptr_offset(proc, data, ir_const_uintptr(offset));
args[0] = ptr;
args[1] = seed;
seed = ir_emit_call(proc, field_hasher, args);
}
ir_emit(proc, ir_instr_return(proc, seed));
} else if (type->kind == Type_Array) {
// Arrays: loop over elements, threading the seed through the element
// hasher via a generated local.
irValue *pres = ir_add_local_generated(proc, t_uintptr, false);
ir_emit_store(proc, pres, seed);
auto args = array_make<irValue *>(permanent_allocator(), 2);
irValue *elem_hasher = ir_get_hasher_proc_for_type(m, type->Array.elem);
auto loop_data = ir_loop_start(proc, type->Array.count, t_i32);
data = ir_emit_conv(proc, data, pt);
irValue *ptr = ir_emit_array_ep(proc, data, loop_data.idx);
args[0] = ptr;
args[1] = ir_emit_load(proc, pres);
irValue *new_seed = ir_emit_call(proc, elem_hasher, args);
ir_emit_store(proc, pres, new_seed);
ir_loop_end(proc, loop_data);
irValue *res = ir_emit_load(proc, pres);
ir_emit(proc, ir_instr_return(proc, res));
} else if (type->kind == Type_EnumeratedArray) {
// NOTE(review): this branch reads type->Array.elem / type->Array.count on a
// Type_EnumeratedArray — presumably it should access the EnumeratedArray
// variant's fields; confirm the union layout makes this equivalent.
irValue *pres = ir_add_local_generated(proc, t_uintptr, false);
ir_emit_store(proc, pres, seed);
auto args = array_make<irValue *>(permanent_allocator(), 2);
irValue *elem_hasher = ir_get_hasher_proc_for_type(m, type->Array.elem);
auto loop_data = ir_loop_start(proc, type->Array.count, t_i32);
data = ir_emit_conv(proc, data, pt);
irValue *ptr = ir_emit_array_ep(proc, data, loop_data.idx);
args[0] = ptr;
args[1] = ir_emit_load(proc, pres);
irValue *new_seed = ir_emit_call(proc, elem_hasher, args);
ir_emit_store(proc, pres, new_seed);
ir_loop_end(proc, loop_data);
irValue *res = ir_emit_load(proc, pres);
ir_emit(proc, ir_instr_return(proc, res));
} else {
GB_PANIC("Unhandled type for hasher: %s", type_to_string(type));
}
return p;
}
irValue *ir_emit_comp(irProcedure *proc, TokenKind op_kind, irValue *left, irValue *right) {
Type *a = base_type(ir_type(left));
Type *b = base_type(ir_type(right));
@@ -4966,7 +5216,7 @@ irValue *ir_emit_comp(irProcedure *proc, TokenKind op_kind, irValue *left, irVal
} else {
if (is_type_simple_compare(tl) && (op_kind == Token_CmpEq || op_kind == Token_NotEq)) {
// TODO(bill): Test to see if this is actually faster!!!!
auto args = array_make<irValue *>(heap_allocator(), 3);
auto args = array_make<irValue *>(permanent_allocator(), 3);
args[0] = ir_emit_conv(proc, lhs, t_rawptr);
args[1] = ir_emit_conv(proc, rhs, t_rawptr);
args[2] = ir_const_int(type_size_of(tl));
@@ -4992,6 +5242,30 @@ irValue *ir_emit_comp(irProcedure *proc, TokenKind op_kind, irValue *left, irVal
}
}
if (is_type_struct(a) && is_type_comparable(a)) {
irValue *left_ptr = ir_address_from_load_or_generate_local(proc, left);
irValue *right_ptr = ir_address_from_load_or_generate_local(proc, right);
irValue *res = {};
if (is_type_simple_compare(a)) {
// TODO(bill): Test to see if this is actually faster!!!!
auto args = array_make<irValue *>(permanent_allocator(), 3);
args[0] = ir_emit_conv(proc, left_ptr, t_rawptr);
args[1] = ir_emit_conv(proc, right_ptr, t_rawptr);
args[2] = ir_const_int(type_size_of(a));
res = ir_emit_runtime_call(proc, "memory_equal", args);
} else {
irValue *value = ir_get_equal_proc_for_type(proc->module, a);
auto args = array_make<irValue *>(permanent_allocator(), 2);
args[0] = ir_emit_conv(proc, left_ptr, t_rawptr);
args[1] = ir_emit_conv(proc, right_ptr, t_rawptr);
res = ir_emit_call(proc, value, args);
}
if (op_kind == Token_NotEq) {
res = ir_emit_unary_arith(proc, Token_Not, res, ir_type(res));
}
return res;
}
if (is_type_string(a)) {
if (is_type_cstring(a)) {
left = ir_emit_conv(proc, left, t_string);
@@ -6636,7 +6910,7 @@ void ir_mangle_add_sub_type_name(irModule *m, Entity *field, String parent) {
return;
}
if (is_type_proc(field->type)) {
set_procedure_abi_types(heap_allocator(), field->type);
set_procedure_abi_types(field->type);
}
String cn = field->token.string;
@@ -6733,7 +7007,7 @@ irValue *ir_gen_anonymous_proc_lit(irModule *m, String prefix_name, Ast *expr, i
String name = make_string(name_text, name_len-1);
Type *type = type_of_expr(expr);
set_procedure_abi_types(heap_allocator(), type);
set_procedure_abi_types(type);
irValue *value = ir_value_procedure(m, nullptr, type, pl->type, pl->body, name);
value->Proc.tags = pl->tags;
@@ -6789,6 +7063,9 @@ void ir_gen_global_type_name(irModule *m, Entity *e, String name) {
if (!ir_min_dep_entity(m, e)) {
return;
}
if (is_type_proc(e->type)) {
return;
}
irValue *t = ir_value_type_name(name, e->type);
ir_module_add_value(m, e, t);
string_map_set(&m->members, name, t);
@@ -6884,7 +7161,7 @@ irValue *ir_find_global_variable(irProcedure *proc, String name) {
return *value;
}
void ir_build_stmt_list(irProcedure *proc, Array<Ast *> stmts);
void ir_build_stmt_list(irProcedure *proc, Slice<Ast *> stmts);
void ir_build_assign_op(irProcedure *proc, irAddr const &lhs, irValue *value, TokenKind op);
bool is_double_pointer(Type *t) {
@@ -6898,17 +7175,6 @@ bool is_double_pointer(Type *t) {
return is_type_pointer(td);
}
u64 ir_generate_source_code_location_hash(TokenPos pos) {
u64 h = 0xcbf29ce484222325;
for (isize i = 0; i < pos.file.len; i++) {
h = (h ^ u64(pos.file[i])) * 0x100000001b3;
}
h = h ^ (u64(pos.line) * 0x100000001b3);
h = h ^ (u64(pos.column) * 0x100000001b3);
return h;
}
irValue *ir_emit_source_code_location(irProcedure *proc, String procedure, TokenPos pos) {
gbAllocator a = ir_allocator();
irValue *v = ir_alloc_value(irValue_SourceCodeLocation);
@@ -6916,7 +7182,6 @@ irValue *ir_emit_source_code_location(irProcedure *proc, String procedure, Token
v->SourceCodeLocation.line = ir_const_int(pos.line);
v->SourceCodeLocation.column = ir_const_int(pos.column);
v->SourceCodeLocation.procedure = ir_find_or_add_entity_string(proc->module, procedure);
v->SourceCodeLocation.hash = ir_generate_source_code_location_hash(pos);
return v;
}
@@ -7355,7 +7620,7 @@ irValue *ir_build_builtin_proc(irProcedure *proc, Ast *expr, TypeAndValue tv, Bu
// "Intrinsics"
case BuiltinProc_alloca:
{
auto args = array_make<irValue *>(heap_allocator(), 2);
auto args = array_make<irValue *>(permanent_allocator(), 2);
args[0] = ir_emit_conv(proc, ir_build_expr(proc, ce->args[0]), t_i32);
args[1] = ir_build_expr(proc, ce->args[1]);
return ir_emit(proc, ir_instr_inline_code(proc, id, args, t_u8_ptr));
@@ -7459,7 +7724,11 @@ irValue *ir_build_builtin_proc(irProcedure *proc, Ast *expr, TypeAndValue tv, Bu
return ir_emit(proc, ir_instr_atomic_cxchg(proc, type, address, old_value, new_value, id));
}
case BuiltinProc_type_equal_proc:
return ir_get_equal_proc_for_type(proc->module, ce->args[0]->tav.type);
case BuiltinProc_type_hasher_proc:
return ir_get_hasher_proc_for_type(proc->module, ce->args[0]->tav.type);
}
GB_PANIC("Unhandled built-in procedure");
@@ -7584,7 +7853,7 @@ irValue *ir_build_call_expr(irProcedure *proc, Ast *expr) {
Type *proc_type_ = base_type(ir_type(value));
GB_ASSERT(proc_type_->kind == Type_Proc);
TypeProc *pt = &proc_type_->Proc;
set_procedure_abi_types(heap_allocator(), proc_type_);
set_procedure_abi_types(proc_type_);
if (is_call_expr_field_value(ce)) {
auto args = array_make<irValue *>(ir_allocator(), pt->param_count);
@@ -7801,7 +8070,11 @@ irValue *ir_build_expr_internal(irProcedure *proc, Ast *expr) {
if (tv.value.kind != ExactValue_Invalid) {
// NOTE(bill): Edge case
if (tv.value.kind != ExactValue_Compound &&
if (is_type_u8_array(tv.type) && tv.value.kind == ExactValue_String) {
return ir_add_module_constant(proc->module, tv.type, tv.value);
} else if (is_type_rune_array(tv.type) && tv.value.kind == ExactValue_String) {
return ir_add_module_constant(proc->module, tv.type, tv.value);
} else if (tv.value.kind != ExactValue_Compound &&
is_type_array(tv.type)) {
Type *elem = core_array_type(tv.type);
ExactValue value = convert_exact_value_for_type(tv.value, elem);
@@ -8207,7 +8480,7 @@ irValue *ir_build_expr_internal(irProcedure *proc, Ast *expr) {
irValue *addr = ir_address_from_load_or_generate_local(proc, right);
irValue *h = ir_gen_map_header(proc, addr, rt);
irValue *key = ir_gen_map_key(proc, left, rt->Map.key);
irValue *key = ir_gen_map_hash(proc, left, rt->Map.key);
auto args = array_make<irValue *>(ir_allocator(), 2);
args[0] = h;
@@ -9024,8 +9297,7 @@ irAddr ir_build_addr(irProcedure *proc, Ast *expr) {
if (cl->elems.count > 0) {
ir_emit_store(proc, v, ir_add_module_constant(proc->module, type, exact_value_compound(expr)));
auto temp_data = array_make<irCompoundLitElemTempData>(heap_allocator(), 0, cl->elems.count);
defer (array_free(&temp_data));
auto temp_data = array_make<irCompoundLitElemTempData>(temporary_allocator(), 0, cl->elems.count);
// NOTE(bill): Separate value, gep, store into their own chunks
for_array(i, cl->elems) {
@@ -9123,8 +9395,7 @@ irAddr ir_build_addr(irProcedure *proc, Ast *expr) {
if (cl->elems.count > 0) {
ir_emit_store(proc, v, ir_add_module_constant(proc->module, type, exact_value_compound(expr)));
auto temp_data = array_make<irCompoundLitElemTempData>(heap_allocator(), 0, cl->elems.count);
defer (array_free(&temp_data));
auto temp_data = array_make<irCompoundLitElemTempData>(temporary_allocator(), 0, cl->elems.count);
// NOTE(bill): Separate value, gep, store into their own chunks
for_array(i, cl->elems) {
@@ -9232,8 +9503,7 @@ irAddr ir_build_addr(irProcedure *proc, Ast *expr) {
irValue *data = ir_emit_array_ep(proc, slice->ConstantSlice.backing_array, v_zero32);
auto temp_data = array_make<irCompoundLitElemTempData>(heap_allocator(), 0, cl->elems.count);
defer (array_free(&temp_data));
auto temp_data = array_make<irCompoundLitElemTempData>(temporary_allocator(), 0, cl->elems.count);
for_array(i, cl->elems) {
Ast *elem = cl->elems[i];
@@ -9574,7 +9844,7 @@ void ir_build_nested_proc(irProcedure *proc, AstProcLit *pd, Entity *e) {
name_len = gb_snprintf(cast(char *)name_text, name_len, "%.*s.%.*s-%d", LIT(proc->name), LIT(pd_name), guid);
String name = make_string(name_text, name_len-1);
set_procedure_abi_types(heap_allocator(), e->type);
set_procedure_abi_types(e->type);
irValue *value = ir_value_procedure(proc->module, e, e->type, pd->type, pd->body, name);
value->Proc.tags = pd->tags;
@@ -9673,7 +9943,7 @@ void ir_build_constant_value_decl(irProcedure *proc, AstValueDecl *vd) {
return;
}
set_procedure_abi_types(heap_allocator(), e->type);
set_procedure_abi_types(e->type);
irValue *value = ir_value_procedure(proc->module, e, e->type, pl->type, pl->body, name);
value->Proc.tags = pl->tags;
@@ -9692,7 +9962,7 @@ void ir_build_constant_value_decl(irProcedure *proc, AstValueDecl *vd) {
}
}
void ir_build_stmt_list(irProcedure *proc, Array<Ast *> stmts) {
void ir_build_stmt_list(irProcedure *proc, Slice<Ast *> stmts) {
// NOTE(bill): Precollect constant entities
for_array(i, stmts) {
Ast *stmt = stmts[i];
@@ -9844,13 +10114,8 @@ void ir_build_range_indexed(irProcedure *proc, irValue *expr, Type *val_type, ir
elem = ir_emit_load(proc, elem);
irValue *entry = ir_emit_ptr_offset(proc, elem, idx);
val = ir_emit_load(proc, ir_emit_struct_ep(proc, entry, 2));
irValue *key_raw = ir_emit_struct_ep(proc, entry, 0);
key_raw = ir_emit_struct_ep(proc, key_raw, 1);
irValue *key = ir_emit_conv(proc, key_raw, alloc_type_pointer(expr_type->Map.key));
idx = ir_emit_load(proc, key);
idx = ir_emit_load(proc, ir_emit_struct_ep(proc, entry, 2));
val = ir_emit_load(proc, ir_emit_struct_ep(proc, entry, 3));
break;
}
@@ -9995,7 +10260,7 @@ void ir_build_range_enum(irProcedure *proc, Type *enum_type, Type *val_type, irV
irValue *max_count = ir_const_int(enum_count);
irValue *ti = ir_type_info(proc, t);
irValue *variant = ir_emit_struct_ep(proc, ti, 3);
irValue *variant = ir_emit_struct_ep(proc, ti, 4);
irValue *eti_ptr = ir_emit_conv(proc, variant, t_type_info_enum_ptr);
irValue *values = ir_emit_load(proc, ir_emit_struct_ep(proc, eti_ptr, 2));
irValue *values_data = ir_slice_elem(proc, values);
@@ -10179,7 +10444,7 @@ void ir_build_stmt_internal(irProcedure *proc, Ast *node) {
String mangled_name = {};
{
gbString str = gb_string_make_length(heap_allocator(), proc->name.text, proc->name.len);
gbString str = gb_string_make_length(permanent_allocator(), proc->name.text, proc->name.len);
str = gb_string_appendc(str, "-");
str = gb_string_append_fmt(str, ".%.*s-%llu", LIT(name), cast(long long)e->id);
mangled_name.text = cast(u8 *)str;
@@ -10902,7 +11167,7 @@ void ir_build_stmt_internal(irProcedure *proc, Ast *node) {
ast_node(body, BlockStmt, ss->body);
Array<Ast *> default_stmts = {};
Slice<Ast *> default_stmts = {};
irBlock *default_fall = nullptr;
irBlock *default_block = nullptr;
@@ -11349,6 +11614,9 @@ void ir_begin_procedure_body(irProcedure *proc) {
bool ir_remove_dead_instr(irProcedure *proc) {
if (proc->ignore_dead_instr) {
return false;
}
isize elimination_count = 0;
retry:
#if 1
@@ -11471,11 +11739,11 @@ void ir_insert_code_before_proc(irProcedure* proc, irProcedure *parent) {
void ir_build_proc(irValue *value, irProcedure *parent) {
irProcedure *proc = &value->Proc;
set_procedure_abi_types(heap_allocator(), proc->type);
set_procedure_abi_types(proc->type);
proc->parent = parent;
if (proc->body != nullptr) {
if (proc->body != nullptr && proc->body->kind != Ast_Invalid) {
u64 prev_state_flags = proc->module->state_flags;
if (proc->tags != 0) {
@@ -11577,6 +11845,8 @@ void ir_init_module(irModule *m, Checker *c) {
map_init(&m->debug_info, heap_allocator());
map_init(&m->entity_names, heap_allocator());
map_init(&m->anonymous_proc_lits, heap_allocator());
map_init(&m->equal_procs, heap_allocator());
map_init(&m->hasher_procs, heap_allocator());
array_init(&m->procs, heap_allocator());
array_init(&m->procs_to_generate, heap_allocator());
array_init(&m->foreign_library_paths, heap_allocator());
@@ -11860,6 +12130,8 @@ void ir_setup_type_info_data(irProcedure *proc) { // NOTE(bill): Setup type_info
// Useful types
Type *t_i64_slice_ptr = alloc_type_pointer(alloc_type_slice(t_i64));
Type *t_string_slice_ptr = alloc_type_pointer(alloc_type_slice(t_string));
Entity *type_info_flags_entity = find_core_entity(info->checker, str_lit("Type_Info_Flags"));
Type *t_type_info_flags = type_info_flags_entity->type;
i32 type_info_member_types_index = 0;
i32 type_info_member_names_index = 0;
@@ -11879,11 +12151,14 @@ void ir_setup_type_info_data(irProcedure *proc) { // NOTE(bill): Setup type_info
irValue *tag = nullptr;
irValue *ti_ptr = ir_emit_array_epi(proc, ir_global_type_info_data, cast(i32)entry_index);
irValue *variant_ptr = ir_emit_struct_ep(proc, ti_ptr, 3);
irValue *variant_ptr = ir_emit_struct_ep(proc, ti_ptr, 4);
irValue *type_info_flags = ir_value_constant(t_type_info_flags, exact_value_i64(type_info_flags_of_type(t)));
ir_emit_store(proc, ir_emit_struct_ep(proc, ti_ptr, 0), ir_const_int(type_size_of(t)));
ir_emit_store(proc, ir_emit_struct_ep(proc, ti_ptr, 1), ir_const_int(type_align_of(t)));
ir_emit_store(proc, ir_emit_struct_ep(proc, ti_ptr, 2), ir_typeid(proc->module, t));
ir_emit_store(proc, ir_emit_struct_ep(proc, ti_ptr, 2), type_info_flags);
ir_emit_store(proc, ir_emit_struct_ep(proc, ti_ptr, 3), ir_typeid(proc->module, t));
switch (t->kind) {
@@ -11897,6 +12172,21 @@ void ir_setup_type_info_data(irProcedure *proc) { // NOTE(bill): Setup type_info
ir_emit_store(proc, ir_emit_struct_ep(proc, tag, 0), name);
ir_emit_store(proc, ir_emit_struct_ep(proc, tag, 1), gtip);
if (t->Named.type_name->pkg) {
irValue *name = ir_const_string(proc->module, t->Named.type_name->pkg->name);
ir_emit_store(proc, ir_emit_struct_ep(proc, tag, 2), name);
}
String proc_name = {};
if (t->Named.type_name->parent_proc_decl) {
DeclInfo *decl = t->Named.type_name->parent_proc_decl;
if (decl->entity && decl->entity->kind == Entity_Procedure) {
proc_name = decl->entity->token.string;
}
}
irValue *loc = ir_emit_source_code_location(proc, proc_name, t->Named.type_name->token.pos);
ir_emit_store(proc, ir_emit_struct_ep(proc, tag, 3), loc);
break;
}
@@ -12234,8 +12524,13 @@ void ir_setup_type_info_data(irProcedure *proc) { // NOTE(bill): Setup type_info
ir_emit_store(proc, ir_emit_struct_ep(proc, tag, 6), is_raw_union);
ir_emit_store(proc, ir_emit_struct_ep(proc, tag, 7), is_custom_align);
if (is_type_comparable(t) && !is_type_simple_compare(t)) {
ir_emit_store(proc, ir_emit_struct_ep(proc, tag, 8), ir_get_equal_proc_for_type(proc->module, t));
}
if (t->Struct.soa_kind != StructSoa_None) {
irValue *kind = ir_emit_struct_ep(proc, tag, 8);
irValue *kind = ir_emit_struct_ep(proc, tag, 9);
Type *kind_type = type_deref(ir_type(kind));
irValue *soa_kind = ir_value_constant(kind_type, exact_value_i64(t->Struct.soa_kind));
@@ -12244,8 +12539,8 @@ void ir_setup_type_info_data(irProcedure *proc) { // NOTE(bill): Setup type_info
ir_emit_store(proc, kind, soa_kind);
ir_emit_store(proc, ir_emit_struct_ep(proc, tag, 9), soa_type);
ir_emit_store(proc, ir_emit_struct_ep(proc, tag, 10), soa_len);
ir_emit_store(proc, ir_emit_struct_ep(proc, tag, 10), soa_type);
ir_emit_store(proc, ir_emit_struct_ep(proc, tag, 11), soa_len);
}
}
@@ -12308,10 +12603,14 @@ void ir_setup_type_info_data(irProcedure *proc) { // NOTE(bill): Setup type_info
irValue *key = ir_emit_struct_ep(proc, tag, 0);
irValue *value = ir_emit_struct_ep(proc, tag, 1);
irValue *generated_struct = ir_emit_struct_ep(proc, tag, 2);
irValue *key_equal = ir_emit_struct_ep(proc, tag, 3);
irValue *key_hasher = ir_emit_struct_ep(proc, tag, 4);
ir_emit_store(proc, key, ir_get_type_info_ptr(proc, t->Map.key));
ir_emit_store(proc, value, ir_get_type_info_ptr(proc, t->Map.value));
ir_emit_store(proc, generated_struct, ir_get_type_info_ptr(proc, t->Map.generated_struct_type));
ir_emit_store(proc, key_equal, ir_get_equal_proc_for_type(proc->module, t->Map.key));
ir_emit_store(proc, key_hasher, ir_get_hasher_proc_for_type(proc->module, t->Map.key));
break;
}
@@ -12612,7 +12911,7 @@ void ir_gen_tree(irGen *s) {
Ast *type_expr = pl->type;
set_procedure_abi_types(heap_allocator(), e->type);
set_procedure_abi_types(e->type);
irValue *p = ir_value_procedure(m, e, e->type, type_expr, body, name);
p->Proc.tags = pl->tags;
p->Proc.inlining = pl->inlining;
@@ -12646,7 +12945,7 @@ void ir_gen_tree(irGen *s) {
#if defined(GB_SYSTEM_WINDOWS)
if (build_context.build_mode == BuildMode_DynamicLibrary && !has_dll_main) {
if (build_context.build_mode == BuildMode_DynamicLibrary && !has_dll_main && !build_context.no_entry_point) {
// DllMain :: proc(inst: rawptr, reason: u32, reserved: rawptr) -> i32
String name = str_lit("DllMain");
Type *proc_params = alloc_type_tuple();
@@ -12717,7 +13016,7 @@ void ir_gen_tree(irGen *s) {
ir_emit_return(proc, v_one32);
}
#endif
if (!(build_context.build_mode == BuildMode_DynamicLibrary && !has_dll_main)) {
if (!(build_context.build_mode == BuildMode_DynamicLibrary && !has_dll_main) && !build_context.no_entry_point) {
// main :: proc(argc: i32, argv: ^^u8) -> i32
String name = str_lit("main");
@@ -12784,11 +13083,18 @@ void ir_gen_tree(irGen *s) {
ir_fill_slice(proc, global_args, argv, ir_emit_conv(proc, argc, t_int));
ir_emit(proc, ir_alloc_instr(proc, irInstr_StartupRuntime));
{
Array<irValue *> empty_args = {};
if (build_context.command_kind == Command_test) {
for_array(i, m->info->testing_procedures) {
Entity *e = m->info->testing_procedures[i];
irValue **found = map_get(&proc->module->values, hash_entity(e));
GB_ASSERT(found != nullptr);
ir_emit_call(proc, *found, empty_args);
}
} else {
irValue **found = map_get(&proc->module->values, hash_entity(entry_point));
if (found != nullptr) {
Array<irValue *> args = {};
ir_emit_call(proc, *found, args);
ir_emit_call(proc, *found, empty_args);
}
}
@@ -12796,7 +13102,7 @@ void ir_gen_tree(irGen *s) {
}
#if defined(GB_SYSTEM_WINDOWS)
if (build_context.build_mode != BuildMode_DynamicLibrary && build_context.no_crt) {
if (build_context.build_mode != BuildMode_DynamicLibrary && build_context.no_crt && !build_context.no_entry_point) {
s->print_chkstk = true;
{

View File

@@ -76,7 +76,6 @@ void ir_write_u64(irFileBuffer *f, u64 i) {
}
void ir_write_big_int(irFileBuffer *f, BigInt const &x, Type *type, bool swap_endian) {
if (x.len == 2) {
gbAllocator a = heap_allocator(); // TODO(bill): Change this allocator
u64 words[2] = {};
BigInt y = x;
if (swap_endian) {
@@ -88,9 +87,8 @@ void ir_write_big_int(irFileBuffer *f, BigInt const &x, Type *type, bool swap_en
y.d.words = words;
}
String s = big_int_to_string(a, &y, 10);
String s = big_int_to_string(temporary_allocator(), &y, 10);
ir_write_string(f, s);
gb_free(a, s.text);
} else {
i64 i = 0;
if (x.neg) {
@@ -296,7 +294,7 @@ void ir_print_alignment_prefix_hack(irFileBuffer *f, i64 alignment) {
void ir_print_proc_results(irFileBuffer *f, irModule *m, Type *t) {
set_procedure_abi_types(heap_allocator(), t);
set_procedure_abi_types(t);
GB_ASSERT(is_type_proc(t));
t = base_type(t);
@@ -325,7 +323,7 @@ void ir_print_proc_results(irFileBuffer *f, irModule *m, Type *t) {
void ir_print_proc_type_without_pointer(irFileBuffer *f, irModule *m, Type *t) {
set_procedure_abi_types(heap_allocator(), t);
set_procedure_abi_types(t);
i64 word_bits = 8*build_context.word_size;
t = base_type(t);
@@ -736,6 +734,28 @@ void ir_print_exact_value(irFileBuffer *f, irModule *m, ExactValue value, Type *
if (is_type_array(type) && value.kind == ExactValue_String && !is_type_u8(core_array_type(type))) {
i64 count = type->Array.count;
Type *elem = type->Array.elem;
if (is_type_rune_array(type)) {
Rune rune;
isize offset = 0;
isize width = 1;
String s = value.value_string;
ir_write_byte(f, '[');
for (i64 i = 0; i < count && offset < s.len; i++) {
width = gb_utf8_decode(s.text+offset, s.len-offset, &rune);
if (i > 0) ir_write_str_lit(f, ", ");
ir_print_type(f, m, elem);
ir_write_byte(f, ' ');
ir_print_exact_value(f, m, exact_value_i64(rune), elem);
offset += width;
}
GB_ASSERT(offset == s.len);
ir_write_byte(f, ']');
return;
}
ir_write_byte(f, '[');
for (i64 i = 0; i < count; i++) {
@@ -747,7 +767,7 @@ void ir_print_exact_value(irFileBuffer *f, irModule *m, ExactValue value, Type *
ir_write_byte(f, ']');
return;
} else if (is_type_array(type) &&
} else if (is_type_array(type) &&
value.kind != ExactValue_Invalid &&
value.kind != ExactValue_String &&
value.kind != ExactValue_Compound) {
@@ -798,7 +818,11 @@ void ir_print_exact_value(irFileBuffer *f, irModule *m, ExactValue value, Type *
GB_ASSERT(is_type_array(type));
ir_write_str_lit(f, "c\"");
ir_print_escape_string(f, str, false, false);
ir_write_str_lit(f, "\\00\"");
if (type->Array.count == str.len) {
ir_write_str_lit(f, "\"");
} else {
ir_write_str_lit(f, "\\00\"");
}
} else if (is_type_cstring(t)) {
// HACK NOTE(bill): This is a hack but it works because strings are created at the very end
// of the .ll file
@@ -812,7 +836,7 @@ void ir_print_exact_value(irFileBuffer *f, irModule *m, ExactValue value, Type *
ir_write_str_lit(f, ", ");
ir_print_type(f, m, t_i32);
ir_write_str_lit(f, " 0, i32 0)");
}else {
} else {
// HACK NOTE(bill): This is a hack but it works because strings are created at the very end
// of the .ll file
irValue *str_array = ir_add_global_string_array(m, str);
@@ -929,9 +953,9 @@ void ir_print_exact_value(irFileBuffer *f, irModule *m, ExactValue value, Type *
ir_write_byte(f, ' ');
ir_write_byte(f, '{');
ir_print_type(f, m, ft); ir_write_byte(f, ' ');
ir_print_exact_value(f, m, exact_value_float(value.value_complex.real), ft);
ir_print_exact_value(f, m, exact_value_float(value.value_complex->real), ft);
ir_write_str_lit(f, ", "); ir_print_type(f, m, ft); ir_write_byte(f, ' ');
ir_print_exact_value(f, m, exact_value_float(value.value_complex.imag), ft);
ir_print_exact_value(f, m, exact_value_float(value.value_complex->imag), ft);
ir_write_byte(f, '}');
break;
}
@@ -944,13 +968,13 @@ void ir_print_exact_value(irFileBuffer *f, irModule *m, ExactValue value, Type *
ir_write_byte(f, ' ');
ir_write_byte(f, '{');
ir_print_type(f, m, ft); ir_write_byte(f, ' ');
ir_print_exact_value(f, m, exact_value_float(value.value_quaternion.imag), ft);
ir_print_exact_value(f, m, exact_value_float(value.value_quaternion->imag), ft);
ir_write_str_lit(f, ", "); ir_print_type(f, m, ft); ir_write_byte(f, ' ');
ir_print_exact_value(f, m, exact_value_float(value.value_quaternion.jmag), ft);
ir_print_exact_value(f, m, exact_value_float(value.value_quaternion->jmag), ft);
ir_write_str_lit(f, ", "); ir_print_type(f, m, ft); ir_write_byte(f, ' ');
ir_print_exact_value(f, m, exact_value_float(value.value_quaternion.kmag), ft);
ir_print_exact_value(f, m, exact_value_float(value.value_quaternion->kmag), ft);
ir_write_str_lit(f, ", "); ir_print_type(f, m, ft); ir_write_byte(f, ' ');
ir_print_exact_value(f, m, exact_value_float(value.value_quaternion.real), ft);
ir_print_exact_value(f, m, exact_value_float(value.value_quaternion->real), ft);
ir_write_byte(f, '}');
break;
}
@@ -1406,7 +1430,6 @@ void ir_print_value(irFileBuffer *f, irModule *m, irValue *value, Type *type_hin
irValue *line = value->SourceCodeLocation.line;
irValue *column = value->SourceCodeLocation.column;
irValue *procedure = value->SourceCodeLocation.procedure;
u64 hash = value->SourceCodeLocation.hash;
ir_write_byte(f, '{');
ir_print_type(f, m, t_string); ir_write_byte(f, ' '); ir_print_value(f, m, file, t_string);
@@ -1416,8 +1439,6 @@ void ir_print_value(irFileBuffer *f, irModule *m, irValue *value, Type *type_hin
ir_print_type(f, m, t_int); ir_write_byte(f, ' '); ir_print_value(f, m, column, t_int);
ir_write_string(f, str_lit(", "));
ir_print_type(f, m, t_string); ir_write_byte(f, ' '); ir_print_value(f, m, procedure, t_string);
ir_write_string(f, str_lit(", "));
ir_print_type(f, m, t_u64); ir_write_byte(f, ' '); ir_write_u64(f, hash);
ir_write_byte(f, '}');
break;
}
@@ -1551,7 +1572,11 @@ void ir_print_instr(irFileBuffer *f, irModule *m, irValue *value) {
break;
case BuiltinProc_cpu_relax:
ir_write_str_lit(f, "call void asm sideeffect \"pause\", \"\"()");
if (build_context.metrics.arch == TargetArch_amd64) {
ir_write_str_lit(f, "call void asm sideeffect \"pause\", \"\"()");
} else {
// ir_write_str_lit(f, "call void asm sideeffect \"yield\", \"\"()");
}
break;
default: GB_PANIC("Unknown inline code %d", instr->InlineCode.id); break;
}
@@ -2189,7 +2214,7 @@ void ir_print_instr(irFileBuffer *f, irModule *m, irValue *value) {
irInstrCall *call = &instr->Call;
Type *proc_type = base_type(ir_type(call->value));
GB_ASSERT(is_type_proc(proc_type));
set_procedure_abi_types(heap_allocator(), proc_type);
set_procedure_abi_types(proc_type);
bool is_c_vararg = proc_type->Proc.c_vararg;
Type *result_type = call->type;
@@ -2396,7 +2421,7 @@ void ir_print_instr(irFileBuffer *f, irModule *m, irValue *value) {
void ir_print_proc(irFileBuffer *f, irModule *m, irProcedure *proc) {
set_procedure_abi_types(heap_allocator(), proc->type);
set_procedure_abi_types(proc->type);
if (proc->body == nullptr) {
ir_write_str_lit(f, "declare ");

959
src/llvm_abi.cpp Normal file
View File

@@ -0,0 +1,959 @@
// How a single argument (or the return value) is passed at the LLVM ABI level.
enum lbArgKind {
	lbArg_Direct,   // passed as-is, possibly through a bit-cast (see lbArgType::cast_type)
	lbArg_Indirect, // passed behind a pointer (byval/sret style lowering)
	lbArg_Ignore,   // zero-sized; omitted from the LLVM signature entirely
};
// Describes how one argument (or return value) is lowered for the target ABI.
struct lbArgType {
	lbArgKind kind;
	LLVMTypeRef type;           // the LLVM type of the argument as declared
	LLVMTypeRef cast_type;      // Optional: register-friendly type to pass the value as instead (used when kind == lbArg_Direct)
	LLVMTypeRef pad_type;       // Optional
	LLVMAttributeRef attribute; // Optional: parameter attribute (e.g. "byval", "sret", "zeroext") applied in lb_add_function_type_attributes
};
// Factory helpers for lbArgType values.

// Argument passed directly, optionally bit-cast to `cast_type`, padded with
// `pad_type`, and annotated with `attr`; any of those may be nullptr.
lbArgType lb_arg_type_direct(LLVMTypeRef type, LLVMTypeRef cast_type, LLVMTypeRef pad_type, LLVMAttributeRef attr) {
	lbArgType arg = {};
	arg.kind      = lbArg_Direct;
	arg.type      = type;
	arg.cast_type = cast_type;
	arg.pad_type  = pad_type;
	arg.attribute = attr;
	return arg;
}
// Argument passed directly with no cast, padding, or attribute.
lbArgType lb_arg_type_direct(LLVMTypeRef type) {
	return lb_arg_type_direct(type, nullptr, nullptr, nullptr);
}
// Argument passed behind a pointer, optionally annotated (e.g. "byval"/"sret").
lbArgType lb_arg_type_indirect(LLVMTypeRef type, LLVMAttributeRef attr) {
	lbArgType arg = {};
	arg.kind      = lbArg_Indirect;
	arg.type      = type;
	arg.attribute = attr;
	return arg;
}
// Zero-sized argument that is dropped from the lowered signature.
lbArgType lb_arg_type_ignore(LLVMTypeRef type) {
	lbArgType arg = {};
	arg.kind = lbArg_Ignore;
	arg.type = type;
	return arg;
}
// The ABI-lowered signature of a procedure: one lbArgType per parameter plus
// the lowering of the return value.
struct lbFunctionType {
	LLVMContextRef ctx;                       // LLVM context owning every type referenced here
	ProcCallingConvention calling_convention;
	Array<lbArgType> args;                    // per-parameter lowering
	lbArgType ret;                            // return-value lowering
};
// Round `off` up to the next multiple of the alignment `a`.
i64 llvm_align_formula(i64 off, i64 a) {
	i64 bumped = off + a - 1;
	return bumped - (bumped % a);
}
// Reports whether `type` is non-null and of the given LLVM type kind.
bool lb_is_type_kind(LLVMTypeRef type, LLVMTypeKind kind) {
	return type != nullptr && LLVMGetTypeKind(type) == kind;
}
// Builds the LLVM function *pointer* type for the ABI-lowered signature `ft`.
// An lbArg_Indirect return is lowered to a leading pointer parameter (sret
// style) and the LLVM-level return type becomes void; lbArg_Ignore parameters
// are dropped from the signature.
LLVMTypeRef lb_function_type_to_llvm_ptr(lbFunctionType *ft, bool is_var_arg) {
	unsigned arg_count = cast(unsigned)ft->args.count;
	unsigned offset = 0;

	LLVMTypeRef ret = nullptr;
	if (ft->ret.kind == lbArg_Direct) {
		// A direct return may be bit-cast to a register-friendly type
		if (ft->ret.cast_type != nullptr) {
			ret = ft->ret.cast_type;
		} else {
			ret = ft->ret.type;
		}
	} else if (ft->ret.kind == lbArg_Indirect) {
		// Returned through a pointer passed as the first parameter
		offset += 1;
		ret = LLVMVoidTypeInContext(ft->ctx);
	} else if (ft->ret.kind == lbArg_Ignore) {
		ret = LLVMVoidTypeInContext(ft->ctx);
	}
	GB_ASSERT_MSG(ret != nullptr, "%d", ft->ret.kind);

	unsigned maximum_arg_count = offset+arg_count;
	LLVMTypeRef *args = gb_alloc_array(heap_allocator(), LLVMTypeRef, maximum_arg_count);
	// NOTE: LLVMFunctionType copies the parameter array, so the scratch buffer
	// can be freed on exit (previously this allocation was leaked).
	defer (gb_free(heap_allocator(), args));
	if (offset == 1) {
		GB_ASSERT(ft->ret.kind == lbArg_Indirect);
		args[0] = LLVMPointerType(ft->ret.type, 0);
	}

	unsigned arg_index = offset;
	for (unsigned i = 0; i < arg_count; i++) {
		lbArgType *arg = &ft->args[i];
		if (arg->kind == lbArg_Direct) {
			LLVMTypeRef arg_type = nullptr;
			if (ft->args[i].cast_type != nullptr) {
				arg_type = arg->cast_type;
			} else {
				arg_type = arg->type;
			}
			args[arg_index++] = arg_type;
		} else if (arg->kind == lbArg_Indirect) {
			// Pass by pointer; the stored type must be the pointee, not a pointer
			GB_ASSERT(!lb_is_type_kind(arg->type, LLVMPointerTypeKind));
			args[arg_index++] = LLVMPointerType(arg->type, 0);
		} else if (arg->kind == lbArg_Ignore) {
			// zero-sized argument: no LLVM parameter slot
		}
	}
	unsigned total_arg_count = arg_index;
	LLVMTypeRef func_type = LLVMFunctionType(ret, args, total_arg_count, is_var_arg);
	return LLVMPointerType(func_type, 0);
}
// Attaches parameter attributes and the calling convention to the LLVM
// function `fn`, matching the lowering described by `ft`.
// LLVM attribute indices are 1-based for parameters (index 0 is the return
// value), hence the `arg_index+1` below.
void lb_add_function_type_attributes(LLVMValueRef fn, lbFunctionType *ft, ProcCallingConvention calling_convention) {
	if (ft == nullptr) {
		return;
	}
	unsigned arg_count = cast(unsigned)ft->args.count;
	unsigned offset = 0;
	if (ft->ret.kind == lbArg_Indirect) {
		// the return value occupies the first LLVM parameter slot (sret style)
		offset += 1;
	}

	LLVMContextRef c = ft->ctx;
	LLVMAttributeRef noalias_attr   = lb_create_enum_attribute(c, "noalias", true);
	LLVMAttributeRef nonnull_attr   = lb_create_enum_attribute(c, "nonnull", true);
	LLVMAttributeRef nocapture_attr = lb_create_enum_attribute(c, "nocapture", true);

	unsigned arg_index = offset;
	for (unsigned i = 0; i < arg_count; i++) {
		lbArgType *arg = &ft->args[i];
		if (arg->kind == lbArg_Ignore) {
			// ignored args have no LLVM parameter slot, so no attribute index either
			continue;
		}

		if (arg->attribute) {
			LLVMAddAttributeAtIndex(fn, arg_index+1, arg->attribute);
		}

		arg_index++;
	}

	if (offset != 0 && ft->ret.kind == lbArg_Indirect && ft->ret.attribute != nullptr) {
		// attribute index `offset` (== 1) is the sret parameter itself
		LLVMAddAttributeAtIndex(fn, offset, ft->ret.attribute);
		LLVMAddAttributeAtIndex(fn, offset, noalias_attr);
	}

	lbCallingConventionKind cc_kind = lbCallingConvention_C;
	// TODO(bill): Clean up this logic
	if (build_context.metrics.os != TargetOs_js) {
		cc_kind = lb_calling_convention_map[calling_convention];
	}
	LLVMSetFunctionCallConv(fn, cc_kind);
	if (calling_convention == ProcCC_Odin) {
		// Mark the implicit context pointer parameter.
		// NOTE(review): `offset+arg_count` assumes the context is the last
		// entry of ft->args and that no earlier argument was lbArg_Ignore;
		// otherwise this 1-based index points at the wrong parameter — confirm.
		unsigned context_index = offset+arg_count;
		LLVMAddAttributeAtIndex(fn, context_index, noalias_attr);
		LLVMAddAttributeAtIndex(fn, context_index, nonnull_attr);
		LLVMAddAttributeAtIndex(fn, context_index, nocapture_attr);
	}
}
i64 lb_sizeof(LLVMTypeRef type);
i64 lb_alignof(LLVMTypeRef type);

// Computes the size in bytes of an LLVM type using the compiler's own layout
// rules (word size and max alignment come from build_context), instead of
// querying LLVM's target data layout.
i64 lb_sizeof(LLVMTypeRef type) {
	LLVMTypeKind kind = LLVMGetTypeKind(type);
	switch (kind) {
	case LLVMVoidTypeKind:
		return 0;
	case LLVMIntegerTypeKind:
		{
			// round bit width up to whole bytes
			unsigned w = LLVMGetIntTypeWidth(type);
			return (w + 7)/8;
		}
	case LLVMFloatTypeKind:
		return 4;
	case LLVMDoubleTypeKind:
		return 8;
	case LLVMPointerTypeKind:
		return build_context.word_size;
	case LLVMStructTypeKind:
		{
			unsigned field_count = LLVMCountStructElementTypes(type);
			i64 offset = 0;
			if (LLVMIsPackedStruct(type)) {
				// packed structs: fields are laid out back-to-back, no padding
				for (unsigned i = 0; i < field_count; i++) {
					LLVMTypeRef field = LLVMStructGetTypeAtIndex(type, i);
					offset += lb_sizeof(field);
				}
			} else {
				// align each field, then round the total up to the struct's alignment
				for (unsigned i = 0; i < field_count; i++) {
					LLVMTypeRef field = LLVMStructGetTypeAtIndex(type, i);
					i64 align = lb_alignof(field);
					offset = llvm_align_formula(offset, align);
					offset += lb_sizeof(field);
				}
				offset = llvm_align_formula(offset, lb_alignof(type));
			}
			return offset;
		}
		break;
	case LLVMArrayTypeKind:
		{
			LLVMTypeRef elem = LLVMGetElementType(type);
			i64 elem_size = lb_sizeof(elem);
			i64 count = LLVMGetArrayLength(type);
			i64 size = count * elem_size;
			return size;
		}
		break;
	case LLVMX86_MMXTypeKind:
		return 8;
	case LLVMVectorTypeKind:
		{
			// vectors are padded to a power-of-two size, capped at max_align
			LLVMTypeRef elem = LLVMGetElementType(type);
			i64 elem_size = lb_sizeof(elem);
			i64 count = LLVMGetVectorSize(type);
			i64 size = count * elem_size;
			return gb_clamp(next_pow2(size), 1, build_context.max_align);
		}
	}
	GB_PANIC("Unhandled type for lb_sizeof -> %s", LLVMPrintTypeToString(type));
	return 0;
}
// Computes the alignment in bytes of an LLVM type, mirroring lb_sizeof above:
// packed structs align to 1, other structs to their maximum field alignment,
// and integers/vectors are clamped to build_context.max_align.
i64 lb_alignof(LLVMTypeRef type) {
	LLVMTypeKind kind = LLVMGetTypeKind(type);
	switch (kind) {
	case LLVMVoidTypeKind:
		return 1;
	case LLVMIntegerTypeKind:
		{
			unsigned w = LLVMGetIntTypeWidth(type);
			return gb_clamp((w + 7)/8, 1, build_context.max_align);
		}
	case LLVMFloatTypeKind:
		return 4;
	case LLVMDoubleTypeKind:
		return 8;
	case LLVMPointerTypeKind:
		return build_context.word_size;
	case LLVMStructTypeKind:
		{
			if (LLVMIsPackedStruct(type)) {
				return 1;
			} else {
				unsigned field_count = LLVMCountStructElementTypes(type);
				i64 max_align = 1;
				for (unsigned i = 0; i < field_count; i++) {
					LLVMTypeRef field = LLVMStructGetTypeAtIndex(type, i);
					i64 field_align = lb_alignof(field);
					max_align = gb_max(max_align, field_align);
				}
				return max_align;
			}
		}
		break;
	case LLVMArrayTypeKind:
		// an array is aligned as its element type
		return lb_alignof(LLVMGetElementType(type));
	case LLVMX86_MMXTypeKind:
		return 8;
	case LLVMVectorTypeKind:
		{
			LLVMTypeRef elem = LLVMGetElementType(type);
			i64 elem_size = lb_sizeof(elem);
			i64 count = LLVMGetVectorSize(type);
			i64 size = count * elem_size;
			return gb_clamp(next_pow2(size), 1, build_context.max_align);
		}
	}
	// Fixed: panic message previously said "lb_sizeof" (copy-paste from above).
	GB_PANIC("Unhandled type for lb_alignof -> %s", LLVMPrintTypeToString(type));
	// LLVMValueRef v = LLVMAlignOf(type);
	// GB_ASSERT(LLVMIsConstant(v));
	// return LLVMConstIntGetSExtValue(v);
	return 1;
}
#if 0
// NOTE: Currently compiled out (#if 0). Experimental reverse mapping from an
// ABI-lowered LLVM type back to an Odin type; kept for reference.
// `level` guards against unbounded recursion through self-referential types.
Type *lb_abi_to_odin_type(lbModule *m, LLVMTypeRef type, bool is_return, u32 level = 0) {
	// Reuse a previously computed mapping if one exists
	Type **found = map_get(&m->llvm_types, hash_pointer(type));
	if (found) {
		return *found;
	}
	GB_ASSERT_MSG(level < 64, "%s %d", LLVMPrintTypeToString(type), is_return);

	LLVMTypeKind kind = LLVMGetTypeKind(type);
	switch (kind) {
	case LLVMVoidTypeKind:
		return nullptr;
	case LLVMIntegerTypeKind:
		{
			unsigned w = LLVMGetIntTypeWidth(type);
			if (w == 1) {
				return t_llvm_bool;
			}
			// widths are mapped to the smallest unsigned Odin integer that fits
			unsigned bytes = (w + 7)/8;
			switch (bytes) {
			case 1:  return t_u8;
			case 2:  return t_u16;
			case 4:  return t_u32;
			case 8:  return t_u64;
			case 16: return t_u128;
			}
			GB_PANIC("Unhandled integer type");
		}
	case LLVMFloatTypeKind:
		return t_f32;
	case LLVMDoubleTypeKind:
		return t_f64;
	case LLVMPointerTypeKind:
		{
			LLVMTypeRef elem = LLVMGetElementType(type);
			if (lb_is_type_kind(elem, LLVMFunctionTypeKind)) {
				// pointer-to-function: rebuild an Odin procedure type
				unsigned param_count = LLVMCountParamTypes(elem);
				LLVMTypeRef *params = gb_alloc_array(heap_allocator(), LLVMTypeRef, param_count);
				defer (gb_free(heap_allocator(), params));
				LLVMGetParamTypes(elem, params);

				Type **param_types = gb_alloc_array(heap_allocator(), Type *, param_count);
				defer (gb_free(heap_allocator(), param_types));
				for (unsigned i = 0; i < param_count; i++) {
					param_types[i] = lb_abi_to_odin_type(m, params[i], false, level+1);
				}

				LLVMTypeRef ret = LLVMGetReturnType(elem);
				Type *ret_type = lb_abi_to_odin_type(m, ret, true, level+1);
				bool is_c_vararg = !!LLVMIsFunctionVarArg(elem);
				return alloc_type_proc_from_types(param_types, param_count, ret_type, is_c_vararg);
			}
			return alloc_type_pointer(lb_abi_to_odin_type(m, elem, false, level+1));
		}
	case LLVMFunctionTypeKind:
		GB_PANIC("LLVMFunctionTypeKind should not be seen on its own");
		break;

	case LLVMStructTypeKind:
		{
			unsigned field_count = LLVMCountStructElementTypes(type);
			Type **fields = gb_alloc_array(heap_allocator(), Type *, field_count);
			for (unsigned i = 0; i < field_count; i++) {
				LLVMTypeRef field_type = LLVMStructGetTypeAtIndex(type, i);
				// nested pointers are collapsed to rawptr below the top level
				if (lb_is_type_kind(field_type, LLVMPointerTypeKind) && level > 0) {
					fields[i] = t_rawptr;
				} else {
					fields[i] = lb_abi_to_odin_type(m, field_type, false, level+1);
				}
			}
			if (is_return) {
				return alloc_type_tuple_from_field_types(fields, field_count, !!LLVMIsPackedStruct(type), false);
			} else {
				return alloc_type_struct_from_field_types(fields, field_count, !!LLVMIsPackedStruct(type));
			}
		}
		break;
	case LLVMArrayTypeKind:
		{
			i64 count = LLVMGetArrayLength(type);
			Type *elem = lb_abi_to_odin_type(m, LLVMGetElementType(type), false, level+1);
			return alloc_type_array(elem, count);
		}
		break;
	case LLVMX86_MMXTypeKind:
		return t_vector_x86_mmx;
	case LLVMVectorTypeKind:
		{
			i64 count = LLVMGetVectorSize(type);
			Type *elem = lb_abi_to_odin_type(m, LLVMGetElementType(type), false, level+1);
			return alloc_type_simd_vector(count, elem);
		}
	}
	GB_PANIC("Unhandled type for lb_abi_to_odin_type -> %s", LLVMPrintTypeToString(type));
	return 0;
}
#endif
// Signature shared by every per-target ABI classification entry point
// (see lbAbi386::abi_info, lbAbiAmd64Win64::abi_info, etc. below).
#define LB_ABI_INFO(name) lbFunctionType *name(LLVMContextRef c, LLVMTypeRef *arg_types, unsigned arg_count, LLVMTypeRef return_type, bool return_is_defined, ProcCallingConvention calling_convention)
typedef LB_ABI_INFO(lbAbiInfoType);
// NOTE(bill): I hate `namespace` in C++ but this is just because I don't want to prefix everything

// 32-bit x86 C ABI classification. Its compute_return_type/non_struct are
// also reused by the Win64 path (see lbAbiAmd64Win64 below).
namespace lbAbi386 {
	Array<lbArgType> compute_arg_types(LLVMContextRef c, LLVMTypeRef *arg_types, unsigned arg_count);
	lbArgType compute_return_type(LLVMContextRef c, LLVMTypeRef return_type, bool return_is_defined);

	// Entry point: lowers the whole signature (allocation is never freed;
	// the lbFunctionType lives for the duration of compilation).
	LB_ABI_INFO(abi_info) {
		lbFunctionType *ft = gb_alloc_item(heap_allocator(), lbFunctionType);
		ft->ctx = c;
		ft->args = compute_arg_types(c, arg_types, arg_count);
		ft->ret = compute_return_type(c, return_type, return_is_defined);
		ft->calling_convention = calling_convention;
		return ft;
	}

	// Lowers a scalar (non-aggregate) value. Large scalars (> 8 bytes) are
	// passed indirectly; i1 is zero-extended.
	lbArgType non_struct(LLVMContextRef c, LLVMTypeRef type, bool is_return) {
		if (!is_return && lb_sizeof(type) > 8) {
			return lb_arg_type_indirect(type, nullptr);
		}

		// word_size == 8 here because this helper is also used by the Win64 path
		if (build_context.metrics.os == TargetOs_windows &&
		    build_context.word_size == 8 &&
		    lb_is_type_kind(type, LLVMIntegerTypeKind) &&
		    type == LLVMIntTypeInContext(c, 128)) {
			// NOTE(bill): Because Windows AMD64 is weird
			LLVMTypeRef cast_type = LLVMVectorType(LLVMInt64TypeInContext(c), 2);
			return lb_arg_type_direct(type, cast_type, nullptr, nullptr);
		}

		LLVMAttributeRef attr = nullptr;
		LLVMTypeRef i1 = LLVMInt1TypeInContext(c);
		if (type == i1) {
			attr = lb_create_enum_attribute(c, "zeroext", true);
		}
		return lb_arg_type_direct(type, nullptr, nullptr, attr);
	}

	// Aggregates are passed indirectly with "byval" (or ignored when
	// zero-sized); everything else goes through non_struct.
	Array<lbArgType> compute_arg_types(LLVMContextRef c, LLVMTypeRef *arg_types, unsigned arg_count) {
		auto args = array_make<lbArgType>(heap_allocator(), arg_count);

		for (unsigned i = 0; i < arg_count; i++) {
			LLVMTypeRef t = arg_types[i];
			LLVMTypeKind kind = LLVMGetTypeKind(t);
			i64 sz = lb_sizeof(t);
			if (kind == LLVMStructTypeKind) {
				if (sz == 0) {
					args[i] = lb_arg_type_ignore(t);
				} else {
					args[i] = lb_arg_type_indirect(t, lb_create_enum_attribute(c, "byval", true));
				}
			} else {
				args[i] = non_struct(c, t, false);
			}
		}
		return args;
	}

	// Small aggregates (1/2/4/8 bytes) are returned directly as a same-size
	// integer; larger ones via an "sret" out-parameter.
	lbArgType compute_return_type(LLVMContextRef c, LLVMTypeRef return_type, bool return_is_defined) {
		if (!return_is_defined) {
			return lb_arg_type_direct(LLVMVoidTypeInContext(c));
		} else if (lb_is_type_kind(return_type, LLVMStructTypeKind) || lb_is_type_kind(return_type, LLVMArrayTypeKind)) {
			i64 sz = lb_sizeof(return_type);
			switch (sz) {
			case 1: return lb_arg_type_direct(return_type, LLVMIntTypeInContext(c,  8), nullptr, nullptr);
			case 2: return lb_arg_type_direct(return_type, LLVMIntTypeInContext(c, 16), nullptr, nullptr);
			case 4: return lb_arg_type_direct(return_type, LLVMIntTypeInContext(c, 32), nullptr, nullptr);
			case 8: return lb_arg_type_direct(return_type, LLVMIntTypeInContext(c, 64), nullptr, nullptr);
			}
			return lb_arg_type_indirect(return_type, lb_create_enum_attribute(c, "sret", true));
		}
		return non_struct(c, return_type, true);
	}
};
namespace lbAbiAmd64Win64 {
	Array<lbArgType> compute_arg_types(LLVMContextRef c, LLVMTypeRef *arg_types, unsigned arg_count);

	// Win64 ABI: aggregates of size 1/2/4/8 bytes are passed directly as the
	// matching integer, all other aggregates by pointer. Return values reuse
	// the lbAbi386 return lowering.
	LB_ABI_INFO(abi_info) {
		lbFunctionType *ft = gb_alloc_item(heap_allocator(), lbFunctionType);
		ft->ctx = c;
		ft->args = compute_arg_types(c, arg_types, arg_count);
		ft->ret = lbAbi386::compute_return_type(c, return_type, return_is_defined);
		ft->calling_convention = calling_convention;
		return ft;
	}

	Array<lbArgType> compute_arg_types(LLVMContextRef c, LLVMTypeRef *arg_types, unsigned arg_count) {
		auto result = array_make<lbArgType>(heap_allocator(), arg_count);
		for (unsigned index = 0; index < arg_count; index++) {
			LLVMTypeRef arg = arg_types[index];
			if (LLVMGetTypeKind(arg) != LLVMStructTypeKind) {
				result[index] = lbAbi386::non_struct(c, arg, false);
				continue;
			}
			i64 size = lb_sizeof(arg);
			if (size == 1 || size == 2 || size == 4 || size == 8) {
				// Register-sized aggregate: pass as an equally wide integer.
				result[index] = lb_arg_type_direct(arg, LLVMIntTypeInContext(c, 8*cast(unsigned)size), nullptr, nullptr);
			} else {
				// Everything else (including zero-sized) goes by reference;
				// Win64 does not use "byval".
				result[index] = lb_arg_type_indirect(arg, nullptr);
			}
		}
		return result;
	}
};
// NOTE(bill): I hate `namespace` in C++ but this is just because I don't want to prefix everything
namespace lbAbiAmd64SysV {
// Register classes assigned by the System V AMD64 parameter classification
// algorithm; one class describes each eightbyte (8-byte chunk) of a value.
enum RegClass {
	RegClass_NoClass,     // not classified yet / padding
	RegClass_Int,         // passed in a general-purpose register
	RegClass_SSEFs,       // scalar f32 in an SSE register
	RegClass_SSEFv,       // packed f32 vector in an SSE register
	RegClass_SSEDs,       // scalar f64 in an SSE register
	RegClass_SSEDv,       // packed f64 vector in an SSE register
	RegClass_SSEInt8,     // vector of i8 lanes in an SSE register
	RegClass_SSEInt16,    // vector of i16 lanes in an SSE register
	RegClass_SSEInt32,    // vector of i32 lanes in an SSE register
	RegClass_SSEInt64,    // vector of i64 lanes in an SSE register
	RegClass_SSEUp,       // continuation of the preceding SSE class
	RegClass_X87,         // x87 floating point, low part
	RegClass_X87Up,       // x87 floating point, high part
	RegClass_ComplexX87,  // x87 complex value
	RegClass_Memory,      // passed/returned through memory (the stack)
};
// Reports whether a register class starts an SSE-register run, for the
// purposes of fixup()'s coalescing pass.
// NOTE(review): RegClass_SSEDs (scalar double) and the RegClass_SSEInt*
// vector classes are not treated as SSE here even though they occupy SSE
// registers — TODO confirm whether that omission is intentional.
bool is_sse(RegClass reg_class) {
	switch (reg_class) {
	case RegClass_SSEFs:
	case RegClass_SSEFv:
	case RegClass_SSEDv:
		return true;
	}
	return false;
}
// Forces the entire value onto the stack by overwriting every eightbyte's
// classification with RegClass_Memory.
void all_mem(Array<RegClass> *cs) {
	for (isize index = 0; index < cs->count; index++) {
		(*cs)[index] = RegClass_Memory;
	}
}
// Which LLVM parameter attribute amd64_type() should attach when a value is
// classified as Memory.
// NOTE(review): "StructRect" is presumably a typo for "StructRet" (sret);
// renaming would touch use sites outside this block, so it is only flagged.
enum Amd64TypeAttributeKind {
	Amd64TypeAttribute_None,
	Amd64TypeAttribute_ByVal,       // argument: attach "byval"
	Amd64TypeAttribute_StructRect,  // return:   attach "sret"
};
Array<lbArgType> compute_arg_types(LLVMContextRef c, LLVMTypeRef *arg_types, unsigned arg_count);
lbArgType compute_return_type(LLVMContextRef c, LLVMTypeRef return_type, bool return_is_defined);
void classify_with(LLVMTypeRef t, Array<RegClass> *cls, i64 ix, i64 off);
void fixup(LLVMTypeRef t, Array<RegClass> *cls);
lbArgType amd64_type(LLVMContextRef c, LLVMTypeRef type, Amd64TypeAttributeKind attribute_kind);
Array<RegClass> classify(LLVMTypeRef t);
LLVMTypeRef llreg(LLVMContextRef c, Array<RegClass> const &reg_classes);
// Builds the System V AMD64 function type: every parameter and the return
// value are classified independently through amd64_type().
LB_ABI_INFO(abi_info) {
	lbFunctionType *ft = gb_alloc_item(heap_allocator(), lbFunctionType);
	ft->ctx = c;
	ft->calling_convention = calling_convention;

	ft->args = array_make<lbArgType>(heap_allocator(), arg_count);
	for (unsigned index = 0; index < arg_count; index++) {
		ft->args[index] = amd64_type(c, arg_types[index], Amd64TypeAttribute_ByVal);
	}

	ft->ret = return_is_defined
		? amd64_type(c, return_type, Amd64TypeAttribute_StructRect)
		: lb_arg_type_direct(LLVMVoidTypeInContext(c));
	return ft;
}
// Decides whether a classified value must live in memory. For arguments
// (ByVal) a leading Memory/X87/ComplexX87 class forces the stack; for
// returns (StructRect) only a leading Memory class does.
bool is_mem_cls(Array<RegClass> const &cls, Amd64TypeAttributeKind attribute_kind) {
	if (cls.count == 0) {
		return false;
	}
	RegClass head = cls[0];
	if (attribute_kind == Amd64TypeAttribute_ByVal) {
		return head == RegClass_Memory || head == RegClass_X87 || head == RegClass_ComplexX87;
	}
	if (attribute_kind == Amd64TypeAttribute_StructRect) {
		return head == RegClass_Memory;
	}
	return false;
}
// True for the scalar kinds (integer/float/double/pointer) that can be
// passed directly without running the full classification algorithm.
bool is_register(LLVMTypeRef type) {
	switch (LLVMGetTypeKind(type)) {
	case LLVMIntegerTypeKind:
	case LLVMFloatTypeKind:
	case LLVMDoubleTypeKind:
	case LLVMPointerTypeKind:
		return true;
	default:
		return false;
	}
}
// Classifies a single value for the System V AMD64 ABI. Scalars go straight
// through (an i1 gains "zeroext"); aggregates are classified per eightbyte
// and are either passed indirectly ("byval"/"sret") or repacked into the
// register aggregate produced by llreg().
lbArgType amd64_type(LLVMContextRef c, LLVMTypeRef type, Amd64TypeAttributeKind attribute_kind) {
	if (is_register(type)) {
		LLVMAttributeRef attr = nullptr;
		if (type == LLVMInt1TypeInContext(c)) {
			// Booleans are zero-extended to a full register.
			attr = lb_create_enum_attribute(c, "zeroext", true);
		}
		return lb_arg_type_direct(type, nullptr, nullptr, attr);
	}

	auto cls = classify(type);
	if (!is_mem_cls(cls, attribute_kind)) {
		// Fits in registers: pass it as the equivalent register aggregate.
		return lb_arg_type_direct(type, llreg(c, cls), nullptr, nullptr);
	}

	LLVMAttributeRef attr = nullptr;
	if (attribute_kind == Amd64TypeAttribute_ByVal) {
		attr = lb_create_enum_attribute(c, "byval", true);
	} else if (attribute_kind == Amd64TypeAttribute_StructRect) {
		attr = lb_create_enum_attribute(c, "sret", true);
	}
	return lb_arg_type_indirect(type, attr);
}
// Direct pass-through for scalars; an i1 additionally gets the "zeroext"
// attribute so callers always see a clean 0/1 value.
lbArgType non_struct(LLVMContextRef c, LLVMTypeRef type) {
	bool is_bool = (type == LLVMInt1TypeInContext(c));
	LLVMAttributeRef attr = is_bool ? lb_create_enum_attribute(c, "zeroext", true) : nullptr;
	return lb_arg_type_direct(type, nullptr, nullptr, attr);
}
// Runs the eightbyte classification for a type. Values spanning more than
// four words skip the recursive walk and are immediately forced to memory.
Array<RegClass> classify(LLVMTypeRef t) {
	i64 size = lb_sizeof(t);
	i64 word_count = (size + 7)/8;
	auto classes = array_make<RegClass>(heap_allocator(), cast(isize)word_count);
	if (word_count <= 4) {
		classify_with(t, &classes, 0, 0);
		fixup(t, &classes);
	} else {
		all_mem(&classes);
	}
	return classes;
}
// Merges a newly computed class into slot i of the classification, following
// the merge rules of the SysV AMD64 psABI (§3.2.3 "Classification"):
// NoClass loses to everything, Memory and Int dominate the SSE classes, and
// any x87 involvement forces the value to memory.
void unify(Array<RegClass> *cls, i64 i, RegClass newv) {
	RegClass &oldv = (*cls)[i];
	if (oldv == newv) {
		return;
	} else if (oldv == RegClass_NoClass) {
		oldv = newv;
	} else if (newv == RegClass_NoClass) {
		return;
	} else if (oldv == RegClass_Memory || newv == RegClass_Memory) {
		// Fix: the merged class must become Memory; previously this branch
		// returned without writing, keeping the old class even when the new
		// classification was Memory.
		oldv = RegClass_Memory;
	} else if (oldv == RegClass_Int || newv == RegClass_Int) {
		// Fix: likewise the merged class must become Int.
		oldv = RegClass_Int;
	} else if (oldv == RegClass_X87 || oldv == RegClass_X87Up || oldv == RegClass_ComplexX87 ||
	           newv == RegClass_X87 || newv == RegClass_X87Up || newv == RegClass_ComplexX87) {
		oldv = RegClass_Memory;
	} else {
		oldv = newv;
	}
}
// Post-classification cleanup (SysV AMD64 psABI "post merger cleanup"):
// a large struct/array value must be one SSE class followed entirely by
// SSEUp or it goes to memory; Memory/X87Up anywhere forces the whole value
// to memory; a stray SSEUp that does not continue an SSE run is demoted.
void fixup(LLVMTypeRef t, Array<RegClass> *cls) {
	i64 i = 0;
	i64 e = cls->count;
	if (e > 2 && (lb_is_type_kind(t, LLVMStructTypeKind) || lb_is_type_kind(t, LLVMArrayTypeKind))) {
		if (is_sse((*cls)[i])) {
			for (i++; i < e; i++) {
				// Fix: inspect the slot being iterated; the old code re-read
				// the first slot through a stale reference, so this check
				// never looked at the elements it was walking over.
				if ((*cls)[i] != RegClass_SSEUp) {
					all_mem(cls);
					return;
				}
			}
		} else {
			all_mem(cls);
			return;
		}
	} else {
		while (i < e) {
			RegClass &oldv = (*cls)[i];
			if (oldv == RegClass_Memory) {
				all_mem(cls);
				return;
			} else if (oldv == RegClass_X87Up) {
				// NOTE(bill): Darwin
				all_mem(cls);
				return;
			} else if (oldv == RegClass_SSEUp) {
				// SSEUp with no preceding SSE class: demote it.
				oldv = RegClass_SSEDv;
			} else if (is_sse(oldv)) {
				// Fix: skip the SSEUp continuation slots by reading the
				// current element; the old code compared the stale `oldv`
				// reference, so the inner loop never advanced.
				i++;
				while (i != e && (*cls)[i] == RegClass_SSEUp) {
					i++;
				}
			} else if (oldv == RegClass_X87) {
				// Fix: same stale-reference repair for the X87Up run.
				i++;
				while (i != e && (*cls)[i] == RegClass_X87Up) {
					i++;
				}
			} else {
				i++;
			}
		}
	}
}
// Counts how many eightbyte slots the vector starting at `offset` covers:
// the starting slot itself plus the run of RegClass_SSEUp slots after it.
// NOTE(review): when the starting class is RegClass_SSEFv the break condition
// short-circuits to false, so the run extends to the end of the
// classification regardless of the following classes — TODO confirm this
// special case is intentional (the usual algorithm stops at the first
// non-SSEUp slot for every starting class).
unsigned llvec_len(Array<RegClass> const &reg_classes, isize offset) {
	unsigned len = 1;
	for (isize i = offset+1; i < reg_classes.count; i++) {
		if (reg_classes[offset] != RegClass_SSEFv && reg_classes[i] != RegClass_SSEUp) {
			break;
		}
		len++;
	}
	return len;
}
// Builds the LLVM register-aggregate type for a classified value: each Int
// slot becomes an i64, SSE vector classes become an appropriately sized LLVM
// vector (consuming their SSEUp continuation slots), and scalar SSE classes
// become float/double. A single element is returned unwrapped instead of as
// a one-field struct.
LLVMTypeRef llreg(LLVMContextRef c, Array<RegClass> const &reg_classes) {
	auto types = array_make<LLVMTypeRef>(heap_allocator(), 0, reg_classes.count);
	for (isize i = 0; i < reg_classes.count; i++) {
		RegClass reg_class = reg_classes[i];
		switch (reg_class) {
		case RegClass_Int:
			array_add(&types, LLVMIntTypeInContext(c, 64));
			break;
		case RegClass_SSEFv:
		case RegClass_SSEDv:
		case RegClass_SSEInt8:
		case RegClass_SSEInt16:
		case RegClass_SSEInt32:
		case RegClass_SSEInt64:
			{
				unsigned elems_per_word = 0;
				LLVMTypeRef elem_type = nullptr;
				switch (reg_class) {
				case RegClass_SSEFv:
					elems_per_word = 2;
					elem_type = LLVMFloatTypeInContext(c);
					break;
				case RegClass_SSEDv:
					elems_per_word = 1;
					elem_type = LLVMDoubleTypeInContext(c);
					break;
				case RegClass_SSEInt8:
					elems_per_word = 64/8;
					elem_type = LLVMIntTypeInContext(c, 8);
					break;
				case RegClass_SSEInt16:
					elems_per_word = 64/16;
					elem_type = LLVMIntTypeInContext(c, 16);
					break;
				case RegClass_SSEInt32:
					elems_per_word = 64/32;
					elem_type = LLVMIntTypeInContext(c, 32);
					break;
				case RegClass_SSEInt64:
					elems_per_word = 64/64;
					elem_type = LLVMIntTypeInContext(c, 64);
					break;
				}

				unsigned vec_len = llvec_len(reg_classes, i);
				LLVMTypeRef vec_type = LLVMVectorType(elem_type, vec_len * elems_per_word);
				array_add(&types, vec_type);

				// The vector consumes this slot plus (vec_len-1) SSEUp
				// slots. Fix: advance by vec_len-1 so the loop's own i++
				// lands on the first unconsumed slot; the old
				// `i += vec_len; continue;` combined with the loop increment
				// skipped one extra class after every vector.
				i += vec_len - 1;
			}
			break;
		case RegClass_SSEFs:
			array_add(&types, LLVMFloatTypeInContext(c));
			break;
		case RegClass_SSEDs:
			array_add(&types, LLVMDoubleTypeInContext(c));
			break;
		default:
			GB_PANIC("Unhandled RegClass");
		}
	}

	GB_ASSERT(types.count != 0);
	if (types.count == 1) {
		return types[0];
	}
	return LLVMStructTypeInContext(c, types.data, cast(unsigned)types.count, false);
}
// Recursive core of the SysV classification: merges the class of `t`
// (located at byte offset `off`) into the eightbyte slots of `cls` starting
// at slot index `ix`. A misaligned value immediately forces every eightbyte
// it touches to Memory.
void classify_with(LLVMTypeRef t, Array<RegClass> *cls, i64 ix, i64 off) {
	i64 t_align = lb_alignof(t);
	i64 t_size = lb_sizeof(t);

	i64 misalign = off % t_align;
	if (misalign != 0) {
		i64 e = (off + t_size + 7) / 8;
		for (i64 i = off / 8; i < e; i++) {
			unify(cls, ix+i, RegClass_Memory);
		}
		return;
	}

	switch (LLVMGetTypeKind(t)) {
	case LLVMIntegerTypeKind:
	case LLVMPointerTypeKind:
		unify(cls, ix + off/8, RegClass_Int);
		break;
	case LLVMFloatTypeKind:
		// A float in the upper half of an eightbyte pairs into a vector.
		unify(cls, ix + off/8, (off%8 == 4) ? RegClass_SSEFv : RegClass_SSEFs);
		break;
	case LLVMDoubleTypeKind:
		unify(cls, ix + off/8, RegClass_SSEDs);
		break;
	case LLVMStructTypeKind:
		{
			// Classify each field at its (possibly packed) offset.
			LLVMBool packed = LLVMIsPackedStruct(t);
			unsigned field_count = LLVMCountStructElementTypes(t);

			i64 field_off = off;
			for (unsigned field_index = 0; field_index < field_count; field_index++) {
				LLVMTypeRef field_type = LLVMStructGetTypeAtIndex(t, field_index);
				if (!packed) {
					field_off = llvm_align_formula(field_off, lb_alignof(field_type));
				}
				classify_with(field_type, cls, ix, field_off);
				field_off += lb_sizeof(field_type);
			}
		}
		break;
	case LLVMArrayTypeKind:
		{
			i64 len = LLVMGetArrayLength(t);
			LLVMTypeRef elem = LLVMGetElementType(t);
			i64 elem_sz = lb_sizeof(elem);
			for (i64 i = 0; i < len; i++) {
				classify_with(elem, cls, ix, off + i*elem_sz);
			}
		}
		break;
	case LLVMVectorTypeKind:
		{
			i64 len = LLVMGetVectorSize(t);
			LLVMTypeRef elem = LLVMGetElementType(t);
			i64 elem_sz = lb_sizeof(elem);
			LLVMTypeKind elem_kind = LLVMGetTypeKind(elem);
			RegClass reg = RegClass_NoClass;
			switch (elem_kind) {
			case LLVMIntegerTypeKind:
				// Fix: every case used to fall through into the next one and
				// finally into GB_PANIC (missing `break`s), so any integer
				// vector crashed the compiler.
				switch (LLVMGetIntTypeWidth(elem)) {
				case 8:  reg = RegClass_SSEInt8;  break;
				case 16: reg = RegClass_SSEInt16; break;
				case 32: reg = RegClass_SSEInt32; break;
				case 64: reg = RegClass_SSEInt64; break;
				default:
					GB_PANIC("Unhandled integer width for vector type");
				}
				break;
			case LLVMFloatTypeKind:
				reg = RegClass_SSEFv;
				break;
			case LLVMDoubleTypeKind:
				reg = RegClass_SSEDv;
				break;
			default:
				GB_PANIC("Unhandled vector element type");
			}

			for (i64 i = 0; i < len; i++) {
				unify(cls, ix + (off + i*elem_sz)/8, reg);

				// NOTE(bill): Everything after the first one is the upper
				// half of a register
				reg = RegClass_SSEUp;
			}
		}
		break;
	default:
		GB_PANIC("Unhandled type");
		break;
	}
}
// Fallback argument mapping (not used by abi_info, which classifies through
// amd64_type): non-empty aggregates are passed indirectly with "byval",
// zero-sized aggregates are dropped, scalars go through non_struct.
Array<lbArgType> compute_arg_types(LLVMContextRef c, LLVMTypeRef *arg_types, unsigned arg_count) {
	auto result = array_make<lbArgType>(heap_allocator(), arg_count);
	for (unsigned index = 0; index < arg_count; index++) {
		LLVMTypeRef arg = arg_types[index];
		if (LLVMGetTypeKind(arg) != LLVMStructTypeKind) {
			result[index] = non_struct(c, arg);
			continue;
		}
		if (lb_sizeof(arg) == 0) {
			// Zero-sized aggregates are dropped from the call entirely.
			result[index] = lb_arg_type_ignore(arg);
		} else {
			result[index] = lb_arg_type_indirect(arg, lb_create_enum_attribute(c, "byval", true));
		}
	}
	return result;
}
// Fallback return mapping (abi_info classifies through amd64_type instead):
// register-sized structs come back packed into integers, larger structs
// through "sret", and a 128-bit integer on Windows stays a direct i128.
lbArgType compute_return_type(LLVMContextRef c, LLVMTypeRef return_type, bool return_is_defined) {
	if (!return_is_defined) {
		return lb_arg_type_direct(LLVMVoidTypeInContext(c));
	}
	if (lb_is_type_kind(return_type, LLVMStructTypeKind)) {
		i64 size = lb_sizeof(return_type);
		if (size == 1 || size == 2 || size == 4 || size == 8) {
			return lb_arg_type_direct(return_type, LLVMIntTypeInContext(c, cast(unsigned)(8*size)), nullptr, nullptr);
		}
		return lb_arg_type_indirect(return_type, lb_create_enum_attribute(c, "sret", true));
	}
	if (build_context.metrics.os == TargetOs_windows && lb_is_type_kind(return_type, LLVMIntegerTypeKind) && lb_sizeof(return_type) == 16) {
		return lb_arg_type_direct(return_type, LLVMIntTypeInContext(c, 128), nullptr, nullptr);
	}
	return non_struct(c, return_type);
}
};
namespace lbAbiAarch64 {
	// Stub: the aarch64 ABI lowering is not implemented yet. Only the LLVM
	// context is filled in; args, ret, and calling_convention are left at
	// whatever gb_alloc_item initialised them to.
	// NOTE(review): presumably gb_alloc_item zeroes the allocation, so
	// ft->ret is a zero lbArgType — TODO confirm callers tolerate this
	// before this path is reachable.
	LB_ABI_INFO(abi_info) {
		lbFunctionType *ft = gb_alloc_item(heap_allocator(), lbFunctionType);
		ft->ctx = c;
		// ft->args = compute_arg_types(c, arg_types, arg_count);
		// ft->ret = lbAbi386::compute_return_type(c, return_type, return_is_defined);
		// ft->calling_convention = calling_convention;
		return ft;
	}
}
// Entry point: selects the ABI lowering for a function type based on the
// calling convention and the target triple. The "None"-style conventions
// bypass classification entirely and pass every value through unchanged.
LB_ABI_INFO(lb_get_abi_info) {
	if (calling_convention == ProcCC_None ||
	    calling_convention == ProcCC_PureNone ||
	    calling_convention == ProcCC_InlineAsm) {
		lbFunctionType *ft = gb_alloc_item(heap_allocator(), lbFunctionType);
		ft->ctx = c;
		ft->calling_convention = calling_convention;
		ft->args = array_make<lbArgType>(heap_allocator(), arg_count);
		for (unsigned index = 0; index < arg_count; index++) {
			ft->args[index] = lb_arg_type_direct(arg_types[index]);
		}
		ft->ret = return_is_defined
			? lb_arg_type_direct(return_type)
			: lb_arg_type_direct(LLVMVoidTypeInContext(c));
		return ft;
	}

	if (build_context.metrics.arch == TargetArch_amd64) {
		if (build_context.metrics.os == TargetOs_windows) {
			return lbAbiAmd64Win64::abi_info(c, arg_types, arg_count, return_type, return_is_defined, calling_convention);
		}
		return lbAbiAmd64SysV::abi_info(c, arg_types, arg_count, return_type, return_is_defined, calling_convention);
	}
	if (build_context.metrics.arch == TargetArch_386 ||
	    build_context.metrics.arch == TargetArch_wasm32) {
		// wasm32 reuses the 386 lowering for now.
		return lbAbi386::abi_info(c, arg_types, arg_count, return_type, return_is_defined, calling_convention);
	}

	GB_PANIC("Unsupported ABI");
	return {};
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,3 +1,5 @@
#if defined(LLVM_BACKEND_SUPPORT)
#if defined(GB_SYSTEM_WINDOWS)
#include "llvm-c/Core.h"
#include "llvm-c/ExecutionEngine.h"
#include "llvm-c/Target.h"
@@ -12,6 +14,23 @@
#include "llvm-c/Transforms/Scalar.h"
#include "llvm-c/Transforms/Utils.h"
#include "llvm-c/Transforms/Vectorize.h"
#else
#include <llvm-c/Core.h>
#include <llvm-c/ExecutionEngine.h>
#include <llvm-c/Target.h>
#include <llvm-c/Analysis.h>
#include <llvm-c/Object.h>
#include <llvm-c/BitWriter.h>
#include <llvm-c/DebugInfo.h>
#include <llvm-c/Transforms/AggressiveInstCombine.h>
#include <llvm-c/Transforms/InstCombine.h>
#include <llvm-c/Transforms/IPO.h>
#include <llvm-c/Transforms/PassManagerBuilder.h>
#include <llvm-c/Transforms/Scalar.h>
#include <llvm-c/Transforms/Utils.h>
#include <llvm-c/Transforms/Vectorize.h>
#endif
#endif
struct lbProcedure;
@@ -74,6 +93,8 @@ struct lbModule {
gbMutex mutex;
Map<LLVMTypeRef> types; // Key: Type *
Map<Type *> llvm_types; // Key: LLVMTypeRef
i32 internal_type_level;
Map<lbValue> values; // Key: Entity *
StringMap<lbValue> members;
@@ -83,6 +104,10 @@ struct lbModule {
StringMap<LLVMValueRef> const_strings;
Map<lbProcedure *> anonymous_proc_lits; // Key: Ast *
Map<struct lbFunctionType *> function_type_map; // Key: Type *
Map<lbProcedure *> equal_procs; // Key: Type *
Map<lbProcedure *> hasher_procs; // Key: Type *
u32 global_array_index;
u32 global_generated_index;
@@ -199,6 +224,7 @@ struct lbProcedure {
bool is_entry_point;
bool is_startup;
lbFunctionType *abi_function_type;
LLVMValueRef value;
LLVMBuilderRef builder;
@@ -301,7 +327,7 @@ lbAddr lb_add_local(lbProcedure *p, Type *type, Entity *e=nullptr, bool zero_ini
void lb_add_foreign_library_path(lbModule *m, Entity *e);
lbValue lb_typeid(lbModule *m, Type *type, Type *typeid_type=t_typeid);
lbValue lb_typeid(lbModule *m, Type *type);
lbValue lb_address_from_load_or_generate_local(lbProcedure *p, lbValue value);
lbValue lb_address_from_load(lbProcedure *p, lbValue value);
@@ -344,7 +370,7 @@ String lb_get_const_string(lbModule *m, lbValue value);
lbValue lb_generate_local_array(lbProcedure *p, Type *elem_type, i64 count, bool zero_init=true);
lbValue lb_generate_global_array(lbModule *m, Type *elem_type, i64 count, String prefix, i64 id);
lbValue lb_gen_map_header(lbProcedure *p, lbValue map_val_ptr, Type *map_type);
lbValue lb_gen_map_key(lbProcedure *p, lbValue key, Type *key_type);
lbValue lb_gen_map_hash(lbProcedure *p, lbValue key, Type *key_type);
void lb_insert_dynamic_map_key_and_value(lbProcedure *p, lbAddr addr, Type *map_type, lbValue map_key, lbValue map_value, Ast *node);
@@ -354,6 +380,9 @@ lbValue lb_emit_source_code_location(lbProcedure *p, String const &procedure, To
lbValue lb_handle_param_value(lbProcedure *p, Type *parameter_type, ParameterValue const &param_value, TokenPos const &pos);
lbValue lb_get_equal_proc_for_type(lbModule *m, Type *type);
lbValue lb_get_hasher_proc_for_type(lbModule *m, Type *type);
lbValue lb_emit_conv(lbProcedure *p, lbValue value, Type *t);
#define LB_STARTUP_RUNTIME_PROC_NAME "__$startup_runtime"
#define LB_STARTUP_TYPE_INFO_PROC_NAME "__$startup_type_info"

File diff suppressed because it is too large Load Diff

View File

@@ -108,8 +108,25 @@ Token ast_token(Ast *node) {
return empty_token;
}
isize ast_node_size(AstKind kind) {
return align_formula_isize(gb_size_of(AstCommonStuff) + ast_variant_sizes[kind], gb_align_of(void *));
}
// NOTE(bill): And this below is why is I/we need a new language! Discriminated unions are a pain in C/C++
Ast *alloc_ast_node(AstFile *f, AstKind kind) {
gbAllocator a = ast_allocator(f);
isize size = ast_node_size(kind);
Ast *node = cast(Ast *)gb_alloc(a, size);
node->kind = kind;
node->file = f;
return node;
}
Ast *clone_ast(Ast *node);
Array<Ast *> clone_ast_array(Array<Ast *> array) {
Array<Ast *> clone_ast_array(Array<Ast *> const &array) {
Array<Ast *> result = {};
if (array.count > 0) {
result = array_make<Ast *>(ast_allocator(nullptr), array.count);
@@ -119,13 +136,23 @@ Array<Ast *> clone_ast_array(Array<Ast *> array) {
}
return result;
}
Slice<Ast *> clone_ast_array(Slice<Ast *> const &array) {
Slice<Ast *> result = {};
if (array.count > 0) {
result = slice_clone(permanent_allocator(), array);
for_array(i, array) {
result[i] = clone_ast(array[i]);
}
}
return result;
}
Ast *clone_ast(Ast *node) {
if (node == nullptr) {
return nullptr;
}
Ast *n = alloc_ast_node(node->file, node->kind);
gb_memmove(n, node, gb_size_of(Ast));
gb_memmove(n, node, ast_node_size(node->kind));
switch (n->kind) {
default: GB_PANIC("Unhandled Ast %.*s", LIT(ast_strings[n->kind])); break;
@@ -463,23 +490,6 @@ bool ast_node_expect(Ast *node, AstKind kind) {
return true;
}
gb_global gbAtomic64 total_allocated_node_memory = {0};
gb_global gbAtomic64 total_subtype_node_memory_test = {0};
// NOTE(bill): And this below is why is I/we need a new language! Discriminated unions are a pain in C/C++
Ast *alloc_ast_node(AstFile *f, AstKind kind) {
gbAllocator a = ast_allocator(f);
gb_atomic64_fetch_add(&total_allocated_node_memory, cast(i64)(gb_size_of(Ast)));
gb_atomic64_fetch_add(&total_subtype_node_memory_test, cast(i64)(gb_size_of(AstCommonStuff) + ast_variant_sizes[kind]));
Ast *node = gb_alloc_item(a, Ast);
node->kind = kind;
node->file = f;
return node;
}
Ast *ast_bad_expr(AstFile *f, Token begin, Token end) {
Ast *result = alloc_ast_node(f, Ast_BadExpr);
result->BadExpr.begin = begin;
@@ -537,10 +547,10 @@ Ast *ast_paren_expr(AstFile *f, Ast *expr, Token open, Token close) {
return result;
}
Ast *ast_call_expr(AstFile *f, Ast *proc, Array<Ast *> args, Token open, Token close, Token ellipsis) {
Ast *ast_call_expr(AstFile *f, Ast *proc, Array<Ast *> const &args, Token open, Token close, Token ellipsis) {
Ast *result = alloc_ast_node(f, Ast_CallExpr);
result->CallExpr.proc = proc;
result->CallExpr.args = args;
result->CallExpr.args = slice_from_array(args);
result->CallExpr.open = open;
result->CallExpr.close = close;
result->CallExpr.ellipsis = ellipsis;
@@ -624,7 +634,8 @@ Ast *ast_undef(AstFile *f, Token token) {
Ast *ast_basic_lit(AstFile *f, Token basic_lit) {
Ast *result = alloc_ast_node(f, Ast_BasicLit);
result->BasicLit.token = basic_lit;
result->BasicLit.value = exact_value_from_basic_literal(basic_lit);
result->tav.mode = Addressing_Constant;
result->tav.value = exact_value_from_basic_literal(basic_lit);
return result;
}
@@ -643,12 +654,12 @@ Ast *ast_ellipsis(AstFile *f, Token token, Ast *expr) {
}
Ast *ast_proc_group(AstFile *f, Token token, Token open, Token close, Array<Ast *> args) {
Ast *ast_proc_group(AstFile *f, Token token, Token open, Token close, Array<Ast *> const &args) {
Ast *result = alloc_ast_node(f, Ast_ProcGroup);
result->ProcGroup.token = token;
result->ProcGroup.open = open;
result->ProcGroup.close = close;
result->ProcGroup.args = args;
result->ProcGroup.args = slice_from_array(args);
return result;
}
@@ -658,7 +669,7 @@ Ast *ast_proc_lit(AstFile *f, Ast *type, Ast *body, u64 tags, Token where_token,
result->ProcLit.body = body;
result->ProcLit.tags = tags;
result->ProcLit.where_token = where_token;
result->ProcLit.where_clauses = where_clauses;
result->ProcLit.where_clauses = slice_from_array(where_clauses);
return result;
}
@@ -670,10 +681,10 @@ Ast *ast_field_value(AstFile *f, Ast *field, Ast *value, Token eq) {
return result;
}
Ast *ast_compound_lit(AstFile *f, Ast *type, Array<Ast *> elems, Token open, Token close) {
Ast *ast_compound_lit(AstFile *f, Ast *type, Array<Ast *> const &elems, Token open, Token close) {
Ast *result = alloc_ast_node(f, Ast_CompoundLit);
result->CompoundLit.type = type;
result->CompoundLit.elems = elems;
result->CompoundLit.elems = slice_from_array(elems);
result->CompoundLit.open = open;
result->CompoundLit.close = close;
return result;
@@ -736,7 +747,7 @@ Ast *ast_inline_asm_expr(AstFile *f, Token token, Token open, Token close,
result->InlineAsmExpr.token = token;
result->InlineAsmExpr.open = open;
result->InlineAsmExpr.close = close;
result->InlineAsmExpr.param_types = param_types;
result->InlineAsmExpr.param_types = slice_from_array(param_types);
result->InlineAsmExpr.return_type = return_type;
result->InlineAsmExpr.asm_string = asm_string;
result->InlineAsmExpr.constraints_string = constraints_string;
@@ -768,18 +779,18 @@ Ast *ast_expr_stmt(AstFile *f, Ast *expr) {
return result;
}
Ast *ast_assign_stmt(AstFile *f, Token op, Array<Ast *> lhs, Array<Ast *> rhs) {
Ast *ast_assign_stmt(AstFile *f, Token op, Array<Ast *> const &lhs, Array<Ast *> const &rhs) {
Ast *result = alloc_ast_node(f, Ast_AssignStmt);
result->AssignStmt.op = op;
result->AssignStmt.lhs = lhs;
result->AssignStmt.rhs = rhs;
result->AssignStmt.lhs = slice_from_array(lhs);
result->AssignStmt.rhs = slice_from_array(rhs);
return result;
}
Ast *ast_block_stmt(AstFile *f, Array<Ast *> stmts, Token open, Token close) {
Ast *ast_block_stmt(AstFile *f, Array<Ast *> const &stmts, Token open, Token close) {
Ast *result = alloc_ast_node(f, Ast_BlockStmt);
result->BlockStmt.stmts = stmts;
result->BlockStmt.stmts = slice_from_array(stmts);
result->BlockStmt.open = open;
result->BlockStmt.close = close;
return result;
@@ -805,10 +816,10 @@ Ast *ast_when_stmt(AstFile *f, Token token, Ast *cond, Ast *body, Ast *else_stmt
}
Ast *ast_return_stmt(AstFile *f, Token token, Array<Ast *> results) {
Ast *ast_return_stmt(AstFile *f, Token token, Array<Ast *> const &results) {
Ast *result = alloc_ast_node(f, Ast_ReturnStmt);
result->ReturnStmt.token = token;
result->ReturnStmt.results = results;
result->ReturnStmt.results = slice_from_array(results);
return result;
}
@@ -866,11 +877,11 @@ Ast *ast_type_switch_stmt(AstFile *f, Token token, Ast *tag, Ast *body) {
return result;
}
Ast *ast_case_clause(AstFile *f, Token token, Array<Ast *> list, Array<Ast *> stmts) {
Ast *ast_case_clause(AstFile *f, Token token, Array<Ast *> const &list, Array<Ast *> const &stmts) {
Ast *result = alloc_ast_node(f, Ast_CaseClause);
result->CaseClause.token = token;
result->CaseClause.list = list;
result->CaseClause.stmts = stmts;
result->CaseClause.list = slice_from_array(list);
result->CaseClause.stmts = slice_from_array(stmts);
return result;
}
@@ -889,10 +900,10 @@ Ast *ast_branch_stmt(AstFile *f, Token token, Ast *label) {
return result;
}
Ast *ast_using_stmt(AstFile *f, Token token, Array<Ast *> list) {
Ast *ast_using_stmt(AstFile *f, Token token, Array<Ast *> const &list) {
Ast *result = alloc_ast_node(f, Ast_UsingStmt);
result->UsingStmt.token = token;
result->UsingStmt.list = list;
result->UsingStmt.list = slice_from_array(list);
return result;
}
@@ -905,10 +916,10 @@ Ast *ast_bad_decl(AstFile *f, Token begin, Token end) {
return result;
}
Ast *ast_field(AstFile *f, Array<Ast *> names, Ast *type, Ast *default_value, u32 flags, Token tag,
Ast *ast_field(AstFile *f, Array<Ast *> const &names, Ast *type, Ast *default_value, u32 flags, Token tag,
CommentGroup *docs, CommentGroup *comment) {
Ast *result = alloc_ast_node(f, Ast_Field);
result->Field.names = names;
result->Field.names = slice_from_array(names);
result->Field.type = type;
result->Field.default_value = default_value;
result->Field.flags = flags;
@@ -918,10 +929,10 @@ Ast *ast_field(AstFile *f, Array<Ast *> names, Ast *type, Ast *default_value, u3
return result;
}
Ast *ast_field_list(AstFile *f, Token token, Array<Ast *> list) {
Ast *ast_field_list(AstFile *f, Token token, Array<Ast *> const &list) {
Ast *result = alloc_ast_node(f, Ast_FieldList);
result->FieldList.token = token;
result->FieldList.list = list;
result->FieldList.list = slice_from_array(list);
return result;
}
@@ -1002,7 +1013,7 @@ Ast *ast_dynamic_array_type(AstFile *f, Token token, Ast *elem) {
return result;
}
Ast *ast_struct_type(AstFile *f, Token token, Array<Ast *> fields, isize field_count,
Ast *ast_struct_type(AstFile *f, Token token, Slice<Ast *> fields, isize field_count,
Ast *polymorphic_params, bool is_packed, bool is_raw_union,
Ast *align,
Token where_token, Array<Ast *> const &where_clauses) {
@@ -1015,38 +1026,38 @@ Ast *ast_struct_type(AstFile *f, Token token, Array<Ast *> fields, isize field_c
result->StructType.is_raw_union = is_raw_union;
result->StructType.align = align;
result->StructType.where_token = where_token;
result->StructType.where_clauses = where_clauses;
result->StructType.where_clauses = slice_from_array(where_clauses);
return result;
}
Ast *ast_union_type(AstFile *f, Token token, Array<Ast *> variants, Ast *polymorphic_params, Ast *align, bool no_nil, bool maybe,
Ast *ast_union_type(AstFile *f, Token token, Array<Ast *> const &variants, Ast *polymorphic_params, Ast *align, bool no_nil, bool maybe,
Token where_token, Array<Ast *> const &where_clauses) {
Ast *result = alloc_ast_node(f, Ast_UnionType);
result->UnionType.token = token;
result->UnionType.variants = variants;
result->UnionType.variants = slice_from_array(variants);
result->UnionType.polymorphic_params = polymorphic_params;
result->UnionType.align = align;
result->UnionType.no_nil = no_nil;
result->UnionType.maybe = maybe;
result->UnionType.maybe = maybe;
result->UnionType.where_token = where_token;
result->UnionType.where_clauses = where_clauses;
result->UnionType.where_clauses = slice_from_array(where_clauses);
return result;
}
Ast *ast_enum_type(AstFile *f, Token token, Ast *base_type, Array<Ast *> fields) {
Ast *ast_enum_type(AstFile *f, Token token, Ast *base_type, Array<Ast *> const &fields) {
Ast *result = alloc_ast_node(f, Ast_EnumType);
result->EnumType.token = token;
result->EnumType.base_type = base_type;
result->EnumType.fields = fields;
result->EnumType.fields = slice_from_array(fields);
return result;
}
Ast *ast_bit_field_type(AstFile *f, Token token, Array<Ast *> fields, Ast *align) {
Ast *ast_bit_field_type(AstFile *f, Token token, Array<Ast *> const &fields, Ast *align) {
Ast *result = alloc_ast_node(f, Ast_BitFieldType);
result->BitFieldType.token = token;
result->BitFieldType.fields = fields;
result->BitFieldType.fields = slice_from_array(fields);
result->BitFieldType.align = align;
return result;
}
@@ -1069,7 +1080,7 @@ Ast *ast_map_type(AstFile *f, Token token, Ast *key, Ast *value) {
Ast *ast_foreign_block_decl(AstFile *f, Token token, Ast *foreign_library, Ast *body,
CommentGroup *docs) {
CommentGroup *docs) {
Ast *result = alloc_ast_node(f, Ast_ForeignBlockDecl);
result->ForeignBlockDecl.token = token;
result->ForeignBlockDecl.foreign_library = foreign_library;
@@ -1087,12 +1098,12 @@ Ast *ast_label_decl(AstFile *f, Token token, Ast *name) {
return result;
}
Ast *ast_value_decl(AstFile *f, Array<Ast *> names, Ast *type, Array<Ast *> values, bool is_mutable,
CommentGroup *docs, CommentGroup *comment) {
Ast *ast_value_decl(AstFile *f, Array<Ast *> const &names, Ast *type, Array<Ast *> const &values, bool is_mutable,
CommentGroup *docs, CommentGroup *comment) {
Ast *result = alloc_ast_node(f, Ast_ValueDecl);
result->ValueDecl.names = names;
result->ValueDecl.names = slice_from_array(names);
result->ValueDecl.type = type;
result->ValueDecl.values = values;
result->ValueDecl.values = slice_from_array(values);
result->ValueDecl.is_mutable = is_mutable;
result->ValueDecl.docs = docs;
result->ValueDecl.comment = comment;
@@ -1111,7 +1122,7 @@ Ast *ast_package_decl(AstFile *f, Token token, Token name, CommentGroup *docs, C
}
Ast *ast_import_decl(AstFile *f, Token token, bool is_using, Token relpath, Token import_name,
CommentGroup *docs, CommentGroup *comment) {
CommentGroup *docs, CommentGroup *comment) {
Ast *result = alloc_ast_node(f, Ast_ImportDecl);
result->ImportDecl.token = token;
result->ImportDecl.is_using = is_using;
@@ -1123,10 +1134,10 @@ Ast *ast_import_decl(AstFile *f, Token token, bool is_using, Token relpath, Toke
}
Ast *ast_foreign_import_decl(AstFile *f, Token token, Array<Token> filepaths, Token library_name,
CommentGroup *docs, CommentGroup *comment) {
CommentGroup *docs, CommentGroup *comment) {
Ast *result = alloc_ast_node(f, Ast_ForeignImportDecl);
result->ForeignImportDecl.token = token;
result->ForeignImportDecl.filepaths = filepaths;
result->ForeignImportDecl.filepaths = slice_from_array(filepaths);
result->ForeignImportDecl.library_name = library_name;
result->ForeignImportDecl.docs = docs;
result->ForeignImportDecl.comment = comment;
@@ -1136,11 +1147,11 @@ Ast *ast_foreign_import_decl(AstFile *f, Token token, Array<Token> filepaths, To
}
Ast *ast_attribute(AstFile *f, Token token, Token open, Token close, Array<Ast *> elems) {
Ast *ast_attribute(AstFile *f, Token token, Token open, Token close, Array<Ast *> const &elems) {
Ast *result = alloc_ast_node(f, Ast_Attribute);
result->Attribute.token = token;
result->Attribute.open = open;
result->Attribute.elems = elems;
result->Attribute.elems = slice_from_array(elems);
result->Attribute.close = close;
return result;
}
@@ -1182,6 +1193,12 @@ CommentGroup *consume_comment_group(AstFile *f, isize n, isize *end_line_) {
Array<Token> list = {};
list.allocator = heap_allocator();
isize end_line = f->curr_token.pos.line;
if (f->curr_token_index == 1 &&
f->prev_token.kind == Token_Comment &&
f->prev_token.pos.line+1 == f->curr_token.pos.line) {
// NOTE(bill): Special logic for the first comment in the file
array_add(&list, f->prev_token);
}
while (f->curr_token.kind == Token_Comment &&
f->curr_token.pos.line <= end_line+n) {
array_add(&list, consume_comment(f, &end_line));
@@ -1192,7 +1209,7 @@ CommentGroup *consume_comment_group(AstFile *f, isize n, isize *end_line_) {
CommentGroup *comments = nullptr;
if (list.count > 0) {
comments = gb_alloc_item(heap_allocator(), CommentGroup);
comments->list = list;
comments->list = slice_from_array(list);
array_add(&f->comments, comments);
}
return comments;
@@ -1894,12 +1911,18 @@ Ast *parse_operand(AstFile *f, bool lhs) {
case Token_opaque: {
Token token = expect_token(f, Token_opaque);
warning(token, "opaque is deprecated, please use #opaque");
Ast *type = parse_type(f);
return ast_opaque_type(f, token, type);
}
case Token_Hash: {
Token token = expect_token(f, Token_Hash);
if (allow_token(f, Token_opaque)) {
Ast *type = parse_type(f);
return ast_opaque_type(f, token, type);
}
Token name = expect_token(f, Token_Ident);
if (name.string == "type") {
return ast_helper_type(f, token, parse_type(f));
@@ -2201,7 +2224,7 @@ Ast *parse_operand(AstFile *f, bool lhs) {
if (allow_token(f, Token_OpenParen)) {
isize param_count = 0;
polymorphic_params = parse_field_list(f, &param_count, 0, Token_CloseParen, false, true);
polymorphic_params = parse_field_list(f, &param_count, 0, Token_CloseParen, true, true);
if (param_count == 0) {
syntax_error(polymorphic_params, "Expected at least 1 polymorphic parameter");
polymorphic_params = nullptr;
@@ -2262,7 +2285,7 @@ Ast *parse_operand(AstFile *f, bool lhs) {
Ast *fields = parse_struct_field_list(f, &name_count);
Token close = expect_token(f, Token_CloseBrace);
Array<Ast *> decls = {};
Slice<Ast *> decls = {};
if (fields != nullptr) {
GB_ASSERT(fields->kind == Ast_FieldList);
decls = fields->FieldList.list;
@@ -2284,7 +2307,7 @@ Ast *parse_operand(AstFile *f, bool lhs) {
if (allow_token(f, Token_OpenParen)) {
isize param_count = 0;
polymorphic_params = parse_field_list(f, &param_count, 0, Token_CloseParen, false, true);
polymorphic_params = parse_field_list(f, &param_count, 0, Token_CloseParen, true, true);
if (param_count == 0) {
syntax_error(polymorphic_params, "Expected at least 1 polymorphic parametric");
polymorphic_params = nullptr;
@@ -2586,7 +2609,15 @@ Ast *parse_call_expr(AstFile *f, Ast *operand) {
f->expr_level--;
close_paren = expect_closing(f, Token_CloseParen, str_lit("argument list"));
return ast_call_expr(f, operand, args, open_paren, close_paren, ellipsis);
Ast *call = ast_call_expr(f, operand, args, open_paren, close_paren, ellipsis);
Ast *o = unparen_expr(operand);
if (o->kind == Ast_SelectorExpr && o->SelectorExpr.token.kind == Token_ArrowRight) {
return ast_selector_call_expr(f, o->SelectorExpr.token, o, call);
}
return call;
}
Ast *parse_atom_expr(AstFile *f, Ast *operand, bool lhs) {
@@ -2638,11 +2669,10 @@ Ast *parse_atom_expr(AstFile *f, Ast *operand, bool lhs) {
case Token_ArrowRight: {
Token token = advance_token(f);
// syntax_error(token, "Selector expressions use '.' rather than '->'");
Ast *sel = ast_selector_expr(f, token, operand, parse_ident(f));
Ast *call = parse_call_expr(f, sel);
operand = ast_selector_call_expr(f, token, sel, call);
operand = ast_selector_expr(f, token, operand, parse_ident(f));
// Ast *call = parse_call_expr(f, sel);
// operand = ast_selector_call_expr(f, token, sel, call);
break;
}
@@ -3317,11 +3347,10 @@ FieldPrefixKind is_token_field_prefix(AstFile *f) {
return FieldPrefix_no_alias;
} else if (f->curr_token.string == "c_vararg") {
return FieldPrefix_c_var_arg;
} else if (f->curr_token.string == "const") {
return FieldPrefix_const;
}
break;
case Token_const:
return FieldPrefix_const;
}
return FieldPrefix_Unknown;
}
@@ -4723,18 +4752,14 @@ void parser_add_foreign_file_to_process(Parser *p, AstPackage *pkg, AstForeignFi
// NOTE(bill): Returns true if it's added
bool try_add_import_path(Parser *p, String const &path, String const &rel_path, TokenPos pos, PackageKind kind = Package_Normal) {
if (build_context.generate_docs) {
return false;
}
AstPackage *try_add_import_path(Parser *p, String const &path, String const &rel_path, TokenPos pos, PackageKind kind = Package_Normal) {
String const FILE_EXT = str_lit(".odin");
gb_mutex_lock(&p->file_add_mutex);
defer (gb_mutex_unlock(&p->file_add_mutex));
if (string_set_exists(&p->imported_files, path)) {
return false;
return nullptr;
}
string_set_add(&p->imported_files, path);
@@ -4757,7 +4782,7 @@ bool try_add_import_path(Parser *p, String const &path, String const &rel_path,
pkg->is_single_file = true;
parser_add_file_to_process(p, pkg, fi, pos);
parser_add_package(p, pkg);
return true;
return pkg;
}
@@ -4773,22 +4798,22 @@ bool try_add_import_path(Parser *p, String const &path, String const &rel_path,
switch (rd_err) {
case ReadDirectory_InvalidPath:
syntax_error(pos, "Invalid path: %.*s", LIT(rel_path));
return false;
return nullptr;
case ReadDirectory_NotExists:
syntax_error(pos, "Path does not exist: %.*s", LIT(rel_path));
return false;
return nullptr;
case ReadDirectory_Permission:
syntax_error(pos, "Unknown error whilst reading path %.*s", LIT(rel_path));
return false;
return nullptr;
case ReadDirectory_NotDir:
syntax_error(pos, "Expected a directory for a package, got a file: %.*s", LIT(rel_path));
return false;
return nullptr;
case ReadDirectory_Empty:
syntax_error(pos, "Empty directory: %.*s", LIT(rel_path));
return false;
return nullptr;
case ReadDirectory_Unknown:
syntax_error(pos, "Unknown error whilst reading path %.*s", LIT(rel_path));
return false;
return nullptr;
}
for_array(list_index, list) {
@@ -4810,7 +4835,7 @@ bool try_add_import_path(Parser *p, String const &path, String const &rel_path,
parser_add_package(p, pkg);
return true;
return pkg;
}
gb_global Rune illegal_import_runes[] = {
@@ -4829,7 +4854,7 @@ bool is_import_path_valid(String path) {
u8 *curr = start;
while (curr < end) {
isize width = 1;
Rune r = curr[0];
Rune r = *curr;
if (r >= 0x80) {
width = gb_utf8_decode(curr, end-curr, &r);
if (r == GB_RUNE_INVALID && width == 1) {
@@ -4854,6 +4879,45 @@ bool is_import_path_valid(String path) {
return false;
}
// Reports whether `path` is acceptable as a build-flag path argument.
// An empty path is invalid. The path must be valid UTF-8 (no invalid
// sequences, no BOM after the first rune) and must not contain any rune
// from `illegal_import_runes`; on Windows, '\\' is exempt from that check
// because it is a path separator there.
bool is_build_flag_path_valid(String path) {
	if (path.len == 0) {
		return false;
	}
	u8 *start = path.text;
	u8 *end   = path.text + path.len;
	u8 *curr  = start;
	while (curr < end) {
		isize width = 1;
		Rune r = *curr;
		if (r >= 0x80) {
			width = gb_utf8_decode(curr, end-curr, &r);
			if (r == GB_RUNE_INVALID && width == 1) {
				// Malformed UTF-8 sequence
				return false;
			} else if (r == GB_RUNE_BOM && curr-start > 0) {
				// A byte-order mark is only tolerated at the very start
				return false;
			}
		}
#if defined(GB_SYSTEM_WINDOWS)
		// '\\' is a legal path separator on Windows; skip the illegal-rune
		// scan entirely for it (hoisted out of the inner loop — the check
		// does not depend on the loop index).
		if (r != '\\')
#endif
		{
			for (isize i = 0; i < gb_count_of(illegal_import_runes); i++) {
				if (r == illegal_import_runes[i]) {
					return false;
				}
			}
		}
		curr += width;
	}
	return true;
}
bool is_package_name_reserved(String const &name) {
if (name == "builtin") {
@@ -4974,7 +5038,7 @@ bool determine_path_from_string(gbMutex *file_mutex, Ast *node, String base_dir,
void parse_setup_file_decls(Parser *p, AstFile *f, String base_dir, Array<Ast *> &decls);
void parse_setup_file_decls(Parser *p, AstFile *f, String base_dir, Slice<Ast *> &decls);
void parse_setup_file_when_stmt(Parser *p, AstFile *f, String base_dir, AstWhenStmt *ws) {
if (ws->body != nullptr) {
@@ -4995,7 +5059,7 @@ void parse_setup_file_when_stmt(Parser *p, AstFile *f, String base_dir, AstWhenS
}
}
void parse_setup_file_decls(Parser *p, AstFile *f, String base_dir, Array<Ast *> &decls) {
void parse_setup_file_decls(Parser *p, AstFile *f, String base_dir, Slice<Ast *> &decls) {
for_array(i, decls) {
Ast *node = decls[i];
if (!is_ast_decl(node) &&
@@ -5034,8 +5098,7 @@ void parse_setup_file_decls(Parser *p, AstFile *f, String base_dir, Array<Ast *>
} else if (node->kind == Ast_ForeignImportDecl) {
ast_node(fl, ForeignImportDecl, node);
fl->fullpaths.allocator = heap_allocator();
array_reserve(&fl->fullpaths, fl->filepaths.count);
auto fullpaths = array_make<String>(permanent_allocator(), 0, fl->filepaths.count);
for_array(fp_idx, fl->filepaths) {
String file_str = fl->filepaths[fp_idx].string;
@@ -5049,14 +5112,17 @@ void parse_setup_file_decls(Parser *p, AstFile *f, String base_dir, Array<Ast *>
}
fullpath = foreign_path;
}
array_add(&fl->fullpaths, fullpath);
array_add(&fullpaths, fullpath);
}
if (fl->fullpaths.count == 0) {
if (fullpaths.count == 0) {
syntax_error(decls[i], "No foreign paths found");
decls[i] = ast_bad_decl(f, fl->filepaths[0], fl->filepaths[fl->filepaths.count-1]);
goto end;
}
fl->fullpaths = slice_from_array(fullpaths);
} else if (node->kind == Ast_WhenStmt) {
ast_node(ws, WhenStmt, node);
parse_setup_file_when_stmt(p, f, base_dir, ws);
@@ -5218,12 +5284,12 @@ bool parse_file(Parser *p, AstFile *f) {
f->pkg_decl = pd;
if (f->error_count == 0) {
f->decls = array_make<Ast *>(heap_allocator());
auto decls = array_make<Ast *>(heap_allocator());
while (f->curr_token.kind != Token_EOF) {
Ast *stmt = parse_stmt(f);
if (stmt && stmt->kind != Ast_EmptyStmt) {
array_add(&f->decls, stmt);
array_add(&decls, stmt);
if (stmt->kind == Ast_ExprStmt &&
stmt->ExprStmt.expr != nullptr &&
stmt->ExprStmt.expr->kind == Ast_ProcLit) {
@@ -5232,6 +5298,8 @@ bool parse_file(Parser *p, AstFile *f) {
}
}
f->decls = slice_from_array(decls);
parse_setup_file_decls(p, f, base_dir, f->decls);
}
@@ -5325,7 +5393,7 @@ ParseFileError parse_packages(Parser *p, String init_filename) {
}
TokenPos init_pos = {};
if (!build_context.generate_docs) {
{
String s = get_fullpath_core(heap_allocator(), str_lit("runtime"));
try_add_import_path(p, s, s, init_pos, Package_Runtime);
}
@@ -5333,6 +5401,22 @@ ParseFileError parse_packages(Parser *p, String init_filename) {
try_add_import_path(p, init_fullpath, init_fullpath, init_pos, Package_Init);
p->init_fullpath = init_fullpath;
for_array(i, build_context.extra_packages) {
String path = build_context.extra_packages[i];
String fullpath = path_to_full_path(heap_allocator(), path); // LEAK?
if (!path_is_directory(fullpath)) {
String const ext = str_lit(".odin");
if (!string_ends_with(fullpath, ext)) {
error_line("Expected either a directory or a .odin file, got '%.*s'\n", LIT(fullpath));
return ParseFile_WrongExtension;
}
}
AstPackage *pkg = try_add_import_path(p, fullpath, fullpath, init_pos, Package_Normal);
if (pkg) {
pkg->is_extra = true;
}
}
thread_pool_start(&parser_thread_pool);
thread_pool_wait_to_process(&parser_thread_pool);

View File

@@ -46,7 +46,7 @@ enum ParseFileError {
};
struct CommentGroup {
Array<Token> list; // Token_Comment
Slice<Token> list; // Token_Comment
};
@@ -98,8 +98,8 @@ struct AstFile {
bool in_foreign_block;
bool allow_type;
Array<Ast *> decls;
Array<Ast *> imports; // 'import' 'using import'
Slice<Ast *> decls;
Array<Ast *> imports; // 'import'
isize directive_count;
Ast * curr_proc;
@@ -107,6 +107,8 @@ struct AstFile {
f64 time_to_tokenize; // seconds
f64 time_to_parse; // seconds
bool is_test;
CommentGroup *lead_comment; // Comment (block) before the decl
CommentGroup *line_comment; // Comment after the semicolon
CommentGroup *docs; // current docs
@@ -148,6 +150,7 @@ struct AstPackage {
Scope * scope;
DeclInfo *decl_info;
bool used;
bool is_extra;
};
@@ -217,14 +220,16 @@ enum ProcCallingConvention {
ProcCC_ForeignBlockDefault = -1,
};
enum StateFlag {
enum StateFlag : u16 {
StateFlag_bounds_check = 1<<0,
StateFlag_no_bounds_check = 1<<1,
StateFlag_no_deferred = 1<<5,
StateFlag_BeenHandled = 1<<15,
};
enum ViralStateFlag {
enum ViralStateFlag : u16 {
ViralStateFlag_ContainsDeferredProcedure = 1<<0,
};
@@ -275,7 +280,6 @@ char const *inline_asm_dialect_strings[InlineAsmDialect_COUNT] = {
AST_KIND(Undef, "undef", Token) \
AST_KIND(BasicLit, "basic literal", struct { \
Token token; \
ExactValue value; \
}) \
AST_KIND(BasicDirective, "basic directive", struct { \
Token token; \
@@ -289,7 +293,7 @@ char const *inline_asm_dialect_strings[InlineAsmDialect_COUNT] = {
Token token; \
Token open; \
Token close; \
Array<Ast *> args; \
Slice<Ast *> args; \
}) \
AST_KIND(ProcLit, "procedure literal", struct { \
Ast *type; \
@@ -297,12 +301,12 @@ char const *inline_asm_dialect_strings[InlineAsmDialect_COUNT] = {
u64 tags; \
ProcInlining inlining; \
Token where_token; \
Array<Ast *> where_clauses; \
Slice<Ast *> where_clauses; \
DeclInfo *decl; \
}) \
AST_KIND(CompoundLit, "compound literal", struct { \
Ast *type; \
Array<Ast *> elems; \
Slice<Ast *> elems; \
Token open, close; \
i64 max_count; \
}) \
@@ -325,7 +329,7 @@ AST_KIND(_ExprBegin, "", bool) \
}) \
AST_KIND(CallExpr, "call expression", struct { \
Ast * proc; \
Array<Ast *> args; \
Slice<Ast *> args; \
Token open; \
Token close; \
Token ellipsis; \
@@ -342,7 +346,7 @@ AST_KIND(_ExprBegin, "", bool) \
AST_KIND(InlineAsmExpr, "inline asm expression", struct { \
Token token; \
Token open, close; \
Array<Ast *> param_types; \
Slice<Ast *> param_types; \
Ast *return_type; \
Ast *asm_string; \
Ast *constraints_string; \
@@ -362,11 +366,11 @@ AST_KIND(_StmtBegin, "", bool) \
}) \
AST_KIND(AssignStmt, "assign statement", struct { \
Token op; \
Array<Ast *> lhs, rhs; \
Slice<Ast *> lhs, rhs; \
}) \
AST_KIND(_ComplexStmtBegin, "", bool) \
AST_KIND(BlockStmt, "block statement", struct { \
Array<Ast *> stmts; \
Slice<Ast *> stmts; \
Ast *label; \
Token open, close; \
}) \
@@ -388,7 +392,7 @@ AST_KIND(_ComplexStmtBegin, "", bool) \
}) \
AST_KIND(ReturnStmt, "return statement", struct { \
Token token; \
Array<Ast *> results; \
Slice<Ast *> results; \
}) \
AST_KIND(ForStmt, "for statement", struct { \
Token token; \
@@ -418,8 +422,8 @@ AST_KIND(_ComplexStmtBegin, "", bool) \
}) \
AST_KIND(CaseClause, "case clause", struct { \
Token token; \
Array<Ast *> list; \
Array<Ast *> stmts; \
Slice<Ast *> list; \
Slice<Ast *> stmts; \
Entity *implicit_entity; \
}) \
AST_KIND(SwitchStmt, "switch statement", struct { \
@@ -436,12 +440,12 @@ AST_KIND(_ComplexStmtBegin, "", bool) \
Ast *tag; \
Ast *body; \
bool partial; \
}) \
}) \
AST_KIND(DeferStmt, "defer statement", struct { Token token; Ast *stmt; }) \
AST_KIND(BranchStmt, "branch statement", struct { Token token; Ast *label; }) \
AST_KIND(UsingStmt, "using statement", struct { \
Token token; \
Array<Ast *> list; \
Slice<Ast *> list; \
}) \
AST_KIND(_ComplexStmtEnd, "", bool) \
AST_KIND(_StmtEnd, "", bool) \
@@ -459,9 +463,9 @@ AST_KIND(_DeclBegin, "", bool) \
Ast *name; \
}) \
AST_KIND(ValueDecl, "value declaration", struct { \
Array<Ast *> names; \
Slice<Ast *> names; \
Ast * type; \
Array<Ast *> values; \
Slice<Ast *> values; \
Array<Ast *> attributes; \
CommentGroup *docs; \
CommentGroup *comment; \
@@ -486,10 +490,10 @@ AST_KIND(_DeclBegin, "", bool) \
}) \
AST_KIND(ForeignImportDecl, "foreign import declaration", struct { \
Token token; \
Array<Token> filepaths; \
Slice<Token> filepaths; \
Token library_name; \
String collection_name; \
Array<String> fullpaths; \
Slice<String> fullpaths; \
Array<Ast *> attributes; \
CommentGroup *docs; \
CommentGroup *comment; \
@@ -497,11 +501,11 @@ AST_KIND(_DeclBegin, "", bool) \
AST_KIND(_DeclEnd, "", bool) \
AST_KIND(Attribute, "attribute", struct { \
Token token; \
Array<Ast *> elems; \
Slice<Ast *> elems; \
Token open, close; \
}) \
AST_KIND(Field, "field", struct { \
Array<Ast *> names; \
Slice<Ast *> names; \
Ast * type; \
Ast * default_value; \
Token tag; \
@@ -511,7 +515,7 @@ AST_KIND(_DeclEnd, "", bool) \
}) \
AST_KIND(FieldList, "field list", struct { \
Token token; \
Array<Ast *> list; \
Slice<Ast *> list; \
}) \
AST_KIND(_TypeBegin, "", bool) \
AST_KIND(TypeidType, "typeid", struct { \
@@ -565,34 +569,34 @@ AST_KIND(_TypeBegin, "", bool) \
}) \
AST_KIND(StructType, "struct type", struct { \
Token token; \
Array<Ast *> fields; \
Slice<Ast *> fields; \
isize field_count; \
Ast *polymorphic_params; \
Ast *align; \
Token where_token; \
Array<Ast *> where_clauses; \
Slice<Ast *> where_clauses; \
bool is_packed; \
bool is_raw_union; \
}) \
AST_KIND(UnionType, "union type", struct { \
Token token; \
Array<Ast *> variants; \
Slice<Ast *> variants; \
Ast *polymorphic_params; \
Ast * align; \
bool maybe; \
bool no_nil; \
Token where_token; \
Array<Ast *> where_clauses; \
Slice<Ast *> where_clauses; \
}) \
AST_KIND(EnumType, "enum type", struct { \
Token token; \
Ast * base_type; \
Array<Ast *> fields; /* FieldValue */ \
Slice<Ast *> fields; /* FieldValue */ \
bool is_using; \
}) \
AST_KIND(BitFieldType, "bit field type", struct { \
Token token; \
Array<Ast *> fields; /* FieldValue with : */ \
Slice<Ast *> fields; /* FieldValue with : */ \
Ast * align; \
}) \
AST_KIND(BitSetType, "bit set type", struct { \
@@ -638,23 +642,22 @@ isize const ast_variant_sizes[] = {
struct AstCommonStuff {
AstKind kind;
u32 state_flags;
u32 viral_state_flags;
bool been_handled;
u16 state_flags;
u16 viral_state_flags;
AstFile * file;
Scope * scope;
TypeAndValue tav;
TypeAndValue tav; // TODO(bill): Make this a pointer to minimize pointer size
};
struct Ast {
AstKind kind;
u32 state_flags;
u32 viral_state_flags;
bool been_handled;
u16 state_flags;
u16 viral_state_flags;
AstFile * file;
Scope * scope;
TypeAndValue tav;
TypeAndValue tav; // TODO(bill): Make this a pointer to minimize pointer size
// IMPORTANT NOTE(bill): This must be at the end since the AST is allocated to be size of the variant
union {
#define AST_KIND(_kind_name_, name, ...) GB_JOIN2(Ast, _kind_name_) _kind_name_;
AST_KINDS

View File

@@ -1,25 +1,30 @@
typedef u32 PtrSetIndex;
struct PtrSetFindResult {
isize hash_index;
isize entry_prev;
isize entry_index;
PtrSetIndex hash_index;
PtrSetIndex entry_prev;
PtrSetIndex entry_index;
};
enum : PtrSetIndex { PTR_SET_SENTINEL = ~(PtrSetIndex)0 };
template <typename T>
struct PtrSetEntry {
T ptr;
isize next;
T ptr;
PtrSetIndex next;
};
template <typename T>
struct PtrSet {
Array<isize> hashes;
Array<PtrSetIndex> hashes;
Array<PtrSetEntry<T>> entries;
};
template <typename T> void ptr_set_init (PtrSet<T> *s, gbAllocator a, isize capacity = 16);
template <typename T> void ptr_set_destroy(PtrSet<T> *s);
template <typename T> T ptr_set_add (PtrSet<T> *s, T ptr);
template <typename T> bool ptr_set_update (PtrSet<T> *s, T ptr); // returns true if it previously existsed
template <typename T> bool ptr_set_exists (PtrSet<T> *s, T ptr);
template <typename T> void ptr_set_remove (PtrSet<T> *s, T ptr);
template <typename T> void ptr_set_clear (PtrSet<T> *s);
@@ -27,12 +32,31 @@ template <typename T> void ptr_set_grow (PtrSet<T> *s);
template <typename T> void ptr_set_rehash (PtrSet<T> *s, isize new_count);
// Rounds `n` up to the next power of two (returns `n` unchanged when it is
// already a power of two). Non-positive inputs yield 0. Used to size the
// hash arrays so that `hash & (count-1)` can replace a modulo.
isize next_pow2_isize(isize n) {
	if (n <= 0) {
		return 0;
	}
	n--;
	// Smear the highest set bit into every lower bit position, then add one.
	n |= n >> 1;
	n |= n >> 2;
	n |= n >> 4;
	n |= n >> 8;
	n |= n >> 16;
	if (gb_size_of(isize) == 8) {
		// BUG FIX: written as two 16-bit shifts instead of `n >> 32` so the
		// expression stays well-defined when isize is 32 bits wide — a shift
		// by the full bit-width is undefined behaviour even inside a branch
		// that is never taken at runtime.
		n |= (n >> 16) >> 16;
	}
	n++;
	return n;
}
template <typename T>
void ptr_set_init(PtrSet<T> *s, gbAllocator a, isize capacity) {
capacity = next_pow2_isize(gb_max(16, capacity));
array_init(&s->hashes, a, capacity);
array_init(&s->entries, a, 0, capacity);
for (isize i = 0; i < capacity; i++) {
s->hashes.data[i] = -1;
s->hashes.data[i] = PTR_SET_SENTINEL;
}
}
@@ -43,72 +67,69 @@ void ptr_set_destroy(PtrSet<T> *s) {
}
template <typename T>
gb_internal isize ptr_set__add_entry(PtrSet<T> *s, T ptr) {
gb_internal PtrSetIndex ptr_set__add_entry(PtrSet<T> *s, T ptr) {
PtrSetEntry<T> e = {};
e.ptr = ptr;
e.next = -1;
e.next = PTR_SET_SENTINEL;
array_add(&s->entries, e);
return s->entries.count-1;
return cast(PtrSetIndex)(s->entries.count-1);
}
template <typename T>
gb_internal PtrSetFindResult ptr_set__find(PtrSet<T> *s, T ptr) {
PtrSetFindResult fr = {-1, -1, -1};
if (s->hashes.count > 0) {
PtrSetFindResult fr = {PTR_SET_SENTINEL, PTR_SET_SENTINEL, PTR_SET_SENTINEL};
if (s->hashes.count != 0) {
u64 hash = 0xcbf29ce484222325ull ^ cast(u64)cast(uintptr)ptr;
u64 n = cast(u64)s->hashes.count;
fr.hash_index = cast(isize)(hash % n);
fr.entry_index = s->hashes[fr.hash_index];
while (fr.entry_index >= 0) {
if (s->entries[fr.entry_index].ptr == ptr) {
fr.hash_index = cast(PtrSetIndex)(hash & (n-1));
fr.entry_index = s->hashes.data[fr.hash_index];
while (fr.entry_index != PTR_SET_SENTINEL) {
if (s->entries.data[fr.entry_index].ptr == ptr) {
return fr;
}
fr.entry_prev = fr.entry_index;
fr.entry_index = s->entries[fr.entry_index].next;
fr.entry_index = s->entries.data[fr.entry_index].next;
}
}
return fr;
}
template <typename T>
gb_internal b32 ptr_set__full(PtrSet<T> *s) {
gb_internal bool ptr_set__full(PtrSet<T> *s) {
return 0.75f * s->hashes.count <= s->entries.count;
}
#define PTR_ARRAY_GROW_FORMULA(x) (4*(x) + 7)
GB_STATIC_ASSERT(PTR_ARRAY_GROW_FORMULA(0) > 0);
template <typename T>
gb_inline void ptr_set_grow(PtrSet<T> *s) {
isize new_count = PTR_ARRAY_GROW_FORMULA(s->entries.count);
isize new_count = s->hashes.count*2;
ptr_set_rehash(s, new_count);
}
template <typename T>
void ptr_set_rehash(PtrSet<T> *s, isize new_count) {
isize i, j;
PtrSetIndex i, j;
PtrSet<T> ns = {};
ptr_set_init(&ns, s->hashes.allocator);
array_resize(&ns.hashes, new_count);
array_reserve(&ns.entries, s->entries.count);
for (i = 0; i < new_count; i++) {
ns.hashes[i] = -1;
ns.hashes.data[i] = PTR_SET_SENTINEL;
}
for (i = 0; i < s->entries.count; i++) {
PtrSetEntry<T> *e = &s->entries[i];
PtrSetEntry<T> *e = &s->entries.data[i];
PtrSetFindResult fr;
if (ns.hashes.count == 0) {
ptr_set_grow(&ns);
}
fr = ptr_set__find(&ns, e->ptr);
j = ptr_set__add_entry(&ns, e->ptr);
if (fr.entry_prev < 0) {
ns.hashes[fr.hash_index] = j;
if (fr.entry_prev == PTR_SET_SENTINEL) {
ns.hashes.data[fr.hash_index] = j;
} else {
ns.entries[fr.entry_prev].next = j;
ns.entries.data[fr.entry_prev].next = j;
}
ns.entries[j].next = fr.entry_index;
ns.entries.data[j].next = fr.entry_index;
if (ptr_set__full(&ns)) {
ptr_set_grow(&ns);
}
@@ -120,26 +141,24 @@ void ptr_set_rehash(PtrSet<T> *s, isize new_count) {
template <typename T>
gb_inline bool ptr_set_exists(PtrSet<T> *s, T ptr) {
isize index = ptr_set__find(s, ptr).entry_index;
return index >= 0;
return index != PTR_SET_SENTINEL;
}
// Returns true if it already exists
template <typename T>
T ptr_set_add(PtrSet<T> *s, T ptr) {
isize index;
PtrSetIndex index;
PtrSetFindResult fr;
if (s->hashes.count == 0) {
ptr_set_grow(s);
}
fr = ptr_set__find(s, ptr);
if (fr.entry_index >= 0) {
index = fr.entry_index;
} else {
if (fr.entry_index == PTR_SET_SENTINEL) {
index = ptr_set__add_entry(s, ptr);
if (fr.entry_prev >= 0) {
s->entries[fr.entry_prev].next = index;
if (fr.entry_prev != PTR_SET_SENTINEL) {
s->entries.data[fr.entry_prev].next = index;
} else {
s->hashes[fr.hash_index] = index;
s->hashes.data[fr.hash_index] = index;
}
}
if (ptr_set__full(s)) {
@@ -148,32 +167,58 @@ T ptr_set_add(PtrSet<T> *s, T ptr) {
return ptr;
}
// Inserts `ptr` into the set if it is not already present.
// Returns true if `ptr` previously existed in the set, false if this call
// added it. (The typo'd inline comment used to read "existsed".)
template <typename T>
bool ptr_set_update(PtrSet<T> *s, T ptr) { // returns true if it previously existed
	bool exists = false;
	PtrSetIndex index;
	PtrSetFindResult fr;
	if (s->hashes.count == 0) {
		// Lazily allocate the hash array on first insertion
		ptr_set_grow(s);
	}
	fr = ptr_set__find(s, ptr);
	if (fr.entry_index != PTR_SET_SENTINEL) {
		exists = true;
	} else {
		// Not found: append a new entry and link it into the collision
		// chain recorded by the failed find.
		index = ptr_set__add_entry(s, ptr);
		if (fr.entry_prev != PTR_SET_SENTINEL) {
			s->entries.data[fr.entry_prev].next = index;
		} else {
			s->hashes.data[fr.hash_index] = index;
		}
	}
	if (ptr_set__full(s)) {
		// Keep the load factor bounded (see ptr_set__full)
		ptr_set_grow(s);
	}
	return exists;
}
template <typename T>
void ptr_set__erase(PtrSet<T> *s, PtrSetFindResult fr) {
PtrSetFindResult last;
if (fr.entry_prev < 0) {
s->hashes[fr.hash_index] = s->entries[fr.entry_index].next;
if (fr.entry_prev == PTR_SET_SENTINEL) {
s->hashes.data[fr.hash_index] = s->entries.data[fr.entry_index].next;
} else {
s->entries[fr.entry_prev].next = s->entries[fr.entry_index].next;
s->entries.data[fr.entry_prev].next = s->entries.data[fr.entry_index].next;
}
if (fr.entry_index == s->entries.count-1) {
array_pop(&s->entries);
return;
}
s->entries[fr.entry_index] = s->entries[s->entries.count-1];
last = ptr_set__find(s, s->entries[fr.entry_index].ptr);
if (last.entry_prev >= 0) {
s->entries[last.entry_prev].next = fr.entry_index;
s->entries.data[fr.entry_index] = s->entries.data[s->entries.count-1];
last = ptr_set__find(s, s->entries.data[fr.entry_index].ptr);
if (last.entry_prev != PTR_SET_SENTINEL) {
s->entries.data[last.entry_prev].next = fr.entry_index;
} else {
s->hashes[last.hash_index] = fr.entry_index;
s->hashes.data[last.hash_index] = fr.entry_index;
}
}
template <typename T>
void ptr_set_remove(PtrSet<T> *s, T ptr) {
PtrSetFindResult fr = ptr_set__find(s, ptr);
if (fr.entry_index >= 0) {
if (fr.entry_index != PTR_SET_SENTINEL) {
ptr_set__erase(s, fr);
}
}

View File

@@ -164,6 +164,7 @@ int string_compare(String const &x, String const &y) {
return cast(int)x[offset] - cast(int)y[offset];
}
}
return cast(int)(x.len - y.len);
}
return 0;
}

View File

@@ -118,8 +118,6 @@ TOKEN_KIND(Token__KeywordBegin, ""), \
TOKEN_KIND(Token_no_inline, "no_inline"), \
TOKEN_KIND(Token_context, "context"), \
TOKEN_KIND(Token_asm, "asm"), \
TOKEN_KIND(Token_macro, "macro"), \
TOKEN_KIND(Token_const, "const"), \
TOKEN_KIND(Token__KeywordEnd, ""), \
TOKEN_KIND(Token_Count, "")

View File

@@ -323,6 +323,8 @@ String const type_strings[] = {
enum TypeFlag : u32 {
TypeFlag_Polymorphic = 1<<1,
TypeFlag_PolySpecialized = 1<<2,
TypeFlag_InProcessOfCheckingPolymorphic = 1<<3,
TypeFlag_InProcessOfCheckingABI = 1<<4,
};
struct Type {
@@ -371,7 +373,28 @@ enum Typeid_Kind : u8 {
Typeid_Relative_Slice,
};
// IMPORTANT NOTE(bill): These flag values must stay in sync with the
// corresponding Type_Info flag constants declared in core.odin.
enum TypeInfoFlag : u32 {
	TypeInfoFlag_Comparable     = 1<<0,
	TypeInfoFlag_Simple_Compare = 1<<1,
};
bool is_type_comparable(Type *t);
bool is_type_simple_compare(Type *t);
// Computes the TypeInfoFlag bit set describing `type` for runtime type
// information. Returns 0 for a nil type.
u32 type_info_flags_of_type(Type *type) {
	if (type == nullptr) {
		return 0;
	}
	u32 flags = 0;
	if (is_type_comparable(type)) {
		flags |= TypeInfoFlag_Comparable;
	}
	if (is_type_simple_compare(type)) {
		// BUG FIX: this branch previously OR'd in TypeInfoFlag_Comparable a
		// second time, so TypeInfoFlag_Simple_Compare was never set.
		flags |= TypeInfoFlag_Simple_Compare;
	}
	return flags;
}
// TODO(bill): Should I add extra information here specifying the kind of selection?
@@ -661,12 +684,15 @@ gb_global Type *t_context_ptr = nullptr;
gb_global Type *t_source_code_location = nullptr;
gb_global Type *t_source_code_location_ptr = nullptr;
gb_global Type *t_map_key = nullptr;
gb_global Type *t_map_hash = nullptr;
gb_global Type *t_map_header = nullptr;
gb_global Type *t_vector_x86_mmx = nullptr;
gb_global Type *t_equal_proc = nullptr;
gb_global Type *t_hasher_proc = nullptr;
i64 type_size_of (Type *t);
i64 type_align_of (Type *t);
@@ -769,7 +795,8 @@ void set_base_type(Type *t, Type *base) {
Type *alloc_type(TypeKind kind) {
gbAllocator a = heap_allocator();
// gbAllocator a = heap_allocator();
gbAllocator a = permanent_allocator();
Type *t = gb_alloc_item(a, Type);
zero_item(t);
t->kind = kind;
@@ -884,6 +911,25 @@ Type *alloc_type_named(String name, Type *base, Entity *type_name) {
return t;
}
// Reports whether `calling_convention` is one of the "none"-style
// conventions: ProcCC_None, ProcCC_PureNone, or ProcCC_InlineAsm.
bool is_calling_convention_none(ProcCallingConvention calling_convention) {
	return calling_convention == ProcCC_None     ||
	       calling_convention == ProcCC_PureNone ||
	       calling_convention == ProcCC_InlineAsm;
}
// Reports whether `calling_convention` is one of the Odin-native
// conventions: ProcCC_Odin or ProcCC_Contextless.
bool is_calling_convention_odin(ProcCallingConvention calling_convention) {
	return calling_convention == ProcCC_Odin ||
	       calling_convention == ProcCC_Contextless;
}
Type *alloc_type_tuple() {
Type *t = alloc_type(Type_Tuple);
return t;
@@ -918,7 +964,6 @@ bool is_type_valid_for_keys(Type *t);
Type *alloc_type_map(i64 count, Type *key, Type *value) {
if (key != nullptr) {
GB_ASSERT(is_type_valid_for_keys(key));
GB_ASSERT(value != nullptr);
}
Type *t = alloc_type(Type_Map);
@@ -1192,20 +1237,6 @@ bool is_type_slice(Type *t) {
t = base_type(t);
return t->kind == Type_Slice;
}
bool is_type_u8_slice(Type *t) {
t = base_type(t);
if (t->kind == Type_Slice) {
return is_type_u8(t->Slice.elem);
}
return false;
}
bool is_type_u8_ptr(Type *t) {
t = base_type(t);
if (t->kind == Type_Pointer) {
return is_type_u8(t->Slice.elem);
}
return false;
}
bool is_type_proc(Type *t) {
t = base_type(t);
return t->kind == Type_Proc;
@@ -1249,6 +1280,37 @@ bool is_type_relative_slice(Type *t) {
return t->kind == Type_RelativeSlice;
}
// Reports whether the base type of `t` is a slice of u8 ([]u8).
bool is_type_u8_slice(Type *t) {
	Type *bt = base_type(t);
	if (bt->kind != Type_Slice) {
		return false;
	}
	return is_type_u8(bt->Slice.elem);
}
// Reports whether the base type of `t` is a fixed-length array of u8.
bool is_type_u8_array(Type *t) {
	Type *bt = base_type(t);
	return bt->kind == Type_Array && is_type_u8(bt->Array.elem);
}
// Reports whether the base type of `t` is a pointer to u8 (^u8).
bool is_type_u8_ptr(Type *t) {
	t = base_type(t);
	if (t->kind == Type_Pointer) {
		// BUG FIX: previously read t->Slice.elem even though the active
		// variant is Pointer; that only worked because `elem` happens to be
		// the first field of both variants. Read the matching variant.
		return is_type_u8(t->Pointer.elem);
	}
	return false;
}
// Reports whether the base type of `t` is a fixed-length array of runes.
bool is_type_rune_array(Type *t) {
	Type *bt = base_type(t);
	return bt->kind == Type_Array && is_type_rune(bt->Array.elem);
}
Type *core_array_type(Type *t) {
for (;;) {
@@ -1261,53 +1323,7 @@ Type *core_array_type(Type *t) {
return t;
}
// NOTE(bill): type can be easily compared using memcmp
bool is_type_simple_compare(Type *t) {
t = core_type(t);
switch (t->kind) {
case Type_Array:
return is_type_simple_compare(t->Array.elem);
case Type_EnumeratedArray:
return is_type_simple_compare(t->EnumeratedArray.elem);
case Type_Basic:
if (t->Basic.flags & BasicFlag_SimpleCompare) {
return true;
}
return false;
case Type_Pointer:
case Type_Proc:
case Type_BitSet:
case Type_BitField:
return true;
case Type_Struct:
for_array(i, t->Struct.fields) {
Entity *f = t->Struct.fields[i];
if (!is_type_simple_compare(f->type)) {
return false;
}
}
return true;
case Type_Union:
for_array(i, t->Union.variants) {
Type *v = t->Union.variants[i];
if (!is_type_simple_compare(v)) {
return false;
}
}
return true;
case Type_SimdVector:
return is_type_simple_compare(t->SimdVector.elem);
}
return false;
}
Type *base_complex_elem_type(Type *t) {
t = core_type(t);
@@ -1526,6 +1542,8 @@ bool is_type_valid_for_keys(Type *t) {
if (is_type_untyped(t)) {
return false;
}
return is_type_comparable(t);
#if 0
if (is_type_integer(t)) {
return true;
}
@@ -1541,8 +1559,15 @@ bool is_type_valid_for_keys(Type *t) {
if (is_type_typeid(t)) {
return true;
}
if (is_type_simple_compare(t)) {
return true;
}
if (is_type_comparable(t)) {
return true;
}
return false;
#endif
}
bool is_type_valid_bit_set_elem(Type *t) {
@@ -1695,12 +1720,23 @@ TypeTuple *get_record_polymorphic_params(Type *t) {
bool is_type_polymorphic(Type *t, bool or_specialized=false) {
if (t->flags & TypeFlag_InProcessOfCheckingPolymorphic) {
return false;
}
switch (t->kind) {
case Type_Generic:
return true;
case Type_Named:
return is_type_polymorphic(t->Named.base, or_specialized);
{
u32 flags = t->flags;
t->flags |= TypeFlag_InProcessOfCheckingPolymorphic;
bool ok = is_type_polymorphic(t->Named.base, or_specialized);
t->flags = flags;
return ok;
}
case Type_Opaque:
return is_type_polymorphic(t->Opaque.elem, or_specialized);
case Type_Pointer:
@@ -1892,10 +1928,77 @@ bool is_type_comparable(Type *t) {
case Type_Opaque:
return is_type_comparable(t->Opaque.elem);
case Type_Struct:
if (type_size_of(t) == 0) {
return false;
}
if (t->Struct.is_raw_union) {
return is_type_simple_compare(t);
}
for_array(i, t->Struct.fields) {
Entity *f = t->Struct.fields[i];
if (!is_type_comparable(f->type)) {
return false;
}
}
return true;
}
return false;
}
// NOTE(bill): type can be easily compared using memcmp
// Reports whether values of `t` can be compared for equality with a raw
// byte comparison. Works on the core type (aliases/named wrappers stripped).
// NOTE(review): struct padding bytes are presumably guaranteed/zeroed
// elsewhere for this to be sound — confirm against the equality codegen.
bool is_type_simple_compare(Type *t) {
	t = core_type(t);

	switch (t->kind) {
	case Type_Array:
		// An array is memcmp-comparable iff its element type is
		return is_type_simple_compare(t->Array.elem);

	case Type_EnumeratedArray:
		return is_type_simple_compare(t->EnumeratedArray.elem);

	case Type_Basic:
		if (t->Basic.flags & BasicFlag_SimpleCompare) {
			return true;
		}
		if (t->Basic.kind == Basic_typeid) {
			// typeid is an opaque scalar, safe to compare byte-wise
			return true;
		}
		return false;

	case Type_Pointer:
	case Type_Proc:
	case Type_BitSet:
	case Type_BitField:
		// Plain scalar/bit representations
		return true;

	case Type_Struct:
		// Every field must itself be memcmp-comparable
		for_array(i, t->Struct.fields) {
			Entity *f = t->Struct.fields[i];
			if (!is_type_simple_compare(f->type)) {
				return false;
			}
		}
		return true;

	case Type_Union:
		// Every variant must itself be memcmp-comparable
		for_array(i, t->Union.variants) {
			Type *v = t->Union.variants[i];
			if (!is_type_simple_compare(v)) {
				return false;
			}
		}
		return true;

	case Type_SimdVector:
		return is_type_simple_compare(t->SimdVector.elem);

	}

	// All other kinds (slices, maps, strings, etc.) are not byte-comparable
	return false;
}
Type *strip_type_aliasing(Type *x) {
if (x == nullptr) {
return x;
@@ -2317,7 +2420,7 @@ Selection lookup_field_from_index(Type *type, i64 index) {
GB_ASSERT(is_type_struct(type) || is_type_union(type) || is_type_tuple(type));
type = base_type(type);
gbAllocator a = heap_allocator();
gbAllocator a = permanent_allocator();
isize max_count = 0;
switch (type->kind) {
case Type_Struct: max_count = type->Struct.fields.count; break;
@@ -2365,7 +2468,6 @@ Selection lookup_field_from_index(Type *type, i64 index) {
return empty_selection;
}
Entity *scope_lookup_current(Scope *s, String const &name);
Selection lookup_field_with_selection(Type *type_, String field_name, bool is_type, Selection sel, bool allow_blank_ident) {
@@ -2375,7 +2477,6 @@ Selection lookup_field_with_selection(Type *type_, String field_name, bool is_ty
return empty_selection;
}
gbAllocator a = heap_allocator();
Type *type = type_deref(type_);
bool is_ptr = type != type_;
sel.indirect = sel.indirect || is_ptr;
@@ -2964,7 +3065,7 @@ i64 type_align_of_internal(Type *t, TypePath *path) {
}
Array<i64> type_set_offsets_of(Array<Entity *> const &fields, bool is_packed, bool is_raw_union) {
gbAllocator a = heap_allocator();
gbAllocator a = permanent_allocator();
auto offsets = array_make<i64>(a, fields.count);
i64 curr_offset = 0;
if (is_raw_union) {
@@ -3353,6 +3454,58 @@ Type *reduce_tuple_to_single_type(Type *original_type) {
}
// Builds an anonymous struct type whose fields are exactly `field_types`
// (in order), each as a scope-less, already-resolved field entity.
// `is_packed` marks the struct as packed.
// NOTE(review): the fields array uses heap_allocator() while the Type itself
// comes from permanent_allocator() (see alloc_type) — confirm this mix is
// intentional.
Type *alloc_type_struct_from_field_types(Type **field_types, isize field_count, bool is_packed) {
	Type *t = alloc_type_struct();
	t->Struct.fields = array_make<Entity *>(heap_allocator(), field_count);

	// Fields belong to no scope and carry a blank name
	Scope *scope = nullptr;
	for_array(i, t->Struct.fields) {
		t->Struct.fields[i] = alloc_entity_field(scope, blank_token, field_types[i], false, cast(i32)i, EntityState_Resolved);
	}
	t->Struct.is_packed = is_packed;

	return t;
}
// Builds a tuple type from `field_types`. Returns nullptr for an empty list.
// When `must_be_tuple` is false a single-element list collapses to that
// element type instead of a 1-tuple. Each variable is a scope-less,
// blank-named parameter entity.
Type *alloc_type_tuple_from_field_types(Type **field_types, isize field_count, bool is_packed, bool must_be_tuple) {
	if (field_count == 0) {
		return nullptr;
	}
	if (!must_be_tuple && field_count == 1) {
		// Degenerate case: no wrapping tuple needed
		return field_types[0];
	}

	Type *t = alloc_type_tuple();
	t->Tuple.variables = array_make<Entity *>(heap_allocator(), field_count);

	Scope *scope = nullptr;
	for_array(i, t->Tuple.variables) {
		t->Tuple.variables[i] = alloc_entity_param(scope, blank_token, field_types[i], false, false);
	}
	t->Tuple.is_packed = is_packed;

	return t;
}
// Builds a procedure type from raw parameter/result types.
// `param_types`/`param_count` become the parameter tuple; `results` may be
// nullptr (no results), a tuple, or a single type (which is wrapped into a
// 1-tuple). `is_c_vararg` marks a C-style variadic procedure.
Type *alloc_type_proc_from_types(Type **param_types, unsigned param_count, Type *results, bool is_c_vararg, ProcCallingConvention calling_convention) {

	Type *params = alloc_type_tuple_from_field_types(param_types, param_count, false, true);
	isize results_count = 0;
	if (results != nullptr) {
		if (results->kind != Type_Tuple) {
			// Normalize a bare result type into a 1-tuple
			results = alloc_type_tuple_from_field_types(&results, 1, false, true);
		}
		results_count = results->Tuple.variables.count;
	}

	Scope *scope = nullptr;
	Type *t = alloc_type_proc(scope, params, param_count, results, results_count, false, calling_convention);
	t->Proc.c_vararg = is_c_vararg;
	return t;
}
gbString write_type_to_string(gbString str, Type *type) {
if (type == nullptr) {
return gb_string_appendc(str, "<no type>");
@@ -3671,3 +3824,6 @@ gbString type_to_string(Type *type) {
return write_type_to_string(gb_string_make(heap_allocator(), ""), type);
}