Merge branch 'master' into llvm-12-support

gingerBill committed 2021-07-10 15:15:10 +01:00
49 changed files with 3189 additions and 1100 deletions


@@ -0,0 +1,83 @@
package bufio
import "core:io"
// Lookahead_Reader provides io lookahead.
// This is useful for tokenizers/parsers.
// Lookahead_Reader is similar to bufio.Reader, but unlike bufio.Reader, its buffer size
// will EXACTLY match the specified size, whereas bufio.Reader's buffer size may differ from the specified size.
// This makes sure that the underlying reader is never accidentally read beyond the expected size.
Lookahead_Reader :: struct {
r: io.Reader,
buf: []byte,
n: int,
}
lookahead_reader_init :: proc(lr: ^Lookahead_Reader, r: io.Reader, buf: []byte) -> ^Lookahead_Reader {
lr.r = r;
lr.buf = buf;
lr.n = 0;
return lr;
}
lookahead_reader_buffer :: proc(lr: ^Lookahead_Reader) -> []byte {
return lr.buf[:lr.n];
}
// lookahead_reader_peek returns a slice of the Lookahead_Reader's buffer which holds n bytes.
// If the Lookahead_Reader does not already hold enough bytes, it will read from the underlying reader to populate the rest.
// NOTE: The returned buffer is not a copy of the underlying buffer
lookahead_reader_peek :: proc(lr: ^Lookahead_Reader, n: int) -> ([]byte, io.Error) {
switch {
case n < 0:
return nil, .Negative_Read;
case n > len(lr.buf):
return nil, .Buffer_Full;
}
n := n;
err: io.Error;
read_count: int;
if lr.n < n {
read_count, err = io.read_at_least(lr.r, lr.buf[lr.n:], n-lr.n);
if err == .Unexpected_EOF {
err = .EOF;
}
}
lr.n += read_count;
if n > lr.n {
n = lr.n;
}
return lr.buf[:n], err;
}
// lookahead_reader_peek_all returns a slice of the Lookahead_Reader's buffer, populating the full buffer.
// If the Lookahead_Reader does not already hold enough bytes, it will read from the underlying reader to populate the rest.
// NOTE: The returned buffer is not a copy of the underlying buffer
lookahead_reader_peek_all :: proc(lr: ^Lookahead_Reader) -> ([]byte, io.Error) {
return lookahead_reader_peek(lr, len(lr.buf));
}
// lookahead_reader_consume drops the first n populated bytes from the Lookahead_Reader.
lookahead_reader_consume :: proc(lr: ^Lookahead_Reader, n: int) -> io.Error {
switch {
case n == 0:
return nil;
case n < 0:
return .Negative_Read;
case lr.n < n:
return .Short_Buffer;
}
copy(lr.buf, lr.buf[n:lr.n]);
lr.n -= n;
return nil;
}
lookahead_reader_consume_all :: proc(lr: ^Lookahead_Reader) -> io.Error {
return lookahead_reader_consume(lr, lr.n);
}
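
For reference, a minimal usage sketch of the Lookahead_Reader API above (the package, procedure, and input values are illustrative). An io.Reader is obtained from an in-memory slice via bytes.Reader and io.to_reader, four bytes are peeked without being consumed, then two are dropped:

package lookahead_example
import "core:bufio"
import "core:bytes"
import "core:io"
lookahead_example :: proc() {
	data := []byte{1, 2, 3, 4, 5, 6, 7, 8};
	br: bytes.Reader;
	bytes.reader_init(&br, data);
	r, _ := io.to_reader(bytes.reader_to_stream(&br));
	lr: bufio.Lookahead_Reader;
	backing: [4]byte;
	bufio.lookahead_reader_init(&lr, r, backing[:]);
	// Peek at the next 4 bytes without consuming them.
	peeked, _ := bufio.lookahead_reader_peek(&lr, 4);
	assert(len(peeked) == 4);
	// Drop the first 2 buffered bytes; the remaining 2 stay available.
	bufio.lookahead_reader_consume(&lr, 2);
	assert(len(bufio.lookahead_reader_buffer(&lr)) == 2);
}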


@@ -17,6 +17,8 @@ Reader :: struct {
last_byte: int, // last byte read, invalid is -1
last_rune_size: int, // size of last rune read, invalid is -1
max_consecutive_empty_reads: int,
}
@@ -25,7 +27,7 @@ DEFAULT_BUF_SIZE :: 4096;
@(private)
MIN_READ_BUFFER_SIZE :: 16;
@(private)
MAX_CONSECUTIVE_EMPTY_READS :: 128;
DEFAULT_MAX_CONSECUTIVE_EMPTY_READS :: 128;
reader_init :: proc(b: ^Reader, rd: io.Reader, size: int = DEFAULT_BUF_SIZE, allocator := context.allocator) {
size := size;
@@ -71,8 +73,12 @@ _reader_read_new_chunk :: proc(b: ^Reader) -> io.Error {
return .Buffer_Full;
}
if b.max_consecutive_empty_reads <= 0 {
b.max_consecutive_empty_reads = DEFAULT_MAX_CONSECUTIVE_EMPTY_READS;
}
// read new data, and try a limited number of times
for i := MAX_CONSECUTIVE_EMPTY_READS; i > 0; i -= 1 {
for i := b.max_consecutive_empty_reads; i > 0; i -= 1 {
n, err := io.read(b.rd, b.buf[b.w:]);
if n < 0 {
return .Negative_Read;

core/bufio/scanner.odin

@@ -0,0 +1,340 @@
package bufio
import "core:bytes"
import "core:io"
import "core:mem"
import "core:unicode/utf8"
import "intrinsics"
// Extra errors returned by scanning procedures
Scanner_Extra_Error :: enum i32 {
Negative_Advance,
Advanced_Too_Far,
Bad_Read_Count,
Too_Long,
Too_Short,
}
Scanner_Error :: union {
io.Error,
Scanner_Extra_Error,
}
// Split_Proc is the signature of the split procedure used to tokenize the input.
Split_Proc :: proc(data: []byte, at_eof: bool) -> (advance: int, token: []byte, err: Scanner_Error, final_token: bool);
Scanner :: struct {
r: io.Reader,
split: Split_Proc,
buf: [dynamic]byte,
max_token_size: int,
start: int,
end: int,
token: []byte,
_err: Scanner_Error,
max_consecutive_empty_reads: int,
successive_empty_token_count: int,
scan_called: bool,
done: bool,
}
DEFAULT_MAX_SCAN_TOKEN_SIZE :: 1<<16;
@(private)
_INIT_BUF_SIZE :: 4096;
scanner_init :: proc(s: ^Scanner, r: io.Reader, buf_allocator := context.allocator) -> ^Scanner {
s.r = r;
s.split = scan_lines;
s.max_token_size = DEFAULT_MAX_SCAN_TOKEN_SIZE;
s.buf.allocator = buf_allocator;
return s;
}
scanner_init_with_buffer :: proc(s: ^Scanner, r: io.Reader, buf: []byte) -> ^Scanner {
s.r = r;
s.split = scan_lines;
s.max_token_size = DEFAULT_MAX_SCAN_TOKEN_SIZE;
s.buf = mem.buffer_from_slice(buf);
resize(&s.buf, cap(s.buf));
return s;
}
scanner_destroy :: proc(s: ^Scanner) {
delete(s.buf);
}
// Returns the first non-EOF error that was encountered by the scanner
scanner_error :: proc(s: ^Scanner) -> Scanner_Error {
switch s._err {
case .EOF, .None:
return nil;
}
return s._err;
}
// Returns the most recent token created by scanner_scan.
// The underlying array may point to data that may be overwritten
// by another call to scanner_scan.
// Treat the returned value as if it is immutable.
scanner_bytes :: proc(s: ^Scanner) -> []byte {
return s.token;
}
// Returns the most recent token created by scanner_scan.
// The underlying array may point to data that may be overwritten
// by another call to scanner_scan.
// Treat the returned value as if it is immutable.
scanner_text :: proc(s: ^Scanner) -> string {
return string(s.token);
}
// scanner_scan advances the scanner to the next token, which is then available
// through scanner_bytes or scanner_text. It returns false when the scan stops,
// either by reaching the end of the input or on error.
scanner_scan :: proc(s: ^Scanner) -> bool {
set_err :: proc(s: ^Scanner, err: Scanner_Error) {
err := err;
if err == .None {
err = nil;
}
switch s._err {
case nil, .EOF:
s._err = err;
}
}
if s.done {
return false;
}
s.scan_called = true;
for {
// Check if a token is possible with what is available
// Allow the split procedure to recover if it fails
if s.start < s.end || s._err != nil {
advance, token, err, final_token := s.split(s.buf[s.start:s.end], s._err != nil);
if final_token {
s.token = token;
s.done = true;
return true;
}
if err != nil {
set_err(s, err);
return false;
}
// Do advance
if advance < 0 {
set_err(s, .Negative_Advance);
return false;
}
if advance > s.end-s.start {
set_err(s, .Advanced_Too_Far);
return false;
}
s.start += advance;
s.token = token;
if s.token != nil {
if s._err == nil || advance > 0 {
s.successive_empty_token_count = 0;
} else {
s.successive_empty_token_count += 1;
if s.max_consecutive_empty_reads <= 0 {
s.max_consecutive_empty_reads = DEFAULT_MAX_CONSECUTIVE_EMPTY_READS;
}
if s.successive_empty_token_count > s.max_consecutive_empty_reads {
set_err(s, .No_Progress);
return false;
}
}
return true;
}
}
// If an error is hit, no token can be created
if s._err != nil {
s.start = 0;
s.end = 0;
return false;
}
// More data needs to be read
if s.start > 0 && (s.end == len(s.buf) || s.start > len(s.buf)/2) {
copy(s.buf[:], s.buf[s.start:s.end]);
s.end -= s.start;
s.start = 0;
}
could_be_too_short := false;
// Resize the buffer if full
if s.end == len(s.buf) {
if s.max_token_size <= 0 {
s.max_token_size = DEFAULT_MAX_SCAN_TOKEN_SIZE;
}
if len(s.buf) >= s.max_token_size {
set_err(s, .Too_Long);
return false;
}
// overflow check
new_size := _INIT_BUF_SIZE;
if len(s.buf) > 0 {
overflowed: bool;
if new_size, overflowed = intrinsics.overflow_mul(len(s.buf), 2); overflowed {
set_err(s, .Too_Long);
return false;
}
}
old_size := len(s.buf);
new_size = min(new_size, s.max_token_size);
resize(&s.buf, new_size);
s.end -= s.start;
s.start = 0;
could_be_too_short = old_size >= len(s.buf);
}
// Read data into the buffer
loop := 0;
for {
n, err := io.read(s.r, s.buf[s.end:len(s.buf)]);
if n < 0 || len(s.buf)-s.end < n {
set_err(s, .Bad_Read_Count);
break;
}
s.end += n;
if err != nil {
set_err(s, err);
break;
}
if n > 0 {
s.successive_empty_token_count = 0;
break;
}
loop += 1;
if s.max_consecutive_empty_reads <= 0 {
s.max_consecutive_empty_reads = DEFAULT_MAX_CONSECUTIVE_EMPTY_READS;
}
if loop > s.max_consecutive_empty_reads {
if could_be_too_short {
set_err(s, .Too_Short);
} else {
set_err(s, .No_Progress);
}
break;
}
}
}
}
scan_bytes :: proc(data: []byte, at_eof: bool) -> (advance: int, token: []byte, err: Scanner_Error, final_token: bool) {
if at_eof && len(data) == 0 {
return;
}
return 1, data[0:1], nil, false;
}
scan_runes :: proc(data: []byte, at_eof: bool) -> (advance: int, token: []byte, err: Scanner_Error, final_token: bool) {
if at_eof && len(data) == 0 {
return;
}
if data[0] < utf8.RUNE_SELF {
advance = 1;
token = data[0:1];
return;
}
_, width := utf8.decode_rune(data);
if width > 1 {
advance = width;
token = data[0:width];
return;
}
if !at_eof && !utf8.full_rune(data) {
return;
}
@thread_local ERROR_RUNE := []byte{0xef, 0xbf, 0xbd};
advance = 1;
token = ERROR_RUNE;
return;
}
scan_words :: proc(data: []byte, at_eof: bool) -> (advance: int, token: []byte, err: Scanner_Error, final_token: bool) {
is_space :: proc "contextless" (r: rune) -> bool {
switch r {
// lower ones
case ' ', '\t', '\n', '\v', '\f', '\r':
return true;
case '\u0085', '\u00a0':
return true;
// higher ones
case '\u2000' ..= '\u200a':
return true;
case '\u1680', '\u2028', '\u2029', '\u202f', '\u205f', '\u3000':
return true;
}
return false;
}
// skip spaces at the beginning
start := 0;
for width := 0; start < len(data); start += width {
r: rune;
r, width = utf8.decode_rune(data[start:]);
if !is_space(r) {
break;
}
}
for width, i := 0, start; i < len(data); i += width {
r: rune;
r, width = utf8.decode_rune(data[i:]);
if is_space(r) {
advance = i+width;
token = data[start:i];
return;
}
}
if at_eof && len(data) > start {
advance = len(data);
token = data[start:];
return;
}
advance = start;
return;
}
scan_lines :: proc(data: []byte, at_eof: bool) -> (advance: int, token: []byte, err: Scanner_Error, final_token: bool) {
trim_carriage_return :: proc "contextless" (data: []byte) -> []byte {
if len(data) > 0 && data[len(data)-1] == '\r' {
return data[0:len(data)-1];
}
return data;
}
if at_eof && len(data) == 0 {
return;
}
if i := bytes.index_byte(data, '\n'); i >= 0 {
advance = i+1;
token = trim_carriage_return(data[0:i]);
return;
}
if at_eof {
advance = len(data);
token = trim_carriage_return(data);
}
return;
}
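
For reference, a small sketch that drives the Scanner above with the scan_words split procedure (the package, procedure, and sample text are illustrative):

package scanner_example
import "core:bufio"
import "core:bytes"
import "core:fmt"
import "core:io"
scan_example :: proc() {
	text := "one two\nthree four\n";
	input := transmute([]byte)text;
	br: bytes.Reader;
	bytes.reader_init(&br, input);
	r, _ := io.to_reader(bytes.reader_to_stream(&br));
	s: bufio.Scanner;
	bufio.scanner_init(&s, r);
	defer bufio.scanner_destroy(&s);
	s.split = bufio.scan_words; // the default split is scan_lines
	for bufio.scanner_scan(&s) {
		fmt.println(bufio.scanner_text(&s)); // "one", "two", "three", "four"
	}
	if err := bufio.scanner_error(&s); err != nil {
		fmt.println("scan error:", err);
	}
}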


@@ -15,6 +15,8 @@ Writer :: struct {
err: io.Error,
max_consecutive_empty_writes: int,
}
writer_init :: proc(b: ^Writer, wr: io.Writer, size: int = DEFAULT_BUF_SIZE, allocator := context.allocator) {
@@ -185,16 +187,20 @@ writer_read_from :: proc(b: ^Writer, r: io.Reader) -> (n: i64, err: io.Error) {
return n, ferr;
}
}
if b.max_consecutive_empty_writes <= 0 {
b.max_consecutive_empty_writes = DEFAULT_MAX_CONSECUTIVE_EMPTY_READS;
}
m: int;
nr := 0;
for nr < MAX_CONSECUTIVE_EMPTY_READS {
for nr < b.max_consecutive_empty_writes {
m, err = io.read(r, b.buf[b.n:]);
if m != 0 || err != nil {
break;
}
nr += 1;
}
if nr == MAX_CONSECUTIVE_EMPTY_READS {
if nr == b.max_consecutive_empty_writes {
return n, .No_Progress;
}
b.n += m;


@@ -1,187 +0,0 @@
package bytes
/*
Copyright 2021 Jeroen van Rijn <nom@duclavier.com>.
Made available under Odin's BSD-2 license.
List of contributors:
Jeroen van Rijn: Initial implementation.
`bytes.Buffer` type conversion helpers.
*/
import "core:intrinsics"
import "core:mem"
need_endian_conversion :: proc($FT: typeid, $TT: typeid) -> (res: bool) {
// true if platform endian
f: bool;
t: bool;
when ODIN_ENDIAN == "little" {
f = intrinsics.type_is_endian_platform(FT) || intrinsics.type_is_endian_little(FT);
t = intrinsics.type_is_endian_platform(TT) || intrinsics.type_is_endian_little(TT);
return f != t;
} else {
f = intrinsics.type_is_endian_platform(FT) || intrinsics.type_is_endian_big(FT);
t = intrinsics.type_is_endian_platform(TT) || intrinsics.type_is_endian_big(TT);
return f != t;
}
return;
}
/*
Input:
count: number of elements
$TT: destination type
$FT: source type
from_buffer: buffer to convert
force_convert: cast each element separately
Output:
res: Converted/created buffer of []TT.
backing: ^bytes.Buffer{} backing the converted data.
alloc: Buffer was freshly allocated because we couldn't convert in-place. Points to `from_buffer` if `false`.
err: True if we passed too few elements or allocation failed, etc.
If `from_buffer` is empty, the input type $FT is ignored and `create_buffer_of_type` is called to create a fresh buffer.
This helper will try to do as little work as possible, so if you're converting between two equally sized types,
and they have compatible endianness, the contents will simply be reinterpreted using `slice_data_cast`.
If you want each element to be converted in this case, set `force_convert` to `true`.
For example, converting `[]u8{0, 60}` from `[]f16` to `[]u16` will return `[15360]` when simply reinterpreted,
and `[1]` if force converted.
Should you for example want to promote `[]f16` to `[]f32` (or truncate `[]f32` to `[]f16`), the size of these elements
being different will result in a conversion anyway, so this flag is unnecessary in cases like these.
Example:
fmt.println("Convert []f16le (x2) to []f32 (x2).");
b := []u8{0, 60, 0, 60}; // == []f16{1.0, 1.0}
res, backing, had_to_allocate, err := bytes.buffer_convert_to_type(2, f32, f16le, b);
fmt.printf("res : %v\n", res); // [1.000, 1.000]
fmt.printf("backing : %v\n", backing); // &Buffer{buf = [0, 0, 128, 63, 0, 0, 128, 63], off = 0, last_read = Invalid}
fmt.printf("allocated: %v\n", had_to_allocate); // true
fmt.printf("err : %v\n", err); // false
if had_to_allocate { defer bytes.buffer_destroy(backing); }
fmt.println("\nConvert []f16le (x2) to []u16 (x2).");
res2: []u16;
res2, backing, had_to_allocate, err = bytes.buffer_convert_to_type(2, u16, f16le, b);
fmt.printf("res : %v\n", res2); // [15360, 15360]
fmt.printf("backing : %v\n", backing); // Buffer.buf points to `b` because it could be converted in-place.
fmt.printf("allocated: %v\n", had_to_allocate); // false
fmt.printf("err : %v\n", err); // false
if had_to_allocate { defer bytes.buffer_destroy(backing); }
fmt.println("\nConvert []f16le (x2) to []u16 (x2), force_convert=true.");
res2, backing, had_to_allocate, err = bytes.buffer_convert_to_type(2, u16, f16le, b, true);
fmt.printf("res : %v\n", res2); // [1, 1]
fmt.printf("backing : %v\n", backing); // Buffer.buf points to `b` because it could be converted in-place.
fmt.printf("allocated: %v\n", had_to_allocate); // false
fmt.printf("err : %v\n", err); // false
if had_to_allocate { defer bytes.buffer_destroy(backing); }
*/
buffer_convert_to_type :: proc(count: int, $TT: typeid, $FT: typeid, from_buffer: []u8, force_convert := false) -> (
res: []TT, backing: ^Buffer, alloc: bool, err: bool) {
backing = new(Buffer);
if len(from_buffer) > 0 {
/*
Check if we've been given enough input elements.
*/
from := mem.slice_data_cast([]FT, from_buffer);
if len(from) != count {
err = true;
return;
}
/*
We can early out if the types are exactly identical.
This needs to be `when`, or res = from will fail if the types are different.
*/
when FT == TT {
res = from;
buffer_init(backing, from_buffer);
return;
}
/*
We can do a data cast if in-size == out-size and no endian conversion is needed.
*/
convert := need_endian_conversion(FT, TT);
convert |= (size_of(TT) * count != len(from_buffer));
convert |= force_convert;
if !convert {
// It's just a data cast
res = mem.slice_data_cast([]TT, from_buffer);
buffer_init(backing, from_buffer);
if len(res) != count {
err = true;
}
return;
} else {
if size_of(TT) * count == len(from_buffer) {
/*
Same size, can do an in-place Endianness conversion.
If `force_convert`, this also handles the per-element cast instead of slice_data_cast.
*/
res = mem.slice_data_cast([]TT, from_buffer);
buffer_init(backing, from_buffer);
for v, i in from {
res[i] = TT(v);
}
} else {
/*
Result is a different size, we need to allocate an output buffer.
*/
size := size_of(TT) * count;
buffer_init_allocator(backing, size, size, context.allocator);
alloc = true;
res = mem.slice_data_cast([]TT, backing.buf[:]);
if len(res) != count {
err = true;
return;
}
for v, i in from {
res[i] = TT(v);
}
}
}
} else {
/*
The input buffer is empty, so we'll have to create a new one for []TT of length count.
*/
res, backing, err = buffer_create_of_type(count, TT);
alloc = true;
}
return;
}
buffer_create_of_type :: proc(count: int, $TT: typeid) -> (res: []TT, backing: ^Buffer, err: bool) {
backing = new(Buffer);
size := size_of(TT) * count;
buffer_init_allocator(backing, size, size, context.allocator);
res = mem.slice_data_cast([]TT, backing.buf[:]);
if len(res) != count {
err = true;
}
return;
}


@@ -10,8 +10,40 @@ package compress
import "core:io"
import "core:image"
import "core:bytes"
/*
These settings bound how much compression algorithms will allocate for their output buffer.
If streaming their output, these are unnecessary and will be ignored.
*/
/*
When a decompression routine doesn't stream its output, but writes to a buffer,
we pre-allocate an output buffer to speed up decompression. The default is 1 MiB.
*/
COMPRESS_OUTPUT_ALLOCATE_MIN :: int(#config(COMPRESS_OUTPUT_ALLOCATE_MIN, 1 << 20));
/*
This bounds the maximum a buffer will resize to as needed, or the maximum we'll
pre-allocate if you inform the decompression routine you know the payload size.
For reference, the largest payload size of a GZIP file is 4 GiB.
*/
when size_of(uintptr) == 8 {
/*
For 64-bit platforms, we set the default max buffer size to 4 GiB,
which is GZIP and PKZIP's max payload size.
*/
COMPRESS_OUTPUT_ALLOCATE_MAX :: int(#config(COMPRESS_OUTPUT_ALLOCATE_MAX, 1 << 32));
} else {
/*
For 32-bit platforms, we set the default max buffer size to 512 MiB.
*/
COMPRESS_OUTPUT_ALLOCATE_MAX :: int(#config(COMPRESS_OUTPUT_ALLOCATE_MAX, 1 << 29));
}
// when #config(TRACY_ENABLE, false) { import tracy "shared:odin-tracy" }
Error :: union {
General_Error,
@@ -36,6 +68,13 @@ General_Error :: enum {
Checksum_Failed,
Incompatible_Options,
Unimplemented,
/*
Memory errors
*/
Allocation_Failed,
Resize_Failed,
}
GZIP_Error :: enum {
@@ -46,6 +85,20 @@ GZIP_Error :: enum {
Comment_Too_Long,
Payload_Length_Invalid,
Payload_CRC_Invalid,
/*
GZIP's payload can be a maximum of max(u32le), or 4 GiB.
If you tell it you expect it to contain more, that's obviously an error.
*/
Payload_Size_Exceeds_Max_Payload,
/*
For buffered instead of streamed output, the payload size can't exceed
the max set by the `COMPRESS_OUTPUT_ALLOCATE_MAX` switch in compress/common.odin.
You can tweak this setting using `-define:COMPRESS_OUTPUT_ALLOCATE_MAX=size_in_bytes`
*/
Output_Exceeds_COMPRESS_OUTPUT_ALLOCATE_MAX,
}
ZIP_Error :: enum {
@@ -72,148 +125,354 @@ Deflate_Error :: enum {
BType_3,
}
// General context for ZLIB, LZW, etc.
Context :: struct {
code_buffer: u32,
num_bits: i8,
/*
num_bits will be set to -100 if the buffer is malformed
*/
eof: b8,
input: io.Stream,
output: io.Stream,
bytes_written: i64,
/*
Used to update hash as we write instead of all at once.
*/
rolling_hash: u32,
// General I/O context for ZLIB, LZW, etc.
Context_Memory_Input :: struct #packed {
input_data: []u8,
output: ^bytes.Buffer,
bytes_written: i64,
// Sliding window buffer. Size must be a power of two.
window_size: i64,
window_mask: i64,
last: ^[dynamic]byte,
code_buffer: u64,
num_bits: u64,
/*
If we know the raw data size, we can optimize the reads.
If we know the data size, we can optimize the reads and writes.
*/
uncompressed_size: i64,
input_data: []u8,
size_packed: i64,
size_unpacked: i64,
}
#assert(size_of(Context_Memory_Input) == 64);
Context_Stream_Input :: struct #packed {
input_data: []u8,
input: io.Stream,
output: ^bytes.Buffer,
bytes_written: i64,
code_buffer: u64,
num_bits: u64,
/*
If we know the data size, we can optimize the reads and writes.
*/
size_packed: i64,
size_unpacked: i64,
/*
Flags:
`input_fully_in_memory`
true = This tells us we read input from `input_data` exclusively. [] = EOF.
false = Try to refill `input_data` from the `input` stream.
*/
input_fully_in_memory: b8,
padding: [1]u8,
}
// Stream helpers
/*
TODO: These need to be optimized.
Streams should really only check if a certain method is available once, perhaps even during setup.
TODO: The stream versions should really only check if a certain method is available once, perhaps even during setup.
Bit and byte readers may be merged so that reading bytes will grab them from the bit buffer first.
This simplifies end-of-stream handling where bits may be left in the bit buffer.
*/
read_data :: #force_inline proc(c: ^Context, $T: typeid) -> (res: T, err: io.Error) {
when #config(TRACY_ENABLE, false) { tracy.ZoneN("Read Data"); }
b := make([]u8, size_of(T), context.temp_allocator);
r, e1 := io.to_reader(c.input);
_, e2 := io.read(r, b);
if !e1 || e2 != .None {
return T{}, e2;
// TODO: Make these return compress.Error errors.
input_size_from_memory :: proc(z: ^Context_Memory_Input) -> (res: i64, err: Error) {
return i64(len(z.input_data)), nil;
}
input_size_from_stream :: proc(z: ^Context_Stream_Input) -> (res: i64, err: Error) {
return io.size(z.input), nil;
}
input_size :: proc{input_size_from_memory, input_size_from_stream};
@(optimization_mode="speed")
read_slice_from_memory :: #force_inline proc(z: ^Context_Memory_Input, size: int) -> (res: []u8, err: io.Error) {
#no_bounds_check {
if len(z.input_data) >= size {
res = z.input_data[:size];
z.input_data = z.input_data[size:];
return res, .None;
}
}
res = (^T)(raw_data(b))^;
return res, .None;
if len(z.input_data) == 0 {
return []u8{}, .EOF;
} else {
return []u8{}, .Short_Buffer;
}
}
read_u8 :: #force_inline proc(z: ^Context) -> (res: u8, err: io.Error) {
when #config(TRACY_ENABLE, false) { tracy.ZoneN("Read u8"); }
return read_data(z, u8);
@(optimization_mode="speed")
read_slice_from_stream :: #force_inline proc(z: ^Context_Stream_Input, size: int) -> (res: []u8, err: io.Error) {
b := make([]u8, size, context.temp_allocator);
_, e := z.input->impl_read(b[:]);
if e == .None {
return b, .None;
}
return []u8{}, e;
}
peek_data :: #force_inline proc(c: ^Context, $T: typeid) -> (res: T, err: io.Error) {
when #config(TRACY_ENABLE, false) { tracy.ZoneN("Peek Data"); }
read_slice :: proc{read_slice_from_memory, read_slice_from_stream};
@(optimization_mode="speed")
read_data :: #force_inline proc(z: ^$C, $T: typeid) -> (res: T, err: io.Error) {
b, e := read_slice(z, size_of(T));
if e == .None {
return (^T)(&b[0])^, .None;
}
return T{}, e;
}
@(optimization_mode="speed")
read_u8_from_memory :: #force_inline proc(z: ^Context_Memory_Input) -> (res: u8, err: io.Error) {
#no_bounds_check {
if len(z.input_data) >= 1 {
res = z.input_data[0];
z.input_data = z.input_data[1:];
return res, .None;
}
}
return 0, .EOF;
}
@(optimization_mode="speed")
read_u8_from_stream :: #force_inline proc(z: ^Context_Stream_Input) -> (res: u8, err: io.Error) {
b, e := read_slice_from_stream(z, 1);
if e == .None {
return b[0], .None;
}
return 0, e;
}
read_u8 :: proc{read_u8_from_memory, read_u8_from_stream};
/*
You would typically only use this at the end of Inflate, to drain bits from the code buffer
preferentially.
*/
@(optimization_mode="speed")
read_u8_prefer_code_buffer_lsb :: #force_inline proc(z: ^$C) -> (res: u8, err: io.Error) {
if z.num_bits >= 8 {
res = u8(read_bits_no_refill_lsb(z, 8));
} else {
size, _ := input_size(z);
if size > 0 {
res, err = read_u8(z);
} else {
err = .EOF;
}
}
return;
}
@(optimization_mode="speed")
peek_data_from_memory :: #force_inline proc(z: ^Context_Memory_Input, $T: typeid) -> (res: T, err: io.Error) {
size :: size_of(T);
#no_bounds_check {
if len(z.input_data) >= size {
buf := z.input_data[:size];
return (^T)(&buf[0])^, .None;
}
}
if len(z.input_data) == 0 {
return T{}, .EOF;
} else {
return T{}, .Short_Buffer;
}
}
@(optimization_mode="speed")
peek_data_from_stream :: #force_inline proc(z: ^Context_Stream_Input, $T: typeid) -> (res: T, err: io.Error) {
size :: size_of(T);
// Get current position to read from.
curr, e1 := c.input->impl_seek(0, .Current);
curr, e1 := z.input->impl_seek(0, .Current);
if e1 != .None {
return T{}, e1;
}
r, e2 := io.to_reader_at(c.input);
r, e2 := io.to_reader_at(z.input);
if !e2 {
return T{}, .Empty;
}
b := make([]u8, size_of(T), context.temp_allocator);
_, e3 := io.read_at(r, b, curr);
when size <= 128 {
b: [size]u8;
} else {
b := make([]u8, size, context.temp_allocator);
}
_, e3 := io.read_at(r, b[:], curr);
if e3 != .None {
return T{}, .Empty;
}
res = (^T)(raw_data(b))^;
res = (^T)(&b[0])^;
return res, .None;
}
peek_data :: proc{peek_data_from_memory, peek_data_from_stream};
// Sliding window read back
peek_back_byte :: proc(c: ^Context, offset: i64) -> (res: u8, err: io.Error) {
@(optimization_mode="speed")
peek_back_byte :: #force_inline proc(z: ^$C, offset: i64) -> (res: u8, err: io.Error) {
// Look back into the sliding window.
return c.last[offset % c.window_size], .None;
return z.output.buf[z.bytes_written - offset], .None;
}
// Generalized bit reader LSB
refill_lsb :: proc(z: ^Context, width := i8(24)) {
when #config(TRACY_ENABLE, false) { tracy.ZoneN("Refill LSB"); }
@(optimization_mode="speed")
refill_lsb_from_memory :: #force_inline proc(z: ^Context_Memory_Input, width := i8(48)) {
refill := u64(width);
b := u64(0);
if z.num_bits > refill {
return;
}
for {
if z.num_bits > width {
if len(z.input_data) != 0 {
b = u64(z.input_data[0]);
z.input_data = z.input_data[1:];
} else {
b = 0;
}
z.code_buffer |= b << u8(z.num_bits);
z.num_bits += 8;
if z.num_bits > refill {
break;
}
if z.code_buffer == 0 && z.num_bits == -1 {
}
}
// Generalized bit reader LSB
@(optimization_mode="speed")
refill_lsb_from_stream :: proc(z: ^Context_Stream_Input, width := i8(24)) {
refill := u64(width);
for {
if z.num_bits > refill {
break;
}
if z.code_buffer == 0 && z.num_bits > 63 {
z.num_bits = 0;
}
if z.code_buffer >= 1 << uint(z.num_bits) {
// Code buffer is malformed.
z.num_bits = -100;
z.num_bits = max(u64);
return;
}
c, err := read_u8(z);
b, err := read_u8(z);
if err != .None {
// This is fine at the end of the file.
z.num_bits = -42;
z.eof = true;
return;
}
z.code_buffer |= (u32(c) << u8(z.num_bits));
z.code_buffer |= (u64(b) << u8(z.num_bits));
z.num_bits += 8;
}
}
consume_bits_lsb :: #force_inline proc(z: ^Context, width: u8) {
refill_lsb :: proc{refill_lsb_from_memory, refill_lsb_from_stream};
@(optimization_mode="speed")
consume_bits_lsb_from_memory :: #force_inline proc(z: ^Context_Memory_Input, width: u8) {
z.code_buffer >>= width;
z.num_bits -= i8(width);
z.num_bits -= u64(width);
}
peek_bits_lsb :: #force_inline proc(z: ^Context, width: u8) -> u32 {
if z.num_bits < i8(width) {
@(optimization_mode="speed")
consume_bits_lsb_from_stream :: #force_inline proc(z: ^Context_Stream_Input, width: u8) {
z.code_buffer >>= width;
z.num_bits -= u64(width);
}
consume_bits_lsb :: proc{consume_bits_lsb_from_memory, consume_bits_lsb_from_stream};
@(optimization_mode="speed")
peek_bits_lsb_from_memory :: #force_inline proc(z: ^Context_Memory_Input, width: u8) -> u32 {
if z.num_bits < u64(width) {
refill_lsb(z);
}
// assert(z.num_bits >= i8(width));
return z.code_buffer & ~(~u32(0) << width);
return u32(z.code_buffer & ~(~u64(0) << width));
}
peek_bits_no_refill_lsb :: #force_inline proc(z: ^Context, width: u8) -> u32 {
assert(z.num_bits >= i8(width));
return z.code_buffer & ~(~u32(0) << width);
@(optimization_mode="speed")
peek_bits_lsb_from_stream :: #force_inline proc(z: ^Context_Stream_Input, width: u8) -> u32 {
if z.num_bits < u64(width) {
refill_lsb(z);
}
return u32(z.code_buffer & ~(~u64(0) << width));
}
read_bits_lsb :: #force_inline proc(z: ^Context, width: u8) -> u32 {
peek_bits_lsb :: proc{peek_bits_lsb_from_memory, peek_bits_lsb_from_stream};
@(optimization_mode="speed")
peek_bits_no_refill_lsb_from_memory :: #force_inline proc(z: ^Context_Memory_Input, width: u8) -> u32 {
assert(z.num_bits >= u64(width));
return u32(z.code_buffer & ~(~u64(0) << width));
}
@(optimization_mode="speed")
peek_bits_no_refill_lsb_from_stream :: #force_inline proc(z: ^Context_Stream_Input, width: u8) -> u32 {
assert(z.num_bits >= u64(width));
return u32(z.code_buffer & ~(~u64(0) << width));
}
peek_bits_no_refill_lsb :: proc{peek_bits_no_refill_lsb_from_memory, peek_bits_no_refill_lsb_from_stream};
@(optimization_mode="speed")
read_bits_lsb_from_memory :: #force_inline proc(z: ^Context_Memory_Input, width: u8) -> u32 {
k := #force_inline peek_bits_lsb(z, width);
#force_inline consume_bits_lsb(z, width);
return k;
}
@(optimization_mode="speed")
read_bits_lsb_from_stream :: #force_inline proc(z: ^Context_Stream_Input, width: u8) -> u32 {
k := peek_bits_lsb(z, width);
consume_bits_lsb(z, width);
return k;
}
read_bits_no_refill_lsb :: #force_inline proc(z: ^Context, width: u8) -> u32 {
read_bits_lsb :: proc{read_bits_lsb_from_memory, read_bits_lsb_from_stream};
@(optimization_mode="speed")
read_bits_no_refill_lsb_from_memory :: #force_inline proc(z: ^Context_Memory_Input, width: u8) -> u32 {
k := #force_inline peek_bits_no_refill_lsb(z, width);
#force_inline consume_bits_lsb(z, width);
return k;
}
@(optimization_mode="speed")
read_bits_no_refill_lsb_from_stream :: #force_inline proc(z: ^Context_Stream_Input, width: u8) -> u32 {
k := peek_bits_no_refill_lsb(z, width);
consume_bits_lsb(z, width);
return k;
}
discard_to_next_byte_lsb :: proc(z: ^Context) {
read_bits_no_refill_lsb :: proc{read_bits_no_refill_lsb_from_memory, read_bits_no_refill_lsb_from_stream};
@(optimization_mode="speed")
discard_to_next_byte_lsb_from_memory :: proc(z: ^Context_Memory_Input) {
discard := u8(z.num_bits & 7);
#force_inline consume_bits_lsb(z, discard);
}
@(optimization_mode="speed")
discard_to_next_byte_lsb_from_stream :: proc(z: ^Context_Stream_Input) {
discard := u8(z.num_bits & 7);
consume_bits_lsb(z, discard);
}
discard_to_next_byte_lsb :: proc{discard_to_next_byte_lsb_from_memory, discard_to_next_byte_lsb_from_stream};
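
For reference, a short sketch of the memory-backed reader above (names and input bytes are illustrative): read_data pulls whole bytes directly from input_data, while read_bits_lsb refills and drains the 64-bit code buffer:

package bit_reader_example
import "core:compress"
bit_reader_example :: proc() {
	data := []u8{0x55, 0xF0};
	z := &compress.Context_Memory_Input{
		input_data = data,
	};
	// output is left nil; this sketch only reads.
	first, _ := compress.read_data(z, u8);  // whole-byte read: 0x55
	low  := compress.read_bits_lsb(z, 4);   // low nibble of 0xF0: 0x0
	high := compress.read_bits_lsb(z, 4);   // high nibble of 0xF0: 0xF
	assert(first == 0x55 && low == 0x0 && high == 0xF);
}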


@@ -12,9 +12,10 @@ package gzip
A small GZIP implementation as an example.
*/
import "core:compress/gzip"
import "core:bytes"
import "core:os"
import "core:compress"
import "core:fmt"
// Small GZIP file with fextra, fname and fcomment present.
@private
@@ -31,7 +32,7 @@ TEST: []u8 = {
main :: proc() {
// Set up output buffer.
buf: bytes.Buffer;
buf := bytes.Buffer{};
stdout :: proc(s: string) {
os.write_string(os.stdout, s);
@@ -44,26 +45,32 @@ main :: proc() {
if len(args) < 2 {
stderr("No input file specified.\n");
err := gzip.load(TEST, &buf);
if err != nil {
err := load(slice=TEST, buf=&buf, known_gzip_size=len(TEST));
if err == nil {
stdout("Displaying test vector: ");
stdout(bytes.buffer_to_string(&buf));
stdout("\n");
} else {
fmt.printf("gzip.load returned %v\n", err);
}
bytes.buffer_destroy(&buf);
os.exit(0);
}
// The rest are all files.
args = args[1:];
err: gzip.Error;
err: Error;
for file in args {
if file == "-" {
// Read from stdin
s := os.stream_from_handle(os.stdin);
err = gzip.load(s, &buf);
ctx := &compress.Context_Stream_Input{
input = s,
};
err = load(ctx, &buf);
} else {
err = gzip.load(file, &buf);
err = load(file, &buf);
}
if err != nil {
if err != E_General.File_Not_Found {


@@ -21,11 +21,6 @@ import "core:io"
import "core:bytes"
import "core:hash"
/*
*/
Magic :: enum u16le {
GZIP = 0x8b << 8 | 0x1f,
}
@@ -104,40 +99,54 @@ E_GZIP :: compress.GZIP_Error;
E_ZLIB :: compress.ZLIB_Error;
E_Deflate :: compress.Deflate_Error;
load_from_slice :: proc(slice: []u8, buf: ^bytes.Buffer, allocator := context.allocator) -> (err: Error) {
GZIP_MAX_PAYLOAD_SIZE :: int(max(u32le));
r := bytes.Reader{};
bytes.reader_init(&r, slice);
stream := bytes.reader_to_stream(&r);
load :: proc{load_from_slice, load_from_file, load_from_context};
err = load_from_stream(stream, buf, allocator);
return err;
}
load_from_file :: proc(filename: string, buf: ^bytes.Buffer, allocator := context.allocator) -> (err: Error) {
load_from_file :: proc(filename: string, buf: ^bytes.Buffer, expected_output_size := -1, allocator := context.allocator) -> (err: Error) {
data, ok := os.read_entire_file(filename, allocator);
defer delete(data);
err = E_General.File_Not_Found;
if ok {
err = load_from_slice(data, buf, allocator);
err = load_from_slice(data, buf, len(data), expected_output_size, allocator);
}
return;
}
load_from_stream :: proc(stream: io.Stream, buf: ^bytes.Buffer, allocator := context.allocator) -> (err: Error) {
ctx := compress.Context{
input = stream,
};
load_from_slice :: proc(slice: []u8, buf: ^bytes.Buffer, known_gzip_size := -1, expected_output_size := -1, allocator := context.allocator) -> (err: Error) {
buf := buf;
ws := bytes.buffer_to_stream(buf);
ctx.output = ws;
header, e := compress.read_data(&ctx, Header);
z := &compress.Context_Memory_Input{
input_data = slice,
output = buf,
};
return load_from_context(z, buf, known_gzip_size, expected_output_size, allocator);
}
load_from_context :: proc(z: ^$C, buf: ^bytes.Buffer, known_gzip_size := -1, expected_output_size := -1, allocator := context.allocator) -> (err: Error) {
buf := buf;
expected_output_size := expected_output_size;
input_data_consumed := 0;
z.output = buf;
if expected_output_size > GZIP_MAX_PAYLOAD_SIZE {
return E_GZIP.Payload_Size_Exceeds_Max_Payload;
}
if expected_output_size > compress.COMPRESS_OUTPUT_ALLOCATE_MAX {
return E_GZIP.Output_Exceeds_COMPRESS_OUTPUT_ALLOCATE_MAX;
}
b: []u8;
header, e := compress.read_data(z, Header);
if e != .None {
return E_General.File_Too_Short;
}
input_data_consumed += size_of(Header);
if header.magic != .GZIP {
return E_GZIP.Invalid_GZIP_Signature;
@@ -162,7 +171,9 @@ load_from_stream :: proc(stream: io.Stream, buf: ^bytes.Buffer, allocator := con
// printf("os: %v\n", OS_Name[header.os]);
if .extra in header.flags {
xlen, e_extra := compress.read_data(&ctx, u16le);
xlen, e_extra := compress.read_data(z, u16le);
input_data_consumed += 2;
if e_extra != .None {
return E_General.Stream_Too_Short;
}
@@ -178,19 +189,21 @@ load_from_stream :: proc(stream: io.Stream, buf: ^bytes.Buffer, allocator := con
for xlen >= 4 {
// println("Parsing Extra field(s).");
field_id, field_error = compress.read_data(&ctx, [2]u8);
field_id, field_error = compress.read_data(z, [2]u8);
if field_error != .None {
// printf("Parsing Extra returned: %v\n", field_error);
return E_General.Stream_Too_Short;
}
xlen -= 2;
input_data_consumed += 2;
field_length, field_error = compress.read_data(&ctx, u16le);
field_length, field_error = compress.read_data(z, u16le);
if field_error != .None {
// printf("Parsing Extra returned: %v\n", field_error);
return E_General.Stream_Too_Short;
}
xlen -= 2;
input_data_consumed += 2;
if xlen <= 0 {
// We're not going to try and recover by scanning for a ZLIB header.
@@ -200,13 +213,13 @@ load_from_stream :: proc(stream: io.Stream, buf: ^bytes.Buffer, allocator := con
// printf(" Field \"%v\" of length %v found: ", string(field_id[:]), field_length);
if field_length > 0 {
field_data := make([]u8, field_length, context.temp_allocator);
_, field_error = ctx.input->impl_read(field_data);
b, field_error = compress.read_slice(z, int(field_length));
if field_error != .None {
// printf("Parsing Extra returned: %v\n", field_error);
return E_General.Stream_Too_Short;
}
xlen -= field_length;
input_data_consumed += int(field_length);
// printf("%v\n", string(field_data));
}
@@ -220,16 +233,16 @@ load_from_stream :: proc(stream: io.Stream, buf: ^bytes.Buffer, allocator := con
if .name in header.flags {
// Should be enough.
name: [1024]u8;
b: [1]u8;
i := 0;
name_error: io.Error;
for i < len(name) {
_, name_error = ctx.input->impl_read(b[:]);
b, name_error = compress.read_slice(z, 1);
if name_error != .None {
return E_General.Stream_Too_Short;
}
if b == 0 {
input_data_consumed += 1;
if b[0] == 0 {
break;
}
name[i] = b[0];
@@ -244,16 +257,16 @@ load_from_stream :: proc(stream: io.Stream, buf: ^bytes.Buffer, allocator := con
if .comment in header.flags {
// Should be enough.
comment: [1024]u8;
b: [1]u8;
i := 0;
comment_error: io.Error;
for i < len(comment) {
_, comment_error = ctx.input->impl_read(b[:]);
b, comment_error = compress.read_slice(z, 1);
if comment_error != .None {
return E_General.Stream_Too_Short;
}
if b == 0 {
input_data_consumed += 1;
if b[0] == 0 {
break;
}
comment[i] = b[0];
@@ -266,9 +279,9 @@ load_from_stream :: proc(stream: io.Stream, buf: ^bytes.Buffer, allocator := con
}
if .header_crc in header.flags {
crc16: [2]u8;
crc_error: io.Error;
_, crc_error = ctx.input->impl_read(crc16[:]);
_, crc_error = compress.read_slice(z, 2);
input_data_consumed += 2;
if crc_error != .None {
return E_General.Stream_Too_Short;
}
@@ -281,42 +294,74 @@ load_from_stream :: proc(stream: io.Stream, buf: ^bytes.Buffer, allocator := con
/*
We should have arrived at the ZLIB payload.
*/
payload_u32le: u32le;
zlib_error := zlib.inflate_raw(&ctx);
// fmt.printf("known_gzip_size: %v | expected_output_size: %v\n", known_gzip_size, expected_output_size);
// fmt.printf("ZLIB returned: %v\n", zlib_error);
if expected_output_size > -1 {
/*
We already checked that it's not larger than the output buffer max,
or GZIP length field's max.
We'll just pass it on to `zlib.inflate_raw`;
*/
} else {
/*
If we know the size of the GZIP file *and* it is fully in memory,
then we can peek at the unpacked size at the end.
We'll still want to ensure there's capacity left in the output buffer when we write, of course.
*/
if known_gzip_size > -1 {
offset := i64(known_gzip_size - input_data_consumed - 4);
size, _ := compress.input_size(z);
if size >= offset + 4 {
length_bytes := z.input_data[offset:][:4];
payload_u32le = (^u32le)(&length_bytes[0])^;
expected_output_size = int(payload_u32le);
}
} else {
/*
TODO(Jeroen): When reading a GZIP from a stream, check if impl_seek is present.
If so, we can seek to the end, grab the size from the footer, and seek back to payload start.
*/
}
}
// fmt.printf("GZIP: Expected Payload Size: %v\n", expected_output_size);
zlib_error := zlib.inflate_raw(z=z, expected_output_size=expected_output_size);
if zlib_error != nil {
return zlib_error;
}
/*
Read CRC32 using the ctx bit reader because zlib may leave bytes in there.
*/
compress.discard_to_next_byte_lsb(&ctx);
compress.discard_to_next_byte_lsb(z);
footer_error: io.Error;
payload_crc_b: [4]u8;
payload_len_b: [4]u8;
for _, i in payload_crc_b {
payload_crc_b[i] = u8(compress.read_bits_lsb(&ctx, 8));
payload_crc_b[i], footer_error = compress.read_u8_prefer_code_buffer_lsb(z);
}
payload_crc := transmute(u32le)payload_crc_b;
for _, i in payload_len_b {
payload_len_b[i] = u8(compress.read_bits_lsb(&ctx, 8));
}
payload_len := int(transmute(u32le)payload_len_b);
payload := bytes.buffer_to_bytes(buf);
crc32 := u32le(hash.crc32(payload));
crc32 := u32le(hash.crc32(payload));
if crc32 != payload_crc {
return E_GZIP.Payload_CRC_Invalid;
}
if len(payload) != payload_len {
payload_len_b: [4]u8;
for _, i in payload_len_b {
payload_len_b[i], footer_error = compress.read_u8_prefer_code_buffer_lsb(z);
}
payload_len := transmute(u32le)payload_len_b;
if len(payload) != int(payload_len) {
return E_GZIP.Payload_Length_Invalid;
}
return nil;
}
load :: proc{load_from_file, load_from_slice, load_from_stream};
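
For reference, a sketch of calling the updated loader from another package; `compressed` and the surrounding names are illustrative. Passing known_gzip_size lets the loader peek at the ISIZE footer to pre-size the output; an explicit expected_output_size would be used instead when the caller already knows it:

package gzip_example
import "core:bytes"
import "core:compress/gzip"
import "core:fmt"
decompress :: proc(compressed: []u8) {
	buf := bytes.Buffer{};
	defer bytes.buffer_destroy(&buf);
	err := gzip.load(slice=compressed, buf=&buf, known_gzip_size=len(compressed));
	if err != nil {
		fmt.printf("gzip.load returned %v\n", err);
		return;
	}
	fmt.println(bytes.buffer_to_string(&buf));
}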


@@ -11,7 +11,6 @@ package zlib
An example of how to use `zlib.inflate`.
*/
import "core:compress/zlib"
import "core:bytes"
import "core:fmt"
@@ -36,11 +35,12 @@ main :: proc() {
171, 15, 18, 59, 138, 112, 63, 23, 205, 110, 254, 136, 109, 78, 231,
63, 234, 138, 133, 204,
};
OUTPUT_SIZE :: 438;
buf: bytes.Buffer;
// We can pass ", true" to inflate a raw DEFLATE stream instead of a ZLIB wrapped one.
err := zlib.inflate(ODIN_DEMO, &buf);
err := inflate(input=ODIN_DEMO, buf=&buf, expected_output_size=OUTPUT_SIZE);
defer bytes.buffer_destroy(&buf);
if err != nil {
@@ -48,5 +48,5 @@ main :: proc() {
}
s := bytes.buffer_to_string(&buf);
fmt.printf("Input: %v bytes, output (%v bytes):\n%v\n", len(ODIN_DEMO), len(s), s);
assert(len(s) == 438);
assert(len(s) == OUTPUT_SIZE);
}


@@ -13,17 +13,22 @@ import "core:compress"
import "core:mem"
import "core:io"
import "core:bytes"
import "core:hash"
// when #config(TRACY_ENABLE, false) { import tracy "shared:odin-tracy" }
import "core:bytes"
/*
zlib.inflate decompresses a ZLIB stream passed in as a []u8 or io.Stream.
Returns: Error.
*/
Context :: compress.Context;
/*
Do we compute Adler32 as we write bytes to the output?
It used to be faster to do it inline; now it's faster to do it at the end of `inflate`.
We'll see which is faster after more optimization, and may end up removing
`Context.rolling_hash` if computing it at the end remains faster.
*/
Compression_Method :: enum u8 {
DEFLATE = 8,
@@ -114,7 +119,7 @@ Huffman_Table :: struct {
};
// Implementation starts here
@(optimization_mode="speed")
z_bit_reverse :: #force_inline proc(n: u16, bits: u8) -> (r: u16) {
assert(bits <= 16);
// NOTE: Can optimize with llvm.bitreverse.i64 or some bit twiddling
@@ -129,67 +134,105 @@ z_bit_reverse :: #force_inline proc(n: u16, bits: u8) -> (r: u16) {
return;
}
write_byte :: #force_inline proc(z: ^Context, c: u8) -> (err: io.Error) #no_bounds_check {
when #config(TRACY_ENABLE, false) { tracy.ZoneN("Write Byte"); }
c := c;
buf := transmute([]u8)mem.Raw_Slice{data=&c, len=1};
z.rolling_hash = hash.adler32(buf, z.rolling_hash);
_, e := z.output->impl_write(buf);
if e != .None {
return e;
@(optimization_mode="speed")
grow_buffer :: proc(buf: ^[dynamic]u8) -> (err: compress.Error) {
/*
That we get here at all means that we didn't pass an expected output size,
or that it was too little.
*/
/*
Double until we reach the maximum allowed.
*/
new_size := min(len(buf) << 1, compress.COMPRESS_OUTPUT_ALLOCATE_MAX);
resize(buf, new_size);
if len(buf) != new_size {
/*
Resize failed.
*/
return .Resize_Failed;
}
z.last[z.bytes_written & z.window_mask] = c;
return nil;
}
/*
TODO: Make these return compress.Error.
*/
@(optimization_mode="speed")
write_byte :: #force_inline proc(z: ^$C, c: u8) -> (err: io.Error) #no_bounds_check {
/*
Resize if needed.
*/
if int(z.bytes_written) + 1 >= len(z.output.buf) {
e := grow_buffer(&z.output.buf);
if e != nil {
return .Short_Write;
}
}
#no_bounds_check {
z.output.buf[z.bytes_written] = c;
}
z.bytes_written += 1;
return .None;
}
repl_byte :: proc(z: ^Context, count: u16, c: u8) -> (err: io.Error) {
when #config(TRACY_ENABLE, false) { tracy.ZoneN("Repl Byte"); }
@(optimization_mode="speed")
repl_byte :: proc(z: ^$C, count: u16, c: u8) -> (err: io.Error) #no_bounds_check {
/*
TODO(Jeroen): Once we have a magic ring buffer, we can just peek/write into it
without having to worry about wrapping, so no need for a temp allocation to give to
the output stream, just give it _that_ slice.
*/
buf := make([]u8, count, context.temp_allocator);
#no_bounds_check for i in 0..<count {
buf[i] = c;
z.last[z.bytes_written & z.window_mask] = c;
z.bytes_written += 1;
}
z.rolling_hash = hash.adler32(buf, z.rolling_hash);
_, e := z.output->impl_write(buf);
if e != .None {
return e;
/*
Resize if needed.
*/
if int(z.bytes_written) + int(count) >= len(z.output.buf) {
e := grow_buffer(&z.output.buf);
if e != nil {
return .Short_Write;
}
}
#no_bounds_check {
for _ in 0..<count {
z.output.buf[z.bytes_written] = c;
z.bytes_written += 1;
}
}
return .None;
}
repl_bytes :: proc(z: ^Context, count: u16, distance: u16) -> (err: io.Error) {
when #config(TRACY_ENABLE, false) { tracy.ZoneN("Repl Bytes"); }
@(optimization_mode="speed")
repl_bytes :: proc(z: ^$C, count: u16, distance: u16) -> (err: io.Error) {
/*
TODO(Jeroen): Once we have a magic ring buffer, we can just peek/write into it
without having to worry about wrapping, so no need for a temp allocation to give to
the output stream, just give it _that_ slice.
*/
buf := make([]u8, count, context.temp_allocator);
offset := z.bytes_written - i64(distance);
#no_bounds_check for i in 0..<count {
c := z.last[offset & z.window_mask];
offset := i64(distance);
z.last[z.bytes_written & z.window_mask] = c;
buf[i] = c;
z.bytes_written += 1; offset += 1;
if int(z.bytes_written) + int(count) >= len(z.output.buf) {
e := grow_buffer(&z.output.buf);
if e != nil {
return .Short_Write;
}
}
z.rolling_hash = hash.adler32(buf, z.rolling_hash);
_, e := z.output->impl_write(buf);
if e != .None {
return e;
#no_bounds_check {
for _ in 0..<count {
c := z.output.buf[z.bytes_written - offset];
z.output.buf[z.bytes_written] = c;
z.bytes_written += 1;
}
}
return .None;
}
@@ -198,8 +241,8 @@ allocate_huffman_table :: proc(allocator := context.allocator) -> (z: ^Huffman_T
return new(Huffman_Table, allocator), nil;
}
@(optimization_mode="speed")
build_huffman :: proc(z: ^Huffman_Table, code_lengths: []u8) -> (err: Error) {
when #config(TRACY_ENABLE, false) { tracy.ZoneN("Build Huffman Table"); }
sizes: [HUFFMAN_MAX_BITS+1]int;
next_code: [HUFFMAN_MAX_BITS]int;
@@ -257,9 +300,9 @@ build_huffman :: proc(z: ^Huffman_Table, code_lengths: []u8) -> (err: Error) {
return nil;
}
decode_huffman_slowpath :: proc(z: ^Context, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {
when #config(TRACY_ENABLE, false) { tracy.ZoneN("Decode Huffman Slow"); }
code := u16(compress.peek_bits_lsb(z, 16));
@(optimization_mode="speed")
decode_huffman_slowpath :: proc(z: ^$C, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {
code := u16(compress.peek_bits_lsb(z,16));
k := int(z_bit_reverse(code, 16));
s: u8;
@@ -288,14 +331,14 @@ decode_huffman_slowpath :: proc(z: ^Context, t: ^Huffman_Table) -> (r: u16, err:
return r, nil;
}
decode_huffman :: proc(z: ^Context, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {
when #config(TRACY_ENABLE, false) { tracy.ZoneN("Decode Huffman"); }
@(optimization_mode="speed")
decode_huffman :: proc(z: ^$C, t: ^Huffman_Table) -> (r: u16, err: Error) #no_bounds_check {
if z.num_bits < 16 {
if z.num_bits == -100 {
if z.num_bits > 63 {
return 0, E_ZLIB.Code_Buffer_Malformed;
}
compress.refill_lsb(z);
if z.eof {
if z.num_bits > 63 {
return 0, E_General.Stream_Too_Short;
}
}
@@ -308,8 +351,8 @@ decode_huffman :: proc(z: ^Context, t: ^Huffman_Table) -> (r: u16, err: Error) #
return decode_huffman_slowpath(z, t);
}
parse_huffman_block :: proc(z: ^Context, z_repeat, z_offset: ^Huffman_Table) -> (err: Error) #no_bounds_check {
when #config(TRACY_ENABLE, false) { tracy.ZoneN("Parse Huffman Block"); }
@(optimization_mode="speed")
parse_huffman_block :: proc(z: ^$C, z_repeat, z_offset: ^Huffman_Table) -> (err: Error) #no_bounds_check {
#no_bounds_check for {
value, e := decode_huffman(z, z_repeat);
if e != nil {
@@ -347,7 +390,6 @@ parse_huffman_block :: proc(z: ^Context, z_repeat, z_offset: ^Huffman_Table) ->
return E_Deflate.Bad_Distance;
}
offset := i64(z.bytes_written - i64(distance));
/*
These might be sped up with a repl_byte call that copies
from the already written output more directly, and that
@@ -360,7 +402,7 @@ parse_huffman_block :: proc(z: ^Context, z_repeat, z_offset: ^Huffman_Table) ->
Replicate the last outputted byte, length times.
*/
if length > 0 {
c := z.last[offset & z.window_mask];
c := z.output.buf[z.bytes_written - i64(distance)];
e := repl_byte(z, length, c);
if e != .None {
return E_General.Output_Too_Short;
@@ -378,22 +420,18 @@ parse_huffman_block :: proc(z: ^Context, z_repeat, z_offset: ^Huffman_Table) ->
}
}
inflate_from_stream :: proc(using ctx: ^Context, raw := false, allocator := context.allocator) -> (err: Error) #no_bounds_check {
@(optimization_mode="speed")
inflate_from_context :: proc(using ctx: ^compress.Context_Memory_Input, raw := false, expected_output_size := -1, allocator := context.allocator) -> (err: Error) #no_bounds_check {
/*
ctx.input must be an io.Stream backed by an implementation that supports:
- read
- size
ctx.output must be an io.Stream backed by an implementation that supports:
- write
ctx.output must be a bytes.Buffer for now. We'll add a separate implementation that writes to a stream.
raw determines whether the ZLIB header is processed, or we're inflating a raw
DEFLATE stream.
*/
if !raw {
data_size := io.size(ctx.input);
if data_size < 6 {
size, size_err := compress.input_size(ctx);
if size < 6 || size_err != nil {
return E_General.Stream_Too_Short;
}
@@ -408,8 +446,6 @@ inflate_from_stream :: proc(using ctx: ^Context, raw := false, allocator := cont
if cinfo > 7 {
return E_ZLIB.Unsupported_Window_Size;
}
ctx.window_size = 1 << (cinfo + 8);
flg, _ := compress.read_u8(ctx);
fcheck := flg & 0x1f;
@@ -434,12 +470,10 @@ inflate_from_stream :: proc(using ctx: ^Context, raw := false, allocator := cont
at the end to compare checksums.
*/
// Seed the Adler32 rolling checksum.
ctx.rolling_hash = 1;
}
// Parse ZLIB stream without header.
err = inflate_raw(ctx);
err = inflate_raw(z=ctx, expected_output_size=expected_output_size);
if err != nil {
return err;
}
@@ -447,21 +481,47 @@ inflate_from_stream :: proc(using ctx: ^Context, raw := false, allocator := cont
if !raw {
compress.discard_to_next_byte_lsb(ctx);
adler32 := compress.read_bits_lsb(ctx, 8) << 24 | compress.read_bits_lsb(ctx, 8) << 16 | compress.read_bits_lsb(ctx, 8) << 8 | compress.read_bits_lsb(ctx, 8);
if ctx.rolling_hash != u32(adler32) {
adler_b: [4]u8;
for _, i in adler_b {
adler_b[i], _ = compress.read_u8_prefer_code_buffer_lsb(ctx);
}
adler := transmute(u32be)adler_b;
output_hash := hash.adler32(ctx.output.buf[:]);
if output_hash != u32(adler) {
return E_General.Checksum_Failed;
}
}
return nil;
}
// @(optimization_mode="speed")
inflate_from_stream_raw :: proc(z: ^Context, allocator := context.allocator) -> (err: Error) #no_bounds_check {
when #config(TRACY_ENABLE, false) { tracy.ZoneN("Inflate Raw"); }
final := u32(0);
type := u32(0);
// TODO: Check alignment of reserve/resize.
z.num_bits = 0;
@(optimization_mode="speed")
inflate_raw :: proc(z: ^$C, expected_output_size := -1, allocator := context.allocator) -> (err: Error) #no_bounds_check {
expected_output_size := expected_output_size;
/*
Always set up a minimum allocation size.
*/
expected_output_size = max(max(expected_output_size, compress.COMPRESS_OUTPUT_ALLOCATE_MIN), 512);
// fmt.printf("\nZLIB: Expected Payload Size: %v\n\n", expected_output_size);
if expected_output_size > 0 && expected_output_size <= compress.COMPRESS_OUTPUT_ALLOCATE_MAX {
/*
Try to pre-allocate the output buffer.
*/
reserve(&z.output.buf, expected_output_size);
resize (&z.output.buf, expected_output_size);
};
if len(z.output.buf) != expected_output_size {
return .Resize_Failed;
}
z.num_bits = 0;
z.code_buffer = 0;
z_repeat: ^Huffman_Table;
@@ -484,15 +544,8 @@ inflate_from_stream_raw :: proc(z: ^Context, allocator := context.allocator) ->
defer free(z_offset);
defer free(codelength_ht);
if z.window_size == 0 {
z.window_size = DEFLATE_MAX_DISTANCE;
}
z.window_mask = z.window_size - 1;
// Allocate rolling window buffer.
last_b := mem.make_dynamic_array_len_cap([dynamic]u8, z.window_size, z.window_size, allocator);
z.last = &last_b;
defer delete(last_b);
final := u32(0);
type := u32(0);
for {
final = compress.read_bits_lsb(z, 1);
@@ -502,7 +555,6 @@ inflate_from_stream_raw :: proc(z: ^Context, allocator := context.allocator) ->
switch type {
case 0:
when #config(TRACY_ENABLE, false) { tracy.ZoneN("Literal Block"); }
// Uncompressed block
// Discard bits until next byte boundary
@@ -531,7 +583,6 @@ inflate_from_stream_raw :: proc(z: ^Context, allocator := context.allocator) ->
case 3:
return E_Deflate.BType_3;
case:
when #config(TRACY_ENABLE, false) { tracy.ZoneN("Huffman Block"); }
// log.debugf("Err: %v | Final: %v | Type: %v\n", err, final, type);
if type == 1 {
// Use fixed code lengths.
@@ -633,29 +684,32 @@ inflate_from_stream_raw :: proc(z: ^Context, allocator := context.allocator) ->
break;
}
}
if int(z.bytes_written) != len(z.output.buf) {
resize(&z.output.buf, int(z.bytes_written));
}
return nil;
}
inflate_from_byte_array :: proc(input: []u8, buf: ^bytes.Buffer, raw := false) -> (err: Error) {
ctx := Context{};
inflate_from_byte_array :: proc(input: []u8, buf: ^bytes.Buffer, raw := false, expected_output_size := -1) -> (err: Error) {
ctx := compress.Context_Memory_Input{};
r := bytes.Reader{};
bytes.reader_init(&r, input);
rs := bytes.reader_to_stream(&r);
ctx.input = rs;
ctx.input_data = input;
ctx.output = buf;
buf := buf;
ws := bytes.buffer_to_stream(buf);
ctx.output = ws;
err = inflate_from_stream(&ctx, raw);
err = inflate_from_context(ctx=&ctx, raw=raw, expected_output_size=expected_output_size);
return err;
}
inflate_from_byte_array_raw :: proc(input: []u8, buf: ^bytes.Buffer, raw := false) -> (err: Error) {
return inflate_from_byte_array(input, buf, true);
inflate_from_byte_array_raw :: proc(input: []u8, buf: ^bytes.Buffer, raw := false, expected_output_size := -1) -> (err: Error) {
ctx := compress.Context_Memory_Input{};
ctx.input_data = input;
ctx.output = buf;
return inflate_raw(z=&ctx, expected_output_size=expected_output_size);
}
inflate :: proc{inflate_from_stream, inflate_from_byte_array};
inflate_raw :: proc{inflate_from_stream_raw, inflate_from_byte_array_raw};
inflate :: proc{inflate_from_context, inflate_from_byte_array};
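
For reference, a sketch of inflating a bare DEFLATE stream with inflate_from_byte_array_raw above; `deflate_data`, `unpacked_size`, and the package name are illustrative:

package deflate_example
import "core:bytes"
import "core:compress/zlib"
import "core:fmt"
inflate_deflate_stream :: proc(deflate_data: []u8, unpacked_size: int) {
	buf := bytes.Buffer{};
	defer bytes.buffer_destroy(&buf);
	// No ZLIB header or Adler-32 footer is expected here, only raw DEFLATE.
	err := zlib.inflate_from_byte_array_raw(input=deflate_data, buf=&buf, expected_output_size=unpacked_size);
	if err != nil {
		fmt.printf("inflate returned %v\n", err);
		return;
	}
	fmt.printf("unpacked %v bytes\n", len(buf.buf));
}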


@@ -26,6 +26,7 @@ ring_prev :: proc(r: ^$R/Ring) -> ^R {
ring_move :: proc(r: ^$R/Ring, n: int) -> ^R {
r := r;
if r.next == nil {
return ring_init(r);
}
@@ -64,7 +65,7 @@ ring_len :: proc(r: ^$R/Ring) -> int {
n := 0;
if r != nil {
n = 1;
for p := ring_next(&p); p != r; p = p.next {
for p := ring_next(r); p != r; p = p.next {
n += 1;
}
}

core/fmt/doc.odin

@@ -0,0 +1,141 @@
/*
package fmt implements formatted I/O with procedures similar to C's printf and Python's format.
The format 'verbs' are derived from C's but simpler.
Printing
The verbs:
General:
%v the value in a default format
%#v an expanded format of %v with newlines and indentation
%T an Odin-syntax representation of the type of the value
%% a literal percent sign; consumes no value
{{ a literal open brace; consumes no value
}} a literal close brace; consumes no value
{:v} equivalent to %v (Python-like formatting syntax)
Boolean:
%t the word "true" or "false"
Integer:
%b base 2
%c the character represented by the corresponding Unicode code point
%r synonym for %c
%o base 8
%d base 10
%i base 10
%z base 12
%x base 16, with lower-case letters for a-f
%X base 16, with upper-case letters for A-F
%U Unicode format: U+1234; same as "U+%04X"
Floating-point, complex numbers, and quaternions:
%e scientific notation, e.g. -1.23456e+78
%E scientific notation, e.g. -1.23456E+78
%f decimal point but no exponent, e.g. 123.456
%F synonym for %f
%h hexadecimal (lower-case) representation with 0h prefix (0h01234abcd)
%H hexadecimal (upper-case) representation with 0H prefix (0h01234ABCD)
String and slice of bytes:
%s the uninterpreted bytes of the string or slice
%q a double-quoted string safely escaped with Odin syntax
%x base 16, lower-case, two characters per byte
%X base 16, upper-case, two characters per byte
Slice and dynamic array:
%p address of the 0th element in base 16 notation (upper-case), with leading 0x
Pointer:
%p base 16 notation (upper-case), with leading 0x
The %b, %d, %o, %z, %x, %X verbs also work with pointers,
treating it as if it was an integer
Enums:
%s prints the name of the enum field
The %i, %d, %f verbs also work with enums,
treating it as if it was a number
For compound values, the elements are printed using these rules recursively, laid out as follows:
struct: {name0 = field0, name1 = field1, ...}
array: [elem0, elem1, elem2, ...]
enumerated array: [key0 = elem0, key1 = elem1, key2 = elem2, ...]
maps: map[key0 = value0, key1 = value1, ...]
bit sets: {key0 = elem0, key1 = elem1, ...}
pointer to above: &{}, &[], &map[]
Width is specified by an optional decimal number immediately preceding the verb.
If not present, the width is whatever is necessary to represent the value.
Precision is specified after the (optional) width followed by a period followed by a decimal number.
If no period is present, a default precision is used.
A period with no following number specifies a precision of 0.
Examples:
%f default width, default precision
%8f width 8, default precision
%.3f default width, precision 3
%8.3f width 8, precision 3
%8.f width 8, precision 0
Width and precision are measured in units of Unicode code points (runes).
n.b. C's printf uses units of bytes
Other flags:
+ always print a sign for numeric values
- pad with spaces on the right rather than the left (left-justify the field)
# alternate format:
add leading 0b for binary (%#b)
add leading 0o for octal (%#o)
add leading 0z for dozenal (%#z)
add leading 0x or 0X for hexadecimal (%#x or %#X)
remove leading 0x for %p (%#p)
' ' (space) leave a space for elided sign in numbers (% d)
0 pad with leading zeros rather than spaces
Flags are ignored by verbs that don't expect them
For each printf-like procedure, there is a print function that takes no
format, and is equivalent to doing %v for every value and inserts a separator
between each value (default is a single space).
Another procedure, println, has the same functionality as print but appends a newline.
Explicit argument indices:
In printf-like procedures, the default behaviour is for each formatting verb to format successive
arguments passed in the call. However, the notation [n] immediately before the verb indicates that
the nth zero-indexed argument is to be formatted instead.
The same notation before an '*' for a width or precision selects the argument index holding the value.
The Python-like syntax differs only in how the argument index is selected: {N:v}
Examples:
fmt.printf("%[1]d %[0]d\n", 13, 37); // C-like syntax
fmt.printf("{1:d} {0:d}\n", 13, 37); // Python-like syntax
prints "37 13", whilst:
fmt.printf("%[2]*.[1]*[0]f\n", 17.0, 2, 6); // C-like syntax
fmt.printf("%{0:[2]*.[1]*f}\n", 17.0, 2, 6); // Python-like syntax
equivalent to:
fmt.printf("%6.2f\n", 17.0, 2, 6); // C-like syntax
fmt.printf("{:6.2f}\n", 17.0, 2, 6); // Python-like syntax
prints "17.00"
Format errors:
If an invalid argument is given for a verb, such as providing a string to %d, the generated string
will contain a description of the problem. For example:
Bad enum value:
%!(BAD ENUM VALUE)
Too many arguments:
%!(EXTRA <value>, <value>, ...)
Too few arguments:
%!(MISSING ARGUMENT)
Invalid width or precision:
%!(BAD WIDTH)
%!(BAD PRECISION)
Missing verb:
%!(NO VERB)
Invalid argument index or invalid use of one:
%!(BAD ARGUMENT NUMBER)
Missing close brace when using Python-like formatting syntax:
%!(MISSING CLOSE BRACE)
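For instance (an illustrative sketch, not from the original documentation), passing more values
than the format string consumes produces the EXTRA marker described above:
	fmt.printf("%d", 1, 2, 3);   // the output contains a %!(EXTRA ...) marker listing the unused values 2 and 3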
*/
package fmt

View File

@@ -1013,6 +1013,7 @@ fmt_pointer :: proc(fi: ^Info, p: rawptr, verb: rune) {
case 'b': _fmt_int(fi, u, 2, false, 8*size_of(rawptr), __DIGITS_UPPER);
case 'o': _fmt_int(fi, u, 8, false, 8*size_of(rawptr), __DIGITS_UPPER);
case 'i', 'd': _fmt_int(fi, u, 10, false, 8*size_of(rawptr), __DIGITS_UPPER);
case 'z': _fmt_int(fi, u, 12, false, 8*size_of(rawptr), __DIGITS_UPPER);
case 'x': _fmt_int(fi, u, 16, false, 8*size_of(rawptr), __DIGITS_UPPER);
case 'X': _fmt_int(fi, u, 16, false, 8*size_of(rawptr), __DIGITS_UPPER);
@@ -1082,7 +1083,7 @@ fmt_enum :: proc(fi: ^Info, v: any, verb: rune) {
case 's', 'v':
str, ok := enum_value_to_string(v);
if !ok {
str = "!%(BAD ENUM VALUE)";
str = "%!(BAD ENUM VALUE)";
}
io.write_string(fi.writer, str);
}

View File

@@ -1,86 +1,15 @@
package hash
crc32 :: proc(data: []byte, seed := u32(0)) -> u32 #no_bounds_check {
result := ~u32(seed);
for b in data {
result = result>>8 ~ _crc32_table[(result ~ u32(b)) & 0xff];
}
return ~result;
}
@(optimization_mode="speed")
crc64 :: proc(data: []byte, seed := u32(0)) -> u64 #no_bounds_check {
result := ~u64(seed);
for b in data {
#no_bounds_check for b in data {
result = result>>8 ~ _crc64_table[(result ~ u64(b)) & 0xff];
}
return ~result;
}
@private _crc32_table := [256]u32{
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d,
};
@private _crc64_table := [256]u64{
0x0000000000000000, 0x42f0e1eba9ea3693, 0x85e1c3d753d46d26, 0xc711223cfa3e5bb5,
0x493366450e42ecdf, 0x0bc387aea7a8da4c, 0xccd2a5925d9681f9, 0x8e224479f47cb76a,

401
core/hash/crc32.odin Normal file
View File

@@ -0,0 +1,401 @@
package hash
import "intrinsics"
@(optimization_mode="speed")
crc32 :: proc(data: []byte, seed := u32(0)) -> u32 #no_bounds_check {
crc := ~seed;
buffer := raw_data(data);
length := len(data);
for length != 0 && uintptr(buffer) & 7 != 0 {
crc = crc32_table[0][byte(crc) ~ buffer^] ~ (crc >> 8);
buffer = intrinsics.ptr_offset(buffer, 1);
length -= 1;
}
for length >= 8 {
buf := (^[8]byte)(buffer);
word := u32((^u32le)(buffer)^);
crc ~= word;
crc = crc32_table[7][crc & 0xff] ~
crc32_table[6][(crc >> 8) & 0xff] ~
crc32_table[5][(crc >> 16) & 0xff] ~
crc32_table[4][(crc >> 24) & 0xff] ~
crc32_table[3][buf[4]] ~
crc32_table[2][buf[5]] ~
crc32_table[1][buf[6]] ~
crc32_table[0][buf[7]];
buffer = intrinsics.ptr_offset(buffer, 8);
length -= 8;
}
for length != 0 {
crc = crc32_table[0][byte(crc) ~ buffer^] ~ (crc >> 8);
buffer = intrinsics.ptr_offset(buffer, 1);
length -= 1;
}
return ~crc;
}
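// Editorial usage sketch, not part of the original diff: the procedure above reads bytes until the
// pointer is 8-byte aligned, runs slicing-by-8 through the eight tables, and finishes the tail
// byte-by-byte. Checksums can be chained by passing the previous result as the seed, exactly as the
// PNG loader below does for chunk type + data. The procedure name here is hypothetical.
crc32_chaining_example :: proc() -> u32 {
	full    := []byte{'h', 'e', 'l', 'l', 'o'};
	part_a  := full[:3];
	part_b  := full[3:];
	chained := crc32(part_b, crc32(part_a)); // equal to a single crc32 over the whole slice
	assert(chained == crc32(full));
	return chained;
}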
@(private)
crc32_table := [8][256]u32{
{
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d,
},
{
0x00000000, 0x191b3141, 0x32366282, 0x2b2d53c3, 0x646cc504, 0x7d77f445, 0x565aa786, 0x4f4196c7,
0xc8d98a08, 0xd1c2bb49, 0xfaefe88a, 0xe3f4d9cb, 0xacb54f0c, 0xb5ae7e4d, 0x9e832d8e, 0x87981ccf,
0x4ac21251, 0x53d92310, 0x78f470d3, 0x61ef4192, 0x2eaed755, 0x37b5e614, 0x1c98b5d7, 0x05838496,
0x821b9859, 0x9b00a918, 0xb02dfadb, 0xa936cb9a, 0xe6775d5d, 0xff6c6c1c, 0xd4413fdf, 0xcd5a0e9e,
0x958424a2, 0x8c9f15e3, 0xa7b24620, 0xbea97761, 0xf1e8e1a6, 0xe8f3d0e7, 0xc3de8324, 0xdac5b265,
0x5d5daeaa, 0x44469feb, 0x6f6bcc28, 0x7670fd69, 0x39316bae, 0x202a5aef, 0x0b07092c, 0x121c386d,
0xdf4636f3, 0xc65d07b2, 0xed705471, 0xf46b6530, 0xbb2af3f7, 0xa231c2b6, 0x891c9175, 0x9007a034,
0x179fbcfb, 0x0e848dba, 0x25a9de79, 0x3cb2ef38, 0x73f379ff, 0x6ae848be, 0x41c51b7d, 0x58de2a3c,
0xf0794f05, 0xe9627e44, 0xc24f2d87, 0xdb541cc6, 0x94158a01, 0x8d0ebb40, 0xa623e883, 0xbf38d9c2,
0x38a0c50d, 0x21bbf44c, 0x0a96a78f, 0x138d96ce, 0x5ccc0009, 0x45d73148, 0x6efa628b, 0x77e153ca,
0xbabb5d54, 0xa3a06c15, 0x888d3fd6, 0x91960e97, 0xded79850, 0xc7cca911, 0xece1fad2, 0xf5facb93,
0x7262d75c, 0x6b79e61d, 0x4054b5de, 0x594f849f, 0x160e1258, 0x0f152319, 0x243870da, 0x3d23419b,
0x65fd6ba7, 0x7ce65ae6, 0x57cb0925, 0x4ed03864, 0x0191aea3, 0x188a9fe2, 0x33a7cc21, 0x2abcfd60,
0xad24e1af, 0xb43fd0ee, 0x9f12832d, 0x8609b26c, 0xc94824ab, 0xd05315ea, 0xfb7e4629, 0xe2657768,
0x2f3f79f6, 0x362448b7, 0x1d091b74, 0x04122a35, 0x4b53bcf2, 0x52488db3, 0x7965de70, 0x607eef31,
0xe7e6f3fe, 0xfefdc2bf, 0xd5d0917c, 0xcccba03d, 0x838a36fa, 0x9a9107bb, 0xb1bc5478, 0xa8a76539,
0x3b83984b, 0x2298a90a, 0x09b5fac9, 0x10aecb88, 0x5fef5d4f, 0x46f46c0e, 0x6dd93fcd, 0x74c20e8c,
0xf35a1243, 0xea412302, 0xc16c70c1, 0xd8774180, 0x9736d747, 0x8e2de606, 0xa500b5c5, 0xbc1b8484,
0x71418a1a, 0x685abb5b, 0x4377e898, 0x5a6cd9d9, 0x152d4f1e, 0x0c367e5f, 0x271b2d9c, 0x3e001cdd,
0xb9980012, 0xa0833153, 0x8bae6290, 0x92b553d1, 0xddf4c516, 0xc4eff457, 0xefc2a794, 0xf6d996d5,
0xae07bce9, 0xb71c8da8, 0x9c31de6b, 0x852aef2a, 0xca6b79ed, 0xd37048ac, 0xf85d1b6f, 0xe1462a2e,
0x66de36e1, 0x7fc507a0, 0x54e85463, 0x4df36522, 0x02b2f3e5, 0x1ba9c2a4, 0x30849167, 0x299fa026,
0xe4c5aeb8, 0xfdde9ff9, 0xd6f3cc3a, 0xcfe8fd7b, 0x80a96bbc, 0x99b25afd, 0xb29f093e, 0xab84387f,
0x2c1c24b0, 0x350715f1, 0x1e2a4632, 0x07317773, 0x4870e1b4, 0x516bd0f5, 0x7a468336, 0x635db277,
0xcbfad74e, 0xd2e1e60f, 0xf9ccb5cc, 0xe0d7848d, 0xaf96124a, 0xb68d230b, 0x9da070c8, 0x84bb4189,
0x03235d46, 0x1a386c07, 0x31153fc4, 0x280e0e85, 0x674f9842, 0x7e54a903, 0x5579fac0, 0x4c62cb81,
0x8138c51f, 0x9823f45e, 0xb30ea79d, 0xaa1596dc, 0xe554001b, 0xfc4f315a, 0xd7626299, 0xce7953d8,
0x49e14f17, 0x50fa7e56, 0x7bd72d95, 0x62cc1cd4, 0x2d8d8a13, 0x3496bb52, 0x1fbbe891, 0x06a0d9d0,
0x5e7ef3ec, 0x4765c2ad, 0x6c48916e, 0x7553a02f, 0x3a1236e8, 0x230907a9, 0x0824546a, 0x113f652b,
0x96a779e4, 0x8fbc48a5, 0xa4911b66, 0xbd8a2a27, 0xf2cbbce0, 0xebd08da1, 0xc0fdde62, 0xd9e6ef23,
0x14bce1bd, 0x0da7d0fc, 0x268a833f, 0x3f91b27e, 0x70d024b9, 0x69cb15f8, 0x42e6463b, 0x5bfd777a,
0xdc656bb5, 0xc57e5af4, 0xee530937, 0xf7483876, 0xb809aeb1, 0xa1129ff0, 0x8a3fcc33, 0x9324fd72,
},
{
0x00000000, 0x01c26a37, 0x0384d46e, 0x0246be59, 0x0709a8dc, 0x06cbc2eb, 0x048d7cb2, 0x054f1685,
0x0e1351b8, 0x0fd13b8f, 0x0d9785d6, 0x0c55efe1, 0x091af964, 0x08d89353, 0x0a9e2d0a, 0x0b5c473d,
0x1c26a370, 0x1de4c947, 0x1fa2771e, 0x1e601d29, 0x1b2f0bac, 0x1aed619b, 0x18abdfc2, 0x1969b5f5,
0x1235f2c8, 0x13f798ff, 0x11b126a6, 0x10734c91, 0x153c5a14, 0x14fe3023, 0x16b88e7a, 0x177ae44d,
0x384d46e0, 0x398f2cd7, 0x3bc9928e, 0x3a0bf8b9, 0x3f44ee3c, 0x3e86840b, 0x3cc03a52, 0x3d025065,
0x365e1758, 0x379c7d6f, 0x35dac336, 0x3418a901, 0x3157bf84, 0x3095d5b3, 0x32d36bea, 0x331101dd,
0x246be590, 0x25a98fa7, 0x27ef31fe, 0x262d5bc9, 0x23624d4c, 0x22a0277b, 0x20e69922, 0x2124f315,
0x2a78b428, 0x2bbade1f, 0x29fc6046, 0x283e0a71, 0x2d711cf4, 0x2cb376c3, 0x2ef5c89a, 0x2f37a2ad,
0x709a8dc0, 0x7158e7f7, 0x731e59ae, 0x72dc3399, 0x7793251c, 0x76514f2b, 0x7417f172, 0x75d59b45,
0x7e89dc78, 0x7f4bb64f, 0x7d0d0816, 0x7ccf6221, 0x798074a4, 0x78421e93, 0x7a04a0ca, 0x7bc6cafd,
0x6cbc2eb0, 0x6d7e4487, 0x6f38fade, 0x6efa90e9, 0x6bb5866c, 0x6a77ec5b, 0x68315202, 0x69f33835,
0x62af7f08, 0x636d153f, 0x612bab66, 0x60e9c151, 0x65a6d7d4, 0x6464bde3, 0x662203ba, 0x67e0698d,
0x48d7cb20, 0x4915a117, 0x4b531f4e, 0x4a917579, 0x4fde63fc, 0x4e1c09cb, 0x4c5ab792, 0x4d98dda5,
0x46c49a98, 0x4706f0af, 0x45404ef6, 0x448224c1, 0x41cd3244, 0x400f5873, 0x4249e62a, 0x438b8c1d,
0x54f16850, 0x55330267, 0x5775bc3e, 0x56b7d609, 0x53f8c08c, 0x523aaabb, 0x507c14e2, 0x51be7ed5,
0x5ae239e8, 0x5b2053df, 0x5966ed86, 0x58a487b1, 0x5deb9134, 0x5c29fb03, 0x5e6f455a, 0x5fad2f6d,
0xe1351b80, 0xe0f771b7, 0xe2b1cfee, 0xe373a5d9, 0xe63cb35c, 0xe7fed96b, 0xe5b86732, 0xe47a0d05,
0xef264a38, 0xeee4200f, 0xeca29e56, 0xed60f461, 0xe82fe2e4, 0xe9ed88d3, 0xebab368a, 0xea695cbd,
0xfd13b8f0, 0xfcd1d2c7, 0xfe976c9e, 0xff5506a9, 0xfa1a102c, 0xfbd87a1b, 0xf99ec442, 0xf85cae75,
0xf300e948, 0xf2c2837f, 0xf0843d26, 0xf1465711, 0xf4094194, 0xf5cb2ba3, 0xf78d95fa, 0xf64fffcd,
0xd9785d60, 0xd8ba3757, 0xdafc890e, 0xdb3ee339, 0xde71f5bc, 0xdfb39f8b, 0xddf521d2, 0xdc374be5,
0xd76b0cd8, 0xd6a966ef, 0xd4efd8b6, 0xd52db281, 0xd062a404, 0xd1a0ce33, 0xd3e6706a, 0xd2241a5d,
0xc55efe10, 0xc49c9427, 0xc6da2a7e, 0xc7184049, 0xc25756cc, 0xc3953cfb, 0xc1d382a2, 0xc011e895,
0xcb4dafa8, 0xca8fc59f, 0xc8c97bc6, 0xc90b11f1, 0xcc440774, 0xcd866d43, 0xcfc0d31a, 0xce02b92d,
0x91af9640, 0x906dfc77, 0x922b422e, 0x93e92819, 0x96a63e9c, 0x976454ab, 0x9522eaf2, 0x94e080c5,
0x9fbcc7f8, 0x9e7eadcf, 0x9c381396, 0x9dfa79a1, 0x98b56f24, 0x99770513, 0x9b31bb4a, 0x9af3d17d,
0x8d893530, 0x8c4b5f07, 0x8e0de15e, 0x8fcf8b69, 0x8a809dec, 0x8b42f7db, 0x89044982, 0x88c623b5,
0x839a6488, 0x82580ebf, 0x801eb0e6, 0x81dcdad1, 0x8493cc54, 0x8551a663, 0x8717183a, 0x86d5720d,
0xa9e2d0a0, 0xa820ba97, 0xaa6604ce, 0xaba46ef9, 0xaeeb787c, 0xaf29124b, 0xad6fac12, 0xacadc625,
0xa7f18118, 0xa633eb2f, 0xa4755576, 0xa5b73f41, 0xa0f829c4, 0xa13a43f3, 0xa37cfdaa, 0xa2be979d,
0xb5c473d0, 0xb40619e7, 0xb640a7be, 0xb782cd89, 0xb2cddb0c, 0xb30fb13b, 0xb1490f62, 0xb08b6555,
0xbbd72268, 0xba15485f, 0xb853f606, 0xb9919c31, 0xbcde8ab4, 0xbd1ce083, 0xbf5a5eda, 0xbe9834ed,
},
{
0x00000000, 0xb8bc6765, 0xaa09c88b, 0x12b5afee, 0x8f629757, 0x37def032, 0x256b5fdc, 0x9dd738b9,
0xc5b428ef, 0x7d084f8a, 0x6fbde064, 0xd7018701, 0x4ad6bfb8, 0xf26ad8dd, 0xe0df7733, 0x58631056,
0x5019579f, 0xe8a530fa, 0xfa109f14, 0x42acf871, 0xdf7bc0c8, 0x67c7a7ad, 0x75720843, 0xcdce6f26,
0x95ad7f70, 0x2d111815, 0x3fa4b7fb, 0x8718d09e, 0x1acfe827, 0xa2738f42, 0xb0c620ac, 0x087a47c9,
0xa032af3e, 0x188ec85b, 0x0a3b67b5, 0xb28700d0, 0x2f503869, 0x97ec5f0c, 0x8559f0e2, 0x3de59787,
0x658687d1, 0xdd3ae0b4, 0xcf8f4f5a, 0x7733283f, 0xeae41086, 0x525877e3, 0x40edd80d, 0xf851bf68,
0xf02bf8a1, 0x48979fc4, 0x5a22302a, 0xe29e574f, 0x7f496ff6, 0xc7f50893, 0xd540a77d, 0x6dfcc018,
0x359fd04e, 0x8d23b72b, 0x9f9618c5, 0x272a7fa0, 0xbafd4719, 0x0241207c, 0x10f48f92, 0xa848e8f7,
0x9b14583d, 0x23a83f58, 0x311d90b6, 0x89a1f7d3, 0x1476cf6a, 0xaccaa80f, 0xbe7f07e1, 0x06c36084,
0x5ea070d2, 0xe61c17b7, 0xf4a9b859, 0x4c15df3c, 0xd1c2e785, 0x697e80e0, 0x7bcb2f0e, 0xc377486b,
0xcb0d0fa2, 0x73b168c7, 0x6104c729, 0xd9b8a04c, 0x446f98f5, 0xfcd3ff90, 0xee66507e, 0x56da371b,
0x0eb9274d, 0xb6054028, 0xa4b0efc6, 0x1c0c88a3, 0x81dbb01a, 0x3967d77f, 0x2bd27891, 0x936e1ff4,
0x3b26f703, 0x839a9066, 0x912f3f88, 0x299358ed, 0xb4446054, 0x0cf80731, 0x1e4da8df, 0xa6f1cfba,
0xfe92dfec, 0x462eb889, 0x549b1767, 0xec277002, 0x71f048bb, 0xc94c2fde, 0xdbf98030, 0x6345e755,
0x6b3fa09c, 0xd383c7f9, 0xc1366817, 0x798a0f72, 0xe45d37cb, 0x5ce150ae, 0x4e54ff40, 0xf6e89825,
0xae8b8873, 0x1637ef16, 0x048240f8, 0xbc3e279d, 0x21e91f24, 0x99557841, 0x8be0d7af, 0x335cb0ca,
0xed59b63b, 0x55e5d15e, 0x47507eb0, 0xffec19d5, 0x623b216c, 0xda874609, 0xc832e9e7, 0x708e8e82,
0x28ed9ed4, 0x9051f9b1, 0x82e4565f, 0x3a58313a, 0xa78f0983, 0x1f336ee6, 0x0d86c108, 0xb53aa66d,
0xbd40e1a4, 0x05fc86c1, 0x1749292f, 0xaff54e4a, 0x322276f3, 0x8a9e1196, 0x982bbe78, 0x2097d91d,
0x78f4c94b, 0xc048ae2e, 0xd2fd01c0, 0x6a4166a5, 0xf7965e1c, 0x4f2a3979, 0x5d9f9697, 0xe523f1f2,
0x4d6b1905, 0xf5d77e60, 0xe762d18e, 0x5fdeb6eb, 0xc2098e52, 0x7ab5e937, 0x680046d9, 0xd0bc21bc,
0x88df31ea, 0x3063568f, 0x22d6f961, 0x9a6a9e04, 0x07bda6bd, 0xbf01c1d8, 0xadb46e36, 0x15080953,
0x1d724e9a, 0xa5ce29ff, 0xb77b8611, 0x0fc7e174, 0x9210d9cd, 0x2aacbea8, 0x38191146, 0x80a57623,
0xd8c66675, 0x607a0110, 0x72cfaefe, 0xca73c99b, 0x57a4f122, 0xef189647, 0xfdad39a9, 0x45115ecc,
0x764dee06, 0xcef18963, 0xdc44268d, 0x64f841e8, 0xf92f7951, 0x41931e34, 0x5326b1da, 0xeb9ad6bf,
0xb3f9c6e9, 0x0b45a18c, 0x19f00e62, 0xa14c6907, 0x3c9b51be, 0x842736db, 0x96929935, 0x2e2efe50,
0x2654b999, 0x9ee8defc, 0x8c5d7112, 0x34e11677, 0xa9362ece, 0x118a49ab, 0x033fe645, 0xbb838120,
0xe3e09176, 0x5b5cf613, 0x49e959fd, 0xf1553e98, 0x6c820621, 0xd43e6144, 0xc68bceaa, 0x7e37a9cf,
0xd67f4138, 0x6ec3265d, 0x7c7689b3, 0xc4caeed6, 0x591dd66f, 0xe1a1b10a, 0xf3141ee4, 0x4ba87981,
0x13cb69d7, 0xab770eb2, 0xb9c2a15c, 0x017ec639, 0x9ca9fe80, 0x241599e5, 0x36a0360b, 0x8e1c516e,
0x866616a7, 0x3eda71c2, 0x2c6fde2c, 0x94d3b949, 0x090481f0, 0xb1b8e695, 0xa30d497b, 0x1bb12e1e,
0x43d23e48, 0xfb6e592d, 0xe9dbf6c3, 0x516791a6, 0xccb0a91f, 0x740cce7a, 0x66b96194, 0xde0506f1,
},
{
0x00000000, 0x3d6029b0, 0x7ac05360, 0x47a07ad0, 0xf580a6c0, 0xc8e08f70, 0x8f40f5a0, 0xb220dc10,
0x30704bc1, 0x0d106271, 0x4ab018a1, 0x77d03111, 0xc5f0ed01, 0xf890c4b1, 0xbf30be61, 0x825097d1,
0x60e09782, 0x5d80be32, 0x1a20c4e2, 0x2740ed52, 0x95603142, 0xa80018f2, 0xefa06222, 0xd2c04b92,
0x5090dc43, 0x6df0f5f3, 0x2a508f23, 0x1730a693, 0xa5107a83, 0x98705333, 0xdfd029e3, 0xe2b00053,
0xc1c12f04, 0xfca106b4, 0xbb017c64, 0x866155d4, 0x344189c4, 0x0921a074, 0x4e81daa4, 0x73e1f314,
0xf1b164c5, 0xccd14d75, 0x8b7137a5, 0xb6111e15, 0x0431c205, 0x3951ebb5, 0x7ef19165, 0x4391b8d5,
0xa121b886, 0x9c419136, 0xdbe1ebe6, 0xe681c256, 0x54a11e46, 0x69c137f6, 0x2e614d26, 0x13016496,
0x9151f347, 0xac31daf7, 0xeb91a027, 0xd6f18997, 0x64d15587, 0x59b17c37, 0x1e1106e7, 0x23712f57,
0x58f35849, 0x659371f9, 0x22330b29, 0x1f532299, 0xad73fe89, 0x9013d739, 0xd7b3ade9, 0xead38459,
0x68831388, 0x55e33a38, 0x124340e8, 0x2f236958, 0x9d03b548, 0xa0639cf8, 0xe7c3e628, 0xdaa3cf98,
0x3813cfcb, 0x0573e67b, 0x42d39cab, 0x7fb3b51b, 0xcd93690b, 0xf0f340bb, 0xb7533a6b, 0x8a3313db,
0x0863840a, 0x3503adba, 0x72a3d76a, 0x4fc3feda, 0xfde322ca, 0xc0830b7a, 0x872371aa, 0xba43581a,
0x9932774d, 0xa4525efd, 0xe3f2242d, 0xde920d9d, 0x6cb2d18d, 0x51d2f83d, 0x167282ed, 0x2b12ab5d,
0xa9423c8c, 0x9422153c, 0xd3826fec, 0xeee2465c, 0x5cc29a4c, 0x61a2b3fc, 0x2602c92c, 0x1b62e09c,
0xf9d2e0cf, 0xc4b2c97f, 0x8312b3af, 0xbe729a1f, 0x0c52460f, 0x31326fbf, 0x7692156f, 0x4bf23cdf,
0xc9a2ab0e, 0xf4c282be, 0xb362f86e, 0x8e02d1de, 0x3c220dce, 0x0142247e, 0x46e25eae, 0x7b82771e,
0xb1e6b092, 0x8c869922, 0xcb26e3f2, 0xf646ca42, 0x44661652, 0x79063fe2, 0x3ea64532, 0x03c66c82,
0x8196fb53, 0xbcf6d2e3, 0xfb56a833, 0xc6368183, 0x74165d93, 0x49767423, 0x0ed60ef3, 0x33b62743,
0xd1062710, 0xec660ea0, 0xabc67470, 0x96a65dc0, 0x248681d0, 0x19e6a860, 0x5e46d2b0, 0x6326fb00,
0xe1766cd1, 0xdc164561, 0x9bb63fb1, 0xa6d61601, 0x14f6ca11, 0x2996e3a1, 0x6e369971, 0x5356b0c1,
0x70279f96, 0x4d47b626, 0x0ae7ccf6, 0x3787e546, 0x85a73956, 0xb8c710e6, 0xff676a36, 0xc2074386,
0x4057d457, 0x7d37fde7, 0x3a978737, 0x07f7ae87, 0xb5d77297, 0x88b75b27, 0xcf1721f7, 0xf2770847,
0x10c70814, 0x2da721a4, 0x6a075b74, 0x576772c4, 0xe547aed4, 0xd8278764, 0x9f87fdb4, 0xa2e7d404,
0x20b743d5, 0x1dd76a65, 0x5a7710b5, 0x67173905, 0xd537e515, 0xe857cca5, 0xaff7b675, 0x92979fc5,
0xe915e8db, 0xd475c16b, 0x93d5bbbb, 0xaeb5920b, 0x1c954e1b, 0x21f567ab, 0x66551d7b, 0x5b3534cb,
0xd965a31a, 0xe4058aaa, 0xa3a5f07a, 0x9ec5d9ca, 0x2ce505da, 0x11852c6a, 0x562556ba, 0x6b457f0a,
0x89f57f59, 0xb49556e9, 0xf3352c39, 0xce550589, 0x7c75d999, 0x4115f029, 0x06b58af9, 0x3bd5a349,
0xb9853498, 0x84e51d28, 0xc34567f8, 0xfe254e48, 0x4c059258, 0x7165bbe8, 0x36c5c138, 0x0ba5e888,
0x28d4c7df, 0x15b4ee6f, 0x521494bf, 0x6f74bd0f, 0xdd54611f, 0xe03448af, 0xa794327f, 0x9af41bcf,
0x18a48c1e, 0x25c4a5ae, 0x6264df7e, 0x5f04f6ce, 0xed242ade, 0xd044036e, 0x97e479be, 0xaa84500e,
0x4834505d, 0x755479ed, 0x32f4033d, 0x0f942a8d, 0xbdb4f69d, 0x80d4df2d, 0xc774a5fd, 0xfa148c4d,
0x78441b9c, 0x4524322c, 0x028448fc, 0x3fe4614c, 0x8dc4bd5c, 0xb0a494ec, 0xf704ee3c, 0xca64c78c,
},
{
0x00000000, 0xcb5cd3a5, 0x4dc8a10b, 0x869472ae, 0x9b914216, 0x50cd91b3, 0xd659e31d, 0x1d0530b8,
0xec53826d, 0x270f51c8, 0xa19b2366, 0x6ac7f0c3, 0x77c2c07b, 0xbc9e13de, 0x3a0a6170, 0xf156b2d5,
0x03d6029b, 0xc88ad13e, 0x4e1ea390, 0x85427035, 0x9847408d, 0x531b9328, 0xd58fe186, 0x1ed33223,
0xef8580f6, 0x24d95353, 0xa24d21fd, 0x6911f258, 0x7414c2e0, 0xbf481145, 0x39dc63eb, 0xf280b04e,
0x07ac0536, 0xccf0d693, 0x4a64a43d, 0x81387798, 0x9c3d4720, 0x57619485, 0xd1f5e62b, 0x1aa9358e,
0xebff875b, 0x20a354fe, 0xa6372650, 0x6d6bf5f5, 0x706ec54d, 0xbb3216e8, 0x3da66446, 0xf6fab7e3,
0x047a07ad, 0xcf26d408, 0x49b2a6a6, 0x82ee7503, 0x9feb45bb, 0x54b7961e, 0xd223e4b0, 0x197f3715,
0xe82985c0, 0x23755665, 0xa5e124cb, 0x6ebdf76e, 0x73b8c7d6, 0xb8e41473, 0x3e7066dd, 0xf52cb578,
0x0f580a6c, 0xc404d9c9, 0x4290ab67, 0x89cc78c2, 0x94c9487a, 0x5f959bdf, 0xd901e971, 0x125d3ad4,
0xe30b8801, 0x28575ba4, 0xaec3290a, 0x659ffaaf, 0x789aca17, 0xb3c619b2, 0x35526b1c, 0xfe0eb8b9,
0x0c8e08f7, 0xc7d2db52, 0x4146a9fc, 0x8a1a7a59, 0x971f4ae1, 0x5c439944, 0xdad7ebea, 0x118b384f,
0xe0dd8a9a, 0x2b81593f, 0xad152b91, 0x6649f834, 0x7b4cc88c, 0xb0101b29, 0x36846987, 0xfdd8ba22,
0x08f40f5a, 0xc3a8dcff, 0x453cae51, 0x8e607df4, 0x93654d4c, 0x58399ee9, 0xdeadec47, 0x15f13fe2,
0xe4a78d37, 0x2ffb5e92, 0xa96f2c3c, 0x6233ff99, 0x7f36cf21, 0xb46a1c84, 0x32fe6e2a, 0xf9a2bd8f,
0x0b220dc1, 0xc07ede64, 0x46eaacca, 0x8db67f6f, 0x90b34fd7, 0x5bef9c72, 0xdd7beedc, 0x16273d79,
0xe7718fac, 0x2c2d5c09, 0xaab92ea7, 0x61e5fd02, 0x7ce0cdba, 0xb7bc1e1f, 0x31286cb1, 0xfa74bf14,
0x1eb014d8, 0xd5ecc77d, 0x5378b5d3, 0x98246676, 0x852156ce, 0x4e7d856b, 0xc8e9f7c5, 0x03b52460,
0xf2e396b5, 0x39bf4510, 0xbf2b37be, 0x7477e41b, 0x6972d4a3, 0xa22e0706, 0x24ba75a8, 0xefe6a60d,
0x1d661643, 0xd63ac5e6, 0x50aeb748, 0x9bf264ed, 0x86f75455, 0x4dab87f0, 0xcb3ff55e, 0x006326fb,
0xf135942e, 0x3a69478b, 0xbcfd3525, 0x77a1e680, 0x6aa4d638, 0xa1f8059d, 0x276c7733, 0xec30a496,
0x191c11ee, 0xd240c24b, 0x54d4b0e5, 0x9f886340, 0x828d53f8, 0x49d1805d, 0xcf45f2f3, 0x04192156,
0xf54f9383, 0x3e134026, 0xb8873288, 0x73dbe12d, 0x6eded195, 0xa5820230, 0x2316709e, 0xe84aa33b,
0x1aca1375, 0xd196c0d0, 0x5702b27e, 0x9c5e61db, 0x815b5163, 0x4a0782c6, 0xcc93f068, 0x07cf23cd,
0xf6999118, 0x3dc542bd, 0xbb513013, 0x700de3b6, 0x6d08d30e, 0xa65400ab, 0x20c07205, 0xeb9ca1a0,
0x11e81eb4, 0xdab4cd11, 0x5c20bfbf, 0x977c6c1a, 0x8a795ca2, 0x41258f07, 0xc7b1fda9, 0x0ced2e0c,
0xfdbb9cd9, 0x36e74f7c, 0xb0733dd2, 0x7b2fee77, 0x662adecf, 0xad760d6a, 0x2be27fc4, 0xe0beac61,
0x123e1c2f, 0xd962cf8a, 0x5ff6bd24, 0x94aa6e81, 0x89af5e39, 0x42f38d9c, 0xc467ff32, 0x0f3b2c97,
0xfe6d9e42, 0x35314de7, 0xb3a53f49, 0x78f9ecec, 0x65fcdc54, 0xaea00ff1, 0x28347d5f, 0xe368aefa,
0x16441b82, 0xdd18c827, 0x5b8cba89, 0x90d0692c, 0x8dd55994, 0x46898a31, 0xc01df89f, 0x0b412b3a,
0xfa1799ef, 0x314b4a4a, 0xb7df38e4, 0x7c83eb41, 0x6186dbf9, 0xaada085c, 0x2c4e7af2, 0xe712a957,
0x15921919, 0xdececabc, 0x585ab812, 0x93066bb7, 0x8e035b0f, 0x455f88aa, 0xc3cbfa04, 0x089729a1,
0xf9c19b74, 0x329d48d1, 0xb4093a7f, 0x7f55e9da, 0x6250d962, 0xa90c0ac7, 0x2f987869, 0xe4c4abcc,
},
{
0x00000000, 0xa6770bb4, 0x979f1129, 0x31e81a9d, 0xf44f2413, 0x52382fa7, 0x63d0353a, 0xc5a73e8e,
0x33ef4e67, 0x959845d3, 0xa4705f4e, 0x020754fa, 0xc7a06a74, 0x61d761c0, 0x503f7b5d, 0xf64870e9,
0x67de9cce, 0xc1a9977a, 0xf0418de7, 0x56368653, 0x9391b8dd, 0x35e6b369, 0x040ea9f4, 0xa279a240,
0x5431d2a9, 0xf246d91d, 0xc3aec380, 0x65d9c834, 0xa07ef6ba, 0x0609fd0e, 0x37e1e793, 0x9196ec27,
0xcfbd399c, 0x69ca3228, 0x582228b5, 0xfe552301, 0x3bf21d8f, 0x9d85163b, 0xac6d0ca6, 0x0a1a0712,
0xfc5277fb, 0x5a257c4f, 0x6bcd66d2, 0xcdba6d66, 0x081d53e8, 0xae6a585c, 0x9f8242c1, 0x39f54975,
0xa863a552, 0x0e14aee6, 0x3ffcb47b, 0x998bbfcf, 0x5c2c8141, 0xfa5b8af5, 0xcbb39068, 0x6dc49bdc,
0x9b8ceb35, 0x3dfbe081, 0x0c13fa1c, 0xaa64f1a8, 0x6fc3cf26, 0xc9b4c492, 0xf85cde0f, 0x5e2bd5bb,
0x440b7579, 0xe27c7ecd, 0xd3946450, 0x75e36fe4, 0xb044516a, 0x16335ade, 0x27db4043, 0x81ac4bf7,
0x77e43b1e, 0xd19330aa, 0xe07b2a37, 0x460c2183, 0x83ab1f0d, 0x25dc14b9, 0x14340e24, 0xb2430590,
0x23d5e9b7, 0x85a2e203, 0xb44af89e, 0x123df32a, 0xd79acda4, 0x71edc610, 0x4005dc8d, 0xe672d739,
0x103aa7d0, 0xb64dac64, 0x87a5b6f9, 0x21d2bd4d, 0xe47583c3, 0x42028877, 0x73ea92ea, 0xd59d995e,
0x8bb64ce5, 0x2dc14751, 0x1c295dcc, 0xba5e5678, 0x7ff968f6, 0xd98e6342, 0xe86679df, 0x4e11726b,
0xb8590282, 0x1e2e0936, 0x2fc613ab, 0x89b1181f, 0x4c162691, 0xea612d25, 0xdb8937b8, 0x7dfe3c0c,
0xec68d02b, 0x4a1fdb9f, 0x7bf7c102, 0xdd80cab6, 0x1827f438, 0xbe50ff8c, 0x8fb8e511, 0x29cfeea5,
0xdf879e4c, 0x79f095f8, 0x48188f65, 0xee6f84d1, 0x2bc8ba5f, 0x8dbfb1eb, 0xbc57ab76, 0x1a20a0c2,
0x8816eaf2, 0x2e61e146, 0x1f89fbdb, 0xb9fef06f, 0x7c59cee1, 0xda2ec555, 0xebc6dfc8, 0x4db1d47c,
0xbbf9a495, 0x1d8eaf21, 0x2c66b5bc, 0x8a11be08, 0x4fb68086, 0xe9c18b32, 0xd82991af, 0x7e5e9a1b,
0xefc8763c, 0x49bf7d88, 0x78576715, 0xde206ca1, 0x1b87522f, 0xbdf0599b, 0x8c184306, 0x2a6f48b2,
0xdc27385b, 0x7a5033ef, 0x4bb82972, 0xedcf22c6, 0x28681c48, 0x8e1f17fc, 0xbff70d61, 0x198006d5,
0x47abd36e, 0xe1dcd8da, 0xd034c247, 0x7643c9f3, 0xb3e4f77d, 0x1593fcc9, 0x247be654, 0x820cede0,
0x74449d09, 0xd23396bd, 0xe3db8c20, 0x45ac8794, 0x800bb91a, 0x267cb2ae, 0x1794a833, 0xb1e3a387,
0x20754fa0, 0x86024414, 0xb7ea5e89, 0x119d553d, 0xd43a6bb3, 0x724d6007, 0x43a57a9a, 0xe5d2712e,
0x139a01c7, 0xb5ed0a73, 0x840510ee, 0x22721b5a, 0xe7d525d4, 0x41a22e60, 0x704a34fd, 0xd63d3f49,
0xcc1d9f8b, 0x6a6a943f, 0x5b828ea2, 0xfdf58516, 0x3852bb98, 0x9e25b02c, 0xafcdaab1, 0x09baa105,
0xfff2d1ec, 0x5985da58, 0x686dc0c5, 0xce1acb71, 0x0bbdf5ff, 0xadcafe4b, 0x9c22e4d6, 0x3a55ef62,
0xabc30345, 0x0db408f1, 0x3c5c126c, 0x9a2b19d8, 0x5f8c2756, 0xf9fb2ce2, 0xc813367f, 0x6e643dcb,
0x982c4d22, 0x3e5b4696, 0x0fb35c0b, 0xa9c457bf, 0x6c636931, 0xca146285, 0xfbfc7818, 0x5d8b73ac,
0x03a0a617, 0xa5d7ada3, 0x943fb73e, 0x3248bc8a, 0xf7ef8204, 0x519889b0, 0x6070932d, 0xc6079899,
0x304fe870, 0x9638e3c4, 0xa7d0f959, 0x01a7f2ed, 0xc400cc63, 0x6277c7d7, 0x539fdd4a, 0xf5e8d6fe,
0x647e3ad9, 0xc209316d, 0xf3e12bf0, 0x55962044, 0x90311eca, 0x3646157e, 0x07ae0fe3, 0xa1d90457,
0x579174be, 0xf1e67f0a, 0xc00e6597, 0x66796e23, 0xa3de50ad, 0x05a95b19, 0x34414184, 0x92364a30,
},
{
0x00000000, 0xccaa009e, 0x4225077d, 0x8e8f07e3, 0x844a0efa, 0x48e00e64, 0xc66f0987, 0x0ac50919,
0xd3e51bb5, 0x1f4f1b2b, 0x91c01cc8, 0x5d6a1c56, 0x57af154f, 0x9b0515d1, 0x158a1232, 0xd92012ac,
0x7cbb312b, 0xb01131b5, 0x3e9e3656, 0xf23436c8, 0xf8f13fd1, 0x345b3f4f, 0xbad438ac, 0x767e3832,
0xaf5e2a9e, 0x63f42a00, 0xed7b2de3, 0x21d12d7d, 0x2b142464, 0xe7be24fa, 0x69312319, 0xa59b2387,
0xf9766256, 0x35dc62c8, 0xbb53652b, 0x77f965b5, 0x7d3c6cac, 0xb1966c32, 0x3f196bd1, 0xf3b36b4f,
0x2a9379e3, 0xe639797d, 0x68b67e9e, 0xa41c7e00, 0xaed97719, 0x62737787, 0xecfc7064, 0x205670fa,
0x85cd537d, 0x496753e3, 0xc7e85400, 0x0b42549e, 0x01875d87, 0xcd2d5d19, 0x43a25afa, 0x8f085a64,
0x562848c8, 0x9a824856, 0x140d4fb5, 0xd8a74f2b, 0xd2624632, 0x1ec846ac, 0x9047414f, 0x5ced41d1,
0x299dc2ed, 0xe537c273, 0x6bb8c590, 0xa712c50e, 0xadd7cc17, 0x617dcc89, 0xeff2cb6a, 0x2358cbf4,
0xfa78d958, 0x36d2d9c6, 0xb85dde25, 0x74f7debb, 0x7e32d7a2, 0xb298d73c, 0x3c17d0df, 0xf0bdd041,
0x5526f3c6, 0x998cf358, 0x1703f4bb, 0xdba9f425, 0xd16cfd3c, 0x1dc6fda2, 0x9349fa41, 0x5fe3fadf,
0x86c3e873, 0x4a69e8ed, 0xc4e6ef0e, 0x084cef90, 0x0289e689, 0xce23e617, 0x40ace1f4, 0x8c06e16a,
0xd0eba0bb, 0x1c41a025, 0x92cea7c6, 0x5e64a758, 0x54a1ae41, 0x980baedf, 0x1684a93c, 0xda2ea9a2,
0x030ebb0e, 0xcfa4bb90, 0x412bbc73, 0x8d81bced, 0x8744b5f4, 0x4beeb56a, 0xc561b289, 0x09cbb217,
0xac509190, 0x60fa910e, 0xee7596ed, 0x22df9673, 0x281a9f6a, 0xe4b09ff4, 0x6a3f9817, 0xa6959889,
0x7fb58a25, 0xb31f8abb, 0x3d908d58, 0xf13a8dc6, 0xfbff84df, 0x37558441, 0xb9da83a2, 0x7570833c,
0x533b85da, 0x9f918544, 0x111e82a7, 0xddb48239, 0xd7718b20, 0x1bdb8bbe, 0x95548c5d, 0x59fe8cc3,
0x80de9e6f, 0x4c749ef1, 0xc2fb9912, 0x0e51998c, 0x04949095, 0xc83e900b, 0x46b197e8, 0x8a1b9776,
0x2f80b4f1, 0xe32ab46f, 0x6da5b38c, 0xa10fb312, 0xabcaba0b, 0x6760ba95, 0xe9efbd76, 0x2545bde8,
0xfc65af44, 0x30cfafda, 0xbe40a839, 0x72eaa8a7, 0x782fa1be, 0xb485a120, 0x3a0aa6c3, 0xf6a0a65d,
0xaa4de78c, 0x66e7e712, 0xe868e0f1, 0x24c2e06f, 0x2e07e976, 0xe2ade9e8, 0x6c22ee0b, 0xa088ee95,
0x79a8fc39, 0xb502fca7, 0x3b8dfb44, 0xf727fbda, 0xfde2f2c3, 0x3148f25d, 0xbfc7f5be, 0x736df520,
0xd6f6d6a7, 0x1a5cd639, 0x94d3d1da, 0x5879d144, 0x52bcd85d, 0x9e16d8c3, 0x1099df20, 0xdc33dfbe,
0x0513cd12, 0xc9b9cd8c, 0x4736ca6f, 0x8b9ccaf1, 0x8159c3e8, 0x4df3c376, 0xc37cc495, 0x0fd6c40b,
0x7aa64737, 0xb60c47a9, 0x3883404a, 0xf42940d4, 0xfeec49cd, 0x32464953, 0xbcc94eb0, 0x70634e2e,
0xa9435c82, 0x65e95c1c, 0xeb665bff, 0x27cc5b61, 0x2d095278, 0xe1a352e6, 0x6f2c5505, 0xa386559b,
0x061d761c, 0xcab77682, 0x44387161, 0x889271ff, 0x825778e6, 0x4efd7878, 0xc0727f9b, 0x0cd87f05,
0xd5f86da9, 0x19526d37, 0x97dd6ad4, 0x5b776a4a, 0x51b26353, 0x9d1863cd, 0x1397642e, 0xdf3d64b0,
0x83d02561, 0x4f7a25ff, 0xc1f5221c, 0x0d5f2282, 0x079a2b9b, 0xcb302b05, 0x45bf2ce6, 0x89152c78,
0x50353ed4, 0x9c9f3e4a, 0x121039a9, 0xdeba3937, 0xd47f302e, 0x18d530b0, 0x965a3753, 0x5af037cd,
0xff6b144a, 0x33c114d4, 0xbd4e1337, 0x71e413a9, 0x7b211ab0, 0xb78b1a2e, 0x39041dcd, 0xf5ae1d53,
0x2c8e0fff, 0xe0240f61, 0x6eab0882, 0xa201081c, 0xa8c40105, 0x646e019b, 0xeae10678, 0x264b06e6,
},
};
/*
@(optimization_mode="speed")
crc32 :: proc(data: []byte, seed := u32(0)) -> u32 {
result := ~u32(seed);
#no_bounds_check for b in data {
result = result>>8 ~ _crc32_table[(result ~ u32(b)) & 0xff];
}
return ~result;
}
@private _crc32_table := [256]u32{
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d,
};
*/

View File

@@ -1,17 +1,52 @@
package hash
import "core:mem"
import "intrinsics"
@(optimization_mode="speed")
adler32 :: proc(data: []byte, seed := u32(1)) -> u32 #no_bounds_check {
adler32 :: proc(data: []byte, seed := u32(1)) -> u32 {
ADLER_CONST :: 65521;
a, b: u32 = seed & 0xFFFF, seed >> 16;
for x in data {
a = (a + u32(x)) % ADLER_CONST;
b = (b + a) % ADLER_CONST;
buffer := raw_data(data);
a, b: u64 = u64(seed) & 0xFFFF, u64(seed) >> 16;
buf := data[:];
for len(buf) != 0 && uintptr(buffer) & 7 != 0 {
a = (a + u64(buf[0]));
b = (b + a);
buffer = intrinsics.ptr_offset(buffer, 1);
buf = buf[1:];
}
return (b << 16) | a;
for len(buf) > 7 {
count := min(len(buf), 5552);
for count > 7 {
a += u64(buf[0]); b += a;
a += u64(buf[1]); b += a;
a += u64(buf[2]); b += a;
a += u64(buf[3]); b += a;
a += u64(buf[4]); b += a;
a += u64(buf[5]); b += a;
a += u64(buf[6]); b += a;
a += u64(buf[7]); b += a;
buf = buf[8:];
count -= 8;
}
a %= ADLER_CONST;
b %= ADLER_CONST;
}
for len(buf) != 0 {
a = (a + u64(buf[0])) % ADLER_CONST;
b = (b + a) % ADLER_CONST;
buf = buf[1:];
}
return (u32(b) << 16) | u32(a);
}
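// Editorial note, not part of the original diff: the rewrite above defers the % ADLER_CONST
// reduction to once per block of at most 5552 bytes (zlib's conventional NMAX) instead of once per
// byte, while the u64 accumulators keep the unreduced sums from overflowing. A minimal usage
// sketch; the procedure name is hypothetical.
adler32_streaming_example :: proc() -> u32 {
	data     := []byte{'h', 'e', 'l', 'l', 'o'};
	whole    := adler32(data);                           // seed defaults to 1, per the Adler-32 definition
	streamed := adler32(data[2:], adler32(data[:2]));    // pass the running sum as the seed
	assert(streamed == whole);
	return whole;
}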
@(optimization_mode="speed")
djb2 :: proc(data: []byte) -> u32 {
hash: u32 = 5381;
for b in data {
@@ -20,6 +55,7 @@ djb2 :: proc(data: []byte) -> u32 {
return hash;
}
@(optimization_mode="speed")
fnv32 :: proc(data: []byte) -> u32 {
h: u32 = 0x811c9dc5;
for b in data {
@@ -28,6 +64,7 @@ fnv32 :: proc(data: []byte) -> u32 {
return h;
}
@(optimization_mode="speed")
fnv64 :: proc(data: []byte) -> u64 {
h: u64 = 0xcbf29ce484222325;
for b in data {
@@ -36,6 +73,7 @@ fnv64 :: proc(data: []byte) -> u64 {
return h;
}
@(optimization_mode="speed")
fnv32a :: proc(data: []byte) -> u32 {
h: u32 = 0x811c9dc5;
for b in data {
@@ -44,6 +82,7 @@ fnv32a :: proc(data: []byte) -> u32 {
return h;
}
@(optimization_mode="speed")
fnv64a :: proc(data: []byte) -> u64 {
h: u64 = 0xcbf29ce484222325;
for b in data {
@@ -52,6 +91,7 @@ fnv64a :: proc(data: []byte) -> u64 {
return h;
}
@(optimization_mode="speed")
jenkins :: proc(data: []byte) -> u32 {
hash: u32 = 0;
for b in data {
@@ -65,6 +105,7 @@ jenkins :: proc(data: []byte) -> u32 {
return hash;
}
@(optimization_mode="speed")
murmur32 :: proc(data: []byte) -> u32 {
c1_32: u32 : 0xcc9e2d51;
c2_32: u32 : 0x1b873593;
@@ -114,6 +155,7 @@ murmur32 :: proc(data: []byte) -> u32 {
return h1;
}
@(optimization_mode="speed")
murmur64 :: proc(data: []byte) -> u64 {
SEED :: 0x9747b28c;
@@ -219,7 +261,7 @@ murmur64 :: proc(data: []byte) -> u64 {
}
}
@(optimization_mode="speed")
sdbm :: proc(data: []byte) -> u32 {
hash: u32 = 0;
for b in data {

View File

@@ -9,12 +9,12 @@ package png
Jeroen van Rijn: Initial implementation.
Ginger Bill: Cosmetic changes.
An example of how to use `png.load`.
An example of how to use `load`.
*/
import "core:compress"
import "core:image"
import "core:image/png"
// import "core:image/png"
import "core:bytes"
import "core:fmt"
@@ -23,41 +23,57 @@ import "core:mem"
import "core:os"
main :: proc() {
track := mem.Tracking_Allocator{};
mem.tracking_allocator_init(&track, context.allocator);
context.allocator = mem.tracking_allocator(&track);
demo();
if len(track.allocation_map) > 0 {
fmt.println("Leaks:");
for _, v in track.allocation_map {
fmt.printf("\t%v\n\n", v);
}
}
}
demo :: proc() {
file: string;
options := image.Options{.return_metadata};
options := image.Options{}; // {.return_metadata};
err: compress.Error;
img: ^image.Image;
file = "../../../misc/logo-slim.png";
img, err = png.load(file, options);
defer png.destroy(img);
img, err = load(file, options);
defer destroy(img);
if err != nil {
fmt.printf("Trying to read PNG file %v returned %v\n", file, err);
} else {
v: ^png.Info;
v: ^Info;
fmt.printf("Image: %vx%vx%v, %v-bit.\n", img.width, img.height, img.channels, img.depth);
if img.metadata_ptr != nil && img.metadata_type == Info {
v = (^Info)(img.metadata_ptr);
if img.metadata_ptr != nil && img.metadata_type == png.Info {
v = (^png.Info)(img.metadata_ptr);
// Handle ancillary chunks as you wish.
// We provide helper functions for a few types.
for c in v.chunks {
#partial switch c.header.type {
case .tIME:
t, _ := png.core_time(c);
t, _ := core_time(c);
fmt.printf("[tIME]: %v\n", t);
case .gAMA:
fmt.printf("[gAMA]: %v\n", png.gamma(c));
fmt.printf("[gAMA]: %v\n", gamma(c));
case .pHYs:
phys := png.phys(c);
phys := phys(c);
if phys.unit == .Meter {
xm := f32(img.width) / f32(phys.ppu_x);
ym := f32(img.height) / f32(phys.ppu_y);
dpi_x, dpi_y := png.phys_to_dpi(phys);
dpi_x, dpi_y := phys_to_dpi(phys);
fmt.printf("[pHYs] Image resolution is %v x %v pixels per meter.\n", phys.ppu_x, phys.ppu_y);
fmt.printf("[pHYs] Image resolution is %v x %v DPI.\n", dpi_x, dpi_y);
fmt.printf("[pHYs] Image dimensions are %v x %v meters.\n", xm, ym);
@@ -65,7 +81,7 @@ main :: proc() {
fmt.printf("[pHYs] x: %v, y: %v pixels per unknown unit.\n", phys.ppu_x, phys.ppu_y);
}
case .iTXt, .zTXt, .tEXt:
res, ok_text := png.text(c);
res, ok_text := text(c);
if ok_text {
if c.header.type == .iTXt {
fmt.printf("[iTXt] %v (%v:%v): %v\n", res.keyword, res.language, res.keyword_localized, res.text);
@@ -73,11 +89,11 @@ main :: proc() {
fmt.printf("[tEXt/zTXt] %v: %v\n", res.keyword, res.text);
}
}
defer png.text_destroy(res);
defer text_destroy(res);
case .bKGD:
fmt.printf("[bKGD] %v\n", img.background);
case .eXIf:
res, ok_exif := png.exif(c);
res, ok_exif := exif(c);
if ok_exif {
/*
Other than checking the signature and byte order, we don't handle Exif data.
@@ -86,45 +102,45 @@ main :: proc() {
fmt.printf("[eXIf] %v\n", res);
}
case .PLTE:
plte, plte_ok := png.plte(c);
plte, plte_ok := plte(c);
if plte_ok {
fmt.printf("[PLTE] %v\n", plte);
} else {
fmt.printf("[PLTE] Error\n");
}
case .hIST:
res, ok_hist := png.hist(c);
res, ok_hist := hist(c);
if ok_hist {
fmt.printf("[hIST] %v\n", res);
}
case .cHRM:
res, ok_chrm := png.chrm(c);
res, ok_chrm := chrm(c);
if ok_chrm {
fmt.printf("[cHRM] %v\n", res);
}
case .sPLT:
res, ok_splt := png.splt(c);
res, ok_splt := splt(c);
if ok_splt {
fmt.printf("[sPLT] %v\n", res);
}
png.splt_destroy(res);
splt_destroy(res);
case .sBIT:
if res, ok_sbit := png.sbit(c); ok_sbit {
if res, ok_sbit := sbit(c); ok_sbit {
fmt.printf("[sBIT] %v\n", res);
}
case .iCCP:
res, ok_iccp := png.iccp(c);
res, ok_iccp := iccp(c);
if ok_iccp {
fmt.printf("[iCCP] %v\n", res);
}
png.iccp_destroy(res);
iccp_destroy(res);
case .sRGB:
if res, ok_srgb := png.srgb(c); ok_srgb {
if res, ok_srgb := srgb(c); ok_srgb {
fmt.printf("[sRGB] Rendering intent: %v\n", res);
}
case:
type := c.header.type;
name := png.chunk_type_to_name(&type);
name := chunk_type_to_name(&type);
fmt.printf("[%v]: %v\n", name, c.data);
}
}

View File

@@ -245,24 +245,22 @@ ADAM7_Y_SPACING := []int{ 8,8,8,4,4,2,2 };
// Implementation starts here
read_chunk :: proc(ctx: ^compress.Context) -> (chunk: Chunk, err: Error) {
read_chunk :: proc(ctx: ^$C) -> (chunk: Chunk, err: Error) {
ch, e := compress.read_data(ctx, Chunk_Header);
if e != .None {
return {}, E_General.Stream_Too_Short;
}
chunk.header = ch;
data := make([]u8, ch.length, context.temp_allocator);
_, e2 := ctx.input->impl_read(data);
if e2 != .None {
chunk.data, e = compress.read_slice(ctx, int(ch.length));
if e != .None {
return {}, E_General.Stream_Too_Short;
}
chunk.data = data;
// Compute CRC over chunk type + data
type := (^[4]byte)(&ch.type)^;
computed_crc := hash.crc32(type[:]);
computed_crc = hash.crc32(data, computed_crc);
computed_crc = hash.crc32(chunk.data, computed_crc);
crc, e3 := compress.read_data(ctx, u32be);
if e3 != .None {
@@ -276,7 +274,7 @@ read_chunk :: proc(ctx: ^compress.Context) -> (chunk: Chunk, err: Error) {
return chunk, nil;
}
read_header :: proc(ctx: ^compress.Context) -> (IHDR, Error) {
read_header :: proc(ctx: ^$C) -> (IHDR, Error) {
c, e := read_chunk(ctx);
if e != nil {
return {}, e;
@@ -355,16 +353,16 @@ chunk_type_to_name :: proc(type: ^Chunk_Type) -> string {
}
load_from_slice :: proc(slice: []u8, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
r := bytes.Reader{};
bytes.reader_init(&r, slice);
stream := bytes.reader_to_stream(&r);
ctx := &compress.Context_Memory_Input{
input_data = slice,
};
/*
TODO: Add a flag to tell the PNG loader that the stream is backed by a slice.
This way the stream reader could avoid the copy into the temp memory returned by it,
and instead return a slice into the original memory that's already owned by the caller.
*/
img, err = load_from_stream(stream, options, allocator);
img, err = load_from_context(ctx, options, allocator);
return img, err;
}
@@ -382,7 +380,7 @@ load_from_file :: proc(filename: string, options := Options{}, allocator := cont
}
}
load_from_stream :: proc(stream: io.Stream, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
load_from_context :: proc(ctx: ^$C, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
options := options;
if .info in options {
options |= {.return_metadata, .do_not_decompress_image};
@@ -405,10 +403,6 @@ load_from_stream :: proc(stream: io.Stream, options := Options{}, allocator := c
img.metadata_ptr = info;
img.metadata_type = typeid_of(Info);
ctx := &compress.Context{
input = stream,
};
signature, io_error := compress.read_data(ctx, Signature);
if io_error != .None || signature != .PNG {
return img, E_PNG.Invalid_PNG_Signature;
@@ -674,39 +668,41 @@ load_from_stream :: proc(stream: io.Stream, options := Options{}, allocator := c
return img, E_PNG.IDAT_Missing;
}
/*
Calculate the expected output size, to help `inflate` make better decisions about the output buffer.
We'll also use it to check the returned buffer size is what we expected it to be.
Let's calculate the expected size of the IDAT based on its dimensions, and whether or not it's interlaced.
*/
expected_size: int;
if header.interlace_method != .Adam7 {
expected_size = compute_buffer_size(int(header.width), int(header.height), int(img.channels), int(header.bit_depth), 1);
} else {
/*
Because Adam7 divides the image up into sub-images, and each scanline must start
with a filter byte, Adam7 interlaced images can have a larger raw size.
*/
for p := 0; p < 7; p += 1 {
x := (int(header.width) - ADAM7_X_ORIG[p] + ADAM7_X_SPACING[p] - 1) / ADAM7_X_SPACING[p];
y := (int(header.height) - ADAM7_Y_ORIG[p] + ADAM7_Y_SPACING[p] - 1) / ADAM7_Y_SPACING[p];
if x > 0 && y > 0 {
expected_size += compute_buffer_size(int(x), int(y), int(img.channels), int(header.bit_depth), 1);
}
}
}
buf: bytes.Buffer;
zlib_error := zlib.inflate(idat, &buf);
zlib_error := zlib.inflate(idat, &buf, false, expected_size);
defer bytes.buffer_destroy(&buf);
if zlib_error != nil {
return {}, zlib_error;
} else {
/*
Let's calcalate the expected size of the IDAT based on its dimensions,
and whether or not it's interlaced
*/
expected_size: int;
buf_len := len(buf.buf);
}
if header.interlace_method != .Adam7 {
expected_size = compute_buffer_size(int(header.width), int(header.height), int(img.channels), int(header.bit_depth), 1);
} else {
/*
Because Adam7 divides the image up into sub-images, and each scanline must start
with a filter byte, Adam7 interlaced images can have a larger raw size.
*/
for p := 0; p < 7; p += 1 {
x := (int(header.width) - ADAM7_X_ORIG[p] + ADAM7_X_SPACING[p] - 1) / ADAM7_X_SPACING[p];
y := (int(header.height) - ADAM7_Y_ORIG[p] + ADAM7_Y_SPACING[p] - 1) / ADAM7_Y_SPACING[p];
if x > 0 && y > 0 {
expected_size += compute_buffer_size(int(x), int(y), int(img.channels), int(header.bit_depth), 1);
}
}
}
if expected_size != buf_len {
return {}, E_PNG.IDAT_Corrupt;
}
buf_len := len(buf.buf);
if expected_size != buf_len {
return {}, E_PNG.IDAT_Corrupt;
}
/*
@@ -1657,4 +1653,4 @@ defilter :: proc(img: ^Image, filter_bytes: ^bytes.Buffer, header: ^IHDR, option
return nil;
}
load :: proc{load_from_file, load_from_slice, load_from_stream};
load :: proc{load_from_file, load_from_slice, load_from_context};

View File

@@ -41,7 +41,7 @@ scalar_dot :: proc(a, b: $T) -> T where IS_FLOAT(T), !IS_ARRAY(T) {
return a * b;
}
vector_dot :: proc(a, b: $T/[$N]$E) -> (c: E) where IS_NUMERIC(E) {
vector_dot :: proc(a, b: $T/[$N]$E) -> (c: E) where IS_NUMERIC(E) #no_bounds_check {
for i in 0..<N {
c += a[i] * b[i];
}
@@ -60,7 +60,7 @@ quaternion256_dot :: proc(a, b: $T/quaternion256) -> (c: f64) {
dot :: proc{scalar_dot, vector_dot, quaternion64_dot, quaternion128_dot, quaternion256_dot};
inner_product :: dot;
outer_product :: proc(a: $A/[$M]$E, b: $B/[$N]E) -> (out: [M][N]E) where IS_NUMERIC(E) {
outer_product :: proc(a: $A/[$M]$E, b: $B/[$N]E) -> (out: [M][N]E) where IS_NUMERIC(E) #no_bounds_check {
for i in 0..<M {
for j in 0..<N {
out[i][j] = a[i]*b[j];
@@ -156,7 +156,7 @@ projection :: proc(x, normal: $T/[$N]$E) -> T where IS_NUMERIC(E) {
return dot(x, normal) / dot(normal, normal) * normal;
}
identity :: proc($T: typeid/[$N][N]$E) -> (m: T) {
identity :: proc($T: typeid/[$N][N]$E) -> (m: T) #no_bounds_check {
for i in 0..<N {
m[i][i] = E(1);
}
@@ -170,8 +170,7 @@ trace :: proc(m: $T/[$N][N]$E) -> (tr: E) {
return;
}
transpose :: proc(a: $T/[$N][$M]$E) -> (m: [M][N]E) {
transpose :: proc(a: $T/[$N][$M]$E) -> (m: (T when N == M else [M][N]E)) #no_bounds_check {
for j in 0..<M {
for i in 0..<N {
m[j][i] = a[i][j];
@@ -181,8 +180,7 @@ transpose :: proc(a: $T/[$N][$M]$E) -> (m: [M][N]E) {
}
matrix_mul :: proc(a, b: $M/[$N][N]$E) -> (c: M)
where !IS_ARRAY(E),
IS_NUMERIC(E) {
where !IS_ARRAY(E), IS_NUMERIC(E) #no_bounds_check {
for i in 0..<N {
for k in 0..<N {
for j in 0..<N {
@@ -194,8 +192,7 @@ matrix_mul :: proc(a, b: $M/[$N][N]$E) -> (c: M)
}
matrix_comp_mul :: proc(a, b: $M/[$J][$I]$E) -> (c: M)
where !IS_ARRAY(E),
IS_NUMERIC(E) {
where !IS_ARRAY(E), IS_NUMERIC(E) #no_bounds_check {
for j in 0..<J {
for i in 0..<I {
c[j][i] = a[j][i] * b[j][i];
@@ -205,9 +202,7 @@ matrix_comp_mul :: proc(a, b: $M/[$J][$I]$E) -> (c: M)
}
matrix_mul_differ :: proc(a: $A/[$J][$I]$E, b: $B/[$K][J]E) -> (c: [K][I]E)
where !IS_ARRAY(E),
IS_NUMERIC(E),
I != K {
where !IS_ARRAY(E), IS_NUMERIC(E), I != K #no_bounds_check {
for k in 0..<K {
for j in 0..<J {
for i in 0..<I {
@@ -220,8 +215,7 @@ matrix_mul_differ :: proc(a: $A/[$J][$I]$E, b: $B/[$K][J]E) -> (c: [K][I]E)
matrix_mul_vector :: proc(a: $A/[$I][$J]$E, b: $B/[I]E) -> (c: B)
where !IS_ARRAY(E),
IS_NUMERIC(E) {
where !IS_ARRAY(E), IS_NUMERIC(E) #no_bounds_check {
for i in 0..<I {
for j in 0..<J {
c[j] += a[i][j] * b[i];
@@ -329,14 +323,14 @@ cubic :: proc(v1, v2, v3, v4: $T/[$N]$E, s: E) -> T {
array_cast :: proc(v: $A/[$N]$T, $Elem_Type: typeid) -> (w: [N]Elem_Type) {
array_cast :: proc(v: $A/[$N]$T, $Elem_Type: typeid) -> (w: [N]Elem_Type) #no_bounds_check {
for i in 0..<N {
w[i] = Elem_Type(v[i]);
}
return;
}
matrix_cast :: proc(v: $A/[$M][$N]$T, $Elem_Type: typeid) -> (w: [M][N]Elem_Type) {
matrix_cast :: proc(v: $A/[$M][$N]$T, $Elem_Type: typeid) -> (w: [M][N]Elem_Type) #no_bounds_check {
for i in 0..<M {
for j in 0..<N {
w[i][j] = Elem_Type(v[i][j]);

View File

@@ -57,11 +57,8 @@ link_error_delete :: proc(lerr: Maybe(Link_Error)) {
is_platform_error :: proc(ferr: Error) -> (err: i32, ok: bool) {
v: Platform_Error;
if v, ok = ferr.(Platform_Error); ok {
err = v.err;
}
return;
v := or_else(ferr.(Platform_Error), {});
return v.err, v.err != 0;
}

View File

@@ -13,11 +13,7 @@ error_to_io_error :: proc(ferr: Error) -> io.Error {
if ferr == nil {
return .None;
}
err, ok := ferr.(io.Error);
if !ok {
err = .Unknown;
}
return err;
return or_else(ferr.(io.Error), .Unknown);
}
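// Editorial sketch, not part of the original diff: both hunks above replace the manual
// "type-assert, then fall back to a default" pattern with the or_else builtin. The two forms
// below are assumed equivalent; the procedure and its parameter are hypothetical.
or_else_equivalence_example :: proc(v: union {int}) -> int {
	x, ok := v.(int);          // long form
	if !ok {
		x = -1;
	}
	y := or_else(v.(int), -1); // or_else form
	assert(x == y);
	return y;
}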

View File

@@ -427,7 +427,7 @@ typeid_base :: proc "contextless" (id: typeid) -> typeid {
return ti.id;
}
typeid_core :: proc "contextless" (id: typeid) -> typeid {
ti := type_info_base_without_enum(type_info_of(id));
ti := type_info_core(type_info_of(id));
return ti.id;
}
typeid_base_without_enum :: typeid_core;
@@ -492,6 +492,6 @@ default_assertion_failure_proc :: proc(prefix, message: string, loc: Source_Code
print_string(message);
}
print_byte('\n');
// debug_trap();
trap();
// intrinsics.debug_trap();
intrinsics.trap();
}

View File

@@ -270,11 +270,22 @@ reserve_map :: proc(m: ^$T/map[$K]$V, capacity: int) {
// The delete_key built-in procedure deletes the element with the specified key (m[key]) from the map.
// If m is nil, or there is no such element, this procedure is a no-op
@builtin
delete_key :: proc(m: ^$T/map[$K]$V, key: K) {
delete_key :: proc(m: ^$T/map[$K]$V, key: K) -> (deleted_key: K, deleted_value: V) {
if m != nil {
key := key;
__dynamic_map_delete_key(__get_map_header(m), __get_map_hash(&key));
h := __get_map_header(m);
hash := __get_map_hash(&key);
fr := __dynamic_map_find(h, hash);
if fr.entry_index >= 0 {
entry := __dynamic_map_get_entry(h, fr.entry_index);
deleted_key = (^K)(uintptr(entry)+h.key_offset)^;
deleted_value = (^V)(uintptr(entry)+h.value_offset)^;
__dynamic_map_erase(h, fr);
}
}
return;
}
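// Editorial usage sketch, not part of the original diff: delete_key now also returns the removed
// key and value, so callers can take ownership of (and, if needed, free) what was stored. The
// procedure and map contents below are hypothetical.
delete_key_example :: proc() {
	m := map[string]int{"a" = 1, "b" = 2};
	defer delete(m);
	k, v := delete_key(&m, "a"); // k == "a", v == 1; deleting a missing key is a no-op returning zero values
	_ = k; _ = v;
}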

View File

@@ -1,10 +1,12 @@
package slice
import "intrinsics"
import "builtin"
import "core:math/bits"
import "core:mem"
_ :: intrinsics;
_ :: builtin;
_ :: bits;
_ :: mem;
@@ -292,6 +294,28 @@ filter :: proc(s: $S/[]$U, f: proc(U) -> bool, allocator := context.allocator) -
min :: proc(s: $S/[]$T) -> (res: T, ok: bool) where intrinsics.type_is_ordered(T) #optional_ok {
if len(s) != 0 {
res = s[0];
ok = true;
for v in s[1:] {
res = builtin.min(res, v);
}
}
return;
}
max :: proc(s: $S/[]$T) -> (res: T, ok: bool) where intrinsics.type_is_ordered(T) #optional_ok {
if len(s) != 0 {
res = s[0];
ok = true;
for v in s[1:] {
res = builtin.max(res, v);
}
}
return;
}
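// Editorial usage sketch, not part of the original diff: min and max return an extra ok flag so
// that an empty slice can be told apart from one whose extreme happens to be the zero value;
// #optional_ok lets callers discard the flag. The procedure name is hypothetical.
min_max_example :: proc() {
	values := []int{3, -1, 7};
	lo, lo_ok  := min(values);      // -1, true
	hi         := max(values);      // 7, ok discarded via #optional_ok
	_, empty_ok := min([]int{});    // ok is false for an empty slice
	_ = lo; _ = lo_ok; _ = hi; _ = empty_ok;
}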
dot_product :: proc(a, b: $S/[]$T) -> T
where intrinsics.type_is_numeric(T) {
if len(a) != len(b) {

View File

@@ -5,6 +5,33 @@ _ :: intrinsics;
ORD :: intrinsics.type_is_ordered;
Ordering :: enum {
Less = -1,
Equal = 0,
Greater = +1,
}
cmp :: proc(a, b: $E) -> Ordering where ORD(E) {
switch {
case a < b:
return .Less;
case a > b:
return .Greater;
}
return .Equal;
}
cmp_proc :: proc($E: typeid) -> (proc(E, E) -> Ordering) where ORD(E) {
return proc(a, b: E) -> Ordering {
switch {
case a < b:
return .Less;
case a > b:
return .Greater;
}
return .Equal;
};
}
// sort sorts a slice
// This sort is not guaranteed to be stable
@@ -21,7 +48,15 @@ sort :: proc(data: $T/[]$E) where ORD(E) {
sort_by :: proc(data: $T/[]$E, less: proc(i, j: E) -> bool) {
when size_of(E) != 0 {
if n := len(data); n > 1 {
_quick_sort_proc(data, 0, n, _max_depth(n), less);
_quick_sort_less(data, 0, n, _max_depth(n), less);
}
}
}
sort_by_cmp :: proc(data: $T/[]$E, cmp: proc(i, j: E) -> Ordering) {
when size_of(E) != 0 {
if n := len(data); n > 1 {
_quick_sort_cmp(data, 0, n, _max_depth(n), cmp);
}
}
}
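// Editorial usage sketch, not part of the original diff: sort_by_cmp takes a three-way
// comparator, and cmp_proc(E) builds one for any ordered element type. The procedure name is
// hypothetical.
sort_by_cmp_example :: proc() {
	arr  := [3]int{3, 1, 2};
	data := arr[:];
	sort_by_cmp(data, cmp_proc(int));   // data is now {1, 2, 3}
	sort_by_cmp(data, proc(a, b: int) -> Ordering {
		return cmp(b, a);               // swapping the arguments sorts in descending order
	});
}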
@@ -44,6 +79,16 @@ is_sorted_by :: proc(array: $T/[]$E, less: proc(i, j: E) -> bool) -> bool {
return true;
}
is_sorted_cmp :: proc(array: $T/[]$E, cmp: proc(i, j: E) -> Ordering) -> bool {
for i := len(array)-1; i > 0; i -= 1 {
if cmp(array[i], array[i-1]) == .Less {
return false;
}
}
return true;
}
reverse_sort :: proc(data: $T/[]$E) where ORD(E) {
sort_by(data, proc(i, j: E) -> bool {
@@ -52,6 +97,23 @@ reverse_sort :: proc(data: $T/[]$E) where ORD(E) {
}
reverse_sort_by :: proc(data: $T/[]$E, less: proc(i, j: E) -> bool) where ORD(E) {
context._internal = rawptr(less);
sort_by(data, proc(i, j: E) -> bool {
k := (proc(i, j: E) -> bool)(context._internal);
return k(j, i);
});
}
reverse_sort_by_cmp :: proc(data: $T/[]$E, cmp: proc(i, j: E) -> Ordering) where ORD(E) {
context._internal = rawptr(cmp);
sort_by_cmp(data, proc(i, j: E) -> Ordering {
k := (proc(i, j: E) -> Ordering)(context._internal);
return k(j, i);
});
}
// TODO(bill): Should `sort_by_key` exist or is `sort_by` more than enough?
sort_by_key :: proc(data: $T/[]$E, key: proc(E) -> $K) where ORD(K) {
context._internal = rawptr(key);
@@ -250,7 +312,7 @@ _heap_sort :: proc(data: $T/[]$E, a, b: int) where ORD(E) {
@(private)
_quick_sort_proc :: proc(data: $T/[]$E, a, b, max_depth: int, less: proc(i, j: E) -> bool) {
_quick_sort_less :: proc(data: $T/[]$E, a, b, max_depth: int, less: proc(i, j: E) -> bool) {
median3 :: proc(data: T, m1, m0, m2: int, less: proc(i, j: E) -> bool) {
if less(data[m1], data[m0]) {
swap(data, m1, m0);
@@ -337,16 +399,16 @@ _quick_sort_proc :: proc(data: $T/[]$E, a, b, max_depth: int, less: proc(i, j: E
if b-a > 12 { // only use shell sort for lengths <= 12
if max_depth == 0 {
_heap_sort_proc(data, a, b, less);
_heap_sort_less(data, a, b, less);
return;
}
max_depth -= 1;
mlo, mhi := do_pivot(data, a, b, less);
if mlo-a < b-mhi {
_quick_sort_proc(data, a, mlo, max_depth, less);
_quick_sort_less(data, a, mlo, max_depth, less);
a = mhi;
} else {
_quick_sort_proc(data, mhi, b, max_depth, less);
_quick_sort_less(data, mhi, b, max_depth, less);
b = mlo;
}
}
@@ -357,12 +419,12 @@ _quick_sort_proc :: proc(data: $T/[]$E, a, b, max_depth: int, less: proc(i, j: E
swap(data, i, i-6);
}
}
_insertion_sort_proc(data, a, b, less);
_insertion_sort_less(data, a, b, less);
}
}
@(private)
_insertion_sort_proc :: proc(data: $T/[]$E, a, b: int, less: proc(i, j: E) -> bool) {
_insertion_sort_less :: proc(data: $T/[]$E, a, b: int, less: proc(i, j: E) -> bool) {
for i in a+1..<b {
for j := i; j > a && less(data[j], data[j-1]); j -= 1 {
swap(data, j, j-1);
@@ -371,7 +433,7 @@ _insertion_sort_proc :: proc(data: $T/[]$E, a, b: int, less: proc(i, j: E) -> bo
}
@(private)
_heap_sort_proc :: proc(data: $T/[]$E, a, b: int, less: proc(i, j: E) -> bool) {
_heap_sort_less :: proc(data: $T/[]$E, a, b: int, less: proc(i, j: E) -> bool) {
sift_down :: proc(data: T, lo, hi, first: int, less: proc(i, j: E) -> bool) {
root := lo;
for {
@@ -405,3 +467,162 @@ _heap_sort_proc :: proc(data: $T/[]$E, a, b: int, less: proc(i, j: E) -> bool) {
@(private)
_quick_sort_cmp :: proc(data: $T/[]$E, a, b, max_depth: int, cmp: proc(i, j: E) -> Ordering) {
median3 :: proc(data: T, m1, m0, m2: int, cmp: proc(i, j: E) -> Ordering) {
if cmp(data[m1], data[m0]) == .Less {
swap(data, m1, m0);
}
if cmp(data[m2], data[m1]) == .Less {
swap(data, m2, m1);
if cmp(data[m1], data[m0]) == .Less {
swap(data, m1, m0);
}
}
}
do_pivot :: proc(data: T, lo, hi: int, cmp: proc(i, j: E) -> Ordering) -> (midlo, midhi: int) {
m := int(uint(lo+hi)>>1);
if hi-lo > 40 {
s := (hi-lo)/8;
median3(data, lo, lo+s, lo+s*2, cmp);
median3(data, m, m-s, m+s, cmp);
median3(data, hi-1, hi-1-s, hi-1-s*2, cmp);
}
median3(data, lo, m, hi-1, cmp);
pivot := lo;
a, c := lo+1, hi-1;
for ; a < c && cmp(data[a], data[pivot]) == .Less; a += 1 {
}
b := a;
for {
for ; b < c && cmp(data[pivot], data[b]) >= .Equal; b += 1 { // data[b] <= pivot
}
for ; b < c && cmp(data[pivot], data[c-1]) == .Less; c -= 1 { // data[c-1] > pivot
}
if b >= c {
break;
}
swap(data, b, c-1);
b += 1;
c -= 1;
}
protect := hi-c < 5;
if !protect && hi-c < (hi-lo)/4 {
dups := 0;
if cmp(data[pivot], data[hi-1]) != .Less {
swap(data, c, hi-1);
c += 1;
dups += 1;
}
if cmp(data[b-1], data[pivot]) != .Less {
b -= 1;
dups += 1;
}
if cmp(data[m], data[pivot]) != .Less {
swap(data, m, b-1);
b -= 1;
dups += 1;
}
protect = dups > 1;
}
if protect {
for {
for ; a < b && cmp(data[b-1], data[pivot]) >= .Equal; b -= 1 {
}
for ; a < b && cmp(data[a], data[pivot]) == .Less; a += 1 {
}
if a >= b {
break;
}
swap(data, a, b-1);
a += 1;
b -= 1;
}
}
swap(data, pivot, b-1);
return b-1, c;
}
a, b, max_depth := a, b, max_depth;
if b-a > 12 { // only use shell sort for lengths <= 12
if max_depth == 0 {
_heap_sort_cmp(data, a, b, cmp);
return;
}
max_depth -= 1;
mlo, mhi := do_pivot(data, a, b, cmp);
if mlo-a < b-mhi {
_quick_sort_cmp(data, a, mlo, max_depth, cmp);
a = mhi;
} else {
_quick_sort_cmp(data, mhi, b, max_depth, cmp);
b = mlo;
}
}
if b-a > 1 {
// Shell sort with gap 6
for i in a+6..<b {
if cmp(data[i], data[i-6]) == .Less {
swap(data, i, i-6);
}
}
_insertion_sort_cmp(data, a, b, cmp);
}
}
@(private)
_insertion_sort_cmp :: proc(data: $T/[]$E, a, b: int, cmp: proc(i, j: E) -> Ordering) {
for i in a+1..<b {
for j := i; j > a && cmp(data[j], data[j-1]) == .Less; j -= 1 {
swap(data, j, j-1);
}
}
}
@(private)
_heap_sort_cmp :: proc(data: $T/[]$E, a, b: int, cmp: proc(i, j: E) -> Ordering) {
sift_down :: proc(data: T, lo, hi, first: int, cmp: proc(i, j: E) -> Ordering) {
root := lo;
for {
child := 2*root + 1;
if child >= hi {
break;
}
if child+1 < hi && cmp(data[first+child], data[first+child+1]) == .Less {
child += 1;
}
if cmp(data[first+root], data[first+child]) >= .Equal {
return;
}
swap(data, first+root, first+child);
root = child;
}
}
first, lo, hi := a, 0, b-a;
for i := (hi-1)/2; i >= 0; i -= 1 {
sift_down(data, i, hi, first, cmp);
}
for i := hi-1; i >= 0; i -= 1 {
swap(data, first, first+i);
sift_down(data, lo, i, first, cmp);
}
}
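The new cmp-based entry points mirror the existing less-based ones. A minimal usage sketch (editorial, not part of the commit), assuming core:slice exposes sort_by_cmp, is_sorted_cmp, reverse_sort_by_cmp and Ordering exactly as in the hunks above:

package sort_cmp_example

import "core:slice"

main :: proc() {
	// Named comparator returning the three-way Ordering used by the cmp variants.
	cmp :: proc(i, j: int) -> slice.Ordering {
		switch {
		case i < j: return .Less;
		case i > j: return .Greater;
		}
		return .Equal;
	}

	xs := [3]int{3, 1, 2};
	slice.sort_by_cmp(xs[:], cmp);           // ascending: 1, 2, 3
	assert(slice.is_sorted_cmp(xs[:], cmp));

	slice.reverse_sort_by_cmp(xs[:], cmp);   // descending: 3, 2, 1
}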

View File

@@ -1,6 +1,7 @@
package sort
import "core:mem"
import _slice "core:slice"
import "intrinsics"
_ :: intrinsics;
@@ -29,9 +30,11 @@ sort :: proc(it: Interface) {
}
@(deprecated="use slice.sort")
slice :: proc(array: $T/[]$E) where ORD(E) {
s := array;
sort(slice_interface(&s));
_slice.sort(array);
// s := array;
// sort(slice_interface(&s));
}
slice_interface :: proc(s: ^$T/[]$E) -> Interface where ORD(E) {
@@ -76,7 +79,10 @@ reverse_sort :: proc(it: Interface) {
sort(reverse_interface(&it));
}
@(deprecated="use slice.reverse")
reverse_slice :: proc(array: $T/[]$E) where ORD(E) {
_slice.reverse(array);
/*
s := array;
sort(Interface{
collection = rawptr(&s),
@@ -93,6 +99,7 @@ reverse_slice :: proc(array: $T/[]$E) where ORD(E) {
s[i], s[j] = s[j], s[i];
},
});
*/
}
@@ -678,55 +685,3 @@ compare_strings :: proc(a, b: string) -> int {
y := transmute(mem.Raw_String)b;
return mem.compare_byte_ptrs(x.data, y.data, min(x.len, y.len));
}
@(deprecated="use slice.binary_search")
binary_search :: proc(array: $A/[]$T, key: T) -> (index: int, found: bool)
where intrinsics.type_is_ordered(T) #no_bounds_check {
n := len(array);
switch n {
case 0:
return -1, false;
case 1:
if array[0] == key {
return 0, true;
}
return -1, false;
}
lo, hi := 0, n-1;
for array[hi] != array[lo] && key >= array[lo] && key <= array[hi] {
when intrinsics.type_is_ordered_numeric(T) {
// NOTE(bill): This is technically interpolation search
m := lo + int((key - array[lo]) * T(hi - lo) / (array[hi] - array[lo]));
} else {
m := (lo + hi)/2;
}
switch {
case array[m] < key:
lo = m + 1;
case key < array[m]:
hi = m - 1;
case:
return m, true;
}
}
if key == array[lo] {
return lo, true;
}
return -1, false;
}
@(deprecated="use slice.linear_search")
linear_search :: proc(array: $A/[]$T, key: T) -> (index: int, found: bool)
where intrinsics.type_is_comparable(T) #no_bounds_check {
for x, i in array {
if x == key {
return i, true;
}
}
return -1, false;
}
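Since these procedures now only forward to core:slice or are deprecated outright, a hedged migration sketch (assuming slice.binary_search and slice.linear_search keep the same (index, found) result shape as the procedures they replace):

package sort_migration_example

import "core:slice"

main :: proc() {
	xs := [5]int{9, 2, 7, 4, 1};

	slice.sort(xs[:]);                            // replaces sort.slice
	idx, found := slice.binary_search(xs[:], 7);  // replaces sort.binary_search
	assert(found && xs[idx] == 7);

	i, ok := slice.linear_search(xs[:], 4);       // replaces sort.linear_search
	assert(ok && xs[i] == 4);

	slice.reverse(xs[:]);                         // replaces sort.reverse_slice
}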

View File

@@ -21,6 +21,28 @@ reader_to_stream :: proc(r: ^Reader) -> (s: io.Stream) {
return;
}
to_reader :: proc(r: ^Reader, s: string) -> io.Reader {
reader_init(r, s);
rr, _ := io.to_reader(reader_to_stream(r));
return rr;
}
to_reader_at :: proc(r: ^Reader, s: string) -> io.Reader_At {
reader_init(r, s);
rr, _ := io.to_reader_at(reader_to_stream(r));
return rr;
}
to_byte_reader :: proc(r: ^Reader, s: string) -> io.Byte_Reader {
reader_init(r, s);
rr, _ := io.to_byte_reader(reader_to_stream(r));
return rr;
}
to_rune_reader :: proc(r: ^Reader, s: string) -> io.Rune_Reader {
reader_init(r, s);
rr, _ := io.to_rune_reader(reader_to_stream(r));
return rr;
}
reader_length :: proc(r: ^Reader) -> int {
if r.i >= i64(len(r.s)) {
return 0;
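A minimal usage sketch of the new conversion helpers (the string literal and buffer size are illustrative only):

package strings_reader_example

import "core:io"
import "core:strings"

main :: proc() {
	r: strings.Reader;
	rd := strings.to_reader(&r, "hellope");

	buf: [4]byte;
	n, _ := io.read(rd, buf[:]);
	assert(n == 4 && string(buf[:n]) == "hell");
}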

View File

@@ -68,12 +68,7 @@ Thread_Os_Specific :: struct {
thread_create :: proc(procedure: Thread_Proc) -> ^Thread {
__windows_thread_entry_proc :: proc "stdcall" (t_: rawptr) -> win32.DWORD {
t := (^Thread)(t_);
context = runtime.default_context();
c := context;
if ic, ok := t.init_context.?; ok {
c = ic;
}
context = c;
context = or_else(t.init_context.?, runtime.default_context());
t.procedure(t);

View File

@@ -39,7 +39,6 @@ Thread_Os_Specific :: struct #align 16 {
_create :: proc(procedure: Thread_Proc, priority := Thread_Priority.Normal) -> ^Thread {
__linux_thread_entry_proc :: proc "c" (t: rawptr) -> rawptr {
context = runtime.default_context();
t := (^Thread)(t);
sync.condition_wait_for(&t.start_gate);
sync.condition_destroy(&t.start_gate);
@@ -47,11 +46,7 @@ _create :: proc(procedure: Thread_Proc, priority := Thread_Priority.Normal) -> ^
t.start_gate = {};
t.start_mutex = {};
c := context;
if ic, ok := t.init_context.?; ok {
c = ic;
}
context = c;
context = or_else(t.init_context.?, runtime.default_context());
t.procedure(t);

View File

@@ -23,12 +23,7 @@ _create :: proc(procedure: Thread_Proc, priority := Thread_Priority.Normal) -> ^
__windows_thread_entry_proc :: proc "stdcall" (t_: rawptr) -> win32.DWORD {
t := (^Thread)(t_);
context = runtime.default_context();
c := context;
if ic, ok := t.init_context.?; ok {
c = ic;
}
context = c;
context = or_else(t.init_context.?, runtime.default_context());
t.procedure(t);

View File

@@ -1999,6 +1999,40 @@ relative_data_types :: proc() {
fmt.println(rel_slice[1]);
}
or_else_procedure :: proc() {
fmt.println("\n#'or_else'");
// IMPORTANT NOTE: 'or_else' is an experimental feature and subject to change/removal
{
// 'or_else' performs a value check similar to 'try', but instead of doing an
// early return, it supplies a default value to be used instead
m: map[string]int;
i: int;
ok: bool;
if i, ok = m["hellope"]; !ok {
i = 123;
}
// The above can be mapped to 'or_else'
i = or_else(m["hellope"], 123);
assert(i == 123);
}
{
// 'or_else' can be used with type assertions too, as they
// have optional ok semantics
v: union{int, f64};
i: int;
i = or_else(v.(int), 123);
i = or_else(v.?, 123); // Type inference magic
assert(i == 123);
m: Maybe(int);
i = or_else(m.?, 456);
assert(i == 456);
}
}
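The checker also promotes multi-valued call expressions (see check_try_split_types below), so 'or_else' should accept any call whose last result is a bool. A hedged sketch with a hypothetical find_user procedure:

find_user :: proc(id: int) -> (name: string, ok: bool) {
	if id == 1 {
		return "admin", true;
	}
	return "", false;
}

or_else_with_calls :: proc() {
	// find_user(2) fails, so the default is used instead.
	name := or_else(find_user(2), "anonymous");
	assert(name == "anonymous");
}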
main :: proc() {
when true {
the_basics();
@@ -2031,5 +2065,6 @@ main :: proc() {
union_maybe();
explicit_context_definition();
relative_data_types();
or_else_procedure();
}
}

View File

@@ -89,6 +89,19 @@ template <typename T>
Slice<T> slice_from_array(Array<T> const &a) {
return {a.data, a.count};
}
template <typename T>
Slice<T> slice_array(Array<T> const &array, isize lo, isize hi) {
GB_ASSERT(0 <= lo && lo <= hi && hi <= array.count);
Slice<T> out = {};
isize len = hi-lo;
if (len > 0) {
out.data = array.data+lo;
out.count = len;
}
return out;
}
template <typename T>
Slice<T> slice_clone(gbAllocator const &allocator, Slice<T> const &a) {
T *data = cast(T *)gb_alloc_copy_align(allocator, a.data, a.count*gb_size_of(T), gb_align_of(T));

View File

@@ -48,6 +48,70 @@ BuiltinTypeIsProc *builtin_type_is_procs[BuiltinProc__type_simple_boolean_end -
};
void check_try_split_types(CheckerContext *c, Operand *x, String const &name, Type **left_type_, Type **right_type_) {
Type *left_type = nullptr;
Type *right_type = nullptr;
if (x->type->kind == Type_Tuple) {
auto const &vars = x->type->Tuple.variables;
auto lhs = array_slice(vars, 0, vars.count-1);
auto rhs = vars[vars.count-1];
if (lhs.count == 1) {
left_type = lhs[0]->type;
} else if (lhs.count != 0) {
left_type = alloc_type_tuple();
left_type->Tuple.variables = array_make_from_ptr(lhs.data, lhs.count, lhs.count);
}
right_type = rhs->type;
} else {
check_promote_optional_ok(c, x, &left_type, &right_type);
}
if (left_type_) *left_type_ = left_type;
if (right_type_) *right_type_ = right_type;
if (!is_type_boolean(right_type)) {
gbString str = type_to_string(right_type);
error(x->expr, "'%.*s' expects an \"optional ok\" like value, got %s", LIT(name), str);
gb_string_free(str);
}
// if (!type_has_nil(right_type) && !is_type_boolean(right_type)) {
// gbString str = type_to_string(right_type);
// error(x->expr, "'%.*s' expects an \"optional ok\" like value, or an n-valued expression where the last value is either a boolean or can be compared against 'nil', got %s", LIT(name), str);
// gb_string_free(str);
// }
}
void check_try_expr_no_value_error(CheckerContext *c, String const &name, Operand const &x, Type *type_hint) {
// TODO(bill): better error message
gbString t = type_to_string(x.type);
error(x.expr, "'%.*s' does not return a value, value is of type %s", LIT(name), t);
if (is_type_union(type_deref(x.type))) {
Type *bsrc = base_type(type_deref(x.type));
gbString th = nullptr;
if (type_hint != nullptr) {
GB_ASSERT(bsrc->kind == Type_Union);
for_array(i, bsrc->Union.variants) {
Type *vt = bsrc->Union.variants[i];
if (are_types_identical(vt, type_hint)) {
th = type_to_string(type_hint);
break;
}
}
}
gbString expr_str = expr_to_string(x.expr);
if (th != nullptr) {
error_line("\tSuggestion: was a type assertion such as %s.(%s) or %s.? wanted?\n", expr_str, th, expr_str);
} else {
error_line("\tSuggestion: was a type assertion such as %s.(T) or %s.? wanted?\n", expr_str, expr_str);
}
gb_string_free(th);
gb_string_free(expr_str);
}
gb_string_free(t);
}
bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32 id, Type *type_hint) {
ast_node(ce, CallExpr, call);
@@ -86,6 +150,10 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
// NOTE(bill): The first arg may be a Type, this will be checked case by case
break;
case BuiltinProc_or_else:
// NOTE(bill): The arguments may be multi-expr
break;
case BuiltinProc_DIRECTIVE: {
ast_node(bd, BasicDirective, ce->proc);
String name = bd->name.string;
@@ -445,38 +513,82 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
case BuiltinProc_offset_of: {
// offset_of :: proc(value.field) -> uintptr
// offset_of :: proc(Type, field) -> uintptr
Operand op = {};
Type *bt = check_type(c, ce->args[0]);
Type *type = base_type(bt);
if (type == nullptr || type == t_invalid) {
error(ce->args[0], "Expected a type for 'offset_of'");
Type *type = nullptr;
Ast *field_arg = nullptr;
if (ce->args.count == 1) {
Ast *arg0 = unparen_expr(ce->args[0]);
if (arg0->kind != Ast_SelectorExpr) {
gbString x = expr_to_string(arg0);
error(ce->args[0], "Invalid expression for 'offset_of', '%s' is not a selector expression", x);
gb_string_free(x);
return false;
}
ast_node(se, SelectorExpr, arg0);
Operand x = {};
check_expr(c, &x, se->expr);
if (x.mode == Addressing_Invalid) {
return false;
}
type = type_deref(x.type);
Type *bt = base_type(type);
if (bt == nullptr || bt == t_invalid) {
error(ce->args[0], "Expected a type for 'offset_of'");
return false;
}
field_arg = unparen_expr(se->selector);
} else if (ce->args.count == 2) {
type = check_type(c, ce->args[0]);
Type *bt = base_type(type);
if (bt == nullptr || bt == t_invalid) {
error(ce->args[0], "Expected a type for 'offset_of'");
return false;
}
field_arg = unparen_expr(ce->args[1]);
} else {
error(ce->args[0], "Expected either 1 or 2 arguments to 'offset_of', in the format of 'offset_of(Type, field)', 'offset_of(value.field)'");
return false;
}
GB_ASSERT(type != nullptr);
Ast *field_arg = unparen_expr(ce->args[1]);
if (field_arg == nullptr ||
field_arg->kind != Ast_Ident) {
error(field_arg, "Expected an identifier for field argument");
return false;
}
if (is_type_array(type)) {
error(field_arg, "Invalid type for 'offset_of'");
gbString t = type_to_string(type);
error(field_arg, "Invalid a struct type for 'offset_of', got '%s'", t);
gb_string_free(t);
return false;
}
ast_node(arg, Ident, field_arg);
Selection sel = lookup_field(type, arg->token.string, operand->mode == Addressing_Type);
String field_name = arg->token.string;
Selection sel = lookup_field(type, field_name, false);
if (sel.entity == nullptr) {
gbString type_str = type_to_string(bt);
gbString type_str = type_to_string(type);
error(ce->args[0],
"'%s' has no field named '%.*s'", type_str, LIT(arg->token.string));
gb_string_free(type_str);
Type *bt = base_type(type);
if (bt->kind == Type_Struct) {
check_did_you_mean_type(arg->token.string, bt->Struct.fields);
}
return false;
}
if (sel.indirect) {
gbString type_str = type_to_string(bt);
gbString type_str = type_to_string(type);
error(ce->args[0],
"Field '%.*s' is embedded via a pointer in '%s'", LIT(arg->token.string), type_str);
gb_string_free(type_str);
@@ -486,7 +598,6 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
operand->mode = Addressing_Constant;
operand->value = exact_value_i64(type_offset_of_from_selection(type, sel));
operand->type = t_uintptr;
break;
}
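After this change 'offset_of' accepts either the existing (Type, field) pair or a single selector expression; a small sketch of both forms (Foo is illustrative):

Foo :: struct {
	a: u8,
	b: int,
}

offset_of_forms :: proc() {
	f: Foo;
	two_arg := offset_of(Foo, b); // existing form: offset_of(Type, field)
	one_arg := offset_of(f.b);    // new form: offset_of(value.field)
	assert(two_arg == one_arg);
}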
@@ -1675,6 +1786,46 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
break;
}
case BuiltinProc_or_else: {
GB_ASSERT(ce->args.count == 2);
Ast *arg = ce->args[0];
Ast *default_value = ce->args[1];
Operand x = {};
Operand y = {};
check_multi_expr_with_type_hint(c, &x, arg, type_hint);
if (x.mode == Addressing_Invalid) {
operand->mode = Addressing_Value;
operand->type = t_invalid;
return false;
}
check_multi_expr_with_type_hint(c, &y, default_value, x.type);
error_operand_no_value(&y);
if (y.mode == Addressing_Invalid) {
operand->mode = Addressing_Value;
operand->type = t_invalid;
return false;
}
Type *left_type = nullptr;
Type *right_type = nullptr;
check_try_split_types(c, &x, builtin_name, &left_type, &right_type);
add_type_and_value(&c->checker->info, arg, x.mode, x.type, x.value);
if (left_type != nullptr) {
check_assignment(c, &y, left_type, builtin_name);
} else {
check_try_expr_no_value_error(c, builtin_name, x, type_hint);
}
if (left_type == nullptr) {
left_type = t_invalid;
}
operand->mode = Addressing_Value;
operand->type = left_type;
return true;
}
case BuiltinProc_simd_vector: {
Operand x = {};
@@ -1783,7 +1934,7 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
Entity *new_field = alloc_entity_field(scope, token, array_type, false, cast(i32)i);
soa_struct->Struct.fields[i] = new_field;
add_entity(c->checker, scope, nullptr, new_field);
add_entity(c, scope, nullptr, new_field);
add_entity_use(c, nullptr, new_field);
}
@@ -1808,7 +1959,7 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
Type *array_type = alloc_type_array(old_field->type, count);
Entity *new_field = alloc_entity_field(scope, old_field->token, array_type, false, old_field->Variable.field_src_index);
soa_struct->Struct.fields[i] = new_field;
add_entity(c->checker, scope, nullptr, new_field);
add_entity(c, scope, nullptr, new_field);
} else {
soa_struct->Struct.fields[i] = old_field;
}
@@ -1820,7 +1971,7 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
Token token = {};
token.string = str_lit("Base_Type");
Entity *base_type_entity = alloc_entity_type_name(scope, token, elem, EntityState_Resolved);
add_entity(c->checker, scope, nullptr, base_type_entity);
add_entity(c, scope, nullptr, base_type_entity);
add_type_info_type(c, soa_struct);
@@ -2936,6 +3087,10 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
error(ce->args[0],
"'%s' has no field named '%.*s'", type_str, LIT(field_name));
gb_string_free(type_str);
if (bt->kind == Type_Struct) {
check_did_you_mean_type(field_name, bt->Struct.fields);
}
return false;
}
if (sel.indirect) {

View File

@@ -313,7 +313,7 @@ void check_type_decl(CheckerContext *ctx, Entity *e, Ast *init_expr, Type *def)
if (is_blank_ident(name)) {
continue;
}
add_entity(ctx->checker, parent, nullptr, f);
add_entity(ctx, parent, nullptr, f);
}
}
}
@@ -786,6 +786,7 @@ void check_proc_decl(CheckerContext *ctx, Entity *e, DeclInfo *d) {
GB_ASSERT(pl->body->kind == Ast_BlockStmt);
if (!pt->is_polymorphic) {
// check_procedure_now(ctx->checker, ctx->file, e->token, d, proc_type, pl->body, pl->tags);
check_procedure_later(ctx->checker, ctx->file, e->token, d, proc_type, pl->body, pl->tags);
}
} else if (!is_foreign) {
@@ -808,7 +809,9 @@ void check_proc_decl(CheckerContext *ctx, Entity *e, DeclInfo *d) {
if (ac.deferred_procedure.entity != nullptr) {
e->Procedure.deferred_procedure = ac.deferred_procedure;
gb_mutex_lock(&ctx->checker->procs_with_deferred_to_check_mutex);
array_add(&ctx->checker->procs_with_deferred_to_check, e);
gb_mutex_unlock(&ctx->checker->procs_with_deferred_to_check_mutex);
}
if (is_foreign) {

View File

@@ -48,8 +48,8 @@ struct CallArgumentData {
};
struct PolyProcData {
Entity * gen_entity;
ProcInfo proc_info;
Entity * gen_entity;
ProcInfo *proc_info;
};
struct ValidIndexAndScore {
@@ -73,6 +73,7 @@ typedef CALL_ARGUMENT_CHECKER(CallArgumentCheckerType);
void check_expr (CheckerContext *c, Operand *operand, Ast *expression);
void check_multi_expr (CheckerContext *c, Operand *operand, Ast *expression);
void check_multi_expr_or_type (CheckerContext *c, Operand *operand, Ast *expression);
void check_multi_expr_with_type_hint(CheckerContext *c, Operand *o, Ast *e, Type *type_hint);
void check_expr_or_type (CheckerContext *c, Operand *operand, Ast *expression, Type *type_hint);
ExprKind check_expr_base (CheckerContext *c, Operand *operand, Ast *expression, Type *type_hint);
void check_expr_with_type_hint (CheckerContext *c, Operand *o, Ast *e, Type *t);
@@ -111,6 +112,8 @@ Type *make_soa_struct_dynamic_array(CheckerContext *ctx, Ast *array_typ_expr, As
bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32 id, Type *type_hint);
void check_promote_optional_ok(CheckerContext *c, Operand *x, Type **val_type_, Type **ok_type_);
Entity *entity_from_expr(Ast *expr) {
expr = unparen_expr(expr);
switch (expr->kind) {
@@ -276,7 +279,7 @@ bool find_or_generate_polymorphic_procedure(CheckerContext *c, Entity *base_enti
});
CheckerInfo *info = c->info;
CheckerContext nctx = *c;
Scope *scope = create_scope(base_entity->scope);
@@ -291,7 +294,6 @@ bool find_or_generate_polymorphic_procedure(CheckerContext *c, Entity *base_enti
}
auto *pt = &src->Proc;
// NOTE(bill): This is slightly memory leaking if the type already exists
@@ -303,8 +305,13 @@ bool find_or_generate_polymorphic_procedure(CheckerContext *c, Entity *base_enti
return false;
}
auto *found_gen_procs = map_get(&nctx.info->gen_procs, hash_pointer(base_entity->identifier));
gb_mutex_lock(&info->gen_procs_mutex);
auto *found_gen_procs = map_get(&info->gen_procs, hash_pointer(base_entity->identifier));
gb_mutex_unlock(&info->gen_procs_mutex);
if (found_gen_procs) {
gb_mutex_lock(&info->gen_procs_mutex);
defer (gb_mutex_unlock(&info->gen_procs_mutex));
auto procs = *found_gen_procs;
for_array(i, procs) {
Entity *other = procs[i];
@@ -341,6 +348,9 @@ bool find_or_generate_polymorphic_procedure(CheckerContext *c, Entity *base_enti
}
if (found_gen_procs) {
gb_mutex_lock(&info->gen_procs_mutex);
defer (gb_mutex_unlock(&info->gen_procs_mutex));
auto procs = *found_gen_procs;
for_array(i, procs) {
Entity *other = procs[i];
@@ -400,23 +410,25 @@ bool find_or_generate_polymorphic_procedure(CheckerContext *c, Entity *base_enti
}
}
ProcInfo proc_info = {};
proc_info.file = file;
proc_info.token = token;
proc_info.decl = d;
proc_info.type = final_proc_type;
proc_info.body = pl->body;
proc_info.tags = tags;
proc_info.generated_from_polymorphic = true;
proc_info.poly_def_node = poly_def_node;
ProcInfo *proc_info = gb_alloc_item(permanent_allocator(), ProcInfo);
proc_info->file = file;
proc_info->token = token;
proc_info->decl = d;
proc_info->type = final_proc_type;
proc_info->body = pl->body;
proc_info->tags = tags;
proc_info->generated_from_polymorphic = true;
proc_info->poly_def_node = poly_def_node;
gb_mutex_lock(&info->gen_procs_mutex);
if (found_gen_procs) {
array_add(found_gen_procs, entity);
} else {
auto array = array_make<Entity *>(heap_allocator());
array_add(&array, entity);
map_set(&nctx.checker->info.gen_procs, hash_pointer(base_entity->identifier), array);
map_set(&info->gen_procs, hash_pointer(base_entity->identifier), array);
}
gb_mutex_unlock(&info->gen_procs_mutex);
GB_ASSERT(entity != nullptr);
@@ -2594,15 +2606,16 @@ void check_binary_expr(CheckerContext *c, Operand *x, Ast *node, Type *type_hint
case Token_in:
case Token_not_in:
{
// IMPORTANT NOTE(bill): This uses right-to-left evaluation in type checking only
check_expr(c, y, be->right);
Type *rhs_type = type_deref(y->type);
if (is_type_bit_set(y->type)) {
Type *elem = base_type(y->type)->BitSet.elem;
if (is_type_bit_set(rhs_type)) {
Type *elem = base_type(rhs_type)->BitSet.elem;
check_expr_with_type_hint(c, x, be->left, elem);
} else if (is_type_map(y->type)) {
Type *key = base_type(y->type)->Map.key;
} else if (is_type_map(rhs_type)) {
Type *key = base_type(rhs_type)->Map.key;
check_expr_with_type_hint(c, x, be->left, key);
} else {
check_expr(c, x, be->left);
@@ -2617,8 +2630,8 @@ void check_binary_expr(CheckerContext *c, Operand *x, Ast *node, Type *type_hint
return;
}
if (is_type_map(y->type)) {
Type *yt = base_type(y->type);
if (is_type_map(rhs_type)) {
Type *yt = base_type(rhs_type);
if (op.kind == Token_in) {
check_assignment(c, x, yt->Map.key, str_lit("map 'in'"));
} else {
@@ -2626,8 +2639,8 @@ void check_binary_expr(CheckerContext *c, Operand *x, Ast *node, Type *type_hint
}
add_package_dependency(c, "runtime", "__dynamic_map_get");
} else if (is_type_bit_set(y->type)) {
Type *yt = base_type(y->type);
} else if (is_type_bit_set(rhs_type)) {
Type *yt = base_type(rhs_type);
if (op.kind == Token_in) {
check_assignment(c, x, yt->BitSet.elem, str_lit("bit_set 'in'"));
@@ -2676,6 +2689,7 @@ void check_binary_expr(CheckerContext *c, Operand *x, Ast *node, Type *type_hint
x->expr = node;
return;
}
default:
if (is_ise_expr(be->left)) {
@@ -2884,8 +2898,8 @@ void check_binary_expr(CheckerContext *c, Operand *x, Ast *node, Type *type_hint
void update_expr_type(CheckerContext *c, Ast *e, Type *type, bool final) {
GB_ASSERT(e != nullptr);
ExprInfo *found = check_get_expr_info(&c->checker->info, e);
if (found == nullptr) {
ExprInfo *old = check_get_expr_info(&c->checker->info, e);
if (old == nullptr) {
if (type != nullptr && type != t_invalid) {
if (e->tav.type == nullptr || e->tav.type == t_invalid) {
add_type_and_value(&c->checker->info, e, e->tav.mode, type ? type : e->tav.type, e->tav.value);
@@ -2893,11 +2907,10 @@ void update_expr_type(CheckerContext *c, Ast *e, Type *type, bool final) {
}
return;
}
ExprInfo old = *found;
switch (e->kind) {
case_ast_node(ue, UnaryExpr, e);
if (old.value.kind != ExactValue_Invalid) {
if (old->value.kind != ExactValue_Invalid) {
// NOTE(bill): if 'e' is constant, the operands will be constant too.
// They don't need to be updated as they will be updated later and
// checked at the end of general checking stage.
@@ -2907,7 +2920,7 @@ void update_expr_type(CheckerContext *c, Ast *e, Type *type, bool final) {
case_end;
case_ast_node(be, BinaryExpr, e);
if (old.value.kind != ExactValue_Invalid) {
if (old->value.kind != ExactValue_Invalid) {
// See above note in UnaryExpr case
break;
}
@@ -2922,7 +2935,7 @@ void update_expr_type(CheckerContext *c, Ast *e, Type *type, bool final) {
case_end;
case_ast_node(te, TernaryIfExpr, e);
if (old.value.kind != ExactValue_Invalid) {
if (old->value.kind != ExactValue_Invalid) {
// See above note in UnaryExpr case
break;
}
@@ -2932,7 +2945,7 @@ void update_expr_type(CheckerContext *c, Ast *e, Type *type, bool final) {
case_end;
case_ast_node(te, TernaryWhenExpr, e);
if (old.value.kind != ExactValue_Invalid) {
if (old->value.kind != ExactValue_Invalid) {
// See above note in UnaryExpr case
break;
}
@@ -2947,15 +2960,14 @@ void update_expr_type(CheckerContext *c, Ast *e, Type *type, bool final) {
}
if (!final && is_type_untyped(type)) {
old.type = base_type(type);
check_set_expr_info(&c->checker->info, e, old);
old->type = base_type(type);
return;
}
// We need to remove it and then give it a new one
check_remove_expr_info(&c->checker->info, e);
if (old.is_lhs && !is_type_integer(type)) {
if (old->is_lhs && !is_type_integer(type)) {
gbString expr_str = expr_to_string(e);
gbString type_str = type_to_string(type);
error(e, "Shifted operand %s must be an integer, got %s", expr_str, type_str);
@@ -2964,7 +2976,7 @@ void update_expr_type(CheckerContext *c, Ast *e, Type *type, bool final) {
return;
}
add_type_and_value(&c->checker->info, e, old.mode, type, old.value);
add_type_and_value(&c->checker->info, e, old->mode, type, old->value);
}
void update_expr_value(CheckerContext *c, Ast *e, ExactValue value) {
@@ -3573,6 +3585,40 @@ ExactValue get_constant_field(CheckerContext *c, Operand const *operand, Selecti
if (success_) *success_ = true;
return empty_exact_value;
}
void check_did_you_mean_print(DidYouMeanAnswers *d) {
auto results = did_you_mean_results(d);
if (results.count != 0) {
error_line("\tSuggestion: Did you mean?\n");
for_array(i, results) {
String const &target = results[i].target;
error_line("\t\t%.*s\n", LIT(target));
// error_line("\t\t%.*s %td\n", LIT(target), results[i].distance);
}
}
}
void check_did_you_mean_type(String const &name, Array<Entity *> const &fields) {
DidYouMeanAnswers d = did_you_mean_make(heap_allocator(), fields.count, name);
defer (did_you_mean_destroy(&d));
for_array(i, fields) {
did_you_mean_append(&d, fields[i]->token.string);
}
check_did_you_mean_print(&d);
}
void check_did_you_mean_scope(String const &name, Scope *scope) {
DidYouMeanAnswers d = did_you_mean_make(heap_allocator(), scope->elements.entries.count, name);
defer (did_you_mean_destroy(&d));
for_array(i, scope->elements.entries) {
Entity *e = scope->elements.entries[i].value;
did_you_mean_append(&d, e->token.string);
}
check_did_you_mean_print(&d);
}
Entity *check_selector(CheckerContext *c, Operand *operand, Ast *node, Type *type_hint) {
ast_node(se, SelectorExpr, node);
@@ -3638,6 +3684,8 @@ Entity *check_selector(CheckerContext *c, Operand *operand, Ast *node, Type *typ
error(op_expr, "'%.*s' is not declared by '%.*s'", LIT(entity_name), LIT(import_name));
operand->mode = Addressing_Invalid;
operand->expr = node;
check_did_you_mean_scope(entity_name, import_scope);
return nullptr;
}
@@ -3785,7 +3833,9 @@ Entity *check_selector(CheckerContext *c, Operand *operand, Ast *node, Type *typ
Type *swizzle_array_type = nullptr;
Type *bth = base_type(type_hint);
if (bth != nullptr && bth->kind == Type_Array && bth->Array.count == index_count) {
if (bth != nullptr && bth->kind == Type_Array &&
bth->Array.count == index_count &&
are_types_identical(bth->Array.elem, array_type->Array.elem)) {
swizzle_array_type = type_hint;
} else {
swizzle_array_type = alloc_type_array(array_type->Array.elem, index_count);
@@ -3815,6 +3865,17 @@ Entity *check_selector(CheckerContext *c, Operand *operand, Ast *node, Type *typ
gbString type_str = type_to_string(operand->type);
gbString sel_str = expr_to_string(selector);
error(op_expr, "'%s' of type '%s' has no field '%s'", op_str, type_str, sel_str);
if (operand->type != nullptr && selector->kind == Ast_Ident) {
String const &name = selector->Ident.token.string;
Type *bt = base_type(operand->type);
if (bt->kind == Type_Struct) {
check_did_you_mean_type(name, bt->Struct.fields);
} else if (bt->kind == Type_Enum) {
check_did_you_mean_type(name, bt->Enum.fields);
}
}
gb_string_free(sel_str);
gb_string_free(type_str);
gb_string_free(op_str);
@@ -4045,26 +4106,7 @@ bool check_assignment_arguments(CheckerContext *ctx, Array<Operand> const &lhs,
val1.mode = Addressing_Value;
val1.type = t_untyped_bool;
if (expr->kind == Ast_CallExpr) {
Type *pt = base_type(type_of_expr(expr->CallExpr.proc));
if (is_type_proc(pt)) {
do_normal = false;
Type *tuple = pt->Proc.results;
add_type_and_value(&c->checker->info, o.expr, o.mode, tuple, o.value);
if (pt->Proc.result_count >= 2) {
Type *t1 = tuple->Tuple.variables[1]->type;
val1.type = t1;
}
expr->CallExpr.optional_ok_one = false;
}
}
if (do_normal) {
Type *tuple = make_optional_ok_type(o.type);
add_type_and_value(&c->checker->info, o.expr, o.mode, tuple, o.value);
}
check_promote_optional_ok(c, &o, nullptr, &val1.type);
if (expr->kind == Ast_TypeAssertion &&
(o.mode == Addressing_OptionalOk || o.mode == Addressing_OptionalOkPtr)) {
@@ -4170,26 +4212,7 @@ bool check_unpack_arguments(CheckerContext *ctx, Entity **lhs, isize lhs_count,
val1.mode = Addressing_Value;
val1.type = t_untyped_bool;
if (expr->kind == Ast_CallExpr) {
Type *pt = base_type(type_of_expr(expr->CallExpr.proc));
if (is_type_proc(pt)) {
do_normal = false;
Type *tuple = pt->Proc.results;
add_type_and_value(&c->checker->info, o.expr, o.mode, tuple, o.value);
if (pt->Proc.result_count >= 2) {
Type *t1 = tuple->Tuple.variables[1]->type;
val1.type = t1;
}
expr->CallExpr.optional_ok_one = false;
}
}
if (do_normal) {
Type *tuple = make_optional_ok_type(o.type);
add_type_and_value(&c->checker->info, o.expr, o.mode, tuple, o.value);
}
check_promote_optional_ok(c, &o, nullptr, &val1.type);
if (expr->kind == Ast_TypeAssertion &&
(o.mode == Addressing_OptionalOk || o.mode == Addressing_OptionalOkPtr)) {
@@ -5495,7 +5518,7 @@ CallArgumentError check_polymorphic_record_type(CheckerContext *c, Operand *oper
return err;
}
while (ordered_operands.count >= 0) {
while (ordered_operands.count > 0) {
if (ordered_operands[ordered_operands.count-1].expr != nullptr) {
break;
}
@@ -5544,8 +5567,9 @@ CallArgumentError check_polymorphic_record_type(CheckerContext *c, Operand *oper
}
}
isize oo_count = gb_min(param_count, ordered_operands.count);
i64 score = 0;
for (isize i = 0; i < param_count; i++) {
for (isize i = 0; i < oo_count; i++) {
Entity *e = tuple->variables[i];
Operand *o = &ordered_operands[i];
if (o->mode == Addressing_Invalid) {
@@ -5753,7 +5777,7 @@ ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *call, Ast *pr
arg = arg->FieldValue.value;
// NOTE(bill): Carry on the cast regardless
}
check_expr(c, operand, arg);
check_expr_with_type_hint(c, operand, arg, t);
if (operand->mode != Addressing_Invalid) {
if (is_type_polymorphic(t)) {
error(call, "A polymorphic type cannot be used in a type conversion");
@@ -5764,6 +5788,10 @@ ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *call, Ast *pr
}
}
operand->type = t;
operand->expr = call;
if (operand->mode != Addressing_Invalid) {
update_expr_type(c, arg, t, false);
}
break;
}
}
@@ -6215,9 +6243,14 @@ ExprKind check_implicit_selector_expr(CheckerContext *c, Operand *o, Ast *node,
String name = ise->selector->Ident.token.string;
if (is_type_enum(th)) {
Type *bt = base_type(th);
GB_ASSERT(bt->kind == Type_Enum);
gbString typ = type_to_string(th);
error(node, "Undeclared name %.*s for type '%s'", LIT(name), typ);
gb_string_free(typ);
defer (gb_string_free(typ));
error(node, "Undeclared name '%.*s' for type '%s'", LIT(name), typ);
check_did_you_mean_type(name, bt->Enum.fields);
} else {
gbString typ = type_to_string(th);
gbString str = expr_to_string(node);
@@ -6231,6 +6264,44 @@ ExprKind check_implicit_selector_expr(CheckerContext *c, Operand *o, Ast *node,
return Expr_Expr;
}
void check_promote_optional_ok(CheckerContext *c, Operand *x, Type **val_type_, Type **ok_type_) {
switch (x->mode) {
case Addressing_MapIndex:
case Addressing_OptionalOk:
case Addressing_OptionalOkPtr:
if (val_type_) *val_type_ = x->type;
break;
default:
if (ok_type_) *ok_type_ = x->type;
return;
}
Ast *expr = unparen_expr(x->expr);
if (expr->kind == Ast_CallExpr) {
Type *pt = base_type(type_of_expr(expr->CallExpr.proc));
if (is_type_proc(pt)) {
Type *tuple = pt->Proc.results;
add_type_and_value(&c->checker->info, x->expr, x->mode, tuple, x->value);
if (pt->Proc.result_count >= 2) {
if (ok_type_) *ok_type_ = tuple->Tuple.variables[1]->type;
}
expr->CallExpr.optional_ok_one = false;
x->type = tuple;
return;
}
}
Type *tuple = make_optional_ok_type(x->type);
if (ok_type_) *ok_type_ = tuple->Tuple.variables[1]->type;
add_type_and_value(&c->checker->info, x->expr, x->mode, tuple, x->value);
x->type = tuple;
GB_ASSERT(is_type_tuple(type_of_expr(x->expr)));
}
ExprKind check_expr_base_internal(CheckerContext *c, Operand *o, Ast *node, Type *type_hint) {
u32 prev_state_flags = c->state_flags;
defer (c->state_flags = prev_state_flags);
@@ -6450,20 +6521,13 @@ ExprKind check_expr_base_internal(CheckerContext *c, Operand *o, Ast *node, Type
o->type = type;
o->mode = Addressing_Value;
// if (cond.mode == Addressing_Constant && is_type_boolean(cond.type) &&
// x.mode == Addressing_Constant &&
// y.mode == Addressing_Constant) {
// o->mode = Addressing_Constant;
// if (cond.value.value_bool) {
// o->value = x.value;
// } else {
// o->value = y.value;
// }
// }
if (type_hint != nullptr && is_type_untyped(type)) {
if (check_cast_internal(c, &x, type_hint) &&
check_cast_internal(c, &y, type_hint)) {
update_expr_type(c, node, type_hint, !is_type_untyped(type_hint));
o->type = type_hint;
}
}
case_end;
case_ast_node(te, TernaryWhenExpr, node);
@@ -6557,10 +6621,54 @@ ExprKind check_expr_base_internal(CheckerContext *c, Operand *o, Ast *node, Type
break; // NOTE(bill): No need to init
}
if (t->Struct.is_raw_union) {
if (cl->elems.count != 0) {
gbString type_str = type_to_string(type);
error(node, "Illegal compound literal type '%s'", type_str);
gb_string_free(type_str);
if (cl->elems.count > 0) {
// NOTE: unions cannot be constant
is_constant = false;
if (cl->elems[0]->kind != Ast_FieldValue) {
gbString type_str = type_to_string(type);
error(node, "%s ('struct #raw_union') compound literals are only allowed to contain 'field = value' elements", type_str);
gb_string_free(type_str);
} else {
if (cl->elems.count != 1) {
gbString type_str = type_to_string(type);
error(node, "%s ('struct #raw_union') compound literals are only allowed to contain up to 1 'field = value' element, got %td", type_str, cl->elems.count);
gb_string_free(type_str);
} else {
Ast *elem = cl->elems[0];
ast_node(fv, FieldValue, elem);
if (fv->field->kind != Ast_Ident) {
gbString expr_str = expr_to_string(fv->field);
error(elem, "Invalid field name '%s' in structure literal", expr_str);
gb_string_free(expr_str);
break;
}
String name = fv->field->Ident.token.string;
Selection sel = lookup_field(type, name, o->mode == Addressing_Type);
bool is_unknown = sel.entity == nullptr;
if (is_unknown) {
error(elem, "Unknown field '%.*s' in structure literal", LIT(name));
break;
}
if (sel.index.count > 1) {
error(elem, "Cannot assign to an anonymous field '%.*s' in a structure literal (at the moment)", LIT(name));
break;
}
Entity *field = t->Struct.fields[sel.index[0]];
add_entity_use(c, fv->field, field);
Operand o = {};
check_expr_or_type(c, &o, fv->value, field->type);
check_assignment(c, &o, field->type, str_lit("structure literal"));
}
}
}
break;
}
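With the check above, a 'struct #raw_union' compound literal may initialise exactly one field and is never constant; a minimal sketch:

Value :: struct #raw_union {
	i: i64,
	f: f64,
}

raw_union_literal :: proc() {
	v := Value{f = 3.5}; // exactly one 'field = value' element is allowed; more than one is rejected
	assert(v.f == 3.5);
}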
@@ -7556,8 +7664,6 @@ ExprKind check_expr_base_internal(CheckerContext *c, Operand *o, Ast *node, Type
}
case_end;
case_ast_node(se, SelectorExpr, node);
check_selector(c, o, node, type_hint);
node->viral_state_flags |= se->expr->viral_state_flags;
@@ -8148,6 +8254,21 @@ void check_multi_expr(CheckerContext *c, Operand *o, Ast *e) {
o->mode = Addressing_Invalid;
}
void check_multi_expr_with_type_hint(CheckerContext *c, Operand *o, Ast *e, Type *type_hint) {
check_expr_base(c, o, e, type_hint);
switch (o->mode) {
default:
return; // NOTE(bill): Valid
case Addressing_NoValue:
error_operand_no_value(o);
break;
case Addressing_Type:
error_operand_not_expression(o);
break;
}
o->mode = Addressing_Invalid;
}
void check_not_tuple(CheckerContext *c, Operand *o) {
if (o->mode == Addressing_Value) {
// NOTE(bill): Tuples are not first class thus never named
@@ -8428,9 +8549,15 @@ gbString write_expr_to_string(gbString str, Ast *node, bool shorthand) {
case_ast_node(ta, TypeAssertion, node);
str = write_expr_to_string(str, ta->expr, shorthand);
str = gb_string_appendc(str, ".(");
str = write_expr_to_string(str, ta->type, shorthand);
str = gb_string_append_rune(str, ')');
if (ta->type != nullptr &&
ta->type->kind == Ast_UnaryExpr &&
ta->type->UnaryExpr.op.kind == Token_Question) {
str = gb_string_appendc(str, ".?");
} else {
str = gb_string_appendc(str, ".(");
str = write_expr_to_string(str, ta->type, shorthand);
str = gb_string_append_rune(str, ')');
}
case_end;
case_ast_node(tc, TypeCast, node);

View File

@@ -558,7 +558,7 @@ void check_label(CheckerContext *ctx, Ast *label, Ast *parent) {
}
Entity *e = alloc_entity_label(ctx->scope, l->name->Ident.token, t_invalid, label, parent);
add_entity(ctx->checker, ctx->scope, l->name, e);
add_entity(ctx, ctx->scope, l->name, e);
e->parent_proc_decl = ctx->curr_proc_decl;
if (ok) {
@@ -861,7 +861,7 @@ void check_inline_range_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags) {
}
for (isize i = 0; i < entity_count; i++) {
add_entity(ctx->checker, ctx->scope, entities[i]->identifier, entities[i]);
add_entity(ctx, ctx->scope, entities[i]->identifier, entities[i]);
}
@@ -1344,7 +1344,7 @@ void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags) {
if (!is_reference) {
tag_var->flags |= EntityFlag_Value;
}
add_entity(ctx->checker, ctx->scope, lhs, tag_var);
add_entity(ctx, ctx->scope, lhs, tag_var);
add_entity_use(ctx, lhs, tag_var);
add_implicit_entity(ctx, stmt, tag_var);
}
@@ -1667,7 +1667,7 @@ void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) {
GB_ASSERT(ctx->curr_proc_sig != nullptr);
if (ctx->in_defer) {
error(rs->token, "You cannot 'return' within a defer statement");
error(rs->token, "'return' cannot be used within a defer statement");
break;
}
@@ -1884,7 +1884,8 @@ void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) {
error(operand.expr, "Cannot iterate over '%s' of type '%s'", s, t);
if (rs->vals.count == 1) {
if (is_type_map(operand.type) || is_type_bit_set(operand.type)) {
Type *t = type_deref(operand.type);
if (is_type_map(t) || is_type_bit_set(t)) {
gbString v = expr_to_string(rs->vals[0]);
defer (gb_string_free(v));
error_line("\tSuggestion: place parentheses around the expression\n");
@@ -1965,7 +1966,7 @@ void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) {
Entity *e = entities[i];
DeclInfo *d = decl_info_of_entity(e);
GB_ASSERT(d == nullptr);
add_entity(ctx->checker, ctx->scope, e->identifier, e);
add_entity(ctx, ctx->scope, e->identifier, e);
d = make_decl_info(ctx->scope, ctx->decl);
add_entity_and_decl_info(ctx, e->identifier, e, d);
}
@@ -2285,7 +2286,7 @@ void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) {
}
}
}
add_entity(ctx->checker, ctx->scope, e->identifier, e);
add_entity(ctx, ctx->scope, e->identifier, e);
}
if (vd->is_using != 0) {

View File

@@ -23,7 +23,7 @@ void populate_using_array_index(CheckerContext *ctx, Ast *node, AstField *field,
tok.pos = ast_token(field->type).pos;
}
Entity *f = alloc_entity_array_elem(nullptr, tok, t->Array.elem, idx);
add_entity(ctx->checker, ctx->scope, nullptr, f);
add_entity(ctx, ctx->scope, nullptr, f);
}
}
@@ -52,7 +52,7 @@ void populate_using_entity_scope(CheckerContext *ctx, Ast *node, AstField *field
error(e->token, "'%.*s' is already declared", LIT(name));
}
} else {
add_entity(ctx->checker, ctx->scope, nullptr, f);
add_entity(ctx, ctx->scope, nullptr, f);
if (f->flags & EntityFlag_Using) {
populate_using_entity_scope(ctx, node, field, f->type);
}
@@ -89,12 +89,8 @@ bool does_field_type_allow_using(Type *t) {
t = base_type(t);
if (is_type_struct(t)) {
return true;
} else if (is_type_raw_union(t)) {
return true;
} else if (is_type_array(t)) {
return t->Array.count <= 4;
} else if (is_type_typeid(t)) {
return true;
}
return false;
}
@@ -161,7 +157,7 @@ void check_struct_fields(CheckerContext *ctx, Ast *node, Array<Entity *> *fields
Token name_token = name->Ident.token;
Entity *field = alloc_entity_field(ctx->scope, name_token, type, is_using, field_src_index);
add_entity(ctx->checker, ctx->scope, name, field);
add_entity(ctx, ctx->scope, name, field);
array_add(fields, field);
array_add(tags, p->tag.string);
@@ -240,7 +236,10 @@ bool check_custom_align(CheckerContext *ctx, Ast *node, i64 *align_) {
Entity *find_polymorphic_record_entity(CheckerContext *ctx, Type *original_type, isize param_count, Array<Operand> const &ordered_operands, bool *failure) {
auto *found_gen_types = map_get(&ctx->checker->info.gen_types, hash_pointer(original_type));
gb_mutex_lock(&ctx->info->gen_types_mutex);
defer (gb_mutex_unlock(&ctx->info->gen_types_mutex));
auto *found_gen_types = map_get(&ctx->info->gen_types, hash_pointer(original_type));
if (found_gen_types != nullptr) {
for_array(i, *found_gen_types) {
Entity *e = (*found_gen_types)[i];
@@ -319,14 +318,16 @@ void add_polymorphic_record_entity(CheckerContext *ctx, Ast *node, Type *named_t
named_type->Named.type_name = e;
auto *found_gen_types = map_get(&ctx->checker->info.gen_types, hash_pointer(original_type));
gb_mutex_lock(&ctx->info->gen_types_mutex);
auto *found_gen_types = map_get(&ctx->info->gen_types, hash_pointer(original_type));
if (found_gen_types) {
array_add(found_gen_types, e);
} else {
auto array = array_make<Entity *>(heap_allocator());
array_add(&array, e);
map_set(&ctx->checker->info.gen_types, hash_pointer(original_type), array);
map_set(&ctx->info->gen_types, hash_pointer(original_type), array);
}
gb_mutex_unlock(&ctx->info->gen_types_mutex);
}
Type *check_record_polymorphic_params(CheckerContext *ctx, Ast *polymorphic_params,
@@ -487,7 +488,7 @@ Type *check_record_polymorphic_params(CheckerContext *ctx, Ast *polymorphic_para
}
e->state = EntityState_Resolved;
add_entity(ctx->checker, scope, name, e);
add_entity(ctx, scope, name, e);
array_add(&entities, e);
}
}
@@ -799,7 +800,7 @@ void check_enum_type(CheckerContext *ctx, Type *enum_type, Type *named_type, Ast
if (scope_lookup_current(ctx->scope, name) != nullptr) {
error(ident, "'%.*s' is already declared in this enumeration", LIT(name));
} else {
add_entity(ctx->checker, ctx->scope, nullptr, e);
add_entity(ctx, ctx->scope, nullptr, e);
array_add(&fields, e);
// TODO(bill): Should I add a use for the enum value?
add_entity_use(ctx, field, e);
@@ -1626,7 +1627,7 @@ Type *check_get_params(CheckerContext *ctx, Scope *scope, Ast *_params, bool *is
}
param->state = EntityState_Resolved; // NOTE(bill): This should have be resolved whilst determining it
add_entity(ctx->checker, scope, name, param);
add_entity(ctx, scope, name, param);
if (is_using) {
add_entity_use(ctx, name, param);
}
@@ -1753,7 +1754,7 @@ Type *check_get_results(CheckerContext *ctx, Scope *scope, Ast *_results) {
param->flags |= EntityFlag_Result;
param->Variable.param_value = param_value;
array_add(&variables, param);
add_entity(ctx->checker, scope, name, param);
add_entity(ctx, scope, name, param);
// NOTE(bill): Removes `declared but not used` when using -vet
add_entity_use(ctx, name, param);
}
@@ -2247,7 +2248,7 @@ Type *make_soa_struct_internal(CheckerContext *ctx, Ast *array_typ_expr, Ast *el
Entity *new_field = alloc_entity_field(scope, token, field_type, false, cast(i32)i);
soa_struct->Struct.fields[i] = new_field;
add_entity(ctx->checker, scope, nullptr, new_field);
add_entity(ctx, scope, nullptr, new_field);
add_entity_use(ctx, nullptr, new_field);
}
@@ -2281,7 +2282,7 @@ Type *make_soa_struct_internal(CheckerContext *ctx, Ast *array_typ_expr, Ast *el
}
Entity *new_field = alloc_entity_field(scope, old_field->token, field_type, false, old_field->Variable.field_src_index);
soa_struct->Struct.fields[i] = new_field;
add_entity(ctx->checker, scope, nullptr, new_field);
add_entity(ctx, scope, nullptr, new_field);
add_entity_use(ctx, nullptr, new_field);
} else {
soa_struct->Struct.fields[i] = old_field;
@@ -2294,13 +2295,13 @@ Type *make_soa_struct_internal(CheckerContext *ctx, Ast *array_typ_expr, Ast *el
if (soa_kind != StructSoa_Fixed) {
Entity *len_field = alloc_entity_field(scope, empty_token, t_int, false, cast(i32)field_count+0);
soa_struct->Struct.fields[field_count+0] = len_field;
add_entity(ctx->checker, scope, nullptr, len_field);
add_entity(ctx, scope, nullptr, len_field);
add_entity_use(ctx, nullptr, len_field);
if (soa_kind == StructSoa_Dynamic) {
Entity *cap_field = alloc_entity_field(scope, empty_token, t_int, false, cast(i32)field_count+1);
soa_struct->Struct.fields[field_count+1] = cap_field;
add_entity(ctx->checker, scope, nullptr, cap_field);
add_entity(ctx, scope, nullptr, cap_field);
add_entity_use(ctx, nullptr, cap_field);
Token token = {};
@@ -2308,7 +2309,7 @@ Type *make_soa_struct_internal(CheckerContext *ctx, Ast *array_typ_expr, Ast *el
init_mem_allocator(ctx->checker);
Entity *allocator_field = alloc_entity_field(scope, token, t_allocator, false, cast(i32)field_count+2);
soa_struct->Struct.fields[field_count+2] = allocator_field;
add_entity(ctx->checker, scope, nullptr, allocator_field);
add_entity(ctx, scope, nullptr, allocator_field);
add_entity_use(ctx, nullptr, allocator_field);
}
}
@@ -2316,7 +2317,7 @@ Type *make_soa_struct_internal(CheckerContext *ctx, Ast *array_typ_expr, Ast *el
Token token = {};
token.string = str_lit("Base_Type");
Entity *base_type_entity = alloc_entity_type_name(scope, token, elem, EntityState_Resolved);
add_entity(ctx->checker, scope, nullptr, base_type_entity);
add_entity(ctx, scope, nullptr, base_type_entity);
add_type_info_type(ctx, soa_struct);
@@ -2429,8 +2430,8 @@ bool check_type_internal(CheckerContext *ctx, Ast *e, Type **type, Type *named_t
t->Generic.entity = e;
e->TypeName.is_type_alias = true;
e->state = EntityState_Resolved;
add_entity(ctx->checker, ps, ident, e);
add_entity(ctx->checker, s, ident, e);
add_entity(ctx, ps, ident, e);
add_entity(ctx, s, ident, e);
} else {
error(ident, "Invalid use of a polymorphic parameter '$%.*s'", LIT(token.string));
*type = t_invalid;
@@ -2800,7 +2801,7 @@ Type *check_type_expr(CheckerContext *ctx, Ast *e, Type *named_type) {
#endif
if (is_type_typed(type)) {
add_type_and_value(&ctx->checker->info, e, Addressing_Type, type, empty_exact_value);
add_type_and_value(ctx->info, e, Addressing_Type, type, empty_exact_value);
} else {
gbString name = type_to_string(type);
error(e, "Invalid type definition of %s", name);

View File

@@ -4,6 +4,8 @@
void check_expr(CheckerContext *c, Operand *operand, Ast *expression);
void check_expr_or_type(CheckerContext *c, Operand *operand, Ast *expression, Type *type_hint=nullptr);
void add_comparison_procedures_for_fields(CheckerContext *c, Type *t);
void check_proc_info(Checker *c, ProcInfo *pi);
bool is_operand_value(Operand o) {
switch (o.mode) {
@@ -242,7 +244,7 @@ Scope *create_scope(Scope *parent, isize init_elements_capacity=DEFAULT_SCOPE_CA
return s;
}
Scope *create_scope_from_file(CheckerContext *c, AstFile *f) {
Scope *create_scope_from_file(AstFile *f) {
GB_ASSERT(f != nullptr);
GB_ASSERT(f->pkg != nullptr);
GB_ASSERT(f->pkg->scope != nullptr);
@@ -850,7 +852,10 @@ void init_checker_info(CheckerInfo *i) {
array_init(&i->identifier_uses, a);
}
map_init(&i->atom_op_map, a);
gb_mutex_init(&i->untyped_mutex);
gb_mutex_init(&i->gen_procs_mutex);
gb_mutex_init(&i->gen_types_mutex);
gb_mutex_init(&i->type_info_mutex);
}
@@ -870,11 +875,14 @@ void destroy_checker_info(CheckerInfo *i) {
array_free(&i->required_foreign_imports_through_force);
array_free(&i->required_global_variables);
map_destroy(&i->atom_op_map);
gb_mutex_destroy(&i->untyped_mutex);
gb_mutex_destroy(&i->gen_procs_mutex);
gb_mutex_destroy(&i->gen_types_mutex);
gb_mutex_destroy(&i->type_info_mutex);
}
CheckerContext make_checker_context(Checker *c) {
CheckerContext ctx = c->init_ctx;
CheckerContext ctx = {};
ctx.checker = c;
ctx.info = &c->info;
ctx.scope = builtin_pkg->scope;
@@ -887,6 +895,40 @@ CheckerContext make_checker_context(Checker *c) {
return ctx;
}
void add_curr_ast_file(CheckerContext *ctx, AstFile *file) {
if (file != nullptr) {
TokenPos zero_pos = {};
global_error_collector.prev = zero_pos;
ctx->file = file;
ctx->decl = file->pkg->decl_info;
ctx->scope = file->scope;
ctx->pkg = file->pkg;
}
}
void reset_checker_context(CheckerContext *ctx, AstFile *file) {
if (ctx == nullptr) {
return;
}
auto checker = ctx->checker;
auto info = ctx->info;
auto type_path = ctx->type_path;
auto poly_path = ctx->poly_path;
array_clear(type_path);
array_clear(poly_path);
gb_zero_item(ctx);
ctx->checker = checker;
ctx->info = info;
ctx->type_path = type_path;
ctx->poly_path = poly_path;
ctx->scope = builtin_pkg->scope;
ctx->pkg = builtin_pkg;
add_curr_ast_file(ctx, file);
}
void destroy_checker_context(CheckerContext *ctx) {
destroy_checker_type_path(ctx->type_path);
destroy_checker_poly_path(ctx->poly_path);
@@ -911,7 +953,10 @@ bool init_checker(Checker *c, Parser *parser) {
isize total_token_count = c->parser->total_token_count;
isize arena_size = 2 * item_size * total_token_count;
c->init_ctx = make_checker_context(c);
c->builtin_ctx = make_checker_context(c);
gb_mutex_init(&c->procs_to_check_mutex);
gb_mutex_init(&c->procs_with_deferred_to_check_mutex);
return true;
}
@@ -921,7 +966,10 @@ void destroy_checker(Checker *c) {
array_free(&c->procs_to_check);
array_free(&c->procs_with_deferred_to_check);
destroy_checker_context(&c->init_ctx);
destroy_checker_context(&c->builtin_ctx);
gb_mutex_destroy(&c->procs_to_check_mutex);
gb_mutex_destroy(&c->procs_with_deferred_to_check_mutex);
}
@@ -998,13 +1046,19 @@ Scope *scope_of_node(Ast *node) {
return node->scope;
}
ExprInfo *check_get_expr_info(CheckerInfo *i, Ast *expr) {
return map_get(&i->untyped, hash_node(expr));
}
void check_set_expr_info(CheckerInfo *i, Ast *expr, ExprInfo info) {
map_set(&i->untyped, hash_node(expr), info);
gb_mutex_lock(&i->untyped_mutex);
ExprInfo *res = nullptr;
ExprInfo **found = map_get(&i->untyped, hash_node(expr));
if (found) {
res = *found;
}
gb_mutex_unlock(&i->untyped_mutex);
return res;
}
void check_remove_expr_info(CheckerInfo *i, Ast *expr) {
gb_mutex_lock(&i->untyped_mutex);
map_remove(&i->untyped, hash_node(expr));
gb_mutex_unlock(&i->untyped_mutex);
}
@@ -1015,6 +1069,8 @@ isize type_info_index(CheckerInfo *info, Type *type, bool error_on_failure) {
type = t_bool;
}
gb_mutex_lock(&info->type_info_mutex);
isize entry_index = -1;
HashKey key = hash_type(type);
isize *found_entry_index = map_get(&info->type_info_map, key);
@@ -1036,6 +1092,8 @@ isize type_info_index(CheckerInfo *info, Type *type, bool error_on_failure) {
}
}
gb_mutex_unlock(&info->type_info_mutex);
if (error_on_failure && entry_index < 0) {
compiler_error("Type_Info for '%s' could not be found", type_to_string(type));
}
@@ -1053,7 +1111,9 @@ void add_untyped(CheckerInfo *i, Ast *expression, bool lhs, AddressingMode mode,
if (mode == Addressing_Constant && type == t_invalid) {
compiler_error("add_untyped - invalid type: %s", type_to_string(type));
}
gb_mutex_lock(&i->untyped_mutex);
map_set(&i->untyped, hash_node(expression), make_expr_info(mode, type, value, lhs));
gb_mutex_unlock(&i->untyped_mutex);
}
void add_type_and_value(CheckerInfo *i, Ast *expr, AddressingMode mode, Type *type, ExactValue value) {
@@ -1147,7 +1207,7 @@ bool redeclaration_error(String name, Entity *prev, Entity *found) {
return false;
}
bool add_entity_with_name(Checker *c, Scope *scope, Ast *identifier, Entity *entity, String name) {
bool add_entity_with_name(CheckerContext *c, Scope *scope, Ast *identifier, Entity *entity, String name) {
if (scope == nullptr) {
return false;
}
@@ -1159,14 +1219,13 @@ bool add_entity_with_name(Checker *c, Scope *scope, Ast *identifier, Entity *ent
}
if (identifier != nullptr) {
if (entity->file == nullptr) {
GB_ASSERT(c->curr_ctx != nullptr);
entity->file = c->curr_ctx->file;
entity->file = c->file;
}
add_entity_definition(&c->info, identifier, entity);
add_entity_definition(c->info, identifier, entity);
}
return true;
}
bool add_entity(Checker *c, Scope *scope, Ast *identifier, Entity *entity) {
bool add_entity(CheckerContext *c, Scope *scope, Ast *identifier, Entity *entity) {
return add_entity_with_name(c, scope, identifier, entity, entity->token.string);
}
@@ -1217,7 +1276,7 @@ void add_entity_and_decl_info(CheckerContext *c, Ast *identifier, Entity *e, Dec
scope = pkg->scope;
}
}
add_entity(c->checker, scope, identifier, e);
add_entity(c, scope, identifier, e);
}
add_entity_definition(&c->checker->info, identifier, e);
@@ -1255,6 +1314,9 @@ void add_type_info_type(CheckerContext *c, Type *t) {
add_type_info_dependency(c->decl, t);
gb_mutex_lock(&c->info->type_info_mutex);
defer (gb_mutex_unlock(&c->info->type_info_mutex));
auto found = map_get(&c->info->type_info_map, hash_type(t));
if (found != nullptr) {
// Types have already been added
@@ -1448,33 +1510,25 @@ void add_type_info_type(CheckerContext *c, Type *t) {
}
}
void check_procedure_later(Checker *c, ProcInfo info) {
GB_ASSERT(info.decl != nullptr);
void check_procedure_later(Checker *c, ProcInfo *info) {
GB_ASSERT(info != nullptr);
GB_ASSERT(info->decl != nullptr);
gb_mutex_lock(&c->procs_to_check_mutex);
array_add(&c->procs_to_check, info);
gb_mutex_unlock(&c->procs_to_check_mutex);
}
void check_procedure_later(Checker *c, AstFile *file, Token token, DeclInfo *decl, Type *type, Ast *body, u64 tags) {
ProcInfo info = {};
info.file = file;
info.token = token;
info.decl = decl;
info.type = type;
info.body = body;
info.tags = tags;
ProcInfo *info = gb_alloc_item(permanent_allocator(), ProcInfo);
info->file = file;
info->token = token;
info->decl = decl;
info->type = type;
info->body = body;
info->tags = tags;
check_procedure_later(c, info);
}
void add_curr_ast_file(CheckerContext *ctx, AstFile *file) {
if (file != nullptr) {
TokenPos zero_pos = {};
global_error_collector.prev = zero_pos;
ctx->file = file;
ctx->decl = file->pkg->decl_info;
ctx->scope = file->scope;
ctx->pkg = file->pkg;
ctx->checker->curr_ctx = ctx;
}
}
void add_min_dep_type_info(Checker *c, Type *t) {
if (t == nullptr) {
@@ -1492,7 +1546,7 @@ void add_min_dep_type_info(Checker *c, Type *t) {
isize ti_index = type_info_index(&c->info, t, false);
if (ti_index < 0) {
add_type_info_type(&c->init_ctx, t); // Missing the type information
add_type_info_type(&c->builtin_ctx, t); // Missing the type information
ti_index = type_info_index(&c->info, t, false);
}
GB_ASSERT(ti_index >= 0);
@@ -2286,8 +2340,7 @@ void init_core_map_type(Checker *c) {
if (t_map_hash == nullptr) {
Entity *e = find_core_entity(c, str_lit("Map_Hash"));
if (e->state == EntityState_Unresolved) {
auto ctx = c->init_ctx;
check_entity_decl(&ctx, e, nullptr, nullptr);
check_entity_decl(&c->builtin_ctx, e, nullptr, nullptr);
}
t_map_hash = e->type;
GB_ASSERT(t_map_hash != nullptr);
@@ -2296,8 +2349,7 @@ void init_core_map_type(Checker *c) {
if (t_map_header == nullptr) {
Entity *e = find_core_entity(c, str_lit("Map_Header"));
if (e->state == EntityState_Unresolved) {
auto ctx = c->init_ctx;
check_entity_decl(&ctx, e, nullptr, nullptr);
check_entity_decl(&c->builtin_ctx, e, nullptr, nullptr);
}
t_map_header = e->type;
GB_ASSERT(t_map_header != nullptr);
@@ -2891,7 +2943,7 @@ void check_builtin_attributes(CheckerContext *ctx, Entity *e, Array<Ast *> *attr
}
if (name == "builtin") {
add_entity(ctx->checker, builtin_pkg->scope, nullptr, e);
add_entity(ctx, builtin_pkg->scope, nullptr, e);
GB_ASSERT(scope_lookup(builtin_pkg->scope, e->token.string) != nullptr);
if (value != nullptr) {
error(value, "'builtin' cannot have a field value");
@@ -3240,8 +3292,8 @@ void check_collect_entities(CheckerContext *c, Slice<Ast *> const &nodes) {
}
CheckerContext *create_checker_context(Checker *c) {
CheckerContext *ctx = gb_alloc_item(heap_allocator(), CheckerContext);
*ctx = c->init_ctx;
CheckerContext *ctx = gb_alloc_item(permanent_allocator(), CheckerContext);
*ctx = make_checker_context(c);
return ctx;
}
@@ -3615,7 +3667,7 @@ void check_add_import_decl(CheckerContext *ctx, Ast *decl) {
id->fullpath, id->import_name.string,
scope);
add_entity(ctx->checker, parent_scope, nullptr, e);
add_entity(ctx, parent_scope, nullptr, e);
if (force_use || id->is_using) {
add_entity_use(ctx, nullptr, e);
}
@@ -3642,7 +3694,7 @@ void check_add_import_decl(CheckerContext *ctx, Ast *decl) {
// file scope otherwise the error would be the wrong way around
redeclaration_error(name, found, e);
} else {
add_entity_with_name(ctx->checker, parent_scope, e->identifier, e, name);
add_entity_with_name(ctx, parent_scope, e->identifier, e, name);
}
}
}
@@ -3704,7 +3756,7 @@ void check_add_foreign_import_decl(CheckerContext *ctx, Ast *decl) {
Entity *e = alloc_entity_library_name(parent_scope, fl->library_name, t_invalid,
fl->fullpaths, library_name);
add_entity(ctx->checker, parent_scope, nullptr, e);
add_entity(ctx, parent_scope, nullptr, e);
AttributeContext ac = {};
@@ -3962,6 +4014,8 @@ void check_import_entities(Checker *c) {
}
}
CheckerContext ctx = make_checker_context(c);
for (isize loop_count = 0; ; loop_count++) {
bool new_files = false;
for_array(i, package_order) {
@@ -3974,8 +4028,7 @@ void check_import_entities(Checker *c) {
for_array(i, pkg->files) {
AstFile *f = pkg->files[i];
CheckerContext ctx = c->init_ctx;
add_curr_ast_file(&ctx, f);
reset_checker_context(&ctx, f);
new_files |= collect_checked_packages_from_decl_list(c, f->decls);
}
}
@@ -3998,9 +4051,8 @@ void check_import_entities(Checker *c) {
for_array(i, pkg->files) {
AstFile *f = pkg->files[i];
CheckerContext ctx = c->init_ctx;
reset_checker_context(&ctx, f);
ctx.collect_delayed_decls = true;
add_curr_ast_file(&ctx, f);
if (collect_file_decls(&ctx, f->decls)) {
new_packages = true;
@@ -4021,8 +4073,7 @@ void check_import_entities(Checker *c) {
for_array(i, pkg->files) {
AstFile *f = pkg->files[i];
CheckerContext ctx = c->init_ctx;
add_curr_ast_file(&ctx, f);
reset_checker_context(&ctx, f);
for_array(j, f->scope->delayed_imports) {
Ast *decl = f->scope->delayed_imports[j];
@@ -4031,8 +4082,7 @@ void check_import_entities(Checker *c) {
}
for_array(i, pkg->files) {
AstFile *f = pkg->files[i];
CheckerContext ctx = c->init_ctx;
add_curr_ast_file(&ctx, f);
reset_checker_context(&ctx, f);
for_array(j, f->scope->delayed_directives) {
Ast *expr = f->scope->delayed_directives[j];
@@ -4211,37 +4261,40 @@ void calculate_global_init_order(Checker *c) {
}
void check_proc_info(Checker *c, ProcInfo pi) {
if (pi.type == nullptr) {
void check_proc_info(Checker *c, ProcInfo *pi) {
if (pi == nullptr) {
return;
}
if (pi->type == nullptr) {
return;
}
CheckerContext ctx = make_checker_context(c);
defer (destroy_checker_context(&ctx));
add_curr_ast_file(&ctx, pi.file);
ctx.decl = pi.decl;
reset_checker_context(&ctx, pi->file);
ctx.decl = pi->decl;
TypeProc *pt = &pi.type->Proc;
String name = pi.token.string;
TypeProc *pt = &pi->type->Proc;
String name = pi->token.string;
if (pt->is_polymorphic && !pt->is_poly_specialized) {
Token token = pi.token;
if (pi.poly_def_node != nullptr) {
token = ast_token(pi.poly_def_node);
Token token = pi->token;
if (pi->poly_def_node != nullptr) {
token = ast_token(pi->poly_def_node);
}
error(token, "Unspecialized polymorphic procedure '%.*s'", LIT(name));
return;
}
if (pt->is_polymorphic && pt->is_poly_specialized) {
Entity *e = pi.decl->entity;
Entity *e = pi->decl->entity;
if ((e->flags & EntityFlag_Used) == 0) {
// NOTE(bill, 2019-08-31): It was never used, don't check
return;
}
}
bool bounds_check = (pi.tags & ProcTag_bounds_check) != 0;
bool no_bounds_check = (pi.tags & ProcTag_no_bounds_check) != 0;
bool bounds_check = (pi->tags & ProcTag_bounds_check) != 0;
bool no_bounds_check = (pi->tags & ProcTag_no_bounds_check) != 0;
if (bounds_check) {
ctx.state_flags |= StateFlag_bounds_check;
@@ -4251,17 +4304,19 @@ void check_proc_info(Checker *c, ProcInfo pi) {
ctx.state_flags &= ~StateFlag_bounds_check;
}
check_proc_body(&ctx, pi.token, pi.decl, pi.type, pi.body);
if (pi.body != nullptr && pi.decl->entity != nullptr) {
pi.decl->entity->flags |= EntityFlag_ProcBodyChecked;
check_proc_body(&ctx, pi->token, pi->decl, pi->type, pi->body);
if (pi->body != nullptr && pi->decl->entity != nullptr) {
pi->decl->entity->flags |= EntityFlag_ProcBodyChecked;
}
}
GB_STATIC_ASSERT(sizeof(isize) == sizeof(void *));
GB_THREAD_PROC(check_proc_info_worker_proc) {
if (thread == nullptr) return 0;
auto *c = cast(Checker *)thread->user_data;
isize index = thread->user_index;
check_proc_info(c, c->procs_to_check[index]);
ProcInfo *pi = cast(ProcInfo *)cast(uintptr)thread->user_index;
check_proc_info(c, pi);
return 0;
}
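
The worker above now receives its ProcInfo by passing the pointer through the thread's integer-sized user_index, which the preceding static assert (sizeof(isize) == sizeof(void *)) makes safe. A minimal standalone sketch of that round trip, using hypothetical stand-in types rather than the compiler's thread struct:

// Standalone sketch: storing a pointer in an integer-sized user slot and
// recovering it later, as the worker above does with thread->user_index.
// ProcWork and FakeThread are hypothetical stand-ins.
#include <cassert>
#include <cstdint>

struct ProcWork   { int id; };
struct FakeThread { intptr_t user_index; };

static_assert(sizeof(intptr_t) == sizeof(void *), "pointer must fit the integer slot");

int main() {
    ProcWork work = {7};
    FakeThread t = {};
    t.user_index = (intptr_t)(uintptr_t)&work;              // store: pointer -> integer
    ProcWork *back = (ProcWork *)(uintptr_t)t.user_index;   // load:  integer -> pointer
    assert(back == &work && back->id == 7);
    return 0;
}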
@@ -4295,7 +4350,7 @@ void check_unchecked_bodies(Checker *c) {
continue;
}
check_proc_info(c, pi);
check_proc_info(c, &pi);
}
}
}
@@ -4332,17 +4387,44 @@ void check_test_names(Checker *c) {
}
void check_procedure_bodies(Checker *c) {
// TODO(bill): Make this an actual FIFO queue rather than this monstrosity
while (c->procs_to_check.count != 0) {
ProcInfo *pi = c->procs_to_check.data[0];
// Preparing to multithread the procedure checking code
#if 0
gb_mutex_lock(&c->procs_to_check_mutex);
defer (gb_mutex_unlock(&c->procs_to_check_mutex));
array_ordered_remove(&c->procs_to_check, 0);
if (pi->decl->parent && pi->decl->parent->entity) {
Entity *parent = pi->decl->parent->entity;
if (parent->kind == Entity_Procedure && (parent->flags & EntityFlag_ProcBodyChecked) == 0) {
array_add(&c->procs_to_check, pi);
continue;
}
}
#else
array_ordered_remove(&c->procs_to_check, 0);
#endif
check_proc_info(c, pi);
}
}
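
check_procedure_bodies drains procs_to_check from the front, and the disabled #if 0 branch sketches re-queuing a nested procedure until its parent's body has been checked. A standalone sketch of that drain-and-requeue loop, assuming a std::deque as the FIFO and hypothetical types:

// Standalone sketch of the drain-and-requeue loop in check_procedure_bodies,
// using std::deque as the FIFO. All types and names here are hypothetical.
#include <cstdio>
#include <deque>

struct Proc {
    const char *name;
    Proc *parent;        // a nested procedure waits for its parent
    bool body_checked;
};

static void check_bodies(std::deque<Proc *> &queue) {
    while (!queue.empty()) {
        Proc *p = queue.front();
        queue.pop_front();
        if (p->parent && !p->parent->body_checked) {
            queue.push_back(p);   // parent not ready yet: try again later
            continue;
        }
        p->body_checked = true;   // stands in for check_proc_info
        std::printf("checked %s\n", p->name);
    }
}

int main() {
    Proc parent = {"outer", nullptr, false};
    Proc child  = {"outer.inner", &parent, false};
    std::deque<Proc *> queue = {&child, &parent};  // child queued before its parent
    check_bodies(queue);                           // still checks "outer" first
    return 0;
}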
void check_parsed_files(Checker *c) {
#define TIME_SECTION(str) do { if (build_context.show_more_timings) timings_start_section(&global_timings, str_lit(str)); } while (0)
TIME_SECTION("map full filepaths to scope");
add_type_info_type(&c->init_ctx, t_invalid);
add_type_info_type(&c->builtin_ctx, t_invalid);
// Map full filepaths to Scopes
for_array(i, c->parser->packages) {
AstPackage *p = c->parser->packages[i];
Scope *scope = create_scope_from_package(&c->init_ctx, p);
p->decl_info = make_decl_info(scope, c->init_ctx.decl);
Scope *scope = create_scope_from_package(&c->builtin_ctx, p);
p->decl_info = make_decl_info(scope, c->builtin_ctx.decl);
string_map_set(&c->info.packages, p->fullpath, p);
if (scope->flags&ScopeFlag_Init) {
@@ -4357,21 +4439,20 @@ void check_parsed_files(Checker *c) {
TIME_SECTION("collect entities");
// Collect Entities
CheckerContext collect_entity_ctx = make_checker_context(c);
defer (destroy_checker_context(&collect_entity_ctx));
for_array(i, c->parser->packages) {
AstPackage *pkg = c->parser->packages[i];
CheckerContext ctx = make_checker_context(c);
defer (destroy_checker_context(&ctx));
ctx.pkg = pkg;
ctx.collect_delayed_decls = false;
CheckerContext *ctx = &collect_entity_ctx;
for_array(j, pkg->files) {
AstFile *f = pkg->files[j];
create_scope_from_file(&ctx, f);
string_map_set(&c->info.files, f->fullpath, f);
add_curr_ast_file(&ctx, f);
check_collect_entities(&ctx, f->decls);
create_scope_from_file(f);
reset_checker_context(ctx, f);
check_collect_entities(ctx, f->decls);
}
pkg->used = true;
@@ -4386,16 +4467,12 @@ void check_parsed_files(Checker *c) {
TIME_SECTION("init preload");
init_preload(c);
CheckerContext prev_context = c->init_ctx;
defer (c->init_ctx = prev_context);
c->init_ctx.decl = make_decl_info(nullptr, nullptr);
CheckerContext prev_context = c->builtin_ctx;
defer (c->builtin_ctx = prev_context);
c->builtin_ctx.decl = make_decl_info(nullptr, nullptr);
TIME_SECTION("check procedure bodies");
// NOTE(bill): Nested procedure bodies will be added to this "queue"
for_array(i, c->procs_to_check) {
ProcInfo pi = c->procs_to_check[i];
check_proc_info(c, pi);
}
check_procedure_bodies(c);
TIME_SECTION("check scope usage");
for_array(i, c->info.files.entries) {
@@ -4422,11 +4499,18 @@ void check_parsed_files(Checker *c) {
auto *entry = &c->info.untyped.entries[i];
HashKey key = entry->key;
Ast *expr = cast(Ast *)cast(uintptr)key.key;
ExprInfo *info = &entry->value;
ExprInfo *info = entry->value;
if (info != nullptr && expr != nullptr) {
if (is_type_typed(info->type)) {
compiler_error("%s (type %s) is typed!", expr_to_string(expr), type_to_string(info->type));
}
if (info->mode == Addressing_Constant) {
} else if (info->type == t_untyped_nil) {
} else if (info->type == t_untyped_undef) {
} else if (info->type == t_untyped_bool) {
} else {
gb_printf_err("UNTYPED %s %s\n", expr_to_string(expr), type_to_string(info->type));
}
add_type_and_value(&c->info, expr, info->mode, info->type, info->value);
}
}
@@ -4441,7 +4525,7 @@ void check_parsed_files(Checker *c) {
Type *t = &basic_types[i];
if (t->Basic.size > 0 &&
(t->Basic.flags & BasicFlag_LLVM) == 0) {
add_type_info_type(&c->init_ctx, t);
add_type_info_type(&c->builtin_ctx, t);
}
}
@@ -4453,7 +4537,7 @@ void check_parsed_files(Checker *c) {
// i64 size = type_size_of(c->allocator, e->type);
i64 align = type_align_of(e->type);
if (align > 0 && ptr_set_exists(&c->info.minimum_dependency_set, e)) {
add_type_info_type(&c->init_ctx, e->type);
add_type_info_type(&c->builtin_ctx, e->type);
}
} else if (e->kind == Entity_Procedure) {

View File

@@ -20,12 +20,12 @@ struct ExprInfo {
bool is_lhs; // Debug info
};
gb_inline ExprInfo make_expr_info(AddressingMode mode, Type *type, ExactValue value, bool is_lhs) {
ExprInfo ei = {};
ei.mode = mode;
ei.type = type;
ei.value = value;
ei.is_lhs = is_lhs;
gb_inline ExprInfo *make_expr_info(AddressingMode mode, Type *type, ExactValue const &value, bool is_lhs) {
ExprInfo *ei = gb_alloc_item(permanent_allocator(), ExprInfo);
ei->mode = mode;
ei->type = type;
ei->value = value;
ei->is_lhs = is_lhs;
return ei;
}
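
make_expr_info (like the ProcInfo and CheckerContext changes earlier) now hands out pointers allocated from the long-lived permanent allocator instead of returning values. A minimal standalone sketch of that bump-arena pattern; the Arena and ExprRecord types here are hypothetical, not the compiler's own:

// Standalone sketch of a bump ("permanent") arena handing out pointers to small
// checker records. No per-item free; everything lives as long as the arena.
#include <cstddef>
#include <cstdio>

struct Arena {
    unsigned char *data;
    size_t len, cap;
};

static void *arena_alloc(Arena *a, size_t size, size_t align) {
    size_t offset = (a->len + align - 1) & ~(align - 1);  // round up (align is a power of two)
    if (offset + size > a->cap) return nullptr;           // out of space: grow or chain in practice
    a->len = offset + size;
    return a->data + offset;
}

struct ExprRecord { int mode; bool is_lhs; };

int main() {
    static unsigned char backing[1024];
    Arena arena = {backing, 0, sizeof(backing)};
    ExprRecord *ei = (ExprRecord *)arena_alloc(&arena, sizeof(ExprRecord), alignof(ExprRecord));
    ei->mode = 1;
    ei->is_lhs = false;
    std::printf("mode=%d is_lhs=%d\n", ei->mode, (int)ei->is_lhs);
    return 0;
}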
@@ -262,9 +262,6 @@ struct CheckerContext;
struct CheckerInfo {
Checker *checker;
Map<ExprInfo> untyped; // Key: Ast * | Expression -> ExprInfo
// NOTE(bill): This needs to be a map and not on the Ast
// as it needs to be iterated across
StringMap<AstFile *> files; // Key (full path)
StringMap<AstPackage *> packages; // Key (full path)
StringMap<Entity *> foreigns;
@@ -272,12 +269,6 @@ struct CheckerInfo {
Array<Entity *> entities;
Array<DeclInfo *> variable_init_order;
Map<Array<Entity *> > gen_procs; // Key: Ast * | Identifier -> Entity
Map<Array<Entity *> > gen_types; // Key: Type *
Array<Type *> type_info_types;
Map<isize> type_info_map; // Key: Type *
AstPackage * builtin_package;
AstPackage * runtime_package;
@@ -290,12 +281,29 @@ struct CheckerInfo {
Array<Entity *> required_foreign_imports_through_force;
Array<Entity *> required_global_variables;
Map<AtomOpMapEntry> atom_op_map; // Key: Ast *
Array<Entity *> testing_procedures;
bool allow_identifier_uses;
Array<Ast *> identifier_uses; // only used by 'odin query'
// Below are accessed within procedures
// NOTE(bill): If the semantic checker (check_proc_body) is ever to be multithreaded,
// these variables will be points of contention
gbMutex untyped_mutex;
gbMutex gen_procs_mutex;
gbMutex gen_types_mutex;
gbMutex type_info_mutex;
Map<ExprInfo *> untyped; // Key: Ast * | Expression -> ExprInfo *
// NOTE(bill): This needs to be a map and not on the Ast
// as it needs to be iterated across
Map<Array<Entity *> > gen_procs; // Key: Ast * | Identifier -> Entity
Map<Array<Entity *> > gen_types; // Key: Type *
Array<Type *> type_info_types;
Map<isize> type_info_map; // Key: Type *
};
struct CheckerContext {
@@ -341,11 +349,13 @@ struct Checker {
Parser * parser;
CheckerInfo info;
Array<ProcInfo> procs_to_check;
Array<Entity *> procs_with_deferred_to_check;
CheckerContext builtin_ctx;
CheckerContext *curr_ctx;
CheckerContext init_ctx;
gbMutex procs_to_check_mutex;
gbMutex procs_with_deferred_to_check_mutex;
Array<ProcInfo *> procs_to_check;
Array<Entity *> procs_with_deferred_to_check;
};
@@ -384,7 +394,6 @@ Entity *scope_insert (Scope *s, Entity *entity);
ExprInfo *check_get_expr_info (CheckerInfo *i, Ast *expr);
void check_set_expr_info (CheckerInfo *i, Ast *expr, ExprInfo info);
void check_remove_expr_info (CheckerInfo *i, Ast *expr);
void add_untyped (CheckerInfo *i, Ast *expression, bool lhs, AddressingMode mode, Type *basic_type, ExactValue value);
void add_type_and_value (CheckerInfo *i, Ast *expression, AddressingMode mode, Type *type, ExactValue value);

View File

@@ -33,6 +33,8 @@ enum BuiltinProcId {
BuiltinProc_soa_zip,
BuiltinProc_soa_unzip,
BuiltinProc_or_else,
BuiltinProc_DIRECTIVE, // NOTE(bill): This is used for specialized hash-prefixed procedures
// "Intrinsics"
@@ -238,7 +240,7 @@ gb_global BuiltinProc builtin_procs[BuiltinProc_COUNT] = {
{STR_LIT("size_of"), 1, false, Expr_Expr, BuiltinProcPkg_builtin},
{STR_LIT("align_of"), 1, false, Expr_Expr, BuiltinProcPkg_builtin},
{STR_LIT("offset_of"), 2, false, Expr_Expr, BuiltinProcPkg_builtin},
{STR_LIT("offset_of"), 1, true, Expr_Expr, BuiltinProcPkg_builtin},
{STR_LIT("type_of"), 1, false, Expr_Expr, BuiltinProcPkg_builtin},
{STR_LIT("type_info_of"), 1, false, Expr_Expr, BuiltinProcPkg_builtin},
{STR_LIT("typeid_of"), 1, false, Expr_Expr, BuiltinProcPkg_builtin},
@@ -263,6 +265,8 @@ gb_global BuiltinProc builtin_procs[BuiltinProc_COUNT] = {
{STR_LIT("soa_zip"), 1, true, Expr_Expr, BuiltinProcPkg_builtin},
{STR_LIT("soa_unzip"), 1, false, Expr_Expr, BuiltinProcPkg_builtin},
{STR_LIT("or_else"), 2, false, Expr_Expr, BuiltinProcPkg_builtin},
{STR_LIT(""), 0, true, Expr_Expr, BuiltinProcPkg_builtin}, // DIRECTIVE

View File

@@ -325,13 +325,13 @@ gb_global u64 const unsigned_integer_maxs[] = {
bool add_overflow_u64(u64 x, u64 y, u64 *result) {
*result = x + y;
return *result < x || *result < y;
*result = x + y;
return *result < x || *result < y;
}
bool sub_overflow_u64(u64 x, u64 y, u64 *result) {
*result = x - y;
return *result > x;
*result = x - y;
return *result > x;
}
void mul_overflow_u64(u64 x, u64 y, u64 *lo, u64 *hi) {
@@ -1174,3 +1174,97 @@ ReadDirectoryError read_directory(String path, Array<FileInfo> *fi) {
#else
#error Implement read_directory
#endif
#define USE_DAMERAU_LEVENSHTEIN 1
isize levenstein_distance_case_insensitive(String const &a, String const &b) {
isize w = a.len+1;
isize h = b.len+1;
isize *matrix = gb_alloc_array(temporary_allocator(), isize, w*h);
for (isize i = 0; i <= a.len; i++) {
matrix[i*w + 0] = i;
}
for (isize i = 0; i <= b.len; i++) {
matrix[0*w + i] = i;
}
for (isize i = 1; i <= a.len; i++) {
char a_c = gb_char_to_lower(cast(char)a.text[i-1]);
for (isize j = 1; j <= b.len; j++) {
char b_c = gb_char_to_lower(cast(char)b.text[j-1]);
if (a_c == b_c) {
matrix[i*w + j] = matrix[(i-1)*w + j-1];
} else {
isize remove = matrix[(i-1)*w + j] + 1;
isize insert = matrix[i*w + j-1] + 1;
isize substitute = matrix[(i-1)*w + j-1] + 1;
isize minimum = remove;
if (insert < minimum) {
minimum = insert;
}
if (substitute < minimum) {
minimum = substitute;
}
// Damerau-Levenshtein (transposition extension)
#if USE_DAMERAU_LEVENSHTEIN
if (i > 1 && j > 1) {
isize transpose = matrix[(i-2)*w + j-2] + 1;
if (transpose < minimum) {
minimum = transpose;
}
}
#endif
matrix[i*w + j] = minimum;
}
}
}
return matrix[a.len*w + b.len];
}
struct DistanceAndTarget {
isize distance;
String target;
};
struct DidYouMeanAnswers {
Array<DistanceAndTarget> distances;
String key;
};
enum {MAX_SMALLEST_DID_YOU_MEAN_DISTANCE = 3-USE_DAMERAU_LEVENSHTEIN};
DidYouMeanAnswers did_you_mean_make(gbAllocator allocator, isize cap, String const &key) {
DidYouMeanAnswers d = {};
array_init(&d.distances, allocator, 0, cap);
d.key = key;
return d;
}
void did_you_mean_destroy(DidYouMeanAnswers *d) {
array_free(&d->distances);
}
void did_you_mean_append(DidYouMeanAnswers *d, String const &target) {
if (target.len == 0 || target == "_") {
return;
}
DistanceAndTarget dat = {};
dat.target = target;
dat.distance = levenstein_distance_case_insensitive(d->key, target);
array_add(&d->distances, dat);
}
Slice<DistanceAndTarget> did_you_mean_results(DidYouMeanAnswers *d) {
gb_sort_array(d->distances.data, d->distances.count, gb_isize_cmp(gb_offset_of(DistanceAndTarget, distance)));
isize count = 0;
for (isize i = 0; i < d->distances.count; i++) {
isize distance = d->distances[i].distance;
if (distance > MAX_SMALLEST_DID_YOU_MEAN_DISTANCE) {
break;
}
count += 1;
}
return slice_array(d->distances, 0, count);
}
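
The helpers above collect candidate identifiers, score them with a case-insensitive (Damerau-)Levenshtein distance, and keep only those within MAX_SMALLEST_DID_YOU_MEAN_DISTANCE. A standalone sketch of the same "did you mean" idea using only the C++ standard library and the textbook transposition rule; the candidate strings and all names here are hypothetical:

// Standalone sketch: case-insensitive Damerau-Levenshtein distance plus a small
// cutoff to suggest close matches. Independent of the compiler's String/allocators.
#include <algorithm>
#include <cctype>
#include <cstdio>
#include <string>
#include <vector>

static size_t edit_distance_ci(const std::string &a, const std::string &b) {
    size_t rows = a.size() + 1, cols = b.size() + 1;
    std::vector<size_t> m(rows * cols);
    auto at = [&](size_t i, size_t j) -> size_t & { return m[i*cols + j]; };
    for (size_t i = 0; i < rows; i++) at(i, 0) = i;
    for (size_t j = 0; j < cols; j++) at(0, j) = j;
    for (size_t i = 1; i < rows; i++) {
        char ac = (char)std::tolower((unsigned char)a[i-1]);
        for (size_t j = 1; j < cols; j++) {
            char bc = (char)std::tolower((unsigned char)b[j-1]);
            if (ac == bc) {
                at(i, j) = at(i-1, j-1);
            } else {
                size_t best = std::min({at(i-1, j), at(i, j-1), at(i-1, j-1)}) + 1;
                if (i > 1 && j > 1 &&
                    ac == (char)std::tolower((unsigned char)b[j-2]) &&
                    bc == (char)std::tolower((unsigned char)a[i-2])) {
                    best = std::min(best, at(i-2, j-2) + 1);  // transposition
                }
                at(i, j) = best;
            }
        }
    }
    return at(a.size(), b.size());
}

int main() {
    const char *candidates[] = {"windows_amd64", "linux_amd64", "darwin_amd64"};
    std::string key = "linux_amd46";
    for (const char *c : candidates) {
        size_t d = edit_distance_ci(key, c);
        if (d <= 2) std::printf("Did you mean '%s'? (distance %zu)\n", c, d);
    }
    return 0;
}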

View File

@@ -306,15 +306,16 @@ bool lb_try_update_alignment(lbValue ptr, unsigned alignment) {
bool lb_try_vector_cast(lbModule *m, lbValue ptr, LLVMTypeRef *vector_type_) {
Type *array_type = base_type(type_deref(ptr.type));
GB_ASSERT(array_type->kind == Type_Array);
Type *elem_type = base_type(array_type->Array.elem);
GB_ASSERT(is_type_array_like(array_type));
i64 count = get_array_type_count(array_type);
Type *elem_type = base_array_type(array_type);
// TODO(bill): Determine what is the correct limit for doing vector arithmetic
if (type_size_of(array_type) <= build_context.max_align &&
is_type_valid_vector_elem(elem_type)) {
// Try to treat it like a vector if possible
bool possible = false;
LLVMTypeRef vector_type = LLVMVectorType(lb_type(m, elem_type), cast(unsigned)array_type->Array.count);
LLVMTypeRef vector_type = LLVMVectorType(lb_type(m, elem_type), cast(unsigned)count);
unsigned vector_alignment = cast(unsigned)lb_alignof(vector_type);
LLVMValueRef addr_ptr = ptr.value;
@@ -509,6 +510,9 @@ void lb_addr_store(lbProcedure *p, lbAddr addr, lbValue value) {
} else if (addr.kind == lbAddr_Swizzle) {
GB_ASSERT(addr.swizzle.count <= 4);
GB_ASSERT(value.value != nullptr);
value = lb_emit_conv(p, value, lb_addr_type(addr));
lbValue dst = lb_addr_get_ptr(p, addr);
lbValue src = lb_address_from_load_or_generate_local(p, value);
{
@@ -2132,6 +2136,28 @@ LLVMMetadataRef lb_debug_type_internal(lbModule *m, Type *type) {
return nullptr;
}
LLVMMetadataRef lb_get_base_scope_metadata(lbModule *m, Scope *scope) {
LLVMMetadataRef found = nullptr;
for (;;) {
if (scope == nullptr) {
return nullptr;
}
if (scope->flags & ScopeFlag_Proc) {
found = lb_get_llvm_metadata(m, scope->procedure_entity);
if (found) {
return found;
}
}
if (scope->flags & ScopeFlag_File) {
found = lb_get_llvm_metadata(m, scope->file);
if (found) {
return found;
}
}
scope = scope->parent;
}
}
LLVMMetadataRef lb_debug_type(lbModule *m, Type *type) {
GB_ASSERT(type != nullptr);
LLVMMetadataRef found = lb_get_llvm_metadata(m, type);
@@ -2147,7 +2173,7 @@ LLVMMetadataRef lb_debug_type(lbModule *m, Type *type) {
if (type->Named.type_name != nullptr) {
Entity *e = type->Named.type_name;
scope = lb_get_llvm_metadata(m, e->scope);
scope = lb_get_base_scope_metadata(m, e->scope);
if (scope != nullptr) {
file = LLVMDIScopeGetFile(scope);
}
@@ -2174,8 +2200,6 @@ LLVMMetadataRef lb_debug_type(lbModule *m, Type *type) {
switch (bt->kind) {
case Type_Enum:
{
LLVMMetadataRef scope = nullptr;
LLVMMetadataRef file = nullptr;
unsigned line = 0;
unsigned element_count = cast(unsigned)bt->Enum.fields.count;
LLVMMetadataRef *elements = gb_alloc_array(permanent_allocator(), LLVMMetadataRef, element_count);
@@ -2732,9 +2756,7 @@ lbProcedure *lb_create_procedure(lbModule *m, Entity *entity, bool ignore_body)
lbValue *found = string_map_get(&m->members, key);
if (found) {
lb_add_entity(m, entity, *found);
lbProcedure **p_found = string_map_get(&m->procedures, key);
GB_ASSERT(p_found != nullptr);
return *p_found;
return string_map_must_get(&m->procedures, key);
}
}
@@ -3601,7 +3623,7 @@ void lb_mem_zero_ptr_internal(lbProcedure *p, LLVMValueRef ptr, LLVMValueRef len
lb_type(p->module, t_int)
};
unsigned id = LLVMLookupIntrinsicID(name, gb_strlen(name));
GB_ASSERT_MSG(id != 0, "Unable to find %s.%s.%s.%s", name, LLVMPrintTypeToString(types[0]), LLVMPrintTypeToString(types[1]), LLVMPrintTypeToString(types[2]));
GB_ASSERT_MSG(id != 0, "Unable to find %s.%s.%s", name, LLVMPrintTypeToString(types[0]), LLVMPrintTypeToString(types[1]));
LLVMValueRef ip = LLVMGetIntrinsicDeclaration(p->module->mod, id, types, gb_count_of(types));
LLVMValueRef args[4] = {};
@@ -5315,14 +5337,39 @@ void lb_build_assignment(lbProcedure *p, Array<lbAddr> &lvals, Slice<Ast *> cons
}
}
void lb_build_return_stmt(lbProcedure *p, AstReturnStmt *rs) {
void lb_build_return_stmt_internal(lbProcedure *p, lbValue const &res) {
lbFunctionType *ft = lb_get_function_type(p->module, p, p->type);
bool return_by_pointer = ft->ret.kind == lbArg_Indirect;
if (return_by_pointer) {
if (res.value != nullptr) {
LLVMBuildStore(p->builder, res.value, p->return_ptr.addr.value);
} else {
LLVMBuildStore(p->builder, LLVMConstNull(p->abi_function_type->ret.type), p->return_ptr.addr.value);
}
lb_emit_defer_stmts(p, lbDeferExit_Return, nullptr);
LLVMBuildRetVoid(p->builder);
} else {
LLVMValueRef ret_val = res.value;
ret_val = OdinLLVMBuildTransmute(p, ret_val, p->abi_function_type->ret.type);
if (p->abi_function_type->ret.cast_type != nullptr) {
ret_val = OdinLLVMBuildTransmute(p, ret_val, p->abi_function_type->ret.cast_type);
}
lb_emit_defer_stmts(p, lbDeferExit_Return, nullptr);
LLVMBuildRet(p->builder, ret_val);
}
}
void lb_build_return_stmt(lbProcedure *p, Slice<Ast *> const &return_results) {
lb_ensure_abi_function_type(p->module, p);
lbValue res = {};
TypeTuple *tuple = &p->type->Proc.results->Tuple;
isize return_count = p->type->Proc.result_count;
isize res_count = rs->results.count;
isize res_count = return_results.count;
lbFunctionType *ft = lb_get_function_type(p->module, p, p->type);
bool return_by_pointer = ft->ret.kind == lbArg_Indirect;
@@ -5337,19 +5384,17 @@ void lb_build_return_stmt(lbProcedure *p, AstReturnStmt *rs) {
} else if (return_count == 1) {
Entity *e = tuple->variables[0];
if (res_count == 0) {
lbValue *found = map_get(&p->module->values, hash_entity(e));
GB_ASSERT(found);
res = lb_emit_load(p, *found);
lbValue found = map_must_get(&p->module->values, hash_entity(e));
res = lb_emit_load(p, found);
} else {
res = lb_build_expr(p, rs->results[0]);
res = lb_build_expr(p, return_results[0]);
res = lb_emit_conv(p, res, e->type);
}
if (p->type->Proc.has_named_results) {
// NOTE(bill): store the named values before returning
if (e->token.string != "") {
lbValue *found = map_get(&p->module->values, hash_entity(e));
GB_ASSERT(found != nullptr);
lb_emit_store(p, *found, lb_emit_conv(p, res, e->type));
lbValue found = map_must_get(&p->module->values, hash_entity(e));
lb_emit_store(p, found, lb_emit_conv(p, res, e->type));
}
}
@@ -5358,7 +5403,7 @@ void lb_build_return_stmt(lbProcedure *p, AstReturnStmt *rs) {
if (res_count != 0) {
for (isize res_index = 0; res_index < res_count; res_index++) {
lbValue res = lb_build_expr(p, rs->results[res_index]);
lbValue res = lb_build_expr(p, return_results[res_index]);
Type *t = res.type;
if (t->kind == Type_Tuple) {
for_array(i, t->Tuple.variables) {
@@ -5373,9 +5418,8 @@ void lb_build_return_stmt(lbProcedure *p, AstReturnStmt *rs) {
} else {
for (isize res_index = 0; res_index < return_count; res_index++) {
Entity *e = tuple->variables[res_index];
lbValue *found = map_get(&p->module->values, hash_entity(e));
GB_ASSERT(found);
lbValue res = lb_emit_load(p, *found);
lbValue found = map_must_get(&p->module->values, hash_entity(e));
lbValue res = lb_emit_load(p, found);
array_add(&results, res);
}
}
@@ -5396,9 +5440,7 @@ void lb_build_return_stmt(lbProcedure *p, AstReturnStmt *rs) {
if (e->token.string == "") {
continue;
}
lbValue *found = map_get(&p->module->values, hash_entity(e));
GB_ASSERT(found != nullptr);
named_results[i] = *found;
named_results[i] = map_must_get(&p->module->values, hash_entity(e));
values[i] = lb_emit_conv(p, results[i], e->type);
}
@@ -5437,28 +5479,7 @@ void lb_build_return_stmt(lbProcedure *p, AstReturnStmt *rs) {
res = lb_emit_load(p, res);
}
if (return_by_pointer) {
if (res.value != nullptr) {
LLVMBuildStore(p->builder, res.value, p->return_ptr.addr.value);
} else {
LLVMBuildStore(p->builder, LLVMConstNull(p->abi_function_type->ret.type), p->return_ptr.addr.value);
}
lb_emit_defer_stmts(p, lbDeferExit_Return, nullptr);
LLVMBuildRetVoid(p->builder);
} else {
LLVMValueRef ret_val = res.value;
ret_val = OdinLLVMBuildTransmute(p, ret_val, p->abi_function_type->ret.type);
if (p->abi_function_type->ret.cast_type != nullptr) {
ret_val = OdinLLVMBuildTransmute(p, ret_val, p->abi_function_type->ret.cast_type);
}
lb_emit_defer_stmts(p, lbDeferExit_Return, nullptr);
LLVMBuildRet(p->builder, ret_val);
}
lb_build_return_stmt_internal(p, res);
}
void lb_build_if_stmt(lbProcedure *p, Ast *node) {
@@ -5562,9 +5583,9 @@ void lb_build_assign_stmt_array(lbProcedure *p, TokenKind op, lbAddr const &lhs,
Type *lhs_type = lb_addr_type(lhs);
Type *rhs_type = value.type;
Type *array_type = base_type(lhs_type);
GB_ASSERT(array_type->kind == Type_Array);
i64 count = array_type->Array.count;
Type *elem_type = array_type->Array.elem;
GB_ASSERT(is_type_array_like(array_type));
i64 count = get_array_type_count(array_type);
Type *elem_type = base_array_type(array_type);
lbValue rhs = lb_emit_conv(p, value, lhs_type);
@@ -5869,7 +5890,7 @@ void lb_build_stmt(lbProcedure *p, Ast *node) {
case_end;
case_ast_node(rs, ReturnStmt, node);
lb_build_return_stmt(p, rs);
lb_build_return_stmt(p, rs->results);
case_end;
case_ast_node(is, IfStmt, node);
@@ -6335,7 +6356,7 @@ lbValue lb_find_value_from_entity(lbModule *m, Entity *e) {
}
}
GB_PANIC("\n\tError in: %s, missing value %.*s\n", token_pos_to_string(e->token.pos), LIT(e->token.string));
GB_PANIC("\n\tError in: %s, missing value '%.*s'\n", token_pos_to_string(e->token.pos), LIT(e->token.string));
return {};
}
@@ -6839,6 +6860,10 @@ lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bool allow_loc
return lb_const_nil(m, original_type);
}
if (is_type_raw_union(type)) {
return lb_const_nil(m, original_type);
}
isize offset = 0;
if (type->Struct.custom_align > 0) {
offset = 1;
@@ -7032,11 +7057,11 @@ lbValue lb_emit_unary_arith(lbProcedure *p, TokenKind op, lbValue x, Type *type)
break;
}
if (is_type_array(x.type)) {
if (is_type_array_like(x.type)) {
// IMPORTANT TODO(bill): This is very wasteful with regards to stack memory
Type *tl = base_type(x.type);
lbValue val = lb_address_from_load_or_generate_local(p, x);
GB_ASSERT(is_type_array(type));
GB_ASSERT(is_type_array_like(type));
Type *elem_type = base_array_type(type);
// NOTE(bill): Doesn't need to be zero because it will be initialized in the loops
@@ -7045,7 +7070,7 @@ lbValue lb_emit_unary_arith(lbProcedure *p, TokenKind op, lbValue x, Type *type)
bool inline_array_arith = type_size_of(type) <= build_context.max_align;
i32 count = cast(i32)tl->Array.count;
i32 count = cast(i32)get_array_type_count(tl);
LLVMTypeRef vector_type = nullptr;
if (op != Token_Not && lb_try_vector_cast(p->module, val, &vector_type)) {
@@ -7177,7 +7202,7 @@ lbValue lb_emit_unary_arith(lbProcedure *p, TokenKind op, lbValue x, Type *type)
}
bool lb_try_direct_vector_arith(lbProcedure *p, TokenKind op, lbValue lhs, lbValue rhs, Type *type, lbValue *res_) {
GB_ASSERT(is_type_array(type));
GB_ASSERT(is_type_array_like(type));
Type *elem_type = base_array_type(type);
// NOTE(bill): Shift operations cannot be easily dealt with due to Odin's semantics
@@ -7312,15 +7337,15 @@ bool lb_try_direct_vector_arith(lbProcedure *p, TokenKind op, lbValue lhs, lbVal
lbValue lb_emit_arith_array(lbProcedure *p, TokenKind op, lbValue lhs, lbValue rhs, Type *type) {
GB_ASSERT(is_type_array(lhs.type) || is_type_array(rhs.type));
GB_ASSERT(is_type_array_like(lhs.type) || is_type_array_like(rhs.type));
lhs = lb_emit_conv(p, lhs, type);
rhs = lb_emit_conv(p, rhs, type);
GB_ASSERT(is_type_array(type));
GB_ASSERT(is_type_array_like(type));
Type *elem_type = base_array_type(type);
i64 count = base_type(type)->Array.count;
i64 count = get_array_type_count(type);
unsigned n = cast(unsigned)count;
// NOTE(bill, 2021-06-12): Try to do a direct operation as a vector, if possible
@@ -7388,7 +7413,7 @@ lbValue lb_emit_arith_array(lbProcedure *p, TokenKind op, lbValue lhs, lbValue r
lbValue lb_emit_arith(lbProcedure *p, TokenKind op, lbValue lhs, lbValue rhs, Type *type) {
lbModule *m = p->module;
if (is_type_array(lhs.type) || is_type_array(rhs.type)) {
if (is_type_array_like(lhs.type) || is_type_array_like(rhs.type)) {
return lb_emit_arith_array(p, op, lhs, rhs, type);
} else if (is_type_complex(type)) {
lhs = lb_emit_conv(p, lhs, type);
@@ -7728,6 +7753,11 @@ lbValue lb_build_binary_expr(lbProcedure *p, Ast *expr) {
Type *type = default_type(tv.type);
lbValue right = lb_build_expr(p, be->right);
Type *rt = base_type(right.type);
if (is_type_pointer(rt)) {
right = lb_emit_load(p, right);
rt = type_deref(rt);
}
switch (rt->kind) {
case Type_Map:
{
@@ -8277,12 +8307,12 @@ lbValue lb_emit_conv(lbProcedure *p, lbValue value, Type *t) {
return lb_emit_transmute(p, value, t);
}
if (is_type_array(dst)) {
Type *elem = dst->Array.elem;
if (is_type_array_like(dst)) {
Type *elem = base_array_type(dst);
lbValue e = lb_emit_conv(p, value, elem);
// NOTE(bill): Doesn't need to be zero because it will be initialized in the loops
lbAddr v = lb_add_local_generated(p, t, false);
isize index_count = cast(isize)dst->Array.count;
isize index_count = cast(isize)get_array_type_count(dst);
for (isize i = 0; i < index_count; i++) {
lbValue elem = lb_emit_array_epi(p, v.addr, i);
@@ -9525,6 +9555,131 @@ lbValue lb_soa_unzip(lbProcedure *p, AstCallExpr *ce, TypeAndValue const &tv) {
return lb_addr_load(p, res);
}
void lb_emit_try_lhs_rhs(lbProcedure *p, Ast *arg, TypeAndValue const &tv, lbValue *lhs_, lbValue *rhs_) {
lbValue lhs = {};
lbValue rhs = {};
lbValue value = lb_build_expr(p, arg);
if (is_type_tuple(value.type)) {
i32 n = cast(i32)(value.type->Tuple.variables.count-1);
if (value.type->Tuple.variables.count == 2) {
lhs = lb_emit_struct_ev(p, value, 0);
} else {
lbAddr lhs_addr = lb_add_local_generated(p, tv.type, false);
lbValue lhs_ptr = lb_addr_get_ptr(p, lhs_addr);
for (i32 i = 0; i < n; i++) {
lb_emit_store(p, lb_emit_struct_ep(p, lhs_ptr, i), lb_emit_struct_ev(p, value, i));
}
lhs = lb_addr_load(p, lhs_addr);
}
rhs = lb_emit_struct_ev(p, value, n);
} else {
rhs = value;
}
GB_ASSERT(rhs.value != nullptr);
if (lhs_) *lhs_ = lhs;
if (rhs_) *rhs_ = rhs;
}
lbValue lb_emit_try_has_value(lbProcedure *p, lbValue rhs) {
lbValue has_value = {};
if (is_type_boolean(rhs.type)) {
has_value = rhs;
} else {
GB_ASSERT_MSG(type_has_nil(rhs.type), "%s", type_to_string(rhs.type));
has_value = lb_emit_comp_against_nil(p, Token_CmpEq, rhs);
}
GB_ASSERT(has_value.value != nullptr);
return has_value;
}
// lbValue lb_emit_try(lbProcedure *p, Ast *arg, TypeAndValue const &tv) {
// lbValue lhs = {};
// lbValue rhs = {};
// lb_emit_try_lhs_rhs(p, arg, tv, &lhs, &rhs);
// lbBlock *return_block = lb_create_block(p, "try.return", false);
// lbBlock *continue_block = lb_create_block(p, "try.continue", false);
// lb_emit_if(p, lb_emit_try_has_value(p, rhs), continue_block, return_block);
// lb_start_block(p, return_block);
// {
// Type *proc_type = base_type(p->type);
// Type *results = proc_type->Proc.results;
// GB_ASSERT(results != nullptr && results->kind == Type_Tuple);
// TypeTuple *tuple = &results->Tuple;
// GB_ASSERT(tuple->variables.count != 0);
// Entity *end_entity = tuple->variables[tuple->variables.count-1];
// rhs = lb_emit_conv(p, rhs, end_entity->type);
// if (p->type->Proc.has_named_results) {
// GB_ASSERT(end_entity->token.string.len != 0);
// // NOTE(bill): store the named values before returning
// lbValue found = map_must_get(&p->module->values, hash_entity(end_entity));
// lb_emit_store(p, found, rhs);
// lb_build_return_stmt(p, {});
// } else {
// GB_ASSERT(tuple->variables.count == 1);
// lb_build_return_stmt_internal(p, rhs);
// }
// }
// lb_start_block(p, continue_block);
// if (tv.type != nullptr) {
// return lb_emit_conv(p, lhs, tv.type);
// }
// return {};
// }
lbValue lb_emit_or_else(lbProcedure *p, Ast *arg, Ast *else_expr, TypeAndValue const &tv) {
lbValue lhs = {};
lbValue rhs = {};
lb_emit_try_lhs_rhs(p, arg, tv, &lhs, &rhs);
LLVMValueRef incoming_values[2] = {};
LLVMBasicBlockRef incoming_blocks[2] = {};
GB_ASSERT(else_expr != nullptr);
lbBlock *then = lb_create_block(p, "or_else.then");
lbBlock *done = lb_create_block(p, "or_else.done"); // NOTE(bill): Append later
lbBlock *else_ = lb_create_block(p, "or_else.else");
lb_emit_if(p, lb_emit_try_has_value(p, rhs), then, else_);
lb_start_block(p, then);
Type *type = default_type(tv.type);
incoming_values[0] = lb_emit_conv(p, lhs, type).value;
lb_emit_jump(p, done);
lb_start_block(p, else_);
incoming_values[1] = lb_emit_conv(p, lb_build_expr(p, else_expr), type).value;
lb_emit_jump(p, done);
lb_start_block(p, done);
lbValue res = {};
res.value = LLVMBuildPhi(p->builder, lb_type(p->module, type), "");
res.type = type;
GB_ASSERT(p->curr_block->preds.count >= 2);
incoming_blocks[0] = p->curr_block->preds[0]->block;
incoming_blocks[1] = p->curr_block->preds[1]->block;
LLVMAddIncoming(res.value, incoming_values, incoming_blocks, 2);
return res;
}
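
lb_emit_or_else branches on lb_emit_try_has_value and merges the two converted results with a phi node, so the else expression is evaluated only when no value is present. A plain C++ sketch of that behaviour (not the IR lowering itself); the names are hypothetical:

// Plain C++ sketch of the value-or-else behaviour that lb_emit_or_else lowers
// to a conditional branch plus phi node. Names are hypothetical.
#include <cstdio>
#include <utility>

template <typename T, typename F>
T or_else(std::pair<T, bool> const &value_ok, F &&fallback) {
    // "then" arm: a value is present.  "else" arm: evaluate the fallback lazily,
    // just as the second basic block above only runs when the check fails.
    return value_ok.second ? value_ok.first : fallback();
}

int main() {
    std::pair<int, bool> hit  = {42, true};
    std::pair<int, bool> miss = {0, false};
    std::printf("%d %d\n", or_else(hit,  [] { return -1; }),
                           or_else(miss, [] { return -1; }));  // prints "42 -1"
    return 0;
}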
lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValue const &tv, BuiltinProcId id) {
ast_node(ce, CallExpr, expr);
@@ -9828,10 +9983,11 @@ lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValue const &tv,
lbValue ep = lb_emit_struct_ep(p, tuple, cast(i32)src_index);
lb_emit_store(p, ep, f);
}
} else if (t->kind == Type_Array) {
} else if (is_type_array_like(t)) {
// TODO(bill): Clean-up this code
lbValue ap = lb_address_from_load_or_generate_local(p, val);
for (i32 i = 0; i < cast(i32)t->Array.count; i++) {
i32 n = cast(i32)get_array_type_count(t);
for (i32 i = 0; i < n; i++) {
lbValue f = lb_emit_load(p, lb_emit_array_epi(p, ap, i));
lbValue ep = lb_emit_struct_ep(p, tuple, i);
lb_emit_store(p, ep, f);
@@ -9913,6 +10069,8 @@ lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValue const &tv,
case BuiltinProc_soa_unzip:
return lb_soa_unzip(p, ce, tv);
case BuiltinProc_or_else:
return lb_emit_or_else(p, ce->args[0], ce->args[1], tv);
// "Intrinsics"
@@ -11103,7 +11261,14 @@ lbValue lb_emit_comp_against_nil(lbProcedure *p, TokenKind op_kind, lbValue x) {
lbValue res = {};
res.type = t_llvm_bool;
Type *t = x.type;
if (is_type_pointer(t)) {
if (is_type_enum(t)) {
if (op_kind == Token_CmpEq) {
res.value = LLVMBuildIsNull(p->builder, x.value, "");
} else if (op_kind == Token_NotEq) {
res.value = LLVMBuildIsNotNull(p->builder, x.value, "");
}
return res;
} else if (is_type_pointer(t)) {
if (op_kind == Token_CmpEq) {
res.value = LLVMBuildIsNull(p->builder, x.value, "");
} else if (op_kind == Token_NotEq) {
@@ -11141,26 +11306,27 @@ lbValue lb_emit_comp_against_nil(lbProcedure *p, TokenKind op_kind, lbValue x) {
return res;
}
} else if (is_type_slice(t)) {
lbValue len = lb_emit_struct_ev(p, x, 1);
lbValue data = lb_emit_struct_ev(p, x, 0);
if (op_kind == Token_CmpEq) {
res.value = LLVMBuildIsNull(p->builder, len.value, "");
res.value = LLVMBuildIsNull(p->builder, data.value, "");
return res;
} else if (op_kind == Token_NotEq) {
res.value = LLVMBuildIsNotNull(p->builder, len.value, "");
res.value = LLVMBuildIsNotNull(p->builder, data.value, "");
return res;
}
} else if (is_type_dynamic_array(t)) {
lbValue cap = lb_emit_struct_ev(p, x, 2);
lbValue data = lb_emit_struct_ev(p, x, 0);
if (op_kind == Token_CmpEq) {
res.value = LLVMBuildIsNull(p->builder, cap.value, "");
res.value = LLVMBuildIsNull(p->builder, data.value, "");
return res;
} else if (op_kind == Token_NotEq) {
res.value = LLVMBuildIsNotNull(p->builder, cap.value, "");
res.value = LLVMBuildIsNotNull(p->builder, data.value, "");
return res;
}
} else if (is_type_map(t)) {
lbValue cap = lb_map_cap(p, x);
return lb_emit_comp(p, op_kind, cap, lb_zero(p->module, cap.type));
lbValue hashes = lb_emit_struct_ev(p, x, 0);
lbValue data = lb_emit_struct_ev(p, hashes, 0);
return lb_emit_comp(p, op_kind, data, lb_zero(p->module, data.type));
} else if (is_type_union(t)) {
if (type_size_of(t) == 0) {
if (op_kind == Token_CmpEq) {
@@ -11181,21 +11347,35 @@ lbValue lb_emit_comp_against_nil(lbProcedure *p, TokenKind op_kind, lbValue x) {
} else if (is_type_soa_struct(t)) {
Type *bt = base_type(t);
if (bt->Struct.soa_kind == StructSoa_Slice) {
lbValue len = lb_soa_struct_len(p, x);
LLVMValueRef the_value = {};
if (bt->Struct.fields.count == 0) {
lbValue len = lb_soa_struct_len(p, x);
the_value = len.value;
} else {
lbValue first_field = lb_emit_struct_ev(p, x, 0);
the_value = first_field.value;
}
if (op_kind == Token_CmpEq) {
res.value = LLVMBuildIsNull(p->builder, len.value, "");
res.value = LLVMBuildIsNull(p->builder, the_value, "");
return res;
} else if (op_kind == Token_NotEq) {
res.value = LLVMBuildIsNotNull(p->builder, len.value, "");
res.value = LLVMBuildIsNotNull(p->builder, the_value, "");
return res;
}
} else if (bt->Struct.soa_kind == StructSoa_Dynamic) {
lbValue cap = lb_soa_struct_cap(p, x);
LLVMValueRef the_value = {};
if (bt->Struct.fields.count == 0) {
lbValue cap = lb_soa_struct_cap(p, x);
the_value = cap.value;
} else {
lbValue first_field = lb_emit_struct_ev(p, x, 0);
the_value = first_field.value;
}
if (op_kind == Token_CmpEq) {
res.value = LLVMBuildIsNull(p->builder, cap.value, "");
res.value = LLVMBuildIsNull(p->builder, the_value, "");
return res;
} else if (op_kind == Token_NotEq) {
res.value = LLVMBuildIsNotNull(p->builder, cap.value, "");
res.value = LLVMBuildIsNotNull(p->builder, the_value, "");
return res;
}
}
@@ -12682,9 +12862,7 @@ lbValue lb_build_expr(lbProcedure *p, Ast *expr) {
}
lbAddr lb_get_soa_variable_addr(lbProcedure *p, Entity *e) {
lbAddr *found = map_get(&p->module->soa_values, hash_entity(e));
GB_ASSERT(found != nullptr);
return *found;
return map_must_get(&p->module->soa_values, hash_entity(e));
}
lbValue lb_get_using_variable(lbProcedure *p, Entity *e) {
GB_ASSERT(e->kind == Entity_Variable && e->flags & EntityFlag_Using);
@@ -12939,8 +13117,13 @@ lbAddr lb_build_addr(lbProcedure *p, Ast *expr) {
u8 index = swizzle_indices_raw>>(i*2) & 3;
swizzle_indices[i] = index;
}
lbAddr addr = lb_build_addr(p, se->expr);
lbValue a = lb_addr_get_ptr(p, addr);
lbValue a = {};
if (is_type_pointer(tav.type)) {
a = lb_build_expr(p, se->expr);
} else {
lbAddr addr = lb_build_addr(p, se->expr);
a = lb_addr_get_ptr(p, addr);
}
GB_ASSERT(is_type_array(expr->tav.type));
return lb_addr_swizzle(a, expr->tav.type, swizzle_count, swizzle_indices);
@@ -13441,7 +13624,6 @@ lbAddr lb_build_addr(lbProcedure *p, Ast *expr) {
default: GB_PANIC("Unknown CompoundLit type: %s", type_to_string(type)); break;
case Type_Struct: {
// TODO(bill): "constant" '#raw_union's are not initialized constantly at the moment.
// NOTE(bill): This is due to the layout of the unions when printed to LLVM-IR
bool is_raw_union = is_type_raw_union(bt);
@@ -13449,6 +13631,8 @@ lbAddr lb_build_addr(lbProcedure *p, Ast *expr) {
TypeStruct *st = &bt->Struct;
if (cl->elems.count > 0) {
lb_addr_store(p, v, lb_const_value(p->module, type, exact_value_compound(expr)));
lbValue comp_lit_ptr = lb_addr_get_ptr(p, v);
for_array(field_index, cl->elems) {
Ast *elem = cl->elems[field_index];
@@ -13477,6 +13661,12 @@ lbAddr lb_build_addr(lbProcedure *p, Ast *expr) {
field_expr = lb_build_expr(p, elem);
lbValue gep = {};
if (is_raw_union) {
gep = lb_emit_conv(p, comp_lit_ptr, alloc_type_pointer(ft));
} else {
gep = lb_emit_struct_ep(p, comp_lit_ptr, cast(i32)index);
}
Type *fet = field_expr.type;
GB_ASSERT(fet->kind != Type_Tuple);
@@ -13485,11 +13675,9 @@ lbAddr lb_build_addr(lbProcedure *p, Ast *expr) {
if (is_type_union(ft) && !are_types_identical(fet, ft) && !is_type_untyped(fet)) {
GB_ASSERT_MSG(union_variant_index(ft, fet) > 0, "%s", type_to_string(fet));
lbValue gep = lb_emit_struct_ep(p, lb_addr_get_ptr(p, v), cast(i32)index);
lb_emit_store_union_variant(p, gep, field_expr, fet);
} else {
lbValue fv = lb_emit_conv(p, field_expr, ft);
lbValue gep = lb_emit_struct_ep(p, lb_addr_get_ptr(p, v), cast(i32)index);
lb_emit_store(p, gep, fv);
}
}
@@ -14174,7 +14362,6 @@ lbValue lb_find_runtime_value(lbModule *m, String const &name) {
}
lbValue lb_find_package_value(lbModule *m, String const &pkg, String const &name) {
Entity *e = find_entity_in_pkg(m->info, pkg, name);
lbValue *found = map_get(&m->values, hash_entity(e));
return lb_find_value_from_entity(m, e);
}
@@ -15023,9 +15210,23 @@ lbProcedure *lb_create_startup_runtime(lbModule *main_module, lbProcedure *start
GB_ASSERT(e->kind == Entity_Variable);
e->code_gen_module = entity_module;
if (var->decl->init_expr != nullptr) {
// gb_printf_err("%s\n", expr_to_string(var->decl->init_expr));
lbValue init = lb_build_expr(p, var->decl->init_expr);
Ast *init_expr = var->decl->init_expr;
if (init_expr != nullptr) {
lbValue init = lb_build_expr(p, init_expr);
if (init.value == nullptr) {
LLVMTypeRef global_type = LLVMGetElementType(LLVMTypeOf(var->var.value));
if (is_type_untyped_undef(init.type)) {
LLVMSetInitializer(var->var.value, LLVMGetUndef(global_type));
var->is_initialized = true;
continue;
} else if (is_type_untyped_nil(init.type)) {
LLVMSetInitializer(var->var.value, LLVMConstNull(global_type));
var->is_initialized = true;
continue;
}
GB_PANIC("Invalid init value, got %s", expr_to_string(init_expr));
}
LLVMValueKind value_kind = LLVMGetValueKind(init.value);
// gb_printf_err("%s %d\n", LLVMPrintValueToString(init.value));
@@ -15702,7 +15903,7 @@ void lb_generate_code(lbGenerator *gen) {
if (is_export) {
LLVMSetLinkage(g.value, LLVMDLLExportLinkage);
LLVMSetDLLStorageClass(g.value, LLVMDLLExportStorageClass);
} else {
} else if (!is_foreign) {
if (USE_SEPARTE_MODULES) {
LLVMSetLinkage(g.value, LLVMExternalLinkage);
} else {

View File

@@ -1106,24 +1106,24 @@ bool parse_build_flags(Array<String> args) {
}
if (!found) {
struct DistanceAndTarget {
struct DistanceAndTargetIndex {
isize distance;
isize target_index;
};
DistanceAndTarget distances[gb_count_of(named_targets)] = {};
DistanceAndTargetIndex distances[gb_count_of(named_targets)] = {};
for (isize i = 0; i < gb_count_of(named_targets); i++) {
distances[i].target_index = i;
distances[i].distance = levenstein_distance_case_insensitive(str, named_targets[i].name);
}
gb_sort_array(distances, gb_count_of(distances), gb_isize_cmp(gb_offset_of(DistanceAndTarget, distance)));
gb_sort_array(distances, gb_count_of(distances), gb_isize_cmp(gb_offset_of(DistanceAndTargetIndex, distance)));
gb_printf_err("Unknown target '%.*s'\n", LIT(str));
enum {MAX_SMALLEST_DISTANCE = 3};
if (distances[0].distance <= MAX_SMALLEST_DISTANCE) {
if (distances[0].distance <= MAX_SMALLEST_DID_YOU_MEAN_DISTANCE) {
gb_printf_err("Did you mean:\n");
for (isize i = 0; i < gb_count_of(named_targets); i++) {
if (distances[i].distance > MAX_SMALLEST_DISTANCE) {
if (distances[i].distance > MAX_SMALLEST_DID_YOU_MEAN_DISTANCE) {
break;
}
gb_printf_err("\t%.*s\n", LIT(named_targets[distances[i].target_index].name));
@@ -1809,6 +1809,10 @@ void print_show_help(String const arg0, String const &command) {
print_usage_line(1, "-warnings-as-errors");
print_usage_line(2, "Treats warning messages as error messages");
print_usage_line(0, "");
print_usage_line(1, "-verbose-errors");
print_usage_line(2, "Prints verbose error messages showing the code on that line and the location in that line");
print_usage_line(0, "");
}
if (run_or_build) {

View File

@@ -69,6 +69,7 @@ struct Map {
template <typename T> void map_init (Map<T> *h, gbAllocator a, isize capacity = 16);
template <typename T> void map_destroy (Map<T> *h);
template <typename T> T * map_get (Map<T> *h, HashKey const &key);
template <typename T> T & map_must_get (Map<T> *h, HashKey const &key);
template <typename T> void map_set (Map<T> *h, HashKey const &key, T const &value);
template <typename T> void map_remove (Map<T> *h, HashKey const &key);
template <typename T> void map_clear (Map<T> *h);
@@ -202,6 +203,13 @@ T *map_get(Map<T> *h, HashKey const &key) {
return nullptr;
}
template <typename T>
T &map_must_get(Map<T> *h, HashKey const &key) {
isize index = map__find(h, key).entry_index;
GB_ASSERT(index >= 0);
return h->entries[index].value;
}
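
map_must_get collapses the map_get-plus-GB_ASSERT-plus-dereference pattern used throughout the backend changes above into a single asserted lookup that returns a reference. A standalone analogue over std::unordered_map; the function names here are hypothetical:

// Standalone analogue of the get vs. must_get split, using std::unordered_map.
#include <cassert>
#include <string>
#include <unordered_map>

template <typename K, typename V>
V *map_get_opt(std::unordered_map<K, V> &m, K const &key) {
    auto it = m.find(key);
    return it == m.end() ? nullptr : &it->second;    // may be null: caller checks
}

template <typename K, typename V>
V &map_must_get_ref(std::unordered_map<K, V> &m, K const &key) {
    auto it = m.find(key);
    assert(it != m.end());                           // the lookup is required to succeed
    return it->second;                               // reference, never null
}

int main() {
    std::unordered_map<std::string, int> values = {{"main", 42}};
    int *maybe = map_get_opt(values, std::string("main"));       // optional lookup
    int &must  = map_must_get_ref(values, std::string("main"));  // asserted lookup
    return (maybe != nullptr && *maybe == must) ? 0 : 1;
}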
template <typename T>
void map_set(Map<T> *h, HashKey const &key, T const &value) {
isize index;

View File

@@ -680,6 +680,7 @@ Ast *ast_auto_cast(AstFile *f, Token token, Ast *expr) {
return result;
}
Ast *ast_inline_asm_expr(AstFile *f, Token token, Token open, Token close,
Array<Ast *> const &param_types,
Ast *return_type,
@@ -1878,13 +1879,13 @@ Ast *parse_force_inlining_operand(AstFile *f, Token token) {
if (e->kind == Ast_ProcLit) {
if (expr->ProcLit.inlining != ProcInlining_none &&
expr->ProcLit.inlining != pi) {
syntax_error(expr, "You cannot apply both '#force_inline' and '#force_no_inline' to a procedure literal");
syntax_error(expr, "Cannot apply both '#force_inline' and '#force_no_inline' to a procedure literal");
}
expr->ProcLit.inlining = pi;
} else if (e->kind == Ast_CallExpr) {
if (expr->CallExpr.inlining != ProcInlining_none &&
expr->CallExpr.inlining != pi) {
syntax_error(expr, "You cannot apply both '#force_inline' and '#force_no_inline' to a procedure call");
syntax_error(expr, "Cannot apply both '#force_inline' and '#force_no_inline' to a procedure call");
}
expr->CallExpr.inlining = pi;
}
@@ -1924,6 +1925,12 @@ Ast *parse_operand(AstFile *f, bool lhs) {
Token open, close;
// NOTE(bill): Skip the Paren Expression
open = expect_token(f, Token_OpenParen);
if (f->prev_token.kind == Token_CloseParen) {
close = expect_token(f, Token_CloseParen);
syntax_error(open, "Invalid parentheses expression with no inside expression");
return ast_bad_expr(f, open, close);
}
allow_newline = f->allow_newline;
if (f->expr_level < 0) {
f->allow_newline = false;
@@ -2723,7 +2730,6 @@ Ast *parse_unary_expr(AstFile *f, bool lhs) {
return ast_auto_cast(f, token, expr);
}
case Token_Add:
case Token_Sub:
case Token_Xor:
@@ -3555,7 +3561,9 @@ Ast *parse_field_list(AstFile *f, isize *name_count_, u32 allowed_flags, TokenKi
if (f->curr_token.kind != Token_Eq) {
type = parse_var_type(f, allow_ellipsis, allow_typeid_token);
Ast *tt = unparen_expr(type);
if (is_signature && !any_polymorphic_names && tt->kind == Ast_TypeidType && tt->TypeidType.specialization != nullptr) {
if (tt == nullptr) {
syntax_error(f->prev_token, "Invalid type expression in field list");
} else if (is_signature && !any_polymorphic_names && tt->kind == Ast_TypeidType && tt->TypeidType.specialization != nullptr) {
syntax_error(type, "Specialization of typeid is not allowed without polymorphic names");
}
}

View File

@@ -779,41 +779,3 @@ i32 unquote_string(gbAllocator a, String *s_, u8 quote=0, bool has_carriage_retu
return 2;
}
isize levenstein_distance_case_insensitive(String const &a, String const &b) {
isize w = a.len+1;
isize h = b.len+1;
isize *matrix = gb_alloc_array(heap_allocator(), isize, w*h);
for (isize i = 0; i <= a.len; i++) {
matrix[i*w + 0] = i;
}
for (isize i = 0; i <= b.len; i++) {
matrix[0*w + i] = i;
}
for (isize i = 1; i <= a.len; i++) {
char a_c = gb_char_to_lower(cast(char)a.text[i-1]);
for (isize j = 1; j <= b.len; j++) {
char b_c = gb_char_to_lower(cast(char)b.text[j-1]);
if (a_c == b_c) {
matrix[i*w + j] = matrix[(i-1)*w + j-1];
} else {
isize remove = matrix[(i-1)*w + j] + 1;
isize insert = matrix[i*w + j-1] + 1;
isize substitute = matrix[(i-1)*w + j-1] + 1;
isize minimum = remove;
if (insert < minimum) {
minimum = insert;
}
if (substitute < minimum) {
minimum = substitute;
}
matrix[i*w + j] = minimum;
}
}
}
isize res = matrix[a.len*w + b.len];
gb_free(heap_allocator(), matrix);
return res;
}

View File

@@ -54,6 +54,10 @@ template <typename T> T * string_map_get (StringMap<T> *h, char co
template <typename T> T * string_map_get (StringMap<T> *h, String const &key);
template <typename T> T * string_map_get (StringMap<T> *h, StringHashKey const &key);
template <typename T> T & string_map_must_get (StringMap<T> *h, char const *key);
template <typename T> T & string_map_must_get (StringMap<T> *h, String const &key);
template <typename T> T & string_map_must_get (StringMap<T> *h, StringHashKey const &key);
template <typename T> void string_map_set (StringMap<T> *h, StringHashKey const &key, T const &value);
template <typename T> void string_map_set (StringMap<T> *h, String const &key, T const &value);
template <typename T> void string_map_set (StringMap<T> *h, char const *key, T const &value);
@@ -187,6 +191,23 @@ gb_inline T *string_map_get(StringMap<T> *h, char const *key) {
return string_map_get(h, string_hash_string(make_string_c(key)));
}
template <typename T>
T &string_map_must_get(StringMap<T> *h, StringHashKey const &key) {
isize index = string_map__find(h, key).entry_index;
GB_ASSERT(index >= 0);
return h->entries[index].value;
}
template <typename T>
gb_inline T &string_map_must_get(StringMap<T> *h, String const &key) {
return string_map_must_get(h, string_hash_string(key));
}
template <typename T>
gb_inline T &string_map_must_get(StringMap<T> *h, char const *key) {
return string_map_must_get(h, string_hash_string(make_string_c(key)));
}
template <typename T>
void string_map_set(StringMap<T> *h, StringHashKey const &key, T const &value) {
isize index;

View File

@@ -1409,14 +1409,14 @@ void tokenizer_get_token(Tokenizer *t, Token *token, int repeat=0) {
if (t->curr_rune == '=') {
advance_to_next_rune(t);
token->kind = Token_SubEq;
} else if (t->curr_rune == '-' && peek_byte(t) == '-') {
advance_to_next_rune(t);
advance_to_next_rune(t);
token->kind = Token_Undef;
} else if (t->curr_rune == '-') {
insert_semicolon = true;
advance_to_next_rune(t);
token->kind = Token_Decrement;
insert_semicolon = true;
if (t->curr_rune == '-') {
advance_to_next_rune(t);
token->kind = Token_Undef;
}
} else if (t->curr_rune == '>') {
advance_to_next_rune(t);
token->kind = Token_ArrowRight;

View File

@@ -1262,6 +1262,20 @@ bool is_type_rune_array(Type *t) {
}
bool is_type_array_like(Type *t) {
return is_type_array(t) || is_type_enumerated_array(t);
}
i64 get_array_type_count(Type *t) {
Type *bt = base_type(t);
if (bt->kind == Type_Array) {
return bt->Array.count;
} else if (bt->kind == Type_EnumeratedArray) {
return bt->EnumeratedArray.count;
}
GB_ASSERT(is_type_array_like(t));
return -1;
}
Type *core_array_type(Type *t) {