Merge remote-tracking branch 'upstream/master' into parser-fix

This commit is contained in:
Daniel Gavin
2022-01-27 13:58:41 +01:00
45 changed files with 472 additions and 146 deletions

View File

@@ -111,9 +111,9 @@ ZFAST_MASK :: ((1 << ZFAST_BITS) - 1)
*/
Huffman_Table :: struct {
fast: [1 << ZFAST_BITS]u16,
firstcode: [16]u16,
firstcode: [17]u16,
maxcode: [17]int,
firstsymbol: [16]u16,
firstsymbol: [17]u16,
size: [288]u8,
value: [288]u16,
}
@@ -244,7 +244,7 @@ allocate_huffman_table :: proc(allocator := context.allocator) -> (z: ^Huffman_T
@(optimization_mode="speed")
build_huffman :: proc(z: ^Huffman_Table, code_lengths: []u8) -> (err: Error) {
sizes: [HUFFMAN_MAX_BITS+1]int
next_code: [HUFFMAN_MAX_BITS]int
next_code: [HUFFMAN_MAX_BITS+1]int
k := int(0)
@@ -256,14 +256,14 @@ build_huffman :: proc(z: ^Huffman_Table, code_lengths: []u8) -> (err: Error) {
}
sizes[0] = 0
for i in 1..<(HUFFMAN_MAX_BITS+1) {
for i in 1 ..< HUFFMAN_MAX_BITS {
if sizes[i] > (1 << uint(i)) {
return E_Deflate.Huffman_Bad_Sizes
}
}
code := int(0)
for i in 1..<HUFFMAN_MAX_BITS {
for i in 1 ..= HUFFMAN_MAX_BITS {
next_code[i] = code
z.firstcode[i] = u16(code)
z.firstsymbol[i] = u16(k)
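The fixes above share one off-by-one: canonical Huffman code lengths run from 1 through HUFFMAN_MAX_BITS inclusive, so every array indexed by bit length needs HUFFMAN_MAX_BITS+1 slots, and the code-assignment loop must use an inclusive range. A minimal standalone sketch of the first-code computation (an illustration, not the library code; the names first_codes and sizes are mine, and HUFFMAN_MAX_BITS is 16 as in DEFLATE):

package huffman_sketch

HUFFMAN_MAX_BITS :: 16

// sizes[i] holds how many codes have bit length i (index 0 unused).
first_codes :: proc(sizes: [HUFFMAN_MAX_BITS + 1]int) -> (firstcode: [HUFFMAN_MAX_BITS + 1]u16) {
	code := 0
	for i in 1 ..= HUFFMAN_MAX_BITS {    // inclusive: length 16 must be visited too
		firstcode[i] = u16(code)
		code = (code + sizes[i]) << 1    // canonical rule: the next length starts past this one, shifted left
	}
	return
}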

View File

@@ -88,7 +88,7 @@ create :: proc(max_index: int, min_index := 0, allocator := context.allocator) -
res = Bit_Array{
bias = min_index,
}
return res, resize_if_needed(&res, size_in_bits)
return res, resize_if_needed(&res, legs)
}
/*

View File

@@ -2,6 +2,7 @@ package container_queue
import "core:builtin"
import "core:runtime"
_ :: runtime
// Dynamically resizable double-ended queue/ring-buffer
Queue :: struct($T: typeid) {

View File

@@ -44,7 +44,7 @@ hash_bytes_224 :: proc "contextless" (data: []byte) -> [DIGEST_SIZE_224]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_224(transmute([]byte)(data), hash);
hash_bytes_to_buffer_224(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_224 will hash the given input and write the
@@ -123,7 +123,7 @@ hash_bytes_256 :: proc "contextless" (data: []byte) -> [DIGEST_SIZE_256]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_256(transmute([]byte)(data), hash);
hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_256 will hash the given input and write the
@@ -202,7 +202,7 @@ hash_bytes_384 :: proc "contextless" (data: []byte) -> [DIGEST_SIZE_384]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_384(transmute([]byte)(data), hash);
hash_bytes_to_buffer_384(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_384 will hash the given input and write the
@@ -281,7 +281,7 @@ hash_bytes_512 :: proc "contextless" (data: []byte) -> [DIGEST_SIZE_512]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_512(transmute([]byte)(data), hash);
hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_512 will hash the given input and write the
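The semicolon removals in this and the following crypto files are mechanical. For orientation, a hedged usage sketch of the hash_string_to_buffer_* pattern these hunks touch; the core:crypto/sha2 import path and the DIGEST_SIZE_256 constant are assumptions based on the names visible here:

package hash_usage_sketch

import "core:crypto/sha2"

main :: proc() {
	// The destination must be at least DIGEST_SIZE_256 bytes,
	// as the doc comments above require.
	digest: [sha2.DIGEST_SIZE_256]byte
	sha2.hash_string_to_buffer_256("hello, world", digest[:])
}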

View File

@@ -46,7 +46,7 @@ hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer(transmute([]byte)(data), hash);
hash_bytes_to_buffer(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer will hash the given input and write the

View File

@@ -47,7 +47,7 @@ hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer(transmute([]byte)(data), hash);
hash_bytes_to_buffer(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer will hash the given input and write the

View File

@@ -41,7 +41,7 @@ hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer(transmute([]byte)(data), hash);
hash_bytes_to_buffer(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer will hash the given input and write the

View File

@@ -44,7 +44,7 @@ hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_224(transmute([]byte)(data), hash);
hash_bytes_to_buffer_224(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_224 will hash the given input and write the
@@ -123,7 +123,7 @@ hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_256(transmute([]byte)(data), hash);
hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_256 will hash the given input and write the
@@ -202,7 +202,7 @@ hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_384(transmute([]byte)(data), hash);
hash_bytes_to_buffer_384(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_384 will hash the given input and write the
@@ -281,7 +281,7 @@ hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_512(transmute([]byte)(data), hash);
hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_512 will hash the given input and write the

View File

@@ -50,7 +50,7 @@ hash_bytes_128_3 :: proc(data: []byte) -> [DIGEST_SIZE_128]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_128_3 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_128_3(transmute([]byte)(data), hash);
hash_bytes_to_buffer_128_3(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_128_3 will hash the given input and write the
@@ -135,7 +135,7 @@ hash_bytes_128_4 :: proc(data: []byte) -> [DIGEST_SIZE_128]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_128_4 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_128_4(transmute([]byte)(data), hash);
hash_bytes_to_buffer_128_4(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_128_4 will hash the given input and write the
@@ -220,7 +220,7 @@ hash_bytes_128_5 :: proc(data: []byte) -> [DIGEST_SIZE_128]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_128_5 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_128_5(transmute([]byte)(data), hash);
hash_bytes_to_buffer_128_5(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_128_5 will hash the given input and write the
@@ -305,7 +305,7 @@ hash_bytes_160_3 :: proc(data: []byte) -> [DIGEST_SIZE_160]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_160_3 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_160_3(transmute([]byte)(data), hash);
hash_bytes_to_buffer_160_3(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_160_3 will hash the given input and write the
@@ -390,7 +390,7 @@ hash_bytes_160_4 :: proc(data: []byte) -> [DIGEST_SIZE_160]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_160_4 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_160_4(transmute([]byte)(data), hash);
hash_bytes_to_buffer_160_4(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_160_4 will hash the given input and write the
@@ -475,7 +475,7 @@ hash_bytes_160_5 :: proc(data: []byte) -> [DIGEST_SIZE_160]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_160_5 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_160_5(transmute([]byte)(data), hash);
hash_bytes_to_buffer_160_5(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_160_5 will hash the given input and write the
@@ -560,7 +560,7 @@ hash_bytes_192_3 :: proc(data: []byte) -> [DIGEST_SIZE_192]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_192_3 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_192_3(transmute([]byte)(data), hash);
hash_bytes_to_buffer_192_3(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_192_3 will hash the given input and write the
@@ -645,7 +645,7 @@ hash_bytes_192_4 :: proc(data: []byte) -> [DIGEST_SIZE_192]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_192_4 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_192_4(transmute([]byte)(data), hash);
hash_bytes_to_buffer_192_4(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_192_4 will hash the given input and write the
@@ -730,7 +730,7 @@ hash_bytes_192_5 :: proc(data: []byte) -> [DIGEST_SIZE_192]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_192_5 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_192_5(transmute([]byte)(data), hash);
hash_bytes_to_buffer_192_5(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_192_5 will hash the given input and write the
@@ -815,7 +815,7 @@ hash_bytes_224_3 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_224_3 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_224_3(transmute([]byte)(data), hash);
hash_bytes_to_buffer_224_3(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_224_3 will hash the given input and write the
@@ -900,7 +900,7 @@ hash_bytes_224_4 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_224_4 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_224_4(transmute([]byte)(data), hash);
hash_bytes_to_buffer_224_4(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_224_4 will hash the given input and write the
@@ -985,7 +985,7 @@ hash_bytes_224_5 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_224_5 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_224_5(transmute([]byte)(data), hash);
hash_bytes_to_buffer_224_5(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_224_5 will hash the given input and write the
@@ -1070,7 +1070,7 @@ hash_bytes_256_3 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_256_3 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_256_3(transmute([]byte)(data), hash);
hash_bytes_to_buffer_256_3(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_256_3 will hash the given input and write the
@@ -1155,7 +1155,7 @@ hash_bytes_256_4 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_256_4 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_256_4(transmute([]byte)(data), hash);
hash_bytes_to_buffer_256_4(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_256_4 will hash the given input and write the
@@ -1240,7 +1240,7 @@ hash_bytes_256_5 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_256_5 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_256_5(transmute([]byte)(data), hash);
hash_bytes_to_buffer_256_5(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_256_5 will hash the given input and write the

View File

@@ -44,7 +44,7 @@ hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_224(transmute([]byte)(data), hash);
hash_bytes_to_buffer_224(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_224 will hash the given input and write the
@@ -123,7 +123,7 @@ hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_256(transmute([]byte)(data), hash);
hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_256 will hash the given input and write the
@@ -202,7 +202,7 @@ hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_384(transmute([]byte)(data), hash);
hash_bytes_to_buffer_384(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_384 will hash the given input and write the
@@ -281,7 +281,7 @@ hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_512(transmute([]byte)(data), hash);
hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_512 will hash the given input and write the

View File

@@ -49,7 +49,7 @@ hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_224(transmute([]byte)(data), hash);
hash_bytes_to_buffer_224(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_224 will hash the given input and write the
@@ -131,7 +131,7 @@ hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_256(transmute([]byte)(data), hash);
hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_256 will hash the given input and write the
@@ -213,7 +213,7 @@ hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_384(transmute([]byte)(data), hash);
hash_bytes_to_buffer_384(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_384 will hash the given input and write the
@@ -295,7 +295,7 @@ hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_512(transmute([]byte)(data), hash);
hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_512 will hash the given input and write the

View File

@@ -40,7 +40,7 @@ hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer(transmute([]byte)(data), hash);
hash_bytes_to_buffer(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer will hash the given input and write the

View File

@@ -44,7 +44,7 @@ hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer(transmute([]byte)(data), hash);
hash_bytes_to_buffer(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer will hash the given input and write the

View File

@@ -43,7 +43,7 @@ hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer(transmute([]byte)(data), hash);
hash_bytes_to_buffer(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer will hash the given input and write the

View File

@@ -45,7 +45,7 @@ hash_bytes_128 :: proc(data: []byte) -> [DIGEST_SIZE_128]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_128 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_128(transmute([]byte)(data), hash);
hash_bytes_to_buffer_128(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_128 will hash the given input and write the
@@ -121,7 +121,7 @@ hash_bytes_160 :: proc(data: []byte) -> [DIGEST_SIZE_160]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_160 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_160(transmute([]byte)(data), hash);
hash_bytes_to_buffer_160(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_160 will hash the given input and write the
@@ -197,7 +197,7 @@ hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_256(transmute([]byte)(data), hash);
hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_256 will hash the given input and write the
@@ -273,7 +273,7 @@ hash_bytes_320 :: proc(data: []byte) -> [DIGEST_SIZE_320]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_320 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_320(transmute([]byte)(data), hash);
hash_bytes_to_buffer_320(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_320 will hash the given input and write the

View File

@@ -43,7 +43,7 @@ hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer(transmute([]byte)(data), hash);
hash_bytes_to_buffer(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer will hash the given input and write the

View File

@@ -48,7 +48,7 @@ hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_224(transmute([]byte)(data), hash);
hash_bytes_to_buffer_224(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_224 will hash the given input and write the
@@ -127,7 +127,7 @@ hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_256(transmute([]byte)(data), hash);
hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_256 will hash the given input and write the
@@ -206,7 +206,7 @@ hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_384(transmute([]byte)(data), hash);
hash_bytes_to_buffer_384(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_384 will hash the given input and write the
@@ -285,7 +285,7 @@ hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_512(transmute([]byte)(data), hash);
hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_512 will hash the given input and write the

View File

@@ -47,7 +47,7 @@ hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_224(transmute([]byte)(data), hash);
hash_bytes_to_buffer_224(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_224 will hash the given input and write the
@@ -126,7 +126,7 @@ hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_256(transmute([]byte)(data), hash);
hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_256 will hash the given input and write the
@@ -205,7 +205,7 @@ hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_384(transmute([]byte)(data), hash);
hash_bytes_to_buffer_384(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_384 will hash the given input and write the
@@ -284,7 +284,7 @@ hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_512(transmute([]byte)(data), hash);
hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_512 will hash the given input and write the

View File

@@ -46,7 +46,7 @@ hash_bytes_128 :: proc(data: []byte) -> [DIGEST_SIZE_128]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_128 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_128(transmute([]byte)(data), hash);
hash_bytes_to_buffer_128(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_128 will hash the given input and write the
@@ -128,7 +128,7 @@ hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_256(transmute([]byte)(data), hash);
hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_256 will hash the given input and write the

View File

@@ -42,7 +42,7 @@ hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer(transmute([]byte)(data), hash);
hash_bytes_to_buffer(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer will hash the given input and write the

View File

@@ -44,7 +44,7 @@ hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_256(transmute([]byte)(data), hash);
hash_bytes_to_buffer_256(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_256 will hash the given input and write the
@@ -122,7 +122,7 @@ hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_512(transmute([]byte)(data), hash);
hash_bytes_to_buffer_512(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_512 will hash the given input and write the

View File

@@ -45,7 +45,7 @@ hash_bytes_128 :: proc(data: []byte) -> [DIGEST_SIZE_128]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_128 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_128(transmute([]byte)(data), hash);
hash_bytes_to_buffer_128(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_128 will hash the given input and write the
@@ -124,7 +124,7 @@ hash_bytes_160 :: proc(data: []byte) -> [DIGEST_SIZE_160]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_160 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_160(transmute([]byte)(data), hash);
hash_bytes_to_buffer_160(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_160 will hash the given input and write the
@@ -203,7 +203,7 @@ hash_bytes_192 :: proc(data: []byte) -> [DIGEST_SIZE_192]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_192 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_192(transmute([]byte)(data), hash);
hash_bytes_to_buffer_192(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_192 will hash the given input and write the

View File

@@ -45,7 +45,7 @@ hash_bytes_128 :: proc(data: []byte) -> [DIGEST_SIZE_128]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_128 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_128(transmute([]byte)(data), hash);
hash_bytes_to_buffer_128(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_128 will hash the given input and write the
@@ -124,7 +124,7 @@ hash_bytes_160 :: proc(data: []byte) -> [DIGEST_SIZE_160]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_160 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_160(transmute([]byte)(data), hash);
hash_bytes_to_buffer_160(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_160 will hash the given input and write the
@@ -203,7 +203,7 @@ hash_bytes_192 :: proc(data: []byte) -> [DIGEST_SIZE_192]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer_192 :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer_192(transmute([]byte)(data), hash);
hash_bytes_to_buffer_192(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer_192 will hash the given input and write the

View File

@@ -42,7 +42,7 @@ hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
// computed hash to the second parameter.
// It requires that the destination buffer is at least as big as the digest size
hash_string_to_buffer :: proc(data: string, hash: []byte) {
hash_bytes_to_buffer(transmute([]byte)(data), hash);
hash_bytes_to_buffer(transmute([]byte)(data), hash)
}
// hash_bytes_to_buffer will hash the given input and write the

View File

@@ -11,6 +11,7 @@ import "core:time"
import "core:unicode/utf8"
import "core:intrinsics"
// Internal data structure that stores the required information for formatted printing
Info :: struct {
minus: bool,
plus: bool,
@@ -46,9 +47,13 @@ Register_User_Formatter_Error :: enum {
// it is prefixed with `_` rather than marked with a private attribute so that users can access it if necessary
_user_formatters: ^map[typeid]User_Formatter
// set_user_formatters assigns m to a global value allowing the user to have custom print formatting for specific
// types
set_user_formatters :: proc(m: ^map[typeid]User_Formatter) {
_user_formatters = m
}
// register_user_formatter assigns a formatter to a specific typeid. set_user_formatters must be called
// before any use of this procedure.
register_user_formatter :: proc(id: typeid, formatter: User_Formatter) -> Register_User_Formatter_Error {
if _user_formatters == nil {
return .No_User_Formatter
@@ -61,7 +66,7 @@ register_user_formatter :: proc(id: typeid, formatter: User_Formatter) -> Regist
}
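A hedged usage sketch of the two hooks documented above. The User_Formatter signature is not shown in this hunk, so the proc(^fmt.Info, any, rune) -> bool shape used here is an assumption:

package user_formatter_sketch

import "core:fmt"

Celsius :: distinct f64

// Assumed formatter shape; writes through the Info's writer.
format_celsius :: proc(fi: ^fmt.Info, arg: any, verb: rune) -> bool {
	c := arg.(Celsius)
	fmt.wprintf(fi.writer, "%v°C", f64(c))
	return true
}

formatters: map[typeid]fmt.User_Formatter

main :: proc() {
	fmt.set_user_formatters(&formatters)    // must come first, per the doc comment
	fmt.register_user_formatter(Celsius, format_celsius)
	fmt.println(Celsius(21.5))
}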
// aprint* procedures return a string that was allocated with the current context
// aprint procedure returns a string that was allocated with the current context
// They must be freed accordingly
aprint :: proc(args: ..any, sep := " ") -> string {
str: strings.Builder
@@ -69,12 +74,16 @@ aprint :: proc(args: ..any, sep := " ") -> string {
sbprint(buf=&str, args=args, sep=sep)
return strings.to_string(str)
}
// aprintln procedure returns a string that was allocated with the current context
// They must be freed accordingly
aprintln :: proc(args: ..any, sep := " ") -> string {
str: strings.Builder
strings.init_builder(&str)
sbprintln(buf=&str, args=args, sep=sep)
return strings.to_string(str)
}
// aprintf procedure returns a string that was allocated with the current context
// They must be freed accordingly
aprintf :: proc(fmt: string, args: ..any) -> string {
str: strings.Builder
strings.init_builder(&str)
@@ -83,19 +92,21 @@ aprintf :: proc(fmt: string, args: ..any) -> string {
}
// tprint* procedures return a string that was allocated with the current context's temporary allocator
// tprint procedure returns a string that was allocated with the current context's temporary allocator
tprint :: proc(args: ..any, sep := " ") -> string {
str: strings.Builder
strings.init_builder(&str, context.temp_allocator)
sbprint(buf=&str, args=args, sep=sep)
return strings.to_string(str)
}
// tprintln procedure returns a string that was allocated with the current context's temporary allocator
tprintln :: proc(args: ..any, sep := " ") -> string {
str: strings.Builder
strings.init_builder(&str, context.temp_allocator)
sbprintln(buf=&str, args=args, sep=sep)
return strings.to_string(str)
}
// tprintf procedure returns a string that was allocated with the current context's temporary allocator
tprintf :: proc(fmt: string, args: ..any) -> string {
str: strings.Builder
strings.init_builder(&str, context.temp_allocator)
@@ -104,21 +115,24 @@ tprintf :: proc(fmt: string, args: ..any) -> string {
}
// bprint* procedures return a string using a buffer from an array
// bprint procedure returns a string using a buffer from an array
bprint :: proc(buf: []byte, args: ..any, sep := " ") -> string {
sb := strings.builder_from_slice(buf[0:len(buf)])
return sbprint(buf=&sb, args=args, sep=sep)
}
// bprintln procedure returns a string using a buffer from an array
bprintln :: proc(buf: []byte, args: ..any, sep := " ") -> string {
sb := strings.builder_from_slice(buf[0:len(buf)])
return sbprintln(buf=&sb, args=args, sep=sep)
}
// bprintf procedure returns a string using a buffer from an array
bprintf :: proc(buf: []byte, fmt: string, args: ..any) -> string {
sb := strings.builder_from_slice(buf[0:len(buf)])
return sbprintf(&sb, fmt, ..args)
}
// formatted assert
assertf :: proc(condition: bool, fmt: string, args: ..any, loc := #caller_location) -> bool {
if !condition {
p := context.assertion_failure_proc
@@ -131,6 +145,7 @@ assertf :: proc(condition: bool, fmt: string, args: ..any, loc := #caller_locati
return condition
}
// formatted panic
panicf :: proc(fmt: string, args: ..any, loc := #caller_location) -> ! {
p := context.assertion_failure_proc
if p == nil {
@@ -142,24 +157,26 @@ panicf :: proc(fmt: string, args: ..any, loc := #caller_location) -> ! {
// sbprint formats using the default print settings and writes to buf
sbprint :: proc(buf: ^strings.Builder, args: ..any, sep := " ") -> string {
wprint(w=strings.to_writer(buf), args=args, sep=sep)
return strings.to_string(buf^)
}
// sbprintln formats using the default print settings and writes to buf
sbprintln :: proc(buf: ^strings.Builder, args: ..any, sep := " ") -> string {
wprintln(w=strings.to_writer(buf), args=args, sep=sep)
return strings.to_string(buf^)
}
// sbprintf formats according to the specified format string and writes to buf
sbprintf :: proc(buf: ^strings.Builder, fmt: string, args: ..any) -> string {
wprintf(w=strings.to_writer(buf), fmt=fmt, args=args)
return strings.to_string(buf^)
}
// wprint formats using the default print settings and writes to w
wprint :: proc(w: io.Writer, args: ..any, sep := " ") -> int {
fi: Info
fi.writer = w
@@ -194,6 +211,7 @@ wprint :: proc(w: io.Writer, args: ..any, sep := " ") -> int {
return int(size1 - size0)
}
// wprintln formats using the default print settings and writes to w
wprintln :: proc(w: io.Writer, args: ..any, sep := " ") -> int {
fi: Info
fi.writer = w
@@ -214,6 +232,7 @@ wprintln :: proc(w: io.Writer, args: ..any, sep := " ") -> int {
return int(size1 - size0)
}
// wprintf formats according to the specified format string and writes to w
wprintf :: proc(w: io.Writer, fmt: string, args: ..any) -> int {
fi: Info
arg_index: int = 0
@@ -493,11 +512,13 @@ wprintf :: proc(w: io.Writer, fmt: string, args: ..any) -> int {
return int(size1 - size0)
}
// wprint_type is a utility procedure to write a ^runtime.Type_Info value to w
wprint_type :: proc(w: io.Writer, info: ^runtime.Type_Info) -> (int, io.Error) {
n, err := reflect.write_type(w, info)
io.flush(auto_cast w)
return n, err
}
// wprint_typeid is a utility procedure to write a typeid value to w
wprint_typeid :: proc(w: io.Writer, id: typeid) -> (int, io.Error) {
n, err := reflect.write_type(w, type_info_of(id))
io.flush(auto_cast w)
@@ -829,7 +850,7 @@ _pad :: proc(fi: ^Info, s: string) {
fmt_float :: proc(fi: ^Info, v: f64, bit_size: int, verb: rune) {
switch verb {
case 'f', 'F', 'v':
case 'f', 'F', 'g', 'G', 'v':
prec: int = 3
if fi.prec_set {
prec = fi.prec
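The added 'g' and 'G' cases mean those verbs now share the fixed-precision path with 'f', 'F', and 'v' instead of being rejected. A tiny hedged example; the default precision of 3 is read off the prec: int = 3 line above:

package float_verb_sketch

import "core:fmt"

main :: proc() {
	fmt.printf("%g\n", 2.718281828)   // with this change: formatted like %f, e.g. "2.718"
	fmt.printf("%.5G\n", 2.718281828) // explicit precision still applies
}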

View File

@@ -34,11 +34,16 @@ stderr := io.Writer{
},
}
// print* procedures return the number of bytes written
// print formats using the default print settings and writes to stdout
print :: proc(args: ..any, sep := " ") -> int { return wprint(w=stdout, args=args, sep=sep) }
// println formats using the default print settings and writes to stdout
println :: proc(args: ..any, sep := " ") -> int { return wprintln(w=stdout, args=args, sep=sep) }
// printf formats according to the specified format string and writes to stdout
printf :: proc(fmt: string, args: ..any) -> int { return wprintf(stdout, fmt, ..args) }
// eprint formats using the default print settings and writes to stderr
eprint :: proc(args: ..any, sep := " ") -> int { return wprint(w=stderr, args=args, sep=sep) }
// eprintln formats using the default print settings and writes to stderr
eprintln :: proc(args: ..any, sep := " ") -> int { return wprintln(w=stderr, args=args, sep=sep) }
// eprintf formats according to the specified format string and writes to stderr
eprintf :: proc(fmt: string, args: ..any) -> int { return wprintf(stderr, fmt, ..args) }

View File

@@ -5,15 +5,18 @@ import "core:runtime"
import "core:os"
import "core:io"
// fprint formats using the default print settings and writes to fd
fprint :: proc(fd: os.Handle, args: ..any, sep := " ") -> int {
w := io.to_writer(os.stream_from_handle(fd))
return wprint(w=w, args=args, sep=sep)
}
// fprintln formats using the default print settings and writes to fd
fprintln :: proc(fd: os.Handle, args: ..any, sep := " ") -> int {
w := io.to_writer(os.stream_from_handle(fd))
return wprintln(w=w, args=args, sep=sep)
}
// fprintf formats according to the specified format string and writes to fd
fprintf :: proc(fd: os.Handle, fmt: string, args: ..any) -> int {
w := io.to_writer(os.stream_from_handle(fd))
return wprintf(w, fmt, ..args)
@@ -27,11 +30,16 @@ fprint_typeid :: proc(fd: os.Handle, id: typeid) -> (n: int, err: io.Error) {
return wprint_typeid(w, id)
}
// print* procedures return the number of bytes written
// print formats using the default print settings and writes to os.stdout
print :: proc(args: ..any, sep := " ") -> int { return fprint(fd=os.stdout, args=args, sep=sep) }
// println formats using the default print settings and writes to os.stdout
println :: proc(args: ..any, sep := " ") -> int { return fprintln(fd=os.stdout, args=args, sep=sep) }
// printf formats according to the specified format string and writes to os.stdout
printf :: proc(fmt: string, args: ..any) -> int { return fprintf(os.stdout, fmt, ..args) }
// eprint formats using the default print settings and writes to os.stderr
eprint :: proc(args: ..any, sep := " ") -> int { return fprint(fd=os.stderr, args=args, sep=sep) }
// eprintln formats using the default print settings and writes to os.stderr
eprintln :: proc(args: ..any, sep := " ") -> int { return fprintln(fd=os.stderr, args=args, sep=sep) }
// eprintf formats according to the specified format string and writes to os.stderr
eprintf :: proc(fmt: string, args: ..any) -> int { return fprintf(os.stderr, fmt, ..args) }

View File

@@ -1,9 +1,13 @@
// package io provides basic interfaces for generic data stream primitives.
// The purpose of this package is to wrap existing data structures and their
// operations into an abstracted stream interface.
package io
import "core:intrinsics"
import "core:runtime"
import "core:unicode/utf8"
// Seek whence values
Seek_From :: enum {
Start = 0, // seek relative to the origin of the file
Current = 1, // seek relative to the current offset
@@ -139,6 +143,10 @@ destroy :: proc(s: Stream) -> Error {
return .Empty
}
// read reads up to len(p) bytes from s into p. It returns the number of bytes read and any error that occurred.
//
// When read encounters an .EOF or error after successfully reading n > 0 bytes, it returns the number of
// bytes read along with the error.
read :: proc(s: Reader, p: []byte, n_read: ^int = nil) -> (n: int, err: Error) {
if s.stream_vtable != nil && s.impl_read != nil {
n, err = s->impl_read(p)
@@ -150,6 +158,7 @@ read :: proc(s: Reader, p: []byte, n_read: ^int = nil) -> (n: int, err: Error) {
return 0, .Empty
}
// write writes up to len(p) bytes from p into s. It returns the number of bytes written and any error that occurred.
write :: proc(s: Writer, p: []byte, n_written: ^int = nil) -> (n: int, err: Error) {
if s.stream_vtable != nil && s.impl_write != nil {
n, err = s->impl_write(p)
@@ -161,6 +170,13 @@ write :: proc(s: Writer, p: []byte, n_written: ^int = nil) -> (n: int, err: Erro
return 0, .Empty
}
// seek sets the offset of the next read or write to offset.
//
// .Start means seek relative to the origin of the file.
// .Current means seek relative to the current offset.
// .End means seek relative to the end.
//
// seek returns the new offset to the start of the file/stream, and any error if occurred.
seek :: proc(s: Seeker, offset: i64, whence: Seek_From) -> (n: i64, err: Error) {
if s.stream_vtable != nil && s.impl_seek != nil {
return s->impl_seek(offset, whence)
@@ -168,6 +184,8 @@ seek :: proc(s: Seeker, offset: i64, whence: Seek_From) -> (n: i64, err: Error)
return 0, .Empty
}
// The behaviour of close after the first call is stream implementation defined.
// Different streams may document their own behaviour.
close :: proc(s: Closer) -> Error {
if s.stream_vtable != nil && s.impl_close != nil {
return s->impl_close()
@@ -184,6 +202,7 @@ flush :: proc(s: Flusher) -> Error {
return .None
}
// size returns the size of the stream. If the stream does not support querying its size, 0 will be returned.
size :: proc(s: Stream) -> i64 {
if s.stream_vtable == nil {
return 0
@@ -214,7 +233,12 @@ size :: proc(s: Stream) -> i64 {
// read_at reads len(p) bytes into p starting at the provided offset in the underlying Reader_At stream r.
// It returns the number of bytes read and any error that occurred.
//
// When read_at returns n < len(p), it returns a non-nil Error explaining why.
//
// If n == len(p), err may be either nil or .EOF
read_at :: proc(r: Reader_At, p: []byte, offset: i64, n_read: ^int = nil) -> (n: int, err: Error) {
defer if n_read != nil {
n_read^ += n
@@ -245,6 +269,11 @@ read_at :: proc(r: Reader_At, p: []byte, offset: i64, n_read: ^int = nil) -> (n:
}
// write_at writes len(p) bytes from p to the underlying Writer_At stream w, starting at the provided offset.
// It returns the number of bytes written and any error that occurred.
//
// If write_at is writing to a Writer_At which has a seek offset, then write_at should not affect the underlying
// seek offset.
write_at :: proc(w: Writer_At, p: []byte, offset: i64, n_written: ^int = nil) -> (n: int, err: Error) {
defer if n_written != nil {
n_written^ += n
@@ -294,6 +323,7 @@ read_from :: proc(w: Reader_From, r: Reader) -> (n: i64, err: Error) {
}
// read_byte reads and returns the next byte from r.
read_byte :: proc(r: Byte_Reader, n_read: ^int = nil) -> (b: byte, err: Error) {
defer if err == nil && n_read != nil {
n_read^ += 1
@@ -347,6 +377,7 @@ _write_byte :: proc(w: Byte_Writer, c: byte, n_written: ^int = nil) -> (err: Err
return err
}
// read_rune reads a single UTF-8 encoded Unicode codepoint and returns the rune and its size in bytes.
read_rune :: proc(br: Rune_Reader, n_read: ^int = nil) -> (ch: rune, size: int, err: Error) {
defer if err == nil && n_read != nil {
n_read^ += size
@@ -405,10 +436,12 @@ unread_rune :: proc(s: Rune_Scanner) -> Error {
}
// write_string writes the contents of the string str to s.
write_string :: proc(s: Writer, str: string, n_written: ^int = nil) -> (n: int, err: Error) {
return write(s, transmute([]byte)str, n_written)
}
// write_rune writes a UTF-8 encoded rune to s.
write_rune :: proc(s: Writer, r: rune, n_written: ^int = nil) -> (size: int, err: Error) {
defer if err == nil && n_written != nil {
n_written^ += size
@@ -430,12 +463,16 @@ write_rune :: proc(s: Writer, r: rune, n_written: ^int = nil) -> (size: int, err
}
// read_full reads exactly len(buf) bytes from r into buf.
read_full :: proc(r: Reader, buf: []byte) -> (n: int, err: Error) {
return read_at_least(r, buf, len(buf))
}
// read_at_least reads from r into buf until it has read at least min bytes. It returns the number
// of bytes copied and an error if fewer bytes were read. `.EOF` is only returned if no bytes were read.
// `.Unexpected_EOF` is returned when an `.EOF` is returned by the passed Reader after reading
// fewer than min bytes. If len(buf) is less than min, `.Short_Buffer` is returned.
read_at_least :: proc(r: Reader, buf: []byte, min: int) -> (n: int, err: Error) {
if len(buf) < min {
return 0, .Short_Buffer

View File

@@ -70,7 +70,7 @@ int31_max :: proc(n: i32, r: ^Rand = nil) -> i32 {
if n&(n-1) == 0 {
return int31(r) & (n-1)
}
max := i32((1<<31) - 1 - (1<<31)&u32(n))
max := i32((1<<31) - 1 - (1<<31)%u32(n))
v := int31(r)
for v > max {
v = int31(r)
@@ -85,7 +85,7 @@ int63_max :: proc(n: i64, r: ^Rand = nil) -> i64 {
if n&(n-1) == 0 {
return int63(r) & (n-1)
}
max := i64((1<<63) - 1 - (1<<63)&u64(n))
max := i64((1<<63) - 1 - (1<<63)%u64(n))
v := int63(r)
for v > max {
v = int63(r)
@@ -100,7 +100,7 @@ int127_max :: proc(n: i128, r: ^Rand = nil) -> i128 {
if n&(n-1) == 0 {
return int127(r) & (n-1)
}
max := i128((1<<63) - 1 - (1<<63)&u128(n))
max := i128((1<<127) - 1 - (1<<127)%u128(n))
v := int127(r)
for v > max {
v = int127(r)
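The three replacements fix the rejection-sampling threshold: to draw uniformly in [0, n), values above the largest exact multiple of n must be rejected, and that bound is 2^k - 1 - (2^k mod n), not a bitwise AND. Worked example for the 31-bit case with n = 3: (1<<31) % 3 == 2, so max = 2^31 - 3 and exactly 3 * 715827882 values are accepted; with the old &, (1<<31) & 3 == 0, nothing was rejected, and residues 0 and 1 were slightly over-represented. A standalone sketch of the corrected bound:

package rand_bound_sketch

import "core:fmt"

main :: proc() {
	n: u32 = 3
	max := i32((1 << 31) - 1 - (1 << 31) % n)
	fmt.println(max) // 2147483645, i.e. one below a multiple of 3
}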

View File

@@ -143,11 +143,21 @@ print_int :: proc "contextless" (x: int) { print_i64(i64(x)) }
print_caller_location :: proc "contextless" (using loc: Source_Code_Location) {
print_string(file_path)
print_byte('(')
print_u64(u64(line))
print_byte(':')
print_u64(u64(column))
print_byte(')')
when ODIN_ERROR_POS_STYLE == .Default {
print_byte('(')
print_u64(u64(line))
print_byte(':')
print_u64(u64(column))
print_byte(')')
} else when ODIN_ERROR_POS_STYLE == .Unix {
print_byte(':')
print_u64(u64(line))
print_byte(':')
print_u64(u64(column))
print_byte(':')
} else {
#panic("unhandled ODIN_ERROR_POS_STYLE")
}
}
print_typeid :: proc "contextless" (id: typeid) {
if id == nil {

View File

@@ -5,25 +5,66 @@ package all
import bufio "core:bufio"
import bytes "core:bytes"
import c "core:c"
import libc "core:c/libc"
import compress "core:compress"
import gzip "core:compress/gzip"
import zlib "core:compress/zlib"
import container "core:container"
import bit_array "core:container/bit_array"
import priority_queue "core:container/priority_queue"
import queue "core:container/queue"
import small_array "core:container/small_array"
import crypto "core:crypto"
import blake "core:crypto/blake"
import blake2b "core:crypto/blake2b"
import blake2s "core:crypto/blake2s"
import chacha20 "core:crypto/chacha20"
import chacha20poly1305 "core:crypto/chacha20poly1305"
import gost "core:crypto/gost"
import groestl "core:crypto/groestl"
import haval "core:crypto/haval"
import jh "core:crypto/jh"
import keccak "core:crypto/keccak"
import md2 "core:crypto/md2"
import md4 "core:crypto/md4"
import md5 "core:crypto/md5"
import poly1305 "core:crypto/poly1305"
import ripemd "core:crypto/ripemd"
import sha1 "core:crypto/sha1"
import sha2 "core:crypto/sha2"
import sha3 "core:crypto/sha3"
import shake "core:crypto/shake"
import sm3 "core:crypto/sm3"
import streebog "core:crypto/streebog"
import tiger "core:crypto/tiger"
import tiger2 "core:crypto/tiger2"
import crypto_util "core:crypto/util"
import whirlpool "core:crypto/whirlpool"
import x25519 "core:crypto/x25519"
import dynlib "core:dynlib"
import encoding "core:encoding"
import base32 "core:encoding/base32"
import base64 "core:encoding/base64"
import csv "core:encoding/csv"
import hxa "core:encoding/hxa"
import json "core:encoding/json"
import fmt "core:fmt"
import hash "core:hash"
import image "core:image"
import png "core:image/png"
import io "core:io"
import log "core:log"
import math "core:math"
import big "core:math/big"
import bits "core:math/bits"
@@ -32,16 +73,22 @@ import linalg "core:math/linalg"
import glm "core:math/linalg/glsl"
import hlm "core:math/linalg/hlsl"
import rand "core:math/rand"
import mem "core:mem"
// import virtual "core:mem/virtual"
import ast "core:odin/ast"
import doc_format "core:odin/doc-format"
import odin_format "core:odin/format"
import odin_parser "core:odin/parser"
import odin_printer "core:odin/printer"
import odin_tokenizer "core:odin/tokenizer"
import os "core:os"
import slashpath "core:path/slashpath"
import filepath "core:path/filepath"
import reflect "core:reflect"
import runtime "core:runtime"
import slice "core:slice"
@@ -50,9 +97,11 @@ import strconv "core:strconv"
import strings "core:strings"
import sync "core:sync"
import sync2 "core:sync/sync2"
import testing "core:testing"
import scanner "core:text/scanner"
import thread "core:thread"
import time "core:time"
import unicode "core:unicode"
import utf8 "core:unicode/utf8"
import utf16 "core:unicode/utf16"
@@ -68,6 +117,37 @@ _ :: compress
_ :: gzip
_ :: zlib
_ :: container
_ :: bit_array
_ :: priority_queue
_ :: queue
_ :: small_array
_ :: crypto
_ :: blake
_ :: blake2b
_ :: blake2s
_ :: chacha20
_ :: chacha20poly1305
_ :: gost
_ :: groestl
_ :: haval
_ :: jh
_ :: keccak
_ :: md2
_ :: md4
_ :: md5
_ :: poly1305
_ :: ripemd
_ :: sha1
_ :: sha2
_ :: sha3
_ :: shake
_ :: sm3
_ :: streebog
_ :: tiger
_ :: tiger2
_ :: crypto_util
_ :: whirlpool
_ :: x25519
_ :: dynlib
_ :: encoding
_ :: base32
@@ -107,6 +187,7 @@ _ :: strconv
_ :: strings
_ :: sync
_ :: sync2
_ :: testing
_ :: scanner
_ :: thread
_ :: time

View File

@@ -165,6 +165,13 @@ enum TimingsExportFormat : i32 {
TimingsExportCSV = 2,
};
enum ErrorPosStyle {
ErrorPosStyle_Default, // path(line:column) msg
ErrorPosStyle_Unix, // path:line:column: msg
ErrorPosStyle_COUNT
};
// This stores the information for the specific architecture of this build
struct BuildContext {
// Constants
@@ -175,7 +182,9 @@ struct BuildContext {
String ODIN_ROOT; // Odin ROOT
bool ODIN_DEBUG; // Odin in debug mode
bool ODIN_DISABLE_ASSERT; // Whether the default 'assert' et al is disabled in code or not
bool ODIN_DEFAULT_TO_NIL_ALLOCATOR; // Whether the default allocator is a "nil" allocator or not (i.e. it does nothing)
bool ODIN_DEFAULT_TO_NIL_ALLOCATOR; // Whether the default allocator is a "nil" allocator or not (i.e. it does nothing)
ErrorPosStyle ODIN_ERROR_POS_STYLE;
TargetEndianKind endian_kind;
@@ -254,6 +263,7 @@ bool ODIN_DEFAULT_TO_NIL_ALLOCATOR; // Whether the default allocator is a "nil
isize thread_count;
PtrMap<char const *, ExactValue> defined_values;
};
@@ -843,6 +853,22 @@ bool has_asm_extension(String const &path) {
return false;
}
// temporary
char *token_pos_to_string(TokenPos const &pos) {
gbString s = gb_string_make_reserve(temporary_allocator(), 128);
String file = get_file_path_string(pos.file_id);
switch (build_context.ODIN_ERROR_POS_STYLE) {
default: /*fallthrough*/
case ErrorPosStyle_Default:
s = gb_string_append_fmt(s, "%.*s(%d:%d)", LIT(file), pos.line, pos.column);
break;
case ErrorPosStyle_Unix:
s = gb_string_append_fmt(s, "%.*s:%d:%d:", LIT(file), pos.line, pos.column);
break;
}
return s;
}
void init_build_context(TargetMetrics *cross_target) {
BuildContext *bc = &build_context;
@@ -855,6 +881,31 @@ void init_build_context(TargetMetrics *cross_target) {
bc->ODIN_VENDOR = str_lit("odin");
bc->ODIN_VERSION = ODIN_VERSION;
bc->ODIN_ROOT = odin_root_dir();
{
char const *found = gb_get_env("ODIN_ERROR_POS_STYLE", permanent_allocator());
if (found) {
ErrorPosStyle kind = ErrorPosStyle_Default;
String style = make_string_c(found);
style = string_trim_whitespace(style);
if (style == "" || style == "default" || style == "odin") {
kind = ErrorPosStyle_Default;
} else if (style == "unix" || style == "gcc" || style == "clang" || style == "llvm") {
kind = ErrorPosStyle_Unix;
} else {
gb_printf_err("Invalid ODIN_ERROR_POS_STYLE: got %.*s\n", LIT(style));
gb_printf_err("Valid formats:\n");
gb_printf_err("\t\"default\" or \"odin\"\n");
gb_printf_err("\t\tpath(line:column) message\n");
gb_printf_err("\t\"unix\"\n");
gb_printf_err("\t\tpath:line:column: message\n");
gb_exit(1);
}
build_context.ODIN_ERROR_POS_STYLE = kind;
}
}
bc->copy_file_contents = true;
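Per the parsing above, the style is selected through the ODIN_ERROR_POS_STYLE environment variable at compiler startup, e.g. (hedged; any project path works):

ODIN_ERROR_POS_STYLE=unix odin build .

"default"/"odin" map to path(line:column) messages, and "unix"/"gcc"/"clang"/"llvm" map to path:line:column: messages.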

View File

@@ -5755,8 +5755,12 @@ CallArgumentData check_call_arguments(CheckerContext *c, Operand *operand, Type
ctx.curr_proc_sig = e->type;
GB_ASSERT(decl->proc_lit->kind == Ast_ProcLit);
evaluate_where_clauses(&ctx, call, decl->scope, &decl->proc_lit->ProcLit.where_clauses, true);
bool ok = evaluate_where_clauses(&ctx, call, decl->scope, &decl->proc_lit->ProcLit.where_clauses, true);
decl->where_clauses_evaluated = true;
if (ok && (data.gen_entity->flags & EntityFlag_ProcBodyChecked) == 0) {
check_procedure_later(c, e->file, e->token, decl, e->type, decl->proc_lit->ProcLit.body, decl->proc_lit->ProcLit.tags);
}
}
return data;
}
@@ -5769,6 +5773,7 @@ CallArgumentData check_call_arguments(CheckerContext *c, Operand *operand, Type
Entity *e = entity_of_node(ident);
CallArgumentData data = {};
CallArgumentError err = call_checker(c, call, proc_type, e, operands, CallArgumentMode_ShowErrors, &data);
gb_unused(err);
@@ -5777,7 +5782,6 @@ CallArgumentData check_call_arguments(CheckerContext *c, Operand *operand, Type
if (entity_to_use != nullptr) {
update_untyped_expr_type(c, operand->expr, entity_to_use->type, true);
}
if (data.gen_entity != nullptr) {
Entity *e = data.gen_entity;
DeclInfo *decl = data.gen_entity->decl_info;
@@ -5789,8 +5793,12 @@ CallArgumentData check_call_arguments(CheckerContext *c, Operand *operand, Type
ctx.curr_proc_sig = e->type;
GB_ASSERT(decl->proc_lit->kind == Ast_ProcLit);
evaluate_where_clauses(&ctx, call, decl->scope, &decl->proc_lit->ProcLit.where_clauses, true);
bool ok = evaluate_where_clauses(&ctx, call, decl->scope, &decl->proc_lit->ProcLit.where_clauses, true);
decl->where_clauses_evaluated = true;
if (ok && (data.gen_entity->flags & EntityFlag_ProcBodyChecked) == 0) {
check_procedure_later(c, e->file, e->token, decl, e->type, decl->proc_lit->ProcLit.body, decl->proc_lit->ProcLit.tags);
}
}
return data;
}
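For orientation, a small hedged Odin example of the construct this change affects: a polymorphic procedure guarded by a where clause. With the fix, once the clause evaluates true for an instantiation whose body has not been checked yet, the body is queued via check_procedure_later:

package where_clause_sketch

import "core:intrinsics"

// Instantiating `sum` evaluates the where clause; on success the
// instantiated body is queued for checking (the fix above).
sum :: proc(a, b: $T) -> T where intrinsics.type_is_numeric(T) {
	return a + b
}

main :: proc() {
	_ = sum(1, 2)
}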
@@ -9341,6 +9349,13 @@ gbString write_expr_to_string(gbString str, Ast *node, bool shorthand) {
str = gb_string_appendc(str, " = ");
str = write_expr_to_string(str, fv->value, shorthand);
case_end;
case_ast_node(fv, EnumFieldValue, node);
str = write_expr_to_string(str, fv->name, shorthand);
if (fv->value) {
str = gb_string_appendc(str, " = ");
str = write_expr_to_string(str, fv->value, shorthand);
}
case_end;
case_ast_node(ht, HelperType, node);
str = gb_string_appendc(str, "#type ");

View File

@@ -732,20 +732,19 @@ void check_enum_type(CheckerContext *ctx, Type *enum_type, Type *named_type, Ast
Ast *ident = nullptr;
Ast *init = nullptr;
u32 entity_flags = 0;
if (field->kind == Ast_FieldValue) {
ast_node(fv, FieldValue, field);
if (fv->field == nullptr || fv->field->kind != Ast_Ident) {
error(field, "An enum field's name must be an identifier");
continue;
}
ident = fv->field;
init = fv->value;
} else if (field->kind == Ast_Ident) {
ident = field;
} else {
if (field->kind != Ast_EnumFieldValue) {
error(field, "An enum field's name must be an identifier");
continue;
}
ident = field->EnumFieldValue.name;
init = field->EnumFieldValue.value;
if (ident == nullptr || ident->kind != Ast_Ident) {
error(field, "An enum field's name must be an identifier");
continue;
}
CommentGroup *docs = field->EnumFieldValue.docs;
CommentGroup *comment = field->EnumFieldValue.comment;
String name = ident->Ident.token.string;
if (init != nullptr) {
@@ -803,6 +802,8 @@ void check_enum_type(CheckerContext *ctx, Type *enum_type, Type *named_type, Ast
e->flags |= EntityFlag_Visited;
e->state = EntityState_Resolved;
e->Constant.flags |= entity_flags;
e->Constant.docs = docs;
e->Constant.comment = comment;
if (scope_lookup_current(ctx->scope, name) != nullptr) {
error(ident, "'%.*s' is already declared in this enumeration", LIT(name));
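An illustrative (hedged) sketch of what the new EnumFieldValue handling covers: plain identifiers and name = value fields are now one node kind, and per-field docs and comments end up on the resulting constants via the e->Constant.docs / e->Constant.comment assignments above:

package enum_field_sketch

Direction :: enum {
	// Doc comment for North (EnumFieldValue.docs).
	North,     // trailing comment (EnumFieldValue.comment)
	East = 4,  // explicit value (EnumFieldValue.value)
	South,
	West,
}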

View File

@@ -893,6 +893,16 @@ void init_universal(void) {
add_global_enum_constant(fields, "ODIN_ENDIAN", target_endians[bc->metrics.arch]);
}
{
GlobalEnumValue values[ErrorPosStyle_COUNT] = {
{"Default", ErrorPosStyle_Default},
{"Unix", ErrorPosStyle_Unix},
};
auto fields = add_global_enum_type(str_lit("Odin_Error_Pos_Style_Type"), values, gb_count_of(values));
add_global_enum_constant(fields, "ODIN_ERROR_POS_STYLE", build_context.ODIN_ERROR_POS_STYLE);
}
add_global_bool_constant("ODIN_DEBUG", bc->ODIN_DEBUG);
add_global_bool_constant("ODIN_DISABLE_ASSERT", bc->ODIN_DISABLE_ASSERT);

View File

@@ -15,7 +15,7 @@ struct OdinDocVersionType {
#define OdinDocVersionType_Major 0
#define OdinDocVersionType_Minor 2
#define OdinDocVersionType_Patch 3
#define OdinDocVersionType_Patch 4
struct OdinDocHeaderBase {
u8 magic[8];
@@ -154,6 +154,7 @@ enum OdinDocEntityKind : u32 {
OdinDocEntity_ProcGroup = 5,
OdinDocEntity_ImportName = 6,
OdinDocEntity_LibraryName = 7,
OdinDocEntity_Builtin = 8,
};
enum OdinDocEntityFlag : u64 {
@@ -170,6 +171,9 @@ enum OdinDocEntityFlag : u64 {
OdinDocEntityFlag_Type_Alias = 1ull<<20,
OdinDocEntityFlag_Builtin_Pkg_Builtin = 1ull<<30,
OdinDocEntityFlag_Builtin_Pkg_Intrinsics = 1ull<<31,
OdinDocEntityFlag_Var_Thread_Local = 1ull<<40,
OdinDocEntityFlag_Var_Static = 1ull<<41,
@@ -201,15 +205,21 @@ enum OdinDocPkgFlags : u32 {
OdinDocPkgFlag_Init = 1<<2,
};
struct OdinDocScopeEntry {
OdinDocString name;
OdinDocEntityIndex entity;
};
struct OdinDocPkg {
OdinDocString fullpath;
OdinDocString name;
u32 flags;
OdinDocString docs;
OdinDocArray<OdinDocFileIndex> files;
OdinDocArray<OdinDocEntityIndex> entities;
OdinDocArray<OdinDocFileIndex> files;
OdinDocArray<OdinDocScopeEntry> entries;
};
struct OdinDocHeader {
OdinDocHeaderBase base;
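
For format readers, a package's exported names now arrive as name/entity pairs instead of bare indices, which preserves aliased scope names; a hedged Odin mirror of the new record, with assumed field types:

package example

Doc_Scope_Entry :: struct {
	name:   string, // the name as it appears in the package scope
	entity: u32,    // index into the documented entities array
}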

View File

@@ -811,14 +811,17 @@ OdinDocEntityIndex odin_doc_add_entity(OdinDocWriter *w, Entity *e) {
comment = e->decl_info->comment;
docs = e->decl_info->docs;
}
if (!comment && e->kind == Entity_Variable) {
comment = e->Variable.comment;
}
if (!docs && e->kind == Entity_Variable) {
docs = e->Variable.docs;
if (e->kind == Entity_Variable) {
if (!comment) { comment = e->Variable.comment; }
if (!docs) { docs = e->Variable.docs; }
} else if (e->kind == Entity_Constant) {
if (!comment) { comment = e->Constant.comment; }
if (!docs) { docs = e->Constant.docs; }
}
String name = e->token.string;
String link_name = {};
TokenPos pos = e->token.pos;
OdinDocEntityKind kind = OdinDocEntity_Invalid;
u64 flags = 0;
@@ -833,6 +836,7 @@ OdinDocEntityIndex odin_doc_add_entity(OdinDocWriter *w, Entity *e) {
case Entity_ProcGroup: kind = OdinDocEntity_ProcGroup; break;
case Entity_ImportName: kind = OdinDocEntity_ImportName; break;
case Entity_LibraryName: kind = OdinDocEntity_LibraryName; break;
case Entity_Builtin: kind = OdinDocEntity_Builtin; break;
}
switch (e->kind) {
@@ -862,6 +866,24 @@ OdinDocEntityIndex odin_doc_add_entity(OdinDocWriter *w, Entity *e) {
if (e->Procedure.is_export) { flags |= OdinDocEntityFlag_Export; }
link_name = e->Procedure.link_name;
break;
case Entity_Builtin:
{
auto bp = builtin_procs[e->Builtin.id];
pos = {};
name = bp.name;
switch (bp.pkg) {
case BuiltinProcPkg_builtin:
flags |= OdinDocEntityFlag_Builtin_Pkg_Builtin;
break;
case BuiltinProcPkg_intrinsics:
flags |= OdinDocEntityFlag_Builtin_Pkg_Intrinsics;
break;
default:
GB_PANIC("Unhandled BuiltinProcPkg");
}
}
break;
}
if (e->flags & EntityFlag_Param) {
@@ -897,8 +919,8 @@ OdinDocEntityIndex odin_doc_add_entity(OdinDocWriter *w, Entity *e) {
doc_entity.kind = kind;
doc_entity.flags = flags;
doc_entity.pos = odin_doc_token_pos_cast(w, e->token.pos);
doc_entity.name = odin_doc_write_string(w, e->token.string);
doc_entity.pos = odin_doc_token_pos_cast(w, pos);
doc_entity.name = odin_doc_write_string(w, name);
doc_entity.type = 0; // Set later
doc_entity.init_string = init_string;
doc_entity.comment = odin_doc_comment_group_string(w, comment);
@@ -975,7 +997,7 @@ void odin_doc_update_entities(OdinDocWriter *w) {
OdinDocArray<OdinDocEntityIndex> odin_doc_add_pkg_entities(OdinDocWriter *w, AstPackage *pkg) {
OdinDocArray<OdinDocScopeEntry> odin_doc_add_pkg_entries(OdinDocWriter *w, AstPackage *pkg) {
if (pkg->scope == nullptr) {
return {};
}
@@ -983,14 +1005,14 @@ OdinDocArray<OdinDocEntityIndex> odin_doc_add_pkg_entities(OdinDocWriter *w, Ast
return {};
}
auto entities = array_make<Entity *>(heap_allocator(), 0, pkg->scope->elements.entries.count);
defer (array_free(&entities));
auto entries = array_make<OdinDocScopeEntry>(heap_allocator(), 0, w->entity_cache.entries.count);
defer (array_free(&entries));
for_array(i, pkg->scope->elements.entries) {
String name = pkg->scope->elements.entries[i].key.string;
Entity *e = pkg->scope->elements.entries[i].value;
switch (e->kind) {
case Entity_Invalid:
case Entity_Builtin:
case Entity_Nil:
case Entity_Label:
continue;
@@ -1001,34 +1023,27 @@ OdinDocArray<OdinDocEntityIndex> odin_doc_add_pkg_entities(OdinDocWriter *w, Ast
case Entity_ProcGroup:
case Entity_ImportName:
case Entity_LibraryName:
case Entity_Builtin:
// Fine
break;
}
array_add(&entities, e);
}
gb_sort_array(entities.data, entities.count, cmp_entities_for_printing);
auto entity_indices = array_make<OdinDocEntityIndex>(heap_allocator(), 0, w->entity_cache.entries.count);
defer (array_free(&entity_indices));
for_array(i, entities) {
Entity *e = entities[i];
if (e->pkg != pkg) {
continue;
}
if (!is_entity_exported(e)) {
if (!is_entity_exported(e, true)) {
continue;
}
if (e->token.string.len == 0) {
continue;
}
OdinDocEntityIndex doc_entity_index = 0;
doc_entity_index = odin_doc_add_entity(w, e);
array_add(&entity_indices, doc_entity_index);
OdinDocScopeEntry entry = {};
entry.name = odin_doc_write_string(w, name);
entry.entity = odin_doc_add_entity(w, e);
array_add(&entries, entry);
}
return odin_write_slice(w, entity_indices.data, entity_indices.count);
return odin_write_slice(w, entries.data, entries.count);
}
@@ -1096,7 +1111,7 @@ void odin_doc_write_docs(OdinDocWriter *w) {
}
doc_pkg.files = odin_write_slice(w, file_indices.data, file_indices.count);
doc_pkg.entities = odin_doc_add_pkg_entities(w, pkg);
doc_pkg.entries = odin_doc_add_pkg_entries(w, pkg);
if (dst) {
*dst = doc_pkg;

View File

@@ -161,6 +161,8 @@ struct Entity {
ParameterValue param_value;
u32 flags;
i32 field_group_index;
CommentGroup *docs;
CommentGroup *comment;
} Constant;
struct {
Ast *init_expr; // only used for some variables within procedure bodies

View File

@@ -33,6 +33,10 @@ void init_global_error_collector(void) {
}
// temporary
// defined in build_settings.cpp
char *token_pos_to_string(TokenPos const &pos);
bool set_file_path_string(i32 index, String const &path) {
bool ok = false;
GB_ASSERT(index >= 0);

View File

@@ -3460,7 +3460,8 @@ lbAddr lb_build_addr(lbProcedure *p, Ast *expr) {
GB_ASSERT_MSG(is_type_indexable(t), "%s %s", type_to_string(t), expr_to_string(expr));
if (is_type_map(t)) {
lbValue map_val = lb_build_addr_ptr(p, ie->expr);
lbAddr map_addr = lb_build_addr(p, ie->expr);
lbValue map_val = lb_addr_load(p, map_addr);
if (deref) {
map_val = lb_emit_load(p, map_val);
}
@@ -3469,7 +3470,8 @@ lbAddr lb_build_addr(lbProcedure *p, Ast *expr) {
key = lb_emit_conv(p, key, t->Map.key);
Type *result_type = type_of_expr(expr);
return lb_addr_map(map_val, key, t, result_type);
lbValue map_ptr = lb_address_from_load_or_generate_local(p, map_val);
return lb_addr_map(map_ptr, key, t, result_type);
}
switch (t->kind) {
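
The rewritten path above matters when a map is reached through a load or a pointer before being indexed; a hedged Odin sketch of the forms that flow through lb_addr_map:

package example

main :: proc() {
	m := make(map[string]int)
	defer delete(m)
	m["a"] = 1  // map index used as an lvalue
	mp := &m
	mp["b"] = 2 // indexing a ^map takes the deref branch above
}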

View File

@@ -271,6 +271,10 @@ lbAddr lb_addr(lbValue addr) {
lbAddr lb_addr_map(lbValue addr, lbValue map_key, Type *map_type, Type *map_result) {
GB_ASSERT(is_type_pointer(addr.type));
Type *mt = type_deref(addr.type);
GB_ASSERT(is_type_map(mt));
lbAddr v = {lbAddr_Map, addr};
v.map.key = map_key;
v.map.type = map_type;
@@ -1598,8 +1602,9 @@ LLVMTypeRef lb_type_internal(lbModule *m, Type *type) {
return llvm_type;
}
llvm_type = LLVMStructCreateNamed(ctx, name);
LLVMTypeRef found_val = *found;
map_set(&m->types, type, llvm_type);
lb_clone_struct_type(llvm_type, *found);
lb_clone_struct_type(llvm_type, found_val);
return llvm_type;
}
}
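
The found_val copy above exists because map_set may grow m->types and invalidate the found pointer; the same shape of hazard in miniature, as a hedged Odin sketch using a dynamic array:

package example

main :: proc() {
	arr: [dynamic]int
	defer delete(arr)
	append(&arr, 42)
	val := arr[0]   // copy the value out first, as the fix above does
	p := &arr[0]    // points into the current backing allocation
	append(&arr, 7) // may reallocate, after which p can dangle; val stays valid
	_ = val
	_ = p
}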

View File

@@ -61,7 +61,7 @@ lbProcedure *lb_create_procedure(lbModule *m, Entity *entity, bool ignore_body)
GB_ASSERT(entity != nullptr);
GB_ASSERT(entity->kind == Entity_Procedure);
if (!entity->Procedure.is_foreign) {
GB_ASSERT(entity->flags & EntityFlag_ProcBodyChecked);
GB_ASSERT_MSG(entity->flags & EntityFlag_ProcBodyChecked, "%.*s :: %s", LIT(entity->token.string), type_to_string(entity->type));
}
String link_name = {};

View File

@@ -693,6 +693,16 @@ Ast *ast_field_value(AstFile *f, Ast *field, Ast *value, Token eq) {
return result;
}
Ast *ast_enum_field_value(AstFile *f, Ast *name, Ast *value, CommentGroup *docs, CommentGroup *comment) {
Ast *result = alloc_ast_node(f, Ast_EnumFieldValue);
result->EnumFieldValue.name = name;
result->EnumFieldValue.value = value;
result->EnumFieldValue.docs = docs;
result->EnumFieldValue.comment = comment;
return result;
}
Ast *ast_compound_lit(AstFile *f, Ast *type, Array<Ast *> const &elems, Token open, Token close) {
Ast *result = alloc_ast_node(f, Ast_CompoundLit);
result->CompoundLit.type = type;
@@ -1234,7 +1244,7 @@ CommentGroup *consume_comment_group(AstFile *f, isize n, isize *end_line_) {
return comments;
}
void comsume_comment_groups(AstFile *f, Token prev) {
void consume_comment_groups(AstFile *f, Token prev) {
if (f->curr_token.kind == Token_Comment) {
CommentGroup *comment = nullptr;
isize end_line = 0;
@@ -1278,7 +1288,7 @@ Token advance_token(AstFile *f) {
if (ok) {
switch (f->curr_token.kind) {
case Token_Comment:
comsume_comment_groups(f, prev);
consume_comment_groups(f, prev);
break;
case Token_Semicolon:
if (ignore_newlines(f) && f->curr_token.string == "\n") {
@@ -1689,6 +1699,46 @@ Array<Ast *> parse_element_list(AstFile *f) {
return elems;
}
CommentGroup *consume_line_comment(AstFile *f) {
CommentGroup *comment = f->line_comment;
if (f->line_comment == f->lead_comment) {
f->lead_comment = nullptr;
}
f->line_comment = nullptr;
return comment;
}
Array<Ast *> parse_enum_field_list(AstFile *f) {
auto elems = array_make<Ast *>(heap_allocator());
while (f->curr_token.kind != Token_CloseBrace &&
f->curr_token.kind != Token_EOF) {
CommentGroup *docs = f->lead_comment;
CommentGroup *comment = nullptr;
Ast *name = parse_value(f);
Ast *value = nullptr;
if (f->curr_token.kind == Token_Eq) {
Token eq = expect_token(f, Token_Eq);
value = parse_value(f);
}
comment = consume_line_comment(f);
Ast *elem = ast_enum_field_value(f, name, value, docs, comment);
array_add(&elems, elem);
if (!allow_token(f, Token_Comma)) {
break;
}
if (!elem->EnumFieldValue.comment) {
elem->EnumFieldValue.comment = consume_line_comment(f);
}
}
return elems;
}
Ast *parse_literal_value(AstFile *f, Ast *type) {
Array<Ast *> elems = {};
@@ -2449,7 +2499,7 @@ Ast *parse_operand(AstFile *f, bool lhs) {
skip_possible_newline_for_literal(f);
Token open = expect_token(f, Token_OpenBrace);
Array<Ast *> values = parse_element_list(f);
Array<Ast *> values = parse_enum_field_list(f);
Token close = expect_closing_brace_of_field_list(f);
return ast_enum_type(f, token, base_type, values);
@@ -5398,7 +5448,7 @@ bool parse_file(Parser *p, AstFile *f) {
String filepath = f->tokenizer.fullpath;
String base_dir = dir_from_path(filepath);
if (f->curr_token.kind == Token_Comment) {
comsume_comment_groups(f, f->prev_token);
consume_comment_groups(f, f->prev_token);
}
CommentGroup *docs = f->lead_comment;
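
The retroactive consume_line_comment call in parse_enum_field_list above means a line comment written after a field's comma still binds to that field; a minimal Odin sketch:

package example

Mode :: enum {
	Read,  // attaches to Read even though it follows the comma
	Write, // likewise attaches to Write
}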

View File

@@ -383,6 +383,12 @@ AST_KIND(_ExprBegin, "", bool) \
void *sce_temp_data; \
}) \
AST_KIND(FieldValue, "field value", struct { Token eq; Ast *field, *value; }) \
AST_KIND(EnumFieldValue, "enum field value", struct { \
Ast *name; \
Ast *value; \
CommentGroup *docs; \
CommentGroup *comment; \
}) \
AST_KIND(TernaryIfExpr, "ternary if expression", struct { Ast *x, *cond, *y; }) \
AST_KIND(TernaryWhenExpr, "ternary when expression", struct { Ast *x, *cond, *y; }) \
AST_KIND(OrElseExpr, "or_else expression", struct { Ast *x; Token token; Ast *y; }) \

View File

@@ -201,14 +201,6 @@ struct TokenPos {
i32 column; // starting at 1
};
// temporary
char *token_pos_to_string(TokenPos const &pos) {
gbString s = gb_string_make_reserve(temporary_allocator(), 128);
String file = get_file_path_string(pos.file_id);
s = gb_string_append_fmt(s, "%.*s(%d:%d)", LIT(file), pos.line, pos.column);
return s;
}
i32 token_pos_cmp(TokenPos const &a, TokenPos const &b) {
if (a.offset != b.offset) {
return (a.offset < b.offset) ? -1 : +1;