From cf390bf8b9f0652679a4ddc2ad66674e3793e3e7 Mon Sep 17 00:00:00 2001
From: Daniel Gavin
Date: Wed, 24 Nov 2021 21:20:46 +0100
Subject: [PATCH 001/710] Recover from closing brace not found in field list
---
core/odin/parser/parser.odin | 11 ++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/core/odin/parser/parser.odin b/core/odin/parser/parser.odin
index 7660005e0..b4efc1460 100644
--- a/core/odin/parser/parser.odin
+++ b/core/odin/parser/parser.odin
@@ -416,7 +416,16 @@ expect_closing_brace_of_field_list :: proc(p: ^Parser) -> tokenizer.Token {
str := tokenizer.token_to_string(token)
error(p, end_of_line_pos(p, p.prev_tok), "expected a comma, got %s", str)
}
- return expect_token(p, .Close_Brace)
+ expect_brace := expect_token(p, .Close_Brace)
+
+ if expect_brace.kind != .Close_Brace {
+ for p.curr_tok.kind != .Close_Brace && p.curr_tok.kind != .EOF {
+ advance_token(p)
+ }
+ return p.curr_tok
+ }
+
+ return expect_brace
}
From a4ba91a55435febb0b5daeadcff2450f52680044 Mon Sep 17 00:00:00 2001
From: Daniel Gavin
Date: Thu, 25 Nov 2021 18:47:58 +0100
Subject: [PATCH 002/710] Check for non inserted semicolon in
*expect_closing_brace_of_field_list*
---
core/odin/parser/parser.odin | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/core/odin/parser/parser.odin b/core/odin/parser/parser.odin
index b4efc1460..1d27b4a79 100644
--- a/core/odin/parser/parser.odin
+++ b/core/odin/parser/parser.odin
@@ -419,7 +419,7 @@ expect_closing_brace_of_field_list :: proc(p: ^Parser) -> tokenizer.Token {
expect_brace := expect_token(p, .Close_Brace)
if expect_brace.kind != .Close_Brace {
- for p.curr_tok.kind != .Close_Brace && p.curr_tok.kind != .EOF {
+ for p.curr_tok.kind != .Close_Brace && p.curr_tok.kind != .EOF && !is_non_inserted_semicolon(p.curr_tok) {
advance_token(p)
}
return p.curr_tok
@@ -428,6 +428,9 @@ expect_closing_brace_of_field_list :: proc(p: ^Parser) -> tokenizer.Token {
return expect_brace
}
+is_non_inserted_semicolon :: proc(tok: tokenizer.Token) -> bool {
+ return tok.kind == .Semicolon && tok.text != "\n"
+}
is_blank_ident :: proc{
is_blank_ident_string,
From a7138b22a5d98631c2d4fa3f573a249252da556f Mon Sep 17 00:00:00 2001
From: Phil H
Date: Wed, 1 Dec 2021 14:16:23 -0800
Subject: [PATCH 003/710] Fix 'unmarsal' typo
---
core/encoding/json/unmarshal.odin | 50 +++++++++++++++----------------
1 file changed, 25 insertions(+), 25 deletions(-)
diff --git a/core/encoding/json/unmarshal.odin b/core/encoding/json/unmarshal.odin
index fe3137b7e..bd48011f1 100644
--- a/core/encoding/json/unmarshal.odin
+++ b/core/encoding/json/unmarshal.odin
@@ -52,11 +52,11 @@ unmarshal_any :: proc(data: []byte, v: any, spec := DEFAULT_SPECIFICATION, alloc
if p.spec == .MJSON {
#partial switch p.curr_token.kind {
case .Ident, .String:
- return unmarsal_object(&p, data, .EOF)
+ return unmarshal_object(&p, data, .EOF)
}
}
- return unmarsal_value(&p, data)
+ return unmarshal_value(&p, data)
}
@@ -148,7 +148,7 @@ assign_float :: proc(val: any, f: $T) -> bool {
@(private)
-unmarsal_string :: proc(p: ^Parser, val: any, str: string, ti: ^reflect.Type_Info) -> bool {
+unmarshal_string_token :: proc(p: ^Parser, val: any, str: string, ti: ^reflect.Type_Info) -> bool {
val := val
switch dst in &val {
case string:
@@ -198,7 +198,7 @@ unmarsal_string :: proc(p: ^Parser, val: any, str: string, ti: ^reflect.Type_Inf
@(private)
-unmarsal_value :: proc(p: ^Parser, v: any) -> (err: Unmarshal_Error) {
+unmarshal_value :: proc(p: ^Parser, v: any) -> (err: Unmarshal_Error) {
UNSUPPORTED_TYPE := Unsupported_Type_Error{v.id, p.curr_token}
token := p.curr_token
@@ -257,7 +257,7 @@ unmarsal_value :: proc(p: ^Parser, v: any) -> (err: Unmarshal_Error) {
case .Ident:
advance_token(p)
if p.spec == .MJSON {
- if unmarsal_string(p, any{v.data, ti.id}, token.text, ti) {
+ if unmarshal_string_token(p, any{v.data, ti.id}, token.text, ti) {
return nil
}
}
@@ -266,7 +266,7 @@ unmarsal_value :: proc(p: ^Parser, v: any) -> (err: Unmarshal_Error) {
case .String:
advance_token(p)
str := unquote_string(token, p.spec, p.allocator) or_return
- if unmarsal_string(p, any{v.data, ti.id}, str, ti) {
+ if unmarshal_string_token(p, any{v.data, ti.id}, str, ti) {
return nil
}
delete(str, p.allocator)
@@ -274,10 +274,10 @@ unmarsal_value :: proc(p: ^Parser, v: any) -> (err: Unmarshal_Error) {
case .Open_Brace:
- return unmarsal_object(p, v, .Close_Brace)
+ return unmarshal_object(p, v, .Close_Brace)
case .Open_Bracket:
- return unmarsal_array(p, v)
+ return unmarshal_array(p, v)
case:
if p.spec != .JSON {
@@ -312,16 +312,16 @@ unmarsal_value :: proc(p: ^Parser, v: any) -> (err: Unmarshal_Error) {
@(private)
-unmarsal_expect_token :: proc(p: ^Parser, kind: Token_Kind, loc := #caller_location) -> Token {
+unmarshal_expect_token :: proc(p: ^Parser, kind: Token_Kind, loc := #caller_location) -> Token {
prev := p.curr_token
err := expect_token(p, kind)
- assert(err == nil, "unmarsal_expect_token")
+ assert(err == nil, "unmarshal_expect_token")
return prev
}
@(private)
-unmarsal_object :: proc(p: ^Parser, v: any, end_token: Token_Kind) -> (err: Unmarshal_Error) {
+unmarshal_object :: proc(p: ^Parser, v: any, end_token: Token_Kind) -> (err: Unmarshal_Error) {
UNSUPPORTED_TYPE := Unsupported_Type_Error{v.id, p.curr_token}
if end_token == .Close_Brace {
@@ -342,7 +342,7 @@ unmarsal_object :: proc(p: ^Parser, v: any, end_token: Token_Kind) -> (err: Unma
key, _ := parse_object_key(p, p.allocator)
defer delete(key, p.allocator)
- unmarsal_expect_token(p, .Colon)
+ unmarshal_expect_token(p, .Colon)
fields := reflect.struct_fields_zipped(ti.id)
@@ -378,7 +378,7 @@ unmarsal_object :: proc(p: ^Parser, v: any, end_token: Token_Kind) -> (err: Unma
field_ptr := rawptr(uintptr(v.data) + offset)
field := any{field_ptr, type.id}
- unmarsal_value(p, field) or_return
+ unmarshal_value(p, field) or_return
if parse_comma(p) {
break struct_loop
@@ -407,11 +407,11 @@ unmarsal_object :: proc(p: ^Parser, v: any, end_token: Token_Kind) -> (err: Unma
map_loop: for p.curr_token.kind != end_token {
key, _ := parse_object_key(p, p.allocator)
- unmarsal_expect_token(p, .Colon)
+ unmarshal_expect_token(p, .Colon)
mem.zero_slice(elem_backing)
- if err := unmarsal_value(p, map_backing_value); err != nil {
+ if err := unmarshal_value(p, map_backing_value); err != nil {
delete(key, p.allocator)
return err
}
@@ -443,7 +443,7 @@ unmarsal_object :: proc(p: ^Parser, v: any, end_token: Token_Kind) -> (err: Unma
enumerated_array_loop: for p.curr_token.kind != end_token {
key, _ := parse_object_key(p, p.allocator)
- unmarsal_expect_token(p, .Colon)
+ unmarshal_expect_token(p, .Colon)
defer delete(key, p.allocator)
index := -1
@@ -460,7 +460,7 @@ unmarsal_object :: proc(p: ^Parser, v: any, end_token: Token_Kind) -> (err: Unma
index_ptr := rawptr(uintptr(v.data) + uintptr(index*t.elem_size))
index_any := any{index_ptr, t.elem.id}
- unmarsal_value(p, index_any) or_return
+ unmarshal_value(p, index_any) or_return
if parse_comma(p) {
break enumerated_array_loop
@@ -480,10 +480,10 @@ unmarsal_object :: proc(p: ^Parser, v: any, end_token: Token_Kind) -> (err: Unma
@(private)
-unmarsal_count_array :: proc(p: ^Parser) -> (length: uintptr) {
+unmarshal_count_array :: proc(p: ^Parser) -> (length: uintptr) {
p_backup := p^
p.allocator = mem.nil_allocator()
- unmarsal_expect_token(p, .Open_Bracket)
+ unmarshal_expect_token(p, .Open_Bracket)
array_length_loop: for p.curr_token.kind != .Close_Bracket {
_, _ = parse_value(p)
length += 1
@@ -497,9 +497,9 @@ unmarsal_count_array :: proc(p: ^Parser) -> (length: uintptr) {
}
@(private)
-unmarsal_array :: proc(p: ^Parser, v: any) -> (err: Unmarshal_Error) {
+unmarshal_array :: proc(p: ^Parser, v: any) -> (err: Unmarshal_Error) {
assign_array :: proc(p: ^Parser, base: rawptr, elem: ^reflect.Type_Info, length: uintptr) -> Unmarshal_Error {
- unmarsal_expect_token(p, .Open_Bracket)
+ unmarshal_expect_token(p, .Open_Bracket)
for idx: uintptr = 0; p.curr_token.kind != .Close_Bracket; idx += 1 {
assert(idx < length)
@@ -507,14 +507,14 @@ unmarsal_array :: proc(p: ^Parser, v: any) -> (err: Unmarshal_Error) {
elem_ptr := rawptr(uintptr(base) + idx*uintptr(elem.size))
elem := any{elem_ptr, elem.id}
- unmarsal_value(p, elem) or_return
+ unmarshal_value(p, elem) or_return
if parse_comma(p) {
break
}
}
- unmarsal_expect_token(p, .Close_Bracket)
+ unmarshal_expect_token(p, .Close_Bracket)
return nil
@@ -524,7 +524,7 @@ unmarsal_array :: proc(p: ^Parser, v: any) -> (err: Unmarshal_Error) {
ti := reflect.type_info_base(type_info_of(v.id))
- length := unmarsal_count_array(p)
+ length := unmarshal_count_array(p)
#partial switch t in ti.variant {
case reflect.Type_Info_Slice:
@@ -578,4 +578,4 @@ unmarsal_array :: proc(p: ^Parser, v: any) -> (err: Unmarshal_Error) {
}
return UNSUPPORTED_TYPE
-}
\ No newline at end of file
+}
From 9d4fe9035626f4f36ae84ca731ffc5fca00ebe17 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Tue, 7 Dec 2021 17:35:41 +0000
Subject: [PATCH 004/710] Fix bugs in big.Rat caused by typos
---
core/math/big/rat.odin | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/core/math/big/rat.odin b/core/math/big/rat.odin
index 121f0ab50..837af6fd3 100644
--- a/core/math/big/rat.odin
+++ b/core/math/big/rat.odin
@@ -112,14 +112,14 @@ rat_set_u64 :: proc(dst: ^Rat, x: u64, allocator := context.allocator) -> (err:
assert_if_nil(dst)
context.allocator = allocator
internal_set(&dst.a, x) or_return
- internal_set(&dst.a, 1) or_return
+ internal_set(&dst.b, 1) or_return
return
}
rat_set_i64 :: proc(dst: ^Rat, x: i64, allocator := context.allocator) -> (err: Error) {
assert_if_nil(dst)
context.allocator = allocator
internal_set(&dst.a, x) or_return
- internal_set(&dst.a, 1) or_return
+ internal_set(&dst.b, 1) or_return
return
}
@@ -265,7 +265,7 @@ rat_mul_rat :: proc(dst, x, y: ^Rat, allocator := context.allocator) -> (err: Er
return
}
- int_sub(&dst.a, &x.a, &y.a) or_return
+ int_mul(&dst.a, &x.a, &y.a) or_return
internal_int_mul_denom(&dst.b, &x.b, &y.b) or_return
return internal_rat_norm(dst)
}
From c94098c2ab550425075509e3031bacc3a588a6db Mon Sep 17 00:00:00 2001
From: Jeroen van Rijn
Date: Thu, 9 Dec 2021 16:14:04 +0100
Subject: [PATCH 005/710] [math/big] Fix int_set and int_get.
---
core/math/big/common.odin | 13 +++----
core/math/big/internal.odin | 67 ++++++++++++++++++++++++-------------
tests/core/math/big/test.py | 27 +++++++++------
3 files changed, 66 insertions(+), 41 deletions(-)
diff --git a/core/math/big/common.odin b/core/math/big/common.odin
index 5b7d162bc..c9aab4afa 100644
--- a/core/math/big/common.odin
+++ b/core/math/big/common.odin
@@ -158,13 +158,14 @@ Error :: enum int {
Invalid_Pointer = 2,
Invalid_Argument = 3,
- Assignment_To_Immutable = 4,
- Max_Iterations_Reached = 5,
- Buffer_Overflow = 6,
- Integer_Overflow = 7,
+ Assignment_To_Immutable = 10,
+ Max_Iterations_Reached = 11,
+ Buffer_Overflow = 12,
+ Integer_Overflow = 13,
+ Integer_Underflow = 14,
- Division_by_Zero = 8,
- Math_Domain_Error = 9,
+ Division_by_Zero = 30,
+ Math_Domain_Error = 31,
Cannot_Open_File = 50,
Cannot_Read_File = 51,
diff --git a/core/math/big/internal.odin b/core/math/big/internal.odin
index 4702e76a3..abe592f9b 100644
--- a/core/math/big/internal.odin
+++ b/core/math/big/internal.odin
@@ -34,6 +34,10 @@ package math_big
import "core:mem"
import "core:intrinsics"
import rnd "core:math/rand"
+import "core:builtin"
+
+import "core:fmt"
+__ :: fmt
/*
Low-level addition, unsigned. Handbook of Applied Cryptography, algorithm 14.7.
@@ -1880,8 +1884,6 @@ internal_int_set_from_integer :: proc(dest: ^Int, src: $T, minimize := false, al
where intrinsics.type_is_integer(T) {
context.allocator = allocator
- src := src
-
internal_error_if_immutable(dest) or_return
/*
Most internal procs asssume an Int to have already been initialize,
@@ -1892,13 +1894,27 @@ internal_int_set_from_integer :: proc(dest: ^Int, src: $T, minimize := false, al
dest.flags = {} // We're not -Inf, Inf, NaN or Immutable.
dest.used = 0
- dest.sign = .Zero_or_Positive if src >= 0 else .Negative
- src = internal_abs(src)
+ dest.sign = .Negative if src < 0 else .Zero_or_Positive
- #no_bounds_check for src != 0 {
- dest.digit[dest.used] = DIGIT(src) & _MASK
+ temp := src
+
+ is_maximally_negative := src == min(T)
+ if is_maximally_negative {
+ /*
+ Prevent overflow on abs()
+ */
+ temp += 1
+ }
+ temp = -temp if temp < 0 else temp
+
+ #no_bounds_check for temp != 0 {
+ dest.digit[dest.used] = DIGIT(temp) & _MASK
dest.used += 1
- src >>= _DIGIT_BITS
+ temp >>= _DIGIT_BITS
+ }
+
+ if is_maximally_negative {
+ return internal_sub(dest, dest, 1)
}
internal_zero_unused(dest)
return nil
@@ -2307,28 +2323,31 @@ internal_int_get_i32 :: proc(a: ^Int) -> (res: i32, err: Error) {
}
internal_get_i32 :: proc { internal_int_get_i32, }
-/*
- TODO: Think about using `count_bits` to check if the value could be returned completely,
- and maybe return max(T), .Integer_Overflow if not?
-*/
internal_int_get :: proc(a: ^Int, $T: typeid) -> (res: T, err: Error) where intrinsics.type_is_integer(T) {
- size_in_bits := int(size_of(T) * 8)
- i := int((size_in_bits + _DIGIT_BITS - 1) / _DIGIT_BITS)
- i = min(int(a.used), i)
-
- #no_bounds_check for ; i >= 0; i -= 1 {
- res <<= uint(0) if size_in_bits <= _DIGIT_BITS else _DIGIT_BITS
- res |= T(a.digit[i])
- if size_in_bits <= _DIGIT_BITS {
- break
+ /*
+ Calculate target bit size.
+ */
+ target_bit_size := int(size_of(T) * 8)
+ when !intrinsics.type_is_unsigned(T) {
+ if a.sign == .Zero_or_Positive {
+ target_bit_size -= 1
}
}
+ bits_used := internal_count_bits(a)
+
+ if bits_used > target_bit_size {
+ if a.sign == .Negative {
+ return min(T), .Integer_Underflow
+ }
+ return max(T), .Integer_Overflow
+ }
+
+ for i := a.used; i > 0; i -= 1 {
+ res <<= _DIGIT_BITS
+ res |= T(a.digit[i - 1])
+ }
when !intrinsics.type_is_unsigned(T) {
- /*
- Mask off sign bit.
- */
- res ~= 1 << uint(size_in_bits - 1)
/*
Set the sign.
*/
diff --git a/tests/core/math/big/test.py b/tests/core/math/big/test.py
index 6b17336bc..629e76e6e 100644
--- a/tests/core/math/big/test.py
+++ b/tests/core/math/big/test.py
@@ -127,17 +127,22 @@ def we_iterate():
# Error enum values
#
class Error(Enum):
- Okay = 0
- Out_Of_Memory = 1
- Invalid_Pointer = 2
- Invalid_Argument = 3
- Unknown_Error = 4
- Max_Iterations_Reached = 5
- Buffer_Overflow = 6
- Integer_Overflow = 7
- Division_by_Zero = 8
- Math_Domain_Error = 9
- Unimplemented = 127
+ Okay = 0
+ Out_Of_Memory = 1
+ Invalid_Pointer = 2
+ Invalid_Argument = 3
+ Unknown_Error = 4
+ Assignment_To_Immutable = 10
+ Max_Iterations_Reached = 11
+ Buffer_Overflow = 12
+ Integer_Overflow = 13
+ Integer_Underflow = 14
+ Division_by_Zero = 30
+ Math_Domain_Error = 31
+ Cannot_Open_File = 50
+ Cannot_Read_File = 51
+ Cannot_Write_File = 52
+ Unimplemented = 127
#
# Disable garbage collection
From b2b79b86f00a229cc9d9cc0226ffb594a4e4d910 Mon Sep 17 00:00:00 2001
From: Jeroen van Rijn
Date: Thu, 9 Dec 2021 16:31:54 +0100
Subject: [PATCH 006/710] [math/big] Return 0, .Integer_Underflow if trying to
get a negative number to an unsigned int.
---
core/math/big/internal.odin | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/core/math/big/internal.odin b/core/math/big/internal.odin
index abe592f9b..70914228e 100644
--- a/core/math/big/internal.odin
+++ b/core/math/big/internal.odin
@@ -2332,7 +2332,12 @@ internal_int_get :: proc(a: ^Int, $T: typeid) -> (res: T, err: Error) where intr
if a.sign == .Zero_or_Positive {
target_bit_size -= 1
}
+ } else {
+ if a.sign == .Negative {
+ return 0, .Integer_Underflow
+ }
}
+
bits_used := internal_count_bits(a)
if bits_used > target_bit_size {
From 1e9b30666fc9ff462877b413a92d15d11870ec35 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Thu, 9 Dec 2021 15:34:17 +0000
Subject: [PATCH 007/710] Minor style change
---
core/math/big/common.odin | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/core/math/big/common.odin b/core/math/big/common.odin
index 5b7d162bc..d534cc90e 100644
--- a/core/math/big/common.odin
+++ b/core/math/big/common.odin
@@ -215,7 +215,7 @@ _MIN_DIGIT_COUNT :: max(3, ((size_of(u128) + _DIGIT_BITS) - 1) / _DIGIT_BITS)
/*
Maximum number of digits.
- Must be small enough such that `_bit_count` does not overflow.
- - Must be small enough such that `_radix_size` for base 2 does not overflow.
+ - Must be small enough such that `_radix_size` for base 2 does not overflow.
`_radix_size` needs two additional bytes for zero termination and sign.
*/
_MAX_BIT_COUNT :: (max(int) - 2)
@@ -251,7 +251,7 @@ Order :: enum i8 {
}
Endianness :: enum i8 {
- Little = -1,
- Platform = 0,
- Big = 1,
-};
\ No newline at end of file
+ Little = -1,
+ Platform = 0,
+ Big = 1,
+}
\ No newline at end of file
From 1e17d5d86f135d1c1f004a475a4700bd082773ba Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Thu, 9 Dec 2021 15:34:35 +0000
Subject: [PATCH 008/710] Add utility procedures to get low values
---
core/math/big/internal.odin | 29 +++++++++++++++++++++++++++++
1 file changed, 29 insertions(+)
diff --git a/core/math/big/internal.odin b/core/math/big/internal.odin
index 4702e76a3..a5e1e7ba3 100644
--- a/core/math/big/internal.odin
+++ b/core/math/big/internal.odin
@@ -2307,6 +2307,35 @@ internal_int_get_i32 :: proc(a: ^Int) -> (res: i32, err: Error) {
}
internal_get_i32 :: proc { internal_int_get_i32, }
+internal_get_low_u32 :: proc(a: ^Int) -> u32 #no_bounds_check {
+ if a == nil {
+ return 0
+ }
+
+ if a.used == 0 {
+ return 0
+ }
+
+ return u32(a.digit[0])
+}
+internal_get_low_u64 :: proc(a: ^Int) -> u64 #no_bounds_check {
+ if a == nil {
+ return 0
+ }
+
+ if a.used == 0 {
+ return 0
+ }
+
+ v := u64(a.digit[0])
+ when size_of(DIGIT) == 4 {
+ if a.used > 1 {
+ return u64(a.digit[1])<<32 | v
+ }
+ }
+ return v
+}
+
/*
TODO: Think about using `count_bits` to check if the value could be returned completely,
and maybe return max(T), .Integer_Overflow if not?
From 1d7c9cf87223971620e1812ca3c84c9581a1c43a Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Thu, 9 Dec 2021 15:35:00 +0000
Subject: [PATCH 009/710] Make `strconv` more robust
---
core/strconv/strconv.odin | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/core/strconv/strconv.odin b/core/strconv/strconv.odin
index 6b3a91b4c..6ea8b39e6 100644
--- a/core/strconv/strconv.odin
+++ b/core/strconv/strconv.odin
@@ -882,7 +882,9 @@ unquote_string :: proc(lit: string, allocator := context.allocator) -> (res: str
return -1
}
- assert(len(lit) >= 2)
+ if len(lit) < 2 {
+ return
+ }
if lit[0] == '`' {
return lit[1:len(lit)-1], false, true
}
From e2f53ee107862d7c5b7dca6df3e68add74273336 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Sat, 11 Dec 2021 12:02:23 +0000
Subject: [PATCH 010/710] Fix #1362 `strings.index_any`
---
core/strings/strings.odin | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/core/strings/strings.odin b/core/strings/strings.odin
index a8199e0cf..7a9744d83 100644
--- a/core/strings/strings.odin
+++ b/core/strings/strings.odin
@@ -504,8 +504,8 @@ index_any :: proc(s, chars: string) -> int {
}
}
- for c in chars {
- if i := index_rune(s, c); i >= 0 {
+ for c in s {
+ if i := index_rune(chars, c); i >= 0 {
return i
}
}
From 85f8c8df91022fae3fef996cd45b9e631e6b2a66 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Sat, 11 Dec 2021 12:04:34 +0000
Subject: [PATCH 011/710] Fix `fields_proc` in `strings` and `bytes`
---
core/bytes/bytes.odin | 2 +-
core/strings/strings.odin | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/core/bytes/bytes.odin b/core/bytes/bytes.odin
index cbc1e2506..1e83b93c8 100644
--- a/core/bytes/bytes.odin
+++ b/core/bytes/bytes.odin
@@ -1143,7 +1143,7 @@ fields_proc :: proc(s: []byte, f: proc(rune) -> bool, allocator := context.alloc
}
if start >= 0 {
- append(&subslices, s[start : end])
+ append(&subslices, s[start : len(s)])
}
return subslices[:]
diff --git a/core/strings/strings.odin b/core/strings/strings.odin
index 7a9744d83..72f29e5d6 100644
--- a/core/strings/strings.odin
+++ b/core/strings/strings.odin
@@ -1288,7 +1288,7 @@ fields_proc :: proc(s: string, f: proc(rune) -> bool, allocator := context.alloc
}
if start >= 0 {
- append(&substrings, s[start : end])
+ append(&substrings, s[start : len(s)])
}
return substrings[:]
From 84b84d9f7de0d17e74ca0b482f784497b509c282 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Sat, 11 Dec 2021 12:47:05 +0000
Subject: [PATCH 012/710] Fix `rat_set_f64`
---
core/math/big/rat.odin | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/core/math/big/rat.odin b/core/math/big/rat.odin
index 837af6fd3..c3efc30aa 100644
--- a/core/math/big/rat.odin
+++ b/core/math/big/rat.odin
@@ -42,9 +42,9 @@ rat_set_f64 :: proc(dst: ^Rat, f: f64, allocator := context.allocator) -> (err:
dst.a.sign = .Negative if f < 0 else .Zero_or_Positive
if shift > 0 {
- internal_int_shl_digit(&dst.b, shift) or_return
+ internal_int_shl(&dst.b, &dst.b, shift) or_return
} else {
- internal_int_shl_digit(&dst.a, -shift) or_return
+ internal_int_shl(&dst.a, &dst.a, -shift) or_return
}
return internal_rat_norm(dst)
@@ -389,9 +389,9 @@ internal_rat_to_float :: proc($T: typeid, z: ^Rat, allocator := context.allocato
internal_int_abs(b2, b) or_return
if shift := MSIZE2 - exp; shift > 0 {
- internal_int_shl_digit(a2, shift) or_return
- } else {
- internal_int_shl_digit(b2, -shift) or_return
+ internal_int_shl(a2, a2, shift) or_return
+ } else if shift < 0 {
+ internal_int_shl(b2, b2, -shift) or_return
}
q, r := &Int{}, &Int{}
From 938744b2760193ff1dffc8ae03c740e91a4dfec5 Mon Sep 17 00:00:00 2001
From: Jeroen van Rijn
Date: Sat, 11 Dec 2021 15:22:24 +0100
Subject: [PATCH 013/710] [math/big] Rename `internal_int_shl_digit` to
`_private_int_shl_leg`.
Same for the SHR variant. These are pure implementation details to shift by a leg/word at a time.
Prevent accidental usage.
---
core/math/big/internal.odin | 74 +--------------------
core/math/big/logical.odin | 33 +---------
core/math/big/private.odin | 118 +++++++++++++++++++++++++++-------
tests/core/math/big/test.odin | 8 +--
tests/core/math/big/test.py | 32 ++++-----
5 files changed, 116 insertions(+), 149 deletions(-)
diff --git a/core/math/big/internal.odin b/core/math/big/internal.odin
index 69497b150..437f6e5fc 100644
--- a/core/math/big/internal.odin
+++ b/core/math/big/internal.odin
@@ -2648,7 +2648,7 @@ internal_int_shrmod :: proc(quotient, remainder, numerator: ^Int, bits: int, all
Shift by as many digits in the bit count.
*/
if bits >= _DIGIT_BITS {
- internal_shr_digit(quotient, bits / _DIGIT_BITS) or_return
+ _private_int_shr_leg(quotient, bits / _DIGIT_BITS) or_return
}
/*
@@ -2687,37 +2687,6 @@ internal_int_shr :: proc(dest, source: ^Int, bits: int, allocator := context.all
}
internal_shr :: proc { internal_int_shr, }
-/*
- Shift right by `digits` * _DIGIT_BITS bits.
-*/
-internal_int_shr_digit :: proc(quotient: ^Int, digits: int, allocator := context.allocator) -> (err: Error) {
- context.allocator = allocator
-
- if digits <= 0 { return nil }
-
- /*
- If digits > used simply zero and return.
- */
- if digits > quotient.used { return internal_zero(quotient) }
-
- /*
- Much like `int_shl_digit`, this is implemented using a sliding window,
- except the window goes the other way around.
-
- b-2 | b-1 | b0 | b1 | b2 | ... | bb | ---->
- /\ | ---->
- \-------------------/ ---->
- */
-
- #no_bounds_check for x := 0; x < (quotient.used - digits); x += 1 {
- quotient.digit[x] = quotient.digit[x + digits]
- }
- quotient.used -= digits
- internal_zero_unused(quotient)
- return internal_clamp(quotient)
-}
-internal_shr_digit :: proc { internal_int_shr_digit, }
-
/*
Shift right by a certain bit count with sign extension.
*/
@@ -2756,7 +2725,7 @@ internal_int_shl :: proc(dest, src: ^Int, bits: int, allocator := context.alloca
Shift by as many digits in the bit count as we have.
*/
if bits >= _DIGIT_BITS {
- internal_shl_digit(dest, bits / _DIGIT_BITS) or_return
+ _private_int_shl_leg(dest, bits / _DIGIT_BITS) or_return
}
/*
@@ -2786,45 +2755,6 @@ internal_int_shl :: proc(dest, src: ^Int, bits: int, allocator := context.alloca
}
internal_shl :: proc { internal_int_shl, }
-
-/*
- Shift left by `digits` * _DIGIT_BITS bits.
-*/
-internal_int_shl_digit :: proc(quotient: ^Int, digits: int, allocator := context.allocator) -> (err: Error) {
- context.allocator = allocator
-
- if digits <= 0 { return nil }
-
- /*
- No need to shift a zero.
- */
- if #force_inline internal_is_zero(quotient) {
- return nil
- }
-
- /*
- Resize `quotient` to accomodate extra digits.
- */
- #force_inline internal_grow(quotient, quotient.used + digits) or_return
-
- /*
- Increment the used by the shift amount then copy upwards.
- */
-
- /*
- Much like `int_shr_digit`, this is implemented using a sliding window,
- except the window goes the other way around.
- */
- #no_bounds_check for x := quotient.used; x > 0; x -= 1 {
- quotient.digit[x+digits-1] = quotient.digit[x-1]
- }
-
- quotient.used += digits
- mem.zero_slice(quotient.digit[:digits])
- return nil
-}
-internal_shl_digit :: proc { internal_int_shl_digit, }
-
/*
Count bits in an `Int`.
Assumes `a` not to be `nil` and to have been initialized.
diff --git a/core/math/big/logical.odin b/core/math/big/logical.odin
index dbcf566c8..e7e55cc47 100644
--- a/core/math/big/logical.odin
+++ b/core/math/big/logical.odin
@@ -86,21 +86,6 @@ int_shr :: proc(dest, source: ^Int, bits: int, allocator := context.allocator) -
}
shr :: proc { int_shr, }
-/*
- Shift right by `digits` * _DIGIT_BITS bits.
-*/
-int_shr_digit :: proc(quotient: ^Int, digits: int, allocator := context.allocator) -> (err: Error) {
- /*
- Check that `quotient` is usable.
- */
- assert_if_nil(quotient)
- context.allocator = allocator
-
- internal_clear_if_uninitialized(quotient) or_return
- return #force_inline internal_int_shr_digit(quotient, digits)
-}
-shr_digit :: proc { int_shr_digit, }
-
/*
Shift right by a certain bit count with sign extension.
*/
@@ -124,20 +109,4 @@ int_shl :: proc(dest, src: ^Int, bits: int, allocator := context.allocator) -> (
internal_clear_if_uninitialized(dest, src) or_return
return #force_inline internal_int_shl(dest, src, bits)
}
-shl :: proc { int_shl, }
-
-
-/*
- Shift left by `digits` * _DIGIT_BITS bits.
-*/
-int_shl_digit :: proc(quotient: ^Int, digits: int, allocator := context.allocator) -> (err: Error) {
- /*
- Check that `quotient` is usable.
- */
- assert_if_nil(quotient)
- context.allocator = allocator
-
- internal_clear_if_uninitialized(quotient) or_return
- return #force_inline internal_int_shl_digit(quotient, digits)
-}
-shl_digit :: proc { int_shl_digit, };
\ No newline at end of file
+shl :: proc { int_shl, }
\ No newline at end of file
diff --git a/core/math/big/private.odin b/core/math/big/private.odin
index 14a27f600..9989a208a 100644
--- a/core/math/big/private.odin
+++ b/core/math/big/private.odin
@@ -211,12 +211,12 @@ _private_int_mul_toom :: proc(dest, a, b: ^Int, allocator := context.allocator)
/*
P = b1*x^4+ S2*x^3+ S1*x^2+ a1*x + a0;
*/
- internal_shl_digit(b1, 4 * B) or_return
- internal_shl_digit(S2, 3 * B) or_return
+ _private_int_shl_leg(b1, 4 * B) or_return
+ _private_int_shl_leg(S2, 3 * B) or_return
internal_add(b1, b1, S2) or_return
- internal_shl_digit(S1, 2 * B) or_return
+ _private_int_shl_leg(S1, 2 * B) or_return
internal_add(b1, b1, S1) or_return
- internal_shl_digit(a1, 1 * B) or_return
+ _private_int_shl_leg(a1, 1 * B) or_return
internal_add(b1, b1, a1) or_return
internal_add(dest, b1, a0) or_return
@@ -317,8 +317,8 @@ _private_int_mul_karatsuba :: proc(dest, a, b: ^Int, allocator := context.alloca
/*
shift by B.
*/
- internal_shl_digit(t1, B) or_return /* t1 = (x0y0 + x1y1 - (x1-x0)*(y1-y0))<= n then x = x - n
@@ -2026,7 +2026,7 @@ _private_int_reduce :: proc(x, m, mu: ^Int, allocator := context.allocator) -> (
/*
q1 = x / b**(k-1)
*/
- internal_shr_digit(q, um - 1)
+ _private_int_shr_leg(q, um - 1)
/*
According to HAC this optimization is ok.
@@ -2040,7 +2040,7 @@ _private_int_reduce :: proc(x, m, mu: ^Int, allocator := context.allocator) -> (
/*
q3 = q2 / b**(k+1)
*/
- internal_shr_digit(q, um + 1)
+ _private_int_shr_leg(q, um + 1)
/*
x = x mod b**(k+1), quick (no division)
@@ -2062,7 +2062,7 @@ _private_int_reduce :: proc(x, m, mu: ^Int, allocator := context.allocator) -> (
*/
if internal_is_negative(x) {
internal_set(q, 1) or_return
- internal_shl_digit(q, um + 1) or_return
+ _private_int_shl_leg(q, um + 1) or_return
internal_add(x, x, q) or_return
}
@@ -3192,6 +3192,74 @@ _private_copy_digits :: proc(dest, src: ^Int, digits: int, offset := int(0)) ->
return nil
}
+
+/*
+ Shift left by `digits` * _DIGIT_BITS bits.
+*/
+_private_int_shl_leg :: proc(quotient: ^Int, digits: int, allocator := context.allocator) -> (err: Error) {
+ context.allocator = allocator
+
+ if digits <= 0 { return nil }
+
+ /*
+ No need to shift a zero.
+ */
+ if #force_inline internal_is_zero(quotient) {
+ return nil
+ }
+
+ /*
+ Resize `quotient` to accomodate extra digits.
+ */
+ #force_inline internal_grow(quotient, quotient.used + digits) or_return
+
+ /*
+ Increment the used by the shift amount then copy upwards.
+ */
+
+ /*
+ Much like `_private_int_shr_leg`, this is implemented using a sliding window,
+ except the window goes the other way around.
+ */
+ #no_bounds_check for x := quotient.used; x > 0; x -= 1 {
+ quotient.digit[x+digits-1] = quotient.digit[x-1]
+ }
+
+ quotient.used += digits
+ mem.zero_slice(quotient.digit[:digits])
+ return nil
+}
+
+/*
+ Shift right by `digits` * _DIGIT_BITS bits.
+*/
+_private_int_shr_leg :: proc(quotient: ^Int, digits: int, allocator := context.allocator) -> (err: Error) {
+ context.allocator = allocator
+
+ if digits <= 0 { return nil }
+
+ /*
+ If digits > used simply zero and return.
+ */
+ if digits > quotient.used { return internal_zero(quotient) }
+
+ /*
+ Much like `int_shl_digit`, this is implemented using a sliding window,
+ except the window goes the other way around.
+
+ b-2 | b-1 | b0 | b1 | b2 | ... | bb | ---->
+ /\ | ---->
+ \-------------------/ ---->
+ */
+
+ #no_bounds_check for x := 0; x < (quotient.used - digits); x += 1 {
+ quotient.digit[x] = quotient.digit[x + digits]
+ }
+ quotient.used -= digits
+ internal_zero_unused(quotient)
+ return internal_clamp(quotient)
+}
+
/*
======================== End of private procedures =======================
diff --git a/tests/core/math/big/test.odin b/tests/core/math/big/test.odin
index 07fa0364b..81f1956dc 100644
--- a/tests/core/math/big/test.odin
+++ b/tests/core/math/big/test.odin
@@ -208,7 +208,7 @@ print_to_buffer :: proc(val: ^big.Int) -> cstring {
/*
dest = shr_digit(src, digits)
*/
-@export test_shr_digit :: proc "c" (source: cstring, digits: int) -> (res: PyRes) {
+@export test_shr_leg :: proc "c" (source: cstring, digits: int) -> (res: PyRes) {
context = runtime.default_context()
err: big.Error
@@ -216,7 +216,7 @@ print_to_buffer :: proc(val: ^big.Int) -> cstring {
defer big.internal_destroy(src)
if err = big.atoi(src, string(source), 16); err != nil { return PyRes{res=":shr_digit:atoi(src):", err=err} }
- if err = #force_inline big.internal_shr_digit(src, digits); err != nil { return PyRes{res=":shr_digit:shr_digit(src):", err=err} }
+ if err = #force_inline big._private_int_shr_leg(src, digits); err != nil { return PyRes{res=":shr_digit:shr_digit(src):", err=err} }
r := print_to_buffer(src)
return PyRes{res = r, err = nil}
@@ -225,7 +225,7 @@ print_to_buffer :: proc(val: ^big.Int) -> cstring {
/*
dest = shl_digit(src, digits)
*/
-@export test_shl_digit :: proc "c" (source: cstring, digits: int) -> (res: PyRes) {
+@export test_shl_leg :: proc "c" (source: cstring, digits: int) -> (res: PyRes) {
context = runtime.default_context()
err: big.Error
@@ -233,7 +233,7 @@ print_to_buffer :: proc(val: ^big.Int) -> cstring {
defer big.internal_destroy(src)
if err = big.atoi(src, string(source), 16); err != nil { return PyRes{res=":shl_digit:atoi(src):", err=err} }
- if err = #force_inline big.internal_shl_digit(src, digits); err != nil { return PyRes{res=":shl_digit:shr_digit(src):", err=err} }
+ if err = #force_inline big._private_int_shl_leg(src, digits); err != nil { return PyRes{res=":shl_digit:shr_digit(src):", err=err} }
r := print_to_buffer(src)
return PyRes{res = r, err = nil}
diff --git a/tests/core/math/big/test.py b/tests/core/math/big/test.py
index 629e76e6e..d292a3ff4 100644
--- a/tests/core/math/big/test.py
+++ b/tests/core/math/big/test.py
@@ -187,8 +187,8 @@ int_sqrt = load(l.test_sqrt, [c_char_p ], Res)
int_root_n = load(l.test_root_n, [c_char_p, c_longlong], Res)
# Logical operations
-int_shl_digit = load(l.test_shl_digit, [c_char_p, c_longlong], Res)
-int_shr_digit = load(l.test_shr_digit, [c_char_p, c_longlong], Res)
+int_shl_leg = load(l.test_shl_leg, [c_char_p, c_longlong], Res)
+int_shr_leg = load(l.test_shr_leg, [c_char_p, c_longlong], Res)
int_shl = load(l.test_shl, [c_char_p, c_longlong], Res)
int_shr = load(l.test_shr, [c_char_p, c_longlong], Res)
int_shr_signed = load(l.test_shr_signed, [c_char_p, c_longlong], Res)
@@ -402,17 +402,17 @@ def test_root_n(number = 0, root = 0, expected_error = Error.Okay):
return test("test_root_n", res, [number, root], expected_error, expected_result)
-def test_shl_digit(a = 0, digits = 0, expected_error = Error.Okay):
+def test_shl_leg(a = 0, digits = 0, expected_error = Error.Okay):
args = [arg_to_odin(a), digits]
- res = int_shl_digit(*args)
+ res = int_shl_leg(*args)
expected_result = None
if expected_error == Error.Okay:
expected_result = a << (digits * 60)
- return test("test_shl_digit", res, [a, digits], expected_error, expected_result)
+ return test("test_shl_leg", res, [a, digits], expected_error, expected_result)
-def test_shr_digit(a = 0, digits = 0, expected_error = Error.Okay):
+def test_shr_leg(a = 0, digits = 0, expected_error = Error.Okay):
args = [arg_to_odin(a), digits]
- res = int_shr_digit(*args)
+ res = int_shr_leg(*args)
expected_result = None
if expected_error == Error.Okay:
if a < 0:
@@ -421,7 +421,7 @@ def test_shr_digit(a = 0, digits = 0, expected_error = Error.Okay):
else:
expected_result = a >> (digits * 60)
- return test("test_shr_digit", res, [a, digits], expected_error, expected_result)
+ return test("test_shr_leg", res, [a, digits], expected_error, expected_result)
def test_shl(a = 0, bits = 0, expected_error = Error.Okay):
args = [arg_to_odin(a), bits]
@@ -556,12 +556,12 @@ TESTS = {
test_root_n: [
[ 1298074214633706907132624082305024, 2, Error.Okay, ],
],
- test_shl_digit: [
+ test_shl_leg: [
[ 3192, 1 ],
[ 1298074214633706907132624082305024, 2 ],
[ 1024, 3 ],
],
- test_shr_digit: [
+ test_shr_leg: [
[ 3680125442705055547392, 1 ],
[ 1725436586697640946858688965569256363112777243042596638790631055949824, 2 ],
[ 219504133884436710204395031992179571, 2 ],
@@ -619,10 +619,10 @@ total_failures = 0
# test_shr_signed also tests shr, so we're not going to test shr randomly.
#
RANDOM_TESTS = [
- test_add, test_sub, test_mul, test_sqr, test_div,
- test_log, test_pow, test_sqrt, test_root_n,
- test_shl_digit, test_shr_digit, test_shl, test_shr_signed,
- test_gcd, test_lcm, test_is_square,
+ test_add, test_sub, test_mul, test_sqr,
+ test_log, test_pow, test_sqrt, test_root_n,
+ test_shl_leg, test_shr_leg, test_shl, test_shr_signed,
+ test_gcd, test_lcm, test_is_square, test_div,
]
SKIP_LARGE = [
test_pow, test_root_n, # test_gcd,
@@ -719,9 +719,9 @@ if __name__ == '__main__':
a = randint(1, 1 << BITS)
b = TEST_ROOT_N_PARAMS[index]
index = (index + 1) % len(TEST_ROOT_N_PARAMS)
- elif test_proc == test_shl_digit:
+ elif test_proc == test_shl_leg:
b = randint(0, 10);
- elif test_proc == test_shr_digit:
+ elif test_proc == test_shr_leg:
a = abs(a)
b = randint(0, 10);
elif test_proc == test_shl:
From b7c78da1fbabca7288088e0ce257c06c058bcf73 Mon Sep 17 00:00:00 2001
From: Rehkitzdev
Date: Sat, 11 Dec 2021 18:38:32 +0100
Subject: [PATCH 014/710] Fix storeInt call in webgl glue code
---
vendor/wasm/WebGL/runtime.js | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/vendor/wasm/WebGL/runtime.js b/vendor/wasm/WebGL/runtime.js
index b6994a8ec..18b540b5c 100644
--- a/vendor/wasm/WebGL/runtime.js
+++ b/vendor/wasm/WebGL/runtime.js
@@ -416,7 +416,7 @@ class WebGLInterface {
log = log.substring(0, n);
this.mem.loadBytes(buf_ptr, buf_len).set(new TextEncoder("utf-8").encode(log))
- storeInt(length_ptr, n);
+ this.mem.storeInt(length_ptr, n);
}
},
GetShaderInfoLog: (shader, buf_ptr, buf_len, length_ptr) => {
@@ -429,7 +429,7 @@ class WebGLInterface {
log = log.substring(0, n);
this.mem.loadBytes(buf_ptr, buf_len).set(new TextEncoder("utf-8").encode(log))
- storeInt(length_ptr, n);
+ this.mem.storeInt(length_ptr, n);
}
},
GetShaderiv: (shader, pname, p) => {
@@ -439,11 +439,11 @@ class WebGLInterface {
if (log === null) {
log = "(unknown error)";
}
- storeInt(p, log.length+1);
+ this.mem.storeInt(p, log.length+1);
} else if (pname == 35720) {
let source = this.ctx.getShaderSource(this.shaders[shader]);
let sourceLength = (source === null || source.length == 0) ? 0 : source.length+1;
- storeInt(p, sourceLength);
+ this.mem.storeInt(p, sourceLength);
} else {
let param = this.ctx.getShaderParameter(this.shaders[shader], pname);
this.mem.storeI32(p, param);
@@ -994,7 +994,7 @@ class WebGLInterface {
let n = Math.min(buf_len, name.length);
name = name.substring(0, n);
this.mem.loadBytes(buf_ptr, buf_len).set(new TextEncoder("utf-8").encode(name))
- storeInt(length_ptr, n);
+ this.mem.storeInt(length_ptr, n);
},
UniformBlockBinding: (program, uniformBlockIndex, uniformBlockBinding) => {
this.assertWebGL2();
From 08a081ed45520eac4c4ebd8501fa3ab7970b5c77 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Sat, 11 Dec 2021 17:42:58 +0000
Subject: [PATCH 015/710] Improve debug symbol retention with `-debug -opt:0`
---
src/llvm_backend_opt.cpp | 26 +++++++++++---------------
1 file changed, 11 insertions(+), 15 deletions(-)
diff --git a/src/llvm_backend_opt.cpp b/src/llvm_backend_opt.cpp
index de925655f..5b8468799 100644
--- a/src/llvm_backend_opt.cpp
+++ b/src/llvm_backend_opt.cpp
@@ -48,12 +48,6 @@ LLVMBool lb_must_preserve_predicate_callback(LLVMValueRef value, void *user_data
return LLVMIsAAllocaInst(value) != nullptr;
}
-void lb_add_must_preserve_predicate_pass(lbModule *m, LLVMPassManagerRef fpm, i32 optimization_level) {
- if (false && optimization_level == 0 && m->debug_builder) {
- // LLVMAddInternalizePassWithMustPreservePredicate(fpm, m, lb_must_preserve_predicate_callback);
- }
-}
-
#if LLVM_VERSION_MAJOR < 12
#define LLVM_ADD_CONSTANT_VALUE_PASS(fpm) LLVMAddConstantPropagationPass(fpm)
@@ -61,7 +55,10 @@ void lb_add_must_preserve_predicate_pass(lbModule *m, LLVMPassManagerRef fpm, i3
#define LLVM_ADD_CONSTANT_VALUE_PASS(fpm)
#endif
-void lb_basic_populate_function_pass_manager(LLVMPassManagerRef fpm) {
+void lb_basic_populate_function_pass_manager(LLVMPassManagerRef fpm, i32 optimization_level) {
+ if (optimization_level == 0 && build_context.ODIN_DEBUG) {
+ return;
+ }
LLVMAddPromoteMemoryToRegisterPass(fpm);
LLVMAddMergedLoadStoreMotionPass(fpm);
LLVM_ADD_CONSTANT_VALUE_PASS(fpm);
@@ -78,14 +75,12 @@ void lb_populate_function_pass_manager(lbModule *m, LLVMPassManagerRef fpm, bool
// TODO(bill): Determine which opt definitions should exist in the first place
optimization_level = gb_clamp(optimization_level, 0, 2);
- lb_add_must_preserve_predicate_pass(m, fpm, optimization_level);
-
if (ignore_memcpy_pass) {
- lb_basic_populate_function_pass_manager(fpm);
+ lb_basic_populate_function_pass_manager(fpm, optimization_level);
return;
} else if (optimization_level == 0) {
LLVMAddMemCpyOptPass(fpm);
- lb_basic_populate_function_pass_manager(fpm);
+ lb_basic_populate_function_pass_manager(fpm, optimization_level);
return;
}
@@ -96,7 +91,7 @@ void lb_populate_function_pass_manager(lbModule *m, LLVMPassManagerRef fpm, bool
LLVMPassManagerBuilderPopulateFunctionPassManager(pmb, fpm);
#else
LLVMAddMemCpyOptPass(fpm);
- lb_basic_populate_function_pass_manager(fpm);
+ lb_basic_populate_function_pass_manager(fpm, optimization_level);
LLVMAddSCCPPass(fpm);
@@ -114,11 +109,9 @@ void lb_populate_function_pass_manager_specific(lbModule *m, LLVMPassManagerRef
// TODO(bill): Determine which opt definitions should exist in the first place
optimization_level = gb_clamp(optimization_level, 0, 2);
- lb_add_must_preserve_predicate_pass(m, fpm, optimization_level);
-
if (optimization_level == 0) {
LLVMAddMemCpyOptPass(fpm);
- lb_basic_populate_function_pass_manager(fpm);
+ lb_basic_populate_function_pass_manager(fpm, optimization_level);
return;
}
@@ -191,6 +184,9 @@ void lb_populate_module_pass_manager(LLVMTargetMachineRef target_machine, LLVMPa
// NOTE(bill): Treat -opt:3 as if it was -opt:2
// TODO(bill): Determine which opt definitions should exist in the first place
optimization_level = gb_clamp(optimization_level, 0, 2);
+ if (optimization_level == 0 && build_context.ODIN_DEBUG) {
+ return;
+ }
LLVMAddAlwaysInlinerPass(mpm);
LLVMAddStripDeadPrototypesPass(mpm);
From 4423bc0706a6a1a64cf419720fd65bc723fdf58a Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Sun, 12 Dec 2021 01:10:40 +0000
Subject: [PATCH 016/710] Fix typo
---
core/strings/strings.odin | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/core/strings/strings.odin b/core/strings/strings.odin
index 72f29e5d6..3f703372f 100644
--- a/core/strings/strings.odin
+++ b/core/strings/strings.odin
@@ -504,8 +504,8 @@ index_any :: proc(s, chars: string) -> int {
}
}
- for c in s {
- if i := index_rune(chars, c); i >= 0 {
+ for c, i in s {
+ if index_rune(chars, c) >= 0 {
return i
}
}
From d0240b8981068b3f2bcbecae51d1ca8246e0113a Mon Sep 17 00:00:00 2001
From: ryuukk <44361234+ryuukk@users.noreply.github.com>
Date: Wed, 15 Dec 2021 06:12:26 +0100
Subject: [PATCH 017/710] [WASM] Added missing zoffset parameters to some gl
functions
---
vendor/wasm/WebGL/runtime.js | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/vendor/wasm/WebGL/runtime.js b/vendor/wasm/WebGL/runtime.js
index 18b540b5c..3dc5186ca 100644
--- a/vendor/wasm/WebGL/runtime.js
+++ b/vendor/wasm/WebGL/runtime.js
@@ -672,9 +672,9 @@ class WebGLInterface {
this.ctx.texImage3D(target, level, internalformat, width, height, depth, border, format, type, null);
}
},
- TexSubImage3D: (target, level, xoffset, yoffset, width, height, depth, format, type, size, data) => {
+ TexSubImage3D: (target, level, xoffset, yoffset, zoffset, width, height, depth, format, type, size, data) => {
this.assertWebGL2();
- this.ctx.texSubImage3D(target, level, xoffset, yoffset, width, height, depth, format, type, this.mem.loadBytes(data, size));
+ this.ctx.texSubImage3D(target, level, xoffset, yoffset, zoffset, width, height, depth, format, type, this.mem.loadBytes(data, size));
},
CompressedTexImage3D: (target, level, internalformat, width, height, depth, border, imageSize, data) => {
this.assertWebGL2();
@@ -684,12 +684,12 @@ class WebGLInterface {
this.ctx.compressedTexImage3D(target, level, internalformat, width, height, depth, border, null);
}
},
- CompressedTexSubImage3D: (target, level, xoffset, yoffset, width, height, depth, format, imageSize, data) => {
+ CompressedTexSubImage3D: (target, level, xoffset, yoffset, zoffset, width, height, depth, format, imageSize, data) => {
this.assertWebGL2();
if (data) {
- this.ctx.compressedTexSubImage3D(target, level, xoffset, yoffset, width, height, depth, format, this.mem.loadBytes(data, imageSize));
+ this.ctx.compressedTexSubImage3D(target, level, xoffset, yoffset, zoffset, width, height, depth, format, this.mem.loadBytes(data, imageSize));
} else {
- this.ctx.compressedTexSubImage3D(target, level, xoffset, yoffset, width, height, depth, format, null);
+ this.ctx.compressedTexSubImage3D(target, level, xoffset, yoffset, zoffset, width, height, depth, format, null);
}
},
@@ -1031,4 +1031,4 @@ class WebGLInterface {
};
-export {WebGLInterface};
\ No newline at end of file
+export {WebGLInterface};
From 4ebdb6740ef041f4b600663b3b597e882c3fc42d Mon Sep 17 00:00:00 2001
From: gilles
Date: Thu, 16 Dec 2021 18:20:10 +0100
Subject: [PATCH 018/710] fix math.prod
accumulator was not initialized to one
---
core/math/math.odin | 1 +
1 file changed, 1 insertion(+)
diff --git a/core/math/math.odin b/core/math/math.odin
index caaa6f51b..b81598da9 100644
--- a/core/math/math.odin
+++ b/core/math/math.odin
@@ -1196,6 +1196,7 @@ sum :: proc "contextless" (x: $T/[]$E) -> (res: E)
prod :: proc "contextless" (x: $T/[]$E) -> (res: E)
where intrinsics.type_is_numeric(E) {
+ res = 1
for i in x {
res *= i
}
From 0548db423067bce16d45af651819bf56feb5d411 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Fri, 17 Dec 2021 11:06:17 +0000
Subject: [PATCH 019/710] Disallow `@(static)` and `@(thread_local)` within
`defer` statements
---
src/check_stmt.cpp | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/src/check_stmt.cpp b/src/check_stmt.cpp
index 1a424240c..396388629 100644
--- a/src/check_stmt.cpp
+++ b/src/check_stmt.cpp
@@ -2243,6 +2243,9 @@ void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) {
error(e->token, "The 'static' attribute is not allowed to be applied to '_'");
} else {
e->flags |= EntityFlag_Static;
+ if (ctx->in_defer) {
+ error(e->token, "'static' variables cannot be declared within a defer statement");
+ }
}
}
if (ac.thread_local_model != "") {
@@ -2251,9 +2254,13 @@ void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) {
error(e->token, "The 'thread_local' attribute is not allowed to be applied to '_'");
} else {
e->flags |= EntityFlag_Static;
+ if (ctx->in_defer) {
+ error(e->token, "'thread_local' variables cannot be declared within a defer statement");
+ }
}
e->Variable.thread_local_model = ac.thread_local_model;
}
+
if (ac.is_static && ac.thread_local_model != "") {
error(e->token, "The 'static' attribute is not needed if 'thread_local' is applied");
From 29ca6ee420f36381ee0fba6bd409dc51716ab206 Mon Sep 17 00:00:00 2001
From: CiD-
Date: Fri, 17 Dec 2021 10:41:49 -0500
Subject: [PATCH 020/710] add zeroing to new region from realloc
---
core/os/os.odin | 13 +++++++++++--
1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/core/os/os.odin b/core/os/os.odin
index 83158be80..9230bc22c 100644
--- a/core/os/os.odin
+++ b/core/os/os.odin
@@ -206,11 +206,20 @@ heap_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode,
}
}
- aligned_resize :: proc(p: rawptr, old_size: int, new_size: int, new_alignment: int) -> ([]byte, mem.Allocator_Error) {
+ aligned_resize :: proc(p: rawptr, old_size: int, new_size: int, new_alignment: int) -> (new_memory: []byte, err: mem.Allocator_Error) {
if p == nil {
return nil, nil
}
- return aligned_alloc(new_size, new_alignment, p)
+
+ new_memory = aligned_alloc(new_size, new_alignment, p) or_return
+ when ODIN_OS != "windows" {
+ // NOTE: realloc does not zero the new memory, so we do it
+ if new_size > old_size {
+ new_region := mem.raw_data(new_memory[old_size:])
+ mem.zero(new_region, new_size - old_size)
+ }
+ }
+ return
}
switch mode {
From ebdb3ab43a8cdc49cb715ecb6f5fd38522912aa5 Mon Sep 17 00:00:00 2001
From: CiD-
Date: Fri, 17 Dec 2021 12:04:05 -0500
Subject: [PATCH 021/710] added notes about _unix_alloc
---
core/os/os_darwin.odin | 2 ++
core/os/os_freebsd.odin | 2 ++
core/os/os_linux.odin | 2 ++
3 files changed, 6 insertions(+)
diff --git a/core/os/os_darwin.odin b/core/os/os_darwin.odin
index d40c80aeb..6fa43bf09 100644
--- a/core/os/os_darwin.odin
+++ b/core/os/os_darwin.odin
@@ -530,6 +530,8 @@ heap_alloc :: proc(size: int) -> rawptr {
return _unix_calloc(1, size)
}
heap_resize :: proc(ptr: rawptr, new_size: int) -> rawptr {
+ // NOTE: _unix_realloc doesn't guarantee new memory will be zeroed on
+ // POSIX platforms. Ensure your caller takes this into account.
return _unix_realloc(ptr, new_size)
}
heap_free :: proc(ptr: rawptr) {
diff --git a/core/os/os_freebsd.odin b/core/os/os_freebsd.odin
index e9314b468..82317532d 100644
--- a/core/os/os_freebsd.odin
+++ b/core/os/os_freebsd.odin
@@ -378,6 +378,8 @@ heap_alloc :: proc(size: int) -> rawptr {
}
heap_resize :: proc(ptr: rawptr, new_size: int) -> rawptr {
+ // NOTE: _unix_realloc doesn't guarantee new memory will be zeroed on
+ // POSIX platforms. Ensure your caller takes this into account.
return _unix_realloc(ptr, c.size_t(new_size));
}
diff --git a/core/os/os_linux.odin b/core/os/os_linux.odin
index 260a051ce..116fbdba5 100644
--- a/core/os/os_linux.odin
+++ b/core/os/os_linux.odin
@@ -543,6 +543,8 @@ heap_alloc :: proc(size: int) -> rawptr {
}
heap_resize :: proc(ptr: rawptr, new_size: int) -> rawptr {
+ // NOTE: _unix_realloc doesn't guarantee new memory will be zeroed on
+ // POSIX platforms. Ensure your caller takes this into account.
return _unix_realloc(ptr, c.size_t(new_size))
}
From a48317deee95b956430ace83f0db3e34bef590dd Mon Sep 17 00:00:00 2001
From: Wes Hardee
Date: Sat, 18 Dec 2021 12:43:24 -0600
Subject: [PATCH 022/710] use '___$startup_runtime' for MacOS
MacOS needs 3 underscores unlike the 2 needed by Linux.
---
src/main.cpp | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/src/main.cpp b/src/main.cpp
index 7b4bc92ee..36b30112f 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -439,13 +439,14 @@ i32 linker_stage(lbGenerator *gen) {
// so use ld instead.
// :UseLDForShared
linker = "ld";
- link_settings = gb_string_appendc(link_settings, "-init '__$startup_runtime' ");
// Shared libraries are .dylib on MacOS and .so on Linux.
#if defined(GB_SYSTEM_OSX)
output_ext = STR_LIT(".dylib");
+ link_settings = gb_string_appendc(link_settings, "-init '___$startup_runtime' ");
link_settings = gb_string_appendc(link_settings, "-dylib -dynamic ");
#else
output_ext = STR_LIT(".so");
+ link_settings = gb_string_appendc(link_settings, "-init '__$startup_runtime' ");
link_settings = gb_string_appendc(link_settings, "-shared ");
#endif
} else {
From 3e465c7e84490ea73f9419286fd53e95ba911c38 Mon Sep 17 00:00:00 2001
From: Platin21
Date: Sun, 19 Dec 2021 21:51:51 +0100
Subject: [PATCH 023/710] Changes to required llvm version 13 as both 12 and 11
don't work correctly on macOS Apple Silicon
---
Makefile | 14 ++++++++++++--
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/Makefile b/Makefile
index 23fb7be66..a7aecbb2d 100644
--- a/Makefile
+++ b/Makefile
@@ -8,11 +8,21 @@ CC=clang
OS=$(shell uname)
ifeq ($(OS), Darwin)
+ ARCH=$(shell uname -m)
LLVM_CONFIG=llvm-config
- ifneq ($(shell llvm-config --version | grep '^11\.'),)
+
+ # LLVM Version Setting
+ LLVM_VERSION_PATTERN="^11\."
+ LLVM_VERSION="11"
+ ifeq ($(ARCH), arm64)
+ LLVM_VERSION="13"
+ LLVM_VERSION_PATTERN="^13"
+ endif
+
+ ifneq ($(shell llvm-config --version | grep $(LLVM_VERSION_PATTERN)),)
LLVM_CONFIG=llvm-config
else
- $(error "Requirement: llvm-config must be version 11")
+ $(error "Requirement: llvm-config must be version $(LLVM_VERSION)")
endif
LDFLAGS:=$(LDFLAGS) -liconv
From e2b36c4004130f8566ec63037cfc584b7318c91c Mon Sep 17 00:00:00 2001
From: Tetralux
Date: Tue, 21 Dec 2021 02:11:56 +0000
Subject: [PATCH 024/710] Rename slice.to_dynamic to slice.clone_to_dynamic
---
core/slice/slice.odin | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/core/slice/slice.odin b/core/slice/slice.odin
index 487dd46c2..a82c5fa96 100644
--- a/core/slice/slice.odin
+++ b/core/slice/slice.odin
@@ -185,7 +185,7 @@ concatenate :: proc(a: []$T/[]$E, allocator := context.allocator) -> (res: T) {
return
}
-// copies slice into a new dynamic array
+// copies a slice into a new slice
clone :: proc(a: $T/[]$E, allocator := context.allocator) -> []E {
d := make([]E, len(a), allocator)
copy(d[:], a)
@@ -194,11 +194,12 @@ clone :: proc(a: $T/[]$E, allocator := context.allocator) -> []E {
// copies slice into a new dynamic array
-to_dynamic :: proc(a: $T/[]$E, allocator := context.allocator) -> [dynamic]E {
+clone_to_dynamic :: proc(a: $T/[]$E, allocator := context.allocator) -> [dynamic]E {
d := make([dynamic]E, len(a), allocator)
copy(d[:], a)
return d
}
+to_dynamic :: clone_to_dynamic
// Converts slice into a dynamic array without cloning or allocating memory
into_dynamic :: proc(a: $T/[]$E) -> [dynamic]E {
From 8dbeed8a9faba5b341823ae3a4ea4f7a453f3f87 Mon Sep 17 00:00:00 2001
From: Platin21
Date: Thu, 23 Dec 2021 01:59:31 +0100
Subject: [PATCH 025/710] Removes unneeded lookups / Adds sret to call site
which fixes the mac bug
---
src/llvm_abi.cpp | 12 ++++++------
src/llvm_backend_proc.cpp | 4 ++++
2 files changed, 10 insertions(+), 6 deletions(-)
diff --git a/src/llvm_abi.cpp b/src/llvm_abi.cpp
index e18dc344b..c30f6531a 100644
--- a/src/llvm_abi.cpp
+++ b/src/llvm_abi.cpp
@@ -981,16 +981,16 @@ namespace lbAbiArm64 {
if (size <= 16) {
LLVMTypeRef cast_type = nullptr;
if (size <= 1) {
- cast_type = LLVMIntTypeInContext(c, 8);
+ cast_type = LLVMInt8TypeInContext(c);
} else if (size <= 2) {
- cast_type = LLVMIntTypeInContext(c, 16);
+ cast_type = LLVMInt16TypeInContext(c);
} else if (size <= 4) {
- cast_type = LLVMIntTypeInContext(c, 32);
+ cast_type = LLVMInt32TypeInContext(c);
} else if (size <= 8) {
- cast_type = LLVMIntTypeInContext(c, 64);
+ cast_type = LLVMInt64TypeInContext(c);
} else {
unsigned count = cast(unsigned)((size+7)/8);
- cast_type = LLVMArrayType(LLVMIntTypeInContext(c, 64), count);
+ cast_type = LLVMArrayType(LLVMInt64TypeInContext(c), count);
}
return lb_arg_type_direct(type, cast_type, nullptr, nullptr);
} else {
@@ -999,7 +999,7 @@ namespace lbAbiArm64 {
}
}
}
-
+
Array compute_arg_types(LLVMContextRef c, LLVMTypeRef *arg_types, unsigned arg_count) {
auto args = array_make(heap_allocator(), arg_count);
diff --git a/src/llvm_backend_proc.cpp b/src/llvm_backend_proc.cpp
index 25b27ee47..84fddd9e2 100644
--- a/src/llvm_backend_proc.cpp
+++ b/src/llvm_backend_proc.cpp
@@ -736,6 +736,10 @@ lbValue lb_emit_call_internal(lbProcedure *p, lbValue value, lbValue return_ptr,
LLVMValueRef ret = LLVMBuildCall2(p->builder, fnp, fn, args, arg_count, "");
+ if (return_ptr.value != nullptr) {
+ LLVMAddCallSiteAttribute(ret, 1, lb_create_enum_attribute_with_type(p->module->ctx, "sret", LLVMTypeOf(args[0])));
+ }
+
switch (inlining) {
case ProcInlining_none:
break;
From dce120258fbca70dfaa9a738bc168463df7a3dda Mon Sep 17 00:00:00 2001
From: Yawning Angel
Date: Thu, 23 Dec 2021 02:46:32 +0000
Subject: [PATCH 026/710] src: Add preliminary support for Linux AArch64
Tested via `tests/core`, on a Raspberry Pi 4 running the latest
64-bit Raspberry Pi OS image (LLVM 11).
---
src/build_settings.cpp | 14 ++++++++++++++
src/gb/gb.h | 2 ++
src/threading.cpp | 4 ++++
3 files changed, 20 insertions(+)
diff --git a/src/build_settings.cpp b/src/build_settings.cpp
index 29abd441c..b8d50898d 100644
--- a/src/build_settings.cpp
+++ b/src/build_settings.cpp
@@ -300,6 +300,14 @@ gb_global TargetMetrics target_linux_amd64 = {
str_lit("x86_64-pc-linux-gnu"),
str_lit("e-m:w-i64:64-f80:128-n8:16:32:64-S128"),
};
+gb_global TargetMetrics target_linux_arm64 = {
+ TargetOs_linux,
+ TargetArch_arm64,
+ 8,
+ 16,
+ str_lit("aarch64-linux-elf"),
+ str_lit("e-m:e-i8:8:32-i16:32-i64:64-i128:128-n32:64-S128"),
+};
gb_global TargetMetrics target_darwin_amd64 = {
TargetOs_darwin,
@@ -394,6 +402,7 @@ gb_global NamedTargetMetrics named_targets[] = {
{ str_lit("essence_amd64"), &target_essence_amd64 },
{ str_lit("linux_386"), &target_linux_386 },
{ str_lit("linux_amd64"), &target_linux_amd64 },
+ { str_lit("linux_arm64"), &target_linux_arm64 },
{ str_lit("windows_386"), &target_windows_386 },
{ str_lit("windows_amd64"), &target_windows_amd64 },
{ str_lit("freebsd_386"), &target_freebsd_386 },
@@ -880,6 +889,8 @@ void init_build_context(TargetMetrics *cross_target) {
#endif
#elif defined(GB_SYSTEM_FREEBSD)
metrics = &target_freebsd_amd64;
+ #elif defined(GB_CPU_ARM)
+ metrics = &target_linux_arm64;
#else
metrics = &target_linux_amd64;
#endif
@@ -959,6 +970,9 @@ void init_build_context(TargetMetrics *cross_target) {
case TargetOs_darwin:
bc->link_flags = str_lit("-arch arm64 ");
break;
+ case TargetOs_linux:
+ bc->link_flags = str_lit("-arch aarch64 ");
+ break;
}
} else if (is_arch_wasm()) {
gbString link_flags = gb_string_make(heap_allocator(), " ");
diff --git a/src/gb/gb.h b/src/gb/gb.h
index f716b0840..d9bf09436 100644
--- a/src/gb/gb.h
+++ b/src/gb/gb.h
@@ -3355,6 +3355,8 @@ gb_inline u32 gb_thread_current_id(void) {
__asm__("mov %%gs:0x08,%0" : "=r"(thread_id));
#elif defined(GB_ARCH_64_BIT) && defined(GB_CPU_X86)
__asm__("mov %%fs:0x10,%0" : "=r"(thread_id));
+#elif defined(GB_SYSTEM_LINUX)
+ thread_id = gettid();
#else
#error Unsupported architecture for gb_thread_current_id()
#endif
diff --git a/src/threading.cpp b/src/threading.cpp
index e9412b411..b318e4ff1 100644
--- a/src/threading.cpp
+++ b/src/threading.cpp
@@ -296,6 +296,8 @@ u32 thread_current_id(void) {
__asm__("mov %%gs:0x08,%0" : "=r"(thread_id));
#elif defined(GB_ARCH_64_BIT) && defined(GB_CPU_X86)
__asm__("mov %%fs:0x10,%0" : "=r"(thread_id));
+#elif defined(GB_SYSTEM_LINUX)
+ thread_id = gettid();
#else
#error Unsupported architecture for thread_current_id()
#endif
@@ -315,6 +317,8 @@ gb_inline void yield_thread(void) {
#endif
#elif defined(GB_CPU_X86)
_mm_pause();
+#elif defined(GB_CPU_ARM)
+ __asm__ volatile ("yield" : : : "memory");
#else
#error Unknown architecture
#endif
From 5d80e242244501a5eb256a72ed6ad5ca180bc49d Mon Sep 17 00:00:00 2001
From: Andrea Piseri
Date: Thu, 23 Dec 2021 12:49:40 +0100
Subject: [PATCH 027/710] Add slice/scanner proc
---
core/slice/slice.odin | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+)
diff --git a/core/slice/slice.odin b/core/slice/slice.odin
index a82c5fa96..69aae1e39 100644
--- a/core/slice/slice.odin
+++ b/core/slice/slice.odin
@@ -304,6 +304,27 @@ filter :: proc(s: $S/[]$U, f: proc(U) -> bool, allocator := context.allocator) -
return r[:]
}
+scanner :: proc (s: $S/[]$U, initializer: $V, f: proc(V, U)->V, allocator := context.allocator) -> []V {
+ if len(s) == 0 { return {} }
+ p := as_ptr(s)
+
+ res := make([]V, len(s), allocator)
+
+ q := as_ptr(res)
+ l := len(res)
+
+ r := initializer
+
+ for l > 0 {
+ r = f(r, p^)
+ q^ = r
+ p = intrinsics.ptr_offset(p, 1)
+ q = intrinsics.ptr_offset(q, 1)
+ l -= 1
+ }
+
+ return res
+}
min :: proc(s: $S/[]$T) -> (res: T, ok: bool) where intrinsics.type_is_ordered(T) #optional_ok {
From 9b2fe56d149fa23b03b678de2eb51c4f599d2711 Mon Sep 17 00:00:00 2001
From: Tetralux
Date: Sat, 25 Dec 2021 18:53:20 +0000
Subject: [PATCH 028/710] Parse #no_nil on unions
---
core/odin/ast/ast.odin | 1 +
core/odin/parser/parser.odin | 7 +++++++
2 files changed, 8 insertions(+)
diff --git a/core/odin/ast/ast.odin b/core/odin/ast/ast.odin
index 260979d89..9db57541b 100644
--- a/core/odin/ast/ast.odin
+++ b/core/odin/ast/ast.odin
@@ -711,6 +711,7 @@ Union_Type :: struct {
poly_params: ^Field_List,
align: ^Expr,
is_maybe: bool,
+ is_no_nil: bool,
where_token: tokenizer.Token,
where_clauses: []^Expr,
variants: []^Expr,
diff --git a/core/odin/parser/parser.odin b/core/odin/parser/parser.odin
index 52d4b5e5a..aade2051a 100644
--- a/core/odin/parser/parser.odin
+++ b/core/odin/parser/parser.odin
@@ -2600,6 +2600,7 @@ parse_operand :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
poly_params: ^ast.Field_List
align: ^ast.Expr
is_maybe: bool
+ is_no_nil: bool
if allow_token(p, .Open_Paren) {
param_count: int
@@ -2626,6 +2627,11 @@ parse_operand :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
error(p, tag.pos, "duplicate union tag '#%s'", tag.text)
}
is_maybe = true
+ case "no_nil":
+ if is_no_nil {
+ error(p, tag.pos, "duplicate union tag '#%s'", tag.text)
+ }
+ is_no_nil = true
case:
error(p, tag.pos, "invalid union tag '#%s", tag.text)
}
@@ -2669,6 +2675,7 @@ parse_operand :: proc(p: ^Parser, lhs: bool) -> ^ast.Expr {
ut.where_token = where_token
ut.where_clauses = where_clauses
ut.is_maybe = is_maybe
+ ut.is_no_nil = is_no_nil
return ut
From a60667e900f3e7f3b6253019961a17b65b76e479 Mon Sep 17 00:00:00 2001
From: Tetralux
Date: Sat, 25 Dec 2021 19:17:34 +0000
Subject: [PATCH 029/710] core:odin/parser: Fix parsing of Allman style braces
in for loops
---
core/odin/parser/parser.odin | 1 +
1 file changed, 1 insertion(+)
diff --git a/core/odin/parser/parser.odin b/core/odin/parser/parser.odin
index 52d4b5e5a..679623108 100644
--- a/core/odin/parser/parser.odin
+++ b/core/odin/parser/parser.odin
@@ -888,6 +888,7 @@ parse_for_stmt :: proc(p: ^Parser) -> ^ast.Stmt {
error(p, body.pos, "the body of a 'do' must be on the same line as the 'for' token")
}
} else {
+ allow_token(p, .Semicolon)
body = parse_body(p)
}
From 86f831ddd19e5f4e21179b32d603a6ae2cc6ce2f Mon Sep 17 00:00:00 2001
From: Platin21
Date: Mon, 27 Dec 2021 22:10:52 +0100
Subject: [PATCH 030/710] This adds code which checks how big the return is and
 if it is too big returns the value via sret
---
src/llvm_abi.cpp | 15 ++++++++++++++-
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/src/llvm_abi.cpp b/src/llvm_abi.cpp
index c30f6531a..42f05bb27 100644
--- a/src/llvm_abi.cpp
+++ b/src/llvm_abi.cpp
@@ -965,6 +965,10 @@ namespace lbAbiArm64 {
}
return false;
}
+
+ unsigned is_homogenous_aggregate_small_enough(LLVMTypeRef *base_type_, unsigned member_count_) {
+ return (member_count_ <= 4);
+ }
lbArgType compute_return_type(LLVMContextRef c, LLVMTypeRef type, bool return_is_defined) {
LLVMTypeRef homo_base_type = {};
@@ -975,7 +979,16 @@ namespace lbAbiArm64 {
} else if (is_register(type)) {
return non_struct(c, type);
} else if (is_homogenous_aggregate(c, type, &homo_base_type, &homo_member_count)) {
- return lb_arg_type_direct(type, LLVMArrayType(homo_base_type, homo_member_count), nullptr, nullptr);
+ if(is_homogenous_aggregate_small_enough(&homo_base_type, homo_member_count)) {
+ return lb_arg_type_direct(type, LLVMArrayType(homo_base_type, homo_member_count), nullptr, nullptr);
+ } else {
+ //TODO(Platin): do i need to create stuff that can handle the different return type?
+ // else this needs a fix in llvm_backend_proc as we would need to cast it to the correct array type
+
+ //LLVMTypeRef array_type = LLVMArrayType(homo_base_type, homo_member_count);
+ LLVMAttributeRef attr = lb_create_enum_attribute_with_type(c, "sret", type);
+ return lb_arg_type_indirect(type, attr);
+ }
} else {
i64 size = lb_sizeof(type);
if (size <= 16) {
From 7f61a90ea1be96c22cae87fdbfdc08b30f2421d6 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Tue, 28 Dec 2021 14:05:09 +0000
Subject: [PATCH 031/710] Remove `core:container` contents
---
core/container/array.odin | 216 -----------------
core/container/bloom_filter.odin | 80 ------
core/container/map.odin | 377 -----------------------------
core/container/priority_queue.odin | 121 ---------
core/container/queue.odin | 175 -------------
core/container/ring.odin | 74 ------
core/container/set.odin | 240 ------------------
core/container/small_array.odin | 95 --------
8 files changed, 1378 deletions(-)
delete mode 100644 core/container/array.odin
delete mode 100644 core/container/bloom_filter.odin
delete mode 100644 core/container/map.odin
delete mode 100644 core/container/priority_queue.odin
delete mode 100644 core/container/queue.odin
delete mode 100644 core/container/ring.odin
delete mode 100644 core/container/set.odin
delete mode 100644 core/container/small_array.odin
diff --git a/core/container/array.odin b/core/container/array.odin
deleted file mode 100644
index 2d5a64ec3..000000000
--- a/core/container/array.odin
+++ /dev/null
@@ -1,216 +0,0 @@
-package container
-
-import "core:mem"
-import "core:runtime"
-
-Array :: struct($T: typeid) {
- data: ^T,
- len: int,
- cap: int,
- allocator: mem.Allocator,
-}
-
-ARRAY_DEFAULT_CAPACITY :: 16
-
-/*
-array_init :: proc {
- array_init_none,
- array_init_len,
- array_init_len_cap,
-}
-array_init
-array_delete
-array_len
-array_cap
-array_space
-array_slice
-array_get
-array_get_ptr
-array_set
-array_reserve
-array_resize
-array_push = array_append :: proc{
- array_push_back,
- array_push_back_elems,
-}
-array_push_front
-array_pop_back
-array_pop_front
-array_consume
-array_trim
-array_clear
-array_clone
-array_set_capacity
-array_grow
-*/
-
-
-array_init_none :: proc(a: ^$A/Array, allocator := context.allocator) {
- array_init_len_cap(a, 0, ARRAY_DEFAULT_CAPACITY, allocator)
-}
-array_init_len :: proc(a: ^$A/Array, len: int, allocator := context.allocator) {
- array_init_len_cap(a, len, len, allocator)
-}
-array_init_len_cap :: proc(a: ^$A/Array($T), len: int, cap: int, allocator := context.allocator) {
- a.allocator = allocator
- a.data = (^T)(mem.alloc(size_of(T)*cap, align_of(T), a.allocator))
- a.len = len
- a.cap = cap
-}
-
-array_init :: proc{array_init_none, array_init_len, array_init_len_cap}
-
-array_delete :: proc(a: $A/Array) {
- mem.free(a.data, a.allocator)
-}
-
-array_len :: proc(a: $A/Array) -> int {
- return a.len
-}
-
-array_cap :: proc(a: $A/Array) -> int {
- return a.cap
-}
-
-array_space :: proc(a: $A/Array) -> int {
- return a.cap - a.len
-}
-
-array_slice :: proc(a: $A/Array($T)) -> []T {
- s := mem.Raw_Slice{a.data, a.len}
- return transmute([]T)s
-}
-
-array_cap_slice :: proc(a: $A/Array($T)) -> []T {
- s := mem.Raw_Slice{a.data, a.cap}
- return transmute([]T)s
-}
-
-array_get :: proc(a: $A/Array($T), index: int, loc := #caller_location) -> T {
- runtime.bounds_check_error_loc(loc, index, array_len(a))
- return (^T)(uintptr(a.data) + size_of(T)*uintptr(index))^
-}
-array_get_ptr :: proc(a: $A/Array($T), index: int, loc := #caller_location) -> ^T {
- runtime.bounds_check_error_loc(loc, index, array_len(a))
- return (^T)(uintptr(a.data) + size_of(T)*uintptr(index))
-}
-
-array_set :: proc(a: ^$A/Array($T), index: int, item: T, loc := #caller_location) {
- runtime.bounds_check_error_loc(loc, index, array_len(a^))
- (^T)(uintptr(a.data) + size_of(T)*uintptr(index))^ = item
-}
-
-
-array_reserve :: proc(a: ^$A/Array, capacity: int) {
- if capacity > a.len {
- array_set_capacity(a, capacity)
- }
-}
-
-array_resize :: proc(a: ^$A/Array, length: int) {
- if length > a.len {
- array_set_capacity(a, length)
- }
- a.len = length
-}
-
-
-
-array_push_back :: proc(a: ^$A/Array($T), item: T) {
- if array_space(a^) == 0 {
- array_grow(a)
- }
-
- a.len += 1
- array_set(a, a.len-1, item)
-}
-
-array_push_front :: proc(a: ^$A/Array($T), item: T) {
- if array_space(a^) == 0 {
- array_grow(a)
- }
-
- a.len += 1
- data := array_slice(a^)
- copy(data[1:], data[:])
- data[0] = item
-}
-
-array_pop_back :: proc(a: ^$A/Array($T), loc := #caller_location) -> T {
- assert(condition=a.len > 0, loc=loc)
- item := array_get(a^, a.len-1)
- a.len -= 1
- return item
-}
-
-array_pop_front :: proc(a: ^$A/Array($T), loc := #caller_location) -> T {
- assert(condition=a.len > 0, loc=loc)
- item := array_get(a^, 0)
- s := array_slice(a^)
- copy(s[:], s[1:])
- a.len -= 1
- return item
-}
-
-
-array_consume :: proc(a: ^$A/Array($T), count: int, loc := #caller_location) {
- assert(condition=a.len >= count, loc=loc)
- a.len -= count
-}
-
-
-array_trim :: proc(a: ^$A/Array($T)) {
- array_set_capacity(a, a.len)
-}
-
-array_clear :: proc(a: ^$A/Array($T)) {
- array_resize(a, 0)
-}
-
-array_clone :: proc(a: $A/Array($T), allocator := context.allocator) -> A {
- res: A
- array_init(&res, array_len(a), array_len(a), allocator)
- copy(array_slice(res), array_slice(a))
- return res
-}
-
-array_push_back_elems :: proc(a: ^$A/Array($T), items: ..T) {
- if array_space(a^) < len(items) {
- array_grow(a, a.len + len(items))
- }
- offset := a.len
- data := array_cap_slice(a^)
- n := copy(data[a.len:], items)
- a.len += n
-}
-
-array_push :: proc{array_push_back, array_push_back_elems}
-array_append :: proc{array_push_back, array_push_back_elems}
-
-array_set_capacity :: proc(a: ^$A/Array($T), new_capacity: int) {
- if new_capacity == a.cap {
- return
- }
-
- if new_capacity < a.len {
- array_resize(a, new_capacity)
- }
-
- new_data: ^T
- if new_capacity > 0 {
- if a.allocator.procedure == nil {
- a.allocator = context.allocator
- }
- new_data = (^T)(mem.alloc(size_of(T)*new_capacity, align_of(T), a.allocator))
- if new_data != nil {
- mem.copy(new_data, a.data, size_of(T)*a.len)
- }
- }
- mem.free(a.data, a.allocator)
- a.data = new_data
- a.cap = new_capacity
-}
-array_grow :: proc(a: ^$A/Array, min_capacity: int = 0) {
- new_capacity := max(array_len(a^)*2 + 8, min_capacity)
- array_set_capacity(a, new_capacity)
-}
diff --git a/core/container/bloom_filter.odin b/core/container/bloom_filter.odin
deleted file mode 100644
index 8af7aeb85..000000000
--- a/core/container/bloom_filter.odin
+++ /dev/null
@@ -1,80 +0,0 @@
-package container
-
-import "core:mem"
-
-Bloom_Hash_Proc :: #type proc(data: []byte) -> u32
-
-Bloom_Hash :: struct {
- hash_proc: Bloom_Hash_Proc,
- next: ^Bloom_Hash,
-}
-
-Bloom_Filter :: struct {
- allocator: mem.Allocator,
- hash: ^Bloom_Hash,
- bits: []byte,
-}
-
-bloom_filter_init :: proc(b: ^Bloom_Filter, size: int, allocator := context.allocator) {
- b.allocator = allocator
- b.bits = make([]byte, size, allocator)
-}
-
-bloom_filter_destroy :: proc(b: ^Bloom_Filter) {
- context.allocator = b.allocator
- delete(b.bits)
- for b.hash != nil {
- hash := b.hash
- b.hash = b.hash.next
- free(hash)
- }
-}
-
-bloom_filter_add_hash_proc :: proc(b: ^Bloom_Filter, hash_proc: Bloom_Hash_Proc) {
- context.allocator = b.allocator
- h := new(Bloom_Hash)
- h.hash_proc = hash_proc
-
- head := &b.hash
- for head^ != nil {
- head = &(head^.next)
- }
- head^ = h
-}
-
-bloom_filter_add :: proc(b: ^Bloom_Filter, item: []byte) {
- #no_bounds_check for h := b.hash; h != nil; h = h.next {
- hash := h.hash_proc(item)
- hash %= u32(len(b.bits) * 8)
- b.bits[hash >> 3] |= 1 << (hash & 3)
- }
-}
-
-bloom_filter_add_string :: proc(b: ^Bloom_Filter, item: string) {
- bloom_filter_add(b, transmute([]byte)item)
-}
-
-bloom_filter_add_raw :: proc(b: ^Bloom_Filter, data: rawptr, size: int) {
- item := mem.slice_ptr((^byte)(data), size)
- bloom_filter_add(b, item)
-}
-
-bloom_filter_test :: proc(b: ^Bloom_Filter, item: []byte) -> bool {
- #no_bounds_check for h := b.hash; h != nil; h = h.next {
- hash := h.hash_proc(item)
- hash %= u32(len(b.bits) * 8)
- if (b.bits[hash >> 3] & (1 << (hash & 3)) == 0) {
- return false
- }
- }
- return true
-}
-
-bloom_filter_test_string :: proc(b: ^Bloom_Filter, item: string) -> bool {
- return bloom_filter_test(b, transmute([]byte)item)
-}
-
-bloom_filter_test_raw :: proc(b: ^Bloom_Filter, data: rawptr, size: int) -> bool {
- item := mem.slice_ptr((^byte)(data), size)
- return bloom_filter_test(b, item)
-}
diff --git a/core/container/map.odin b/core/container/map.odin
deleted file mode 100644
index 54cbc22fc..000000000
--- a/core/container/map.odin
+++ /dev/null
@@ -1,377 +0,0 @@
-package container
-
-import "core:intrinsics"
-_ :: intrinsics
-
-
-Map :: struct($Key, $Value: typeid) where intrinsics.type_is_valid_map_key(Key) {
- hash: Array(int),
- entries: Array(Map_Entry(Key, Value)),
-}
-
-Map_Entry :: struct($Key, $Value: typeid) where intrinsics.type_is_valid_map_key(Key) {
- hash: uintptr,
- next: int,
- key: Key,
- value: Value,
-}
-
-
-/*
-map_init :: proc{
- map_init_none,
- map_init_cap,
-}
-map_delete
-
-map_has
-map_get
-map_get_default
-map_get_ptr
-map_set
-map_remove
-map_reserve
-map_clear
-
-// Multi Map
-
-multi_map_find_first
-multi_map_find_next
-multi_map_count
-multi_map_get :: proc{
- multi_map_get_array,
- multi_map_get_slice,
-};
-multi_map_get_as_slice
-multi_map_insert
-multi_map_remove
-multi_map_remove_all
-
-*/
-
-map_init :: proc{map_init_none, map_init_cap}
-
-map_init_none :: proc(m: ^$M/Map($Key, $Value), allocator := context.allocator) {
- m.hash.allocator = allocator
- m.entries.allocator = allocator
-}
-
-map_init_cap :: proc(m: ^$M/Map($Key, $Value), cap: int, allocator := context.allocator) {
- m.hash.allocator = allocator
- m.entries.allocator = allocator
- map_reserve(m, cap)
-}
-
-map_delete :: proc(m: $M/Map($Key, $Value)) {
- array_delete(m.hash)
- array_delete(m.entries)
-}
-
-
-map_has :: proc(m: $M/Map($Key, $Value), key: Key) -> bool {
- return _map_find_or_fail(m, key) >= 0
-}
-
-map_get :: proc(m: $M/Map($Key, $Value), key: Key) -> (res: Value, ok: bool) #optional_ok {
- i := _map_find_or_fail(m, key)
- if i < 0 {
- return {}, false
- }
- return array_get(m.entries, i).value, true
-}
-
-map_get_default :: proc(m: $M/Map($Key, $Value), key: Key, default: Value) -> (res: Value, ok: bool) #optional_ok {
- i := _map_find_or_fail(m, key)
- if i < 0 {
- return default, false
- }
- return array_get(m.entries, i).value, true
-}
-
-map_get_ptr :: proc(m: $M/Map($Key, $Value), key: Key) -> ^Value {
- i := _map_find_or_fail(m, key)
- if i < 0 {
- return nil
- }
- return array_get_ptr(m.entries, i).value
-}
-
-map_set :: proc(m: ^$M/Map($Key, $Value), key: Key, value: Value) {
- if array_len(m.hash) == 0 {
- _map_grow(m)
- }
-
- i := _map_find_or_make(m, key)
- array_get_ptr(m.entries, i).value = value
- if _map_full(m^) {
- _map_grow(m)
- }
-}
-
-map_remove :: proc(m: ^$M/Map($Key, $Value), key: Key) {
- fr := _map_find_key(m^, key)
- if fr.entry_index >= 0 {
- _map_erase(m, fr)
- }
-}
-
-
-map_reserve :: proc(m: ^$M/Map($Key, $Value), new_size: int) {
- nm: M
- map_init(&nm, m.hash.allocator)
- array_resize(&nm.hash, new_size)
- array_reserve(&nm.entries, array_len(m.entries))
-
- for i in 0.. ^Map_Entry(Key, Value) {
- i := _map_find_or_fail(m, key)
- if i < 0 {
- return nil
- }
- return array_get_ptr(m.entries, i)
-}
-
-multi_map_find_next :: proc(m: $M/Map($Key, $Value), e: ^Map_Entry(Key, Value)) -> ^Map_Entry(Key, Value) {
- i := e.next
- for i >= 0 {
- it := array_get_ptr(m.entries, i)
- if it.hash == e.hash && it.key == e.key {
- return it
- }
- i = it.next
- }
- return nil
-}
-
-multi_map_count :: proc(m: $M/Map($Key, $Value), key: Key) -> int {
- n := 0
- e := multi_map_find_first(m, key)
- for e != nil {
- n += 1
- e = multi_map_find_next(m, e)
- }
- return n
-}
-
-multi_map_get :: proc{multi_map_get_array, multi_map_get_slice}
-
-multi_map_get_array :: proc(m: $M/Map($Key, $Value), key: Key, items: ^Array(Value)) {
- if items == nil {
- return
- }
- e := multi_map_find_first(m, key)
- for e != nil {
- array_append(items, e.value)
- e = multi_map_find_next(m, e)
- }
-}
-
-multi_map_get_slice :: proc(m: $M/Map($Key, $Value), key: Key, items: []Value) {
- e := multi_map_find_first(m, key)
- i := 0
- for e != nil && i < len(items) {
- items[i] = e.value
- i += 1
- e = multi_map_find_next(m, e)
- }
-}
-
-multi_map_get_as_slice :: proc(m: $M/Map($Key, $Value), key: Key) -> []Value {
- items: Array(Value)
- array_init(&items, 0)
-
- e := multi_map_find_first(m, key)
- for e != nil {
- array_append(&items, e.value)
- e = multi_map_find_next(m, e)
- }
-
- return array_slice(items)
-}
-
-
-multi_map_insert :: proc(m: ^$M/Map($Key, $Value), key: Key, value: Value) {
- if array_len(m.hash) == 0 {
- _map_grow(m)
- }
-
- i := _map_make(m, key)
- array_get_ptr(m.entries, i).value = value
- if _map_full(m^) {
- _map_grow(m)
- }
-}
-
-multi_map_remove :: proc(m: ^$M/Map($Key, $Value), e: ^Map_Entry(Key, Value)) {
- fr := _map_find_entry(m, e)
- if fr.entry_index >= 0 {
- _map_erase(m, fr)
- }
-}
-
-multi_map_remove_all :: proc(m: ^$M/Map($Key, $Value), key: Key) {
- for map_exist(m^, key) {
- map_remove(m, key)
- }
-}
-
-
-/// Internal
-
-
-Map_Find_Result :: struct {
- hash_index: int,
- entry_prev: int,
- entry_index: int,
-}
-
-_map_add_entry :: proc(m: ^$M/Map($Key, $Value), key: Key) -> int where intrinsics.type_is_valid_map_key(Key) {
- hasher := intrinsics.type_hasher_proc(Key)
-
- e: Map_Entry(Key, Value)
- e.key = key
- e.hash = hasher(&e.key, 0)
- e.next = -1
- idx := array_len(m.entries)
- array_push(&m.entries, e)
- return idx
-}
-
-_map_erase :: proc(m: ^$M/Map, fr: Map_Find_Result) {
- if fr.entry_prev < 0 {
- array_set(&m.hash, fr.hash_index, array_get(m.entries, fr.entry_index).next)
- } else {
- array_get_ptr(m.entries, fr.entry_prev).next = array_get(m.entries, fr.entry_index).next
- }
-
- if fr.entry_index == array_len(m.entries)-1 {
- array_pop_back(&m.entries)
- return
- }
-
- array_set(&m.entries, fr.entry_index, array_get(m.entries, array_len(m.entries)-1))
- last := _map_find_key(m^, array_get(m.entries, fr.entry_index).key)
-
- if last.entry_prev < 0 {
- array_get_ptr(m.entries, last.entry_prev).next = fr.entry_index
- } else {
- array_set(&m.hash, last.hash_index, fr.entry_index)
- }
-}
-
-
-_map_find_key :: proc(m: $M/Map($Key, $Value), key: Key) -> Map_Find_Result where intrinsics.type_is_valid_map_key(Key) {
- fr: Map_Find_Result
- fr.hash_index = -1
- fr.entry_prev = -1
- fr.entry_index = -1
-
- if array_len(m.hash) == 0 {
- return fr
- }
-
- hasher := intrinsics.type_hasher_proc(Key)
-
- key := key
- hash := hasher(&key, 0)
-
- fr.hash_index = int(hash % uintptr(array_len(m.hash)))
- fr.entry_index = array_get(m.hash, fr.hash_index)
- for fr.entry_index >= 0 {
- it := array_get_ptr(m.entries, fr.entry_index)
- if it.hash == hash && it.key == key {
- return fr
- }
- fr.entry_prev = fr.entry_index
- fr.entry_index = it.next
- }
- return fr
-}
-
-_map_find_entry :: proc(m: ^$M/Map($Key, $Value), e: ^Map_Entry(Key, Value)) -> Map_Find_Result {
- fr: Map_Find_Result
- fr.hash_index = -1
- fr.entry_prev = -1
- fr.entry_index = -1
-
- if array_len(m.hash) == 0 {
- return fr
- }
-
- fr.hash_index = int(e.hash % uintptr(array_len(m.hash)))
- fr.entry_index = array_get(m.hash, fr.hash_index)
- for fr.entry_index >= 0 {
- it := array_get_ptr(m.entries, fr.entry_index)
- if it == e {
- return fr
- }
- fr.entry_prev = fr.entry_index
- fr.entry_index = it.next
- }
- return fr
-}
-
-_map_find_or_fail :: proc(m: $M/Map($Key, $Value), key: Key) -> int {
- return _map_find_key(m, key).entry_index
-}
-_map_find_or_make :: proc(m: ^$M/Map($Key, $Value), key: Key) -> int {
- fr := _map_find_key(m^, key)
- if fr.entry_index >= 0 {
- return fr.entry_index
- }
-
- i := _map_add_entry(m, key)
- if fr.entry_prev < 0 {
- array_set(&m.hash, fr.hash_index, i)
- } else {
- array_get_ptr(m.entries, fr.entry_prev).next = i
- }
- return i
-}
-
-
-_map_make :: proc(m: ^$M/Map($Key, $Value), key: Key) -> int {
- fr := _map_find_key(m^, key)
- i := _map_add_entry(m, key)
-
- if fr.entry_prev < 0 {
- array_set(&m.hash, fr.hash_index, i)
- } else {
- array_get_ptr(m.entries, fr.entry_prev).next = i
- }
-
- array_get_ptr(m.entries, i).next = fr.entry_index
-
- return i
-}
-
-
-_map_full :: proc(m: $M/Map($Key, $Value)) -> bool {
- // TODO(bill): Determine good max load factor
- return array_len(m.entries) >= (array_len(m.hash) / 4)*3
-}
-
-_map_grow :: proc(m: ^$M/Map($Key, $Value)) {
- new_size := array_len(m.entries) * 4 + 7 // TODO(bill): Determine good grow rate
- map_reserve(m, new_size)
-}
-
-
diff --git a/core/container/priority_queue.odin b/core/container/priority_queue.odin
deleted file mode 100644
index c54e964a6..000000000
--- a/core/container/priority_queue.odin
+++ /dev/null
@@ -1,121 +0,0 @@
-package container
-
-Priority_Queue :: struct($T: typeid) {
- data: Array(T),
- len: int,
- priority: proc(item: T) -> int,
-}
-
-priority_queue_init_none :: proc(q: ^$Q/Priority_Queue($T), f: proc(item: T) -> int, allocator := context.allocator) {
- queue_init_len(q, f, 0, allocator)
-}
-priority_queue_init_len :: proc(q: ^$Q/Priority_Queue($T), f: proc(item: T) -> int, len: int, allocator := context.allocator) {
- queue_init_len_cap(q, f, 0, 16, allocator)
-}
-priority_queue_init_len_cap :: proc(q: ^$Q/Priority_Queue($T), f: proc(item: T) -> int, len: int, cap: int, allocator := context.allocator) {
- array_init(&q.data, len, cap, allocator)
- q.len = len
- q.priority = f
-}
-
-priority_queue_init :: proc{priority_queue_init_none, priority_queue_init_len, priority_queue_init_len_cap}
-
-
-priority_queue_delete :: proc(q: $Q/Priority_Queue($T)) {
- array_delete(q.data)
-}
-
-priority_queue_clear :: proc(q: ^$Q/Priority_Queue($T)) {
- q.len = 0
-}
-
-priority_queue_len :: proc(q: $Q/Priority_Queue($T)) -> int {
- return q.len
-}
-
-priority_queue_cap :: proc(q: $Q/Priority_Queue($T)) -> int {
- return array_cap(q.data)
-}
-
-priority_queue_space :: proc(q: $Q/Priority_Queue($T)) -> int {
- return array_len(q.data) - q.len
-}
-
-priority_queue_reserve :: proc(q: ^$Q/Priority_Queue($T), capacity: int) {
- if capacity > q.len {
- array_resize(&q.data, new_capacity)
- }
-}
-
-priority_queue_resize :: proc(q: ^$Q/Priority_Queue($T), length: int) {
- if length > q.len {
- array_resize(&q.data, new_capacity)
- }
- q.len = length
-}
-
-_priority_queue_grow :: proc(q: ^$Q/Priority_Queue($T), min_capacity: int = 0) {
- new_capacity := max(array_len(q.data)*2 + 8, min_capacity)
- array_resize(&q.data, new_capacity)
-}
-
-
-priority_queue_push :: proc(q: ^$Q/Priority_Queue($T), item: T) {
- if array_len(q.data) - q.len == 0 {
- _priority_queue_grow(q)
- }
-
- s := array_slice(q.data)
- s[q.len] = item
-
- i := q.len
- for i > 0 {
- p := (i - 1) / 2
- if q.priority(s[p]) <= q.priority(item) {
- break
- }
- s[i] = s[p]
- i = p
- }
-
- q.len += 1
- if q.len > 0 {
- s[i] = item
- }
-}
-
-
-
-priority_queue_pop :: proc(q: ^$Q/Priority_Queue($T)) -> T {
- assert(q.len > 0)
-
- s := array_slice(q.data)
- min := s[0]
- root := s[q.len-1]
- q.len -= 1
-
- i := 0
- for i * 2 + 1 < q.len {
- a := i * 2 + 1
- b := i * 2 + 2
- c := b < q.len && q.priority(s[b]) < q.priority(s[a]) ? b : a
-
- if q.priority(s[c]) >= q.priority(root) {
- break
- }
- s[i] = s[c]
- i = c
- }
-
- if q.len > 0 {
- s[i] = root
- }
- return min
-}
-
-priority_queue_peek :: proc(q: ^$Q/Priority_Queue($T)) -> T {
- assert(q.len > 0)
-
- s := array_slice(q.data)
- return s[0]
-}
diff --git a/core/container/queue.odin b/core/container/queue.odin
deleted file mode 100644
index bab4a18e6..000000000
--- a/core/container/queue.odin
+++ /dev/null
@@ -1,175 +0,0 @@
-package container
-
-Queue :: struct($T: typeid) {
- data: Array(T),
- len: int,
- offset: int,
-}
-
-/*
-queue_init :: proc{
- queue_init_none,
- queue_init_len,
- queue_init_len_cap,
-}
-queue_delete
-queue_clear
-queue_len
-queue_cap
-queue_space
-queue_get
-queue_set
-queue_reserve
-queue_resize
-queue_push :: proc{
- queue_push_back,
- queue_push_elems,
-};
-queue_push_front
-queue_pop_front
-queue_pop_back
-queue_consume
-*/
-
-queue_init_none :: proc(q: ^$Q/Queue($T), allocator := context.allocator) {
- queue_init_len(q, 0, allocator)
-}
-queue_init_len :: proc(q: ^$Q/Queue($T), len: int, allocator := context.allocator) {
- queue_init_len_cap(q, 0, 16, allocator)
-}
-queue_init_len_cap :: proc(q: ^$Q/Queue($T), len: int, cap: int, allocator := context.allocator) {
- array_init(&q.data, len, cap, allocator)
- q.len = len
- q.offset = 0
-}
-
-queue_init :: proc{queue_init_none, queue_init_len, queue_init_len_cap}
-
-queue_delete :: proc(q: $Q/Queue($T)) {
- array_delete(q.data)
-}
-
-queue_clear :: proc(q: ^$Q/Queue($T)) {
- q.len = 0
-}
-
-queue_len :: proc(q: $Q/Queue($T)) -> int {
- return q.len
-}
-
-queue_cap :: proc(q: $Q/Queue($T)) -> int {
- return array_cap(q.data)
-}
-
-queue_space :: proc(q: $Q/Queue($T)) -> int {
- return array_len(q.data) - q.len
-}
-
-queue_get :: proc(q: $Q/Queue($T), index: int) -> T {
- i := (index + q.offset) % array_len(q.data)
- data := array_slice(q.data)
- return data[i]
-}
-
-queue_set :: proc(q: ^$Q/Queue($T), index: int, item: T) {
- i := (index + q.offset) % array_len(q.data)
- data := array_slice(q.data)
- data[i] = item
-}
-
-
-queue_reserve :: proc(q: ^$Q/Queue($T), capacity: int) {
- if capacity > q.len {
- _queue_increase_capacity(q, capacity)
- }
-}
-
-queue_resize :: proc(q: ^$Q/Queue($T), length: int) {
- if length > q.len {
- _queue_increase_capacity(q, length)
- }
- q.len = length
-}
-
-queue_push_back :: proc(q: ^$Q/Queue($T), item: T) {
- if queue_space(q^) == 0 {
- _queue_grow(q)
- }
-
- queue_set(q, q.len, item)
- q.len += 1
-}
-
-queue_push_front :: proc(q: ^$Q/Queue($T), item: T) {
- if queue_space(q^) == 0 {
- _queue_grow(q)
- }
-
- q.offset = (q.offset - 1 + array_len(q.data)) % array_len(q.data)
- q.len += 1
- queue_set(q, 0, item)
-}
-
-queue_pop_front :: proc(q: ^$Q/Queue($T)) -> T {
- assert(q.len > 0)
- item := queue_get(q^, 0)
- q.offset = (q.offset + 1) % array_len(q.data)
- q.len -= 1
- if q.len == 0 {
- q.offset = 0
- }
- return item
-}
-
-queue_pop_back :: proc(q: ^$Q/Queue($T)) -> T {
- assert(q.len > 0)
- item := queue_get(q^, q.len-1)
- q.len -= 1
- return item
-}
-
-queue_consume :: proc(q: ^$Q/Queue($T), count: int) {
- q.offset = (q.offset + count) & array_len(q.data)
- q.len -= count
-}
-
-
-queue_push_elems :: proc(q: ^$Q/Queue($T), items: ..T) {
- if queue_space(q^) < len(items) {
- _queue_grow(q, q.len + len(items))
- }
- size := array_len(q.data)
- insert := (q.offset + q.len) % size
-
- to_insert := len(items)
- if insert + to_insert > size {
- to_insert = size - insert
- }
-
- the_items := items[:]
-
- data := array_slice(q.data)
-
- q.len += copy(data[insert:][:to_insert], the_items)
- the_items = the_items[to_insert:]
- q.len += copy(data[:], the_items)
-}
-
-queue_push :: proc{queue_push_back, queue_push_elems}
-
-
-
-_queue_increase_capacity :: proc(q: ^$Q/Queue($T), new_capacity: int) {
- end := array_len(q.data)
- array_resize(&q.data, new_capacity)
- if q.offset + q.len > end {
- end_items := q.len + end
- data := array_slice(q.data)
- copy(data[new_capacity-end_items:][:end_items], data[q.offset:][:end_items])
- q.offset += new_capacity - end
- }
-}
-_queue_grow :: proc(q: ^$Q/Queue($T), min_capacity: int = 0) {
- new_capacity := max(array_len(q.data)*2 + 8, min_capacity)
- _queue_increase_capacity(q, new_capacity)
-}
diff --git a/core/container/ring.odin b/core/container/ring.odin
deleted file mode 100644
index 61492ec84..000000000
--- a/core/container/ring.odin
+++ /dev/null
@@ -1,74 +0,0 @@
-package container
-
-
-Ring :: struct($T: typeid) {
- next, prev: ^Ring(T),
- value: T,
-}
-
-ring_init :: proc(r: ^$R/Ring) -> ^R {
- r.prev, r.next = r, r
- return r
-}
-
-ring_next :: proc(r: ^$R/Ring) -> ^R {
- if r.next == nil {
- return ring_init(r)
- }
- return r.next
-}
-ring_prev :: proc(r: ^$R/Ring) -> ^R {
- if r.prev == nil {
- return ring_init(r)
- }
- return r.prev
-}
-
-
-ring_move :: proc(r: ^$R/Ring, n: int) -> ^R {
- r := r
- if r.next == nil {
- return ring_init(r)
- }
-
- switch {
- case n < 0:
- for _ in n..<0 {
- r = r.prev
- }
- case n > 0:
- for _ in 0.. ^R {
- n := ring_next(r)
- if s != nil {
- p := ring_prev(s)
- r.next = s
- s.prev = r
- n.prev = p
- p.next = n
- }
- return n
-}
-ring_unlink :: proc(r: ^$R/Ring, n: int) -> ^R {
- if n <= 0 {
- return nil
- }
- return ring_link(r, ring_move(r, n+1))
-}
-ring_len :: proc(r: ^$R/Ring) -> int {
- n := 0
- if r != nil {
- n = 1
- for p := ring_next(r); p != r; p = p.next {
- n += 1
- }
- }
- return n
-}
-
diff --git a/core/container/set.odin b/core/container/set.odin
deleted file mode 100644
index 562ac5409..000000000
--- a/core/container/set.odin
+++ /dev/null
@@ -1,240 +0,0 @@
-package container
-
-Set :: struct {
- hash: Array(int),
- entries: Array(Set_Entry),
-}
-
-Set_Entry :: struct {
- key: u64,
- next: int,
-}
-
-
-/*
-set_init :: proc{
- set_init_none,
- set_init_cap,
-}
-set_delete
-
-set_in
-set_not_in
-set_add
-set_remove
-set_reserve
-set_clear
-*/
-
-set_init :: proc{set_init_none, set_init_cap}
-
-set_init_none :: proc(m: ^Set, allocator := context.allocator) {
- m.hash.allocator = allocator
- m.entries.allocator = allocator
-}
-
-set_init_cap :: proc(m: ^Set, cap: int, allocator := context.allocator) {
- m.hash.allocator = allocator
- m.entries.allocator = allocator
- set_reserve(m, cap)
-}
-
-set_delete :: proc(m: Set) {
- array_delete(m.hash)
- array_delete(m.entries)
-}
-
-
-set_in :: proc(m: Set, key: u64) -> bool {
- return _set_find_or_fail(m, key) >= 0
-}
-set_not_in :: proc(m: Set, key: u64) -> bool {
- return _set_find_or_fail(m, key) < 0
-}
-
-set_add :: proc(m: ^Set, key: u64) {
- if array_len(m.hash) == 0 {
- _set_grow(m)
- }
-
- _ = _set_find_or_make(m, key)
- if _set_full(m^) {
- _set_grow(m)
- }
-}
-
-set_remove :: proc(m: ^Set, key: u64) {
- fr := _set_find_key(m^, key)
- if fr.entry_index >= 0 {
- _set_erase(m, fr)
- }
-}
-
-
-set_reserve :: proc(m: ^Set, new_size: int) {
- nm: Set
- set_init(&nm, m.hash.allocator)
- array_resize(&nm.hash, new_size)
- array_reserve(&nm.entries, array_len(m.entries))
-
- for i in 0.. bool {
- a_entries := array_slice(a.entries)
- b_entries := array_slice(b.entries)
- if len(a_entries) != len(b_entries) {
- return false
- }
- for e in a_entries {
- if set_not_in(b, e.key) {
- return false
- }
- }
-
- return true
-}
-
-
-
-/// Internal
-
-_set_add_entry :: proc(m: ^Set, key: u64) -> int {
- e: Set_Entry
- e.key = key
- e.next = -1
- idx := array_len(m.entries)
- array_push(&m.entries, e)
- return idx
-}
-
-_set_erase :: proc(m: ^Set, fr: Map_Find_Result) {
- if fr.entry_prev < 0 {
- array_set(&m.hash, fr.hash_index, array_get(m.entries, fr.entry_index).next)
- } else {
- array_get_ptr(m.entries, fr.entry_prev).next = array_get(m.entries, fr.entry_index).next
- }
-
- if fr.entry_index == array_len(m.entries)-1 {
- array_pop_back(&m.entries)
- return
- }
-
- array_set(&m.entries, fr.entry_index, array_get(m.entries, array_len(m.entries)-1))
- last := _set_find_key(m^, array_get(m.entries, fr.entry_index).key)
-
- if last.entry_prev < 0 {
- array_get_ptr(m.entries, last.entry_prev).next = fr.entry_index
- } else {
- array_set(&m.hash, last.hash_index, fr.entry_index)
- }
-}
-
-
-_set_find_key :: proc(m: Set, key: u64) -> Map_Find_Result {
- fr: Map_Find_Result
- fr.hash_index = -1
- fr.entry_prev = -1
- fr.entry_index = -1
-
- if array_len(m.hash) == 0 {
- return fr
- }
-
- fr.hash_index = int(key % u64(array_len(m.hash)))
- fr.entry_index = array_get(m.hash, fr.hash_index)
- for fr.entry_index >= 0 {
- it := array_get_ptr(m.entries, fr.entry_index)
- if it.key == key {
- return fr
- }
- fr.entry_prev = fr.entry_index
- fr.entry_index = it.next
- }
- return fr
-}
-
-_set_find_entry :: proc(m: ^Set, e: ^Set_Entry) -> Map_Find_Result {
- fr: Map_Find_Result
- fr.hash_index = -1
- fr.entry_prev = -1
- fr.entry_index = -1
-
- if array_len(m.hash) == 0 {
- return fr
- }
-
- fr.hash_index = int(e.key % u64(array_len(m.hash)))
- fr.entry_index = array_get(m.hash, fr.hash_index)
- for fr.entry_index >= 0 {
- it := array_get_ptr(m.entries, fr.entry_index)
- if it == e {
- return fr
- }
- fr.entry_prev = fr.entry_index
- fr.entry_index = it.next
- }
- return fr
-}
-
-_set_find_or_fail :: proc(m: Set, key: u64) -> int {
- return _set_find_key(m, key).entry_index
-}
-_set_find_or_make :: proc(m: ^Set, key: u64) -> int {
- fr := _set_find_key(m^, key)
- if fr.entry_index >= 0 {
- return fr.entry_index
- }
-
- i := _set_add_entry(m, key)
- if fr.entry_prev < 0 {
- array_set(&m.hash, fr.hash_index, i)
- } else {
- array_get_ptr(m.entries, fr.entry_prev).next = i
- }
- return i
-}
-
-
-_set_make :: proc(m: ^Set, key: u64) -> int {
- fr := _set_find_key(m^, key)
- i := _set_add_entry(m, key)
-
- if fr.entry_prev < 0 {
- array_set(&m.hash, fr.hash_index, i)
- } else {
- array_get_ptr(m.entries, fr.entry_prev).next = i
- }
-
- array_get_ptr(m.entries, i).next = fr.entry_index
-
- return i
-}
-
-
-_set_full :: proc(m: Set) -> bool {
- // TODO(bill): Determine good max load factor
- return array_len(m.entries) >= (array_len(m.hash) / 4)*3
-}
-
-_set_grow :: proc(m: ^Set) {
- new_size := array_len(m.entries) * 4 + 7 // TODO(bill): Determine good grow rate
- set_reserve(m, new_size)
-}
-
-
diff --git a/core/container/small_array.odin b/core/container/small_array.odin
deleted file mode 100644
index 43b879d2d..000000000
--- a/core/container/small_array.odin
+++ /dev/null
@@ -1,95 +0,0 @@
-package container
-
-Small_Array :: struct($N: int, $T: typeid) where N >= 0 {
- data: [N]T,
- len: int,
-}
-
-
-small_array_len :: proc(a: $A/Small_Array) -> int {
- return a.len
-}
-
-small_array_cap :: proc(a: $A/Small_Array) -> int {
- return len(a.data)
-}
-
-small_array_space :: proc(a: $A/Small_Array) -> int {
- return len(a.data) - a.len
-}
-
-small_array_slice :: proc(a: ^$A/Small_Array($N, $T)) -> []T {
- return a.data[:a.len]
-}
-
-
-small_array_get :: proc(a: $A/Small_Array($N, $T), index: int, loc := #caller_location) -> T {
- return a.data[index]
-}
-small_array_get_ptr :: proc(a: $A/Small_Array($N, $T), index: int, loc := #caller_location) -> ^T {
- return &a.data[index]
-}
-
-small_array_set :: proc(a: ^$A/Small_Array($N, $T), index: int, item: T, loc := #caller_location) {
- a.data[index] = item
-}
-
-small_array_resize :: proc(a: ^$A/Small_Array, length: int) {
- a.len = min(length, len(a.data))
-}
-
-
-small_array_push_back :: proc(a: ^$A/Small_Array($N, $T), item: T) -> bool {
- if a.len < len(a.data) {
- a.len += 1
- a.data[a.len-1] = item
- return true
- }
- return false
-}
-
-small_array_push_front :: proc(a: ^$A/Small_Array($N, $T), item: T) -> bool {
- if a.len < len(a.data) {
- a.len += 1
- data := small_array_slice(a)
- copy(data[1:], data[:])
- data[0] = item
- return true
- }
- return false
-}
-
-small_array_pop_back :: proc(a: ^$A/Small_Array($N, $T), loc := #caller_location) -> T {
- assert(condition=a.len > 0, loc=loc)
- item := a.data[a.len-1]
- a.len -= 1
- return item
-}
-
-small_array_pop_front :: proc(a: ^$A/Small_Array($N, $T), loc := #caller_location) -> T {
- assert(condition=a.len > 0, loc=loc)
- item := a.data[0]
- s := small_array_slice(a)
- copy(s[:], s[1:])
- a.len -= 1
- return item
-}
-
-
-small_array_consume :: proc(a: ^$A/Small_Array($N, $T), count: int, loc := #caller_location) {
- assert(condition=a.len >= count, loc=loc)
- a.len -= count
-}
-
-small_array_clear :: proc(a: ^$A/Small_Array($N, $T)) {
- small_array_resize(a, 0)
-}
-
-small_array_push_back_elems :: proc(a: ^$A/Small_Array($N, $T), items: ..T) {
- n := copy(a.data[a.len:], items[:])
- a.len += n
-}
-
-small_array_push :: proc{small_array_push_back, small_array_push_back_elems}
-small_array_append :: proc{small_array_push_back, small_array_push_back_elems}
-
From dbf42d2469eec3980c6525c784aa6e594f49565e Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Tue, 28 Dec 2021 14:16:27 +0000
Subject: [PATCH 032/710] make `slice.as_ptr` return `[^]E`
---
core/slice/slice.odin | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/core/slice/slice.odin b/core/slice/slice.odin
index a82c5fa96..c06e796ce 100644
--- a/core/slice/slice.odin
+++ b/core/slice/slice.odin
@@ -273,7 +273,7 @@ get_ptr :: proc(array: $T/[]$E, index: int) -> (value: ^E, ok: bool) {
return
}
-as_ptr :: proc(array: $T/[]$E) -> ^E {
+as_ptr :: proc(array: $T/[]$E) -> [^]E {
return raw_data(array)
}
From 53e30e4621558f5d003eacc7b4a7f209723670c4 Mon Sep 17 00:00:00 2001
From: Jeroen van Rijn
Date: Tue, 7 Dec 2021 18:45:46 +0100
Subject: [PATCH 033/710] [core:container/bit_vector] Create new package.
A dynamic bit array, optionally allowing negative indices.
---
core/container/bit_array/bit_array.odin | 124 ++++++++++++++++++++++++
core/container/bit_array/doc.odin | 52 ++++++++++
2 files changed, 176 insertions(+)
create mode 100644 core/container/bit_array/bit_array.odin
create mode 100644 core/container/bit_array/doc.odin
diff --git a/core/container/bit_array/bit_array.odin b/core/container/bit_array/bit_array.odin
new file mode 100644
index 000000000..61f6f86e8
--- /dev/null
+++ b/core/container/bit_array/bit_array.odin
@@ -0,0 +1,124 @@
+package dynamic_bit_array
+
+import "core:intrinsics"
+
+/*
+ Note that these constants are dependent on the backing being a u64.
+*/
+@(private="file")
+INDEX_SHIFT :: 6
+
+@(private="file")
+INDEX_MASK :: 63
+
+Bit_Array :: struct {
+ bits: [dynamic]u64,
+ bias: int,
+}
+
+/*
+ In:
+ - ba: ^Bit_Array - a pointer to the Bit Array
+ - index: The bit index. Can be an enum member.
+
+ Out:
+ - res: The bit you're interested in.
+ - ok: Whether the index was valid. Returns `false` if the index is smaller than the bias.
+
+ The `ok` return value may be ignored.
+*/
+get :: proc(ba: ^Bit_Array, #any_int index: uint, allocator := context.allocator) -> (res: bool, ok: bool) {
+ idx := int(index) - ba.bias
+
+ if ba == nil || int(index) < ba.bias { return false, false }
+ context.allocator = allocator
+
+ leg_index := idx >> INDEX_SHIFT
+ bit_index := idx & INDEX_MASK
+
+ /*
+ If we `get` a bit that doesn't fit in the Bit Array, it's naturally `false`.
+ This early-out prevents unnecessary resizing.
+ */
+ if leg_index + 1 > len(ba.bits) { return false, true }
+
+ val := u64(1 << uint(bit_index))
+ res = ba.bits[leg_index] & val == val
+
+ return res, true
+}
+
+/*
+ In:
+ - ba: ^Bit_Array - a pointer to the Bit Array
+ - index: The bit index. Can be an enum member.
+
+ Out:
+ - ok: Whether or not we managed to set requested bit.
+
+ `set` automatically resizes the Bit Array to accommodate the requested index if needed.
+*/
+set :: proc(ba: ^Bit_Array, #any_int index: uint, allocator := context.allocator) -> (ok: bool) {
+
+ idx := int(index) - ba.bias
+
+ if ba == nil || int(index) < ba.bias { return false }
+ context.allocator = allocator
+
+ leg_index := idx >> INDEX_SHIFT
+ bit_index := idx & INDEX_MASK
+
+ resize_if_needed(ba, leg_index) or_return
+
+ ba.bits[leg_index] |= 1 << uint(bit_index)
+ return true
+}
+
+/*
+ A helper function to create a Bit Array with optional bias, in case your smallest index is non-zero (including negative).
+*/
+create :: proc(max_index: int, min_index := 0, allocator := context.allocator) -> (res: Bit_Array, ok: bool) #optional_ok {
+ context.allocator = allocator
+ size_in_bits := max_index - min_index
+
+ if size_in_bits < 1 { return {}, false }
+
+ legs := size_in_bits >> INDEX_SHIFT
+
+ res = Bit_Array{
+ bias = min_index,
+ }
+ return res, resize_if_needed(&res, size_in_bits)
+}
+
+/*
+ Sets all bits to `false`.
+*/
+clear :: proc(ba: ^Bit_Array) {
+ if ba == nil { return }
+ ba.bits = {}
+}
+
+/*
+ Releases the memory used by the Bit Array.
+*/
+destroy :: proc(ba: ^Bit_Array) {
+ if ba == nil { return }
+ delete(ba.bits)
+}
+
+/*
+ Resizes the Bit Array. For internal use.
+ If you want to reserve the memory for a given-sized Bit Array up front, you can use `create`.
+*/
+@(private="file")
+resize_if_needed :: proc(ba: ^Bit_Array, legs: int, allocator := context.allocator) -> (ok: bool) {
+ if ba == nil { return false }
+
+ context.allocator = allocator
+
+ if legs + 1 > len(ba.bits) {
+ resize(&ba.bits, legs + 1)
+ }
+ return len(ba.bits) > legs
+}
\ No newline at end of file
diff --git a/core/container/bit_array/doc.odin b/core/container/bit_array/doc.odin
new file mode 100644
index 000000000..91e1362dd
--- /dev/null
+++ b/core/container/bit_array/doc.odin
@@ -0,0 +1,52 @@
+package dynamic_bit_array
+
+/*
+ The Bit Array can be used in several ways:
+
+ -- By default you don't need to instantiate a Bit Array:
+
+ package test
+
+ import "core:fmt"
+ import "core:container/bit_array"
+
+ main :: proc() {
+ using bit_array
+
+ bits: Bit_Array
+
+ // returns `true`
+ fmt.println(set(&bits, 42))
+
+ // returns `false`, `false`, because this Bit Array wasn't created to allow negative indices.
+ was_set, was_retrieved := get(&bits, -1)
+ fmt.println(was_set, was_retrieved)
+ }
+
+ -- A Bit Array can optionally allow for negative indices, if the minimum value was given during creation:
+
+ package test
+
+ import "core:fmt"
+ import "core:container/bit_array"
+
+ main :: proc() {
+ Foo :: enum int {
+ Negative_Test = -42,
+ Bar = 420,
+ Leaves = 69105,
+ }
+
+ using bit_array
+
+ bits := create(int(max(Foo)), int(min(Foo)))
+ defer destroy(&bits)
+
+ fmt.printf("Set(Bar): %v\n", set(&bits, Foo.Bar))
+ fmt.printf("Get(Bar): %v, %v\n", get(&bits, Foo.Bar))
+ fmt.printf("Set(Negative_Test): %v\n", set(&bits, Foo.Negative_Test))
+ fmt.printf("Get(Leaves): %v, %v\n", get(&bits, Foo.Leaves))
+ fmt.printf("Get(Negative_Test): %v, %v\n", get(&bits, Foo.Negative_Test))
+ fmt.printf("Freed.\n")
+ }
+*/
\ No newline at end of file
From 92e70b9a589038bdcfd0f715bb1172b15432caa6 Mon Sep 17 00:00:00 2001
From: Andrea Piseri
Date: Tue, 28 Dec 2021 16:19:38 +0100
Subject: [PATCH 034/710] use multipointers instead of simple pointers
---
core/slice/slice.odin | 18 +++++++-----------
1 file changed, 7 insertions(+), 11 deletions(-)
diff --git a/core/slice/slice.odin b/core/slice/slice.odin
index 992efb3f1..426829a22 100644
--- a/core/slice/slice.odin
+++ b/core/slice/slice.odin
@@ -306,21 +306,17 @@ filter :: proc(s: $S/[]$U, f: proc(U) -> bool, allocator := context.allocator) -
scanner :: proc (s: $S/[]$U, initializer: $V, f: proc(V, U)->V, allocator := context.allocator) -> []V {
if len(s) == 0 { return {} }
- p := as_ptr(s)
res := make([]V, len(s), allocator)
-
+ p := as_ptr(s)
q := as_ptr(res)
- l := len(res)
+ r := initializer
- r := initializer
-
- for l > 0 {
- r = f(r, p^)
- q^ = r
- p = intrinsics.ptr_offset(p, 1)
- q = intrinsics.ptr_offset(q, 1)
- l -= 1
+ for l := len(s); l > 0; l -= 1 {
+ r = f(r, p[0])
+ q[0] = r
+ p = p[1:]
+ q = q[1:]
}
return res
From c46e7eda1db0d509589dfca140802fdd3f2580eb Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Wed, 29 Dec 2021 11:26:22 +0000
Subject: [PATCH 035/710] Add `core:container/small_array`
---
core/container/small_array/small_array.odin | 117 ++++++++++++++++++++
1 file changed, 117 insertions(+)
create mode 100644 core/container/small_array/small_array.odin
diff --git a/core/container/small_array/small_array.odin b/core/container/small_array/small_array.odin
new file mode 100644
index 000000000..60c22837c
--- /dev/null
+++ b/core/container/small_array/small_array.odin
@@ -0,0 +1,117 @@
+package container_small_array
+
+import "core:builtin"
+
+Small_Array :: struct($N: int, $T: typeid) where N >= 0 {
+ data: [N]T,
+ len: int,
+}
+
+
+len :: proc(a: $A/Small_Array) -> int {
+ return a.len
+}
+
+cap :: proc(a: $A/Small_Array) -> int {
+ return builtin.len(a.data)
+}
+
+space :: proc(a: $A/Small_Array) -> int {
+ return builtin.len(a.data) - a.len
+}
+
+slice :: proc(a: ^$A/Small_Array($N, $T)) -> []T {
+ return a.data[:a.len]
+}
+
+
+get :: proc(a: $A/Small_Array($N, $T), index: int, loc := #caller_location) -> T {
+ return a.data[index]
+}
+get_ptr :: proc(a: $A/Small_Array($N, $T), index: int, loc := #caller_location) -> ^T {
+ return &a.data[index]
+}
+
+set :: proc(a: ^$A/Small_Array($N, $T), index: int, item: T, loc := #caller_location) {
+ a.data[index] = item
+}
+
+resize :: proc(a: ^$A/Small_Array, length: int) {
+ a.len = min(length, builtin.len(a.data))
+}
+
+
+push_back :: proc(a: ^$A/Small_Array($N, $T), item: T) -> bool {
+ if a.len < builtin.len(a.data) {
+ a.len += 1
+ a.data[a.len-1] = item
+ return true
+ }
+ return false
+}
+
+push_front :: proc(a: ^$A/Small_Array($N, $T), item: T) -> bool {
+ if a.len < builtin.len(a.data) {
+ a.len += 1
+ data := slice(a)
+ copy(data[1:], data[:])
+ data[0] = item
+ return true
+ }
+ return false
+}
+
+pop_back :: proc(a: ^$A/Small_Array($N, $T), loc := #caller_location) -> T {
+ assert(condition=(N > 0 && a.len > 0), loc=loc)
+ item := a.data[a.len-1]
+ a.len -= 1
+ return item
+}
+
+pop_front :: proc(a: ^$A/Small_Array($N, $T), loc := #caller_location) -> T {
+ assert(condition=(N > 0 && a.len > 0), loc=loc)
+ item := a.data[0]
+ s := slice(a)
+ copy(s[:], s[1:])
+ a.len -= 1
+ return item
+}
+
+pop_back_safe :: proc(a: ^$A/Small_Array($N, $T)) -> (item: T, ok: bool) {
+ if N > 0 && a.len > 0 {
+ item = a.data[a.len-1]
+ a.len -= 1
+ ok = true
+ }
+ return
+}
+
+pop_front_safe :: proc(a: ^$A/Small_Array($N, $T)) -> (T, bool) {
+ if N > 0 && a.len > 0 {
+ item = a.data[0]
+ s := slice(a)
+ copy(s[:], s[1:])
+ a.len -= 1
+ ok = true
+ }
+ return
+}
+
+consume :: proc(a: ^$A/Small_Array($N, $T), count: int, loc := #caller_location) {
+ assert(condition=a.len >= count, loc=loc)
+ a.len -= count
+}
+
+clear :: proc(a: ^$A/Small_Array($N, $T)) {
+ resize(a, 0)
+}
+
+push_back_elems :: proc(a: ^$A/Small_Array($N, $T), items: ..T) {
+ n := copy(a.data[a.len:], items[:])
+ a.len += n
+}
+
+append_elem :: push_back
+append_elems :: push_back_elems
+push :: proc{push_back, push_back_elems}
+append :: proc{push_back, push_back_elems}
\ No newline at end of file
From a66f859fb49b2d31809767aaa8b4d8872b73c062 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Wed, 29 Dec 2021 11:58:27 +0000
Subject: [PATCH 036/710] Minor improvements to `core:container/small_array`
---
core/container/small_array/small_array.odin | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/core/container/small_array/small_array.odin b/core/container/small_array/small_array.odin
index 60c22837c..d09e0c81c 100644
--- a/core/container/small_array/small_array.odin
+++ b/core/container/small_array/small_array.odin
@@ -42,16 +42,16 @@ resize :: proc(a: ^$A/Small_Array, length: int) {
push_back :: proc(a: ^$A/Small_Array($N, $T), item: T) -> bool {
- if a.len < builtin.len(a.data) {
+ if a.len < cap(a^) {
+ a.data[a.len] = item
a.len += 1
- a.data[a.len-1] = item
return true
}
return false
}
push_front :: proc(a: ^$A/Small_Array($N, $T), item: T) -> bool {
- if a.len < builtin.len(a.data) {
+ if a.len < cap(a^) {
a.len += 1
data := slice(a)
copy(data[1:], data[:])
From a9b17b5a37b611dae51da6153de8239dfa08f202 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Wed, 29 Dec 2021 12:01:07 +0000
Subject: [PATCH 037/710] Add `hash.djbx33a`
---
core/hash/djbx33a.odin | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)
create mode 100644 core/hash/djbx33a.odin
diff --git a/core/hash/djbx33a.odin b/core/hash/djbx33a.odin
new file mode 100644
index 000000000..db286b660
--- /dev/null
+++ b/core/hash/djbx33a.odin
@@ -0,0 +1,18 @@
+package hash
+
+djbx33a :: proc(data: []byte) -> (result: [16]byte) #no_bounds_check {
+ state := [4]u32{5381, 5381, 5381, 5381}
+
+ s: u32 = 0
+ for p in data {
+ state[s] = (state[s] << 5) + state[s] + u32(p)
+ s = (s + 1) & 3
+ }
+
+
+ (^u32le)(&result[0])^ = u32le(state[0])
+ (^u32le)(&result[4])^ = u32le(state[1])
+ (^u32le)(&result[8])^ = u32le(state[2])
+ (^u32le)(&result[12])^ = u32le(state[3])
+ return
+}
\ No newline at end of file
From c987b8429219d9d823a658fd25a6661fe949bebd Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Wed, 29 Dec 2021 12:24:47 +0000
Subject: [PATCH 038/710] Move hash.djbx33a to hash.odin
---
core/hash/djbx33a.odin | 18 ------------------
core/hash/hash.odin | 17 +++++++++++++++++
2 files changed, 17 insertions(+), 18 deletions(-)
delete mode 100644 core/hash/djbx33a.odin
diff --git a/core/hash/djbx33a.odin b/core/hash/djbx33a.odin
deleted file mode 100644
index db286b660..000000000
--- a/core/hash/djbx33a.odin
+++ /dev/null
@@ -1,18 +0,0 @@
-package hash
-
-djbx33a :: proc(data: []byte) -> (result: [16]byte) #no_bounds_check {
- state := [4]u32{5381, 5381, 5381, 5381}
-
- s: u32 = 0
- for p in data {
- state[s] = (state[s] << 5) + state[s] + u32(p)
- s = (s + 1) & 3
- }
-
-
- (^u32le)(&result[0])^ = u32le(state[0])
- (^u32le)(&result[4])^ = u32le(state[1])
- (^u32le)(&result[8])^ = u32le(state[2])
- (^u32le)(&result[12])^ = u32le(state[3])
- return
-}
\ No newline at end of file
diff --git a/core/hash/hash.odin b/core/hash/hash.odin
index f0d01bd25..5044d567a 100644
--- a/core/hash/hash.odin
+++ b/core/hash/hash.odin
@@ -55,6 +55,23 @@ djb2 :: proc(data: []byte, seed := u32(5381)) -> u32 {
return hash
}
+djbx33a :: proc(data: []byte, seed := u32(5381)) -> (result: [16]byte) #no_bounds_check {
+ state := [4]u32{seed, seed, seed, seed}
+
+ s: u32 = 0
+ for p in data {
+ state[s] = (state[s] << 5) + state[s] + u32(p) // hash * 33 + u32(b)
+ s = (s + 1) & 3
+ }
+
+
+ (^u32le)(&result[0])^ = u32le(state[0])
+ (^u32le)(&result[4])^ = u32le(state[1])
+ (^u32le)(&result[8])^ = u32le(state[2])
+ (^u32le)(&result[12])^ = u32le(state[3])
+ return
+}
+
@(optimization_mode="speed")
fnv32 :: proc(data: []byte, seed := u32(0x811c9dc5)) -> u32 {
h: u32 = seed
From ed8b20da787edd3747cc85708a0b9dd2c2e256d1 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Wed, 29 Dec 2021 14:38:39 +0000
Subject: [PATCH 039/710] Add `core:container/priority_queue`
---
.../priority_queue/priority_queue.odin | 140 ++++++++++++++++++
1 file changed, 140 insertions(+)
create mode 100644 core/container/priority_queue/priority_queue.odin
diff --git a/core/container/priority_queue/priority_queue.odin b/core/container/priority_queue/priority_queue.odin
new file mode 100644
index 000000000..f53dced38
--- /dev/null
+++ b/core/container/priority_queue/priority_queue.odin
@@ -0,0 +1,140 @@
+package container_priority_queue
+
+import "core:builtin"
+
+Priority_Queue :: struct($T: typeid) {
+ data: [dynamic]T,
+ len: int,
+ priority: proc(item: T) -> int,
+}
+
+DEFAULT_CAPACITY :: 16
+
+init_none :: proc(q: ^$Q/Priority_Queue($T), f: proc(item: T) -> int, allocator := context.allocator) {
+ init_len(q, f, 0, allocator)
+}
+init_len :: proc(q: ^$Q/Priority_Queue($T), f: proc(item: T) -> int, len: int, allocator := context.allocator) {
+ init_len_cap(q, f, 0, DEFAULT_CAPACITY, allocator)
+}
+init_len_cap :: proc(q: ^$Q/Priority_Queue($T), f: proc(item: T) -> int, len: int, cap: int, allocator := context.allocator) {
+ if q.data.allocator.procedure == nil {
+ q.data.allocator = allocator
+ }
+ builtin.resize(&q.data, cap)
+ q.len = len
+ q.priority = f
+}
+
+init :: proc{init_none, init_len, init_len_cap}
+
+
+delete :: proc(q: $Q/Priority_Queue($T)) {
+ builtin.delete(q.data)
+}
+
+clear :: proc(q: ^$Q/Priority_Queue($T)) {
+ q.len = 0
+}
+
+len :: proc(q: $Q/Priority_Queue($T)) -> int {
+ return q.len
+}
+
+cap :: proc(q: $Q/Priority_Queue($T)) -> int {
+ return builtin.cap(q.data)
+}
+
+space :: proc(q: $Q/Priority_Queue($T)) -> int {
+ return builtin.len(q.data) - q.len
+}
+
+reserve :: proc(q: ^$Q/Priority_Queue($T), capacity: int) {
+ if capacity > q.len {
+ builtin.resize(&q.data, capacity)
+ }
+}
+
+resize :: proc(q: ^$Q/Priority_Queue($T), length: int) {
+ if length > q.len {
+ builtin.resize(&q.data, length)
+ }
+ q.len = length
+}
+
+_grow :: proc(q: ^$Q/Priority_Queue($T), min_capacity: int = 8) {
+ new_capacity := max(builtin.len(q.data)*2, min_capacity, 1)
+ builtin.resize(&q.data, new_capacity)
+}
+
+
+push :: proc(q: ^$Q/Priority_Queue($T), item: T) {
+ if builtin.len(q.data) - q.len == 0 {
+ _grow(q)
+ }
+
+ s := q.data[:]
+ s[q.len] = item
+
+ i := q.len
+ for i > 0 {
+ p := (i - 1) / 2
+ if q.priority(s[p]) <= q.priority(item) {
+ break
+ }
+ s[i] = s[p]
+ i = p
+ }
+
+ q.len += 1
+ if q.len > 0 {
+ s[i] = item
+ }
+}
+
+pop :: proc(q: ^$Q/Priority_Queue($T), loc := #caller_location) -> T {
+ val, ok := pop_safe(q)
+ assert(condition=ok, loc=loc)
+ return val
+}
+
+
+pop_safe :: proc(q: ^$Q/Priority_Queue($T)) -> (T, bool) {
+ if q.len > 0 {
+ s := q.data[:]
+ min := s[0]
+ root := s[q.len-1]
+ q.len -= 1
+
+ i := 0
+ for i * 2 + 1 < q.len {
+ a := i * 2 + 1
+ b := i * 2 + 2
+ c := b < q.len && q.priority(s[b]) < q.priority(s[a]) ? b : a
+
+ if q.priority(s[c]) >= q.priority(root) {
+ break
+ }
+ s[i] = s[c]
+ i = c
+ }
+
+ if q.len > 0 {
+ s[i] = root
+ }
+ return min, true
+ }
+ return T{}, false
+}
+
+peek :: proc(q: ^$Q/Priority_Queue($T), loc := #caller_location) -> T {
+ assert(condition=q.len > 0, loc=loc)
+
+ return q.data[0]
+}
+
+peek_safe :: proc(q: ^$Q/Priority_Queue($T)) -> (T, bool) {
+ if q.len > 0 {
+ return q.data[0], true
+ }
+ return T{}, false
+}
\ No newline at end of file
From ed742846cb6447d9d9ff6a4cfde285d7f4bb0eb9 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Wed, 29 Dec 2021 15:01:56 +0000
Subject: [PATCH 040/710] Correct `lb_emit_ptr_offset` bug caused by
`LLVMConstGEP` assuming a signed index
---
src/llvm_backend_utility.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/llvm_backend_utility.cpp b/src/llvm_backend_utility.cpp
index 0350f7287..3fe96459f 100644
--- a/src/llvm_backend_utility.cpp
+++ b/src/llvm_backend_utility.cpp
@@ -1200,7 +1200,7 @@ lbValue lb_emit_array_epi(lbProcedure *p, lbValue s, isize index) {
}
lbValue lb_emit_ptr_offset(lbProcedure *p, lbValue ptr, lbValue index) {
- index = lb_correct_endianness(p, index);
+ index = lb_emit_conv(p, index, t_int);
LLVMValueRef indices[1] = {index.value};
lbValue res = {};
res.type = ptr.type;
From 750ee4ecdbe4c0f9d2eb72fa0fc0710f11ba035f Mon Sep 17 00:00:00 2001
From: kleeon
Date: Thu, 30 Dec 2021 15:00:45 +0300
Subject: [PATCH 041/710] Fixed wrong function name in README.md
---
vendor/OpenGL/README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/vendor/OpenGL/README.md b/vendor/OpenGL/README.md
index 9e7a7bbbc..928d5eb5d 100644
--- a/vendor/OpenGL/README.md
+++ b/vendor/OpenGL/README.md
@@ -9,7 +9,7 @@ gl.load_up_to(4, 5, proc(p: rawptr, name: cstring) do (cast(^rawptr)p)^ = glfw.G
```
[odin-glfw](https://github.com/vassvik/odin-glfw) also provides a useful helper you can pass straight to `gl.load_up_to`:
```go
-gl.load_up_to(4, 5, glfw.set_proc_address);
+gl.load_up_to(4, 5, glfw.gl_set_proc_address);
```
#### NOTE: It is recommended to put this into the shared collection:
From c7ff296bef1b51004cfee2470d5d95abe391dabe Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Thu, 30 Dec 2021 13:42:10 +0000
Subject: [PATCH 042/710] Change the implementation of `Priority_Queue` to have
a better interface that allows for a `less` and `swap` procedure
---
.../priority_queue/priority_queue.odin | 220 +++++++++---------
1 file changed, 109 insertions(+), 111 deletions(-)
diff --git a/core/container/priority_queue/priority_queue.odin b/core/container/priority_queue/priority_queue.odin
index f53dced38..df26edb1b 100644
--- a/core/container/priority_queue/priority_queue.odin
+++ b/core/container/priority_queue/priority_queue.odin
@@ -3,138 +3,136 @@ package container_priority_queue
import "core:builtin"
Priority_Queue :: struct($T: typeid) {
- data: [dynamic]T,
- len: int,
- priority: proc(item: T) -> int,
+ queue: [dynamic]T,
+
+ less: proc(a, b: T) -> bool,
+ swap: proc(q: []T, i, j: int),
}
DEFAULT_CAPACITY :: 16
-init_none :: proc(q: ^$Q/Priority_Queue($T), f: proc(item: T) -> int, allocator := context.allocator) {
- init_len(q, f, 0, allocator)
-}
-init_len :: proc(q: ^$Q/Priority_Queue($T), f: proc(item: T) -> int, len: int, allocator := context.allocator) {
- init_len_cap(q, f, 0, DEFAULT_CAPACITY, allocator)
-}
-init_len_cap :: proc(q: ^$Q/Priority_Queue($T), f: proc(item: T) -> int, len: int, cap: int, allocator := context.allocator) {
- if q.data.allocator.procedure == nil {
- q.data.allocator = allocator
+init :: proc(pq: ^$Q/Priority_Queue($T), less: proc(a, b: T) -> bool, swap: proc(q: []T, i, j: int), capacity := DEFAULT_CAPACITY, allocator := context.allocator) {
+ if pq.queue.allocator.procedure == nil {
+ pq.queue.allocator = allocator
}
- builtin.resize(&q.data, cap)
- q.len = len
- q.priority = f
+ reserve(pq, capacity)
+ pq.less = less
+ pq.swap = swap
}
-init :: proc{init_none, init_len, init_len_cap}
-
-
-delete :: proc(q: $Q/Priority_Queue($T)) {
- builtin.delete(q.data)
-}
-
-clear :: proc(q: ^$Q/Priority_Queue($T)) {
- q.len = 0
-}
-
-len :: proc(q: $Q/Priority_Queue($T)) -> int {
- return q.len
-}
-
-cap :: proc(q: $Q/Priority_Queue($T)) -> int {
- return builtin.cap(q.data)
-}
-
-space :: proc(q: $Q/Priority_Queue($T)) -> int {
- return builtin.len(q.data) - q.len
-}
-
-reserve :: proc(q: ^$Q/Priority_Queue($T), capacity: int) {
- if capacity > q.len {
- builtin.resize(&q.data, capacity)
+init_from_dynamic_array :: proc(pq: ^$Q/Priority_Queue($T), queue: [dynamic]T, less: proc(a, b: T) -> bool, swap: proc(q: []T, i, j: int)) {
+ pq.queue = queue
+ pq.less = less
+ pq.swap = swap
+ n := builtin.len(pq.queue)
+ for i := n/2 - 1; i >= 0; i -= 1 {
+ _shift_down(pq, i, n)
}
}
-resize :: proc(q: ^$Q/Priority_Queue($T), length: int) {
- if length > q.len {
- builtin.resize(&q.data, length)
- }
- q.len = length
+destroy :: proc(pq: ^$Q/Priority_Queue($T)) {
+ clear(pq)
+ delete(pq.queue)
}
-_grow :: proc(q: ^$Q/Priority_Queue($T), min_capacity: int = 8) {
- new_capacity := max(builtin.len(q.data)*2, min_capacity, 1)
- builtin.resize(&q.data, new_capacity)
+reserve :: proc(pq: ^$Q/Priority_Queue($T), capacity: int) {
+ builtin.reserve(&pq.queue, capacity)
+}
+clear :: proc(pq: ^$Q/Priority_Queue($T)) {
+ builtin.clear(&pq.queue)
+}
+len :: proc(pq: $Q/Priority_Queue($T)) -> int {
+ return builtin.len(pq.queue)
+}
+cap :: proc(pq: $Q/Priority_Queue($T)) -> int {
+ return builtin.cap(pq.queue)
}
-
-push :: proc(q: ^$Q/Priority_Queue($T), item: T) {
- if builtin.len(q.data) - q.len == 0 {
- _grow(q)
+_shift_down :: proc(pq: ^$Q/Priority_Queue($T), i0, n: int) -> bool {
+ // O(n log n)
+ i := i0
+ j, j1, j2: int
+ if 0 > i || i > n {
+ return false
}
-
- s := q.data[:]
- s[q.len] = item
-
- i := q.len
- for i > 0 {
- p := (i - 1) / 2
- if q.priority(s[p]) <= q.priority(item) {
- break
+
+ queue := pq.queue[:]
+
+ for {
+ j1 := 2*i + 1
+ if 0 > j1 || j1 >= n {
+ break
}
- s[i] = s[p]
- i = p
- }
-
- q.len += 1
- if q.len > 0 {
- s[i] = item
- }
-}
-
-pop :: proc(q: ^$Q/Priority_Queue($T), loc := #caller_location) -> T {
- val, ok := pop_safe(q)
- assert(condition=ok, loc=loc)
- return val
-}
-
-
-pop_safe :: proc(q: ^$Q/Priority_Queue($T)) -> (T, bool) {
- if q.len > 0 {
- s := q.data[:]
- min := s[0]
- root := s[q.len-1]
- q.len -= 1
-
- i := 0
- for i * 2 + 1 < q.len {
- a := i * 2 + 1
- b := i * 2 + 2
- c := b < q.len && q.priority(s[b]) < q.priority(s[a]) ? b : a
-
- if q.priority(s[c]) >= q.priority(root) {
- break
- }
- s[i] = s[c]
- i = c
+ j, j2 = j1, j1+1
+ if j1 < n && pq.less(queue[j2], queue[j1]) {
+ j1 = j2
}
-
- if q.len > 0 {
- s[i] = root
+ if !pq.less(queue[i], queue[j]) {
+ break
}
- return min, true
+
+ pq.swap(queue, i, j)
+ i = j
}
- return T{}, false
+ return i > i0
}
-peek :: proc(q: ^$Q/Priority_Queue($T), loc := #caller_location) -> T {
- assert(condition=q.len > 0, loc=loc)
-
- return q.data[0]
+_shift_up :: proc(pq: ^$Q/Priority_Queue($T), j: int) {
+ j := j
+ queue := pq.queue[:]
+ n := builtin.len(queue)
+ for 0 <= j && j < n {
+ i := (j-1)/2
+ if i == j || !pq.less(queue[j], queue[i]) {
+ break
+ }
+ pq.swap(queue, i, j)
+ j = i
+ }
}
-peek_safe :: proc(q: ^$Q/Priority_Queue($T)) -> (T, bool) {
- if q.len > 0 {
- return q.data[0], true
+// NOTE(bill): When an element at index 'i' has changed its value, this will fix the
+// the heap ordering. This is using a basic "heapsort" with shift up and a shift down parts.
+fix :: proc(pq: ^$Q/Priority_Queue($T), i: int) {
+ if !_shift_down(pq, i, builtin.len(pq.queue)) {
+ _shift_up(pq, i)
}
- return T{}, false
-}
\ No newline at end of file
+}
+
+push :: proc(pq: ^$Q/Priority_Queue($T), value: T) {
+ append(&pq.queue, value)
+ _shift_up(pq, builtin.len(pq.queue)-1)
+}
+
+pop :: proc(pq: ^$Q/Priority_Queue($T), loc := #caller_location) -> (value: T) {
+ assert(condition=builtin.len(pq.queue)>0, loc=loc)
+
+ n := builtin.len(pq.queue)-1
+ pq.swap(pq.queue[:], 0, n)
+ _shift_down(pq, 0, n)
+ return builtin.pop(&pq.queue)
+}
+
+pop_safe :: proc(pq: ^$Q/Priority_Queue($T), loc := #caller_location) -> (value: T, ok: bool) {
+ if builtin.len(pq.queue) > 0 {
+ n := builtin.len(pq.queue)-1
+ pq.swap(pq.queue[:], 0, n)
+ _shift_down(pq, 0, n)
+ return builtin.pop_safe(&pq.queue)
+ }
+ return
+}
+
+remove :: proc(pq: ^$Q/Priority_Queue($T), i: int) -> (value: T, ok: bool) {
+ n := builtin.len(pq.queue)
+ if 0 <= i && i < n {
+ if n != i {
+ pq.swap(pq.queue[:], i, n)
+ _shift_down(pq, i, n)
+ _shift_up(pq, i)
+ }
+ value, ok = builtin.pop_safe(&pq.queue)
+ }
+ return
+}
+
From 42033ea808ae3b97d909f538e422e63dadfd8f6c Mon Sep 17 00:00:00 2001
From: zhibog
Date: Fri, 31 Dec 2021 13:16:11 +0100
Subject: [PATCH 043/710] Extended crypto API by variants that write the result
into a destination buffer, instead of returning it
---
core/crypto/README.md | 8 +-
core/crypto/blake/blake.odin | 145 +++++--
core/crypto/blake2b/blake2b.odin | 40 +-
core/crypto/blake2s/blake2s.odin | 40 +-
core/crypto/gost/gost.odin | 36 +-
core/crypto/groestl/groestl.odin | 145 +++++--
core/crypto/haval/haval.odin | 564 +++++++++++++++++++++-----
core/crypto/jh/jh.odin | 145 +++++--
core/crypto/keccak/keccak.odin | 167 ++++++--
core/crypto/md2/md2.odin | 64 ++-
core/crypto/md4/md4.odin | 40 +-
core/crypto/md5/md5.odin | 40 +-
core/crypto/ripemd/ripemd.odin | 141 +++++--
core/crypto/sha1/sha1.odin | 37 +-
core/crypto/sha2/sha2.odin | 145 +++++--
core/crypto/sha3/sha3.odin | 161 ++++++--
core/crypto/shake/shake.odin | 85 +++-
core/crypto/sm3/sm3.odin | 43 +-
core/crypto/streebog/streebog.odin | 72 +++-
core/crypto/tiger/tiger.odin | 109 ++++-
core/crypto/tiger2/tiger2.odin | 109 ++++-
core/crypto/whirlpool/whirlpool.odin | 36 +-
vendor/botan/README.md | 8 +-
vendor/botan/blake2b/blake2b.odin | 37 +-
vendor/botan/gost/gost.odin | 37 +-
vendor/botan/keccak/keccak.odin | 37 +-
vendor/botan/md4/md4.odin | 37 +-
vendor/botan/md5/md5.odin | 37 +-
vendor/botan/ripemd/ripemd.odin | 37 +-
vendor/botan/sha1/sha1.odin | 37 +-
vendor/botan/sha2/sha2.odin | 145 +++++--
vendor/botan/sha3/sha3.odin | 145 +++++--
vendor/botan/shake/shake.odin | 73 +++-
vendor/botan/skein512/skein512.odin | 94 ++++-
vendor/botan/sm3/sm3.odin | 37 +-
vendor/botan/streebog/streebog.odin | 73 +++-
vendor/botan/tiger/tiger.odin | 109 ++++-
vendor/botan/whirlpool/whirlpool.odin | 37 +-
38 files changed, 2690 insertions(+), 662 deletions(-)
diff --git a/core/crypto/README.md b/core/crypto/README.md
index 5955f9c56..ddcb12d81 100644
--- a/core/crypto/README.md
+++ b/core/crypto/README.md
@@ -32,9 +32,11 @@ Please see the chart below for the options.
#### High level API
Each hash algorithm contains a procedure group named `hash`, or if the algorithm provides more than one digest size `hash_`\*.
-Included in these groups are four procedures.
+Included in these groups are six procedures.
* `hash_string` - Hash a given string and return the computed hash. Just calls `hash_bytes` internally
* `hash_bytes` - Hash a given byte slice and return the computed hash
+* `hash_string_to_buffer` - Hash a given string and put the computed hash in the second proc parameter. Just calls `hash_bytes_to_buffer` internally
+* `hash_bytes_to_buffer` - Hash a given byte slice and put the computed hash in the second proc parameter. The destination buffer has to be at least as big as the digest size of the hash
* `hash_stream` - Takes a stream from io.Stream and returns the computed hash from it
* `hash_file` - Takes a file handle and returns the computed hash from it. A second optional boolean parameter controls if the file is streamed (this is the default) or read at once (set to true)
@@ -59,6 +61,10 @@ main :: proc() {
// Compute the hash, using the high level API
computed_hash := md4.hash(input)
+ // Variant that takes a destination buffer, instead of returning the computed hash
+ hash := make([]byte, md4.DIGEST_SIZE) // @note: Destination buffer has to be at least as big as the digest size of the hash
+ md4.hash(input, hash[:])
+
// Compute the hash, using the low level API
ctx: md4.Md4_Context
computed_hash_low: [16]byte
diff --git a/core/crypto/blake/blake.odin b/core/crypto/blake/blake.odin
index 9d53f8a89..81924ab1e 100644
--- a/core/crypto/blake/blake.odin
+++ b/core/crypto/blake/blake.odin
@@ -17,16 +17,21 @@ import "core:io"
High level API
*/
+DIGEST_SIZE_224 :: 28
+DIGEST_SIZE_256 :: 32
+DIGEST_SIZE_384 :: 48
+DIGEST_SIZE_512 :: 64
+
// hash_string_224 will hash the given input and return the
// computed hash
-hash_string_224 :: proc "contextless" (data: string) -> [28]byte {
+hash_string_224 :: proc "contextless" (data: string) -> [DIGEST_SIZE_224]byte {
return hash_bytes_224(transmute([]byte)(data))
}
// hash_bytes_224 will hash the given input and return the
// computed hash
-hash_bytes_224 :: proc "contextless" (data: []byte) -> [28]byte {
- hash: [28]byte
+hash_bytes_224 :: proc "contextless" (data: []byte) -> [DIGEST_SIZE_224]byte {
+ hash: [DIGEST_SIZE_224]byte
ctx: Blake256_Context
ctx.is224 = true
init(&ctx)
@@ -35,10 +40,29 @@ hash_bytes_224 :: proc "contextless" (data: []byte) -> [28]byte {
return hash
}
+// hash_string_to_buffer_224 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_224(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_224 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_224, "Size of destination buffer is smaller than the digest size")
+ ctx: Blake256_Context
+ ctx.is224 = true
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_224 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
- hash: [28]byte
+hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
+ hash: [DIGEST_SIZE_224]byte
ctx: Blake256_Context
ctx.is224 = true
init(&ctx)
@@ -57,7 +81,7 @@ hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
// hash_file_224 will read the file provided by the given handle
// and compute a hash
-hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool) {
+hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
if !load_at_once {
return hash_stream_224(os.stream_from_handle(hd))
} else {
@@ -65,7 +89,7 @@ hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool)
return hash_bytes_224(buf[:]), ok
}
}
- return [28]byte{}, false
+ return [DIGEST_SIZE_224]byte{}, false
}
hash_224 :: proc {
@@ -73,18 +97,20 @@ hash_224 :: proc {
hash_file_224,
hash_bytes_224,
hash_string_224,
+ hash_bytes_to_buffer_224,
+ hash_string_to_buffer_224,
}
// hash_string_256 will hash the given input and return the
// computed hash
-hash_string_256 :: proc "contextless" (data: string) -> [32]byte {
+hash_string_256 :: proc "contextless" (data: string) -> [DIGEST_SIZE_256]byte {
return hash_bytes_256(transmute([]byte)(data))
}
// hash_bytes_256 will hash the given input and return the
// computed hash
-hash_bytes_256 :: proc "contextless" (data: []byte) -> [32]byte {
- hash: [32]byte
+hash_bytes_256 :: proc "contextless" (data: []byte) -> [DIGEST_SIZE_256]byte {
+ hash: [DIGEST_SIZE_256]byte
ctx: Blake256_Context
ctx.is224 = false
init(&ctx)
@@ -93,10 +119,29 @@ hash_bytes_256 :: proc "contextless" (data: []byte) -> [32]byte {
return hash
}
+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_256(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+ ctx: Blake256_Context
+ ctx.is224 = false
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_256 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
- hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+ hash: [DIGEST_SIZE_256]byte
ctx: Blake256_Context
ctx.is224 = false
init(&ctx)
@@ -115,7 +160,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
// hash_file_256 will read the file provided by the given handle
// and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
if !load_at_once {
return hash_stream_256(os.stream_from_handle(hd))
} else {
@@ -123,7 +168,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
return hash_bytes_256(buf[:]), ok
}
}
- return [32]byte{}, false
+ return [DIGEST_SIZE_256]byte{}, false
}
hash_256 :: proc {
@@ -131,18 +176,20 @@ hash_256 :: proc {
hash_file_256,
hash_bytes_256,
hash_string_256,
+ hash_bytes_to_buffer_256,
+ hash_string_to_buffer_256,
}
// hash_string_384 will hash the given input and return the
// computed hash
-hash_string_384 :: proc "contextless" (data: string) -> [48]byte {
+hash_string_384 :: proc "contextless" (data: string) -> [DIGEST_SIZE_384]byte {
return hash_bytes_384(transmute([]byte)(data))
}
// hash_bytes_384 will hash the given input and return the
// computed hash
-hash_bytes_384 :: proc "contextless" (data: []byte) -> [48]byte {
- hash: [48]byte
+hash_bytes_384 :: proc "contextless" (data: []byte) -> [DIGEST_SIZE_384]byte {
+ hash: [DIGEST_SIZE_384]byte
ctx: Blake512_Context
ctx.is384 = true
init(&ctx)
@@ -151,10 +198,29 @@ hash_bytes_384 :: proc "contextless" (data: []byte) -> [48]byte {
return hash
}
+// hash_string_to_buffer_384 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_384(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_384 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_384, "Size of destination buffer is smaller than the digest size")
+ ctx: Blake512_Context
+ ctx.is384 = true
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_384 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
- hash: [48]byte
+hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
+ hash: [DIGEST_SIZE_384]byte
ctx: Blake512_Context
ctx.is384 = true
init(&ctx)
@@ -173,7 +239,7 @@ hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
// hash_file_384 will read the file provided by the given handle
// and compute a hash
-hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool) {
+hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
if !load_at_once {
return hash_stream_384(os.stream_from_handle(hd))
} else {
@@ -181,7 +247,7 @@ hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool)
return hash_bytes_384(buf[:]), ok
}
}
- return [48]byte{}, false
+ return [DIGEST_SIZE_384]byte{}, false
}
hash_384 :: proc {
@@ -189,18 +255,20 @@ hash_384 :: proc {
hash_file_384,
hash_bytes_384,
hash_string_384,
+ hash_bytes_to_buffer_384,
+ hash_string_to_buffer_384,
}
// hash_string_512 will hash the given input and return the
// computed hash
-hash_string_512 :: proc "contextless" (data: string) -> [64]byte {
+hash_string_512 :: proc "contextless" (data: string) -> [DIGEST_SIZE_512]byte {
return hash_bytes_512(transmute([]byte)(data))
}
// hash_bytes_512 will hash the given input and return the
// computed hash
-hash_bytes_512 :: proc "contextless" (data: []byte) -> [64]byte {
- hash: [64]byte
+hash_bytes_512 :: proc "contextless" (data: []byte) -> [DIGEST_SIZE_512]byte {
+ hash: [DIGEST_SIZE_512]byte
ctx: Blake512_Context
ctx.is384 = false
init(&ctx)
@@ -209,10 +277,29 @@ hash_bytes_512 :: proc "contextless" (data: []byte) -> [64]byte {
return hash
}
+// hash_string_to_buffer_512 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_512(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_512 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
+ ctx: Blake512_Context
+ ctx.is384 = false
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_512 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
- hash: [64]byte
+hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
+ hash: [DIGEST_SIZE_512]byte
ctx: Blake512_Context
ctx.is384 = false
init(&ctx)
@@ -231,7 +318,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
// hash_file_512 will read the file provided by the given handle
// and compute a hash
-hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
if !load_at_once {
return hash_stream_512(os.stream_from_handle(hd))
} else {
@@ -239,7 +326,7 @@ hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool)
return hash_bytes_512(buf[:]), ok
}
}
- return [64]byte{}, false
+ return [DIGEST_SIZE_512]byte{}, false
}
hash_512 :: proc {
@@ -247,6 +334,8 @@ hash_512 :: proc {
hash_file_512,
hash_bytes_512,
hash_string_512,
+ hash_bytes_to_buffer_512,
+ hash_string_to_buffer_512,
}
/*
diff --git a/core/crypto/blake2b/blake2b.odin b/core/crypto/blake2b/blake2b.odin
index 85f9611f9..6d4689b88 100644
--- a/core/crypto/blake2b/blake2b.odin
+++ b/core/crypto/blake2b/blake2b.odin
@@ -20,16 +20,18 @@ import "../_blake2"
High level API
*/
+DIGEST_SIZE :: 64
+
// hash_string will hash the given input and return the
// computed hash
-hash_string :: proc(data: string) -> [64]byte {
+hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
return hash_bytes(transmute([]byte)(data))
}
// hash_bytes will hash the given input and return the
// computed hash
-hash_bytes :: proc(data: []byte) -> [64]byte {
- hash: [64]byte
+hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
+ hash: [DIGEST_SIZE]byte
ctx: _blake2.Blake2b_Context
cfg: _blake2.Blake2_Config
cfg.size = _blake2.BLAKE2B_SIZE
@@ -40,10 +42,32 @@ hash_bytes :: proc(data: []byte) -> [64]byte {
return hash
}
+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+ ctx: _blake2.Blake2b_Context
+ cfg: _blake2.Blake2_Config
+ cfg.size = _blake2.BLAKE2B_SIZE
+ ctx.cfg = cfg
+ _blake2.init(&ctx)
+ _blake2.update(&ctx, data)
+ _blake2.final(&ctx, hash)
+}
+
+
// hash_stream will read the stream in chunks and compute a
// hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([64]byte, bool) {
- hash: [64]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+ hash: [DIGEST_SIZE]byte
ctx: _blake2.Blake2b_Context
cfg: _blake2.Blake2_Config
cfg.size = _blake2.BLAKE2B_SIZE
@@ -64,7 +88,7 @@ hash_stream :: proc(s: io.Stream) -> ([64]byte, bool) {
// hash_file will read the file provided by the given handle
// and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
if !load_at_once {
return hash_stream(os.stream_from_handle(hd))
} else {
@@ -72,7 +96,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
return hash_bytes(buf[:]), ok
}
}
- return [64]byte{}, false
+ return [DIGEST_SIZE]byte{}, false
}
hash :: proc {
@@ -80,6 +104,8 @@ hash :: proc {
hash_file,
hash_bytes,
hash_string,
+ hash_bytes_to_buffer,
+ hash_string_to_buffer,
}
/*
diff --git a/core/crypto/blake2s/blake2s.odin b/core/crypto/blake2s/blake2s.odin
index 72d15b227..ad2e800fd 100644
--- a/core/crypto/blake2s/blake2s.odin
+++ b/core/crypto/blake2s/blake2s.odin
@@ -20,16 +20,18 @@ import "../_blake2"
High level API
*/
+DIGEST_SIZE :: 32
+
// hash_string will hash the given input and return the
// computed hash
-hash_string :: proc(data: string) -> [32]byte {
+hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
return hash_bytes(transmute([]byte)(data))
}
// hash_bytes will hash the given input and return the
// computed hash
-hash_bytes :: proc(data: []byte) -> [32]byte {
- hash: [32]byte
+hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
+ hash: [DIGEST_SIZE]byte
ctx: _blake2.Blake2s_Context
cfg: _blake2.Blake2_Config
cfg.size = _blake2.BLAKE2S_SIZE
@@ -40,10 +42,32 @@ hash_bytes :: proc(data: []byte) -> [32]byte {
return hash
}
+
+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+ ctx: _blake2.Blake2s_Context
+ cfg: _blake2.Blake2_Config
+ cfg.size = _blake2.BLAKE2S_SIZE
+ ctx.cfg = cfg
+ _blake2.init(&ctx)
+ _blake2.update(&ctx, data)
+ _blake2.final(&ctx, hash)
+}
+
// hash_stream will read the stream in chunks and compute a
// hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([32]byte, bool) {
- hash: [32]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+ hash: [DIGEST_SIZE]byte
ctx: _blake2.Blake2s_Context
cfg: _blake2.Blake2_Config
cfg.size = _blake2.BLAKE2S_SIZE
@@ -64,7 +88,7 @@ hash_stream :: proc(s: io.Stream) -> ([32]byte, bool) {
// hash_file will read the file provided by the given handle
// and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
if !load_at_once {
return hash_stream(os.stream_from_handle(hd))
} else {
@@ -72,7 +96,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
return hash_bytes(buf[:]), ok
}
}
- return [32]byte{}, false
+ return [DIGEST_SIZE]byte{}, false
}
hash :: proc {
@@ -80,6 +104,8 @@ hash :: proc {
hash_file,
hash_bytes,
hash_string,
+ hash_bytes_to_buffer,
+ hash_string_to_buffer,
}
/*
diff --git a/core/crypto/gost/gost.odin b/core/crypto/gost/gost.odin
index c687e9080..eed684f72 100644
--- a/core/crypto/gost/gost.odin
+++ b/core/crypto/gost/gost.odin
@@ -18,16 +18,18 @@ import "core:io"
High level API
*/
+DIGEST_SIZE :: 32
+
// hash_string will hash the given input and return the
// computed hash
-hash_string :: proc(data: string) -> [32]byte {
+hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
return hash_bytes(transmute([]byte)(data))
}
// hash_bytes will hash the given input and return the
// computed hash
-hash_bytes :: proc(data: []byte) -> [32]byte {
- hash: [32]byte
+hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
+ hash: [DIGEST_SIZE]byte
ctx: Gost_Context
init(&ctx)
update(&ctx, data)
@@ -35,10 +37,28 @@ hash_bytes :: proc(data: []byte) -> [32]byte {
return hash
}
+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+ ctx: Gost_Context
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream will read the stream in chunks and compute a
// hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([32]byte, bool) {
- hash: [32]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+ hash: [DIGEST_SIZE]byte
ctx: Gost_Context
init(&ctx)
buf := make([]byte, 512)
@@ -56,7 +76,7 @@ hash_stream :: proc(s: io.Stream) -> ([32]byte, bool) {
// hash_file will read the file provided by the given handle
// and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
if !load_at_once {
return hash_stream(os.stream_from_handle(hd))
} else {
@@ -64,7 +84,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
return hash_bytes(buf[:]), ok
}
}
- return [32]byte{}, false
+ return [DIGEST_SIZE]byte{}, false
}
hash :: proc {
@@ -72,6 +92,8 @@ hash :: proc {
hash_file,
hash_bytes,
hash_string,
+ hash_bytes_to_buffer,
+ hash_string_to_buffer,
}
/*
diff --git a/core/crypto/groestl/groestl.odin b/core/crypto/groestl/groestl.odin
index 0d305a1d1..5434e31e0 100644
--- a/core/crypto/groestl/groestl.odin
+++ b/core/crypto/groestl/groestl.odin
@@ -17,16 +17,21 @@ import "core:io"
High level API
*/
+DIGEST_SIZE_224 :: 28
+DIGEST_SIZE_256 :: 32
+DIGEST_SIZE_384 :: 48
+DIGEST_SIZE_512 :: 64
+
// hash_string_224 will hash the given input and return the
// computed hash
-hash_string_224 :: proc(data: string) -> [28]byte {
+hash_string_224 :: proc(data: string) -> [DIGEST_SIZE_224]byte {
return hash_bytes_224(transmute([]byte)(data))
}
// hash_bytes_224 will hash the given input and return the
// computed hash
-hash_bytes_224 :: proc(data: []byte) -> [28]byte {
- hash: [28]byte
+hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
+ hash: [DIGEST_SIZE_224]byte
ctx: Groestl_Context
ctx.hashbitlen = 224
init(&ctx)
@@ -35,10 +40,29 @@ hash_bytes_224 :: proc(data: []byte) -> [28]byte {
return hash
}
+// hash_string_to_buffer_224 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_224(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_224 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_224, "Size of destination buffer is smaller than the digest size")
+ ctx: Groestl_Context
+ ctx.hashbitlen = 224
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_224 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
- hash: [28]byte
+hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
+ hash: [DIGEST_SIZE_224]byte
ctx: Groestl_Context
ctx.hashbitlen = 224
init(&ctx)
@@ -57,7 +81,7 @@ hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
// hash_file_224 will read the file provided by the given handle
// and compute a hash
-hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool) {
+hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
if !load_at_once {
return hash_stream_224(os.stream_from_handle(hd))
} else {
@@ -65,7 +89,7 @@ hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool)
return hash_bytes_224(buf[:]), ok
}
}
- return [28]byte{}, false
+ return [DIGEST_SIZE_224]byte{}, false
}
hash_224 :: proc {
@@ -73,18 +97,20 @@ hash_224 :: proc {
hash_file_224,
hash_bytes_224,
hash_string_224,
+ hash_bytes_to_buffer_224,
+ hash_string_to_buffer_224,
}
// hash_string_256 will hash the given input and return the
// computed hash
-hash_string_256 :: proc(data: string) -> [32]byte {
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
return hash_bytes_256(transmute([]byte)(data))
}
// hash_bytes_256 will hash the given input and return the
// computed hash
-hash_bytes_256 :: proc(data: []byte) -> [32]byte {
- hash: [32]byte
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+ hash: [DIGEST_SIZE_256]byte
ctx: Groestl_Context
ctx.hashbitlen = 256
init(&ctx)
@@ -93,10 +119,29 @@ hash_bytes_256 :: proc(data: []byte) -> [32]byte {
return hash
}
+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_256(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+ ctx: Groestl_Context
+ ctx.hashbitlen = 256
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_256 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
- hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+ hash: [DIGEST_SIZE_256]byte
ctx: Groestl_Context
ctx.hashbitlen = 256
init(&ctx)
@@ -115,7 +160,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
// hash_file_256 will read the file provided by the given handle
// and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
if !load_at_once {
return hash_stream_256(os.stream_from_handle(hd))
} else {
@@ -123,7 +168,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
return hash_bytes_256(buf[:]), ok
}
}
- return [32]byte{}, false
+ return [DIGEST_SIZE_256]byte{}, false
}
hash_256 :: proc {
@@ -131,18 +176,20 @@ hash_256 :: proc {
hash_file_256,
hash_bytes_256,
hash_string_256,
+ hash_bytes_to_buffer_256,
+ hash_string_to_buffer_256,
}
// hash_string_384 will hash the given input and return the
// computed hash
-hash_string_384 :: proc(data: string) -> [48]byte {
+hash_string_384 :: proc(data: string) -> [DIGEST_SIZE_384]byte {
return hash_bytes_384(transmute([]byte)(data))
}
// hash_bytes_384 will hash the given input and return the
// computed hash
-hash_bytes_384 :: proc(data: []byte) -> [48]byte {
- hash: [48]byte
+hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
+ hash: [DIGEST_SIZE_384]byte
ctx: Groestl_Context
ctx.hashbitlen = 384
init(&ctx)
@@ -151,10 +198,29 @@ hash_bytes_384 :: proc(data: []byte) -> [48]byte {
return hash
}
+// hash_string_to_buffer_384 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_384(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_384 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_384, "Size of destination buffer is smaller than the digest size")
+ ctx: Groestl_Context
+ ctx.hashbitlen = 384
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_384 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
- hash: [48]byte
+hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
+ hash: [DIGEST_SIZE_384]byte
ctx: Groestl_Context
ctx.hashbitlen = 384
init(&ctx)
@@ -173,7 +239,7 @@ hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
// hash_file_384 will read the file provided by the given handle
// and compute a hash
-hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool) {
+hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
if !load_at_once {
return hash_stream_384(os.stream_from_handle(hd))
} else {
@@ -181,7 +247,7 @@ hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool)
return hash_bytes_384(buf[:]), ok
}
}
- return [48]byte{}, false
+ return [DIGEST_SIZE_384]byte{}, false
}
hash_384 :: proc {
@@ -189,18 +255,20 @@ hash_384 :: proc {
hash_file_384,
hash_bytes_384,
hash_string_384,
+ hash_bytes_to_buffer_384,
+ hash_string_to_buffer_384,
}
// hash_string_512 will hash the given input and return the
// computed hash
-hash_string_512 :: proc(data: string) -> [64]byte {
+hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
return hash_bytes_512(transmute([]byte)(data))
}
// hash_bytes_512 will hash the given input and return the
// computed hash
-hash_bytes_512 :: proc(data: []byte) -> [64]byte {
- hash: [64]byte
+hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
+ hash: [DIGEST_SIZE_512]byte
ctx: Groestl_Context
ctx.hashbitlen = 512
init(&ctx)
@@ -209,10 +277,29 @@ hash_bytes_512 :: proc(data: []byte) -> [64]byte {
return hash
}
+// hash_string_to_buffer_512 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_512(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_512 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
+ ctx: Groestl_Context
+ ctx.hashbitlen = 512
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_512 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
- hash: [64]byte
+hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
+ hash: [DIGEST_SIZE_512]byte
ctx: Groestl_Context
ctx.hashbitlen = 512
init(&ctx)
@@ -231,7 +318,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
// hash_file_512 will read the file provided by the given handle
// and compute a hash
-hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
if !load_at_once {
return hash_stream_512(os.stream_from_handle(hd))
} else {
@@ -239,7 +326,7 @@ hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool)
return hash_bytes_512(buf[:]), ok
}
}
- return [64]byte{}, false
+ return [DIGEST_SIZE_512]byte{}, false
}
hash_512 :: proc {
@@ -247,6 +334,8 @@ hash_512 :: proc {
hash_file_512,
hash_bytes_512,
hash_string_512,
+ hash_bytes_to_buffer_512,
+ hash_string_to_buffer_512,
}
/*
diff --git a/core/crypto/haval/haval.odin b/core/crypto/haval/haval.odin
index 76532d4cd..f95ea344d 100644
--- a/core/crypto/haval/haval.odin
+++ b/core/crypto/haval/haval.odin
@@ -20,16 +20,22 @@ import "../util"
High level API
*/
+DIGEST_SIZE_128 :: 16
+DIGEST_SIZE_160 :: 20
+DIGEST_SIZE_192 :: 24
+DIGEST_SIZE_224 :: 28
+DIGEST_SIZE_256 :: 32
+
// hash_string_128_3 will hash the given input and return the
// computed hash
-hash_string_128_3 :: proc(data: string) -> [16]byte {
+hash_string_128_3 :: proc(data: string) -> [DIGEST_SIZE_128]byte {
return hash_bytes_128_3(transmute([]byte)(data))
}
// hash_bytes_128_3 will hash the given input and return the
// computed hash
-hash_bytes_128_3 :: proc(data: []byte) -> [16]byte {
- hash: [16]byte
+hash_bytes_128_3 :: proc(data: []byte) -> [DIGEST_SIZE_128]byte {
+ hash: [DIGEST_SIZE_128]byte
ctx: Haval_Context
ctx.hashbitlen = 128
ctx.rounds = 3
@@ -40,10 +46,31 @@ hash_bytes_128_3 :: proc(data: []byte) -> [16]byte {
return hash
}
+// hash_string_to_buffer_128_3 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_128_3 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_128_3(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_128_3 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_128_3 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_128, "Size of destination buffer is smaller than the digest size")
+ ctx: Haval_Context
+ ctx.hashbitlen = 128
+ ctx.rounds = 3
+ init(&ctx)
+ ctx.str_len = u32(len(data))
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_128_3 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_128_3 :: proc(s: io.Stream) -> ([16]byte, bool) {
- hash: [16]byte
+hash_stream_128_3 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
+ hash: [DIGEST_SIZE_128]byte
ctx: Haval_Context
ctx.hashbitlen = 128
ctx.rounds = 3
@@ -64,7 +91,7 @@ hash_stream_128_3 :: proc(s: io.Stream) -> ([16]byte, bool) {
// hash_file_128_3 will read the file provided by the given handle
// and compute a hash
-hash_file_128_3 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
+hash_file_128_3 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_128]byte, bool) {
if !load_at_once {
return hash_stream_128_3(os.stream_from_handle(hd))
} else {
@@ -72,7 +99,7 @@ hash_file_128_3 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool
return hash_bytes_128_3(buf[:]), ok
}
}
- return [16]byte{}, false
+ return [DIGEST_SIZE_128]byte{}, false
}
hash_128_3 :: proc {
@@ -80,18 +107,20 @@ hash_128_3 :: proc {
hash_file_128_3,
hash_bytes_128_3,
hash_string_128_3,
+ hash_bytes_to_buffer_128_3,
+ hash_string_to_buffer_128_3,
}
// hash_string_128_4 will hash the given input and return the
// computed hash
-hash_string_128_4 :: proc(data: string) -> [16]byte {
+hash_string_128_4 :: proc(data: string) -> [DIGEST_SIZE_128]byte {
return hash_bytes_128_4(transmute([]byte)(data))
}
// hash_bytes_128_4 will hash the given input and return the
// computed hash
-hash_bytes_128_4 :: proc(data: []byte) -> [16]byte {
- hash: [16]byte
+hash_bytes_128_4 :: proc(data: []byte) -> [DIGEST_SIZE_128]byte {
+ hash: [DIGEST_SIZE_128]byte
ctx: Haval_Context
ctx.hashbitlen = 128
ctx.rounds = 4
@@ -102,10 +131,31 @@ hash_bytes_128_4 :: proc(data: []byte) -> [16]byte {
return hash
}
+// hash_string_to_buffer_128_4 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_128_4 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_128_4(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_128_4 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_128_4 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_128, "Size of destination buffer is smaller than the digest size")
+ ctx: Haval_Context
+ ctx.hashbitlen = 128
+ ctx.rounds = 4
+ init(&ctx)
+ ctx.str_len = u32(len(data))
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_128_4 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_128_4 :: proc(s: io.Stream) -> ([16]byte, bool) {
- hash: [16]byte
+hash_stream_128_4 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
+ hash: [DIGEST_SIZE_128]byte
ctx: Haval_Context
ctx.hashbitlen = 128
ctx.rounds = 4
@@ -126,7 +176,7 @@ hash_stream_128_4 :: proc(s: io.Stream) -> ([16]byte, bool) {
// hash_file_128_4 will read the file provided by the given handle
// and compute a hash
-hash_file_128_4 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
+hash_file_128_4 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_128]byte, bool) {
if !load_at_once {
return hash_stream_128_4(os.stream_from_handle(hd))
} else {
@@ -134,7 +184,7 @@ hash_file_128_4 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool
return hash_bytes_128_4(buf[:]), ok
}
}
- return [16]byte{}, false
+ return [DIGEST_SIZE_128]byte{}, false
}
hash_128_4 :: proc {
@@ -142,18 +192,20 @@ hash_128_4 :: proc {
hash_file_128_4,
hash_bytes_128_4,
hash_string_128_4,
+ hash_bytes_to_buffer_128_4,
+ hash_string_to_buffer_128_4,
}
// hash_string_128_5 will hash the given input and return the
// computed hash
-hash_string_128_5 :: proc(data: string) -> [16]byte {
+hash_string_128_5 :: proc(data: string) -> [DIGEST_SIZE_128]byte {
return hash_bytes_128_5(transmute([]byte)(data))
}
// hash_bytes_128_5 will hash the given input and return the
// computed hash
-hash_bytes_128_5 :: proc(data: []byte) -> [16]byte {
- hash: [16]byte
+hash_bytes_128_5 :: proc(data: []byte) -> [DIGEST_SIZE_128]byte {
+ hash: [DIGEST_SIZE_128]byte
ctx: Haval_Context
ctx.hashbitlen = 128
ctx.rounds = 5
@@ -164,10 +216,31 @@ hash_bytes_128_5 :: proc(data: []byte) -> [16]byte {
return hash
}
+// hash_string_to_buffer_128_5 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_128_5 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_128_5(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_128_5 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_128_5 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_128, "Size of destination buffer is smaller than the digest size")
+ ctx: Haval_Context
+ ctx.hashbitlen = 128
+ ctx.rounds = 5
+ init(&ctx)
+ ctx.str_len = u32(len(data))
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_128_5 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_128_5 :: proc(s: io.Stream) -> ([16]byte, bool) {
- hash: [16]byte
+hash_stream_128_5 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
+ hash: [DIGEST_SIZE_128]byte
ctx: Haval_Context
ctx.hashbitlen = 128
ctx.rounds = 5
@@ -188,7 +261,7 @@ hash_stream_128_5 :: proc(s: io.Stream) -> ([16]byte, bool) {
// hash_file_128_5 will read the file provided by the given handle
// and compute a hash
-hash_file_128_5 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
+hash_file_128_5 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_128]byte, bool) {
if !load_at_once {
return hash_stream_128_5(os.stream_from_handle(hd))
} else {
@@ -196,7 +269,7 @@ hash_file_128_5 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool
return hash_bytes_128_5(buf[:]), ok
}
}
- return [16]byte{}, false
+ return [DIGEST_SIZE_128]byte{}, false
}
hash_128_5 :: proc {
@@ -204,18 +277,20 @@ hash_128_5 :: proc {
hash_file_128_5,
hash_bytes_128_5,
hash_string_128_5,
+ hash_bytes_to_buffer_128_5,
+ hash_string_to_buffer_128_5,
}
// hash_string_160_3 will hash the given input and return the
// computed hash
-hash_string_160_3 :: proc(data: string) -> [20]byte {
+hash_string_160_3 :: proc(data: string) -> [DIGEST_SIZE_160]byte {
return hash_bytes_160_3(transmute([]byte)(data))
}
// hash_bytes_160_3 will hash the given input and return the
// computed hash
-hash_bytes_160_3 :: proc(data: []byte) -> [20]byte {
- hash: [20]byte
+hash_bytes_160_3 :: proc(data: []byte) -> [DIGEST_SIZE_160]byte {
+ hash: [DIGEST_SIZE_160]byte
ctx: Haval_Context
ctx.hashbitlen = 160
ctx.rounds = 3
@@ -226,10 +301,31 @@ hash_bytes_160_3 :: proc(data: []byte) -> [20]byte {
return hash
}
+// hash_string_to_buffer_160_3 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_160_3 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_160_3(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_160_3 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_160_3 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_160, "Size of destination buffer is smaller than the digest size")
+ ctx: Haval_Context
+ ctx.hashbitlen = 160
+ ctx.rounds = 3
+ init(&ctx)
+ ctx.str_len = u32(len(data))
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_160_3 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_160_3 :: proc(s: io.Stream) -> ([20]byte, bool) {
- hash: [20]byte
+hash_stream_160_3 :: proc(s: io.Stream) -> ([DIGEST_SIZE_160]byte, bool) {
+ hash: [DIGEST_SIZE_160]byte
ctx: Haval_Context
ctx.hashbitlen = 160
ctx.rounds = 3
@@ -250,7 +346,7 @@ hash_stream_160_3 :: proc(s: io.Stream) -> ([20]byte, bool) {
// hash_file_160_3 will read the file provided by the given handle
// and compute a hash
-hash_file_160_3 :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool) {
+hash_file_160_3 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_160]byte, bool) {
if !load_at_once {
return hash_stream_160_3(os.stream_from_handle(hd))
} else {
@@ -258,7 +354,7 @@ hash_file_160_3 :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool
return hash_bytes_160_3(buf[:]), ok
}
}
- return [20]byte{}, false
+ return [DIGEST_SIZE_160]byte{}, false
}
hash_160_3 :: proc {
@@ -266,18 +362,20 @@ hash_160_3 :: proc {
hash_file_160_3,
hash_bytes_160_3,
hash_string_160_3,
+ hash_bytes_to_buffer_160_3,
+ hash_string_to_buffer_160_3,
}
// hash_string_160_4 will hash the given input and return the
// computed hash
-hash_string_160_4 :: proc(data: string) -> [20]byte {
+hash_string_160_4 :: proc(data: string) -> [DIGEST_SIZE_160]byte {
return hash_bytes_160_4(transmute([]byte)(data))
}
// hash_bytes_160_4 will hash the given input and return the
// computed hash
-hash_bytes_160_4 :: proc(data: []byte) -> [20]byte {
- hash: [20]byte
+hash_bytes_160_4 :: proc(data: []byte) -> [DIGEST_SIZE_160]byte {
+ hash: [DIGEST_SIZE_160]byte
ctx: Haval_Context
ctx.hashbitlen = 160
ctx.rounds = 4
@@ -288,10 +386,31 @@ hash_bytes_160_4 :: proc(data: []byte) -> [20]byte {
return hash
}
+// hash_string_to_buffer_160_4 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_160_4 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_160_4(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_160_4 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_160_4 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_160, "Size of destination buffer is smaller than the digest size")
+ ctx: Haval_Context
+ ctx.hashbitlen = 160
+ ctx.rounds = 4
+ init(&ctx)
+ ctx.str_len = u32(len(data))
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_160_4 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_160_4 :: proc(s: io.Stream) -> ([20]byte, bool) {
- hash: [20]byte
+hash_stream_160_4 :: proc(s: io.Stream) -> ([DIGEST_SIZE_160]byte, bool) {
+ hash: [DIGEST_SIZE_160]byte
ctx: Haval_Context
ctx.hashbitlen = 160
ctx.rounds = 4
@@ -312,7 +431,7 @@ hash_stream_160_4 :: proc(s: io.Stream) -> ([20]byte, bool) {
// hash_file_160_4 will read the file provided by the given handle
// and compute a hash
-hash_file_160_4 :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool) {
+hash_file_160_4 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_160]byte, bool) {
if !load_at_once {
return hash_stream_160_4(os.stream_from_handle(hd))
} else {
@@ -320,7 +439,7 @@ hash_file_160_4 :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool
return hash_bytes_160_4(buf[:]), ok
}
}
- return [20]byte{}, false
+ return [DIGEST_SIZE_160]byte{}, false
}
hash_160_4 :: proc {
@@ -328,18 +447,20 @@ hash_160_4 :: proc {
hash_file_160_4,
hash_bytes_160_4,
hash_string_160_4,
+ hash_bytes_to_buffer_160_4,
+ hash_string_to_buffer_160_4,
}
// hash_string_160_5 will hash the given input and return the
// computed hash
-hash_string_160_5 :: proc(data: string) -> [20]byte {
+hash_string_160_5 :: proc(data: string) -> [DIGEST_SIZE_160]byte {
return hash_bytes_160_5(transmute([]byte)(data))
}
// hash_bytes_160_5 will hash the given input and return the
// computed hash
-hash_bytes_160_5 :: proc(data: []byte) -> [20]byte {
- hash: [20]byte
+hash_bytes_160_5 :: proc(data: []byte) -> [DIGEST_SIZE_160]byte {
+ hash: [DIGEST_SIZE_160]byte
ctx: Haval_Context
ctx.hashbitlen = 160
ctx.rounds = 5
@@ -350,10 +471,31 @@ hash_bytes_160_5 :: proc(data: []byte) -> [20]byte {
return hash
}
+// hash_string_to_buffer_160_5 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_160_5 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_160_5(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_160_5 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_160_5 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_160, "Size of destination buffer is smaller than the digest size")
+ ctx: Haval_Context
+ ctx.hashbitlen = 160
+ ctx.rounds = 5
+ init(&ctx)
+ ctx.str_len = u32(len(data))
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_160_5 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_160_5 :: proc(s: io.Stream) -> ([20]byte, bool) {
- hash: [20]byte
+hash_stream_160_5 :: proc(s: io.Stream) -> ([DIGEST_SIZE_160]byte, bool) {
+ hash: [DIGEST_SIZE_160]byte
ctx: Haval_Context
ctx.hashbitlen = 160
ctx.rounds = 5
@@ -374,7 +516,7 @@ hash_stream_160_5 :: proc(s: io.Stream) -> ([20]byte, bool) {
// hash_file_160_5 will read the file provided by the given handle
// and compute a hash
-hash_file_160_5 :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool) {
+hash_file_160_5 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_160]byte, bool) {
if !load_at_once {
return hash_stream_160_5(os.stream_from_handle(hd))
} else {
@@ -382,7 +524,7 @@ hash_file_160_5 :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool
return hash_bytes_160_5(buf[:]), ok
}
}
- return [20]byte{}, false
+ return [DIGEST_SIZE_160]byte{}, false
}
hash_160_5 :: proc {
@@ -390,18 +532,20 @@ hash_160_5 :: proc {
hash_file_160_5,
hash_bytes_160_5,
hash_string_160_5,
+ hash_bytes_to_buffer_160_5,
+ hash_string_to_buffer_160_5,
}
// hash_string_192_3 will hash the given input and return the
// computed hash
-hash_string_192_3 :: proc(data: string) -> [24]byte {
+hash_string_192_3 :: proc(data: string) -> [DIGEST_SIZE_192]byte {
return hash_bytes_192_3(transmute([]byte)(data))
}
// hash_bytes_192_3 will hash the given input and return the
// computed hash
-hash_bytes_192_3 :: proc(data: []byte) -> [24]byte {
- hash: [24]byte
+hash_bytes_192_3 :: proc(data: []byte) -> [DIGEST_SIZE_192]byte {
+ hash: [DIGEST_SIZE_192]byte
ctx: Haval_Context
ctx.hashbitlen = 192
ctx.rounds = 3
@@ -412,10 +556,31 @@ hash_bytes_192_3 :: proc(data: []byte) -> [24]byte {
return hash
}
+// hash_string_to_buffer_192_3 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_192_3 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_192_3(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_192_3 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_192_3 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_192, "Size of destination buffer is smaller than the digest size")
+ ctx: Haval_Context
+ ctx.hashbitlen = 192
+ ctx.rounds = 3
+ init(&ctx)
+ ctx.str_len = u32(len(data))
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_192_3 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_192_3 :: proc(s: io.Stream) -> ([24]byte, bool) {
- hash: [24]byte
+hash_stream_192_3 :: proc(s: io.Stream) -> ([DIGEST_SIZE_192]byte, bool) {
+ hash: [DIGEST_SIZE_192]byte
ctx: Haval_Context
ctx.hashbitlen = 192
ctx.rounds = 3
@@ -436,7 +601,7 @@ hash_stream_192_3 :: proc(s: io.Stream) -> ([24]byte, bool) {
// hash_file_192_3 will read the file provided by the given handle
// and compute a hash
-hash_file_192_3 :: proc(hd: os.Handle, load_at_once := false) -> ([24]byte, bool) {
+hash_file_192_3 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_192]byte, bool) {
if !load_at_once {
return hash_stream_192_3(os.stream_from_handle(hd))
} else {
@@ -444,7 +609,7 @@ hash_file_192_3 :: proc(hd: os.Handle, load_at_once := false) -> ([24]byte, bool
return hash_bytes_192_3(buf[:]), ok
}
}
- return [24]byte{}, false
+ return [DIGEST_SIZE_192]byte{}, false
}
hash_192_3 :: proc {
@@ -452,18 +617,20 @@ hash_192_3 :: proc {
hash_file_192_3,
hash_bytes_192_3,
hash_string_192_3,
+ hash_bytes_to_buffer_192_3,
+ hash_string_to_buffer_192_3,
}
// hash_string_192_4 will hash the given input and return the
// computed hash
-hash_string_192_4 :: proc(data: string) -> [24]byte {
+hash_string_192_4 :: proc(data: string) -> [DIGEST_SIZE_192]byte {
return hash_bytes_192_4(transmute([]byte)(data))
}
// hash_bytes_192_4 will hash the given input and return the
// computed hash
-hash_bytes_192_4 :: proc(data: []byte) -> [24]byte {
- hash: [24]byte
+hash_bytes_192_4 :: proc(data: []byte) -> [DIGEST_SIZE_192]byte {
+ hash: [DIGEST_SIZE_192]byte
ctx: Haval_Context
ctx.hashbitlen = 192
ctx.rounds = 4
@@ -474,10 +641,31 @@ hash_bytes_192_4 :: proc(data: []byte) -> [24]byte {
return hash
}
+// hash_string_to_buffer_192_4 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_192_4 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_192_4(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_192_4 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_192_4 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_192, "Size of destination buffer is smaller than the digest size")
+ ctx: Haval_Context
+ ctx.hashbitlen = 192
+ ctx.rounds = 4
+ init(&ctx)
+ ctx.str_len = u32(len(data))
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_192_4 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_192_4 :: proc(s: io.Stream) -> ([24]byte, bool) {
- hash: [24]byte
+hash_stream_192_4 :: proc(s: io.Stream) -> ([DIGEST_SIZE_192]byte, bool) {
+ hash: [DIGEST_SIZE_192]byte
ctx: Haval_Context
ctx.hashbitlen = 192
ctx.rounds = 4
@@ -498,7 +686,7 @@ hash_stream_192_4 :: proc(s: io.Stream) -> ([24]byte, bool) {
// hash_file_192_4 will read the file provided by the given handle
// and compute a hash
-hash_file_192_4 :: proc(hd: os.Handle, load_at_once := false) -> ([24]byte, bool) {
+hash_file_192_4 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_192]byte, bool) {
if !load_at_once {
return hash_stream_192_4(os.stream_from_handle(hd))
} else {
@@ -506,7 +694,7 @@ hash_file_192_4 :: proc(hd: os.Handle, load_at_once := false) -> ([24]byte, bool
return hash_bytes_192_4(buf[:]), ok
}
}
- return [24]byte{}, false
+ return [DIGEST_SIZE_192]byte{}, false
}
hash_192_4 :: proc {
@@ -514,18 +702,20 @@ hash_192_4 :: proc {
hash_file_192_4,
hash_bytes_192_4,
hash_string_192_4,
+ hash_bytes_to_buffer_192_4,
+ hash_string_to_buffer_192_4,
}
// hash_string_192_5 will hash the given input and return the
// computed hash
-hash_string_192_5 :: proc(data: string) -> [24]byte {
+hash_string_192_5 :: proc(data: string) -> [DIGEST_SIZE_192]byte {
return hash_bytes_192_5(transmute([]byte)(data))
}
-// hash_bytes_224_5 will hash the given input and return the
+// hash_bytes_192_5 will hash the given input and return the
// computed hash
-hash_bytes_192_5 :: proc(data: []byte) -> [24]byte {
- hash: [24]byte
+hash_bytes_192_5 :: proc(data: []byte) -> [DIGEST_SIZE_192]byte {
+ hash: [DIGEST_SIZE_192]byte
ctx: Haval_Context
ctx.hashbitlen = 192
ctx.rounds = 5
@@ -536,10 +726,31 @@ hash_bytes_192_5 :: proc(data: []byte) -> [24]byte {
return hash
}
+// hash_string_to_buffer_192_5 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_192_5 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_192_5(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_192_5 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_192_5 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_192, "Size of destination buffer is smaller than the digest size")
+ ctx: Haval_Context
+ ctx.hashbitlen = 192
+ ctx.rounds = 5
+ init(&ctx)
+ ctx.str_len = u32(len(data))
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_192_5 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_192_5 :: proc(s: io.Stream) -> ([24]byte, bool) {
- hash: [24]byte
+hash_stream_192_5 :: proc(s: io.Stream) -> ([DIGEST_SIZE_192]byte, bool) {
+ hash: [DIGEST_SIZE_192]byte
ctx: Haval_Context
ctx.hashbitlen = 192
ctx.rounds = 5
@@ -560,7 +771,7 @@ hash_stream_192_5 :: proc(s: io.Stream) -> ([24]byte, bool) {
// hash_file_192_5 will read the file provided by the given handle
// and compute a hash
-hash_file_192_5 :: proc(hd: os.Handle, load_at_once := false) -> ([24]byte, bool) {
+hash_file_192_5 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_192]byte, bool) {
if !load_at_once {
return hash_stream_192_5(os.stream_from_handle(hd))
} else {
@@ -568,7 +779,7 @@ hash_file_192_5 :: proc(hd: os.Handle, load_at_once := false) -> ([24]byte, bool
return hash_bytes_192_5(buf[:]), ok
}
}
- return [24]byte{}, false
+ return [DIGEST_SIZE_192]byte{}, false
}
hash_192_5 :: proc {
@@ -576,18 +787,20 @@ hash_192_5 :: proc {
hash_file_192_5,
hash_bytes_192_5,
hash_string_192_5,
+ hash_bytes_to_buffer_192_5,
+ hash_string_to_buffer_192_5,
}
// hash_string_224_3 will hash the given input and return the
// computed hash
-hash_string_224_3 :: proc(data: string) -> [28]byte {
+hash_string_224_3 :: proc(data: string) -> [DIGEST_SIZE_224]byte {
return hash_bytes_224_3(transmute([]byte)(data))
}
// hash_bytes_224_3 will hash the given input and return the
// computed hash
-hash_bytes_224_3 :: proc(data: []byte) -> [28]byte {
- hash: [28]byte
+hash_bytes_224_3 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
+ hash: [DIGEST_SIZE_224]byte
ctx: Haval_Context
ctx.hashbitlen = 224
ctx.rounds = 3
@@ -598,10 +811,31 @@ hash_bytes_224_3 :: proc(data: []byte) -> [28]byte {
return hash
}
+// hash_string_to_buffer_224_3 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_224_3 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_224_3(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_224_3 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_224_3 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_224, "Size of destination buffer is smaller than the digest size")
+ ctx: Haval_Context
+ ctx.hashbitlen = 224
+ ctx.rounds = 3
+ init(&ctx)
+ ctx.str_len = u32(len(data))
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_224_3 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_224_3 :: proc(s: io.Stream) -> ([28]byte, bool) {
- hash: [28]byte
+hash_stream_224_3 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
+ hash: [DIGEST_SIZE_224]byte
ctx: Haval_Context
ctx.hashbitlen = 224
ctx.rounds = 3
@@ -622,7 +856,7 @@ hash_stream_224_3 :: proc(s: io.Stream) -> ([28]byte, bool) {
// hash_file_224_3 will read the file provided by the given handle
// and compute a hash
-hash_file_224_3 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool) {
+hash_file_224_3 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
if !load_at_once {
return hash_stream_224_3(os.stream_from_handle(hd))
} else {
@@ -630,7 +864,7 @@ hash_file_224_3 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool
return hash_bytes_224_3(buf[:]), ok
}
}
- return [28]byte{}, false
+ return [DIGEST_SIZE_224]byte{}, false
}
hash_224_3 :: proc {
@@ -638,18 +872,20 @@ hash_224_3 :: proc {
hash_file_224_3,
hash_bytes_224_3,
hash_string_224_3,
+ hash_bytes_to_buffer_224_3,
+ hash_string_to_buffer_224_3,
}
// hash_string_224_4 will hash the given input and return the
// computed hash
-hash_string_224_4 :: proc(data: string) -> [28]byte {
+hash_string_224_4 :: proc(data: string) -> [DIGEST_SIZE_224]byte {
return hash_bytes_224_4(transmute([]byte)(data))
}
// hash_bytes_224_4 will hash the given input and return the
// computed hash
-hash_bytes_224_4 :: proc(data: []byte) -> [28]byte {
- hash: [28]byte
+hash_bytes_224_4 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
+ hash: [DIGEST_SIZE_224]byte
ctx: Haval_Context
ctx.hashbitlen = 224
ctx.rounds = 4
@@ -660,10 +896,31 @@ hash_bytes_224_4 :: proc(data: []byte) -> [28]byte {
return hash
}
+// hash_string_to_buffer_224_4 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_224_4 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_224_4(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_224_4 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_224_4 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_224, "Size of destination buffer is smaller than the digest size")
+ ctx: Haval_Context
+ ctx.hashbitlen = 224
+ ctx.rounds = 4
+ init(&ctx)
+ ctx.str_len = u32(len(data))
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_224_4 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_224_4 :: proc(s: io.Stream) -> ([28]byte, bool) {
- hash: [28]byte
+hash_stream_224_4 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
+ hash: [DIGEST_SIZE_224]byte
ctx: Haval_Context
ctx.hashbitlen = 224
ctx.rounds = 4
@@ -684,7 +941,7 @@ hash_stream_224_4 :: proc(s: io.Stream) -> ([28]byte, bool) {
// hash_file_224_4 will read the file provided by the given handle
// and compute a hash
-hash_file_224_4 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool) {
+hash_file_224_4 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
if !load_at_once {
return hash_stream_224_4(os.stream_from_handle(hd))
} else {
@@ -692,7 +949,7 @@ hash_file_224_4 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool
return hash_bytes_224_4(buf[:]), ok
}
}
- return [28]byte{}, false
+ return [DIGEST_SIZE_224]byte{}, false
}
hash_224_4 :: proc {
@@ -700,18 +957,20 @@ hash_224_4 :: proc {
hash_file_224_4,
hash_bytes_224_4,
hash_string_224_4,
+ hash_bytes_to_buffer_224_4,
+ hash_string_to_buffer_224_4,
}
// hash_string_224_5 will hash the given input and return the
// computed hash
-hash_string_224_5 :: proc(data: string) -> [28]byte {
+hash_string_224_5 :: proc(data: string) -> [DIGEST_SIZE_224]byte {
return hash_bytes_224_5(transmute([]byte)(data))
}
// hash_bytes_224_5 will hash the given input and return the
// computed hash
-hash_bytes_224_5 :: proc(data: []byte) -> [28]byte {
- hash: [28]byte
+hash_bytes_224_5 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
+ hash: [DIGEST_SIZE_224]byte
ctx: Haval_Context
ctx.hashbitlen = 224
ctx.rounds = 5
@@ -722,10 +981,31 @@ hash_bytes_224_5 :: proc(data: []byte) -> [28]byte {
return hash
}
+// hash_string_to_buffer_224_5 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_224_5 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_224_5(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_224_5 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_224_5 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_224, "Size of destination buffer is smaller than the digest size")
+ ctx: Haval_Context
+ ctx.hashbitlen = 224
+ ctx.rounds = 5
+ init(&ctx)
+ ctx.str_len = u32(len(data))
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_224_5 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_224_5 :: proc(s: io.Stream) -> ([28]byte, bool) {
- hash: [28]byte
+hash_stream_224_5 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
+ hash: [DIGEST_SIZE_224]byte
ctx: Haval_Context
ctx.hashbitlen = 224
ctx.rounds = 5
@@ -746,7 +1026,7 @@ hash_stream_224_5 :: proc(s: io.Stream) -> ([28]byte, bool) {
// hash_file_224_5 will read the file provided by the given handle
// and compute a hash
-hash_file_224_5 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool) {
+hash_file_224_5 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
if !load_at_once {
return hash_stream_224_5(os.stream_from_handle(hd))
} else {
@@ -754,7 +1034,7 @@ hash_file_224_5 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool
return hash_bytes_224_5(buf[:]), ok
}
}
- return [28]byte{}, false
+ return [DIGEST_SIZE_224]byte{}, false
}
hash_224_5 :: proc {
@@ -762,18 +1042,20 @@ hash_224_5 :: proc {
hash_file_224_5,
hash_bytes_224_5,
hash_string_224_5,
+ hash_bytes_to_buffer_224_5,
+ hash_string_to_buffer_224_5,
}
// hash_string_256_3 will hash the given input and return the
// computed hash
-hash_string_256_3 :: proc(data: string) -> [32]byte {
+hash_string_256_3 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
return hash_bytes_256_3(transmute([]byte)(data))
}
// hash_bytes_256_3 will hash the given input and return the
// computed hash
-hash_bytes_256_3 :: proc(data: []byte) -> [32]byte {
- hash: [32]byte
+hash_bytes_256_3 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+ hash: [DIGEST_SIZE_256]byte
ctx: Haval_Context
ctx.hashbitlen = 256
ctx.rounds = 3
@@ -784,10 +1066,31 @@ hash_bytes_256_3 :: proc(data: []byte) -> [32]byte {
return hash
}
+// hash_string_to_buffer_256_3 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256_3 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_256_3(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_256_3 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256_3 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+ ctx: Haval_Context
+ ctx.hashbitlen = 256
+ ctx.rounds = 3
+ init(&ctx)
+ ctx.str_len = u32(len(data))
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_256_3 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_256_3 :: proc(s: io.Stream) -> ([32]byte, bool) {
- hash: [32]byte
+hash_stream_256_3 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+ hash: [DIGEST_SIZE_256]byte
ctx: Haval_Context
ctx.hashbitlen = 256
ctx.rounds = 3
@@ -808,7 +1111,7 @@ hash_stream_256_3 :: proc(s: io.Stream) -> ([32]byte, bool) {
// hash_file_256_3 will read the file provided by the given handle
// and compute a hash
-hash_file_256_3 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256_3 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
if !load_at_once {
return hash_stream_256_3(os.stream_from_handle(hd))
} else {
@@ -816,7 +1119,7 @@ hash_file_256_3 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool
return hash_bytes_256_3(buf[:]), ok
}
}
- return [32]byte{}, false
+ return [DIGEST_SIZE_256]byte{}, false
}
hash_256_3 :: proc {
@@ -824,18 +1127,20 @@ hash_256_3 :: proc {
hash_file_256_3,
hash_bytes_256_3,
hash_string_256_3,
+ hash_bytes_to_buffer_256_3,
+ hash_string_to_buffer_256_3,
}
// hash_string_256_4 will hash the given input and return the
// computed hash
-hash_string_256_4 :: proc(data: string) -> [32]byte {
+hash_string_256_4 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
return hash_bytes_256_4(transmute([]byte)(data))
}
// hash_bytes_256_4 will hash the given input and return the
// computed hash
-hash_bytes_256_4 :: proc(data: []byte) -> [32]byte {
- hash: [32]byte
+hash_bytes_256_4 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+ hash: [DIGEST_SIZE_256]byte
ctx: Haval_Context
ctx.hashbitlen = 256
ctx.rounds = 4
@@ -846,10 +1151,31 @@ hash_bytes_256_4 :: proc(data: []byte) -> [32]byte {
return hash
}
+// hash_string_to_buffer_256_4 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256_4 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_256_4(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_256_4 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256_4 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+ ctx: Haval_Context
+ ctx.hashbitlen = 256
+ ctx.rounds = 4
+ init(&ctx)
+ ctx.str_len = u32(len(data))
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_256_4 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_256_4 :: proc(s: io.Stream) -> ([32]byte, bool) {
- hash: [32]byte
+hash_stream_256_4 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+ hash: [DIGEST_SIZE_256]byte
ctx: Haval_Context
ctx.hashbitlen = 256
ctx.rounds = 4
@@ -870,7 +1196,7 @@ hash_stream_256_4 :: proc(s: io.Stream) -> ([32]byte, bool) {
// hash_file_256_4 will read the file provided by the given handle
// and compute a hash
-hash_file_256_4 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256_4 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
if !load_at_once {
return hash_stream_256_4(os.stream_from_handle(hd))
} else {
@@ -878,7 +1204,7 @@ hash_file_256_4 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool
return hash_bytes_256_4(buf[:]), ok
}
}
- return [32]byte{}, false
+ return [DIGEST_SIZE_256]byte{}, false
}
hash_256_4 :: proc {
@@ -886,18 +1212,20 @@ hash_256_4 :: proc {
hash_file_256_4,
hash_bytes_256_4,
hash_string_256_4,
+ hash_bytes_to_buffer_256_4,
+ hash_string_to_buffer_256_4,
}
// hash_string_256_5 will hash the given input and return the
// computed hash
-hash_string_256_5 :: proc(data: string) -> [32]byte {
+hash_string_256_5 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
return hash_bytes_256_5(transmute([]byte)(data))
}
// hash_bytes_256_5 will hash the given input and return the
// computed hash
-hash_bytes_256_5 :: proc(data: []byte) -> [32]byte {
- hash: [32]byte
+hash_bytes_256_5 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+ hash: [DIGEST_SIZE_256]byte
ctx: Haval_Context
ctx.hashbitlen = 256
ctx.rounds = 5
@@ -908,10 +1236,32 @@ hash_bytes_256_5 :: proc(data: []byte) -> [32]byte {
return hash
}
+// hash_string_to_buffer_256_5 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256_5 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_256_5(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_256_5 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256_5 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+ ctx: Haval_Context
+ ctx.hashbitlen = 256
+ ctx.rounds = 5
+ init(&ctx)
+ ctx.str_len = u32(len(data))
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
+
// hash_stream_256_5 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_256_5 :: proc(s: io.Stream) -> ([32]byte, bool) {
- hash: [32]byte
+hash_stream_256_5 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+ hash: [DIGEST_SIZE_256]byte
ctx: Haval_Context
ctx.hashbitlen = 256
ctx.rounds = 5
@@ -932,7 +1282,7 @@ hash_stream_256_5 :: proc(s: io.Stream) -> ([32]byte, bool) {
// hash_file_256_5 will read the file provided by the given handle
// and compute a hash
-hash_file_256_5 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256_5 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
if !load_at_once {
return hash_stream_256_5(os.stream_from_handle(hd))
} else {
@@ -940,7 +1290,7 @@ hash_file_256_5 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool
return hash_bytes_256_5(buf[:]), ok
}
}
- return [32]byte{}, false
+ return [DIGEST_SIZE_256]byte{}, false
}
hash_256_5 :: proc {
@@ -948,6 +1298,8 @@ hash_256_5 :: proc {
hash_file_256_5,
hash_bytes_256_5,
hash_string_256_5,
+ hash_bytes_to_buffer_256_5,
+ hash_string_to_buffer_256_5,
}
/*
diff --git a/core/crypto/jh/jh.odin b/core/crypto/jh/jh.odin
index f251424d2..4ebc0e5cb 100644
--- a/core/crypto/jh/jh.odin
+++ b/core/crypto/jh/jh.odin
@@ -17,16 +17,21 @@ import "core:io"
High level API
*/
+DIGEST_SIZE_224 :: 28
+DIGEST_SIZE_256 :: 32
+DIGEST_SIZE_384 :: 48
+DIGEST_SIZE_512 :: 64
+
// hash_string_224 will hash the given input and return the
// computed hash
-hash_string_224 :: proc(data: string) -> [28]byte {
+hash_string_224 :: proc(data: string) -> [DIGEST_SIZE_224]byte {
return hash_bytes_224(transmute([]byte)(data))
}
// hash_bytes_224 will hash the given input and return the
// computed hash
-hash_bytes_224 :: proc(data: []byte) -> [28]byte {
- hash: [28]byte
+hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
+ hash: [DIGEST_SIZE_224]byte
ctx: Jh_Context
ctx.hashbitlen = 224
init(&ctx)
@@ -35,10 +40,29 @@ hash_bytes_224 :: proc(data: []byte) -> [28]byte {
return hash
}
+// hash_string_to_buffer_224 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_224(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_224 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_224, "Size of destination buffer is smaller than the digest size")
+ ctx: Jh_Context
+ ctx.hashbitlen = 224
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_224 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
- hash: [28]byte
+hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
+ hash: [DIGEST_SIZE_224]byte
ctx: Jh_Context
ctx.hashbitlen = 224
init(&ctx)
@@ -57,7 +81,7 @@ hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
// hash_file_224 will read the file provided by the given handle
// and compute a hash
-hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool) {
+hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
if !load_at_once {
return hash_stream_224(os.stream_from_handle(hd))
} else {
@@ -65,7 +89,7 @@ hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool)
return hash_bytes_224(buf[:]), ok
}
}
- return [28]byte{}, false
+ return [DIGEST_SIZE_224]byte{}, false
}
hash_224 :: proc {
@@ -73,18 +97,20 @@ hash_224 :: proc {
hash_file_224,
hash_bytes_224,
hash_string_224,
+ hash_bytes_to_buffer_224,
+ hash_string_to_buffer_224,
}
// hash_string_256 will hash the given input and return the
// computed hash
-hash_string_256 :: proc(data: string) -> [32]byte {
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
return hash_bytes_256(transmute([]byte)(data))
}
// hash_bytes_256 will hash the given input and return the
// computed hash
-hash_bytes_256 :: proc(data: []byte) -> [32]byte {
- hash: [32]byte
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+ hash: [DIGEST_SIZE_256]byte
ctx: Jh_Context
ctx.hashbitlen = 256
init(&ctx)
@@ -93,10 +119,29 @@ hash_bytes_256 :: proc(data: []byte) -> [32]byte {
return hash
}
+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_256(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+ ctx: Jh_Context
+ ctx.hashbitlen = 256
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_256 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
- hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+ hash: [DIGEST_SIZE_256]byte
ctx: Jh_Context
ctx.hashbitlen = 256
init(&ctx)
@@ -115,7 +160,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
// hash_file_256 will read the file provided by the given handle
// and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
if !load_at_once {
return hash_stream_256(os.stream_from_handle(hd))
} else {
@@ -123,7 +168,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
return hash_bytes_256(buf[:]), ok
}
}
- return [32]byte{}, false
+ return [DIGEST_SIZE_256]byte{}, false
}
hash_256 :: proc {
@@ -131,18 +176,20 @@ hash_256 :: proc {
hash_file_256,
hash_bytes_256,
hash_string_256,
+ hash_bytes_to_buffer_256,
+ hash_string_to_buffer_256,
}
// hash_string_384 will hash the given input and return the
// computed hash
-hash_string_384 :: proc(data: string) -> [48]byte {
+hash_string_384 :: proc(data: string) -> [DIGEST_SIZE_384]byte {
return hash_bytes_384(transmute([]byte)(data))
}
// hash_bytes_384 will hash the given input and return the
// computed hash
-hash_bytes_384 :: proc(data: []byte) -> [48]byte {
- hash: [48]byte
+hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
+ hash: [DIGEST_SIZE_384]byte
ctx: Jh_Context
ctx.hashbitlen = 384
init(&ctx)
@@ -151,10 +198,29 @@ hash_bytes_384 :: proc(data: []byte) -> [48]byte {
return hash
}
+// hash_string_to_buffer_384 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_384(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_384 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_384, "Size of destination buffer is smaller than the digest size")
+ ctx: Jh_Context
+ ctx.hashbitlen = 384
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_384 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
- hash: [48]byte
+hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
+ hash: [DIGEST_SIZE_384]byte
ctx: Jh_Context
ctx.hashbitlen = 384
init(&ctx)
@@ -173,7 +239,7 @@ hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
// hash_file_384 will read the file provided by the given handle
// and compute a hash
-hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool) {
+hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
if !load_at_once {
return hash_stream_384(os.stream_from_handle(hd))
} else {
@@ -181,7 +247,7 @@ hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool)
return hash_bytes_384(buf[:]), ok
}
}
- return [48]byte{}, false
+ return [DIGEST_SIZE_384]byte{}, false
}
hash_384 :: proc {
@@ -189,18 +255,20 @@ hash_384 :: proc {
hash_file_384,
hash_bytes_384,
hash_string_384,
+ hash_bytes_to_buffer_384,
+ hash_string_to_buffer_384,
}
// hash_string_512 will hash the given input and return the
// computed hash
-hash_string_512 :: proc(data: string) -> [64]byte {
+hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
return hash_bytes_512(transmute([]byte)(data))
}
// hash_bytes_512 will hash the given input and return the
// computed hash
-hash_bytes_512 :: proc(data: []byte) -> [64]byte {
- hash: [64]byte
+hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
+ hash: [DIGEST_SIZE_512]byte
ctx: Jh_Context
ctx.hashbitlen = 512
init(&ctx)
@@ -209,10 +277,29 @@ hash_bytes_512 :: proc(data: []byte) -> [64]byte {
return hash
}
+// hash_string_to_buffer_512 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_512(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_512 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
+ ctx: Jh_Context
+ ctx.hashbitlen = 512
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_512 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
- hash: [64]byte
+hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
+ hash: [DIGEST_SIZE_512]byte
ctx: Jh_Context
ctx.hashbitlen = 512
init(&ctx)
@@ -231,7 +318,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
// hash_file_512 will read the file provided by the given handle
// and compute a hash
-hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
if !load_at_once {
return hash_stream_512(os.stream_from_handle(hd))
} else {
@@ -239,7 +326,7 @@ hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool)
return hash_bytes_512(buf[:]), ok
}
}
- return [64]byte{}, false
+ return [DIGEST_SIZE_512]byte{}, false
}
hash_512 :: proc {
@@ -247,6 +334,8 @@ hash_512 :: proc {
hash_file_512,
hash_bytes_512,
hash_string_512,
+ hash_bytes_to_buffer_512,
+ hash_string_to_buffer_512,
}
/*
diff --git a/core/crypto/keccak/keccak.odin b/core/crypto/keccak/keccak.odin
index 19c4c7dda..f5d4826b1 100644
--- a/core/crypto/keccak/keccak.odin
+++ b/core/crypto/keccak/keccak.odin
@@ -21,18 +21,23 @@ import "../_sha3"
High level API
*/
+DIGEST_SIZE_224 :: 28
+DIGEST_SIZE_256 :: 32
+DIGEST_SIZE_384 :: 48
+DIGEST_SIZE_512 :: 64
+
// hash_string_224 will hash the given input and return the
// computed hash
-hash_string_224 :: proc(data: string) -> [28]byte {
+hash_string_224 :: proc(data: string) -> [DIGEST_SIZE_224]byte {
return hash_bytes_224(transmute([]byte)(data))
}
// hash_bytes_224 will hash the given input and return the
// computed hash
-hash_bytes_224 :: proc(data: []byte) -> [28]byte {
- hash: [28]byte
+hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
+ hash: [DIGEST_SIZE_224]byte
ctx: _sha3.Sha3_Context
- ctx.mdlen = 28
+ ctx.mdlen = DIGEST_SIZE_224
ctx.is_keccak = true
_sha3.init(&ctx)
_sha3.update(&ctx, data)
@@ -40,12 +45,32 @@ hash_bytes_224 :: proc(data: []byte) -> [28]byte {
return hash
}
+// hash_string_to_buffer_224 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_224(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_224 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_224, "Size of destination buffer is smaller than the digest size")
+ ctx: _sha3.Sha3_Context
+ ctx.mdlen = DIGEST_SIZE_224
+ ctx.is_keccak = true
+ _sha3.init(&ctx)
+ _sha3.update(&ctx, data)
+ _sha3.final(&ctx, hash)
+}
+
// hash_stream_224 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
- hash: [28]byte
+hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
+ hash: [DIGEST_SIZE_224]byte
ctx: _sha3.Sha3_Context
- ctx.mdlen = 28
+ ctx.mdlen = DIGEST_SIZE_224
ctx.is_keccak = true
_sha3.init(&ctx)
buf := make([]byte, 512)
@@ -63,7 +88,7 @@ hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
// hash_file_224 will read the file provided by the given handle
// and compute a hash
-hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool) {
+hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
if !load_at_once {
return hash_stream_224(os.stream_from_handle(hd))
} else {
@@ -71,7 +96,7 @@ hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool)
return hash_bytes_224(buf[:]), ok
}
}
- return [28]byte{}, false
+ return [DIGEST_SIZE_224]byte{}, false
}
hash_224 :: proc {
@@ -79,20 +104,22 @@ hash_224 :: proc {
hash_file_224,
hash_bytes_224,
hash_string_224,
+ hash_bytes_to_buffer_224,
+ hash_string_to_buffer_224,
}
// hash_string_256 will hash the given input and return the
// computed hash
-hash_string_256 :: proc(data: string) -> [32]byte {
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
return hash_bytes_256(transmute([]byte)(data))
}
// hash_bytes_256 will hash the given input and return the
// computed hash
-hash_bytes_256 :: proc(data: []byte) -> [32]byte {
- hash: [32]byte
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+ hash: [DIGEST_SIZE_256]byte
ctx: _sha3.Sha3_Context
- ctx.mdlen = 32
+ ctx.mdlen = DIGEST_SIZE_256
ctx.is_keccak = true
_sha3.init(&ctx)
_sha3.update(&ctx, data)
@@ -100,12 +127,32 @@ hash_bytes_256 :: proc(data: []byte) -> [32]byte {
return hash
}
+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_256(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+ ctx: _sha3.Sha3_Context
+ ctx.mdlen = DIGEST_SIZE_256
+ ctx.is_keccak = true
+ _sha3.init(&ctx)
+ _sha3.update(&ctx, data)
+ _sha3.final(&ctx, hash)
+}
+
// hash_stream_256 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
- hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+ hash: [DIGEST_SIZE_256]byte
ctx: _sha3.Sha3_Context
- ctx.mdlen = 32
+ ctx.mdlen = DIGEST_SIZE_256
ctx.is_keccak = true
_sha3.init(&ctx)
buf := make([]byte, 512)
@@ -123,7 +170,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
// hash_file_256 will read the file provided by the given handle
// and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
if !load_at_once {
return hash_stream_256(os.stream_from_handle(hd))
} else {
@@ -131,7 +178,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
return hash_bytes_256(buf[:]), ok
}
}
- return [32]byte{}, false
+ return [DIGEST_SIZE_256]byte{}, false
}
hash_256 :: proc {
@@ -139,20 +186,22 @@ hash_256 :: proc {
hash_file_256,
hash_bytes_256,
hash_string_256,
+ hash_bytes_to_buffer_256,
+ hash_string_to_buffer_256,
}
// hash_string_384 will hash the given input and return the
// computed hash
-hash_string_384 :: proc(data: string) -> [48]byte {
+hash_string_384 :: proc(data: string) -> [DIGEST_SIZE_384]byte {
return hash_bytes_384(transmute([]byte)(data))
}
// hash_bytes_384 will hash the given input and return the
// computed hash
-hash_bytes_384 :: proc(data: []byte) -> [48]byte {
- hash: [48]byte
+hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
+ hash: [DIGEST_SIZE_384]byte
ctx: _sha3.Sha3_Context
- ctx.mdlen = 48
+ ctx.mdlen = DIGEST_SIZE_384
ctx.is_keccak = true
_sha3.init(&ctx)
_sha3.update(&ctx, data)
@@ -160,12 +209,32 @@ hash_bytes_384 :: proc(data: []byte) -> [48]byte {
return hash
}
+// hash_string_to_buffer_384 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_384(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_384 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_384, "Size of destination buffer is smaller than the digest size")
+ ctx: _sha3.Sha3_Context
+ ctx.mdlen = DIGEST_SIZE_384
+ ctx.is_keccak = true
+ _sha3.init(&ctx)
+ _sha3.update(&ctx, data)
+ _sha3.final(&ctx, hash)
+}
+
// hash_stream_384 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
- hash: [48]byte
+hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
+ hash: [DIGEST_SIZE_384]byte
ctx: _sha3.Sha3_Context
- ctx.mdlen = 48
+ ctx.mdlen = DIGEST_SIZE_384
ctx.is_keccak = true
_sha3.init(&ctx)
buf := make([]byte, 512)
@@ -183,7 +252,7 @@ hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
// hash_file_384 will read the file provided by the given handle
// and compute a hash
-hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool) {
+hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
if !load_at_once {
return hash_stream_384(os.stream_from_handle(hd))
} else {
@@ -191,7 +260,7 @@ hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool)
return hash_bytes_384(buf[:]), ok
}
}
- return [48]byte{}, false
+ return [DIGEST_SIZE_384]byte{}, false
}
hash_384 :: proc {
@@ -199,20 +268,22 @@ hash_384 :: proc {
hash_file_384,
hash_bytes_384,
hash_string_384,
+ hash_bytes_to_buffer_384,
+ hash_string_to_buffer_384,
}
// hash_string_512 will hash the given input and return the
// computed hash
-hash_string_512 :: proc(data: string) -> [64]byte {
+hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
return hash_bytes_512(transmute([]byte)(data))
}
// hash_bytes_512 will hash the given input and return the
// computed hash
-hash_bytes_512 :: proc(data: []byte) -> [64]byte {
- hash: [64]byte
+hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
+ hash: [DIGEST_SIZE_512]byte
ctx: _sha3.Sha3_Context
- ctx.mdlen = 64
+ ctx.mdlen = DIGEST_SIZE_512
ctx.is_keccak = true
_sha3.init(&ctx)
_sha3.update(&ctx, data)
@@ -220,12 +291,32 @@ hash_bytes_512 :: proc(data: []byte) -> [64]byte {
return hash
}
+// hash_string_to_buffer_512 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_512(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_512 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
+ ctx: _sha3.Sha3_Context
+ ctx.mdlen = DIGEST_SIZE_512
+ ctx.is_keccak = true
+ _sha3.init(&ctx)
+ _sha3.update(&ctx, data)
+ _sha3.final(&ctx, hash)
+}
+
// hash_stream_512 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
- hash: [64]byte
+hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
+ hash: [DIGEST_SIZE_512]byte
ctx: _sha3.Sha3_Context
- ctx.mdlen = 64
+ ctx.mdlen = DIGEST_SIZE_512
ctx.is_keccak = true
_sha3.init(&ctx)
buf := make([]byte, 512)
@@ -243,7 +334,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
// hash_file_512 will read the file provided by the given handle
// and compute a hash
-hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
if !load_at_once {
return hash_stream_512(os.stream_from_handle(hd))
} else {
@@ -251,7 +342,7 @@ hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool)
return hash_bytes_512(buf[:]), ok
}
}
- return [64]byte{}, false
+ return [DIGEST_SIZE_512]byte{}, false
}
hash_512 :: proc {
@@ -259,13 +350,15 @@ hash_512 :: proc {
hash_file_512,
hash_bytes_512,
hash_string_512,
+ hash_bytes_to_buffer_512,
+ hash_string_to_buffer_512,
}
/*
Low level API
*/
-Sha3_Context :: _sha3.Sha3_Context
+Keccak_Context :: _sha3.Sha3_Context
init :: proc(ctx: ^_sha3.Sha3_Context) {
ctx.is_keccak = true
diff --git a/core/crypto/md2/md2.odin b/core/crypto/md2/md2.odin
index 5e027c13c..102c1b8b4 100644
--- a/core/crypto/md2/md2.odin
+++ b/core/crypto/md2/md2.odin
@@ -17,16 +17,18 @@ import "core:io"
High level API
*/
+DIGEST_SIZE :: 16
+
// hash_string will hash the given input and return the
// computed hash
-hash_string :: proc(data: string) -> [16]byte {
+hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
return hash_bytes(transmute([]byte)(data))
}
// hash_bytes will hash the given input and return the
// computed hash
-hash_bytes :: proc(data: []byte) -> [16]byte {
- hash: [16]byte
+hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
+ hash: [DIGEST_SIZE]byte
ctx: Md2_Context
// init(&ctx) No-op
update(&ctx, data)
@@ -34,10 +36,28 @@ hash_bytes :: proc(data: []byte) -> [16]byte {
return hash
}
+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+ ctx: Md2_Context
+ // init(&ctx) No-op
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream will read the stream in chunks and compute a
// hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([16]byte, bool) {
- hash: [16]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+ hash: [DIGEST_SIZE]byte
ctx: Md2_Context
// init(&ctx) No-op
buf := make([]byte, 512)
@@ -55,7 +75,7 @@ hash_stream :: proc(s: io.Stream) -> ([16]byte, bool) {
// hash_file will read the file provided by the given handle
// and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
if !load_at_once {
return hash_stream(os.stream_from_handle(hd))
} else {
@@ -63,7 +83,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
return hash_bytes(buf[:]), ok
}
}
- return [16]byte{}, false
+ return [DIGEST_SIZE]byte{}, false
}
hash :: proc {
@@ -71,6 +91,8 @@ hash :: proc {
hash_file,
hash_bytes,
hash_string,
+ hash_bytes_to_buffer,
+ hash_string_to_buffer,
}
/*
@@ -86,7 +108,7 @@ update :: proc(ctx: ^Md2_Context, data: []byte) {
for i := 0; i < len(data); i += 1 {
ctx.data[ctx.datalen] = data[i]
ctx.datalen += 1
- if (ctx.datalen == 16) {
+ if (ctx.datalen == DIGEST_SIZE) {
transform(ctx, ctx.data[:])
ctx.datalen = 0
}
@@ -94,14 +116,14 @@ update :: proc(ctx: ^Md2_Context, data: []byte) {
}
final :: proc(ctx: ^Md2_Context, hash: []byte) {
- to_pad := byte(16 - ctx.datalen)
- for ctx.datalen < 16 {
+ to_pad := byte(DIGEST_SIZE - ctx.datalen)
+ for ctx.datalen < DIGEST_SIZE {
ctx.data[ctx.datalen] = to_pad
ctx.datalen += 1
}
transform(ctx, ctx.data[:])
transform(ctx, ctx.checksum[:])
- for i := 0; i < 16; i += 1 {
+ for i := 0; i < DIGEST_SIZE; i += 1 {
hash[i] = ctx.state[i]
}
}
@@ -111,9 +133,9 @@ final :: proc(ctx: ^Md2_Context, hash: []byte) {
*/
Md2_Context :: struct {
- data: [16]byte,
- state: [16 * 3]byte,
- checksum: [16]byte,
+ data: [DIGEST_SIZE]byte,
+ state: [DIGEST_SIZE * 3]byte,
+ checksum: [DIGEST_SIZE]byte,
datalen: int,
}
@@ -140,20 +162,20 @@ PI_TABLE := [?]byte {
transform :: proc(ctx: ^Md2_Context, data: []byte) {
j,k,t: byte
- for j = 0; j < 16; j += 1 {
- ctx.state[j + 16] = data[j]
- ctx.state[j + 16 * 2] = (ctx.state[j + 16] ~ ctx.state[j])
+ for j = 0; j < DIGEST_SIZE; j += 1 {
+ ctx.state[j + DIGEST_SIZE] = data[j]
+ ctx.state[j + DIGEST_SIZE * 2] = (ctx.state[j + DIGEST_SIZE] ~ ctx.state[j])
}
t = 0
- for j = 0; j < 16 + 2; j += 1 {
- for k = 0; k < 16 * 3; k += 1 {
+ for j = 0; j < DIGEST_SIZE + 2; j += 1 {
+ for k = 0; k < DIGEST_SIZE * 3; k += 1 {
ctx.state[k] ~= PI_TABLE[t]
t = ctx.state[k]
}
t = (t + j) & 0xff
}
- t = ctx.checksum[16 - 1]
- for j = 0; j < 16; j += 1 {
+ t = ctx.checksum[DIGEST_SIZE - 1]
+ for j = 0; j < DIGEST_SIZE; j += 1 {
ctx.checksum[j] ~= PI_TABLE[data[j] ~ t]
t = ctx.checksum[j]
}
diff --git a/core/crypto/md4/md4.odin b/core/crypto/md4/md4.odin
index 813db578a..d944daa1d 100644
--- a/core/crypto/md4/md4.odin
+++ b/core/crypto/md4/md4.odin
@@ -21,16 +21,18 @@ import "../util"
High level API
*/
+DIGEST_SIZE :: 16
+
// hash_string will hash the given input and return the
// computed hash
-hash_string :: proc(data: string) -> [16]byte {
+hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
return hash_bytes(transmute([]byte)(data))
}
// hash_bytes will hash the given input and return the
// computed hash
-hash_bytes :: proc(data: []byte) -> [16]byte {
- hash: [16]byte
+hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
+ hash: [DIGEST_SIZE]byte
ctx: Md4_Context
init(&ctx)
update(&ctx, data)
@@ -38,10 +40,28 @@ hash_bytes :: proc(data: []byte) -> [16]byte {
return hash
}
+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+ ctx: Md4_Context
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream will read the stream in chunks and compute a
// hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([16]byte, bool) {
- hash: [16]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+ hash: [DIGEST_SIZE]byte
ctx: Md4_Context
init(&ctx)
buf := make([]byte, 512)
@@ -59,7 +79,7 @@ hash_stream :: proc(s: io.Stream) -> ([16]byte, bool) {
// hash_file will read the file provided by the given handle
// and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
if !load_at_once {
return hash_stream(os.stream_from_handle(hd))
} else {
@@ -67,7 +87,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
return hash_bytes(buf[:]), ok
}
}
- return [16]byte{}, false
+ return [DIGEST_SIZE]byte{}, false
}
hash :: proc {
@@ -75,6 +95,8 @@ hash :: proc {
hash_file,
hash_bytes,
hash_string,
+ hash_bytes_to_buffer,
+ hash_string_to_buffer,
}
/*
@@ -171,9 +193,9 @@ HH :: #force_inline proc "contextless"(a, b, c, d, x: u32, s : int) -> u32 {
transform :: proc(ctx: ^Md4_Context, data: []byte) {
a, b, c, d, i, j: u32
- m: [16]u32
+ m: [DIGEST_SIZE]u32
- for i, j = 0, 0; i < 16; i += 1 {
+ for i, j = 0, 0; i < DIGEST_SIZE; i += 1 {
m[i] = u32(data[j]) | (u32(data[j + 1]) << 8) | (u32(data[j + 2]) << 16) | (u32(data[j + 3]) << 24)
j += 4
}
diff --git a/core/crypto/md5/md5.odin b/core/crypto/md5/md5.odin
index a41ed16f8..9129e6384 100644
--- a/core/crypto/md5/md5.odin
+++ b/core/crypto/md5/md5.odin
@@ -20,16 +20,18 @@ import "../util"
High level API
*/
+DIGEST_SIZE :: 16
+
// hash_string will hash the given input and return the
// computed hash
-hash_string :: proc(data: string) -> [16]byte {
+hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
return hash_bytes(transmute([]byte)(data))
}
// hash_bytes will hash the given input and return the
// computed hash
-hash_bytes :: proc(data: []byte) -> [16]byte {
- hash: [16]byte
+hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
+ hash: [DIGEST_SIZE]byte
ctx: Md5_Context
init(&ctx)
update(&ctx, data)
@@ -37,10 +39,28 @@ hash_bytes :: proc(data: []byte) -> [16]byte {
return hash
}
+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+ ctx: Md5_Context
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream will read the stream in chunks and compute a
// hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([16]byte, bool) {
- hash: [16]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+ hash: [DIGEST_SIZE]byte
ctx: Md5_Context
init(&ctx)
buf := make([]byte, 512)
@@ -58,7 +78,7 @@ hash_stream :: proc(s: io.Stream) -> ([16]byte, bool) {
// hash_file will read the file provided by the given handle
// and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
if !load_at_once {
return hash_stream(os.stream_from_handle(hd))
} else {
@@ -66,7 +86,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
return hash_bytes(buf[:]), ok
}
}
- return [16]byte{}, false
+ return [DIGEST_SIZE]byte{}, false
}
hash :: proc {
@@ -74,6 +94,8 @@ hash :: proc {
hash_file,
hash_bytes,
hash_string,
+ hash_bytes_to_buffer,
+ hash_string_to_buffer,
}
/*
@@ -176,9 +198,9 @@ II :: #force_inline proc "contextless" (a, b, c, d, m: u32, s: int, t: u32) -> u
transform :: proc(ctx: ^Md5_Context, data: []byte) {
i, j: u32
- m: [16]u32
+ m: [DIGEST_SIZE]u32
- for i, j = 0, 0; i < 16; i+=1 {
+ for i, j = 0, 0; i < DIGEST_SIZE; i+=1 {
m[i] = u32(data[j]) + u32(data[j + 1]) << 8 + u32(data[j + 2]) << 16 + u32(data[j + 3]) << 24
j += 4
}
diff --git a/core/crypto/ripemd/ripemd.odin b/core/crypto/ripemd/ripemd.odin
index a9a5d1126..c475c4803 100644
--- a/core/crypto/ripemd/ripemd.odin
+++ b/core/crypto/ripemd/ripemd.odin
@@ -19,16 +19,21 @@ import "../util"
High level API
*/
+DIGEST_SIZE_128 :: 16
+DIGEST_SIZE_160 :: 20
+DIGEST_SIZE_256 :: 32
+DIGEST_SIZE_320 :: 40
+
// hash_string_128 will hash the given input and return the
// computed hash
-hash_string_128 :: proc(data: string) -> [16]byte {
+hash_string_128 :: proc(data: string) -> [DIGEST_SIZE_128]byte {
return hash_bytes_128(transmute([]byte)(data))
}
// hash_bytes_128 will hash the given input and return the
// computed hash
-hash_bytes_128 :: proc(data: []byte) -> [16]byte {
- hash: [16]byte
+hash_bytes_128 :: proc(data: []byte) -> [DIGEST_SIZE_128]byte {
+ hash: [DIGEST_SIZE_128]byte
ctx: Ripemd128_Context
init(&ctx)
update(&ctx, data)
@@ -36,10 +41,28 @@ hash_bytes_128 :: proc(data: []byte) -> [16]byte {
return hash
}
+// hash_string_to_buffer_128 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_128 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_128(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_128 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_128 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_128, "Size of destination buffer is smaller than the digest size")
+ ctx: Ripemd128_Context
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_128 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_128 :: proc(s: io.Stream) -> ([16]byte, bool) {
- hash: [16]byte
+hash_stream_128 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
+ hash: [DIGEST_SIZE_128]byte
ctx: Ripemd128_Context
init(&ctx)
buf := make([]byte, 512)
@@ -57,7 +80,7 @@ hash_stream_128 :: proc(s: io.Stream) -> ([16]byte, bool) {
// hash_file_128 will read the file provided by the given handle
// and compute a hash
-hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
+hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_128]byte, bool) {
if !load_at_once {
return hash_stream_128(os.stream_from_handle(hd))
} else {
@@ -65,7 +88,7 @@ hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool)
return hash_bytes_128(buf[:]), ok
}
}
- return [16]byte{}, false
+ return [DIGEST_SIZE_128]byte{}, false
}
hash_128 :: proc {
@@ -73,18 +96,20 @@ hash_128 :: proc {
hash_file_128,
hash_bytes_128,
hash_string_128,
+ hash_bytes_to_buffer_128,
+ hash_string_to_buffer_128,
}
// hash_string_160 will hash the given input and return the
// computed hash
-hash_string_160 :: proc(data: string) -> [20]byte {
+hash_string_160 :: proc(data: string) -> [DIGEST_SIZE_160]byte {
return hash_bytes_160(transmute([]byte)(data))
}
// hash_bytes_160 will hash the given input and return the
// computed hash
-hash_bytes_160 :: proc(data: []byte) -> [20]byte {
- hash: [20]byte
+hash_bytes_160 :: proc(data: []byte) -> [DIGEST_SIZE_160]byte {
+ hash: [DIGEST_SIZE_160]byte
ctx: Ripemd160_Context
init(&ctx)
update(&ctx, data)
@@ -92,10 +117,28 @@ hash_bytes_160 :: proc(data: []byte) -> [20]byte {
return hash
}
+// hash_string_to_buffer_160 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_160 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_160(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_160 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_160 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_160, "Size of destination buffer is smaller than the digest size")
+ ctx: Ripemd160_Context
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_160 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_160 :: proc(s: io.Stream) -> ([20]byte, bool) {
- hash: [20]byte
+hash_stream_160 :: proc(s: io.Stream) -> ([DIGEST_SIZE_160]byte, bool) {
+ hash: [DIGEST_SIZE_160]byte
ctx: Ripemd160_Context
init(&ctx)
buf := make([]byte, 512)
@@ -113,7 +156,7 @@ hash_stream_160 :: proc(s: io.Stream) -> ([20]byte, bool) {
// hash_file_160 will read the file provided by the given handle
// and compute a hash
-hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool) {
+hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_160]byte, bool) {
if !load_at_once {
return hash_stream_160(os.stream_from_handle(hd))
} else {
@@ -121,7 +164,7 @@ hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool)
return hash_bytes_160(buf[:]), ok
}
}
- return [20]byte{}, false
+ return [DIGEST_SIZE_160]byte{}, false
}
hash_160 :: proc {
@@ -129,18 +172,20 @@ hash_160 :: proc {
hash_file_160,
hash_bytes_160,
hash_string_160,
+ hash_bytes_to_buffer_160,
+ hash_string_to_buffer_160,
}
// hash_string_256 will hash the given input and return the
// computed hash
-hash_string_256 :: proc(data: string) -> [32]byte {
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
return hash_bytes_256(transmute([]byte)(data))
}
// hash_bytes_256 will hash the given input and return the
// computed hash
-hash_bytes_256 :: proc(data: []byte) -> [32]byte {
- hash: [32]byte
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+ hash: [DIGEST_SIZE_256]byte
ctx: Ripemd256_Context
init(&ctx)
update(&ctx, data)
@@ -148,10 +193,28 @@ hash_bytes_256 :: proc(data: []byte) -> [32]byte {
return hash
}
+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_256(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+ ctx: Ripemd256_Context
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_256 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
- hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+ hash: [DIGEST_SIZE_256]byte
ctx: Ripemd256_Context
init(&ctx)
buf := make([]byte, 512)
@@ -169,7 +232,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
// hash_file_256 will read the file provided by the given handle
// and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
if !load_at_once {
return hash_stream_256(os.stream_from_handle(hd))
} else {
@@ -177,7 +240,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
return hash_bytes_256(buf[:]), ok
}
}
- return [32]byte{}, false
+ return [DIGEST_SIZE_256]byte{}, false
}
hash_256 :: proc {
@@ -185,18 +248,20 @@ hash_256 :: proc {
hash_file_256,
hash_bytes_256,
hash_string_256,
+ hash_bytes_to_buffer_256,
+ hash_string_to_buffer_256,
}
// hash_string_320 will hash the given input and return the
// computed hash
-hash_string_320 :: proc(data: string) -> [40]byte {
+hash_string_320 :: proc(data: string) -> [DIGEST_SIZE_320]byte {
return hash_bytes_320(transmute([]byte)(data))
}
// hash_bytes_320 will hash the given input and return the
// computed hash
-hash_bytes_320 :: proc(data: []byte) -> [40]byte {
- hash: [40]byte
+hash_bytes_320 :: proc(data: []byte) -> [DIGEST_SIZE_320]byte {
+ hash: [DIGEST_SIZE_320]byte
ctx: Ripemd320_Context
init(&ctx)
update(&ctx, data)
@@ -204,10 +269,28 @@ hash_bytes_320 :: proc(data: []byte) -> [40]byte {
return hash
}
+// hash_string_to_buffer_320 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_320 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_320(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_320 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_320 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_320, "Size of destination buffer is smaller than the digest size")
+ ctx: Ripemd320_Context
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_320 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_320 :: proc(s: io.Stream) -> ([40]byte, bool) {
- hash: [40]byte
+hash_stream_320 :: proc(s: io.Stream) -> ([DIGEST_SIZE_320]byte, bool) {
+ hash: [DIGEST_SIZE_320]byte
ctx: Ripemd320_Context
init(&ctx)
buf := make([]byte, 512)
@@ -225,7 +308,7 @@ hash_stream_320 :: proc(s: io.Stream) -> ([40]byte, bool) {
// hash_file_320 will read the file provided by the given handle
// and compute a hash
-hash_file_320 :: proc(hd: os.Handle, load_at_once := false) -> ([40]byte, bool) {
+hash_file_320 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_320]byte, bool) {
if !load_at_once {
return hash_stream_320(os.stream_from_handle(hd))
} else {
@@ -233,7 +316,7 @@ hash_file_320 :: proc(hd: os.Handle, load_at_once := false) -> ([40]byte, bool)
return hash_bytes_320(buf[:]), ok
}
}
- return [40]byte{}, false
+ return [DIGEST_SIZE_320]byte{}, false
}
hash_320 :: proc {
@@ -241,6 +324,8 @@ hash_320 :: proc {
hash_file_320,
hash_bytes_320,
hash_string_320,
+ hash_bytes_to_buffer_320,
+ hash_string_to_buffer_320,
}
/*
diff --git a/core/crypto/sha1/sha1.odin b/core/crypto/sha1/sha1.odin
index 736b207a3..e8df3c7f6 100644
--- a/core/crypto/sha1/sha1.odin
+++ b/core/crypto/sha1/sha1.odin
@@ -19,16 +19,19 @@ import "../util"
/*
High level API
*/
+
+DIGEST_SIZE :: 20
+
// hash_string will hash the given input and return the
// computed hash
-hash_string :: proc(data: string) -> [20]byte {
+hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
return hash_bytes(transmute([]byte)(data))
}
// hash_bytes will hash the given input and return the
// computed hash
-hash_bytes :: proc(data: []byte) -> [20]byte {
- hash: [20]byte
+hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
+ hash: [DIGEST_SIZE]byte
ctx: Sha1_Context
init(&ctx)
update(&ctx, data)
@@ -36,10 +39,28 @@ hash_bytes :: proc(data: []byte) -> [20]byte {
return hash
}
+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+ ctx: Sha1_Context
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream will read the stream in chunks and compute a
// hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([20]byte, bool) {
- hash: [20]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+ hash: [DIGEST_SIZE]byte
ctx: Sha1_Context
init(&ctx)
buf := make([]byte, 512)
@@ -57,7 +78,7 @@ hash_stream :: proc(s: io.Stream) -> ([20]byte, bool) {
// hash_file will read the file provided by the given handle
// and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
if !load_at_once {
return hash_stream(os.stream_from_handle(hd))
} else {
@@ -65,7 +86,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool) {
return hash_bytes(buf[:]), ok
}
}
- return [20]byte{}, false
+ return [DIGEST_SIZE]byte{}, false
}
hash :: proc {
@@ -73,6 +94,8 @@ hash :: proc {
hash_file,
hash_bytes,
hash_string,
+ hash_bytes_to_buffer,
+ hash_string_to_buffer,
}
/*
diff --git a/core/crypto/sha2/sha2.odin b/core/crypto/sha2/sha2.odin
index 8b7ccf38a..2178b70b5 100644
--- a/core/crypto/sha2/sha2.odin
+++ b/core/crypto/sha2/sha2.odin
@@ -21,16 +21,21 @@ import "../util"
High level API
*/
+DIGEST_SIZE_224 :: 28
+DIGEST_SIZE_256 :: 32
+DIGEST_SIZE_384 :: 48
+DIGEST_SIZE_512 :: 64
+
// hash_string_224 will hash the given input and return the
// computed hash
-hash_string_224 :: proc(data: string) -> [28]byte {
+hash_string_224 :: proc(data: string) -> [DIGEST_SIZE_224]byte {
return hash_bytes_224(transmute([]byte)(data))
}
// hash_bytes_224 will hash the given input and return the
// computed hash
-hash_bytes_224 :: proc(data: []byte) -> [28]byte {
- hash: [28]byte
+hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
+ hash: [DIGEST_SIZE_224]byte
ctx: Sha256_Context
ctx.is224 = true
init(&ctx)
@@ -39,10 +44,29 @@ hash_bytes_224 :: proc(data: []byte) -> [28]byte {
return hash
}
+// hash_string_to_buffer_224 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_224(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_224 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_224, "Size of destination buffer is smaller than the digest size")
+ ctx: Sha256_Context
+ ctx.is224 = true
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_224 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
- hash: [28]byte
+hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
+ hash: [DIGEST_SIZE_224]byte
ctx: Sha512_Context
ctx.is384 = false
init(&ctx)
@@ -61,7 +85,7 @@ hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
// hash_file_224 will read the file provided by the given handle
// and compute a hash
-hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool) {
+hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
if !load_at_once {
return hash_stream_224(os.stream_from_handle(hd))
} else {
@@ -69,7 +93,7 @@ hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool)
return hash_bytes_224(buf[:]), ok
}
}
- return [28]byte{}, false
+ return [DIGEST_SIZE_224]byte{}, false
}
hash_224 :: proc {
@@ -77,18 +101,20 @@ hash_224 :: proc {
hash_file_224,
hash_bytes_224,
hash_string_224,
+ hash_bytes_to_buffer_224,
+ hash_string_to_buffer_224,
}
// hash_string_256 will hash the given input and return the
// computed hash
-hash_string_256 :: proc(data: string) -> [32]byte {
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
return hash_bytes_256(transmute([]byte)(data))
}
// hash_bytes_256 will hash the given input and return the
// computed hash
-hash_bytes_256 :: proc(data: []byte) -> [32]byte {
- hash: [32]byte
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+ hash: [DIGEST_SIZE_256]byte
ctx: Sha256_Context
ctx.is224 = false
init(&ctx)
@@ -97,10 +123,29 @@ hash_bytes_256 :: proc(data: []byte) -> [32]byte {
return hash
}
+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_256(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+ ctx: Sha256_Context
+ ctx.is224 = false
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_256 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
- hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+ hash: [DIGEST_SIZE_256]byte
ctx: Sha512_Context
ctx.is384 = false
init(&ctx)
@@ -119,7 +164,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
// hash_file_256 will read the file provided by the given handle
// and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
if !load_at_once {
return hash_stream_256(os.stream_from_handle(hd))
} else {
@@ -127,7 +172,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
return hash_bytes_256(buf[:]), ok
}
}
- return [32]byte{}, false
+ return [DIGEST_SIZE_256]byte{}, false
}
hash_256 :: proc {
@@ -135,18 +180,20 @@ hash_256 :: proc {
hash_file_256,
hash_bytes_256,
hash_string_256,
+ hash_bytes_to_buffer_256,
+ hash_string_to_buffer_256,
}
// hash_string_384 will hash the given input and return the
// computed hash
-hash_string_384 :: proc(data: string) -> [48]byte {
+hash_string_384 :: proc(data: string) -> [DIGEST_SIZE_384]byte {
return hash_bytes_384(transmute([]byte)(data))
}
// hash_bytes_384 will hash the given input and return the
// computed hash
-hash_bytes_384 :: proc(data: []byte) -> [48]byte {
- hash: [48]byte
+hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
+ hash: [DIGEST_SIZE_384]byte
ctx: Sha512_Context
ctx.is384 = true
init(&ctx)
@@ -155,10 +202,29 @@ hash_bytes_384 :: proc(data: []byte) -> [48]byte {
return hash
}
+// hash_string_to_buffer_384 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_384(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_384 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_384, "Size of destination buffer is smaller than the digest size")
+ ctx: Sha512_Context
+ ctx.is384 = true
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_384 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
- hash: [48]byte
+hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
+ hash: [DIGEST_SIZE_384]byte
ctx: Sha512_Context
ctx.is384 = true
init(&ctx)
@@ -177,7 +243,7 @@ hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
// hash_file_384 will read the file provided by the given handle
// and compute a hash
-hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool) {
+hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
if !load_at_once {
return hash_stream_384(os.stream_from_handle(hd))
} else {
@@ -185,7 +251,7 @@ hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool)
return hash_bytes_384(buf[:]), ok
}
}
- return [48]byte{}, false
+ return [DIGEST_SIZE_384]byte{}, false
}
hash_384 :: proc {
@@ -193,18 +259,20 @@ hash_384 :: proc {
hash_file_384,
hash_bytes_384,
hash_string_384,
+ hash_bytes_to_buffer_384,
+ hash_string_to_buffer_384,
}
// hash_string_512 will hash the given input and return the
// computed hash
-hash_string_512 :: proc(data: string) -> [64]byte {
+hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
return hash_bytes_512(transmute([]byte)(data))
}
// hash_bytes_512 will hash the given input and return the
// computed hash
-hash_bytes_512 :: proc(data: []byte) -> [64]byte {
- hash: [64]byte
+hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
+ hash: [DIGEST_SIZE_512]byte
ctx: Sha512_Context
ctx.is384 = false
init(&ctx)
@@ -213,10 +281,29 @@ hash_bytes_512 :: proc(data: []byte) -> [64]byte {
return hash
}
+// hash_string_to_buffer_512 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_512(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_512 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
+ ctx: Sha512_Context
+ ctx.is384 = false
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream_512 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
- hash: [64]byte
+hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
+ hash: [DIGEST_SIZE_512]byte
ctx: Sha512_Context
ctx.is384 = false
init(&ctx)
@@ -235,7 +322,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
// hash_file_512 will read the file provided by the given handle
// and compute a hash
-hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
if !load_at_once {
return hash_stream_512(os.stream_from_handle(hd))
} else {
@@ -243,7 +330,7 @@ hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool)
return hash_bytes_512(buf[:]), ok
}
}
- return [64]byte{}, false
+ return [DIGEST_SIZE_512]byte{}, false
}
hash_512 :: proc {
@@ -251,6 +338,8 @@ hash_512 :: proc {
hash_file_512,
hash_bytes_512,
hash_string_512,
+ hash_bytes_to_buffer_512,
+ hash_string_to_buffer_512,
}
/*
diff --git a/core/crypto/sha3/sha3.odin b/core/crypto/sha3/sha3.odin
index 1becf7640..2eceeaff6 100644
--- a/core/crypto/sha3/sha3.odin
+++ b/core/crypto/sha3/sha3.odin
@@ -20,30 +20,54 @@ import "../_sha3"
High level API
*/
+DIGEST_SIZE_224 :: 28
+DIGEST_SIZE_256 :: 32
+DIGEST_SIZE_384 :: 48
+DIGEST_SIZE_512 :: 64
+
// hash_string_224 will hash the given input and return the
// computed hash
-hash_string_224 :: proc(data: string) -> [28]byte {
+hash_string_224 :: proc(data: string) -> [DIGEST_SIZE_224]byte {
return hash_bytes_224(transmute([]byte)(data))
}
// hash_bytes_224 will hash the given input and return the
// computed hash
-hash_bytes_224 :: proc(data: []byte) -> [28]byte {
- hash: [28]byte
+hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
+ hash: [DIGEST_SIZE_224]byte
ctx: _sha3.Sha3_Context
- ctx.mdlen = 28
+ ctx.mdlen = DIGEST_SIZE_224
_sha3.init(&ctx)
_sha3.update(&ctx, data)
_sha3.final(&ctx, hash[:])
return hash
}
+// hash_string_to_buffer_224 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_224(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_224 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_224, "Size of destination buffer is smaller than the digest size")
+ ctx: _sha3.Sha3_Context
+ ctx.mdlen = DIGEST_SIZE_224
+ _sha3.init(&ctx)
+ _sha3.update(&ctx, data)
+ _sha3.final(&ctx, hash)
+}
+
// hash_stream_224 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
- hash: [28]byte
+hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
+ hash: [DIGEST_SIZE_224]byte
ctx: _sha3.Sha3_Context
- ctx.mdlen = 28
+ ctx.mdlen = DIGEST_SIZE_224
_sha3.init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
@@ -60,7 +84,7 @@ hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
// hash_file_224 will read the file provided by the given handle
// and compute a hash
-hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool) {
+hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
if !load_at_once {
return hash_stream_224(os.stream_from_handle(hd))
} else {
@@ -68,7 +92,7 @@ hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool)
return hash_bytes_224(buf[:]), ok
}
}
- return [28]byte{}, false
+ return [DIGEST_SIZE_224]byte{}, false
}
hash_224 :: proc {
@@ -76,32 +100,53 @@ hash_224 :: proc {
hash_file_224,
hash_bytes_224,
hash_string_224,
+ hash_bytes_to_buffer_224,
+ hash_string_to_buffer_224,
}
// hash_string_256 will hash the given input and return the
// computed hash
-hash_string_256 :: proc(data: string) -> [32]byte {
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
return hash_bytes_256(transmute([]byte)(data))
}
// hash_bytes_256 will hash the given input and return the
// computed hash
-hash_bytes_256 :: proc(data: []byte) -> [32]byte {
- hash: [32]byte
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+ hash: [DIGEST_SIZE_256]byte
ctx: _sha3.Sha3_Context
- ctx.mdlen = 32
+ ctx.mdlen = DIGEST_SIZE_256
_sha3.init(&ctx)
_sha3.update(&ctx, data)
_sha3.final(&ctx, hash[:])
return hash
}
+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_256(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+ ctx: _sha3.Sha3_Context
+ ctx.mdlen = DIGEST_SIZE_256
+ _sha3.init(&ctx)
+ _sha3.update(&ctx, data)
+ _sha3.final(&ctx, hash)
+}
+
// hash_stream_256 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
- hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+ hash: [DIGEST_SIZE_256]byte
ctx: _sha3.Sha3_Context
- ctx.mdlen = 32
+ ctx.mdlen = DIGEST_SIZE_256
_sha3.init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
@@ -118,7 +163,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
// hash_file_256 will read the file provided by the given handle
// and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
if !load_at_once {
return hash_stream_256(os.stream_from_handle(hd))
} else {
@@ -126,7 +171,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
return hash_bytes_256(buf[:]), ok
}
}
- return [32]byte{}, false
+ return [DIGEST_SIZE_256]byte{}, false
}
hash_256 :: proc {
@@ -134,32 +179,53 @@ hash_256 :: proc {
hash_file_256,
hash_bytes_256,
hash_string_256,
+ hash_bytes_to_buffer_256,
+ hash_string_to_buffer_256,
}
// hash_string_384 will hash the given input and return the
// computed hash
-hash_string_384 :: proc(data: string) -> [48]byte {
+hash_string_384 :: proc(data: string) -> [DIGEST_SIZE_384]byte {
return hash_bytes_384(transmute([]byte)(data))
}
// hash_bytes_384 will hash the given input and return the
// computed hash
-hash_bytes_384 :: proc(data: []byte) -> [48]byte {
- hash: [48]byte
+hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
+ hash: [DIGEST_SIZE_384]byte
ctx: _sha3.Sha3_Context
- ctx.mdlen = 48
+ ctx.mdlen = DIGEST_SIZE_384
_sha3.init(&ctx)
_sha3.update(&ctx, data)
_sha3.final(&ctx, hash[:])
return hash
}
+// hash_string_to_buffer_384 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_384(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_384 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_384, "Size of destination buffer is smaller than the digest size")
+ ctx: _sha3.Sha3_Context
+ ctx.mdlen = DIGEST_SIZE_384
+ _sha3.init(&ctx)
+ _sha3.update(&ctx, data)
+ _sha3.final(&ctx, hash)
+}
+
// hash_stream_384 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
- hash: [48]byte
+hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
+ hash: [DIGEST_SIZE_384]byte
ctx: _sha3.Sha3_Context
- ctx.mdlen = 48
+ ctx.mdlen = DIGEST_SIZE_384
_sha3.init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
@@ -176,7 +242,7 @@ hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
// hash_file_384 will read the file provided by the given handle
// and compute a hash
-hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool) {
+hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
if !load_at_once {
return hash_stream_384(os.stream_from_handle(hd))
} else {
@@ -184,7 +250,7 @@ hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool)
return hash_bytes_384(buf[:]), ok
}
}
- return [48]byte{}, false
+ return [DIGEST_SIZE_384]byte{}, false
}
hash_384 :: proc {
@@ -192,32 +258,53 @@ hash_384 :: proc {
hash_file_384,
hash_bytes_384,
hash_string_384,
+ hash_bytes_to_buffer_384,
+ hash_string_to_buffer_384,
}
// hash_string_512 will hash the given input and return the
// computed hash
-hash_string_512 :: proc(data: string) -> [64]byte {
+hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
return hash_bytes_512(transmute([]byte)(data))
}
// hash_bytes_512 will hash the given input and return the
// computed hash
-hash_bytes_512 :: proc(data: []byte) -> [64]byte {
- hash: [64]byte
+hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
+ hash: [DIGEST_SIZE_512]byte
ctx: _sha3.Sha3_Context
- ctx.mdlen = 64
+ ctx.mdlen = DIGEST_SIZE_512
_sha3.init(&ctx)
_sha3.update(&ctx, data)
_sha3.final(&ctx, hash[:])
return hash
}
+// hash_string_to_buffer_512 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_512(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_512 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
+ ctx: _sha3.Sha3_Context
+ ctx.mdlen = DIGEST_SIZE_512
+ _sha3.init(&ctx)
+ _sha3.update(&ctx, data)
+ _sha3.final(&ctx, hash)
+}
+
// hash_stream_512 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
- hash: [64]byte
+hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
+ hash: [DIGEST_SIZE_512]byte
ctx: _sha3.Sha3_Context
- ctx.mdlen = 64
+ ctx.mdlen = DIGEST_SIZE_512
_sha3.init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
@@ -234,7 +321,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
// hash_file_512 will read the file provided by the given handle
// and compute a hash
-hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
if !load_at_once {
return hash_stream_512(os.stream_from_handle(hd))
} else {
@@ -242,7 +329,7 @@ hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool)
return hash_bytes_512(buf[:]), ok
}
}
- return [64]byte{}, false
+ return [DIGEST_SIZE_512]byte{}, false
}
hash_512 :: proc {
@@ -250,6 +337,8 @@ hash_512 :: proc {
hash_file_512,
hash_bytes_512,
hash_string_512,
+ hash_bytes_to_buffer_512,
+ hash_string_to_buffer_512,
}
/*
diff --git a/core/crypto/shake/shake.odin b/core/crypto/shake/shake.odin
index ff477b1a9..9fdc3ebf1 100644
--- a/core/crypto/shake/shake.odin
+++ b/core/crypto/shake/shake.odin
@@ -20,18 +20,21 @@ import "../_sha3"
High level API
*/
+DIGEST_SIZE_128 :: 16
+DIGEST_SIZE_256 :: 32
+
// hash_string_128 will hash the given input and return the
// computed hash
-hash_string_128 :: proc(data: string) -> [16]byte {
+hash_string_128 :: proc(data: string) -> [DIGEST_SIZE_128]byte {
return hash_bytes_128(transmute([]byte)(data))
}
// hash_bytes_128 will hash the given input and return the
// computed hash
-hash_bytes_128 :: proc(data: []byte) -> [16]byte {
- hash: [16]byte
+hash_bytes_128 :: proc(data: []byte) -> [DIGEST_SIZE_128]byte {
+ hash: [DIGEST_SIZE_128]byte
ctx: _sha3.Sha3_Context
- ctx.mdlen = 16
+ ctx.mdlen = DIGEST_SIZE_128
_sha3.init(&ctx)
_sha3.update(&ctx, data)
_sha3.shake_xof(&ctx)
@@ -39,12 +42,32 @@ hash_bytes_128 :: proc(data: []byte) -> [16]byte {
return hash
}
+// hash_string_to_buffer_128 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_128 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_128(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_128 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_128 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_128, "Size of destination buffer is smaller than the digest size")
+ ctx: _sha3.Sha3_Context
+ ctx.mdlen = DIGEST_SIZE_128
+ _sha3.init(&ctx)
+ _sha3.update(&ctx, data)
+ _sha3.shake_xof(&ctx)
+ _sha3.shake_out(&ctx, hash)
+}
+
// hash_stream_128 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_128 :: proc(s: io.Stream) -> ([16]byte, bool) {
- hash: [16]byte
+hash_stream_128 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
+ hash: [DIGEST_SIZE_128]byte
ctx: _sha3.Sha3_Context
- ctx.mdlen = 16
+ ctx.mdlen = DIGEST_SIZE_128
_sha3.init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
@@ -62,7 +85,7 @@ hash_stream_128 :: proc(s: io.Stream) -> ([16]byte, bool) {
// hash_file_128 will read the file provided by the given handle
// and compute a hash
-hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
+hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_128]byte, bool) {
if !load_at_once {
return hash_stream_128(os.stream_from_handle(hd))
} else {
@@ -70,7 +93,7 @@ hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool)
return hash_bytes_128(buf[:]), ok
}
}
- return [16]byte{}, false
+ return [DIGEST_SIZE_128]byte{}, false
}
hash_128 :: proc {
@@ -78,20 +101,22 @@ hash_128 :: proc {
hash_file_128,
hash_bytes_128,
hash_string_128,
+ hash_bytes_to_buffer_128,
+ hash_string_to_buffer_128,
}
// hash_string_256 will hash the given input and return the
// computed hash
-hash_string_256 :: proc(data: string) -> [32]byte {
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
return hash_bytes_256(transmute([]byte)(data))
}
// hash_bytes_256 will hash the given input and return the
// computed hash
-hash_bytes_256 :: proc(data: []byte) -> [32]byte {
- hash: [32]byte
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+ hash: [DIGEST_SIZE_256]byte
ctx: _sha3.Sha3_Context
- ctx.mdlen = 32
+ ctx.mdlen = DIGEST_SIZE_256
_sha3.init(&ctx)
_sha3.update(&ctx, data)
_sha3.shake_xof(&ctx)
@@ -99,12 +124,32 @@ hash_bytes_256 :: proc(data: []byte) -> [32]byte {
return hash
}
+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_256(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+ ctx: _sha3.Sha3_Context
+ ctx.mdlen = DIGEST_SIZE_256
+ _sha3.init(&ctx)
+ _sha3.update(&ctx, data)
+ _sha3.shake_xof(&ctx)
+ _sha3.shake_out(&ctx, hash)
+}
+
// hash_stream_256 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
- hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+ hash: [DIGEST_SIZE_256]byte
ctx: _sha3.Sha3_Context
- ctx.mdlen = 32
+ ctx.mdlen = DIGEST_SIZE_256
_sha3.init(&ctx)
buf := make([]byte, 512)
defer delete(buf)
@@ -122,7 +167,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
// hash_file_256 will read the file provided by the given handle
// and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
if !load_at_once {
return hash_stream_256(os.stream_from_handle(hd))
} else {
@@ -130,7 +175,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
return hash_bytes_256(buf[:]), ok
}
}
- return [32]byte{}, false
+ return [DIGEST_SIZE_256]byte{}, false
}
hash_256 :: proc {
@@ -138,13 +183,15 @@ hash_256 :: proc {
hash_file_256,
hash_bytes_256,
hash_string_256,
+ hash_bytes_to_buffer_256,
+ hash_string_to_buffer_256,
}
/*
Low level API
*/
-Sha3_Context :: _sha3.Sha3_Context
+Shake_Context :: _sha3.Sha3_Context
init :: proc(ctx: ^_sha3.Sha3_Context) {
_sha3.init(ctx)
diff --git a/core/crypto/sm3/sm3.odin b/core/crypto/sm3/sm3.odin
index c72bd4f15..e72973e33 100644
--- a/core/crypto/sm3/sm3.odin
+++ b/core/crypto/sm3/sm3.odin
@@ -15,16 +15,22 @@ import "core:io"
import "../util"
+/*
+ High level API
+*/
+
+DIGEST_SIZE :: 32
+
// hash_string will hash the given input and return the
// computed hash
-hash_string :: proc(data: string) -> [32]byte {
+hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
return hash_bytes(transmute([]byte)(data))
}
// hash_bytes will hash the given input and return the
// computed hash
-hash_bytes :: proc(data: []byte) -> [32]byte {
- hash: [32]byte
+hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
+ hash: [DIGEST_SIZE]byte
ctx: Sm3_Context
init(&ctx)
update(&ctx, data)
@@ -32,10 +38,28 @@ hash_bytes :: proc(data: []byte) -> [32]byte {
return hash
}
+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+ ctx: Sm3_Context
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream will read the stream in chunks and compute a
// hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([32]byte, bool) {
- hash: [32]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+ hash: [DIGEST_SIZE]byte
ctx: Sm3_Context
init(&ctx)
buf := make([]byte, 512)
@@ -53,7 +77,7 @@ hash_stream :: proc(s: io.Stream) -> ([32]byte, bool) {
// hash_file will read the file provided by the given handle
// and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
if !load_at_once {
return hash_stream(os.stream_from_handle(hd))
} else {
@@ -61,7 +85,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
return hash_bytes(buf[:]), ok
}
}
- return [32]byte{}, false
+ return [DIGEST_SIZE]byte{}, false
}
hash :: proc {
@@ -69,6 +93,8 @@ hash :: proc {
hash_file,
hash_bytes,
hash_string,
+ hash_bytes_to_buffer,
+ hash_string_to_buffer,
}
/*
@@ -146,9 +172,6 @@ Sm3_Context :: struct {
length: u64,
}
-BLOCK_SIZE_IN_BYTES :: 64
-BLOCK_SIZE_IN_32 :: 16
-
IV := [8]u32 {
0x7380166f, 0x4914b2b9, 0x172442d7, 0xda8a0600,
0xa96f30bc, 0x163138aa, 0xe38dee4d, 0xb0fb0e4e,
diff --git a/core/crypto/streebog/streebog.odin b/core/crypto/streebog/streebog.odin
index b90ef8e86..deb71120d 100644
--- a/core/crypto/streebog/streebog.odin
+++ b/core/crypto/streebog/streebog.odin
@@ -19,16 +19,19 @@ import "../util"
High level API
*/
+DIGEST_SIZE_256 :: 32
+DIGEST_SIZE_512 :: 64
+
// hash_string_256 will hash the given input and return the
// computed hash
-hash_string_256 :: proc(data: string) -> [32]byte {
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
return hash_bytes_256(transmute([]byte)(data))
}
// hash_bytes_256 will hash the given input and return the
// computed hash
-hash_bytes_256 :: proc(data: []byte) -> [32]byte {
- hash: [32]byte
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+ hash: [DIGEST_SIZE_256]byte
ctx: Streebog_Context
ctx.is256 = true
init(&ctx)
@@ -37,10 +40,29 @@ hash_bytes_256 :: proc(data: []byte) -> [32]byte {
return hash
}
+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_256(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+ ctx: Streebog_Context
+ ctx.is256 = true
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash[:])
+}
+
// hash_stream_256 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
- hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+ hash: [DIGEST_SIZE_256]byte
ctx: Streebog_Context
ctx.is256 = true
init(&ctx)
@@ -59,7 +81,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
// hash_file_256 will read the file provided by the given handle
// and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
if !load_at_once {
return hash_stream_256(os.stream_from_handle(hd))
} else {
@@ -67,7 +89,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
return hash_bytes_256(buf[:]), ok
}
}
- return [32]byte{}, false
+ return [DIGEST_SIZE_256]byte{}, false
}
hash_256 :: proc {
@@ -75,18 +97,20 @@ hash_256 :: proc {
hash_file_256,
hash_bytes_256,
hash_string_256,
+ hash_bytes_to_buffer_256,
+ hash_string_to_buffer_256,
}
// hash_string_512 will hash the given input and return the
// computed hash
-hash_string_512 :: proc(data: string) -> [64]byte {
+hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
return hash_bytes_512(transmute([]byte)(data))
}
// hash_bytes_512 will hash the given input and return the
// computed hash
-hash_bytes_512 :: proc(data: []byte) -> [64]byte {
- hash: [64]byte
+hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
+ hash: [DIGEST_SIZE_512]byte
ctx: Streebog_Context
init(&ctx)
update(&ctx, data)
@@ -94,10 +118,28 @@ hash_bytes_512 :: proc(data: []byte) -> [64]byte {
return hash
}
+// hash_string_to_buffer_512 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_512(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_512 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
+ ctx: Streebog_Context
+ init(&ctx)
+ update(&ctx, data)
+ final(&ctx, hash[:])
+}
+
// hash_stream_512 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
- hash: [64]byte
+hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
+ hash: [DIGEST_SIZE_512]byte
ctx: Streebog_Context
init(&ctx)
buf := make([]byte, 512)
@@ -115,7 +157,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
// hash_file_512 will read the file provided by the given handle
// and compute a hash
-hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
if !load_at_once {
return hash_stream_512(os.stream_from_handle(hd))
} else {
@@ -123,7 +165,7 @@ hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool)
return hash_bytes_512(buf[:]), ok
}
}
- return [64]byte{}, false
+ return [DIGEST_SIZE_512]byte{}, false
}
hash_512 :: proc {
@@ -131,6 +173,8 @@ hash_512 :: proc {
hash_file_512,
hash_bytes_512,
hash_string_512,
+ hash_bytes_to_buffer_512,
+ hash_string_to_buffer_512,
}
/*
diff --git a/core/crypto/tiger/tiger.odin b/core/crypto/tiger/tiger.odin
index ecd7f5583..4ea80c66c 100644
--- a/core/crypto/tiger/tiger.odin
+++ b/core/crypto/tiger/tiger.odin
@@ -19,16 +19,20 @@ import "../_tiger"
High level API
*/
+DIGEST_SIZE_128 :: 16
+DIGEST_SIZE_160 :: 20
+DIGEST_SIZE_192 :: 24
+
// hash_string_128 will hash the given input and return the
// computed hash
-hash_string_128 :: proc(data: string) -> [16]byte {
+hash_string_128 :: proc(data: string) -> [DIGEST_SIZE_128]byte {
return hash_bytes_128(transmute([]byte)(data))
}
// hash_bytes_128 will hash the given input and return the
// computed hash
-hash_bytes_128 :: proc(data: []byte) -> [16]byte {
- hash: [16]byte
+hash_bytes_128 :: proc(data: []byte) -> [DIGEST_SIZE_128]byte {
+ hash: [DIGEST_SIZE_128]byte
ctx: _tiger.Tiger_Context
ctx.ver = 1
_tiger.init(&ctx)
@@ -37,10 +41,29 @@ hash_bytes_128 :: proc(data: []byte) -> [16]byte {
return hash
}
+// hash_string_to_buffer_128 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_128 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_128(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_128 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_128 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_128, "Size of destination buffer is smaller than the digest size")
+ ctx: _tiger.Tiger_Context
+ ctx.ver = 1
+ _tiger.init(&ctx)
+ _tiger.update(&ctx, data)
+ _tiger.final(&ctx, hash)
+}
+
// hash_stream_128 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_128 :: proc(s: io.Stream) -> ([16]byte, bool) {
- hash: [16]byte
+hash_stream_128 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
+ hash: [DIGEST_SIZE_128]byte
ctx: _tiger.Tiger_Context
ctx.ver = 1
_tiger.init(&ctx)
@@ -59,7 +82,7 @@ hash_stream_128 :: proc(s: io.Stream) -> ([16]byte, bool) {
// hash_file_128 will read the file provided by the given handle
// and compute a hash
-hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
+hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_128]byte, bool) {
if !load_at_once {
return hash_stream_128(os.stream_from_handle(hd))
} else {
@@ -67,7 +90,7 @@ hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool)
return hash_bytes_128(buf[:]), ok
}
}
- return [16]byte{}, false
+ return [DIGEST_SIZE_128]byte{}, false
}
hash_128 :: proc {
@@ -75,18 +98,20 @@ hash_128 :: proc {
hash_file_128,
hash_bytes_128,
hash_string_128,
+ hash_bytes_to_buffer_128,
+ hash_string_to_buffer_128,
}
// hash_string_160 will hash the given input and return the
// computed hash
-hash_string_160 :: proc(data: string) -> [20]byte {
+hash_string_160 :: proc(data: string) -> [DIGEST_SIZE_160]byte {
return hash_bytes_160(transmute([]byte)(data))
}
// hash_bytes_160 will hash the given input and return the
// computed hash
-hash_bytes_160 :: proc(data: []byte) -> [20]byte {
- hash: [20]byte
+hash_bytes_160 :: proc(data: []byte) -> [DIGEST_SIZE_160]byte {
+ hash: [DIGEST_SIZE_160]byte
ctx: _tiger.Tiger_Context
ctx.ver = 1
_tiger.init(&ctx)
@@ -95,10 +120,29 @@ hash_bytes_160 :: proc(data: []byte) -> [20]byte {
return hash
}
+// hash_string_to_buffer_160 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_160 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_160(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_160 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_160 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_160, "Size of destination buffer is smaller than the digest size")
+ ctx: _tiger.Tiger_Context
+ ctx.ver = 1
+ _tiger.init(&ctx)
+ _tiger.update(&ctx, data)
+ _tiger.final(&ctx, hash)
+}
+
// hash_stream_160 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_160 :: proc(s: io.Stream) -> ([20]byte, bool) {
- hash: [20]byte
+hash_stream_160 :: proc(s: io.Stream) -> ([DIGEST_SIZE_160]byte, bool) {
+ hash: [DIGEST_SIZE_160]byte
ctx: _tiger.Tiger_Context
ctx.ver = 1
_tiger.init(&ctx)
@@ -117,7 +161,7 @@ hash_stream_160 :: proc(s: io.Stream) -> ([20]byte, bool) {
// hash_file_160 will read the file provided by the given handle
// and compute a hash
-hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool) {
+hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_160]byte, bool) {
if !load_at_once {
return hash_stream_160(os.stream_from_handle(hd))
} else {
@@ -125,7 +169,7 @@ hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool)
return hash_bytes_160(buf[:]), ok
}
}
- return [20]byte{}, false
+ return [DIGEST_SIZE_160]byte{}, false
}
hash_160 :: proc {
@@ -133,18 +177,20 @@ hash_160 :: proc {
hash_file_160,
hash_bytes_160,
hash_string_160,
+ hash_bytes_to_buffer_160,
+ hash_string_to_buffer_160,
}
// hash_string_192 will hash the given input and return the
// computed hash
-hash_string_192 :: proc(data: string) -> [24]byte {
+hash_string_192 :: proc(data: string) -> [DIGEST_SIZE_192]byte {
return hash_bytes_192(transmute([]byte)(data))
}
// hash_bytes_192 will hash the given input and return the
// computed hash
-hash_bytes_192 :: proc(data: []byte) -> [24]byte {
- hash: [24]byte
+hash_bytes_192 :: proc(data: []byte) -> [DIGEST_SIZE_192]byte {
+ hash: [DIGEST_SIZE_192]byte
ctx: _tiger.Tiger_Context
ctx.ver = 1
_tiger.init(&ctx)
@@ -153,10 +199,29 @@ hash_bytes_192 :: proc(data: []byte) -> [24]byte {
return hash
}
+// hash_string_to_buffer_192 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_192 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_192(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_192 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_192 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_192, "Size of destination buffer is smaller than the digest size")
+ ctx: _tiger.Tiger_Context
+ ctx.ver = 1
+ _tiger.init(&ctx)
+ _tiger.update(&ctx, data)
+ _tiger.final(&ctx, hash)
+}
+
// hash_stream_192 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_192 :: proc(s: io.Stream) -> ([24]byte, bool) {
- hash: [24]byte
+hash_stream_192 :: proc(s: io.Stream) -> ([DIGEST_SIZE_192]byte, bool) {
+ hash: [DIGEST_SIZE_192]byte
ctx: _tiger.Tiger_Context
ctx.ver = 1
_tiger.init(&ctx)
@@ -175,7 +240,7 @@ hash_stream_192 :: proc(s: io.Stream) -> ([24]byte, bool) {
// hash_file_192 will read the file provided by the given handle
// and compute a hash
-hash_file_192 :: proc(hd: os.Handle, load_at_once := false) -> ([24]byte, bool) {
+hash_file_192 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_192]byte, bool) {
if !load_at_once {
return hash_stream_192(os.stream_from_handle(hd))
} else {
@@ -183,7 +248,7 @@ hash_file_192 :: proc(hd: os.Handle, load_at_once := false) -> ([24]byte, bool)
return hash_bytes_192(buf[:]), ok
}
}
- return [24]byte{}, false
+ return [DIGEST_SIZE_192]byte{}, false
}
hash_192 :: proc {
@@ -191,6 +256,8 @@ hash_192 :: proc {
hash_file_192,
hash_bytes_192,
hash_string_192,
+ hash_bytes_to_buffer_192,
+ hash_string_to_buffer_192,
}
/*
diff --git a/core/crypto/tiger2/tiger2.odin b/core/crypto/tiger2/tiger2.odin
index a93e19319..84333f344 100644
--- a/core/crypto/tiger2/tiger2.odin
+++ b/core/crypto/tiger2/tiger2.odin
@@ -19,16 +19,20 @@ import "../_tiger"
High level API
*/
+DIGEST_SIZE_128 :: 16
+DIGEST_SIZE_160 :: 20
+DIGEST_SIZE_192 :: 24
+
// hash_string_128 will hash the given input and return the
// computed hash
-hash_string_128 :: proc(data: string) -> [16]byte {
+hash_string_128 :: proc(data: string) -> [DIGEST_SIZE_128]byte {
return hash_bytes_128(transmute([]byte)(data))
}
// hash_bytes_128 will hash the given input and return the
// computed hash
-hash_bytes_128 :: proc(data: []byte) -> [16]byte {
- hash: [16]byte
+hash_bytes_128 :: proc(data: []byte) -> [DIGEST_SIZE_128]byte {
+ hash: [DIGEST_SIZE_128]byte
ctx: _tiger.Tiger_Context
ctx.ver = 2
_tiger.init(&ctx)
@@ -37,10 +41,29 @@ hash_bytes_128 :: proc(data: []byte) -> [16]byte {
return hash
}
+// hash_string_to_buffer_128 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_128 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_128(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_128 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_128 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_128, "Size of destination buffer is smaller than the digest size")
+ ctx: _tiger.Tiger_Context
+ ctx.ver = 2
+ _tiger.init(&ctx)
+ _tiger.update(&ctx, data)
+ _tiger.final(&ctx, hash)
+}
+
// hash_stream_128 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_128 :: proc(s: io.Stream) -> ([16]byte, bool) {
- hash: [16]byte
+hash_stream_128 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
+ hash: [DIGEST_SIZE_128]byte
ctx: _tiger.Tiger_Context
ctx.ver = 2
_tiger.init(&ctx)
@@ -59,7 +82,7 @@ hash_stream_128 :: proc(s: io.Stream) -> ([16]byte, bool) {
// hash_file_128 will read the file provided by the given handle
// and compute a hash
-hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
+hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_128]byte, bool) {
if !load_at_once {
return hash_stream_128(os.stream_from_handle(hd))
} else {
@@ -67,7 +90,7 @@ hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool)
return hash_bytes_128(buf[:]), ok
}
}
- return [16]byte{}, false
+ return [DIGEST_SIZE_128]byte{}, false
}
hash_128 :: proc {
@@ -75,18 +98,20 @@ hash_128 :: proc {
hash_file_128,
hash_bytes_128,
hash_string_128,
+ hash_bytes_to_buffer_128,
+ hash_string_to_buffer_128,
}
// hash_string_160 will hash the given input and return the
// computed hash
-hash_string_160 :: proc(data: string) -> [20]byte {
+hash_string_160 :: proc(data: string) -> [DIGEST_SIZE_160]byte {
return hash_bytes_160(transmute([]byte)(data))
}
// hash_bytes_160 will hash the given input and return the
// computed hash
-hash_bytes_160 :: proc(data: []byte) -> [20]byte {
- hash: [20]byte
+hash_bytes_160 :: proc(data: []byte) -> [DIGEST_SIZE_160]byte {
+ hash: [DIGEST_SIZE_160]byte
ctx: _tiger.Tiger_Context
ctx.ver = 2
_tiger.init(&ctx)
@@ -95,10 +120,29 @@ hash_bytes_160 :: proc(data: []byte) -> [20]byte {
return hash
}
+// hash_string_to_buffer_160 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_160 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_160(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_160 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_160 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_160, "Size of destination buffer is smaller than the digest size")
+ ctx: _tiger.Tiger_Context
+ ctx.ver = 2
+ _tiger.init(&ctx)
+ _tiger.update(&ctx, data)
+ _tiger.final(&ctx, hash)
+}
+
// hash_stream_160 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_160 :: proc(s: io.Stream) -> ([20]byte, bool) {
- hash: [20]byte
+hash_stream_160 :: proc(s: io.Stream) -> ([DIGEST_SIZE_160]byte, bool) {
+ hash: [DIGEST_SIZE_160]byte
ctx: _tiger.Tiger_Context
ctx.ver = 2
_tiger.init(&ctx)
@@ -117,7 +161,7 @@ hash_stream_160 :: proc(s: io.Stream) -> ([20]byte, bool) {
// hash_file_160 will read the file provided by the given handle
// and compute a hash
-hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool) {
+hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_160]byte, bool) {
if !load_at_once {
return hash_stream_160(os.stream_from_handle(hd))
} else {
@@ -125,7 +169,7 @@ hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool)
return hash_bytes_160(buf[:]), ok
}
}
- return [20]byte{}, false
+ return [DIGEST_SIZE_160]byte{}, false
}
hash_160 :: proc {
@@ -133,18 +177,20 @@ hash_160 :: proc {
hash_file_160,
hash_bytes_160,
hash_string_160,
+ hash_bytes_to_buffer_160,
+ hash_string_to_buffer_160,
}
// hash_string_192 will hash the given input and return the
// computed hash
-hash_string_192 :: proc(data: string) -> [24]byte {
+hash_string_192 :: proc(data: string) -> [DIGEST_SIZE_192]byte {
return hash_bytes_192(transmute([]byte)(data))
}
// hash_bytes_192 will hash the given input and return the
// computed hash
-hash_bytes_192 :: proc(data: []byte) -> [24]byte {
- hash: [24]byte
+hash_bytes_192 :: proc(data: []byte) -> [DIGEST_SIZE_192]byte {
+ hash: [DIGEST_SIZE_192]byte
ctx: _tiger.Tiger_Context
ctx.ver = 2
_tiger.init(&ctx)
@@ -153,10 +199,29 @@ hash_bytes_192 :: proc(data: []byte) -> [24]byte {
return hash
}
+// hash_string_to_buffer_192 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_192 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_192(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_192 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_192 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_192, "Size of destination buffer is smaller than the digest size")
+ ctx: _tiger.Tiger_Context
+ ctx.ver = 2
+ _tiger.init(&ctx)
+ _tiger.update(&ctx, data)
+ _tiger.final(&ctx, hash)
+}
+
// hash_stream_192 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_192 :: proc(s: io.Stream) -> ([24]byte, bool) {
- hash: [24]byte
+hash_stream_192 :: proc(s: io.Stream) -> ([DIGEST_SIZE_192]byte, bool) {
+ hash: [DIGEST_SIZE_192]byte
ctx: _tiger.Tiger_Context
ctx.ver = 2
_tiger.init(&ctx)
@@ -175,7 +240,7 @@ hash_stream_192 :: proc(s: io.Stream) -> ([24]byte, bool) {
// hash_file_192 will read the file provided by the given handle
// and compute a hash
-hash_file_192 :: proc(hd: os.Handle, load_at_once := false) -> ([24]byte, bool) {
+hash_file_192 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_192]byte, bool) {
if !load_at_once {
return hash_stream_192(os.stream_from_handle(hd))
} else {
@@ -183,7 +248,7 @@ hash_file_192 :: proc(hd: os.Handle, load_at_once := false) -> ([24]byte, bool)
return hash_bytes_192(buf[:]), ok
}
}
- return [24]byte{}, false
+ return [DIGEST_SIZE_192]byte{}, false
}
hash_192 :: proc {
@@ -191,6 +256,8 @@ hash_192 :: proc {
hash_file_192,
hash_bytes_192,
hash_string_192,
+ hash_bytes_to_buffer_192,
+ hash_string_to_buffer_192,
}
/*
diff --git a/core/crypto/whirlpool/whirlpool.odin b/core/crypto/whirlpool/whirlpool.odin
index 43ad2a0a5..255f57bc2 100644
--- a/core/crypto/whirlpool/whirlpool.odin
+++ b/core/crypto/whirlpool/whirlpool.odin
@@ -19,16 +19,18 @@ import "../util"
High level API
*/
+DIGEST_SIZE :: 64
+
// hash_string will hash the given input and return the
// computed hash
-hash_string :: proc(data: string) -> [64]byte {
+hash_string :: proc(data: string) -> [DIGEST_SIZE]byte {
return hash_bytes(transmute([]byte)(data))
}
// hash_bytes will hash the given input and return the
// computed hash
-hash_bytes :: proc(data: []byte) -> [64]byte {
- hash: [64]byte
+hash_bytes :: proc(data: []byte) -> [DIGEST_SIZE]byte {
+ hash: [DIGEST_SIZE]byte
ctx: Whirlpool_Context
// init(&ctx) No-op
update(&ctx, data)
@@ -36,10 +38,28 @@ hash_bytes :: proc(data: []byte) -> [64]byte {
return hash
}
+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+ ctx: Whirlpool_Context
+ // init(&ctx) No-op
+ update(&ctx, data)
+ final(&ctx, hash)
+}
+
// hash_stream will read the stream in chunks and compute a
// hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([64]byte, bool) {
- hash: [64]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+ hash: [DIGEST_SIZE]byte
ctx: Whirlpool_Context
// init(&ctx) No-op
buf := make([]byte, 512)
@@ -57,7 +77,7 @@ hash_stream :: proc(s: io.Stream) -> ([64]byte, bool) {
// hash_file will read the file provided by the given handle
// and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
if !load_at_once {
return hash_stream(os.stream_from_handle(hd))
} else {
@@ -65,7 +85,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
return hash_bytes(buf[:]), ok
}
}
- return [64]byte{}, false
+ return [DIGEST_SIZE]byte{}, false
}
hash :: proc {
@@ -73,6 +93,8 @@ hash :: proc {
hash_file,
hash_bytes,
hash_string,
+ hash_bytes_to_buffer,
+ hash_string_to_buffer,
}
/*
diff --git a/vendor/botan/README.md b/vendor/botan/README.md
index 057aed422..b7d4d01a1 100644
--- a/vendor/botan/README.md
+++ b/vendor/botan/README.md
@@ -26,9 +26,11 @@ Wrappers for hashing algorithms have been added to match the API within the Odin
#### High level API
Each hash algorithm contains a procedure group named `hash`, or if the algorithm provides more than one digest size `hash_`.
-Included in these groups are four procedures.
+Included in these groups are six procedures.
* `hash_string` - Hash a given string and return the computed hash. Just calls `hash_bytes` internally
* `hash_bytes` - Hash a given byte slice and return the computed hash
+* `hash_string_to_buffer` - Hash a given string and put the computed hash in the second proc parameter. Just calls `hash_bytes_to_buffer` internally
+* `hash_bytes_to_buffer` - Hash a given byte slice and put the computed hash in the second proc parameter. The destination buffer has to be at least as big as the digest size of the hash
* `hash_stream` - Takes a stream from io.Stream and returns the computed hash from it
* `hash_file` - Takes a file handle and returns the computed hash from it. A second optional boolean parameter controls if the file is streamed (this is the default) or read at once (set to true)
@@ -49,6 +51,10 @@ main :: proc() {
// Compute the hash, using the high level API
computed_hash := md4.hash(input)
+ // Variant that takes a destination buffer, instead of returning the computed hash
+ hash := make([]byte, md4.DIGEST_SIZE) // @note: Destination buffer has to be at least as big as the digest size of the hash
+ md4.hash(input, hash[:])
+
// Compute the hash, using the low level API
// @note: Botan's structs are opaque by design, they don't expose any fields
ctx: md4.Md4_Context
diff --git a/vendor/botan/blake2b/blake2b.odin b/vendor/botan/blake2b/blake2b.odin
index efd4f464b..226502e83 100644
--- a/vendor/botan/blake2b/blake2b.odin
+++ b/vendor/botan/blake2b/blake2b.odin
@@ -20,16 +20,18 @@ import botan "../bindings"
High level API
*/
+DIGEST_SIZE :: 64
+
// hash_string will hash the given input and return the
// computed hash
-hash_string :: proc "contextless" (data: string) -> [64]byte {
+hash_string :: proc "contextless" (data: string) -> [DIGEST_SIZE]byte {
return hash_bytes(transmute([]byte)(data))
}
// hash_bytes will hash the given input and return the
// computed hash
-hash_bytes :: proc "contextless" (data: []byte) -> [64]byte {
- hash: [64]byte
+hash_bytes :: proc "contextless" (data: []byte) -> [DIGEST_SIZE]byte {
+ hash: [DIGEST_SIZE]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_BLAKE2B, 0)
botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
@@ -38,10 +40,29 @@ hash_bytes :: proc "contextless" (data: []byte) -> [64]byte {
return hash
}
+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+ ctx: botan.hash_t
+ botan.hash_init(&ctx, botan.HASH_BLAKE2B, 0)
+ botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
+ botan.hash_final(ctx, &hash[0])
+ botan.hash_destroy(ctx)
+}
+
// hash_stream will read the stream in chunks and compute a
// hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([64]byte, bool) {
- hash: [64]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+ hash: [DIGEST_SIZE]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_BLAKE2B, 0)
buf := make([]byte, 512)
@@ -60,7 +81,7 @@ hash_stream :: proc(s: io.Stream) -> ([64]byte, bool) {
// hash_file will read the file provided by the given handle
// and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
if !load_at_once {
return hash_stream(os.stream_from_handle(hd))
} else {
@@ -68,7 +89,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
return hash_bytes(buf[:]), ok
}
}
- return [64]byte{}, false
+ return [DIGEST_SIZE]byte{}, false
}
hash :: proc {
@@ -76,6 +97,8 @@ hash :: proc {
hash_file,
hash_bytes,
hash_string,
+ hash_bytes_to_buffer,
+ hash_string_to_buffer,
}
/*
diff --git a/vendor/botan/gost/gost.odin b/vendor/botan/gost/gost.odin
index 266078c7d..9f081f9cb 100644
--- a/vendor/botan/gost/gost.odin
+++ b/vendor/botan/gost/gost.odin
@@ -20,16 +20,18 @@ import botan "../bindings"
High level API
*/
+DIGEST_SIZE :: 32
+
// hash_string will hash the given input and return the
// computed hash
-hash_string :: proc "contextless" (data: string) -> [32]byte {
+hash_string :: proc "contextless" (data: string) -> [DIGEST_SIZE]byte {
return hash_bytes(transmute([]byte)(data))
}
// hash_bytes will hash the given input and return the
// computed hash
-hash_bytes :: proc "contextless" (data: []byte) -> [32]byte {
- hash: [32]byte
+hash_bytes :: proc "contextless" (data: []byte) -> [DIGEST_SIZE]byte {
+ hash: [DIGEST_SIZE]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_GOST, 0)
botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
@@ -38,10 +40,29 @@ hash_bytes :: proc "contextless" (data: []byte) -> [32]byte {
return hash
}
+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+ ctx: botan.hash_t
+ botan.hash_init(&ctx, botan.HASH_GOST, 0)
+ botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
+ botan.hash_final(ctx, &hash[0])
+ botan.hash_destroy(ctx)
+}
+
// hash_stream will read the stream in chunks and compute a
// hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([32]byte, bool) {
- hash: [32]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+ hash: [DIGEST_SIZE]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_GOST, 0)
buf := make([]byte, 512)
@@ -60,7 +81,7 @@ hash_stream :: proc(s: io.Stream) -> ([32]byte, bool) {
// hash_file will read the file provided by the given handle
// and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
if !load_at_once {
return hash_stream(os.stream_from_handle(hd))
} else {
@@ -68,7 +89,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
return hash_bytes(buf[:]), ok
}
}
- return [32]byte{}, false
+ return [DIGEST_SIZE]byte{}, false
}
hash :: proc {
@@ -76,6 +97,8 @@ hash :: proc {
hash_file,
hash_bytes,
hash_string,
+ hash_bytes_to_buffer,
+ hash_string_to_buffer,
}
/*
diff --git a/vendor/botan/keccak/keccak.odin b/vendor/botan/keccak/keccak.odin
index c2f52bfdc..3316de017 100644
--- a/vendor/botan/keccak/keccak.odin
+++ b/vendor/botan/keccak/keccak.odin
@@ -20,16 +20,18 @@ import botan "../bindings"
High level API
*/
+DIGEST_SIZE_512 :: 64
+
// hash_string_512 will hash the given input and return the
// computed hash
-hash_string_512 :: proc(data: string) -> [64]byte {
+hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
return hash_bytes_512(transmute([]byte)(data))
}
// hash_bytes_512 will hash the given input and return the
// computed hash
-hash_bytes_512 :: proc(data: []byte) -> [64]byte {
- hash: [64]byte
+hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
+ hash: [DIGEST_SIZE_512]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_KECCAK_512, 0)
botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
@@ -38,10 +40,29 @@ hash_bytes_512 :: proc(data: []byte) -> [64]byte {
return hash
}
+// hash_string_to_buffer_512 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_512(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_512 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
+ ctx: botan.hash_t
+ botan.hash_init(&ctx, botan.HASH_KECCAK_512, 0)
+ botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
+ botan.hash_final(ctx, &hash[0])
+ botan.hash_destroy(ctx)
+}
+
// hash_stream_512 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
- hash: [64]byte
+hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
+ hash: [DIGEST_SIZE_512]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_KECCAK_512, 0)
buf := make([]byte, 512)
@@ -60,7 +81,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
// hash_file_512 will read the file provided by the given handle
// and compute a hash
-hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
if !load_at_once {
return hash_stream_512(os.stream_from_handle(hd))
} else {
@@ -68,7 +89,7 @@ hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool)
return hash_bytes_512(buf[:]), ok
}
}
- return [64]byte{}, false
+ return [DIGEST_SIZE_512]byte{}, false
}
hash_512 :: proc {
@@ -76,6 +97,8 @@ hash_512 :: proc {
hash_file_512,
hash_bytes_512,
hash_string_512,
+ hash_bytes_to_buffer_512,
+ hash_string_to_buffer_512,
}
/*
diff --git a/vendor/botan/md4/md4.odin b/vendor/botan/md4/md4.odin
index 47a77c0fb..c8a1ad903 100644
--- a/vendor/botan/md4/md4.odin
+++ b/vendor/botan/md4/md4.odin
@@ -20,16 +20,18 @@ import botan "../bindings"
High level API
*/
+DIGEST_SIZE :: 16
+
// hash_string will hash the given input and return the
// computed hash
-hash_string :: proc "contextless" (data: string) -> [16]byte {
+hash_string :: proc "contextless" (data: string) -> [DIGEST_SIZE]byte {
return hash_bytes(transmute([]byte)(data))
}
// hash_bytes will hash the given input and return the
// computed hash
-hash_bytes :: proc "contextless" (data: []byte) -> [16]byte {
- hash: [16]byte
+hash_bytes :: proc "contextless" (data: []byte) -> [DIGEST_SIZE]byte {
+ hash: [DIGEST_SIZE]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_MD4, 0)
botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
@@ -38,10 +40,29 @@ hash_bytes :: proc "contextless" (data: []byte) -> [16]byte {
return hash
}
+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+ ctx: botan.hash_t
+ botan.hash_init(&ctx, botan.HASH_MD4, 0)
+ botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
+ botan.hash_final(ctx, &hash[0])
+ botan.hash_destroy(ctx)
+}
+
// hash_stream will read the stream in chunks and compute a
// hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([16]byte, bool) {
- hash: [16]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+ hash: [DIGEST_SIZE]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_MD4, 0)
buf := make([]byte, 512)
@@ -60,7 +81,7 @@ hash_stream :: proc(s: io.Stream) -> ([16]byte, bool) {
// hash_file will read the file provided by the given handle
// and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
if !load_at_once {
return hash_stream(os.stream_from_handle(hd))
} else {
@@ -68,7 +89,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
return hash_bytes(buf[:]), ok
}
}
- return [16]byte{}, false
+ return [DIGEST_SIZE]byte{}, false
}
hash :: proc {
@@ -76,6 +97,8 @@ hash :: proc {
hash_file,
hash_bytes,
hash_string,
+ hash_bytes_to_buffer,
+ hash_string_to_buffer,
}
/*
diff --git a/vendor/botan/md5/md5.odin b/vendor/botan/md5/md5.odin
index 15ad1e05a..203f2d092 100644
--- a/vendor/botan/md5/md5.odin
+++ b/vendor/botan/md5/md5.odin
@@ -20,16 +20,18 @@ import botan "../bindings"
High level API
*/
+DIGEST_SIZE :: 16
+
// hash_string will hash the given input and return the
// computed hash
-hash_string :: proc "contextless" (data: string) -> [16]byte {
+hash_string :: proc "contextless" (data: string) -> [DIGEST_SIZE]byte {
return hash_bytes(transmute([]byte)(data))
}
// hash_bytes will hash the given input and return the
// computed hash
-hash_bytes :: proc "contextless" (data: []byte) -> [16]byte {
- hash: [16]byte
+hash_bytes :: proc "contextless" (data: []byte) -> [DIGEST_SIZE]byte {
+ hash: [DIGEST_SIZE]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_MD5, 0)
botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
@@ -38,10 +40,29 @@ hash_bytes :: proc "contextless" (data: []byte) -> [16]byte {
return hash
}
+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+ ctx: botan.hash_t
+ botan.hash_init(&ctx, botan.HASH_MD5, 0)
+ botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
+ botan.hash_final(ctx, &hash[0])
+ botan.hash_destroy(ctx)
+}
+
// hash_stream will read the stream in chunks and compute a
// hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([16]byte, bool) {
- hash: [16]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+ hash: [DIGEST_SIZE]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_MD5, 0)
buf := make([]byte, 512)
@@ -60,7 +81,7 @@ hash_stream :: proc(s: io.Stream) -> ([16]byte, bool) {
// hash_file will read the file provided by the given handle
// and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
if !load_at_once {
return hash_stream(os.stream_from_handle(hd))
} else {
@@ -68,7 +89,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
return hash_bytes(buf[:]), ok
}
}
- return [16]byte{}, false
+ return [DIGEST_SIZE]byte{}, false
}
hash :: proc {
@@ -76,6 +97,8 @@ hash :: proc {
hash_file,
hash_bytes,
hash_string,
+ hash_bytes_to_buffer,
+ hash_string_to_buffer,
}
/*
diff --git a/vendor/botan/ripemd/ripemd.odin b/vendor/botan/ripemd/ripemd.odin
index 66260e520..0a8195a96 100644
--- a/vendor/botan/ripemd/ripemd.odin
+++ b/vendor/botan/ripemd/ripemd.odin
@@ -20,16 +20,18 @@ import botan "../bindings"
High level API
*/
+DIGEST_SIZE_160 :: 20
+
// hash_string_160 will hash the given input and return the
// computed hash
-hash_string_160 :: proc(data: string) -> [20]byte {
+hash_string_160 :: proc(data: string) -> [DIGEST_SIZE_160]byte {
return hash_bytes_160(transmute([]byte)(data))
}
// hash_bytes_160 will hash the given input and return the
// computed hash
-hash_bytes_160 :: proc(data: []byte) -> [20]byte {
- hash: [20]byte
+hash_bytes_160 :: proc(data: []byte) -> [DIGEST_SIZE_160]byte {
+ hash: [DIGEST_SIZE_160]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_RIPEMD_160, 0)
botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
@@ -38,10 +40,29 @@ hash_bytes_160 :: proc(data: []byte) -> [20]byte {
return hash
}
+// hash_string_to_buffer_160 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_160 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_160(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_160 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_160 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_160, "Size of destination buffer is smaller than the digest size")
+ ctx: botan.hash_t
+ botan.hash_init(&ctx, botan.HASH_RIPEMD_160, 0)
+ botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
+ botan.hash_final(ctx, &hash[0])
+ botan.hash_destroy(ctx)
+}
+
// hash_stream_160 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_160 :: proc(s: io.Stream) -> ([20]byte, bool) {
- hash: [20]byte
+hash_stream_160 :: proc(s: io.Stream) -> ([DIGEST_SIZE_160]byte, bool) {
+ hash: [DIGEST_SIZE_160]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_RIPEMD_160, 0)
buf := make([]byte, 512)
@@ -60,7 +81,7 @@ hash_stream_160 :: proc(s: io.Stream) -> ([20]byte, bool) {
// hash_file_160 will read the file provided by the given handle
// and compute a hash
-hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool) {
+hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_160]byte, bool) {
if !load_at_once {
return hash_stream_160(os.stream_from_handle(hd))
} else {
@@ -68,7 +89,7 @@ hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool)
return hash_bytes_160(buf[:]), ok
}
}
- return [20]byte{}, false
+ return [DIGEST_SIZE_160]byte{}, false
}
hash_160 :: proc {
@@ -76,6 +97,8 @@ hash_160 :: proc {
hash_file_160,
hash_bytes_160,
hash_string_160,
+ hash_bytes_to_buffer_160,
+ hash_string_to_buffer_160,
}
/*
diff --git a/vendor/botan/sha1/sha1.odin b/vendor/botan/sha1/sha1.odin
index 2eb799cb6..005b01821 100644
--- a/vendor/botan/sha1/sha1.odin
+++ b/vendor/botan/sha1/sha1.odin
@@ -20,16 +20,18 @@ import botan "../bindings"
High level API
*/
+DIGEST_SIZE :: 20
+
// hash_string will hash the given input and return the
// computed hash
-hash_string :: proc "contextless" (data: string) -> [20]byte {
+hash_string :: proc "contextless" (data: string) -> [DIGEST_SIZE]byte {
return hash_bytes(transmute([]byte)(data))
}
// hash_bytes will hash the given input and return the
// computed hash
-hash_bytes :: proc "contextless" (data: []byte) -> [20]byte {
- hash: [20]byte
+hash_bytes :: proc "contextless" (data: []byte) -> [DIGEST_SIZE]byte {
+ hash: [DIGEST_SIZE]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_SHA1, 0)
botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
@@ -38,10 +40,29 @@ hash_bytes :: proc "contextless" (data: []byte) -> [20]byte {
return hash
}
+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+ ctx: botan.hash_t
+ botan.hash_init(&ctx, botan.HASH_SHA1, 0)
+ botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
+ botan.hash_final(ctx, &hash[0])
+ botan.hash_destroy(ctx)
+}
+
// hash_stream will read the stream in chunks and compute a
// hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([20]byte, bool) {
- hash: [20]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+ hash: [DIGEST_SIZE]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_SHA1, 0)
buf := make([]byte, 512)
@@ -60,7 +81,7 @@ hash_stream :: proc(s: io.Stream) -> ([20]byte, bool) {
// hash_file will read the file provided by the given handle
// and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
if !load_at_once {
return hash_stream(os.stream_from_handle(hd))
} else {
@@ -68,7 +89,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool) {
return hash_bytes(buf[:]), ok
}
}
- return [20]byte{}, false
+ return [DIGEST_SIZE]byte{}, false
}
hash :: proc {
@@ -76,6 +97,8 @@ hash :: proc {
hash_file,
hash_bytes,
hash_string,
+ hash_bytes_to_buffer,
+ hash_string_to_buffer,
}
/*
diff --git a/vendor/botan/sha2/sha2.odin b/vendor/botan/sha2/sha2.odin
index cc5cd1d65..f5d6921a8 100644
--- a/vendor/botan/sha2/sha2.odin
+++ b/vendor/botan/sha2/sha2.odin
@@ -20,16 +20,21 @@ import botan "../bindings"
High level API
*/
+DIGEST_SIZE_224 :: 28
+DIGEST_SIZE_256 :: 32
+DIGEST_SIZE_384 :: 48
+DIGEST_SIZE_512 :: 64
+
// hash_string_224 will hash the given input and return the
// computed hash
-hash_string_224 :: proc(data: string) -> [28]byte {
+hash_string_224 :: proc(data: string) -> [DIGEST_SIZE_224]byte {
return hash_bytes_224(transmute([]byte)(data))
}
// hash_bytes_224 will hash the given input and return the
// computed hash
-hash_bytes_224 :: proc(data: []byte) -> [28]byte {
- hash: [28]byte
+hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
+ hash: [DIGEST_SIZE_224]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_SHA_224, 0)
botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
@@ -38,10 +43,29 @@ hash_bytes_224 :: proc(data: []byte) -> [28]byte {
return hash
}
+// hash_string_to_buffer_224 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_224(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_224 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_224, "Size of destination buffer is smaller than the digest size")
+ ctx: botan.hash_t
+ botan.hash_init(&ctx, botan.HASH_SHA_224, 0)
+ botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
+ botan.hash_final(ctx, &hash[0])
+ botan.hash_destroy(ctx)
+}
+
// hash_stream_224 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
- hash: [28]byte
+hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
+ hash: [DIGEST_SIZE_224]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_SHA_224, 0)
buf := make([]byte, 512)
@@ -60,7 +84,7 @@ hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
// hash_file_224 will read the file provided by the given handle
// and compute a hash
-hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool) {
+hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
if !load_at_once {
return hash_stream_224(os.stream_from_handle(hd))
} else {
@@ -68,7 +92,7 @@ hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool)
return hash_bytes_224(buf[:]), ok
}
}
- return [28]byte{}, false
+ return [DIGEST_SIZE_224]byte{}, false
}
hash_224 :: proc {
@@ -76,18 +100,20 @@ hash_224 :: proc {
hash_file_224,
hash_bytes_224,
hash_string_224,
+ hash_bytes_to_buffer_224,
+ hash_string_to_buffer_224,
}
// hash_string_256 will hash the given input and return the
// computed hash
-hash_string_256 :: proc(data: string) -> [32]byte {
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
return hash_bytes_256(transmute([]byte)(data))
}
// hash_bytes_256 will hash the given input and return the
// computed hash
-hash_bytes_256 :: proc(data: []byte) -> [32]byte {
- hash: [32]byte
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+ hash: [DIGEST_SIZE_256]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_SHA_256, 0)
botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
@@ -96,10 +122,29 @@ hash_bytes_256 :: proc(data: []byte) -> [32]byte {
return hash
}
+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_256(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+ ctx: botan.hash_t
+ botan.hash_init(&ctx, botan.HASH_SHA_256, 0)
+ botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
+ botan.hash_final(ctx, &hash[0])
+ botan.hash_destroy(ctx)
+}
+
// hash_stream_256 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
- hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+ hash: [DIGEST_SIZE_256]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_SHA_256, 0)
buf := make([]byte, 512)
@@ -118,7 +163,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
// hash_file_256 will read the file provided by the given handle
// and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
if !load_at_once {
return hash_stream_256(os.stream_from_handle(hd))
} else {
@@ -126,7 +171,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
return hash_bytes_256(buf[:]), ok
}
}
- return [32]byte{}, false
+ return [DIGEST_SIZE_256]byte{}, false
}
hash_256 :: proc {
@@ -134,18 +179,20 @@ hash_256 :: proc {
hash_file_256,
hash_bytes_256,
hash_string_256,
+ hash_bytes_to_buffer_256,
+ hash_string_to_buffer_256,
}
// hash_string_384 will hash the given input and return the
// computed hash
-hash_string_384 :: proc(data: string) -> [48]byte {
+hash_string_384 :: proc(data: string) -> [DIGEST_SIZE_384]byte {
return hash_bytes_384(transmute([]byte)(data))
}
// hash_bytes_384 will hash the given input and return the
// computed hash
-hash_bytes_384 :: proc(data: []byte) -> [48]byte {
- hash: [48]byte
+hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
+ hash: [DIGEST_SIZE_384]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_SHA_384, 0)
botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
@@ -154,10 +201,29 @@ hash_bytes_384 :: proc(data: []byte) -> [48]byte {
return hash
}
+// hash_string_to_buffer_384 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_384(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_384 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_384, "Size of destination buffer is smaller than the digest size")
+ ctx: botan.hash_t
+ botan.hash_init(&ctx, botan.HASH_SHA_384, 0)
+ botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
+ botan.hash_final(ctx, &hash[0])
+ botan.hash_destroy(ctx)
+}
+
// hash_stream_384 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
- hash: [48]byte
+hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
+ hash: [DIGEST_SIZE_384]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_SHA_384, 0)
buf := make([]byte, 512)
@@ -176,7 +242,7 @@ hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
// hash_file_384 will read the file provided by the given handle
// and compute a hash
-hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool) {
+hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
if !load_at_once {
return hash_stream_384(os.stream_from_handle(hd))
} else {
@@ -184,7 +250,7 @@ hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool)
return hash_bytes_384(buf[:]), ok
}
}
- return [48]byte{}, false
+ return [DIGEST_SIZE_384]byte{}, false
}
hash_384 :: proc {
@@ -192,18 +258,20 @@ hash_384 :: proc {
hash_file_384,
hash_bytes_384,
hash_string_384,
+ hash_bytes_to_buffer_384,
+ hash_string_to_buffer_384,
}
// hash_string_512 will hash the given input and return the
// computed hash
-hash_string_512 :: proc(data: string) -> [64]byte {
+hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
return hash_bytes_512(transmute([]byte)(data))
}
// hash_bytes_512 will hash the given input and return the
// computed hash
-hash_bytes_512 :: proc(data: []byte) -> [64]byte {
- hash: [64]byte
+hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
+ hash: [DIGEST_SIZE_512]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_SHA_512, 0)
botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
@@ -212,10 +280,29 @@ hash_bytes_512 :: proc(data: []byte) -> [64]byte {
return hash
}
+// hash_string_to_buffer_512 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_512(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_512 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
+ ctx: botan.hash_t
+ botan.hash_init(&ctx, botan.HASH_SHA_512, 0)
+ botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
+ botan.hash_final(ctx, &hash[0])
+ botan.hash_destroy(ctx)
+}
+
// hash_stream_512 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
- hash: [64]byte
+hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
+ hash: [DIGEST_SIZE_512]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_SHA_512, 0)
buf := make([]byte, 512)
@@ -234,7 +321,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
// hash_file_512 will read the file provided by the given handle
// and compute a hash
-hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
if !load_at_once {
return hash_stream_512(os.stream_from_handle(hd))
} else {
@@ -242,7 +329,7 @@ hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool)
return hash_bytes_512(buf[:]), ok
}
}
- return [64]byte{}, false
+ return [DIGEST_SIZE_512]byte{}, false
}
hash_512 :: proc {
@@ -250,6 +337,8 @@ hash_512 :: proc {
hash_file_512,
hash_bytes_512,
hash_string_512,
+ hash_bytes_to_buffer_512,
+ hash_string_to_buffer_512,
}
/*
diff --git a/vendor/botan/sha3/sha3.odin b/vendor/botan/sha3/sha3.odin
index 1211d836a..cf9fa5b2b 100644
--- a/vendor/botan/sha3/sha3.odin
+++ b/vendor/botan/sha3/sha3.odin
@@ -20,16 +20,21 @@ import botan "../bindings"
High level API
*/
+DIGEST_SIZE_224 :: 28
+DIGEST_SIZE_256 :: 32
+DIGEST_SIZE_384 :: 48
+DIGEST_SIZE_512 :: 64
+
// hash_string_224 will hash the given input and return the
// computed hash
-hash_string_224 :: proc(data: string) -> [28]byte {
+hash_string_224 :: proc(data: string) -> [DIGEST_SIZE_224]byte {
return hash_bytes_224(transmute([]byte)(data))
}
// hash_bytes_224 will hash the given input and return the
// computed hash
-hash_bytes_224 :: proc(data: []byte) -> [28]byte {
- hash: [28]byte
+hash_bytes_224 :: proc(data: []byte) -> [DIGEST_SIZE_224]byte {
+ hash: [DIGEST_SIZE_224]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_SHA3_224, 0)
botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
@@ -38,10 +43,29 @@ hash_bytes_224 :: proc(data: []byte) -> [28]byte {
return hash
}
+// hash_string_to_buffer_224 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_224 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_224(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_224 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_224 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_224, "Size of destination buffer is smaller than the digest size")
+ ctx: botan.hash_t
+ botan.hash_init(&ctx, botan.HASH_SHA3_224, 0)
+ botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
+ botan.hash_final(ctx, &hash[0])
+ botan.hash_destroy(ctx)
+}
+
// hash_stream_224 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
- hash: [28]byte
+hash_stream_224 :: proc(s: io.Stream) -> ([DIGEST_SIZE_224]byte, bool) {
+ hash: [DIGEST_SIZE_224]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_SHA3_224, 0)
buf := make([]byte, 512)
@@ -60,7 +84,7 @@ hash_stream_224 :: proc(s: io.Stream) -> ([28]byte, bool) {
// hash_file_224 will read the file provided by the given handle
// and compute a hash
-hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool) {
+hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_224]byte, bool) {
if !load_at_once {
return hash_stream_224(os.stream_from_handle(hd))
} else {
@@ -68,7 +92,7 @@ hash_file_224 :: proc(hd: os.Handle, load_at_once := false) -> ([28]byte, bool)
return hash_bytes_224(buf[:]), ok
}
}
- return [28]byte{}, false
+ return [DIGEST_SIZE_224]byte{}, false
}
hash_224 :: proc {
@@ -76,18 +100,20 @@ hash_224 :: proc {
hash_file_224,
hash_bytes_224,
hash_string_224,
+ hash_bytes_to_buffer_224,
+ hash_string_to_buffer_224,
}
// hash_string_256 will hash the given input and return the
// computed hash
-hash_string_256 :: proc(data: string) -> [32]byte {
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
return hash_bytes_256(transmute([]byte)(data))
}
// hash_bytes_256 will hash the given input and return the
// computed hash
-hash_bytes_256 :: proc(data: []byte) -> [32]byte {
- hash: [32]byte
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+ hash: [DIGEST_SIZE_256]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_SHA3_256, 0)
botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
@@ -96,10 +122,29 @@ hash_bytes_256 :: proc(data: []byte) -> [32]byte {
return hash
}
+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_256(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+ ctx: botan.hash_t
+ botan.hash_init(&ctx, botan.HASH_SHA3_256, 0)
+ botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
+ botan.hash_final(ctx, &hash[0])
+ botan.hash_destroy(ctx)
+}
+
// hash_stream_256 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
- hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+ hash: [DIGEST_SIZE_256]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_SHA3_256, 0)
buf := make([]byte, 512)
@@ -118,7 +163,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
// hash_file_256 will read the file provided by the given handle
// and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
if !load_at_once {
return hash_stream_256(os.stream_from_handle(hd))
} else {
@@ -126,7 +171,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
return hash_bytes_256(buf[:]), ok
}
}
- return [32]byte{}, false
+ return [DIGEST_SIZE_256]byte{}, false
}
hash_256 :: proc {
@@ -134,18 +179,20 @@ hash_256 :: proc {
hash_file_256,
hash_bytes_256,
hash_string_256,
+ hash_bytes_to_buffer_256,
+ hash_string_to_buffer_256,
}
// hash_string_384 will hash the given input and return the
// computed hash
-hash_string_384 :: proc(data: string) -> [48]byte {
+hash_string_384 :: proc(data: string) -> [DIGEST_SIZE_384]byte {
return hash_bytes_384(transmute([]byte)(data))
}
// hash_bytes_384 will hash the given input and return the
// computed hash
-hash_bytes_384 :: proc(data: []byte) -> [48]byte {
- hash: [48]byte
+hash_bytes_384 :: proc(data: []byte) -> [DIGEST_SIZE_384]byte {
+ hash: [DIGEST_SIZE_384]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_SHA3_384, 0)
botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
@@ -154,10 +201,29 @@ hash_bytes_384 :: proc(data: []byte) -> [48]byte {
return hash
}
+// hash_string_to_buffer_384 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_384 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_384(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_384 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_384 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_384, "Size of destination buffer is smaller than the digest size")
+ ctx: botan.hash_t
+ botan.hash_init(&ctx, botan.HASH_SHA3_384, 0)
+ botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
+ botan.hash_final(ctx, &hash[0])
+ botan.hash_destroy(ctx)
+}
+
// hash_stream_384 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
- hash: [48]byte
+hash_stream_384 :: proc(s: io.Stream) -> ([DIGEST_SIZE_384]byte, bool) {
+ hash: [DIGEST_SIZE_384]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_SHA3_384, 0)
buf := make([]byte, 512)
@@ -176,7 +242,7 @@ hash_stream_384 :: proc(s: io.Stream) -> ([48]byte, bool) {
// hash_file_384 will read the file provided by the given handle
// and compute a hash
-hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool) {
+hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_384]byte, bool) {
if !load_at_once {
return hash_stream_384(os.stream_from_handle(hd))
} else {
@@ -184,7 +250,7 @@ hash_file_384 :: proc(hd: os.Handle, load_at_once := false) -> ([48]byte, bool)
return hash_bytes_384(buf[:]), ok
}
}
- return [48]byte{}, false
+ return [DIGEST_SIZE_384]byte{}, false
}
hash_384 :: proc {
@@ -192,18 +258,20 @@ hash_384 :: proc {
hash_file_384,
hash_bytes_384,
hash_string_384,
+ hash_bytes_to_buffer_384,
+ hash_string_to_buffer_384,
}
// hash_string_512 will hash the given input and return the
// computed hash
-hash_string_512 :: proc(data: string) -> [64]byte {
+hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
return hash_bytes_512(transmute([]byte)(data))
}
// hash_bytes_512 will hash the given input and return the
// computed hash
-hash_bytes_512 :: proc(data: []byte) -> [64]byte {
- hash: [64]byte
+hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
+ hash: [DIGEST_SIZE_512]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_SHA3_512, 0)
botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
@@ -212,10 +280,29 @@ hash_bytes_512 :: proc(data: []byte) -> [64]byte {
return hash
}
+// hash_string_to_buffer_512 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_512(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_512 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
+ ctx: botan.hash_t
+ botan.hash_init(&ctx, botan.HASH_SHA3_512, 0)
+ botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
+ botan.hash_final(ctx, &hash[0])
+ botan.hash_destroy(ctx)
+}
+
// hash_stream_512 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
- hash: [64]byte
+hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
+ hash: [DIGEST_SIZE_512]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_SHA3_512, 0)
buf := make([]byte, 512)
@@ -234,7 +321,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
// hash_file_512 will read the file provided by the given handle
// and compute a hash
-hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
if !load_at_once {
return hash_stream_512(os.stream_from_handle(hd))
} else {
@@ -242,7 +329,7 @@ hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool)
return hash_bytes_512(buf[:]), ok
}
}
- return [64]byte{}, false
+ return [DIGEST_SIZE_512]byte{}, false
}
hash_512 :: proc {
@@ -250,6 +337,8 @@ hash_512 :: proc {
hash_file_512,
hash_bytes_512,
hash_string_512,
+ hash_bytes_to_buffer_512,
+ hash_string_to_buffer_512,
}
/*
diff --git a/vendor/botan/shake/shake.odin b/vendor/botan/shake/shake.odin
index 82bf7ad15..ac8432f64 100644
--- a/vendor/botan/shake/shake.odin
+++ b/vendor/botan/shake/shake.odin
@@ -20,16 +20,19 @@ import botan "../bindings"
High level API
*/
+DIGEST_SIZE_128 :: 16
+DIGEST_SIZE_256 :: 32
+
// hash_string_128 will hash the given input and return the
// computed hash
-hash_string_128 :: proc(data: string) -> [16]byte {
+hash_string_128 :: proc(data: string) -> [DIGEST_SIZE_128]byte {
return hash_bytes_128(transmute([]byte)(data))
}
// hash_bytes_128 will hash the given input and return the
// computed hash
-hash_bytes_128 :: proc(data: []byte) -> [16]byte {
- hash: [16]byte
+hash_bytes_128 :: proc(data: []byte) -> [DIGEST_SIZE_128]byte {
+ hash: [DIGEST_SIZE_128]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_SHAKE_128, 0)
botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
@@ -38,10 +41,29 @@ hash_bytes_128 :: proc(data: []byte) -> [16]byte {
return hash
}
+// hash_string_to_buffer_128 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_128 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_128(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_128 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_128 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_128, "Size of destination buffer is smaller than the digest size")
+ ctx: botan.hash_t
+ botan.hash_init(&ctx, botan.HASH_SHAKE_128, 0)
+ botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
+ botan.hash_final(ctx, &hash[0])
+ botan.hash_destroy(ctx)
+}
+
// hash_stream_128 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_128 :: proc(s: io.Stream) -> ([16]byte, bool) {
- hash: [16]byte
+hash_stream_128 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
+ hash: [DIGEST_SIZE_128]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_SHAKE_128, 0)
buf := make([]byte, 512)
@@ -60,7 +82,7 @@ hash_stream_128 :: proc(s: io.Stream) -> ([16]byte, bool) {
// hash_file_128 will read the file provided by the given handle
// and compute a hash
-hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
+hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_128]byte, bool) {
if !load_at_once {
return hash_stream_128(os.stream_from_handle(hd))
} else {
@@ -68,7 +90,7 @@ hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool)
return hash_bytes_128(buf[:]), ok
}
}
- return [16]byte{}, false
+ return [DIGEST_SIZE_128]byte{}, false
}
hash_128 :: proc {
@@ -76,18 +98,20 @@ hash_128 :: proc {
hash_file_128,
hash_bytes_128,
hash_string_128,
+ hash_bytes_to_buffer_128,
+ hash_string_to_buffer_128,
}
// hash_string_256 will hash the given input and return the
// computed hash
-hash_string_256 :: proc(data: string) -> [32]byte {
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
return hash_bytes_256(transmute([]byte)(data))
}
// hash_bytes_256 will hash the given input and return the
// computed hash
-hash_bytes_256 :: proc(data: []byte) -> [32]byte {
- hash: [32]byte
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+ hash: [DIGEST_SIZE_256]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_SHAKE_256, 0)
botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
@@ -96,10 +120,29 @@ hash_bytes_256 :: proc(data: []byte) -> [32]byte {
return hash
}
+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_256(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+ ctx: botan.hash_t
+ botan.hash_init(&ctx, botan.HASH_SHAKE_256, 0)
+ botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
+ botan.hash_final(ctx, &hash[0])
+ botan.hash_destroy(ctx)
+}
+
// hash_stream_256 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
- hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+ hash: [DIGEST_SIZE_256]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_SHAKE_256, 0)
buf := make([]byte, 512)
@@ -118,7 +161,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
// hash_file_256 will read the file provided by the given handle
// and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
if !load_at_once {
return hash_stream_256(os.stream_from_handle(hd))
} else {
@@ -126,7 +169,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
return hash_bytes_256(buf[:]), ok
}
}
- return [32]byte{}, false
+ return [DIGEST_SIZE_256]byte{}, false
}
hash_256 :: proc {
@@ -134,6 +177,8 @@ hash_256 :: proc {
hash_file_256,
hash_bytes_256,
hash_string_256,
+ hash_bytes_to_buffer_256,
+ hash_string_to_buffer_256,
}
/*
diff --git a/vendor/botan/skein512/skein512.odin b/vendor/botan/skein512/skein512.odin
index dc808edb9..490eeba03 100644
--- a/vendor/botan/skein512/skein512.odin
+++ b/vendor/botan/skein512/skein512.odin
@@ -22,16 +22,19 @@ import botan "../bindings"
High level API
*/
+DIGEST_SIZE_256 :: 32
+DIGEST_SIZE_512 :: 64
+
// hash_string_256 will hash the given input and return the
// computed hash
-hash_string_256 :: proc(data: string) -> [32]byte {
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
return hash_bytes_256(transmute([]byte)(data))
}
// hash_bytes_256 will hash the given input and return the
// computed hash
-hash_bytes_256 :: proc(data: []byte) -> [32]byte {
- hash: [32]byte
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+ hash: [DIGEST_SIZE_256]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_SKEIN_512_256, 0)
botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
@@ -40,10 +43,29 @@ hash_bytes_256 :: proc(data: []byte) -> [32]byte {
return hash
}
+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_256(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+ ctx: botan.hash_t
+ botan.hash_init(&ctx, botan.HASH_SKEIN_512_256, 0)
+ botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
+ botan.hash_final(ctx, &hash[0])
+ botan.hash_destroy(ctx)
+}
+
// hash_stream_256 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
- hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+ hash: [DIGEST_SIZE_256]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_SKEIN_512_256, 0)
buf := make([]byte, 512)
@@ -62,7 +84,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
// hash_file_256 will read the file provided by the given handle
// and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
if !load_at_once {
return hash_stream_256(os.stream_from_handle(hd))
} else {
@@ -70,7 +92,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
return hash_bytes_256(buf[:]), ok
}
}
- return [32]byte{}, false
+ return [DIGEST_SIZE_256]byte{}, false
}
hash_256 :: proc {
@@ -78,18 +100,20 @@ hash_256 :: proc {
hash_file_256,
hash_bytes_256,
hash_string_256,
+ hash_bytes_to_buffer_256,
+ hash_string_to_buffer_256,
}
// hash_string_512 will hash the given input and return the
// computed hash
-hash_string_512 :: proc(data: string) -> [64]byte {
+hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
return hash_bytes_512(transmute([]byte)(data))
}
// hash_bytes_512 will hash the given input and return the
// computed hash
-hash_bytes_512 :: proc(data: []byte) -> [64]byte {
- hash: [64]byte
+hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
+ hash: [DIGEST_SIZE_512]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_SKEIN_512_512, 0)
botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
@@ -98,10 +122,29 @@ hash_bytes_512 :: proc(data: []byte) -> [64]byte {
return hash
}
+// hash_string_to_buffer_512 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_512(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_512 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
+ ctx: botan.hash_t
+ botan.hash_init(&ctx, botan.HASH_SKEIN_512_512, 0)
+ botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
+ botan.hash_final(ctx, &hash[0])
+ botan.hash_destroy(ctx)
+}
+
// hash_stream_512 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
- hash: [64]byte
+hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
+ hash: [DIGEST_SIZE_512]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_SKEIN_512_512, 0)
buf := make([]byte, 512)
@@ -120,7 +163,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
// hash_file_512 will read the file provided by the given handle
// and compute a hash
-hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
if !load_at_once {
return hash_stream_512(os.stream_from_handle(hd))
} else {
@@ -128,7 +171,7 @@ hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool)
return hash_bytes_512(buf[:]), ok
}
}
- return [64]byte{}, false
+ return [DIGEST_SIZE_512]byte{}, false
}
hash_512 :: proc {
@@ -136,6 +179,8 @@ hash_512 :: proc {
hash_file_512,
hash_bytes_512,
hash_string_512,
+ hash_bytes_to_buffer_512,
+ hash_string_to_buffer_512,
}
// hash_string_slice will hash the given input and return the
@@ -156,6 +201,25 @@ hash_bytes_slice :: proc(data: []byte, bit_size: int, allocator := context.alloc
return hash
}
+// hash_string_to_buffer_slice will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_slice :: proc(data: string, hash: []byte, bit_size: int, allocator := context.allocator) {
+ hash_bytes_to_buffer_slice(transmute([]byte)(data), hash, bit_size, allocator);
+}
+
+// hash_bytes_to_buffer_slice will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_slice :: proc(data, hash: []byte, bit_size: int, allocator := context.allocator) {
+ assert(len(hash) >= bit_size, "Size of destination buffer is smaller than the digest size")
+ ctx: botan.hash_t
+ botan.hash_init(&ctx, strings.unsafe_string_to_cstring(fmt.tprintf("Skein-512(%d)", bit_size * 8)), 0)
+ botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
+ botan.hash_final(ctx, &hash[0])
+ botan.hash_destroy(ctx)
+}
+
// hash_stream_slice will read the stream in chunks and compute a
// hash from its contents
hash_stream_slice :: proc(s: io.Stream, bit_size: int, allocator := context.allocator) -> ([]byte, bool) {
@@ -194,6 +258,8 @@ hash_slice :: proc {
hash_file_slice,
hash_bytes_slice,
hash_string_slice,
+ hash_bytes_to_buffer_slice,
+ hash_string_to_buffer_slice,
}
/*
diff --git a/vendor/botan/sm3/sm3.odin b/vendor/botan/sm3/sm3.odin
index eada2a5b3..7eb3f1f8d 100644
--- a/vendor/botan/sm3/sm3.odin
+++ b/vendor/botan/sm3/sm3.odin
@@ -20,16 +20,18 @@ import botan "../bindings"
High level API
*/
+DIGEST_SIZE :: 32
+
// hash_string will hash the given input and return the
// computed hash
-hash_string :: proc "contextless" (data: string) -> [32]byte {
+hash_string :: proc "contextless" (data: string) -> [DIGEST_SIZE]byte {
return hash_bytes(transmute([]byte)(data))
}
// hash_bytes will hash the given input and return the
// computed hash
-hash_bytes :: proc "contextless" (data: []byte) -> [32]byte {
- hash: [32]byte
+hash_bytes :: proc "contextless" (data: []byte) -> [DIGEST_SIZE]byte {
+ hash: [DIGEST_SIZE]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_SM3, 0)
botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
@@ -38,10 +40,29 @@ hash_bytes :: proc "contextless" (data: []byte) -> [32]byte {
return hash
}
+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+ ctx: botan.hash_t
+ botan.hash_init(&ctx, botan.HASH_SM3, 0)
+ botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
+ botan.hash_final(ctx, &hash[0])
+ botan.hash_destroy(ctx)
+}
+
// hash_stream will read the stream in chunks and compute a
// hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([32]byte, bool) {
- hash: [32]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+ hash: [DIGEST_SIZE]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_SM3, 0)
buf := make([]byte, 512)
@@ -60,7 +81,7 @@ hash_stream :: proc(s: io.Stream) -> ([32]byte, bool) {
// hash_file will read the file provided by the given handle
// and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
if !load_at_once {
return hash_stream(os.stream_from_handle(hd))
} else {
@@ -68,7 +89,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
return hash_bytes(buf[:]), ok
}
}
- return [32]byte{}, false
+ return [DIGEST_SIZE]byte{}, false
}
hash :: proc {
@@ -76,6 +97,8 @@ hash :: proc {
hash_file,
hash_bytes,
hash_string,
+ hash_bytes_to_buffer,
+ hash_string_to_buffer,
}
/*
diff --git a/vendor/botan/streebog/streebog.odin b/vendor/botan/streebog/streebog.odin
index acee1a78a..cbf2047ed 100644
--- a/vendor/botan/streebog/streebog.odin
+++ b/vendor/botan/streebog/streebog.odin
@@ -20,16 +20,19 @@ import botan "../bindings"
High level API
*/
+DIGEST_SIZE_256 :: 32
+DIGEST_SIZE_512 :: 64
+
// hash_string_256 will hash the given input and return the
// computed hash
-hash_string_256 :: proc(data: string) -> [32]byte {
+hash_string_256 :: proc(data: string) -> [DIGEST_SIZE_256]byte {
return hash_bytes_256(transmute([]byte)(data))
}
// hash_bytes_256 will hash the given input and return the
// computed hash
-hash_bytes_256 :: proc(data: []byte) -> [32]byte {
- hash: [32]byte
+hash_bytes_256 :: proc(data: []byte) -> [DIGEST_SIZE_256]byte {
+ hash: [DIGEST_SIZE_256]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_STREEBOG_256, 0)
botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
@@ -38,10 +41,29 @@ hash_bytes_256 :: proc(data: []byte) -> [32]byte {
return hash
}
+// hash_string_to_buffer_256 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_256 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_256(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_256 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_256 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_256, "Size of destination buffer is smaller than the digest size")
+ ctx: botan.hash_t
+ botan.hash_init(&ctx, botan.HASH_STREEBOG_256, 0)
+ botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
+ botan.hash_final(ctx, &hash[0])
+ botan.hash_destroy(ctx)
+}
+
// hash_stream_256 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
- hash: [32]byte
+hash_stream_256 :: proc(s: io.Stream) -> ([DIGEST_SIZE_256]byte, bool) {
+ hash: [DIGEST_SIZE_256]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_STREEBOG_256, 0)
buf := make([]byte, 512)
@@ -60,7 +82,7 @@ hash_stream_256 :: proc(s: io.Stream) -> ([32]byte, bool) {
// hash_file_256 will read the file provided by the given handle
// and compute a hash
-hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool) {
+hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_256]byte, bool) {
if !load_at_once {
return hash_stream_256(os.stream_from_handle(hd))
} else {
@@ -68,7 +90,7 @@ hash_file_256 :: proc(hd: os.Handle, load_at_once := false) -> ([32]byte, bool)
return hash_bytes_256(buf[:]), ok
}
}
- return [32]byte{}, false
+ return [DIGEST_SIZE_256]byte{}, false
}
hash_256 :: proc {
@@ -76,18 +98,20 @@ hash_256 :: proc {
hash_file_256,
hash_bytes_256,
hash_string_256,
+ hash_bytes_to_buffer_256,
+ hash_string_to_buffer_256,
}
// hash_string_512 will hash the given input and return the
// computed hash
-hash_string_512 :: proc(data: string) -> [64]byte {
+hash_string_512 :: proc(data: string) -> [DIGEST_SIZE_512]byte {
return hash_bytes_512(transmute([]byte)(data))
}
// hash_bytes_512 will hash the given input and return the
// computed hash
-hash_bytes_512 :: proc(data: []byte) -> [64]byte {
- hash: [64]byte
+hash_bytes_512 :: proc(data: []byte) -> [DIGEST_SIZE_512]byte {
+ hash: [DIGEST_SIZE_512]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_STREEBOG_512, 0)
botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
@@ -96,10 +120,29 @@ hash_bytes_512 :: proc(data: []byte) -> [64]byte {
return hash
}
+// hash_string_to_buffer_512 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_512 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_512(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_512 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_512 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_512, "Size of destination buffer is smaller than the digest size")
+ ctx: botan.hash_t
+ botan.hash_init(&ctx, botan.HASH_STREEBOG_512, 0)
+ botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
+ botan.hash_final(ctx, &hash[0])
+ botan.hash_destroy(ctx)
+}
+
// hash_stream_512 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
- hash: [64]byte
+hash_stream_512 :: proc(s: io.Stream) -> ([DIGEST_SIZE_512]byte, bool) {
+ hash: [DIGEST_SIZE_512]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_STREEBOG_512, 0)
buf := make([]byte, 512)
@@ -118,7 +161,7 @@ hash_stream_512 :: proc(s: io.Stream) -> ([64]byte, bool) {
// hash_file_512 will read the file provided by the given handle
// and compute a hash
-hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_512]byte, bool) {
if !load_at_once {
return hash_stream_512(os.stream_from_handle(hd))
} else {
@@ -126,7 +169,7 @@ hash_file_512 :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool)
return hash_bytes_512(buf[:]), ok
}
}
- return [64]byte{}, false
+ return [DIGEST_SIZE_512]byte{}, false
}
hash_512 :: proc {
@@ -134,6 +177,8 @@ hash_512 :: proc {
hash_file_512,
hash_bytes_512,
hash_string_512,
+ hash_bytes_to_buffer_512,
+ hash_string_to_buffer_512,
}
/*
diff --git a/vendor/botan/tiger/tiger.odin b/vendor/botan/tiger/tiger.odin
index b240457a6..b29602b26 100644
--- a/vendor/botan/tiger/tiger.odin
+++ b/vendor/botan/tiger/tiger.odin
@@ -20,16 +20,20 @@ import botan "../bindings"
High level API
*/
+DIGEST_SIZE_128 :: 16
+DIGEST_SIZE_160 :: 20
+DIGEST_SIZE_192 :: 24
+
// hash_string_128 will hash the given input and return the
// computed hash
-hash_string_128 :: proc(data: string) -> [16]byte {
+hash_string_128 :: proc(data: string) -> [DIGEST_SIZE_128]byte {
return hash_bytes_128(transmute([]byte)(data))
}
// hash_bytes_128 will hash the given input and return the
// computed hash
-hash_bytes_128 :: proc(data: []byte) -> [16]byte {
- hash: [16]byte
+hash_bytes_128 :: proc(data: []byte) -> [DIGEST_SIZE_128]byte {
+ hash: [DIGEST_SIZE_128]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_TIGER_128, 0)
botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
@@ -38,10 +42,29 @@ hash_bytes_128 :: proc(data: []byte) -> [16]byte {
return hash
}
+// hash_string_to_buffer_128 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_128 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_128(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_128 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_128 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_128, "Size of destination buffer is smaller than the digest size")
+ ctx: botan.hash_t
+ botan.hash_init(&ctx, botan.HASH_TIGER_128, 0)
+ botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
+ botan.hash_final(ctx, &hash[0])
+ botan.hash_destroy(ctx)
+}
+
// hash_stream_128 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_128 :: proc(s: io.Stream) -> ([16]byte, bool) {
- hash: [16]byte
+hash_stream_128 :: proc(s: io.Stream) -> ([DIGEST_SIZE_128]byte, bool) {
+ hash: [DIGEST_SIZE_128]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_TIGER_128, 0)
buf := make([]byte, 512)
@@ -60,7 +83,7 @@ hash_stream_128 :: proc(s: io.Stream) -> ([16]byte, bool) {
// hash_file_128 will read the file provided by the given handle
// and compute a hash
-hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool) {
+hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_128]byte, bool) {
if !load_at_once {
return hash_stream_128(os.stream_from_handle(hd))
} else {
@@ -68,7 +91,7 @@ hash_file_128 :: proc(hd: os.Handle, load_at_once := false) -> ([16]byte, bool)
return hash_bytes_128(buf[:]), ok
}
}
- return [16]byte{}, false
+ return [DIGEST_SIZE_128]byte{}, false
}
hash_128 :: proc {
@@ -76,18 +99,20 @@ hash_128 :: proc {
hash_file_128,
hash_bytes_128,
hash_string_128,
+ hash_bytes_to_buffer_128,
+ hash_string_to_buffer_128,
}
// hash_string_160 will hash the given input and return the
// computed hash
-hash_string_160 :: proc(data: string) -> [20]byte {
+hash_string_160 :: proc(data: string) -> [DIGEST_SIZE_160]byte {
return hash_bytes_160(transmute([]byte)(data))
}
// hash_bytes_160 will hash the given input and return the
// computed hash
-hash_bytes_160 :: proc(data: []byte) -> [20]byte {
- hash: [20]byte
+hash_bytes_160 :: proc(data: []byte) -> [DIGEST_SIZE_160]byte {
+ hash: [DIGEST_SIZE_160]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_TIGER_160, 0)
botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
@@ -96,10 +121,29 @@ hash_bytes_160 :: proc(data: []byte) -> [20]byte {
return hash
}
+// hash_string_to_buffer_160 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_160 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_160(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_160 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_160 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_160, "Size of destination buffer is smaller than the digest size")
+ ctx: botan.hash_t
+ botan.hash_init(&ctx, botan.HASH_TIGER_160, 0)
+ botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
+ botan.hash_final(ctx, &hash[0])
+ botan.hash_destroy(ctx)
+}
+
// hash_stream_160 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_160 :: proc(s: io.Stream) -> ([20]byte, bool) {
- hash: [20]byte
+hash_stream_160 :: proc(s: io.Stream) -> ([DIGEST_SIZE_160]byte, bool) {
+ hash: [DIGEST_SIZE_160]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_TIGER_160, 0)
buf := make([]byte, 512)
@@ -118,7 +162,7 @@ hash_stream_160 :: proc(s: io.Stream) -> ([20]byte, bool) {
// hash_file_160 will read the file provided by the given handle
// and compute a hash
-hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool) {
+hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_160]byte, bool) {
if !load_at_once {
return hash_stream_160(os.stream_from_handle(hd))
} else {
@@ -126,7 +170,7 @@ hash_file_160 :: proc(hd: os.Handle, load_at_once := false) -> ([20]byte, bool)
return hash_bytes_160(buf[:]), ok
}
}
- return [20]byte{}, false
+ return [DIGEST_SIZE_160]byte{}, false
}
hash_160 :: proc {
@@ -134,18 +178,20 @@ hash_160 :: proc {
hash_file_160,
hash_bytes_160,
hash_string_160,
+ hash_bytes_to_buffer_160,
+ hash_string_to_buffer_160,
}
// hash_string_192 will hash the given input and return the
// computed hash
-hash_string_192 :: proc(data: string) -> [24]byte {
+hash_string_192 :: proc(data: string) -> [DIGEST_SIZE_192]byte {
return hash_bytes_192(transmute([]byte)(data))
}
// hash_bytes_192 will hash the given input and return the
// computed hash
-hash_bytes_192 :: proc(data: []byte) -> [24]byte {
- hash: [24]byte
+hash_bytes_192 :: proc(data: []byte) -> [DIGEST_SIZE_192]byte {
+ hash: [DIGEST_SIZE_192]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_TIGER_192, 0)
botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
@@ -154,10 +200,29 @@ hash_bytes_192 :: proc(data: []byte) -> [24]byte {
return hash
}
+// hash_string_to_buffer_192 will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer_192 :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer_192(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer_192 will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer_192 :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE_192, "Size of destination buffer is smaller than the digest size")
+ ctx: botan.hash_t
+ botan.hash_init(&ctx, botan.HASH_TIGER_192, 0)
+ botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
+ botan.hash_final(ctx, &hash[0])
+ botan.hash_destroy(ctx)
+}
+
// hash_stream_192 will read the stream in chunks and compute a
// hash from its contents
-hash_stream_192 :: proc(s: io.Stream) -> ([24]byte, bool) {
- hash: [24]byte
+hash_stream_192 :: proc(s: io.Stream) -> ([DIGEST_SIZE_192]byte, bool) {
+ hash: [DIGEST_SIZE_192]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_TIGER_192, 0)
buf := make([]byte, 512)
@@ -176,7 +241,7 @@ hash_stream_192 :: proc(s: io.Stream) -> ([24]byte, bool) {
// hash_file_192 will read the file provided by the given handle
// and compute a hash
-hash_file_192 :: proc(hd: os.Handle, load_at_once := false) -> ([24]byte, bool) {
+hash_file_192 :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE_192]byte, bool) {
if !load_at_once {
return hash_stream_192(os.stream_from_handle(hd))
} else {
@@ -184,7 +249,7 @@ hash_file_192 :: proc(hd: os.Handle, load_at_once := false) -> ([24]byte, bool)
return hash_bytes_192(buf[:]), ok
}
}
- return [24]byte{}, false
+ return [DIGEST_SIZE_192]byte{}, false
}
hash_192 :: proc {
@@ -192,6 +257,8 @@ hash_192 :: proc {
hash_file_192,
hash_bytes_192,
hash_string_192,
+ hash_bytes_to_buffer_192,
+ hash_string_to_buffer_192,
}
/*
diff --git a/vendor/botan/whirlpool/whirlpool.odin b/vendor/botan/whirlpool/whirlpool.odin
index 130386ff3..2aff3c8ed 100644
--- a/vendor/botan/whirlpool/whirlpool.odin
+++ b/vendor/botan/whirlpool/whirlpool.odin
@@ -20,16 +20,18 @@ import botan "../bindings"
High level API
*/
+DIGEST_SIZE :: 64
+
// hash_string will hash the given input and return the
// computed hash
-hash_string :: proc "contextless" (data: string) -> [64]byte {
+hash_string :: proc "contextless" (data: string) -> [DIGEST_SIZE]byte {
return hash_bytes(transmute([]byte)(data))
}
// hash_bytes will hash the given input and return the
// computed hash
-hash_bytes :: proc "contextless" (data: []byte) -> [64]byte {
- hash: [64]byte
+hash_bytes :: proc "contextless" (data: []byte) -> [DIGEST_SIZE]byte {
+ hash: [DIGEST_SIZE]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_WHIRLPOOL, 0)
botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
@@ -38,10 +40,29 @@ hash_bytes :: proc "contextless" (data: []byte) -> [64]byte {
return hash
}
+// hash_string_to_buffer will hash the given input and assign the
+// computed hash to the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_string_to_buffer :: proc(data: string, hash: []byte) {
+ hash_bytes_to_buffer(transmute([]byte)(data), hash);
+}
+
+// hash_bytes_to_buffer will hash the given input and write the
+// computed hash into the second parameter.
+// It requires that the destination buffer is at least as big as the digest size
+hash_bytes_to_buffer :: proc(data, hash: []byte) {
+ assert(len(hash) >= DIGEST_SIZE, "Size of destination buffer is smaller than the digest size")
+ ctx: botan.hash_t
+ botan.hash_init(&ctx, botan.HASH_WHIRLPOOL, 0)
+ botan.hash_update(ctx, len(data) == 0 ? nil : &data[0], uint(len(data)))
+ botan.hash_final(ctx, &hash[0])
+ botan.hash_destroy(ctx)
+}
+
// hash_stream will read the stream in chunks and compute a
// hash from its contents
-hash_stream :: proc(s: io.Stream) -> ([64]byte, bool) {
- hash: [64]byte
+hash_stream :: proc(s: io.Stream) -> ([DIGEST_SIZE]byte, bool) {
+ hash: [DIGEST_SIZE]byte
ctx: botan.hash_t
botan.hash_init(&ctx, botan.HASH_WHIRLPOOL, 0)
buf := make([]byte, 512)
@@ -60,7 +81,7 @@ hash_stream :: proc(s: io.Stream) -> ([64]byte, bool) {
// hash_file will read the file provided by the given handle
// and compute a hash
-hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
+hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([DIGEST_SIZE]byte, bool) {
if !load_at_once {
return hash_stream(os.stream_from_handle(hd))
} else {
@@ -68,7 +89,7 @@ hash_file :: proc(hd: os.Handle, load_at_once := false) -> ([64]byte, bool) {
return hash_bytes(buf[:]), ok
}
}
- return [64]byte{}, false
+ return [DIGEST_SIZE]byte{}, false
}
hash :: proc {
@@ -76,6 +97,8 @@ hash :: proc {
hash_file,
hash_bytes,
hash_string,
+ hash_bytes_to_buffer,
+ hash_string_to_buffer,
}
/*
From bdf66bb1e1096690be66eda90b35c6cfdc8a5cf0 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Fri, 31 Dec 2021 22:54:12 +0000
Subject: [PATCH 044/710] Correct `abs` type behaviour for quaternions
---
src/check_builtin.cpp | 13 ++++++++++---
src/types.cpp | 7 +++++++
2 files changed, 17 insertions(+), 3 deletions(-)
diff --git a/src/check_builtin.cpp b/src/check_builtin.cpp
index f93cf9886..dc8c209c9 100644
--- a/src/check_builtin.cpp
+++ b/src/check_builtin.cpp
@@ -1858,7 +1858,14 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
f64 r = operand->value.value_complex->real;
f64 i = operand->value.value_complex->imag;
operand->value = exact_value_float(gb_sqrt(r*r + i*i));
-
+ break;
+ }
+ case ExactValue_Quaternion: {
+ f64 r = operand->value.value_quaternion->real;
+ f64 i = operand->value.value_quaternion->imag;
+ f64 j = operand->value.value_quaternion->jmag;
+ f64 k = operand->value.value_quaternion->kmag;
+ operand->value = exact_value_float(gb_sqrt(r*r + i*i + j*j + k*k));
break;
}
default:
@@ -1877,10 +1884,10 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
}
}
- if (is_type_complex(operand->type)) {
+ if (is_type_complex_or_quaternion(operand->type)) {
operand->type = base_complex_elem_type(operand->type);
}
- GB_ASSERT(!is_type_complex(operand->type));
+ GB_ASSERT(!is_type_complex_or_quaternion(operand->type));
break;
}
diff --git a/src/types.cpp b/src/types.cpp
index 2b7ea93dc..f621d4346 100644
--- a/src/types.cpp
+++ b/src/types.cpp
@@ -1253,6 +1253,13 @@ bool is_type_quaternion(Type *t) {
}
return false;
}
+bool is_type_complex_or_quaternion(Type *t) {
+ t = core_type(t);
+ if (t->kind == Type_Basic) {
+ return (t->Basic.flags & (BasicFlag_Complex|BasicFlag_Quaternion)) != 0;
+ }
+ return false;
+}
bool is_type_f16(Type *t) {
t = core_type(t);
if (t->kind == Type_Basic) {
From 0d7cb02386c765f6f4fe343b463e84c3a7c1d1fc Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Fri, 31 Dec 2021 23:20:14 +0000
Subject: [PATCH 045/710] Fix conversion from float to quaternion
---
core/math/linalg/extended.odin | 4 ++--
src/check_expr.cpp | 7 +++++++
src/llvm_backend_expr.cpp | 30 +++++++-----------------------
3 files changed, 16 insertions(+), 25 deletions(-)
diff --git a/core/math/linalg/extended.odin b/core/math/linalg/extended.odin
index ba64356ce..c2bf5552a 100644
--- a/core/math/linalg/extended.odin
+++ b/core/math/linalg/extended.odin
@@ -103,10 +103,10 @@ max :: proc{max_single, max_double, max_triple}
abs :: proc(a: $T) -> (out: T) where IS_NUMERIC(ELEM_TYPE(T)) {
when IS_ARRAY(T) {
for i in 0..
Date: Sat, 1 Jan 2022 13:11:53 +0000
Subject: [PATCH 046/710] Fix typo in priority_queue.odin and add
`default_swap_proc`
---
core/container/priority_queue/priority_queue.odin | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/core/container/priority_queue/priority_queue.odin b/core/container/priority_queue/priority_queue.odin
index df26edb1b..2bdb3c777 100644
--- a/core/container/priority_queue/priority_queue.odin
+++ b/core/container/priority_queue/priority_queue.odin
@@ -11,6 +11,12 @@ Priority_Queue :: struct($T: typeid) {
DEFAULT_CAPACITY :: 16
+default_swap_proc :: proc($T: typeid) -> proc(q: []T, i, j: int) {
+ return proc(q: []T, i, j: int) {
+ q[i], q[j] = q[j], q[i]
+ }
+}
+
init :: proc(pq: ^$Q/Priority_Queue($T), less: proc(a, b: T) -> bool, swap: proc(q: []T, i, j: int), capacity := DEFAULT_CAPACITY, allocator := context.allocator) {
if pq.queue.allocator.procedure == nil {
pq.queue.allocator = allocator
@@ -65,7 +71,7 @@ _shift_down :: proc(pq: ^$Q/Priority_Queue($T), i0, n: int) -> bool {
}
j, j2 = j1, j1+1
if j1 < n && pq.less(queue[j2], queue[j1]) {
- j1 = j2
+ j = j2
}
if !pq.less(queue[i], queue[j]) {
break
From 43763ddfda018925ef55fc0f22f2e9ab363a848f Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Sat, 1 Jan 2022 13:44:37 +0000
Subject: [PATCH 047/710] Correct `_shift_down` logic
---
core/container/priority_queue/priority_queue.odin | 15 +++++++--------
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/core/container/priority_queue/priority_queue.odin b/core/container/priority_queue/priority_queue.odin
index 2bdb3c777..e324287f3 100644
--- a/core/container/priority_queue/priority_queue.odin
+++ b/core/container/priority_queue/priority_queue.odin
@@ -56,24 +56,23 @@ cap :: proc(pq: $Q/Priority_Queue($T)) -> int {
_shift_down :: proc(pq: ^$Q/Priority_Queue($T), i0, n: int) -> bool {
// O(n log n)
- i := i0
- j, j1, j2: int
- if 0 > i || i > n {
+ if 0 > i0 || i0 > n {
return false
}
+ i := i0
queue := pq.queue[:]
for {
j1 := 2*i + 1
- if 0 > j1 || j1 >= n {
+ if j1 < 0 || j1 >= n {
break
}
- j, j2 = j1, j1+1
- if j1 < n && pq.less(queue[j2], queue[j1]) {
+ j := j1
+ if j2 := j1+1; j2 < n && pq.less(queue[j2], queue[j1]) {
j = j2
}
- if !pq.less(queue[i], queue[j]) {
+ if !pq.less(queue[j], queue[i]) {
break
}
@@ -87,7 +86,7 @@ _shift_up :: proc(pq: ^$Q/Priority_Queue($T), j: int) {
j := j
queue := pq.queue[:]
n := builtin.len(queue)
- for 0 <= j && j < n {
+ for 0 <= j {
i := (j-1)/2
if i == j || !pq.less(queue[j], queue[i]) {
break
From f364ac60c290790e7df82c5f6ed9bf79824c223c Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Sat, 1 Jan 2022 15:31:51 +0000
Subject: [PATCH 048/710] Remove the hidden NUL byte past the end from
`strings.clone`
---
core/strings/strings.odin | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/core/strings/strings.odin b/core/strings/strings.odin
index 3f703372f..b93c5bcc0 100644
--- a/core/strings/strings.odin
+++ b/core/strings/strings.odin
@@ -6,9 +6,8 @@ import "core:unicode"
import "core:unicode/utf8"
clone :: proc(s: string, allocator := context.allocator, loc := #caller_location) -> string {
- c := make([]byte, len(s)+1, allocator, loc)
+ c := make([]byte, len(s), allocator, loc)
copy(c, s)
- c[len(s)] = 0
return string(c[:len(s)])
}
From a032a2ef322de150587117396eaf6e5ae7a11768 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Sat, 1 Jan 2022 15:33:19 +0000
Subject: [PATCH 049/710] Remove the hidden NUL byte past the end from
`bytes.clone`
---
core/bytes/bytes.odin | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/core/bytes/bytes.odin b/core/bytes/bytes.odin
index 1e83b93c8..1bf11e0b0 100644
--- a/core/bytes/bytes.odin
+++ b/core/bytes/bytes.odin
@@ -5,9 +5,8 @@ import "core:unicode"
import "core:unicode/utf8"
clone :: proc(s: []byte, allocator := context.allocator, loc := #caller_location) -> []byte {
- c := make([]byte, len(s)+1, allocator, loc)
+ c := make([]byte, len(s), allocator, loc)
copy(c, s)
- c[len(s)] = 0
return c[:len(s)]
}
From a60b9735a2ce49d4d8389db83ed53372b7f6c413 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Sat, 1 Jan 2022 15:46:22 +0000
Subject: [PATCH 050/710] Add `core:container/queue`
---
core/container/queue/queue.odin | 205 ++++++++++++++++++++++++++++++++
1 file changed, 205 insertions(+)
create mode 100644 core/container/queue/queue.odin
diff --git a/core/container/queue/queue.odin b/core/container/queue/queue.odin
new file mode 100644
index 000000000..ff1e85fbd
--- /dev/null
+++ b/core/container/queue/queue.odin
@@ -0,0 +1,205 @@
+package container_queue
+
+import "core:builtin"
+import "core:runtime"
+
+// Dynamically resizable double-ended queue/ring-buffer
+Queue :: struct($T: typeid) {
+ data: [dynamic]T,
+ len: uint,
+ offset: uint,
+}
+
+DEFAULT_CAPACITY :: 16
+
+// Procedure to initialize a queue
+init :: proc(q: ^$Q/Queue($T), capacity := DEFAULT_CAPACITY, allocator := context.allocator) -> bool {
+ if q.data.allocator.procedure == nil {
+ q.data.allocator = allocator
+ }
+ clear(q)
+ return reserve(q, capacity)
+}
+
+// Procedure to initialize a queue from a fixed backing slice
+init_from_slice :: proc(q: ^$Q/Queue($T), backing: []T) -> bool {
+ clear(q)
+ q.data = transmute([dynamic]T)runtime.Raw_Dynamic_Array{
+ data = raw_data(backing),
+ len = builtin.len(backing),
+ cap = builtin.len(backing),
+ allocator = {procedure=runtime.nil_allocator_proc, data=nil},
+ }
+ return true
+}
+
+// Procedure to destroy a queue
+destroy :: proc(q: ^$Q/Queue($T)) {
+ delete(q.data)
+}
+
+// The length of the queue
+len :: proc(q: $Q/Queue($T)) -> int {
+ return int(q.len)
+}
+
+// The current capacity of the queue
+cap :: proc(q: $Q/Queue($T)) -> int {
+ return builtin.len(q.data)
+}
+
+// Remaining space in the queue (cap-len)
+space :: proc(q: $Q/Queue($T)) -> int {
+ return builtin.len(q.data) - int(q.len)
+}
+
+// Reserve enough space for at least the specified capacity
+reserve :: proc(q: ^$Q/Queue($T), capacity: int) -> bool {
+ if uint(capacity) > q.len {
+ return _grow(q, uint(capacity))
+ }
+ return true
+}
+
+
+get :: proc(q: ^$Q/Queue($T), #any_int i: int, loc := #caller_location) -> T {
+ runtime.bounds_check_error_loc(loc, i, builtin.len(q.data))
+
+ idx := (uint(i)+q.offset)%builtin.len(q.data)
+ return q.data[idx]
+}
+set :: proc(q: ^$Q/Queue($T), #any_int i: int, val: T, loc := #caller_location) {
+ runtime.bounds_check_error_loc(loc, i, builtin.len(q.data))
+
+ idx := (uint(i)+q.offset)%builtin.len(q.data)
+ q.data[idx] = val
+}
+get_ptr :: proc(q: ^$Q/Queue($T), #any_int i: int, loc := #caller_location) -> ^T {
+ runtime.bounds_check_error_loc(loc, i, builtin.len(q.data))
+
+ idx := (uint(i)+q.offset)%builtin.len(q.data)
+ return &q.data[idx]
+}
+
+// Push an element to the back of the queue
+push_back :: proc(q: ^$Q/Queue($T), elem: T) -> bool {
+ if space(q^) == 0 {
+ _grow(q) or_return
+ }
+ q.data[q.len] = elem
+ q.len += 1
+ return true
+}
+
+// Push an element to the front of the queue
+push_front :: proc(q: ^$Q/Queue($T), elem: T) -> bool {
+ if space(q^) == 0 {
+ _grow(q) or_return
+ }
+ q.offset = uint(q.offset - 1 + builtin.len(q.data)) % builtin.len(q.data)
+ q.len += 1
+ q.data[q.offset] = elem
+ return true
+}
+
+
+// Pop an element from the back of the queue
+pop_back :: proc(q: ^$Q/Queue($T), loc := #caller_location) -> (elem: T) {
+ assert(condition=q.len > 0, loc=loc)
+ q.len -= 1
+ idx := (q.offset+uint(q.len))%builtin.len(q.data)
+ elem = q.data[idx]
+ return
+}
+// Safely pop an element from the back of the queue
+pop_back_safe :: proc(q: ^$Q/Queue($T)) -> (elem: T, ok: bool) {
+ if q.len > 0 {
+ q.len -= 1
+ idx := (q.offset+uint(q.len))%builtin.len(q.data)
+ elem = q.data[idx]
+ ok = true
+ }
+ return
+}
+
+// Pop an element from the front of the queue
+pop_front :: proc(q: ^$Q/Queue($T), loc := #caller_location) -> (elem: T) {
+ assert(condition=q.len > 0, loc=loc)
+ elem = q.data[q.offset]
+ q.len -= 1
+ return
+}
+// Safely pop an element from the front of the queue
+pop_front_safe :: proc(q: ^$Q/Queue($T)) -> (elem: T, ok: bool) {
+ if q.len > 0 {
+ elem = q.data[q.offset]
+ q.len -= 1
+ ok = true
+ }
+ return
+}
+
+// Push multiple elements to the back of the queue
+push_back_elems :: proc(q: ^$Q/Queue($T), elems: ..T) -> bool {
+ n := uint(builtin.len(elems))
+ if space(q^) < int(n) {
+ _grow(q, q.len + n) or_return
+ }
+
+ sz := uint(builtin.len(q.data))
+ insert_from := (q.offset + q.len) % sz
+ insert_to := n
+ if insert_from + insert_to > sz {
+ insert_to = sz - insert_from
+ }
+ copy(q.data[insert_from:], elems[:insert_to])
+ copy(q.data[:insert_from], elems[insert_to:])
+ q.len += n
+ return true
+}
+
+// Consume `n` elements from the front of the queue
+consume_front :: proc(q: ^$Q/Queue($T), n: int, loc := #caller_location) {
+ assert(condition=int(q.len) >= n, loc=loc)
+ if n > 0 {
+ nu := uint(n)
+ q.offset = (q.offset + nu) % builtin.len(q.data)
+ q.len -= nu
+ }
+}
+
+// Consume `n` elements from the back of the queue
+consume_back :: proc(q: ^$Q/Queue($T), n: int, loc := #caller_location) {
+ assert(condition=int(q.len) >= n, loc=loc)
+ if n > 0 {
+ q.len -= uint(n)
+ }
+}
+
+
+
+append_elem :: push_back
+append_elems :: push_back_elems
+push :: proc{push_back, push_back_elems}
+append :: proc{push_back, push_back_elems}
+
+
+// Clear the contents of the queue
+clear :: proc(q: ^$Q/Queue($T)) {
+ q.len = 0
+ q.offset = 0
+}
+
+
+// Internal growing procedure
+_grow :: proc(q: ^$Q/Queue($T), min_capacity: uint = 0) -> bool {
+ new_capacity := max(min_capacity, uint(8), uint(builtin.len(q.data))*2)
+ n := uint(builtin.len(q.data))
+ builtin.resize(&q.data, int(new_capacity)) or_return
+ if q.offset + q.len > n {
+ diff := n - q.offset
+ copy(q.data[new_capacity-diff:], q.data[q.offset:][:diff])
+ q.offset += new_capacity - n
+ }
+ return true
+}
From 50188f03086da716a8f23926ae10be6fd87abab4 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Sat, 1 Jan 2022 17:13:11 +0000
Subject: [PATCH 051/710] Add `sort.map_entries_by_key`
`sort.map_entries_by_value`
---
core/sort/map.odin | 33 +++++++++++++++++++++++++++++++++
1 file changed, 33 insertions(+)
create mode 100644 core/sort/map.odin
diff --git a/core/sort/map.odin b/core/sort/map.odin
new file mode 100644
index 000000000..dff2dced3
--- /dev/null
+++ b/core/sort/map.odin
@@ -0,0 +1,33 @@
+package sort
+
+import "core:intrinsics"
+import "core:runtime"
+import "core:slice"
+
+map_entries_by_key :: proc(m: ^$M/map[$K]$V, loc := #caller_location) where intrinsics.type_is_ordered(K) {
+ Entry :: struct {
+ hash: uintptr,
+ next: int,
+ key: K,
+ value: V,
+ }
+
+ header := runtime.__get_map_header(m)
+ entries := (^[dynamic]Entry)(&header.m.entries)
+ slice.sort_by_key(entries[:], proc(e: Entry) -> K { return e.key })
+ runtime.__dynamic_map_reset_entries(header, loc)
+}
+
+map_entries_by_value :: proc(m: ^$M/map[$K]$V, loc := #caller_location) where intrinsics.type_is_ordered(V) {
+ Entry :: struct {
+ hash: uintptr,
+ next: int,
+ key: K,
+ value: V,
+ }
+
+ header := runtime.__get_map_header(m)
+ entries := (^[dynamic]Entry)(&header.m.entries)
+ slice.sort_by_key(entries[:], proc(e: Entry) -> V { return e.value })
+ runtime.__dynamic_map_reset_entries(header, loc)
+}
\ No newline at end of file
From 3cbf9c37193a2852db9ac52b9d9699169318d277 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Sun, 2 Jan 2022 14:45:39 +0000
Subject: [PATCH 052/710] Fix #1381
---
src/parser.cpp | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/src/parser.cpp b/src/parser.cpp
index cbd4d61d5..5bf43cee9 100644
--- a/src/parser.cpp
+++ b/src/parser.cpp
@@ -5362,6 +5362,15 @@ isize calc_decl_count(Ast *decl) {
count += calc_decl_count(decl->BlockStmt.stmts.data[i]);
}
break;
+ case Ast_WhenStmt:
+ {
+ isize inner_count = calc_decl_count(decl->WhenStmt.body);
+ if (decl->WhenStmt.else_stmt) {
+ inner_count = gb_max(inner_count, calc_decl_count(decl->WhenStmt.else_stmt));
+ }
+ count += inner_count;
+ }
+ break;
case Ast_ValueDecl:
count = decl->ValueDecl.names.count;
break;
From 65434911489fdb5b9136b3b06b279a07137d7415 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Sun, 2 Jan 2022 15:31:47 +0000
Subject: [PATCH 053/710] Clean up code for queue (no logic changed)
---
src/queue.cpp | 45 +++++++++++++++++++++++++++------------------
1 file changed, 27 insertions(+), 18 deletions(-)
diff --git a/src/queue.cpp b/src/queue.cpp
index d69a2845c..ee8b1b086 100644
--- a/src/queue.cpp
+++ b/src/queue.cpp
@@ -71,6 +71,29 @@ void mpmc_destroy(MPMCQueue *q) {
}
+template
+bool mpmc_internal_grow(MPMCQueue *q) {
+ mutex_lock(&q->mutex);
+ i32 old_size = q->mask+1;
+ i32 new_size = old_size*2;
+ resize_array_raw(&q->nodes, q->allocator, old_size, new_size);
+ if (q->nodes == nullptr) {
+ GB_PANIC("Unable to resize enqueue: %td -> %td", old_size, new_size);
+ mutex_unlock(&q->mutex);
+ return false;
+ }
+ resize_array_raw(&q->indices, q->allocator, old_size, new_size);
+ if (q->indices == nullptr) {
+ GB_PANIC("Unable to resize enqueue: %td -> %td", old_size, new_size);
+ mutex_unlock(&q->mutex);
+ return false;
+ }
+ mpmc_internal_init_indices(q->indices, old_size, new_size);
+ q->mask = new_size-1;
+ mutex_unlock(&q->mutex);
+ return true;
+}
+
template
i32 mpmc_enqueue(MPMCQueue *q, T const &data) {
GB_ASSERT(q->mask != 0);
@@ -78,8 +101,9 @@ i32 mpmc_enqueue(MPMCQueue *q, T const &data) {
i32 head_idx = q->head_idx.load(std::memory_order_relaxed);
for (;;) {
- auto node = &q->nodes[head_idx & q->mask];
- auto node_idx_ptr = &q->indices[head_idx & q->mask];
+ i32 index = head_idx & q->mask;
+ auto node = &q->nodes[index];
+ auto node_idx_ptr = &q->indices[index];
i32 node_idx = node_idx_ptr->load(std::memory_order_acquire);
i32 diff = node_idx - head_idx;
@@ -91,24 +115,9 @@ i32 mpmc_enqueue(MPMCQueue *q, T const &data) {
return q->count.fetch_add(1, std::memory_order_release);
}
} else if (diff < 0) {
- mutex_lock(&q->mutex);
- i32 old_size = q->mask+1;
- i32 new_size = old_size*2;
- resize_array_raw(&q->nodes, q->allocator, old_size, new_size);
- if (q->nodes == nullptr) {
- GB_PANIC("Unable to resize enqueue: %td -> %td", old_size, new_size);
- mutex_unlock(&q->mutex);
+ if (!mpmc_internal_grow(q)) {
return -1;
}
- resize_array_raw(&q->indices, q->allocator, old_size, new_size);
- if (q->indices == nullptr) {
- GB_PANIC("Unable to resize enqueue: %td -> %td", old_size, new_size);
- mutex_unlock(&q->mutex);
- return -1;
- }
- mpmc_internal_init_indices(q->indices, old_size, new_size);
- q->mask = new_size-1;
- mutex_unlock(&q->mutex);
} else {
head_idx = q->head_idx.load(std::memory_order_relaxed);
}
From e4f28de3de7b8bd72c89b87884302bfc7f943b4f Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Mon, 3 Jan 2022 12:14:01 +0000
Subject: [PATCH 054/710] Fix #1311
---
vendor/microui/microui.odin | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/vendor/microui/microui.odin b/vendor/microui/microui.odin
index 9d3e01fa0..947f59f40 100644
--- a/vendor/microui/microui.odin
+++ b/vendor/microui/microui.odin
@@ -1030,9 +1030,7 @@ number_textbox :: proc(ctx: ^Context, value: ^Real, r: Rect, id: Id, fmt_string:
if ctx.number_edit_id == id {
res := textbox_raw(ctx, ctx.number_edit_buf[:], &ctx.number_edit_len, id, r, {})
if .SUBMIT in res || ctx.focus_id != id {
- ok: bool
- value^, ok = parse_real(string(ctx.number_edit_buf[:ctx.number_edit_len]))
- assert(ok == true)
+ value^, _ = parse_real(string(ctx.number_edit_buf[:ctx.number_edit_len]))
ctx.number_edit_id = 0
} else {
return true
From 236b08cb4921d5c6000d5029f2936271acb45f29 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Mon, 3 Jan 2022 12:51:32 +0000
Subject: [PATCH 055/710] Fix #1356
---
src/check_type.cpp | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/src/check_type.cpp b/src/check_type.cpp
index b4f30d2f0..282da4d0a 100644
--- a/src/check_type.cpp
+++ b/src/check_type.cpp
@@ -922,20 +922,19 @@ void check_bit_set_type(CheckerContext *c, Type *type, Type *named_type, Ast *no
i64 lower = big_int_to_i64(&i);
i64 upper = big_int_to_i64(&j);
- bool lower_changed = false;
+ i64 actual_lower = lower;
i64 bits = MAX_BITS;
if (type->BitSet.underlying != nullptr) {
bits = 8*type_size_of(type->BitSet.underlying);
if (lower > 0) {
- lower = 0;
- lower_changed = true;
+ actual_lower = 0;
} else if (lower < 0) {
error(bs->elem, "bit_set does not allow a negative lower bound (%lld) when an underlying type is set", lower);
}
}
- i64 bits_required = upper-lower;
+ i64 bits_required = upper-actual_lower;
switch (be->op.kind) {
case Token_Ellipsis:
case Token_RangeFull:
@@ -959,7 +958,7 @@ void check_bit_set_type(CheckerContext *c, Type *type, Type *named_type, Ast *no
break;
}
if (!is_valid) {
- if (lower_changed) {
+ if (actual_lower != lower) {
error(bs->elem, "bit_set range is greater than %lld bits, %lld bits are required (internal the lower changed was changed 0 as an underlying type was set)", bits, bits_required);
} else {
error(bs->elem, "bit_set range is greater than %lld bits, %lld bits are required", bits, bits_required);
From e6b8f7e77a419c8ff9e5f7de23fe15bef63264b1 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Mon, 3 Jan 2022 12:54:31 +0000
Subject: [PATCH 056/710] Fix #1398
---
src/check_expr.cpp | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/src/check_expr.cpp b/src/check_expr.cpp
index 67e2f3bd7..cfffffd9f 100644
--- a/src/check_expr.cpp
+++ b/src/check_expr.cpp
@@ -7688,6 +7688,14 @@ ExprKind check_expr_base_internal(CheckerContext *c, Operand *o, Ast *node, Type
}
}
+ if (t->kind == Type_Matrix) {
+ if (cl->elems.count > 0 && cl->elems[0]->kind != Ast_FieldValue) {
+ if (0 < max && max < max_type_count) {
+ error(node, "Expected %lld values for this matrix literal, got %lld", cast(long long)max_type_count, cast(long long)max);
+ }
+ }
+ }
+
break;
}
From 12f459b5fb7904bfa926b5ad3fc5f80c6b5b4193 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Mon, 3 Jan 2022 13:12:39 +0000
Subject: [PATCH 057/710] Fix #1344
---
src/check_stmt.cpp | 32 ++++++++++++++++++++++++++++++++
1 file changed, 32 insertions(+)
diff --git a/src/check_stmt.cpp b/src/check_stmt.cpp
index 396388629..c3b8c46ca 100644
--- a/src/check_stmt.cpp
+++ b/src/check_stmt.cpp
@@ -58,6 +58,30 @@ bool contains_deferred_call(Ast *node) {
return false;
}
+Ast *last_stmt_blocking_in_list(Slice const &stmts) {
+ for_array(i, stmts) {
+ Ast *n = stmts[i];
+ switch (n->kind) {
+ case Ast_ReturnStmt:
+ return n;
+ case Ast_BranchStmt:
+ return n;
+ case Ast_ExprStmt:
+ if (is_diverging_stmt(n)) {
+ return n;
+ }
+ break;
+ case Ast_BlockStmt:
+ n = last_stmt_blocking_in_list(n->BlockStmt.stmts);
+ if (n != nullptr) {
+ return n;
+ }
+ break;
+ }
+ }
+ return nullptr;
+}
+
void check_stmt_list(CheckerContext *ctx, Slice const &stmts, u32 flags) {
if (stmts.count == 0) {
return;
@@ -102,6 +126,7 @@ void check_stmt_list(CheckerContext *ctx, Slice const &stmts, u32 flags)
check_stmt(ctx, n, new_flags);
if (i+1 < max_non_constant_declaration) {
+ never_executed_error:;
switch (n->kind) {
case Ast_ReturnStmt:
error(n, "Statements after this 'return' are never executed");
@@ -116,6 +141,13 @@ void check_stmt_list(CheckerContext *ctx, Slice const &stmts, u32 flags)
error(n, "Statements after a diverging procedure call are never executed");
}
break;
+
+ case Ast_BlockStmt:
+ n = last_stmt_blocking_in_list(n->BlockStmt.stmts);
+ if (n != nullptr) {
+ goto never_executed_error;
+ }
+ break;
}
} else if (i+1 == max_non_constant_declaration) {
if (is_diverging_stmt(n)) {
From defc1672c3d1b27c4720f53e95a0e1be0775e5e9 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Mon, 3 Jan 2022 13:48:12 +0000
Subject: [PATCH 058/710] Revert fix #1344
---
src/check_stmt.cpp | 32 --------------------------------
1 file changed, 32 deletions(-)
diff --git a/src/check_stmt.cpp b/src/check_stmt.cpp
index c3b8c46ca..396388629 100644
--- a/src/check_stmt.cpp
+++ b/src/check_stmt.cpp
@@ -58,30 +58,6 @@ bool contains_deferred_call(Ast *node) {
return false;
}
-Ast *last_stmt_blocking_in_list(Slice const &stmts) {
- for_array(i, stmts) {
- Ast *n = stmts[i];
- switch (n->kind) {
- case Ast_ReturnStmt:
- return n;
- case Ast_BranchStmt:
- return n;
- case Ast_ExprStmt:
- if (is_diverging_stmt(n)) {
- return n;
- }
- break;
- case Ast_BlockStmt:
- n = last_stmt_blocking_in_list(n->BlockStmt.stmts);
- if (n != nullptr) {
- return n;
- }
- break;
- }
- }
- return nullptr;
-}
-
void check_stmt_list(CheckerContext *ctx, Slice const &stmts, u32 flags) {
if (stmts.count == 0) {
return;
@@ -126,7 +102,6 @@ void check_stmt_list(CheckerContext *ctx, Slice const &stmts, u32 flags)
check_stmt(ctx, n, new_flags);
if (i+1 < max_non_constant_declaration) {
- never_executed_error:;
switch (n->kind) {
case Ast_ReturnStmt:
error(n, "Statements after this 'return' are never executed");
@@ -141,13 +116,6 @@ void check_stmt_list(CheckerContext *ctx, Slice const &stmts, u32 flags)
error(n, "Statements after a diverging procedure call are never executed");
}
break;
-
- case Ast_BlockStmt:
- n = last_stmt_blocking_in_list(n->BlockStmt.stmts);
- if (n != nullptr) {
- goto never_executed_error;
- }
- break;
}
} else if (i+1 == max_non_constant_declaration) {
if (is_diverging_stmt(n)) {
From 38e5e13b3feb4d037fcf0b3df7d335bdb0dda45d Mon Sep 17 00:00:00 2001
From: CiD-
Date: Mon, 3 Jan 2022 09:24:39 -0500
Subject: [PATCH 059/710] add more Linux syscalls
---
core/sys/unix/syscalls_linux.odin | 1497 ++++++++++++++++++++++++++++-
1 file changed, 1481 insertions(+), 16 deletions(-)
diff --git a/core/sys/unix/syscalls_linux.odin b/core/sys/unix/syscalls_linux.odin
index 659eedfbb..25c5ed0a1 100644
--- a/core/sys/unix/syscalls_linux.odin
+++ b/core/sys/unix/syscalls_linux.odin
@@ -16,37 +16,1502 @@ import "core:intrinsics"
// arm: arch/arm/tools/syscall.tbl
when ODIN_ARCH == "amd64" {
+ SYS_read : uintptr : 0
+ SYS_write : uintptr : 1
+ SYS_open : uintptr : 2
+ SYS_close : uintptr : 3
+ SYS_stat : uintptr : 4
+ SYS_fstat : uintptr : 5
+ SYS_lstat : uintptr : 6
+ SYS_poll : uintptr : 7
+ SYS_lseek : uintptr : 8
SYS_mmap : uintptr : 9
SYS_mprotect : uintptr : 10
SYS_munmap : uintptr : 11
+ SYS_brk : uintptr : 12
+ SYS_rt_sigaction : uintptr : 13
+ SYS_rt_sigprocmask : uintptr : 14
+ SYS_rt_sigreturn : uintptr : 15
+ SYS_ioctl : uintptr : 16
+ SYS_pread : uintptr : 17
+ SYS_pwrite : uintptr : 18
+ SYS_readv : uintptr : 19
+ SYS_writev : uintptr : 20
+ SYS_access : uintptr : 21
+ SYS_pipe : uintptr : 22
+ SYS_select : uintptr : 23
+ SYS_sched_yield : uintptr : 24
+ SYS_mremap : uintptr : 25
+ SYS_msync : uintptr : 26
+ SYS_mincore : uintptr : 27
SYS_madvise : uintptr : 28
- SYS_futex : uintptr : 202
+ SYS_shmget : uintptr : 29
+ SYS_shmat : uintptr : 30
+ SYS_shmctl : uintptr : 31
+ SYS_dup : uintptr : 32
+ SYS_dup2 : uintptr : 33
+ SYS_pause : uintptr : 34
+ SYS_nanosleep : uintptr : 35
+ SYS_getitimer : uintptr : 36
+ SYS_alarm : uintptr : 37
+ SYS_setitimer : uintptr : 38
+ SYS_getpid : uintptr : 39
+ SYS_sendfile : uintptr : 40
+ SYS_socket : uintptr : 41
+ SYS_connect : uintptr : 42
+ SYS_accept : uintptr : 43
+ SYS_sendto : uintptr : 44
+ SYS_recvfrom : uintptr : 45
+ SYS_sendmsg : uintptr : 46
+ SYS_recvmsg : uintptr : 47
+ SYS_shutdown : uintptr : 48
+ SYS_bind : uintptr : 49
+ SYS_listen : uintptr : 50
+ SYS_getsockname : uintptr : 51
+ SYS_getpeername : uintptr : 52
+ SYS_socketpair : uintptr : 53
+ SYS_setsockopt : uintptr : 54
+ SYS_getsockopt : uintptr : 55
+ SYS_clone : uintptr : 56
+ SYS_fork : uintptr : 57
+ SYS_vfork : uintptr : 58
+ SYS_execve : uintptr : 59
+ SYS_exit : uintptr : 60
+ SYS_wait4 : uintptr : 61
+ SYS_kill : uintptr : 62
+ SYS_uname : uintptr : 63
+ SYS_semget : uintptr : 64
+ SYS_semop : uintptr : 65
+ SYS_semctl : uintptr : 66
+ SYS_shmdt : uintptr : 67
+ SYS_msgget : uintptr : 68
+ SYS_msgsnd : uintptr : 69
+ SYS_msgrcv : uintptr : 70
+ SYS_msgctl : uintptr : 71
+ SYS_fcntl : uintptr : 72
+ SYS_flock : uintptr : 73
+ SYS_fsync : uintptr : 74
+ SYS_fdatasync : uintptr : 75
+ SYS_truncate : uintptr : 76
+ SYS_ftruncate : uintptr : 77
+ SYS_getdents : uintptr : 78
+ SYS_getcwd : uintptr : 79
+ SYS_chdir : uintptr : 80
+ SYS_fchdir : uintptr : 81
+ SYS_rename : uintptr : 82
+ SYS_mkdir : uintptr : 83
+ SYS_rmdir : uintptr : 84
+ SYS_creat : uintptr : 85
+ SYS_link : uintptr : 86
+ SYS_unlink : uintptr : 87
+ SYS_symlink : uintptr : 88
+ SYS_readlink : uintptr : 89
+ SYS_chmod : uintptr : 90
+ SYS_fchmod : uintptr : 91
+ SYS_chown : uintptr : 92
+ SYS_fchown : uintptr : 93
+ SYS_lchown : uintptr : 94
+ SYS_umask : uintptr : 95
+ SYS_gettimeofday : uintptr : 96
+ SYS_getrlimit : uintptr : 97
+ SYS_getrusage : uintptr : 98
+ SYS_sysinfo : uintptr : 99
+ SYS_times : uintptr : 100
+ SYS_ptrace : uintptr : 101
+ SYS_getuid : uintptr : 102
+ SYS_syslog : uintptr : 103
+ SYS_getgid : uintptr : 104
+ SYS_setuid : uintptr : 105
+ SYS_setgid : uintptr : 106
+ SYS_geteuid : uintptr : 107
+ SYS_getegid : uintptr : 108
+ SYS_setpgid : uintptr : 109
+ SYS_getppid : uintptr : 110
+ SYS_getpgrp : uintptr : 111
+ SYS_setsid : uintptr : 112
+ SYS_setreuid : uintptr : 113
+ SYS_setregid : uintptr : 114
+ SYS_getgroups : uintptr : 115
+ SYS_setgroups : uintptr : 116
+ SYS_setresuid : uintptr : 117
+ SYS_getresuid : uintptr : 118
+ SYS_setresgid : uintptr : 119
+ SYS_getresgid : uintptr : 120
+ SYS_getpgid : uintptr : 121
+ SYS_setfsuid : uintptr : 122
+ SYS_setfsgid : uintptr : 123
+ SYS_getsid : uintptr : 124
+ SYS_capget : uintptr : 125
+ SYS_capset : uintptr : 126
+ SYS_rt_sigpending : uintptr : 127
+ SYS_rt_sigtimedwait : uintptr : 128
+ SYS_rt_sigqueueinfo : uintptr : 129
+ SYS_rt_sigsuspend : uintptr : 130
+ SYS_sigaltstack : uintptr : 131
+ SYS_utime : uintptr : 132
+ SYS_mknod : uintptr : 133
+ SYS_uselib : uintptr : 134
+ SYS_personality : uintptr : 135
+ SYS_ustat : uintptr : 136
+ SYS_statfs : uintptr : 137
+ SYS_fstatfs : uintptr : 138
+ SYS_sysfs : uintptr : 139
+ SYS_getpriority : uintptr : 140
+ SYS_setpriority : uintptr : 141
+ SYS_sched_setparam : uintptr : 142
+ SYS_sched_getparam : uintptr : 143
+ SYS_sched_setscheduler : uintptr : 144
+ SYS_sched_getscheduler : uintptr : 145
+ SYS_sched_get_priority_max : uintptr : 146
+ SYS_sched_get_priority_min : uintptr : 147
+ SYS_sched_rr_get_interval : uintptr : 148
+ SYS_mlock : uintptr : 149
+ SYS_munlock : uintptr : 150
+ SYS_mlockall : uintptr : 151
+ SYS_munlockall : uintptr : 152
+ SYS_vhangup : uintptr : 153
+ SYS_modify_ldt : uintptr : 154
+ SYS_pivot_root : uintptr : 155
+ SYS__sysctl : uintptr : 156
+ SYS_prctl : uintptr : 157
+ SYS_arch_prctl : uintptr : 158
+ SYS_adjtimex : uintptr : 159
+ SYS_setrlimit : uintptr : 160
+ SYS_chroot : uintptr : 161
+ SYS_sync : uintptr : 162
+ SYS_acct : uintptr : 163
+ SYS_settimeofday : uintptr : 164
+ SYS_mount : uintptr : 165
+ SYS_umount2 : uintptr : 166
+ SYS_swapon : uintptr : 167
+ SYS_swapoff : uintptr : 168
+ SYS_reboot : uintptr : 169
+ SYS_sethostname : uintptr : 170
+ SYS_setdomainname : uintptr : 171
+ SYS_iopl : uintptr : 172
+ SYS_ioperm : uintptr : 173
+ SYS_create_module : uintptr : 174
+ SYS_init_module : uintptr : 175
+ SYS_delete_module : uintptr : 176
+ SYS_get_kernel_syms : uintptr : 177
+ SYS_query_module : uintptr : 178
+ SYS_quotactl : uintptr : 179
+ SYS_nfsservctl : uintptr : 180
+ SYS_getpmsg : uintptr : 181
+ SYS_putpmsg : uintptr : 182
+ SYS_afs_syscall : uintptr : 183
+ SYS_tuxcall : uintptr : 184
+ SYS_security : uintptr : 185
SYS_gettid : uintptr : 186
+ SYS_readahead : uintptr : 187
+ SYS_setxattr : uintptr : 188
+ SYS_lsetxattr : uintptr : 189
+ SYS_fsetxattr : uintptr : 190
+ SYS_getxattr : uintptr : 191
+ SYS_lgetxattr : uintptr : 192
+ SYS_fgetxattr : uintptr : 193
+ SYS_listxattr : uintptr : 194
+ SYS_llistxattr : uintptr : 195
+ SYS_flistxattr : uintptr : 196
+ SYS_removexattr : uintptr : 197
+ SYS_lremovexattr : uintptr : 198
+ SYS_fremovexattr : uintptr : 199
+ SYS_tkill : uintptr : 200
+ SYS_time : uintptr : 201
+ SYS_futex : uintptr : 202
+ SYS_sched_setaffinity : uintptr : 203
+ SYS_sched_getaffinity : uintptr : 204
+ SYS_set_thread_area : uintptr : 205
+ SYS_io_setup : uintptr : 206
+ SYS_io_destroy : uintptr : 207
+ SYS_io_getevents : uintptr : 208
+ SYS_io_submit : uintptr : 209
+ SYS_io_cancel : uintptr : 210
+ SYS_get_thread_area : uintptr : 211
+ SYS_lookup_dcookie : uintptr : 212
+ SYS_epoll_create : uintptr : 213
+ SYS_epoll_ctl_old : uintptr : 214
+ SYS_epoll_wait_old : uintptr : 215
+ SYS_remap_file_pages : uintptr : 216
+ SYS_getdents64 : uintptr : 217
+ SYS_set_tid_address : uintptr : 218
+ SYS_restart_syscall : uintptr : 219
+ SYS_semtimedop : uintptr : 220
+ SYS_fadvise64 : uintptr : 221
+ SYS_timer_create : uintptr : 222
+ SYS_timer_settime : uintptr : 223
+ SYS_timer_gettime : uintptr : 224
+ SYS_timer_getoverrun : uintptr : 225
+ SYS_timer_delete : uintptr : 226
+ SYS_clock_settime : uintptr : 227
+ SYS_clock_gettime : uintptr : 228
+ SYS_clock_getres : uintptr : 229
+ SYS_clock_nanosleep : uintptr : 230
+ SYS_exit_group : uintptr : 231
+ SYS_epoll_wait : uintptr : 232
+ SYS_epoll_ctl : uintptr : 233
+ SYS_tgkill : uintptr : 234
+ SYS_utimes : uintptr : 235
+ SYS_vserver : uintptr : 236
+ SYS_mbind : uintptr : 237
+ SYS_set_mempolicy : uintptr : 238
+ SYS_get_mempolicy : uintptr : 239
+ SYS_mq_open : uintptr : 240
+ SYS_mq_unlink : uintptr : 241
+ SYS_mq_timedsend : uintptr : 242
+ SYS_mq_timedreceive : uintptr : 243
+ SYS_mq_notify : uintptr : 244
+ SYS_mq_getsetattr : uintptr : 245
+ SYS_kexec_load : uintptr : 246
+ SYS_waitid : uintptr : 247
+ SYS_add_key : uintptr : 248
+ SYS_request_key : uintptr : 249
+ SYS_keyctl : uintptr : 250
+ SYS_ioprio_set : uintptr : 251
+ SYS_ioprio_get : uintptr : 252
+ SYS_inotify_init : uintptr : 253
+ SYS_inotify_add_watch : uintptr : 254
+ SYS_inotify_rm_watch : uintptr : 255
+ SYS_migrate_pages : uintptr : 256
+ SYS_openat : uintptr : 257
+ SYS_mkdirat : uintptr : 258
+ SYS_mknodat : uintptr : 259
+ SYS_fchownat : uintptr : 260
+ SYS_futimesat : uintptr : 261
+ SYS_fstatat : uintptr : 262
+ SYS_unlinkat : uintptr : 263
+ SYS_renameat : uintptr : 264
+ SYS_linkat : uintptr : 265
+ SYS_symlinkat : uintptr : 266
+ SYS_readlinkat : uintptr : 267
+ SYS_fchmodat : uintptr : 268
+ SYS_faccessat : uintptr : 269
+ SYS_pselect6 : uintptr : 270
+ SYS_ppoll : uintptr : 271
+ SYS_unshare : uintptr : 272
+ SYS_set_robust_list : uintptr : 273
+ SYS_get_robust_list : uintptr : 274
+ SYS_splice : uintptr : 275
+ SYS_tee : uintptr : 276
+ SYS_sync_file_range : uintptr : 277
+ SYS_vmsplice : uintptr : 278
+ SYS_move_pages : uintptr : 279
+ SYS_utimensat : uintptr : 280
+ SYS_epoll_pwait : uintptr : 281
+ SYS_signalfd : uintptr : 282
+ SYS_timerfd_create : uintptr : 283
+ SYS_eventfd : uintptr : 284
+ SYS_fallocate : uintptr : 285
+ SYS_timerfd_settime : uintptr : 286
+ SYS_timerfd_gettime : uintptr : 287
+ SYS_accept4 : uintptr : 288
+ SYS_signalfd4 : uintptr : 289
+ SYS_eventfd2 : uintptr : 290
+ SYS_epoll_create1 : uintptr : 291
+ SYS_dup3 : uintptr : 292
+ SYS_pipe2 : uintptr : 293
+ SYS_inotify_init1 : uintptr : 294
+ SYS_preadv : uintptr : 295
+ SYS_pwritev : uintptr : 296
+ SYS_rt_tgsigqueueinfo : uintptr : 297
+ SYS_perf_event_open : uintptr : 298
+ SYS_recvmmsg : uintptr : 299
+ SYS_fanotify_init : uintptr : 300
+ SYS_fanotify_mark : uintptr : 301
+ SYS_prlimit64 : uintptr : 302
+ SYS_name_to_handle_at : uintptr : 303
+ SYS_open_by_handle_at : uintptr : 304
+ SYS_clock_adjtime : uintptr : 305
+ SYS_syncfs : uintptr : 306
+ SYS_sendmmsg : uintptr : 307
+ SYS_setns : uintptr : 308
+ SYS_getcpu : uintptr : 309
+ SYS_process_vm_readv : uintptr : 310
+ SYS_process_vm_writev : uintptr : 311
+ SYS_kcmp : uintptr : 312
+ SYS_finit_module : uintptr : 313
+ SYS_sched_setattr : uintptr : 314
+ SYS_sched_getattr : uintptr : 315
+ SYS_renameat2 : uintptr : 316
+ SYS_seccomp : uintptr : 317
SYS_getrandom : uintptr : 318
+ SYS_memfd_create : uintptr : 319
+ SYS_kexec_file_load : uintptr : 320
+ SYS_bpf : uintptr : 321
+ SYS_execveat : uintptr : 322
+ SYS_userfaultfd : uintptr : 323
+ SYS_membarrier : uintptr : 324
+ SYS_mlock2 : uintptr : 325
+ SYS_copy_file_range : uintptr : 326
+ SYS_preadv2 : uintptr : 327
+ SYS_pwritev2 : uintptr : 328
+ SYS_pkey_mprotect : uintptr : 329
+ SYS_pkey_alloc : uintptr : 330
+ SYS_pkey_free : uintptr : 331
+ SYS_statx : uintptr : 332
+ SYS_io_pgetevents : uintptr : 333
+ SYS_rseq : uintptr : 334
+ SYS_pidfd_send_signal : uintptr : 424
+ SYS_io_uring_setup : uintptr : 425
+ SYS_io_uring_enter : uintptr : 426
+ SYS_io_uring_register : uintptr : 427
+ SYS_open_tree : uintptr : 428
+ SYS_move_mount : uintptr : 429
+ SYS_fsopen : uintptr : 430
+ SYS_fsconfig : uintptr : 431
+ SYS_fsmount : uintptr : 432
+ SYS_fspick : uintptr : 433
+ SYS_pidfd_open : uintptr : 434
+ SYS_clone3 : uintptr : 435
+ SYS_close_range : uintptr : 436
+ SYS_openat2 : uintptr : 437
+ SYS_pidfd_getfd : uintptr : 438
+ SYS_faccessat2 : uintptr : 439
+ SYS_process_madvise : uintptr : 440
+ SYS_epoll_pwait2 : uintptr : 441
+ SYS_mount_setattr : uintptr : 442
+ SYS_landlock_create_ruleset : uintptr : 444
+ SYS_landlock_add_rule : uintptr : 445
+ SYS_landlock_restrict_self : uintptr : 446
+ SYS_memfd_secret : uintptr : 447
} else when ODIN_ARCH == "arm64" {
- SYS_mmap : uintptr : 222
- SYS_mprotect : uintptr : 226
- SYS_munmap : uintptr : 215
- SYS_madvise : uintptr : 233
+ SYS_io_setup : uintptr : 0
+ SYS_io_destroy : uintptr : 1
+ SYS_io_submit : uintptr : 2
+ SYS_io_cancel : uintptr : 3
+ SYS_io_getevents : uintptr : 4
+ SYS_setxattr : uintptr : 5
+ SYS_lsetxattr : uintptr : 6
+ SYS_fsetxattr : uintptr : 7
+ SYS_getxattr : uintptr : 8
+ SYS_lgetxattr : uintptr : 9
+ SYS_fgetxattr : uintptr : 10
+ SYS_listxattr : uintptr : 11
+ SYS_llistxattr : uintptr : 12
+ SYS_flistxattr : uintptr : 13
+ SYS_removexattr : uintptr : 14
+ SYS_lremovexattr : uintptr : 15
+ SYS_fremovexattr : uintptr : 16
+ SYS_getcwd : uintptr : 17
+ SYS_lookup_dcookie : uintptr : 18
+ SYS_eventfd2 : uintptr : 19
+ SYS_epoll_create1 : uintptr : 20
+ SYS_epoll_ctl : uintptr : 21
+ SYS_epoll_pwait : uintptr : 22
+ SYS_dup : uintptr : 23
+ SYS_dup3 : uintptr : 24
+ SYS_fcntl : uintptr : 25
+ SYS_inotify_init1 : uintptr : 26
+ SYS_inotify_add_watch : uintptr : 27
+ SYS_inotify_rm_watch : uintptr : 28
+ SYS_ioctl : uintptr : 29
+ SYS_ioprio_set : uintptr : 30
+ SYS_ioprio_get : uintptr : 31
+ SYS_flock : uintptr : 32
+ SYS_mknodat : uintptr : 33
+ SYS_mkdirat : uintptr : 34
+ SYS_unlinkat : uintptr : 35
+ SYS_symlinkat : uintptr : 36
+ SYS_linkat : uintptr : 37
+ SYS_renameat : uintptr : 38
+ SYS_umount2 : uintptr : 39
+ SYS_mount : uintptr : 40
+ SYS_pivot_root : uintptr : 41
+ SYS_nfsservctl : uintptr : 42
+ SYS_statfs : uintptr : 43
+ SYS_fstatfs : uintptr : 44
+ SYS_truncate : uintptr : 45
+ SYS_ftruncate : uintptr : 46
+ SYS_fallocate : uintptr : 47
+ SYS_faccessat : uintptr : 48
+ SYS_chdir : uintptr : 49
+ SYS_fchdir : uintptr : 50
+ SYS_chroot : uintptr : 51
+ SYS_fchmod : uintptr : 52
+ SYS_fchmodat : uintptr : 53
+ SYS_fchownat : uintptr : 54
+ SYS_fchown : uintptr : 55
+ SYS_openat : uintptr : 56
+ SYS_close : uintptr : 57
+ SYS_vhangup : uintptr : 58
+ SYS_pipe2 : uintptr : 59
+ SYS_quotactl : uintptr : 60
+ SYS_getdents64 : uintptr : 61
+ SYS_lseek : uintptr : 62
+ SYS_read : uintptr : 63
+ SYS_write : uintptr : 64
+ SYS_readv : uintptr : 65
+ SYS_writev : uintptr : 66
+ SYS_pread64 : uintptr : 67
+ SYS_pwrite64 : uintptr : 68
+ SYS_preadv : uintptr : 69
+ SYS_pwritev : uintptr : 70
+ SYS_sendfile : uintptr : 71
+ SYS_pselect6 : uintptr : 72
+ SYS_ppoll : uintptr : 73
+ SYS_signalfd4 : uintptr : 74
+ SYS_vmsplice : uintptr : 75
+ SYS_splice : uintptr : 76
+ SYS_tee : uintptr : 77
+ SYS_readlinkat : uintptr : 78
+ SYS_fstatat : uintptr : 79
+ SYS_fstat : uintptr : 80
+ SYS_sync : uintptr : 81
+ SYS_fsync : uintptr : 82
+ SYS_fdatasync : uintptr : 83
+ SYS_sync_file_range : uintptr : 84
+ SYS_timerfd_create : uintptr : 85
+ SYS_timerfd_settime : uintptr : 86
+ SYS_timerfd_gettime : uintptr : 87
+ SYS_utimensat : uintptr : 88
+ SYS_acct : uintptr : 89
+ SYS_capget : uintptr : 90
+ SYS_capset : uintptr : 91
+ SYS_personality : uintptr : 92
+ SYS_exit : uintptr : 93
+ SYS_exit_group : uintptr : 94
+ SYS_waitid : uintptr : 95
+ SYS_set_tid_address : uintptr : 96
+ SYS_unshare : uintptr : 97
SYS_futex : uintptr : 98
+ SYS_set_robust_list : uintptr : 99
+ SYS_get_robust_list : uintptr : 100
+ SYS_nanosleep : uintptr : 101
+ SYS_getitimer : uintptr : 102
+ SYS_setitimer : uintptr : 103
+ SYS_kexec_load : uintptr : 104
+ SYS_init_module : uintptr : 105
+ SYS_delete_module : uintptr : 106
+ SYS_timer_create : uintptr : 107
+ SYS_timer_gettime : uintptr : 108
+ SYS_timer_getoverrun : uintptr : 109
+ SYS_timer_settime : uintptr : 110
+ SYS_timer_delete : uintptr : 111
+ SYS_clock_settime : uintptr : 112
+ SYS_clock_gettime : uintptr : 113
+ SYS_clock_getres : uintptr : 114
+ SYS_clock_nanosleep : uintptr : 115
+ SYS_syslog : uintptr : 116
+ SYS_ptrace : uintptr : 117
+ SYS_sched_setparam : uintptr : 118
+ SYS_sched_setscheduler : uintptr : 119
+ SYS_sched_getscheduler : uintptr : 120
+ SYS_sched_getparam : uintptr : 121
+ SYS_sched_setaffinity : uintptr : 122
+ SYS_sched_getaffinity : uintptr : 123
+ SYS_sched_yield : uintptr : 124
+ SYS_sched_get_priority_max : uintptr : 125
+ SYS_sched_get_priority_min : uintptr : 126
+ SYS_sched_rr_get_interval : uintptr : 127
+ SYS_restart_syscall : uintptr : 128
+ SYS_kill : uintptr : 129
+ SYS_tkill : uintptr : 130
+ SYS_tgkill : uintptr : 131
+ SYS_sigaltstack : uintptr : 132
+ SYS_rt_sigsuspend : uintptr : 133
+ SYS_rt_sigaction : uintptr : 134
+ SYS_rt_sigprocmask : uintptr : 135
+ SYS_rt_sigpending : uintptr : 136
+ SYS_rt_sigtimedwait : uintptr : 137
+ SYS_rt_sigqueueinfo : uintptr : 138
+ SYS_rt_sigreturn : uintptr : 139
+ SYS_setpriority : uintptr : 140
+ SYS_getpriority : uintptr : 141
+ SYS_reboot : uintptr : 142
+ SYS_setregid : uintptr : 143
+ SYS_setgid : uintptr : 144
+ SYS_setreuid : uintptr : 145
+ SYS_setuid : uintptr : 146
+ SYS_setresuid : uintptr : 147
+ SYS_getresuid : uintptr : 148
+ SYS_setresgid : uintptr : 149
+ SYS_getresgid : uintptr : 150
+ SYS_setfsuid : uintptr : 151
+ SYS_setfsgid : uintptr : 152
+ SYS_times : uintptr : 153
+ SYS_setpgid : uintptr : 154
+ SYS_getpgid : uintptr : 155
+ SYS_getsid : uintptr : 156
+ SYS_setsid : uintptr : 157
+ SYS_getgroups : uintptr : 158
+ SYS_setgroups : uintptr : 159
+ SYS_uname : uintptr : 160
+ SYS_sethostname : uintptr : 161
+ SYS_setdomainname : uintptr : 162
+ SYS_getrlimit : uintptr : 163
+ SYS_setrlimit : uintptr : 164
+ SYS_getrusage : uintptr : 165
+ SYS_umask : uintptr : 166
+ SYS_prctl : uintptr : 167
+ SYS_getcpu : uintptr : 168
+ SYS_gettimeofday : uintptr : 169
+ SYS_settimeofday : uintptr : 170
+ SYS_adjtimex : uintptr : 171
+ SYS_getpid : uintptr : 172
+ SYS_getppid : uintptr : 173
+ SYS_getuid : uintptr : 174
+ SYS_geteuid : uintptr : 175
+ SYS_getgid : uintptr : 176
+ SYS_getegid : uintptr : 177
SYS_gettid : uintptr : 178
+ SYS_sysinfo : uintptr : 179
+ SYS_mq_open : uintptr : 180
+ SYS_mq_unlink : uintptr : 181
+ SYS_mq_timedsend : uintptr : 182
+ SYS_mq_timedreceive : uintptr : 183
+ SYS_mq_notify : uintptr : 184
+ SYS_mq_getsetattr : uintptr : 185
+ SYS_msgget : uintptr : 186
+ SYS_msgctl : uintptr : 187
+ SYS_msgrcv : uintptr : 188
+ SYS_msgsnd : uintptr : 189
+ SYS_semget : uintptr : 190
+ SYS_semctl : uintptr : 191
+ SYS_semtimedop : uintptr : 192
+ SYS_semop : uintptr : 193
+ SYS_shmget : uintptr : 194
+ SYS_shmctl : uintptr : 195
+ SYS_shmat : uintptr : 196
+ SYS_shmdt : uintptr : 197
+ SYS_socket : uintptr : 198
+ SYS_socketpair : uintptr : 199
+ SYS_bind : uintptr : 200
+ SYS_listen : uintptr : 201
+ SYS_accept : uintptr : 202
+ SYS_connect : uintptr : 203
+ SYS_getsockname : uintptr : 204
+ SYS_getpeername : uintptr : 205
+ SYS_sendto : uintptr : 206
+ SYS_recvfrom : uintptr : 207
+ SYS_setsockopt : uintptr : 208
+ SYS_getsockopt : uintptr : 209
+ SYS_shutdown : uintptr : 210
+ SYS_sendmsg : uintptr : 211
+ SYS_recvmsg : uintptr : 212
+ SYS_readahead : uintptr : 213
+ SYS_brk : uintptr : 214
+ SYS_munmap : uintptr : 215
+ SYS_mremap : uintptr : 216
+ SYS_add_key : uintptr : 217
+ SYS_request_key : uintptr : 218
+ SYS_keyctl : uintptr : 219
+ SYS_clone : uintptr : 220
+ SYS_execve : uintptr : 221
+ SYS_mmap : uintptr : 222
+ SYS_fadvise64 : uintptr : 223
+ SYS_swapon : uintptr : 224
+ SYS_swapoff : uintptr : 225
+ SYS_mprotect : uintptr : 226
+ SYS_msync : uintptr : 227
+ SYS_mlock : uintptr : 228
+ SYS_munlock : uintptr : 229
+ SYS_mlockall : uintptr : 230
+ SYS_munlockall : uintptr : 231
+ SYS_mincore : uintptr : 232
+ SYS_madvise : uintptr : 233
+ SYS_remap_file_pages : uintptr : 234
+ SYS_mbind : uintptr : 235
+ SYS_get_mempolicy : uintptr : 236
+ SYS_set_mempolicy : uintptr : 237
+ SYS_migrate_pages : uintptr : 238
+ SYS_move_pages : uintptr : 239
+ SYS_rt_tgsigqueueinfo : uintptr : 240
+ SYS_perf_event_open : uintptr : 241
+ SYS_accept4 : uintptr : 242
+ SYS_recvmmsg : uintptr : 243
+ SYS_arch_specific_syscall : uintptr : 244
+ SYS_wait4 : uintptr : 260
+ SYS_prlimit64 : uintptr : 261
+ SYS_fanotify_init : uintptr : 262
+ SYS_fanotify_mark : uintptr : 263
+ SYS_clock_adjtime : uintptr : 266
+ SYS_syncfs : uintptr : 267
+ SYS_setns : uintptr : 268
+ SYS_sendmmsg : uintptr : 269
+ SYS_process_vm_readv : uintptr : 270
+ SYS_process_vm_writev : uintptr : 271
+ SYS_kcmp : uintptr : 272
+ SYS_finit_module : uintptr : 273
+ SYS_sched_setattr : uintptr : 274
+ SYS_sched_getattr : uintptr : 275
+ SYS_renameat2 : uintptr : 276
+ SYS_seccomp : uintptr : 277
SYS_getrandom : uintptr : 278
+ SYS_memfd_create : uintptr : 279
+ SYS_bpf : uintptr : 280
+ SYS_execveat : uintptr : 281
+ SYS_userfaultfd : uintptr : 282
+ SYS_membarrier : uintptr : 283
+ SYS_mlock2 : uintptr : 284
+ SYS_copy_file_range : uintptr : 285
+ SYS_preadv2 : uintptr : 286
+ SYS_pwritev2 : uintptr : 287
+ SYS_pkey_mprotect : uintptr : 288
+ SYS_pkey_alloc : uintptr : 289
+ SYS_pkey_free : uintptr : 290
+ SYS_statx : uintptr : 291
+ SYS_io_pgetevents : uintptr : 292
+ SYS_rseq : uintptr : 293
+ SYS_kexec_file_load : uintptr : 294
+ SYS_pidfd_send_signal : uintptr : 424
+ SYS_io_uring_setup : uintptr : 425
+ SYS_io_uring_enter : uintptr : 426
+ SYS_io_uring_register : uintptr : 427
+ SYS_open_tree : uintptr : 428
+ SYS_move_mount : uintptr : 429
+ SYS_fsopen : uintptr : 430
+ SYS_fsconfig : uintptr : 431
+ SYS_fsmount : uintptr : 432
+ SYS_fspick : uintptr : 433
+ SYS_pidfd_open : uintptr : 434
+ SYS_clone3 : uintptr : 435
+ SYS_close_range : uintptr : 436
+ SYS_openat2 : uintptr : 437
+ SYS_pidfd_getfd : uintptr : 438
+ SYS_faccessat2 : uintptr : 439
+ SYS_process_madvise : uintptr : 440
+ SYS_epoll_pwait2 : uintptr : 441
+ SYS_mount_setattr : uintptr : 442
+ SYS_landlock_create_ruleset : uintptr : 444
+ SYS_landlock_add_rule : uintptr : 445
+ SYS_landlock_restrict_self : uintptr : 446
} else when ODIN_ARCH == "386" {
- SYS_mmap : uintptr : 192 // 90 is "sys_old_mmap", we want mmap2
- SYS_mprotect : uintptr : 125
+ SYS_restart_syscall : uintptr : 0
+ SYS_exit : uintptr : 1
+ SYS_fork : uintptr : 2
+ SYS_read : uintptr : 3
+ SYS_write : uintptr : 4
+ SYS_open : uintptr : 5
+ SYS_close : uintptr : 6
+ SYS_waitpid : uintptr : 7
+ SYS_creat : uintptr : 8
+ SYS_link : uintptr : 9
+ SYS_unlink : uintptr : 10
+ SYS_execve : uintptr : 11
+ SYS_chdir : uintptr : 12
+ SYS_time : uintptr : 13
+ SYS_mknod : uintptr : 14
+ SYS_chmod : uintptr : 15
+ SYS_lchown : uintptr : 16
+ SYS_break : uintptr : 17
+ SYS_oldstat : uintptr : 18
+ SYS_lseek : uintptr : 19
+ SYS_getpid : uintptr : 20
+ SYS_mount : uintptr : 21
+ SYS_umount : uintptr : 22
+ SYS_setuid : uintptr : 23
+ SYS_getuid : uintptr : 24
+ SYS_stime : uintptr : 25
+ SYS_ptrace : uintptr : 26
+ SYS_alarm : uintptr : 27
+ SYS_oldfstat : uintptr : 28
+ SYS_pause : uintptr : 29
+ SYS_utime : uintptr : 30
+ SYS_stty : uintptr : 31
+ SYS_gtty : uintptr : 32
+ SYS_access : uintptr : 33
+ SYS_nice : uintptr : 34
+ SYS_ftime : uintptr : 35
+ SYS_sync : uintptr : 36
+ SYS_kill : uintptr : 37
+ SYS_rename : uintptr : 38
+ SYS_mkdir : uintptr : 39
+ SYS_rmdir : uintptr : 40
+ SYS_dup : uintptr : 41
+ SYS_pipe : uintptr : 42
+ SYS_times : uintptr : 43
+ SYS_prof : uintptr : 44
+ SYS_brk : uintptr : 45
+ SYS_setgid : uintptr : 46
+ SYS_getgid : uintptr : 47
+ SYS_signal : uintptr : 48
+ SYS_geteuid : uintptr : 49
+ SYS_getegid : uintptr : 50
+ SYS_acct : uintptr : 51
+ SYS_umount2 : uintptr : 52
+ SYS_lock : uintptr : 53
+ SYS_ioctl : uintptr : 54
+ SYS_fcntl : uintptr : 55
+ SYS_mpx : uintptr : 56
+ SYS_setpgid : uintptr : 57
+ SYS_ulimit : uintptr : 58
+ SYS_oldolduname : uintptr : 59
+ SYS_umask : uintptr : 60
+ SYS_chroot : uintptr : 61
+ SYS_ustat : uintptr : 62
+ SYS_dup2 : uintptr : 63
+ SYS_getppid : uintptr : 64
+ SYS_getpgrp : uintptr : 65
+ SYS_setsid : uintptr : 66
+ SYS_sigaction : uintptr : 67
+ SYS_sgetmask : uintptr : 68
+ SYS_ssetmask : uintptr : 69
+ SYS_setreuid : uintptr : 70
+ SYS_setregid : uintptr : 71
+ SYS_sigsuspend : uintptr : 72
+ SYS_sigpending : uintptr : 73
+ SYS_sethostname : uintptr : 74
+ SYS_setrlimit : uintptr : 75
+ SYS_getrlimit : uintptr : 76
+ SYS_getrusage : uintptr : 77
+ SYS_gettimeofday : uintptr : 78
+ SYS_settimeofday : uintptr : 79
+ SYS_getgroups : uintptr : 80
+ SYS_setgroups : uintptr : 81
+ SYS_select : uintptr : 82
+ SYS_symlink : uintptr : 83
+ SYS_oldlstat : uintptr : 84
+ SYS_readlink : uintptr : 85
+ SYS_uselib : uintptr : 86
+ SYS_swapon : uintptr : 87
+ SYS_reboot : uintptr : 88
+ SYS_readdir : uintptr : 89
+	SYS_old_mmap                     : uintptr : 90 // legacy "sys_old_mmap"; use SYS_mmap (mmap2, 192) instead
SYS_munmap : uintptr : 91
- SYS_madvise : uintptr : 219
- SYS_futex : uintptr : 240
- SYS_gettid : uintptr : 224
- SYS_getrandom : uintptr : 355
-} else when ODIN_ARCH == "arm" {
- SYS_mmap : uintptr : 192 // 90 is "sys_old_mmap", we want mmap2
+ SYS_truncate : uintptr : 92
+ SYS_ftruncate : uintptr : 93
+ SYS_fchmod : uintptr : 94
+ SYS_fchown : uintptr : 95
+ SYS_getpriority : uintptr : 96
+ SYS_setpriority : uintptr : 97
+ SYS_profil : uintptr : 98
+ SYS_statfs : uintptr : 99
+ SYS_fstatfs : uintptr : 100
+ SYS_ioperm : uintptr : 101
+ SYS_socketcall : uintptr : 102
+ SYS_syslog : uintptr : 103
+ SYS_setitimer : uintptr : 104
+ SYS_getitimer : uintptr : 105
+ SYS_stat : uintptr : 106
+ SYS_lstat : uintptr : 107
+ SYS_fstat : uintptr : 108
+ SYS_olduname : uintptr : 109
+ SYS_iopl : uintptr : 110
+ SYS_vhangup : uintptr : 111
+ SYS_idle : uintptr : 112
+ SYS_vm86old : uintptr : 113
+ SYS_wait4 : uintptr : 114
+ SYS_swapoff : uintptr : 115
+ SYS_sysinfo : uintptr : 116
+ SYS_ipc : uintptr : 117
+ SYS_fsync : uintptr : 118
+ SYS_sigreturn : uintptr : 119
+ SYS_clone : uintptr : 120
+ SYS_setdomainname : uintptr : 121
+ SYS_uname : uintptr : 122
+ SYS_modify_ldt : uintptr : 123
+ SYS_adjtimex : uintptr : 124
SYS_mprotect : uintptr : 125
- SYS_munmap: uintptr : 91
- SYS_madvise: uintptr : 220
+ SYS_sigprocmask : uintptr : 126
+ SYS_create_module : uintptr : 127
+ SYS_init_module : uintptr : 128
+ SYS_delete_module : uintptr : 129
+ SYS_get_kernel_syms : uintptr : 130
+ SYS_quotactl : uintptr : 131
+ SYS_getpgid : uintptr : 132
+ SYS_fchdir : uintptr : 133
+ SYS_bdflush : uintptr : 134
+ SYS_sysfs : uintptr : 135
+ SYS_personality : uintptr : 136
+ SYS_afs_syscall : uintptr : 137
+ SYS_setfsuid : uintptr : 138
+ SYS_setfsgid : uintptr : 139
+ SYS__llseek : uintptr : 140
+ SYS_getdents : uintptr : 141
+ SYS__newselect : uintptr : 142
+ SYS_flock : uintptr : 143
+ SYS_msync : uintptr : 144
+ SYS_readv : uintptr : 145
+ SYS_writev : uintptr : 146
+ SYS_getsid : uintptr : 147
+ SYS_fdatasync : uintptr : 148
+ SYS__sysctl : uintptr : 149
+ SYS_mlock : uintptr : 150
+ SYS_munlock : uintptr : 151
+ SYS_mlockall : uintptr : 152
+ SYS_munlockall : uintptr : 153
+ SYS_sched_setparam : uintptr : 154
+ SYS_sched_getparam : uintptr : 155
+ SYS_sched_setscheduler : uintptr : 156
+ SYS_sched_getscheduler : uintptr : 157
+ SYS_sched_yield : uintptr : 158
+ SYS_sched_get_priority_max : uintptr : 159
+ SYS_sched_get_priority_min : uintptr : 160
+ SYS_sched_rr_get_interval : uintptr : 161
+ SYS_nanosleep : uintptr : 162
+ SYS_mremap : uintptr : 163
+ SYS_setresuid : uintptr : 164
+ SYS_getresuid : uintptr : 165
+ SYS_vm86 : uintptr : 166
+ SYS_query_module : uintptr : 167
+ SYS_poll : uintptr : 168
+ SYS_nfsservctl : uintptr : 169
+ SYS_setresgid : uintptr : 170
+ SYS_getresgid : uintptr : 171
+ SYS_prctl : uintptr : 172
+ SYS_rt_sigreturn : uintptr : 173
+ SYS_rt_sigaction : uintptr : 174
+ SYS_rt_sigprocmask : uintptr : 175
+ SYS_rt_sigpending : uintptr : 176
+ SYS_rt_sigtimedwait : uintptr : 177
+ SYS_rt_sigqueueinfo : uintptr : 178
+ SYS_rt_sigsuspend : uintptr : 179
+ SYS_pread64 : uintptr : 180
+ SYS_pwrite64 : uintptr : 181
+ SYS_chown : uintptr : 182
+ SYS_getcwd : uintptr : 183
+ SYS_capget : uintptr : 184
+ SYS_capset : uintptr : 185
+ SYS_sigaltstack : uintptr : 186
+ SYS_sendfile : uintptr : 187
+ SYS_getpmsg : uintptr : 188
+ SYS_putpmsg : uintptr : 189
+ SYS_vfork : uintptr : 190
+ SYS_ugetrlimit : uintptr : 191
+ SYS_mmap : uintptr : 192 // actually mmap2
+ SYS_truncate64 : uintptr : 193
+ SYS_ftruncate64 : uintptr : 194
+ SYS_stat64 : uintptr : 195
+ SYS_lstat64 : uintptr : 196
+ SYS_fstat64 : uintptr : 197
+ SYS_lchown32 : uintptr : 198
+ SYS_getuid32 : uintptr : 199
+ SYS_getgid32 : uintptr : 200
+ SYS_geteuid32 : uintptr : 201
+ SYS_getegid32 : uintptr : 202
+ SYS_setreuid32 : uintptr : 203
+ SYS_setregid32 : uintptr : 204
+ SYS_getgroups32 : uintptr : 205
+ SYS_setgroups32 : uintptr : 206
+ SYS_fchown32 : uintptr : 207
+ SYS_setresuid32 : uintptr : 208
+ SYS_getresuid32 : uintptr : 209
+ SYS_setresgid32 : uintptr : 210
+ SYS_getresgid32 : uintptr : 211
+ SYS_chown32 : uintptr : 212
+ SYS_setuid32 : uintptr : 213
+ SYS_setgid32 : uintptr : 214
+ SYS_setfsuid32 : uintptr : 215
+ SYS_setfsgid32 : uintptr : 216
+ SYS_pivot_root : uintptr : 217
+ SYS_mincore : uintptr : 218
+ SYS_madvise : uintptr : 219
+ SYS_getdents64 : uintptr : 220
+ SYS_fcntl64 : uintptr : 221
+ SYS_gettid : uintptr : 224
+ SYS_readahead : uintptr : 225
+ SYS_setxattr : uintptr : 226
+ SYS_lsetxattr : uintptr : 227
+ SYS_fsetxattr : uintptr : 228
+ SYS_getxattr : uintptr : 229
+ SYS_lgetxattr : uintptr : 230
+ SYS_fgetxattr : uintptr : 231
+ SYS_listxattr : uintptr : 232
+ SYS_llistxattr : uintptr : 233
+ SYS_flistxattr : uintptr : 234
+ SYS_removexattr : uintptr : 235
+ SYS_lremovexattr : uintptr : 236
+ SYS_fremovexattr : uintptr : 237
+ SYS_tkill : uintptr : 238
+ SYS_sendfile64 : uintptr : 239
SYS_futex : uintptr : 240
- SYS_gettid : uintptr: 224
+ SYS_sched_setaffinity : uintptr : 241
+ SYS_sched_getaffinity : uintptr : 242
+ SYS_set_thread_area : uintptr : 243
+ SYS_get_thread_area : uintptr : 244
+ SYS_io_setup : uintptr : 245
+ SYS_io_destroy : uintptr : 246
+ SYS_io_getevents : uintptr : 247
+ SYS_io_submit : uintptr : 248
+ SYS_io_cancel : uintptr : 249
+ SYS_fadvise64 : uintptr : 250
+ SYS_exit_group : uintptr : 252
+ SYS_lookup_dcookie : uintptr : 253
+ SYS_epoll_create : uintptr : 254
+ SYS_epoll_ctl : uintptr : 255
+ SYS_epoll_wait : uintptr : 256
+ SYS_remap_file_pages : uintptr : 257
+ SYS_set_tid_address : uintptr : 258
+ SYS_timer_create : uintptr : 259
+ SYS_timer_settime : uintptr : 260
+ SYS_timer_gettime : uintptr : 261
+ SYS_timer_getoverrun : uintptr : 262
+ SYS_timer_delete : uintptr : 263
+ SYS_clock_settime : uintptr : 264
+ SYS_clock_gettime : uintptr : 265
+ SYS_clock_getres : uintptr : 266
+ SYS_clock_nanosleep : uintptr : 267
+ SYS_statfs64 : uintptr : 268
+ SYS_fstatfs64 : uintptr : 269
+ SYS_tgkill : uintptr : 270
+ SYS_utimes : uintptr : 271
+ SYS_fadvise64_64 : uintptr : 272
+ SYS_vserver : uintptr : 273
+ SYS_mbind : uintptr : 274
+ SYS_get_mempolicy : uintptr : 275
+ SYS_set_mempolicy : uintptr : 276
+ SYS_mq_open : uintptr : 277
+ SYS_mq_unlink : uintptr : 278
+ SYS_mq_timedsend : uintptr : 279
+ SYS_mq_timedreceive : uintptr : 280
+ SYS_mq_notify : uintptr : 281
+ SYS_mq_getsetattr : uintptr : 282
+ SYS_kexec_load : uintptr : 283
+ SYS_waitid : uintptr : 284
+ SYS_add_key : uintptr : 286
+ SYS_request_key : uintptr : 287
+ SYS_keyctl : uintptr : 288
+ SYS_ioprio_set : uintptr : 289
+ SYS_ioprio_get : uintptr : 290
+ SYS_inotify_init : uintptr : 291
+ SYS_inotify_add_watch : uintptr : 292
+ SYS_inotify_rm_watch : uintptr : 293
+ SYS_migrate_pages : uintptr : 294
+ SYS_openat : uintptr : 295
+ SYS_mkdirat : uintptr : 296
+ SYS_mknodat : uintptr : 297
+ SYS_fchownat : uintptr : 298
+ SYS_futimesat : uintptr : 299
+ SYS_fstatat64 : uintptr : 300
+ SYS_unlinkat : uintptr : 301
+ SYS_renameat : uintptr : 302
+ SYS_linkat : uintptr : 303
+ SYS_symlinkat : uintptr : 304
+ SYS_readlinkat : uintptr : 305
+ SYS_fchmodat : uintptr : 306
+ SYS_faccessat : uintptr : 307
+ SYS_pselect6 : uintptr : 308
+ SYS_ppoll : uintptr : 309
+ SYS_unshare : uintptr : 310
+ SYS_set_robust_list : uintptr : 311
+ SYS_get_robust_list : uintptr : 312
+ SYS_splice : uintptr : 313
+ SYS_sync_file_range : uintptr : 314
+ SYS_tee : uintptr : 315
+ SYS_vmsplice : uintptr : 316
+ SYS_move_pages : uintptr : 317
+ SYS_getcpu : uintptr : 318
+ SYS_epoll_pwait : uintptr : 319
+ SYS_utimensat : uintptr : 320
+ SYS_signalfd : uintptr : 321
+ SYS_timerfd_create : uintptr : 322
+ SYS_eventfd : uintptr : 323
+ SYS_fallocate : uintptr : 324
+ SYS_timerfd_settime : uintptr : 325
+ SYS_timerfd_gettime : uintptr : 326
+ SYS_signalfd4 : uintptr : 327
+ SYS_eventfd2 : uintptr : 328
+ SYS_epoll_create1 : uintptr : 329
+ SYS_dup3 : uintptr : 330
+ SYS_pipe2 : uintptr : 331
+ SYS_inotify_init1 : uintptr : 332
+ SYS_preadv : uintptr : 333
+ SYS_pwritev : uintptr : 334
+ SYS_rt_tgsigqueueinfo : uintptr : 335
+ SYS_perf_event_open : uintptr : 336
+ SYS_recvmmsg : uintptr : 337
+ SYS_fanotify_init : uintptr : 338
+ SYS_fanotify_mark : uintptr : 339
+ SYS_prlimit64 : uintptr : 340
+ SYS_name_to_handle_at : uintptr : 341
+ SYS_open_by_handle_at : uintptr : 342
+ SYS_clock_adjtime : uintptr : 343
+ SYS_syncfs : uintptr : 344
+ SYS_sendmmsg : uintptr : 345
+ SYS_setns : uintptr : 346
+ SYS_process_vm_readv : uintptr : 347
+ SYS_process_vm_writev : uintptr : 348
+ SYS_kcmp : uintptr : 349
+ SYS_finit_module : uintptr : 350
+ SYS_sched_setattr : uintptr : 351
+ SYS_sched_getattr : uintptr : 352
+ SYS_renameat2 : uintptr : 353
+ SYS_seccomp : uintptr : 354
+ SYS_getrandom : uintptr : 355
+ SYS_memfd_create : uintptr : 356
+ SYS_bpf : uintptr : 357
+ SYS_execveat : uintptr : 358
+ SYS_socket : uintptr : 359
+ SYS_socketpair : uintptr : 360
+ SYS_bind : uintptr : 361
+ SYS_connect : uintptr : 362
+ SYS_listen : uintptr : 363
+ SYS_accept4 : uintptr : 364
+ SYS_getsockopt : uintptr : 365
+ SYS_setsockopt : uintptr : 366
+ SYS_getsockname : uintptr : 367
+ SYS_getpeername : uintptr : 368
+ SYS_sendto : uintptr : 369
+ SYS_sendmsg : uintptr : 370
+ SYS_recvfrom : uintptr : 371
+ SYS_recvmsg : uintptr : 372
+ SYS_shutdown : uintptr : 373
+ SYS_userfaultfd : uintptr : 374
+ SYS_membarrier : uintptr : 375
+ SYS_mlock2 : uintptr : 376
+ SYS_copy_file_range : uintptr : 377
+ SYS_preadv2 : uintptr : 378
+ SYS_pwritev2 : uintptr : 379
+ SYS_pkey_mprotect : uintptr : 380
+ SYS_pkey_alloc : uintptr : 381
+ SYS_pkey_free : uintptr : 382
+ SYS_statx : uintptr : 383
+ SYS_arch_prctl : uintptr : 384
+ SYS_io_pgetevents : uintptr : 385
+ SYS_rseq : uintptr : 386
+ SYS_semget : uintptr : 393
+ SYS_semctl : uintptr : 394
+ SYS_shmget : uintptr : 395
+ SYS_shmctl : uintptr : 396
+ SYS_shmat : uintptr : 397
+ SYS_shmdt : uintptr : 398
+ SYS_msgget : uintptr : 399
+ SYS_msgsnd : uintptr : 400
+ SYS_msgrcv : uintptr : 401
+ SYS_msgctl : uintptr : 402
+ SYS_clock_gettime64 : uintptr : 403
+ SYS_clock_settime64 : uintptr : 404
+ SYS_clock_adjtime64 : uintptr : 405
+ SYS_clock_getres_time64 : uintptr : 406
+ SYS_clock_nanosleep_time64 : uintptr : 407
+ SYS_timer_gettime64 : uintptr : 408
+ SYS_timer_settime64 : uintptr : 409
+ SYS_timerfd_gettime64 : uintptr : 410
+ SYS_timerfd_settime64 : uintptr : 411
+ SYS_utimensat_time64 : uintptr : 412
+ SYS_pselect6_time64 : uintptr : 413
+ SYS_ppoll_time64 : uintptr : 414
+ SYS_io_pgetevents_time64 : uintptr : 416
+ SYS_recvmmsg_time64 : uintptr : 417
+ SYS_mq_timedsend_time64 : uintptr : 418
+ SYS_mq_timedreceive_time64 : uintptr : 419
+ SYS_semtimedop_time64 : uintptr : 420
+ SYS_rt_sigtimedwait_time64 : uintptr : 421
+ SYS_futex_time64 : uintptr : 422
+ SYS_sched_rr_get_interval_time64 : uintptr : 423
+ SYS_pidfd_send_signal : uintptr : 424
+ SYS_io_uring_setup : uintptr : 425
+ SYS_io_uring_enter : uintptr : 426
+ SYS_io_uring_register : uintptr : 427
+ SYS_open_tree : uintptr : 428
+ SYS_move_mount : uintptr : 429
+ SYS_fsopen : uintptr : 430
+ SYS_fsconfig : uintptr : 431
+ SYS_fsmount : uintptr : 432
+ SYS_fspick : uintptr : 433
+ SYS_pidfd_open : uintptr : 434
+ SYS_clone3 : uintptr : 435
+ SYS_close_range : uintptr : 436
+ SYS_openat2 : uintptr : 437
+ SYS_pidfd_getfd : uintptr : 438
+ SYS_faccessat2 : uintptr : 439
+ SYS_process_madvise : uintptr : 440
+ SYS_epoll_pwait2 : uintptr : 441
+ SYS_mount_setattr : uintptr : 442
+ SYS_landlock_create_ruleset : uintptr : 444
+ SYS_landlock_add_rule : uintptr : 445
+ SYS_landlock_restrict_self : uintptr : 446
+ SYS_memfd_secret : uintptr : 447
+} else when ODIN_ARCH == "arm" {
+ SYS_restart_syscall : uintptr : 0
+ SYS_exit : uintptr : 1
+ SYS_fork : uintptr : 2
+ SYS_read : uintptr : 3
+ SYS_write : uintptr : 4
+ SYS_open : uintptr : 5
+ SYS_close : uintptr : 6
+ SYS_creat : uintptr : 8
+ SYS_link : uintptr : 9
+ SYS_unlink : uintptr : 10
+ SYS_execve : uintptr : 11
+ SYS_chdir : uintptr : 12
+ SYS_mknod : uintptr : 14
+ SYS_chmod : uintptr : 15
+ SYS_lchown : uintptr : 16
+ SYS_lseek : uintptr : 19
+ SYS_getpid : uintptr : 20
+ SYS_mount : uintptr : 21
+ SYS_setuid : uintptr : 23
+ SYS_getuid : uintptr : 24
+ SYS_ptrace : uintptr : 26
+ SYS_pause : uintptr : 29
+ SYS_access : uintptr : 33
+ SYS_nice : uintptr : 34
+ SYS_sync : uintptr : 36
+ SYS_kill : uintptr : 37
+ SYS_rename : uintptr : 38
+ SYS_mkdir : uintptr : 39
+ SYS_rmdir : uintptr : 40
+ SYS_dup : uintptr : 41
+ SYS_pipe : uintptr : 42
+ SYS_times : uintptr : 43
+ SYS_brk : uintptr : 45
+ SYS_setgid : uintptr : 46
+ SYS_getgid : uintptr : 47
+ SYS_geteuid : uintptr : 49
+ SYS_getegid : uintptr : 50
+ SYS_acct : uintptr : 51
+ SYS_umount2 : uintptr : 52
+ SYS_ioctl : uintptr : 54
+ SYS_fcntl : uintptr : 55
+ SYS_setpgid : uintptr : 57
+ SYS_umask : uintptr : 60
+ SYS_chroot : uintptr : 61
+ SYS_ustat : uintptr : 62
+ SYS_dup2 : uintptr : 63
+ SYS_getppid : uintptr : 64
+ SYS_getpgrp : uintptr : 65
+ SYS_setsid : uintptr : 66
+ SYS_sigaction : uintptr : 67
+ SYS_setreuid : uintptr : 70
+ SYS_setregid : uintptr : 71
+ SYS_sigsuspend : uintptr : 72
+ SYS_sigpending : uintptr : 73
+ SYS_sethostname : uintptr : 74
+ SYS_setrlimit : uintptr : 75
+ SYS_getrusage : uintptr : 77
+ SYS_gettimeofday : uintptr : 78
+ SYS_settimeofday : uintptr : 79
+ SYS_getgroups : uintptr : 80
+ SYS_setgroups : uintptr : 81
+ SYS_symlink : uintptr : 83
+ SYS_readlink : uintptr : 85
+ SYS_uselib : uintptr : 86
+ SYS_swapon : uintptr : 87
+ SYS_reboot : uintptr : 88
+ SYS_munmap : uintptr : 91
+ SYS_truncate : uintptr : 92
+ SYS_ftruncate : uintptr : 93
+ SYS_fchmod : uintptr : 94
+ SYS_fchown : uintptr : 95
+ SYS_getpriority : uintptr : 96
+ SYS_setpriority : uintptr : 97
+ SYS_statfs : uintptr : 99
+ SYS_fstatfs : uintptr : 100
+ SYS_syslog : uintptr : 103
+ SYS_setitimer : uintptr : 104
+ SYS_getitimer : uintptr : 105
+ SYS_stat : uintptr : 106
+ SYS_lstat : uintptr : 107
+ SYS_fstat : uintptr : 108
+ SYS_vhangup : uintptr : 111
+ SYS_wait4 : uintptr : 114
+ SYS_swapoff : uintptr : 115
+ SYS_sysinfo : uintptr : 116
+ SYS_fsync : uintptr : 118
+ SYS_sigreturn : uintptr : 119
+ SYS_clone : uintptr : 120
+ SYS_setdomainname : uintptr : 121
+ SYS_uname : uintptr : 122
+ SYS_adjtimex : uintptr : 124
+ SYS_mprotect : uintptr : 125
+ SYS_sigprocmask : uintptr : 126
+ SYS_init_module : uintptr : 128
+ SYS_delete_module : uintptr : 129
+ SYS_quotactl : uintptr : 131
+ SYS_getpgid : uintptr : 132
+ SYS_fchdir : uintptr : 133
+ SYS_bdflush : uintptr : 134
+ SYS_sysfs : uintptr : 135
+ SYS_personality : uintptr : 136
+ SYS_setfsuid : uintptr : 138
+ SYS_setfsgid : uintptr : 139
+ SYS__llseek : uintptr : 140
+ SYS_getdents : uintptr : 141
+ SYS__newselect : uintptr : 142
+ SYS_flock : uintptr : 143
+ SYS_msync : uintptr : 144
+ SYS_readv : uintptr : 145
+ SYS_writev : uintptr : 146
+ SYS_getsid : uintptr : 147
+ SYS_fdatasync : uintptr : 148
+ SYS__sysctl : uintptr : 149
+ SYS_mlock : uintptr : 150
+ SYS_munlock : uintptr : 151
+ SYS_mlockall : uintptr : 152
+ SYS_munlockall : uintptr : 153
+ SYS_sched_setparam : uintptr : 154
+ SYS_sched_getparam : uintptr : 155
+ SYS_sched_setscheduler : uintptr : 156
+ SYS_sched_getscheduler : uintptr : 157
+ SYS_sched_yield : uintptr : 158
+ SYS_sched_get_priority_max : uintptr : 159
+ SYS_sched_get_priority_min : uintptr : 160
+ SYS_sched_rr_get_interval : uintptr : 161
+ SYS_nanosleep : uintptr : 162
+ SYS_mremap : uintptr : 163
+ SYS_setresuid : uintptr : 164
+ SYS_getresuid : uintptr : 165
+ SYS_poll : uintptr : 168
+ SYS_nfsservctl : uintptr : 169
+ SYS_setresgid : uintptr : 170
+ SYS_getresgid : uintptr : 171
+ SYS_prctl : uintptr : 172
+ SYS_rt_sigreturn : uintptr : 173
+ SYS_rt_sigaction : uintptr : 174
+ SYS_rt_sigprocmask : uintptr : 175
+ SYS_rt_sigpending : uintptr : 176
+ SYS_rt_sigtimedwait : uintptr : 177
+ SYS_rt_sigqueueinfo : uintptr : 178
+ SYS_rt_sigsuspend : uintptr : 179
+ SYS_pread64 : uintptr : 180
+ SYS_pwrite64 : uintptr : 181
+ SYS_chown : uintptr : 182
+ SYS_getcwd : uintptr : 183
+ SYS_capget : uintptr : 184
+ SYS_capset : uintptr : 185
+ SYS_sigaltstack : uintptr : 186
+ SYS_sendfile : uintptr : 187
+ SYS_vfork : uintptr : 190
+ SYS_ugetrlimit : uintptr : 191
+ SYS_mmap : uintptr : 192 // actually mmap2
+ SYS_truncate64 : uintptr : 193
+ SYS_ftruncate64 : uintptr : 194
+ SYS_stat64 : uintptr : 195
+ SYS_lstat64 : uintptr : 196
+ SYS_fstat64 : uintptr : 197
+ SYS_lchown32 : uintptr : 198
+ SYS_getuid32 : uintptr : 199
+ SYS_getgid32 : uintptr : 200
+ SYS_geteuid32 : uintptr : 201
+ SYS_getegid32 : uintptr : 202
+ SYS_setreuid32 : uintptr : 203
+ SYS_setregid32 : uintptr : 204
+ SYS_getgroups32 : uintptr : 205
+ SYS_setgroups32 : uintptr : 206
+ SYS_fchown32 : uintptr : 207
+ SYS_setresuid32 : uintptr : 208
+ SYS_getresuid32 : uintptr : 209
+ SYS_setresgid32 : uintptr : 210
+ SYS_getresgid32 : uintptr : 211
+ SYS_chown32 : uintptr : 212
+ SYS_setuid32 : uintptr : 213
+ SYS_setgid32 : uintptr : 214
+ SYS_setfsuid32 : uintptr : 215
+ SYS_setfsgid32 : uintptr : 216
+ SYS_getdents64 : uintptr : 217
+ SYS_pivot_root : uintptr : 218
+ SYS_mincore : uintptr : 219
+ SYS_madvise : uintptr : 220
+ SYS_fcntl64 : uintptr : 221
+ SYS_gettid : uintptr : 224
+ SYS_readahead : uintptr : 225
+ SYS_setxattr : uintptr : 226
+ SYS_lsetxattr : uintptr : 227
+ SYS_fsetxattr : uintptr : 228
+ SYS_getxattr : uintptr : 229
+ SYS_lgetxattr : uintptr : 230
+ SYS_fgetxattr : uintptr : 231
+ SYS_listxattr : uintptr : 232
+ SYS_llistxattr : uintptr : 233
+ SYS_flistxattr : uintptr : 234
+ SYS_removexattr : uintptr : 235
+ SYS_lremovexattr : uintptr : 236
+ SYS_fremovexattr : uintptr : 237
+ SYS_tkill : uintptr : 238
+ SYS_sendfile64 : uintptr : 239
+ SYS_futex : uintptr : 240
+ SYS_sched_setaffinity : uintptr : 241
+ SYS_sched_getaffinity : uintptr : 242
+ SYS_io_setup : uintptr : 243
+ SYS_io_destroy : uintptr : 244
+ SYS_io_getevents : uintptr : 245
+ SYS_io_submit : uintptr : 246
+ SYS_io_cancel : uintptr : 247
+ SYS_exit_group : uintptr : 248
+ SYS_lookup_dcookie : uintptr : 249
+ SYS_epoll_create : uintptr : 250
+ SYS_epoll_ctl : uintptr : 251
+ SYS_epoll_wait : uintptr : 252
+ SYS_remap_file_pages : uintptr : 253
+ SYS_set_tid_address : uintptr : 256
+ SYS_timer_create : uintptr : 257
+ SYS_timer_settime : uintptr : 258
+ SYS_timer_gettime : uintptr : 259
+ SYS_timer_getoverrun : uintptr : 260
+ SYS_timer_delete : uintptr : 261
+ SYS_clock_settime : uintptr : 262
+ SYS_clock_gettime : uintptr : 263
+ SYS_clock_getres : uintptr : 264
+ SYS_clock_nanosleep : uintptr : 265
+ SYS_statfs64 : uintptr : 266
+ SYS_fstatfs64 : uintptr : 267
+ SYS_tgkill : uintptr : 268
+ SYS_utimes : uintptr : 269
+ SYS_fadvise64_64 : uintptr : 270
+ SYS_pciconfig_iobase : uintptr : 271
+ SYS_pciconfig_read : uintptr : 272
+ SYS_pciconfig_write : uintptr : 273
+ SYS_mq_open : uintptr : 274
+ SYS_mq_unlink : uintptr : 275
+ SYS_mq_timedsend : uintptr : 276
+ SYS_mq_timedreceive : uintptr : 277
+ SYS_mq_notify : uintptr : 278
+ SYS_mq_getsetattr : uintptr : 279
+ SYS_waitid : uintptr : 280
+ SYS_socket : uintptr : 281
+ SYS_bind : uintptr : 282
+ SYS_connect : uintptr : 283
+ SYS_listen : uintptr : 284
+ SYS_accept : uintptr : 285
+ SYS_getsockname : uintptr : 286
+ SYS_getpeername : uintptr : 287
+ SYS_socketpair : uintptr : 288
+ SYS_send : uintptr : 289
+ SYS_sendto : uintptr : 290
+ SYS_recv : uintptr : 291
+ SYS_recvfrom : uintptr : 292
+ SYS_shutdown : uintptr : 293
+ SYS_setsockopt : uintptr : 294
+ SYS_getsockopt : uintptr : 295
+ SYS_sendmsg : uintptr : 296
+ SYS_recvmsg : uintptr : 297
+ SYS_semop : uintptr : 298
+ SYS_semget : uintptr : 299
+ SYS_semctl : uintptr : 300
+ SYS_msgsnd : uintptr : 301
+ SYS_msgrcv : uintptr : 302
+ SYS_msgget : uintptr : 303
+ SYS_msgctl : uintptr : 304
+ SYS_shmat : uintptr : 305
+ SYS_shmdt : uintptr : 306
+ SYS_shmget : uintptr : 307
+ SYS_shmctl : uintptr : 308
+ SYS_add_key : uintptr : 309
+ SYS_request_key : uintptr : 310
+ SYS_keyctl : uintptr : 311
+ SYS_semtimedop : uintptr : 312
+ SYS_vserver : uintptr : 313
+ SYS_ioprio_set : uintptr : 314
+ SYS_ioprio_get : uintptr : 315
+ SYS_inotify_init : uintptr : 316
+ SYS_inotify_add_watch : uintptr : 317
+ SYS_inotify_rm_watch : uintptr : 318
+ SYS_mbind : uintptr : 319
+ SYS_get_mempolicy : uintptr : 320
+ SYS_set_mempolicy : uintptr : 321
+ SYS_openat : uintptr : 322
+ SYS_mkdirat : uintptr : 323
+ SYS_mknodat : uintptr : 324
+ SYS_fchownat : uintptr : 325
+ SYS_futimesat : uintptr : 326
+ SYS_fstatat64 : uintptr : 327
+ SYS_unlinkat : uintptr : 328
+ SYS_renameat : uintptr : 329
+ SYS_linkat : uintptr : 330
+ SYS_symlinkat : uintptr : 331
+ SYS_readlinkat : uintptr : 332
+ SYS_fchmodat : uintptr : 333
+ SYS_faccessat : uintptr : 334
+ SYS_pselect6 : uintptr : 335
+ SYS_ppoll : uintptr : 336
+ SYS_unshare : uintptr : 337
+ SYS_set_robust_list : uintptr : 338
+ SYS_get_robust_list : uintptr : 339
+ SYS_splice : uintptr : 340
+ SYS_sync_file_range : uintptr : 341
+ SYS_tee : uintptr : 342
+ SYS_vmsplice : uintptr : 343
+ SYS_move_pages : uintptr : 344
+ SYS_getcpu : uintptr : 345
+ SYS_epoll_pwait : uintptr : 346
+ SYS_kexec_load : uintptr : 347
+ SYS_utimensat : uintptr : 348
+ SYS_signalfd : uintptr : 349
+ SYS_timerfd_create : uintptr : 350
+ SYS_eventfd : uintptr : 351
+ SYS_fallocate : uintptr : 352
+ SYS_timerfd_settime : uintptr : 353
+ SYS_timerfd_gettime : uintptr : 354
+ SYS_signalfd4 : uintptr : 355
+ SYS_eventfd2 : uintptr : 356
+ SYS_epoll_create1 : uintptr : 357
+ SYS_dup3 : uintptr : 358
+ SYS_pipe2 : uintptr : 359
+ SYS_inotify_init1 : uintptr : 360
+ SYS_preadv : uintptr : 361
+ SYS_pwritev : uintptr : 362
+ SYS_rt_tgsigqueueinfo : uintptr : 363
+ SYS_perf_event_open : uintptr : 364
+ SYS_recvmmsg : uintptr : 365
+ SYS_accept4 : uintptr : 366
+ SYS_fanotify_init : uintptr : 367
+ SYS_fanotify_mark : uintptr : 368
+ SYS_prlimit64 : uintptr : 369
+ SYS_name_to_handle_at : uintptr : 370
+ SYS_open_by_handle_at : uintptr : 371
+ SYS_clock_adjtime : uintptr : 372
+ SYS_syncfs : uintptr : 373
+ SYS_sendmmsg : uintptr : 374
+ SYS_setns : uintptr : 375
+ SYS_process_vm_readv : uintptr : 376
+ SYS_process_vm_writev : uintptr : 377
+ SYS_kcmp : uintptr : 378
+ SYS_finit_module : uintptr : 379
+ SYS_sched_setattr : uintptr : 380
+ SYS_sched_getattr : uintptr : 381
+ SYS_renameat2 : uintptr : 382
+ SYS_seccomp : uintptr : 383
SYS_getrandom : uintptr : 384
+ SYS_memfd_create : uintptr : 385
+ SYS_bpf : uintptr : 386
+ SYS_execveat : uintptr : 387
+ SYS_userfaultfd : uintptr : 388
+ SYS_membarrier : uintptr : 389
+ SYS_mlock2 : uintptr : 390
+ SYS_copy_file_range : uintptr : 391
+ SYS_preadv2 : uintptr : 392
+ SYS_pwritev2 : uintptr : 393
+ SYS_pkey_mprotect : uintptr : 394
+ SYS_pkey_alloc : uintptr : 395
+ SYS_pkey_free : uintptr : 396
+ SYS_statx : uintptr : 397
+ SYS_rseq : uintptr : 398
+ SYS_io_pgetevents : uintptr : 399
+ SYS_migrate_pages : uintptr : 400
+ SYS_kexec_file_load : uintptr : 401
+ SYS_clock_gettime64 : uintptr : 403
+ SYS_clock_settime64 : uintptr : 404
+ SYS_clock_adjtime64 : uintptr : 405
+ SYS_clock_getres_time64 : uintptr : 406
+ SYS_clock_nanosleep_time64 : uintptr : 407
+ SYS_timer_gettime64 : uintptr : 408
+ SYS_timer_settime64 : uintptr : 409
+ SYS_timerfd_gettime64 : uintptr : 410
+ SYS_timerfd_settime64 : uintptr : 411
+ SYS_utimensat_time64 : uintptr : 412
+ SYS_pselect6_time64 : uintptr : 413
+ SYS_ppoll_time64 : uintptr : 414
+ SYS_io_pgetevents_time64 : uintptr : 416
+ SYS_recvmmsg_time64 : uintptr : 417
+ SYS_mq_timedsend_time64 : uintptr : 418
+ SYS_mq_timedreceive_time64 : uintptr : 419
+ SYS_semtimedop_time64 : uintptr : 420
+ SYS_rt_sigtimedwait_time64 : uintptr : 421
+ SYS_futex_time64 : uintptr : 422
+ SYS_sched_rr_get_interval_time64 : uintptr : 423
+ SYS_pidfd_send_signal : uintptr : 424
+ SYS_io_uring_setup : uintptr : 425
+ SYS_io_uring_enter : uintptr : 426
+ SYS_io_uring_register : uintptr : 427
+ SYS_open_tree : uintptr : 428
+ SYS_move_mount : uintptr : 429
+ SYS_fsopen : uintptr : 430
+ SYS_fsconfig : uintptr : 431
+ SYS_fsmount : uintptr : 432
+ SYS_fspick : uintptr : 433
+ SYS_pidfd_open : uintptr : 434
+ SYS_clone3 : uintptr : 435
+ SYS_close_range : uintptr : 436
+ SYS_openat2 : uintptr : 437
+ SYS_pidfd_getfd : uintptr : 438
+ SYS_faccessat2 : uintptr : 439
+ SYS_process_madvise : uintptr : 440
+ SYS_epoll_pwait2 : uintptr : 441
+ SYS_mount_setattr : uintptr : 442
+ SYS_landlock_create_ruleset : uintptr : 444
+ SYS_landlock_add_rule : uintptr : 445
+ SYS_landlock_restrict_self : uintptr : 446
} else {
#panic("Unsupported architecture")
}
From 68e5f57e278ea7a3404508783daf2cc299a202e2 Mon Sep 17 00:00:00 2001
From: Platin21
Date: Mon, 3 Jan 2022 20:34:57 +0100
Subject: [PATCH 060/710] Fixes open system call (Thanks TIM!)
---
core/os/os_darwin.odin | 25 ++++++++++++++++++++++++-
1 file changed, 24 insertions(+), 1 deletion(-)
diff --git a/core/os/os_darwin.odin b/core/os/os_darwin.odin
index d40c80aeb..d882dcbbd 100644
--- a/core/os/os_darwin.odin
+++ b/core/os/os_darwin.odin
@@ -296,6 +296,8 @@ foreign libc {
@(link_name="readdir_r$INODE64") _unix_readdir_r :: proc(dirp: Dir, entry: ^Dirent, result: ^^Dirent) -> c.int ---
@(link_name="fcntl") _unix_fcntl :: proc(fd: Handle, cmd: c.int, buf: ^byte) -> c.int ---
+ @(link_name="fchmod") _unix_fchmod :: proc(fildes: Handle, mode: u16) -> c.int ---;
+
@(link_name="malloc") _unix_malloc :: proc(size: int) -> rawptr ---
@(link_name="calloc") _unix_calloc :: proc(num, size: int) -> rawptr ---
@(link_name="free") _unix_free :: proc(ptr: rawptr) ---
@@ -305,6 +307,8 @@ foreign libc {
@(link_name="chdir") _unix_chdir :: proc(buf: cstring) -> c.int ---
@(link_name="realpath") _unix_realpath :: proc(path: cstring, resolved_path: rawptr) -> rawptr ---
+ @(link_name="strerror") _darwin_string_error :: proc(num : c.int) -> cstring ---;
+
@(link_name="exit") _unix_exit :: proc(status: c.int) -> ! ---
}
@@ -319,16 +323,35 @@ get_last_error :: proc() -> int {
return __error()^
}
-open :: proc(path: string, flags: int = O_RDONLY, mode: int = 0) -> (Handle, Errno) {
+get_last_error_string :: proc() -> string {
+ return cast(string)_darwin_string_error(cast(c.int)get_last_error());
+}
+
+open :: proc(path: string, flags: int = O_RDWR|O_CREATE, mode: int = 0) -> (Handle, Errno) {
cstr := strings.clone_to_cstring(path)
handle := _unix_open(cstr, i32(flags), u16(mode))
delete(cstr)
if handle == -1 {
return INVALID_HANDLE, 1
}
+
+when ODIN_OS == "darwin" && ODIN_ARCH == "arm64" {
+ if mode != 0 {
+ err := fchmod(handle, cast(u16)mode)
+ if err != 0 {
+ _unix_close(handle)
+ return INVALID_HANDLE, 1
+ }
+ }
+}
+
return handle, 0
}
+fchmod :: proc(fildes: Handle, mode: u16) -> Errno {
+ return cast(Errno)_unix_fchmod(fildes, mode)
+}
+
close :: proc(fd: Handle) {
_unix_close(fd)
}
From 8ff6f955715f70f4d369f8c52c8fc5e5c1658cc0 Mon Sep 17 00:00:00 2001
From: Platin21
Date: Mon, 3 Jan 2022 20:40:56 +0100
Subject: [PATCH 061/710] Removes the default create flag
---
core/os/os_darwin.odin | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/core/os/os_darwin.odin b/core/os/os_darwin.odin
index d882dcbbd..b32453a5d 100644
--- a/core/os/os_darwin.odin
+++ b/core/os/os_darwin.odin
@@ -327,7 +327,7 @@ get_last_error_string :: proc() -> string {
return cast(string)_darwin_string_error(cast(c.int)get_last_error());
}
-open :: proc(path: string, flags: int = O_RDWR|O_CREATE, mode: int = 0) -> (Handle, Errno) {
+open :: proc(path: string, flags: int = O_RDWR, mode: int = 0) -> (Handle, Errno) {
cstr := strings.clone_to_cstring(path)
handle := _unix_open(cstr, i32(flags), u16(mode))
delete(cstr)
From f818d0feb1fe0bb421ba27060ea6ff812bf67117 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Mon, 3 Jan 2022 19:43:22 +0000
Subject: [PATCH 062/710] Fix #1344
---
src/llvm_backend_stmt.cpp | 1 +
1 file changed, 1 insertion(+)
diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp
index 016e464b8..20b444058 100644
--- a/src/llvm_backend_stmt.cpp
+++ b/src/llvm_backend_stmt.cpp
@@ -2188,6 +2188,7 @@ void lb_build_stmt(lbProcedure *p, Ast *node) {
lb_emit_defer_stmts(p, lbDeferExit_Branch, block);
}
lb_emit_jump(p, block);
+ lb_start_block(p, lb_create_block(p, "unreachable"));
case_end;
}
}
From f15bb0b424d854e4ba84c14046b56d7b8357eb94 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Mon, 3 Jan 2022 19:45:27 +0000
Subject: [PATCH 063/710] Fix quaternion casting
---
core/math/linalg/glsl/linalg_glsl.odin | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/core/math/linalg/glsl/linalg_glsl.odin b/core/math/linalg/glsl/linalg_glsl.odin
index 3b4976452..053182794 100644
--- a/core/math/linalg/glsl/linalg_glsl.odin
+++ b/core/math/linalg/glsl/linalg_glsl.odin
@@ -1597,7 +1597,7 @@ quatNlerp :: proc "c" (a, b: quat, t: f32) -> (c: quat) {
c.y = a.y + (b.y-a.y)*t
c.z = a.z + (b.z-a.z)*t
c.w = a.w + (b.w-a.w)*t
- return c/builtin.abs(c)
+ return c/quat(builtin.abs(c))
}
quatSlerp :: proc "c" (x, y: quat, t: f32) -> (q: quat) {
@@ -1699,7 +1699,7 @@ dquatNlerp :: proc "c" (a, b: dquat, t: f64) -> (c: dquat) {
c.y = a.y + (b.y-a.y)*t
c.z = a.z + (b.z-a.z)*t
c.w = a.w + (b.w-a.w)*t
- return c/builtin.abs(c)
+ return c/dquat(builtin.abs(c))
}
dquatSlerp :: proc "c" (x, y: dquat, t: f64) -> (q: dquat) {
From 17613185e79b324948c14257f64c388c8e2a52fb Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Tue, 4 Jan 2022 11:44:34 +0000
Subject: [PATCH 064/710] Support struct field tags in odin doc format
---
core/odin/doc-format/doc_format.odin | 4 +++-
src/docs_format.cpp | 3 ++-
src/docs_writer.cpp | 9 +++++++++
3 files changed, 14 insertions(+), 2 deletions(-)
diff --git a/core/odin/doc-format/doc_format.odin b/core/odin/doc-format/doc_format.odin
index c80be2489..83cd89ca2 100644
--- a/core/odin/doc-format/doc_format.odin
+++ b/core/odin/doc-format/doc_format.odin
@@ -11,7 +11,7 @@ String :: distinct Array(byte)
Version_Type_Major :: 0
Version_Type_Minor :: 2
-Version_Type_Patch :: 1
+Version_Type_Patch :: 2
Version_Type :: struct {
major, minor, patch: u8,
@@ -242,6 +242,8 @@ Type :: struct {
polymorphic_params: Type_Index,
// Used By: .Struct, .Union
where_clauses: Array(String),
+ // Used By: .Struct
+ tags: Array(String),
}
Type_Flags_Basic :: distinct bit_set[Type_Flag_Basic; u32le]
diff --git a/src/docs_format.cpp b/src/docs_format.cpp
index 1c3af6257..5cfac4817 100644
--- a/src/docs_format.cpp
+++ b/src/docs_format.cpp
@@ -15,7 +15,7 @@ struct OdinDocVersionType {
#define OdinDocVersionType_Major 0
#define OdinDocVersionType_Minor 2
-#define OdinDocVersionType_Patch 1
+#define OdinDocVersionType_Patch 2
struct OdinDocHeaderBase {
u8 magic[8];
@@ -137,6 +137,7 @@ struct OdinDocType {
OdinDocArray entities;
OdinDocTypeIndex polmorphic_params;
OdinDocArray where_clauses;
+ OdinDocArray tags; // struct field tags
};
struct OdinDocAttribute {
diff --git a/src/docs_writer.cpp b/src/docs_writer.cpp
index e8e8892ec..56ad0561e 100644
--- a/src/docs_writer.cpp
+++ b/src/docs_writer.cpp
@@ -598,6 +598,15 @@ OdinDocTypeIndex odin_doc_type(OdinDocWriter *w, Type *type) {
}
doc_type.where_clauses = odin_doc_where_clauses(w, st->where_clauses);
}
+
+ auto tags = array_make(heap_allocator(), type->Struct.fields.count);
+ defer (array_free(&tags));
+
+ for_array(i, type->Struct.fields) {
+ tags[i] = odin_doc_write_string(w, type->Struct.tags[i]);
+ }
+
+ doc_type.tags = odin_write_slice(w, tags.data, tags.count);
}
break;
case Type_Union:
From 72862ce30d55891f1b04d3aadd085d7822f1b960 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Tue, 4 Jan 2022 11:48:18 +0000
Subject: [PATCH 065/710] Fix minor typo in c/frontend/preprocess
---
core/c/frontend/preprocessor/preprocess.odin | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/core/c/frontend/preprocessor/preprocess.odin b/core/c/frontend/preprocessor/preprocess.odin
index 62b4183bc..9651cc81c 100644
--- a/core/c/frontend/preprocessor/preprocess.odin
+++ b/core/c/frontend/preprocessor/preprocess.odin
@@ -956,7 +956,7 @@ substitute_token :: proc(cpp: ^Preprocessor, tok: ^Token, args: ^Macro_Arg) -> ^
continue
}
- if tok.lit == "__VA__OPT__" && tok.next.lit == "(" {
+ if tok.lit == "__VA_OPT__" && tok.next.lit == "(" {
opt_arg := read_macro_arg_one(cpp, &tok, tok.next.next, true)
if has_varargs(args) {
for t := opt_arg.tok; t.kind != .EOF; t = t.next {
From 8c9597b24bc7397143b1a9039362be1c7ae53aeb Mon Sep 17 00:00:00 2001
From: Tyler Erickson
Date: Tue, 4 Jan 2022 16:45:16 -0800
Subject: [PATCH 066/710] add schar to core:c and core:c/libc
---
core/c/c.odin | 2 ++
core/c/libc/types.odin | 2 ++
2 files changed, 4 insertions(+)
diff --git a/core/c/c.odin b/core/c/c.odin
index d135fa93c..139d9920a 100644
--- a/core/c/c.odin
+++ b/core/c/c.odin
@@ -3,6 +3,8 @@ package c
import builtin "core:builtin"
char :: builtin.u8 // assuming -funsigned-char
+
+schar :: builtin.i8
short :: builtin.i16
int :: builtin.i32
long :: builtin.i32 when (ODIN_OS == "windows" || size_of(builtin.rawptr) == 4) else builtin.i64
diff --git a/core/c/libc/types.odin b/core/c/libc/types.odin
index 7199cf57b..a49e52fb6 100644
--- a/core/c/libc/types.odin
+++ b/core/c/libc/types.odin
@@ -3,6 +3,8 @@ package libc
import "core:c"
char :: c.char // assuming -funsigned-char
+
+schar :: c.schar
short :: c.short
int :: c.int
long :: c.long
From 7a14acaa01d37ca02597bf40df00f7d62903a291 Mon Sep 17 00:00:00 2001
From: Platin21
Date: Wed, 5 Jan 2022 16:49:58 +0100
Subject: [PATCH 067/710] Fixes syscall intrinsic on macOS they use a slightly
different section + register for the id
---
src/llvm_backend_proc.cpp | 59 ++++++++++++++++++++++++++-------------
1 file changed, 40 insertions(+), 19 deletions(-)
diff --git a/src/llvm_backend_proc.cpp b/src/llvm_backend_proc.cpp
index 84fddd9e2..50aa5f6db 100644
--- a/src/llvm_backend_proc.cpp
+++ b/src/llvm_backend_proc.cpp
@@ -2058,26 +2058,47 @@ lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValue const &tv,
break;
case TargetArch_arm64:
{
- GB_ASSERT(arg_count <= 7);
-
- char asm_string[] = "svc #0";
- gbString constraints = gb_string_make(heap_allocator(), "={x0}");
- for (unsigned i = 0; i < arg_count; i++) {
- constraints = gb_string_appendc(constraints, ",{");
- static char const *regs[] = {
- "x8",
- "x0",
- "x1",
- "x2",
- "x3",
- "x4",
- "x5",
- };
- constraints = gb_string_appendc(constraints, regs[i]);
- constraints = gb_string_appendc(constraints, "}");
- }
+ GB_ASSERT(arg_count <= 7);
+
+ if(build_context.metrics.os == TargetOs_darwin) {
+ char asm_string[] = "svc #0x80";
+ gbString constraints = gb_string_make(heap_allocator(), "={x0}");
+ for (unsigned i = 0; i < arg_count; i++) {
+ constraints = gb_string_appendc(constraints, ",{");
+ static char const *regs[] = {
+ "x16",
+ "x0",
+ "x1",
+ "x2",
+ "x3",
+ "x4",
+ "x5",
+ };
+ constraints = gb_string_appendc(constraints, regs[i]);
+ constraints = gb_string_appendc(constraints, "}");
+ }
- inline_asm = llvm_get_inline_asm(func_type, make_string_c(asm_string), make_string_c(constraints));
+ inline_asm = llvm_get_inline_asm(func_type, make_string_c(asm_string), make_string_c(constraints));
+ } else {
+ char asm_string[] = "svc #0";
+ gbString constraints = gb_string_make(heap_allocator(), "={x0}");
+ for (unsigned i = 0; i < arg_count; i++) {
+ constraints = gb_string_appendc(constraints, ",{");
+ static char const *regs[] = {
+ "x8",
+ "x0",
+ "x1",
+ "x2",
+ "x3",
+ "x4",
+ "x5",
+ };
+ constraints = gb_string_appendc(constraints, regs[i]);
+ constraints = gb_string_appendc(constraints, "}");
+ }
+
+ inline_asm = llvm_get_inline_asm(func_type, make_string_c(asm_string), make_string_c(constraints));
+ }
}
break;
default:
From 566a7508990632726a181b12459538b9898bc72e Mon Sep 17 00:00:00 2001
From: Jeroen van Rijn
Date: Fri, 7 Jan 2022 06:12:00 +0100
Subject: [PATCH 068/710] Fix unused imports.
---
core/sort/map.odin | 3 +++
1 file changed, 3 insertions(+)
diff --git a/core/sort/map.odin b/core/sort/map.odin
index dff2dced3..32f5e09a2 100644
--- a/core/sort/map.odin
+++ b/core/sort/map.odin
@@ -4,6 +4,9 @@ import "core:intrinsics"
import "core:runtime"
import "core:slice"
+_ :: runtime
+_ :: slice
+
map_entries_by_key :: proc(m: ^$M/map[$K]$V, loc := #caller_location) where intrinsics.type_is_ordered(K) {
Entry :: struct {
hash: uintptr,
From 773cfac449a1a4a46795cc3345245fbd98b32cbe Mon Sep 17 00:00:00 2001
From: Naboris
Date: Sat, 8 Jan 2022 09:39:26 +0100
Subject: [PATCH 069/710] fix typo
---
core/unicode/utf16/utf16.odin | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/core/unicode/utf16/utf16.odin b/core/unicode/utf16/utf16.odin
index 2e349640e..6bdd6558a 100644
--- a/core/unicode/utf16/utf16.odin
+++ b/core/unicode/utf16/utf16.odin
@@ -117,9 +117,9 @@ decode_to_utf8 :: proc(d: []byte, s: []u16) -> (n: int) {
switch c := s[i]; {
case c < _surr1, _surr3 <= c:
r = rune(c)
- case _surr1 <= r && r < _surr2 && i+1 < len(s) &&
+ case _surr1 <= c && c < _surr2 && i+1 < len(s) &&
_surr2 <= s[i+1] && s[i+1] < _surr3:
- r = decode_surrogate_pair(rune(r), rune(s[i+1]))
+ r = decode_surrogate_pair(rune(c), rune(s[i+1]))
i += 1
}
From 1cff72ad6240e2ea211a0d3c4867a6671364e4e4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Wojciech=20Bog=C3=B3cki?=
Date: Sun, 9 Jan 2022 22:43:12 +0800
Subject: [PATCH 070/710] Fix link to Odin blog
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index b1d0a39a1..5b8c25492 100644
--- a/README.md
+++ b/README.md
@@ -84,7 +84,7 @@ The official Odin Language specification.
### Articles
-#### [The Odin Blog](https://odin-lang.org/blog)
+#### [The Odin Blog](https://odin-lang.org/news/)
The official blog of the Odin programming language, featuring announcements, news, and in-depth articles by the Odin team and guests.
From af612bc7e9ee96a73d4f766340249385979dd3c9 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Mon, 10 Jan 2022 11:32:27 +0000
Subject: [PATCH 071/710] Update matrix types to be the native Odin `matrix`
types
---
core/math/linalg/general.odin | 73 +-
core/math/linalg/specific.odin | 1063 ++++++++---------
.../linalg/specific_euler_angles_f16.odin | 1042 ++++++++--------
.../linalg/specific_euler_angles_f32.odin | 1042 ++++++++--------
.../linalg/specific_euler_angles_f64.odin | 1042 ++++++++--------
core/math/linalg/swizzle.odin | 5 +
6 files changed, 2103 insertions(+), 2164 deletions(-)
diff --git a/core/math/linalg/general.odin b/core/math/linalg/general.odin
index 6c594945f..b0572c0d3 100644
--- a/core/math/linalg/general.odin
+++ b/core/math/linalg/general.odin
@@ -1,6 +1,7 @@
package linalg
import "core:math"
+import "core:builtin"
import "core:intrinsics"
// Generic
@@ -60,14 +61,7 @@ quaternion256_dot :: proc(a, b: $T/quaternion256) -> (c: f64) {
dot :: proc{scalar_dot, vector_dot, quaternion64_dot, quaternion128_dot, quaternion256_dot}
inner_product :: dot
-outer_product :: proc(a: $A/[$M]$E, b: $B/[$N]E) -> (out: [M][N]E) where IS_NUMERIC(E) #no_bounds_check {
- for i in 0.. Q where IS_QUATERNION(Q) {
return conj(q) * quaternion(1.0/dot(q, q), 0, 0, 0)
@@ -163,65 +157,28 @@ identity :: proc($T: typeid/[$N][N]$E) -> (m: T) #no_bounds_check {
return m
}
-trace :: proc(m: $T/[$N][N]$E) -> (tr: E) {
- for i in 0.. (m: (T when N == M else [M][N]E)) #no_bounds_check {
- for j in 0.. (c: M)
+matrix_mul :: proc(a, b: $M/matrix[$N, N]$E) -> (c: M)
where !IS_ARRAY(E), IS_NUMERIC(E) #no_bounds_check {
- for i in 0.. (c: M)
+matrix_comp_mul :: proc(a, b: $M/matrix[$I, $J]$E) -> (c: M)
where !IS_ARRAY(E), IS_NUMERIC(E) #no_bounds_check {
- for j in 0.. (c: [K][I]E)
+matrix_mul_differ :: proc(a: $A/matrix[$I, $J]$E, b: $B/matrix[J, $K]E) -> (c: matrix[I, K]E)
where !IS_ARRAY(E), IS_NUMERIC(E), I != K #no_bounds_check {
- for k in 0.. (c: B)
+matrix_mul_vector :: proc(a: $A/matrix[$I, $J]$E, b: $B/[J]E) -> (c: B)
where !IS_ARRAY(E), IS_NUMERIC(E) #no_bounds_check {
- for i in 0.. Q where IS_QUATERNION(Q) {
@@ -270,8 +227,8 @@ mul :: proc{
vector_to_ptr :: proc(v: ^$V/[$N]$E) -> ^E where IS_NUMERIC(E), N > 0 #no_bounds_check {
return &v[0]
}
-matrix_to_ptr :: proc(m: ^$A/[$I][$J]$E) -> ^E where IS_NUMERIC(E), I > 0, J > 0 #no_bounds_check {
- return &m[0][0]
+matrix_to_ptr :: proc(m: ^$A/matrix[$I, $J]$E) -> ^E where IS_NUMERIC(E), I > 0, J > 0 #no_bounds_check {
+ return &m[0, 0]
}
to_ptr :: proc{vector_to_ptr, matrix_to_ptr}
@@ -357,6 +314,6 @@ to_uint :: #force_inline proc(v: $A/[$N]$T) -> [N]uint { return array_cast(v, ui
to_complex32 :: #force_inline proc(v: $A/[$N]$T) -> [N]complex32 { return array_cast(v, complex32) }
to_complex64 :: #force_inline proc(v: $A/[$N]$T) -> [N]complex64 { return array_cast(v, complex64) }
to_complex128 :: #force_inline proc(v: $A/[$N]$T) -> [N]complex128 { return array_cast(v, complex128) }
-to_quaternion64 :: #force_inline proc(v: $A/[$N]$T) -> [N]quaternion64 { return array_cast(v, quaternion64) }
+to_quaternion64 :: #force_inline proc(v: $A/[$N]$T) -> [N]quaternion64 { return array_cast(v, quaternion64) }
to_quaternion128 :: #force_inline proc(v: $A/[$N]$T) -> [N]quaternion128 { return array_cast(v, quaternion128) }
to_quaternion256 :: #force_inline proc(v: $A/[$N]$T) -> [N]quaternion256 { return array_cast(v, quaternion256) }
diff --git a/core/math/linalg/specific.odin b/core/math/linalg/specific.odin
index 5cb68e3a8..cb007bd91 100644
--- a/core/math/linalg/specific.odin
+++ b/core/math/linalg/specific.odin
@@ -1,5 +1,6 @@
package linalg
+import "core:builtin"
import "core:math"
F16_EPSILON :: 1e-3
@@ -10,25 +11,25 @@ Vector2f16 :: distinct [2]f16
Vector3f16 :: distinct [3]f16
Vector4f16 :: distinct [4]f16
-Matrix1x1f16 :: distinct [1][1]f16
-Matrix1x2f16 :: distinct [1][2]f16
-Matrix1x3f16 :: distinct [1][3]f16
-Matrix1x4f16 :: distinct [1][4]f16
+Matrix1x1f16 :: distinct matrix[1, 1]f16
+Matrix1x2f16 :: distinct matrix[1, 2]f16
+Matrix1x3f16 :: distinct matrix[1, 3]f16
+Matrix1x4f16 :: distinct matrix[1, 4]f16
-Matrix2x1f16 :: distinct [2][1]f16
-Matrix2x2f16 :: distinct [2][2]f16
-Matrix2x3f16 :: distinct [2][3]f16
-Matrix2x4f16 :: distinct [2][4]f16
+Matrix2x1f16 :: distinct matrix[2, 1]f16
+Matrix2x2f16 :: distinct matrix[2, 2]f16
+Matrix2x3f16 :: distinct matrix[2, 3]f16
+Matrix2x4f16 :: distinct matrix[2, 4]f16
-Matrix3x1f16 :: distinct [3][1]f16
-Matrix3x2f16 :: distinct [3][2]f16
-Matrix3x3f16 :: distinct [3][3]f16
-Matrix3x4f16 :: distinct [3][4]f16
+Matrix3x1f16 :: distinct matrix[3, 1]f16
+Matrix3x2f16 :: distinct matrix[3, 2]f16
+Matrix3x3f16 :: distinct matrix[3, 3]f16
+Matrix3x4f16 :: distinct matrix[3, 4]f16
-Matrix4x1f16 :: distinct [4][1]f16
-Matrix4x2f16 :: distinct [4][2]f16
-Matrix4x3f16 :: distinct [4][3]f16
-Matrix4x4f16 :: distinct [4][4]f16
+Matrix4x1f16 :: distinct matrix[4, 1]f16
+Matrix4x2f16 :: distinct matrix[4, 2]f16
+Matrix4x3f16 :: distinct matrix[4, 3]f16
+Matrix4x4f16 :: distinct matrix[4, 4]f16
Matrix1f16 :: Matrix1x1f16
Matrix2f16 :: Matrix2x2f16
@@ -39,25 +40,25 @@ Vector2f32 :: distinct [2]f32
Vector3f32 :: distinct [3]f32
Vector4f32 :: distinct [4]f32
-Matrix1x1f32 :: distinct [1][1]f32
-Matrix1x2f32 :: distinct [1][2]f32
-Matrix1x3f32 :: distinct [1][3]f32
-Matrix1x4f32 :: distinct [1][4]f32
+Matrix1x1f32 :: distinct matrix[1, 1]f32
+Matrix1x2f32 :: distinct matrix[1, 2]f32
+Matrix1x3f32 :: distinct matrix[1, 3]f32
+Matrix1x4f32 :: distinct matrix[1, 4]f32
-Matrix2x1f32 :: distinct [2][1]f32
-Matrix2x2f32 :: distinct [2][2]f32
-Matrix2x3f32 :: distinct [2][3]f32
-Matrix2x4f32 :: distinct [2][4]f32
+Matrix2x1f32 :: distinct matrix[2, 1]f32
+Matrix2x2f32 :: distinct matrix[2, 2]f32
+Matrix2x3f32 :: distinct matrix[2, 3]f32
+Matrix2x4f32 :: distinct matrix[2, 4]f32
-Matrix3x1f32 :: distinct [3][1]f32
-Matrix3x2f32 :: distinct [3][2]f32
-Matrix3x3f32 :: distinct [3][3]f32
-Matrix3x4f32 :: distinct [3][4]f32
+Matrix3x1f32 :: distinct matrix[3, 1]f32
+Matrix3x2f32 :: distinct matrix[3, 2]f32
+Matrix3x3f32 :: distinct matrix[3, 3]f32
+Matrix3x4f32 :: distinct matrix[3, 4]f32
-Matrix4x1f32 :: distinct [4][1]f32
-Matrix4x2f32 :: distinct [4][2]f32
-Matrix4x3f32 :: distinct [4][3]f32
-Matrix4x4f32 :: distinct [4][4]f32
+Matrix4x1f32 :: distinct matrix[4, 1]f32
+Matrix4x2f32 :: distinct matrix[4, 2]f32
+Matrix4x3f32 :: distinct matrix[4, 3]f32
+Matrix4x4f32 :: distinct matrix[4, 4]f32
Matrix1f32 :: Matrix1x1f32
Matrix2f32 :: Matrix2x2f32
@@ -68,25 +69,25 @@ Vector2f64 :: distinct [2]f64
Vector3f64 :: distinct [3]f64
Vector4f64 :: distinct [4]f64
-Matrix1x1f64 :: distinct [1][1]f64
-Matrix1x2f64 :: distinct [1][2]f64
-Matrix1x3f64 :: distinct [1][3]f64
-Matrix1x4f64 :: distinct [1][4]f64
+Matrix1x1f64 :: distinct matrix[1, 1]f64
+Matrix1x2f64 :: distinct matrix[1, 2]f64
+Matrix1x3f64 :: distinct matrix[1, 3]f64
+Matrix1x4f64 :: distinct matrix[1, 4]f64
-Matrix2x1f64 :: distinct [2][1]f64
-Matrix2x2f64 :: distinct [2][2]f64
-Matrix2x3f64 :: distinct [2][3]f64
-Matrix2x4f64 :: distinct [2][4]f64
+Matrix2x1f64 :: distinct matrix[2, 1]f64
+Matrix2x2f64 :: distinct matrix[2, 2]f64
+Matrix2x3f64 :: distinct matrix[2, 3]f64
+Matrix2x4f64 :: distinct matrix[2, 4]f64
-Matrix3x1f64 :: distinct [3][1]f64
-Matrix3x2f64 :: distinct [3][2]f64
-Matrix3x3f64 :: distinct [3][3]f64
-Matrix3x4f64 :: distinct [3][4]f64
+Matrix3x1f64 :: distinct matrix[3, 1]f64
+Matrix3x2f64 :: distinct matrix[3, 2]f64
+Matrix3x3f64 :: distinct matrix[3, 3]f64
+Matrix3x4f64 :: distinct matrix[3, 4]f64
-Matrix4x1f64 :: distinct [4][1]f64
-Matrix4x2f64 :: distinct [4][2]f64
-Matrix4x3f64 :: distinct [4][3]f64
-Matrix4x4f64 :: distinct [4][4]f64
+Matrix4x1f64 :: distinct matrix[4, 1]f64
+Matrix4x2f64 :: distinct matrix[4, 2]f64
+Matrix4x3f64 :: distinct matrix[4, 3]f64
+Matrix4x4f64 :: distinct matrix[4, 4]f64
Matrix1f64 :: Matrix1x1f64
Matrix2f64 :: Matrix2x2f64
@@ -97,20 +98,20 @@ Quaternionf16 :: distinct quaternion64
Quaternionf32 :: distinct quaternion128
Quaternionf64 :: distinct quaternion256
-MATRIX1F16_IDENTITY :: Matrix1f16{{1}}
-MATRIX2F16_IDENTITY :: Matrix2f16{{1, 0}, {0, 1}}
-MATRIX3F16_IDENTITY :: Matrix3f16{{1, 0, 0}, {0, 1, 0}, {0, 0, 1}}
-MATRIX4F16_IDENTITY :: Matrix4f16{{1, 0, 0, 0}, {0, 1, 0, 0}, {0, 0, 1, 0}, {0, 0, 0, 1}}
+MATRIX1F16_IDENTITY :: Matrix1f16(1)
+MATRIX2F16_IDENTITY :: Matrix2f16(1)
+MATRIX3F16_IDENTITY :: Matrix3f16(1)
+MATRIX4F16_IDENTITY :: Matrix4f16(1)
-MATRIX1F32_IDENTITY :: Matrix1f32{{1}}
-MATRIX2F32_IDENTITY :: Matrix2f32{{1, 0}, {0, 1}}
-MATRIX3F32_IDENTITY :: Matrix3f32{{1, 0, 0}, {0, 1, 0}, {0, 0, 1}}
-MATRIX4F32_IDENTITY :: Matrix4f32{{1, 0, 0, 0}, {0, 1, 0, 0}, {0, 0, 1, 0}, {0, 0, 0, 1}}
+MATRIX1F32_IDENTITY :: Matrix1f32(1)
+MATRIX2F32_IDENTITY :: Matrix2f32(1)
+MATRIX3F32_IDENTITY :: Matrix3f32(1)
+MATRIX4F32_IDENTITY :: Matrix4f32(1)
-MATRIX1F64_IDENTITY :: Matrix1f64{{1}}
-MATRIX2F64_IDENTITY :: Matrix2f64{{1, 0}, {0, 1}}
-MATRIX3F64_IDENTITY :: Matrix3f64{{1, 0, 0}, {0, 1, 0}, {0, 0, 1}}
-MATRIX4F64_IDENTITY :: Matrix4f64{{1, 0, 0, 0}, {0, 1, 0, 0}, {0, 0, 1, 0}, {0, 0, 0, 1}}
+MATRIX1F64_IDENTITY :: Matrix1f64(1)
+MATRIX2F64_IDENTITY :: Matrix2f64(1)
+MATRIX3F64_IDENTITY :: Matrix3f64(1)
+MATRIX4F64_IDENTITY :: Matrix4f64(1)
QUATERNIONF16_IDENTITY :: Quaternionf16(1)
QUATERNIONF32_IDENTITY :: Quaternionf32(1)
@@ -558,9 +559,9 @@ quaternion_from_forward_and_up_f16 :: proc(forward, up: Vector3f16) -> Quaternio
s := normalize(cross(f, up))
u := cross(s, f)
m := Matrix3f16{
- {+s.x, +u.x, -f.x},
- {+s.y, +u.y, -f.y},
- {+s.z, +u.z, -f.z},
+ +s.x, +s.y, +s.z,
+ +u.x, +u.y, +u.z,
+ -f.x, -f.y, -f.z,
}
tr := trace(m)
@@ -571,26 +572,26 @@ quaternion_from_forward_and_up_f16 :: proc(forward, up: Vector3f16) -> Quaternio
case tr > 0:
S := 2 * math.sqrt(1 + tr)
q.w = 0.25 * S
- q.x = (m[2][1] - m[1][2]) / S
- q.y = (m[0][2] - m[2][0]) / S
- q.z = (m[1][0] - m[0][1]) / S
- case (m[0][0] > m[1][1]) && (m[0][0] > m[2][2]):
- S := 2 * math.sqrt(1 + m[0][0] - m[1][1] - m[2][2])
- q.w = (m[2][1] - m[1][2]) / S
+ q.x = (m[1, 2] - m[2, 1]) / S
+ q.y = (m[2, 0] - m[0, 2]) / S
+ q.z = (m[0, 1] - m[1, 0]) / S
+ case (m[0, 0] > m[1, 1]) && (m[0, 0] > m[2, 2]):
+ S := 2 * math.sqrt(1 + m[0, 0] - m[1, 1] - m[2, 2])
+ q.w = (m[1, 2] - m[2, 1]) / S
q.x = 0.25 * S
- q.y = (m[0][1] + m[1][0]) / S
- q.z = (m[0][2] + m[2][0]) / S
- case m[1][1] > m[2][2]:
- S := 2 * math.sqrt(1 + m[1][1] - m[0][0] - m[2][2])
- q.w = (m[0][2] - m[2][0]) / S
- q.x = (m[0][1] + m[1][0]) / S
+ q.y = (m[1, 0] + m[0, 1]) / S
+ q.z = (m[2, 0] + m[0, 2]) / S
+ case m[1, 1] > m[2, 2]:
+ S := 2 * math.sqrt(1 + m[1, 1] - m[0, 0] - m[2, 2])
+ q.w = (m[2, 0] - m[0, 2]) / S
+ q.x = (m[1, 0] + m[0, 1]) / S
q.y = 0.25 * S
- q.z = (m[1][2] + m[2][1]) / S
+ q.z = (m[2, 1] + m[1, 2]) / S
case:
- S := 2 * math.sqrt(1 + m[2][2] - m[0][0] - m[1][1])
- q.w = (m[1][0] - m[0][1]) / S
- q.x = (m[0][2] - m[2][0]) / S
- q.y = (m[1][2] + m[2][1]) / S
+ S := 2 * math.sqrt(1 + m[2, 2] - m[0, 0] - m[1, 1])
+ q.w = (m[0, 1] - m[1, 0]) / S
+ q.x = (m[2, 0] - m[0, 2]) / S
+ q.y = (m[2, 1] + m[1, 2]) / S
q.z = 0.25 * S
}
@@ -601,9 +602,9 @@ quaternion_from_forward_and_up_f32 :: proc(forward, up: Vector3f32) -> Quaternio
s := normalize(cross(f, up))
u := cross(s, f)
m := Matrix3f32{
- {+s.x, +u.x, -f.x},
- {+s.y, +u.y, -f.y},
- {+s.z, +u.z, -f.z},
+ +s.x, +s.y, +s.z,
+ +u.x, +u.y, +u.z,
+ -f.x, -f.y, -f.z,
}
tr := trace(m)
@@ -614,26 +615,26 @@ quaternion_from_forward_and_up_f32 :: proc(forward, up: Vector3f32) -> Quaternio
case tr > 0:
S := 2 * math.sqrt(1 + tr)
q.w = 0.25 * S
- q.x = (m[2][1] - m[1][2]) / S
- q.y = (m[0][2] - m[2][0]) / S
- q.z = (m[1][0] - m[0][1]) / S
- case (m[0][0] > m[1][1]) && (m[0][0] > m[2][2]):
- S := 2 * math.sqrt(1 + m[0][0] - m[1][1] - m[2][2])
- q.w = (m[2][1] - m[1][2]) / S
+ q.x = (m[1, 2] - m[2, 1]) / S
+ q.y = (m[2, 0] - m[0, 2]) / S
+ q.z = (m[0, 1] - m[1, 0]) / S
+ case (m[0, 0] > m[1, 1]) && (m[0, 0] > m[2, 2]):
+ S := 2 * math.sqrt(1 + m[0, 0] - m[1, 1] - m[2, 2])
+ q.w = (m[1, 2] - m[2, 1]) / S
q.x = 0.25 * S
- q.y = (m[0][1] + m[1][0]) / S
- q.z = (m[0][2] + m[2][0]) / S
- case m[1][1] > m[2][2]:
- S := 2 * math.sqrt(1 + m[1][1] - m[0][0] - m[2][2])
- q.w = (m[0][2] - m[2][0]) / S
- q.x = (m[0][1] + m[1][0]) / S
+ q.y = (m[1, 0] + m[0, 1]) / S
+ q.z = (m[2, 0] + m[0, 2]) / S
+ case m[1, 1] > m[2, 2]:
+ S := 2 * math.sqrt(1 + m[1, 1] - m[0, 0] - m[2, 2])
+ q.w = (m[2, 0] - m[0, 2]) / S
+ q.x = (m[1, 0] + m[0, 1]) / S
q.y = 0.25 * S
- q.z = (m[1][2] + m[2][1]) / S
+ q.z = (m[2, 1] + m[1, 2]) / S
case:
- S := 2 * math.sqrt(1 + m[2][2] - m[0][0] - m[1][1])
- q.w = (m[1][0] - m[0][1]) / S
- q.x = (m[0][2] - m[2][0]) / S
- q.y = (m[1][2] + m[2][1]) / S
+ S := 2 * math.sqrt(1 + m[2, 2] - m[0, 0] - m[1, 1])
+ q.w = (m[0, 1] - m[1, 0]) / S
+ q.x = (m[2, 0] - m[0, 2]) / S
+ q.y = (m[2, 1] + m[1, 2]) / S
q.z = 0.25 * S
}
@@ -644,9 +645,9 @@ quaternion_from_forward_and_up_f64 :: proc(forward, up: Vector3f64) -> Quaternio
s := normalize(cross(f, up))
u := cross(s, f)
m := Matrix3f64{
- {+s.x, +u.x, -f.x},
- {+s.y, +u.y, -f.y},
- {+s.z, +u.z, -f.z},
+ +s.x, +s.y, +s.z,
+ +u.x, +u.y, +u.z,
+ -f.x, -f.y, -f.z,
}
tr := trace(m)
@@ -657,26 +658,26 @@ quaternion_from_forward_and_up_f64 :: proc(forward, up: Vector3f64) -> Quaternio
case tr > 0:
S := 2 * math.sqrt(1 + tr)
q.w = 0.25 * S
- q.x = (m[2][1] - m[1][2]) / S
- q.y = (m[0][2] - m[2][0]) / S
- q.z = (m[1][0] - m[0][1]) / S
- case (m[0][0] > m[1][1]) && (m[0][0] > m[2][2]):
- S := 2 * math.sqrt(1 + m[0][0] - m[1][1] - m[2][2])
- q.w = (m[2][1] - m[1][2]) / S
+ q.x = (m[1, 2] - m[2, 1]) / S
+ q.y = (m[2, 0] - m[0, 2]) / S
+ q.z = (m[0, 1] - m[1, 0]) / S
+ case (m[0, 0] > m[1, 1]) && (m[0, 0] > m[2, 2]):
+ S := 2 * math.sqrt(1 + m[0, 0] - m[1, 1] - m[2, 2])
+ q.w = (m[1, 2] - m[2, 1]) / S
q.x = 0.25 * S
- q.y = (m[0][1] + m[1][0]) / S
- q.z = (m[0][2] + m[2][0]) / S
- case m[1][1] > m[2][2]:
- S := 2 * math.sqrt(1 + m[1][1] - m[0][0] - m[2][2])
- q.w = (m[0][2] - m[2][0]) / S
- q.x = (m[0][1] + m[1][0]) / S
+ q.y = (m[1, 0] + m[0, 1]) / S
+ q.z = (m[2, 0] + m[0, 2]) / S
+ case m[1, 1] > m[2, 2]:
+ S := 2 * math.sqrt(1 + m[1, 1] - m[0, 0] - m[2, 2])
+ q.w = (m[2, 0] - m[0, 2]) / S
+ q.x = (m[1, 0] + m[0, 1]) / S
q.y = 0.25 * S
- q.z = (m[1][2] + m[2][1]) / S
+ q.z = (m[2, 1] + m[1, 2]) / S
case:
- S := 2 * math.sqrt(1 + m[2][2] - m[0][0] - m[1][1])
- q.w = (m[1][0] - m[0][1]) / S
- q.x = (m[0][2] - m[2][0]) / S
- q.y = (m[1][2] + m[2][1]) / S
+ S := 2 * math.sqrt(1 + m[2, 2] - m[0, 0] - m[1, 1])
+ q.w = (m[0, 1] - m[1, 0]) / S
+ q.x = (m[2, 0] - m[0, 2]) / S
+ q.y = (m[2, 1] + m[1, 2]) / S
q.z = 0.25 * S
}
@@ -842,23 +843,23 @@ quaternion_squad :: proc{
quaternion_from_matrix4_f16 :: proc(m: Matrix4f16) -> (q: Quaternionf16) {
m3: Matrix3f16 = ---
- m3[0][0], m3[0][1], m3[0][2] = m[0][0], m[0][1], m[0][2]
- m3[1][0], m3[1][1], m3[1][2] = m[1][0], m[1][1], m[1][2]
- m3[2][0], m3[2][1], m3[2][2] = m[2][0], m[2][1], m[2][2]
+ m3[0, 0], m3[1, 0], m3[2, 0] = m[0, 0], m[1, 0], m[2, 0]
+ m3[0, 1], m3[1, 1], m3[2, 1] = m[0, 1], m[1, 1], m[2, 1]
+ m3[0, 2], m3[1, 2], m3[2, 2] = m[0, 2], m[1, 2], m[2, 2]
return quaternion_from_matrix3(m3)
}
quaternion_from_matrix4_f32 :: proc(m: Matrix4f32) -> (q: Quaternionf32) {
m3: Matrix3f32 = ---
- m3[0][0], m3[0][1], m3[0][2] = m[0][0], m[0][1], m[0][2]
- m3[1][0], m3[1][1], m3[1][2] = m[1][0], m[1][1], m[1][2]
- m3[2][0], m3[2][1], m3[2][2] = m[2][0], m[2][1], m[2][2]
+ m3[0, 0], m3[1, 0], m3[2, 0] = m[0, 0], m[1, 0], m[2, 0]
+ m3[0, 1], m3[1, 1], m3[2, 1] = m[0, 1], m[1, 1], m[2, 1]
+ m3[0, 2], m3[1, 2], m3[2, 2] = m[0, 2], m[1, 2], m[2, 2]
return quaternion_from_matrix3(m3)
}
quaternion_from_matrix4_f64 :: proc(m: Matrix4f64) -> (q: Quaternionf64) {
m3: Matrix3f64 = ---
- m3[0][0], m3[0][1], m3[0][2] = m[0][0], m[0][1], m[0][2]
- m3[1][0], m3[1][1], m3[1][2] = m[1][0], m[1][1], m[1][2]
- m3[2][0], m3[2][1], m3[2][2] = m[2][0], m[2][1], m[2][2]
+ m3[0, 0], m3[1, 0], m3[2, 0] = m[0, 0], m[1, 0], m[2, 0]
+ m3[0, 1], m3[1, 1], m3[2, 1] = m[0, 1], m[1, 1], m[2, 1]
+ m3[0, 2], m3[1, 2], m3[2, 2] = m[0, 2], m[1, 2], m[2, 2]
return quaternion_from_matrix3(m3)
}
quaternion_from_matrix4 :: proc{
@@ -869,10 +870,10 @@ quaternion_from_matrix4 :: proc{
quaternion_from_matrix3_f16 :: proc(m: Matrix3f16) -> (q: Quaternionf16) {
- four_x_squared_minus_1 := m[0][0] - m[1][1] - m[2][2]
- four_y_squared_minus_1 := m[1][1] - m[0][0] - m[2][2]
- four_z_squared_minus_1 := m[2][2] - m[0][0] - m[1][1]
- four_w_squared_minus_1 := m[0][0] + m[1][1] + m[2][2]
+ four_x_squared_minus_1 := m[0, 0] - m[1, 1] - m[2, 2]
+ four_y_squared_minus_1 := m[1, 1] - m[0, 0] - m[2, 2]
+ four_z_squared_minus_1 := m[2, 2] - m[0, 0] - m[1, 1]
+ four_w_squared_minus_1 := m[0, 0] + m[1, 1] + m[2, 2]
biggest_index := 0
four_biggest_squared_minus_1 := four_w_squared_minus_1
@@ -896,32 +897,32 @@ quaternion_from_matrix3_f16 :: proc(m: Matrix3f16) -> (q: Quaternionf16) {
switch biggest_index {
case 0:
q.w = biggest_val
- q.x = (m[1][2] - m[2][1]) * mult
- q.y = (m[2][0] - m[0][2]) * mult
- q.z = (m[0][1] - m[1][0]) * mult
+ q.x = (m[2, 1] - m[1, 2]) * mult
+ q.y = (m[0, 2] - m[2, 0]) * mult
+ q.z = (m[1, 0] - m[0, 1]) * mult
case 1:
- q.w = (m[1][2] - m[2][1]) * mult
+ q.w = (m[2, 1] - m[1, 2]) * mult
q.x = biggest_val
- q.y = (m[0][1] + m[1][0]) * mult
- q.z = (m[2][0] + m[0][2]) * mult
+ q.y = (m[1, 0] + m[0, 1]) * mult
+ q.z = (m[0, 2] + m[2, 0]) * mult
case 2:
- q.w = (m[2][0] - m[0][2]) * mult
- q.x = (m[0][1] + m[1][0]) * mult
+ q.w = (m[0, 2] - m[2, 0]) * mult
+ q.x = (m[1, 0] + m[0, 1]) * mult
q.y = biggest_val
- q.z = (m[1][2] + m[2][1]) * mult
+ q.z = (m[2, 1] + m[1, 2]) * mult
case 3:
- q.w = (m[0][1] - m[1][0]) * mult
- q.x = (m[2][0] + m[0][2]) * mult
- q.y = (m[1][2] + m[2][1]) * mult
+ q.w = (m[1, 0] - m[0, 1]) * mult
+ q.x = (m[0, 2] + m[2, 0]) * mult
+ q.y = (m[2, 1] + m[1, 2]) * mult
q.z = biggest_val
}
return
}
quaternion_from_matrix3_f32 :: proc(m: Matrix3f32) -> (q: Quaternionf32) {
- four_x_squared_minus_1 := m[0][0] - m[1][1] - m[2][2]
- four_y_squared_minus_1 := m[1][1] - m[0][0] - m[2][2]
- four_z_squared_minus_1 := m[2][2] - m[0][0] - m[1][1]
- four_w_squared_minus_1 := m[0][0] + m[1][1] + m[2][2]
+ four_x_squared_minus_1 := m[0, 0] - m[1, 1] - m[2, 2]
+ four_y_squared_minus_1 := m[1, 1] - m[0, 0] - m[2, 2]
+ four_z_squared_minus_1 := m[2, 2] - m[0, 0] - m[1, 1]
+ four_w_squared_minus_1 := m[0, 0] + m[1, 1] + m[2, 2]
biggest_index := 0
four_biggest_squared_minus_1 := four_w_squared_minus_1
@@ -945,32 +946,32 @@ quaternion_from_matrix3_f32 :: proc(m: Matrix3f32) -> (q: Quaternionf32) {
switch biggest_index {
case 0:
q.w = biggest_val
- q.x = (m[1][2] - m[2][1]) * mult
- q.y = (m[2][0] - m[0][2]) * mult
- q.z = (m[0][1] - m[1][0]) * mult
+ q.x = (m[2, 1] - m[1, 2]) * mult
+ q.y = (m[0, 2] - m[2, 0]) * mult
+ q.z = (m[1, 0] - m[0, 1]) * mult
case 1:
- q.w = (m[1][2] - m[2][1]) * mult
+ q.w = (m[2, 1] - m[1, 2]) * mult
q.x = biggest_val
- q.y = (m[0][1] + m[1][0]) * mult
- q.z = (m[2][0] + m[0][2]) * mult
+ q.y = (m[1, 0] + m[0, 1]) * mult
+ q.z = (m[0, 2] + m[2, 0]) * mult
case 2:
- q.w = (m[2][0] - m[0][2]) * mult
- q.x = (m[0][1] + m[1][0]) * mult
+ q.w = (m[0, 2] - m[2, 0]) * mult
+ q.x = (m[1, 0] + m[0, 1]) * mult
q.y = biggest_val
- q.z = (m[1][2] + m[2][1]) * mult
+ q.z = (m[2, 1] + m[1, 2]) * mult
case 3:
- q.w = (m[0][1] - m[1][0]) * mult
- q.x = (m[2][0] + m[0][2]) * mult
- q.y = (m[1][2] + m[2][1]) * mult
+ q.w = (m[1, 0] - m[0, 1]) * mult
+ q.x = (m[0, 2] + m[2, 0]) * mult
+ q.y = (m[2, 1] + m[1, 2]) * mult
q.z = biggest_val
}
return
}
quaternion_from_matrix3_f64 :: proc(m: Matrix3f64) -> (q: Quaternionf64) {
- four_x_squared_minus_1 := m[0][0] - m[1][1] - m[2][2]
- four_y_squared_minus_1 := m[1][1] - m[0][0] - m[2][2]
- four_z_squared_minus_1 := m[2][2] - m[0][0] - m[1][1]
- four_w_squared_minus_1 := m[0][0] + m[1][1] + m[2][2]
+ four_x_squared_minus_1 := m[0, 0] - m[1, 1] - m[2, 2]
+ four_y_squared_minus_1 := m[1, 1] - m[0, 0] - m[2, 2]
+ four_z_squared_minus_1 := m[2, 2] - m[0, 0] - m[1, 1]
+ four_w_squared_minus_1 := m[0, 0] + m[1, 1] + m[2, 2]
biggest_index := 0
four_biggest_squared_minus_1 := four_w_squared_minus_1
@@ -994,23 +995,23 @@ quaternion_from_matrix3_f64 :: proc(m: Matrix3f64) -> (q: Quaternionf64) {
switch biggest_index {
case 0:
q.w = biggest_val
- q.x = (m[1][2] - m[2][1]) * mult
- q.y = (m[2][0] - m[0][2]) * mult
- q.z = (m[0][1] - m[1][0]) * mult
+ q.x = (m[2, 1] - m[1, 2]) * mult
+ q.y = (m[0, 2] - m[2, 0]) * mult
+ q.z = (m[1, 0] - m[0, 1]) * mult
case 1:
- q.w = (m[1][2] - m[2][1]) * mult
+ q.w = (m[2, 1] - m[1, 2]) * mult
q.x = biggest_val
- q.y = (m[0][1] + m[1][0]) * mult
- q.z = (m[2][0] + m[0][2]) * mult
+ q.y = (m[1, 0] + m[0, 1]) * mult
+ q.z = (m[0, 2] + m[2, 0]) * mult
case 2:
- q.w = (m[2][0] - m[0][2]) * mult
- q.x = (m[0][1] + m[1][0]) * mult
+ q.w = (m[0, 2] - m[2, 0]) * mult
+ q.x = (m[1, 0] + m[0, 1]) * mult
q.y = biggest_val
- q.z = (m[1][2] + m[2][1]) * mult
+ q.z = (m[2, 1] + m[1, 2]) * mult
case 3:
- q.w = (m[0][1] - m[1][0]) * mult
- q.x = (m[2][0] + m[0][2]) * mult
- q.y = (m[1][2] + m[2][1]) * mult
+ q.w = (m[1, 0] - m[0, 1]) * mult
+ q.x = (m[0, 2] + m[2, 0]) * mult
+ q.y = (m[2, 1] + m[1, 2]) * mult
q.z = biggest_val
}
return
@@ -1093,30 +1094,30 @@ quaternion_between_two_vector3 :: proc{
matrix2_inverse_transpose_f16 :: proc(m: Matrix2f16) -> (c: Matrix2f16) {
- d := m[0][0]*m[1][1] - m[1][0]*m[0][1]
+ d := m[0, 0]*m[1, 1] - m[0, 1]*m[1, 0]
id := 1.0/d
- c[0][0] = +m[1][1] * id
- c[0][1] = -m[0][1] * id
- c[1][0] = -m[1][0] * id
- c[1][1] = +m[0][0] * id
+ c[0, 0] = +m[1, 1] * id
+ c[1, 0] = -m[1, 0] * id
+ c[0, 1] = -m[0, 1] * id
+ c[1, 1] = +m[0, 0] * id
return c
}
matrix2_inverse_transpose_f32 :: proc(m: Matrix2f32) -> (c: Matrix2f32) {
- d := m[0][0]*m[1][1] - m[1][0]*m[0][1]
+ d := m[0, 0]*m[1, 1] - m[0, 1]*m[1, 0]
id := 1.0/d
- c[0][0] = +m[1][1] * id
- c[0][1] = -m[0][1] * id
- c[1][0] = -m[1][0] * id
- c[1][1] = +m[0][0] * id
+ c[0, 0] = +m[1, 1] * id
+ c[1, 0] = -m[1, 0] * id
+ c[0, 1] = -m[0, 1] * id
+ c[1, 1] = +m[0, 0] * id
return c
}
matrix2_inverse_transpose_f64 :: proc(m: Matrix2f64) -> (c: Matrix2f64) {
- d := m[0][0]*m[1][1] - m[1][0]*m[0][1]
+ d := m[0, 0]*m[1, 1] - m[0, 1]*m[1, 0]
id := 1.0/d
- c[0][0] = +m[1][1] * id
- c[0][1] = -m[0][1] * id
- c[1][0] = -m[1][0] * id
- c[1][1] = +m[0][0] * id
+ c[0, 0] = +m[1, 1] * id
+ c[1, 0] = -m[1, 0] * id
+ c[0, 1] = -m[0, 1] * id
+ c[1, 1] = +m[0, 0] * id
return c
}
matrix2_inverse_transpose :: proc{
@@ -1127,13 +1128,13 @@ matrix2_inverse_transpose :: proc{
matrix2_determinant_f16 :: proc(m: Matrix2f16) -> f16 {
- return m[0][0]*m[1][1] - m[1][0]*m[0][1]
+ return m[0, 0]*m[1, 1] - m[0, 1]*m[1, 0]
}
matrix2_determinant_f32 :: proc(m: Matrix2f32) -> f32 {
- return m[0][0]*m[1][1] - m[1][0]*m[0][1]
+ return m[0, 0]*m[1, 1] - m[0, 1]*m[1, 0]
}
matrix2_determinant_f64 :: proc(m: Matrix2f64) -> f64 {
- return m[0][0]*m[1][1] - m[1][0]*m[0][1]
+ return m[0, 0]*m[1, 1] - m[0, 1]*m[1, 0]
}
matrix2_determinant :: proc{
matrix2_determinant_f16,
@@ -1143,30 +1144,30 @@ matrix2_determinant :: proc{
matrix2_inverse_f16 :: proc(m: Matrix2f16) -> (c: Matrix2f16) {
- d := m[0][0]*m[1][1] - m[1][0]*m[0][1]
+ d := m[0, 0]*m[1, 1] - m[0, 1]*m[1, 0]
id := 1.0/d
- c[0][0] = +m[1][1] * id
- c[1][0] = -m[0][1] * id
- c[0][1] = -m[1][0] * id
- c[1][1] = +m[0][0] * id
+ c[0, 0] = +m[1, 1] * id
+ c[0, 1] = -m[1, 0] * id
+ c[1, 0] = -m[0, 1] * id
+ c[1, 1] = +m[0, 0] * id
return c
}
matrix2_inverse_f32 :: proc(m: Matrix2f32) -> (c: Matrix2f32) {
- d := m[0][0]*m[1][1] - m[1][0]*m[0][1]
+ d := m[0, 0]*m[1, 1] - m[0, 1]*m[1, 0]
id := 1.0/d
- c[0][0] = +m[1][1] * id
- c[1][0] = -m[0][1] * id
- c[0][1] = -m[1][0] * id
- c[1][1] = +m[0][0] * id
+ c[0, 0] = +m[1, 1] * id
+ c[0, 1] = -m[1, 0] * id
+ c[1, 0] = -m[0, 1] * id
+ c[1, 1] = +m[0, 0] * id
return c
}
matrix2_inverse_f64 :: proc(m: Matrix2f64) -> (c: Matrix2f64) {
- d := m[0][0]*m[1][1] - m[1][0]*m[0][1]
+ d := m[0, 0]*m[1, 1] - m[0, 1]*m[1, 0]
id := 1.0/d
- c[0][0] = +m[1][1] * id
- c[1][0] = -m[0][1] * id
- c[0][1] = -m[1][0] * id
- c[1][1] = +m[0][0] * id
+ c[0, 0] = +m[1, 1] * id
+ c[0, 1] = -m[1, 0] * id
+ c[1, 0] = -m[0, 1] * id
+ c[1, 1] = +m[0, 0] * id
return c
}
matrix2_inverse :: proc{
@@ -1177,24 +1178,24 @@ matrix2_inverse :: proc{
matrix2_adjoint_f16 :: proc(m: Matrix2f16) -> (c: Matrix2f16) {
- c[0][0] = +m[1][1]
- c[0][1] = -m[1][0]
- c[1][0] = -m[0][1]
- c[1][1] = +m[0][0]
+ c[0, 0] = +m[1, 1]
+ c[1, 0] = -m[0, 1]
+ c[0, 1] = -m[1, 0]
+ c[1, 1] = +m[0, 0]
return c
}
matrix2_adjoint_f32 :: proc(m: Matrix2f32) -> (c: Matrix2f32) {
- c[0][0] = +m[1][1]
- c[0][1] = -m[1][0]
- c[1][0] = -m[0][1]
- c[1][1] = +m[0][0]
+ c[0, 0] = +m[1, 1]
+ c[1, 0] = -m[0, 1]
+ c[0, 1] = -m[1, 0]
+ c[1, 1] = +m[0, 0]
return c
}
matrix2_adjoint_f64 :: proc(m: Matrix2f64) -> (c: Matrix2f64) {
- c[0][0] = +m[1][1]
- c[0][1] = -m[1][0]
- c[1][0] = -m[0][1]
- c[1][1] = +m[0][0]
+ c[0, 0] = +m[1, 1]
+ c[1, 0] = -m[0, 1]
+ c[0, 1] = -m[1, 0]
+ c[1, 1] = +m[0, 0]
return c
}
matrix2_adjoint :: proc{
@@ -1215,17 +1216,17 @@ matrix3_from_quaternion_f16 :: proc(q: Quaternionf16) -> (m: Matrix3f16) {
qwy := q.w * q.y
qwz := q.w * q.z
- m[0][0] = 1 - 2 * (qyy + qzz)
- m[0][1] = 2 * (qxy + qwz)
- m[0][2] = 2 * (qxz - qwy)
+ m[0, 0] = 1 - 2 * (qyy + qzz)
+ m[1, 0] = 2 * (qxy + qwz)
+ m[2, 0] = 2 * (qxz - qwy)
- m[1][0] = 2 * (qxy - qwz)
- m[1][1] = 1 - 2 * (qxx + qzz)
- m[1][2] = 2 * (qyz + qwx)
+ m[0, 1] = 2 * (qxy - qwz)
+ m[1, 1] = 1 - 2 * (qxx + qzz)
+ m[2, 1] = 2 * (qyz + qwx)
- m[2][0] = 2 * (qxz + qwy)
- m[2][1] = 2 * (qyz - qwx)
- m[2][2] = 1 - 2 * (qxx + qyy)
+ m[0, 2] = 2 * (qxz + qwy)
+ m[1, 2] = 2 * (qyz - qwx)
+ m[2, 2] = 1 - 2 * (qxx + qyy)
return m
}
matrix3_from_quaternion_f32 :: proc(q: Quaternionf32) -> (m: Matrix3f32) {
@@ -1239,17 +1240,17 @@ matrix3_from_quaternion_f32 :: proc(q: Quaternionf32) -> (m: Matrix3f32) {
qwy := q.w * q.y
qwz := q.w * q.z
- m[0][0] = 1 - 2 * (qyy + qzz)
- m[0][1] = 2 * (qxy + qwz)
- m[0][2] = 2 * (qxz - qwy)
+ m[0, 0] = 1 - 2 * (qyy + qzz)
+ m[1, 0] = 2 * (qxy + qwz)
+ m[2, 0] = 2 * (qxz - qwy)
- m[1][0] = 2 * (qxy - qwz)
- m[1][1] = 1 - 2 * (qxx + qzz)
- m[1][2] = 2 * (qyz + qwx)
+ m[0, 1] = 2 * (qxy - qwz)
+ m[1, 1] = 1 - 2 * (qxx + qzz)
+ m[2, 1] = 2 * (qyz + qwx)
- m[2][0] = 2 * (qxz + qwy)
- m[2][1] = 2 * (qyz - qwx)
- m[2][2] = 1 - 2 * (qxx + qyy)
+ m[0, 2] = 2 * (qxz + qwy)
+ m[1, 2] = 2 * (qyz - qwx)
+ m[2, 2] = 1 - 2 * (qxx + qyy)
return m
}
matrix3_from_quaternion_f64 :: proc(q: Quaternionf64) -> (m: Matrix3f64) {
@@ -1263,17 +1264,17 @@ matrix3_from_quaternion_f64 :: proc(q: Quaternionf64) -> (m: Matrix3f64) {
qwy := q.w * q.y
qwz := q.w * q.z
- m[0][0] = 1 - 2 * (qyy + qzz)
- m[0][1] = 2 * (qxy + qwz)
- m[0][2] = 2 * (qxz - qwy)
+ m[0, 0] = 1 - 2 * (qyy + qzz)
+ m[1, 0] = 2 * (qxy + qwz)
+ m[2, 0] = 2 * (qxz - qwy)
- m[1][0] = 2 * (qxy - qwz)
- m[1][1] = 1 - 2 * (qxx + qzz)
- m[1][2] = 2 * (qyz + qwx)
+ m[0, 1] = 2 * (qxy - qwz)
+ m[1, 1] = 1 - 2 * (qxx + qzz)
+ m[2, 1] = 2 * (qyz + qwx)
- m[2][0] = 2 * (qxz + qwy)
- m[2][1] = 2 * (qyz - qwx)
- m[2][2] = 1 - 2 * (qxx + qyy)
+ m[0, 2] = 2 * (qxz + qwy)
+ m[1, 2] = 2 * (qyz - qwx)
+ m[2, 2] = 1 - 2 * (qxx + qyy)
return m
}
matrix3_from_quaternion :: proc{
@@ -1300,21 +1301,21 @@ matrix3_inverse :: proc{
matrix3_determinant_f16 :: proc(m: Matrix3f16) -> f16 {
- a := +m[0][0] * (m[1][1] * m[2][2] - m[2][1] * m[1][2])
- b := -m[1][0] * (m[0][1] * m[2][2] - m[2][1] * m[0][2])
- c := +m[2][0] * (m[0][1] * m[1][2] - m[1][1] * m[0][2])
+ a := +m[0, 0] * (m[1, 1] * m[2, 2] - m[1, 2] * m[2, 1])
+ b := -m[0, 1] * (m[1, 0] * m[2, 2] - m[1, 2] * m[2, 0])
+ c := +m[0, 2] * (m[1, 0] * m[2, 1] - m[1, 1] * m[2, 0])
return a + b + c
}
matrix3_determinant_f32 :: proc(m: Matrix3f32) -> f32 {
- a := +m[0][0] * (m[1][1] * m[2][2] - m[2][1] * m[1][2])
- b := -m[1][0] * (m[0][1] * m[2][2] - m[2][1] * m[0][2])
- c := +m[2][0] * (m[0][1] * m[1][2] - m[1][1] * m[0][2])
+ a := +m[0, 0] * (m[1, 1] * m[2, 2] - m[1, 2] * m[2, 1])
+ b := -m[0, 1] * (m[1, 0] * m[2, 2] - m[1, 2] * m[2, 0])
+ c := +m[0, 2] * (m[1, 0] * m[2, 1] - m[1, 1] * m[2, 0])
return a + b + c
}
matrix3_determinant_f64 :: proc(m: Matrix3f64) -> f64 {
- a := +m[0][0] * (m[1][1] * m[2][2] - m[2][1] * m[1][2])
- b := -m[1][0] * (m[0][1] * m[2][2] - m[2][1] * m[0][2])
- c := +m[2][0] * (m[0][1] * m[1][2] - m[1][1] * m[0][2])
+ a := +m[0, 0] * (m[1, 1] * m[2, 2] - m[1, 2] * m[2, 1])
+ b := -m[0, 1] * (m[1, 0] * m[2, 2] - m[1, 2] * m[2, 0])
+ c := +m[0, 2] * (m[1, 0] * m[2, 1] - m[1, 1] * m[2, 0])
return a + b + c
}
matrix3_determinant :: proc{
@@ -1325,39 +1326,39 @@ matrix3_determinant :: proc{
matrix3_adjoint_f16 :: proc(m: Matrix3f16) -> (adjoint: Matrix3f16) {
- adjoint[0][0] = +(m[1][1] * m[2][2] - m[1][2] * m[2][1])
- adjoint[1][0] = -(m[0][1] * m[2][2] - m[0][2] * m[2][1])
- adjoint[2][0] = +(m[0][1] * m[1][2] - m[0][2] * m[1][1])
- adjoint[0][1] = -(m[1][0] * m[2][2] - m[1][2] * m[2][0])
- adjoint[1][1] = +(m[0][0] * m[2][2] - m[0][2] * m[2][0])
- adjoint[2][1] = -(m[0][0] * m[1][2] - m[0][2] * m[1][0])
- adjoint[0][2] = +(m[1][0] * m[2][1] - m[1][1] * m[2][0])
- adjoint[1][2] = -(m[0][0] * m[2][1] - m[0][1] * m[2][0])
- adjoint[2][2] = +(m[0][0] * m[1][1] - m[0][1] * m[1][0])
+ adjoint[0, 0] = +(m[1, 1] * m[2, 2] - m[2, 1] * m[1, 2])
+ adjoint[0, 1] = -(m[1, 0] * m[2, 2] - m[2, 0] * m[1, 2])
+ adjoint[0, 2] = +(m[1, 0] * m[2, 1] - m[2, 0] * m[1, 1])
+ adjoint[1, 0] = -(m[0, 1] * m[2, 2] - m[2, 1] * m[0, 2])
+ adjoint[1, 1] = +(m[0, 0] * m[2, 2] - m[2, 0] * m[0, 2])
+ adjoint[1, 2] = -(m[0, 0] * m[2, 1] - m[2, 0] * m[0, 1])
+ adjoint[2, 0] = +(m[0, 1] * m[1, 2] - m[1, 1] * m[0, 2])
+ adjoint[2, 1] = -(m[0, 0] * m[1, 2] - m[1, 0] * m[0, 2])
+ adjoint[2, 2] = +(m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1])
return adjoint
}
matrix3_adjoint_f32 :: proc(m: Matrix3f32) -> (adjoint: Matrix3f32) {
- adjoint[0][0] = +(m[1][1] * m[2][2] - m[1][2] * m[2][1])
- adjoint[1][0] = -(m[0][1] * m[2][2] - m[0][2] * m[2][1])
- adjoint[2][0] = +(m[0][1] * m[1][2] - m[0][2] * m[1][1])
- adjoint[0][1] = -(m[1][0] * m[2][2] - m[1][2] * m[2][0])
- adjoint[1][1] = +(m[0][0] * m[2][2] - m[0][2] * m[2][0])
- adjoint[2][1] = -(m[0][0] * m[1][2] - m[0][2] * m[1][0])
- adjoint[0][2] = +(m[1][0] * m[2][1] - m[1][1] * m[2][0])
- adjoint[1][2] = -(m[0][0] * m[2][1] - m[0][1] * m[2][0])
- adjoint[2][2] = +(m[0][0] * m[1][1] - m[0][1] * m[1][0])
+ adjoint[0, 0] = +(m[1, 1] * m[2, 2] - m[2, 1] * m[1, 2])
+ adjoint[0, 1] = -(m[1, 0] * m[2, 2] - m[2, 0] * m[1, 2])
+ adjoint[0, 2] = +(m[1, 0] * m[2, 1] - m[2, 0] * m[1, 1])
+ adjoint[1, 0] = -(m[0, 1] * m[2, 2] - m[2, 1] * m[0, 2])
+ adjoint[1, 1] = +(m[0, 0] * m[2, 2] - m[2, 0] * m[0, 2])
+ adjoint[1, 2] = -(m[0, 0] * m[2, 1] - m[2, 0] * m[0, 1])
+ adjoint[2, 0] = +(m[0, 1] * m[1, 2] - m[1, 1] * m[0, 2])
+ adjoint[2, 1] = -(m[0, 0] * m[1, 2] - m[1, 0] * m[0, 2])
+ adjoint[2, 2] = +(m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1])
return adjoint
}
matrix3_adjoint_f64 :: proc(m: Matrix3f64) -> (adjoint: Matrix3f64) {
- adjoint[0][0] = +(m[1][1] * m[2][2] - m[1][2] * m[2][1])
- adjoint[1][0] = -(m[0][1] * m[2][2] - m[0][2] * m[2][1])
- adjoint[2][0] = +(m[0][1] * m[1][2] - m[0][2] * m[1][1])
- adjoint[0][1] = -(m[1][0] * m[2][2] - m[1][2] * m[2][0])
- adjoint[1][1] = +(m[0][0] * m[2][2] - m[0][2] * m[2][0])
- adjoint[2][1] = -(m[0][0] * m[1][2] - m[0][2] * m[1][0])
- adjoint[0][2] = +(m[1][0] * m[2][1] - m[1][1] * m[2][0])
- adjoint[1][2] = -(m[0][0] * m[2][1] - m[0][1] * m[2][0])
- adjoint[2][2] = +(m[0][0] * m[1][1] - m[0][1] * m[1][0])
+ adjoint[0, 0] = +(m[1, 1] * m[2, 2] - m[2, 1] * m[1, 2])
+ adjoint[0, 1] = -(m[1, 0] * m[2, 2] - m[2, 0] * m[1, 2])
+ adjoint[0, 2] = +(m[1, 0] * m[2, 1] - m[2, 0] * m[1, 1])
+ adjoint[1, 0] = -(m[0, 1] * m[2, 2] - m[2, 1] * m[0, 2])
+ adjoint[1, 1] = +(m[0, 0] * m[2, 2] - m[2, 0] * m[0, 2])
+ adjoint[1, 2] = -(m[0, 0] * m[2, 1] - m[2, 0] * m[0, 1])
+ adjoint[2, 0] = +(m[0, 1] * m[1, 2] - m[1, 1] * m[0, 2])
+ adjoint[2, 1] = -(m[0, 0] * m[1, 2] - m[1, 0] * m[0, 2])
+ adjoint[2, 2] = +(m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1])
return adjoint
}
matrix3_adjoint :: proc{
@@ -1369,37 +1370,13 @@ matrix3_adjoint :: proc{
matrix3_inverse_transpose_f16 :: proc(m: Matrix3f16) -> (inverse_transpose: Matrix3f16) {
- adjoint := matrix3_adjoint(m)
- determinant := matrix3_determinant(m)
- inv_determinant := 1.0 / determinant
- for i in 0..<3 {
- for j in 0..<3 {
- inverse_transpose[i][j] = adjoint[i][j] * inv_determinant
- }
- }
- return
+ return builtin.inverse_transpose(m)
}
matrix3_inverse_transpose_f32 :: proc(m: Matrix3f32) -> (inverse_transpose: Matrix3f32) {
- adjoint := matrix3_adjoint(m)
- determinant := matrix3_determinant(m)
- inv_determinant := 1.0 / determinant
- for i in 0..<3 {
- for j in 0..<3 {
- inverse_transpose[i][j] = adjoint[i][j] * inv_determinant
- }
- }
- return
+ return builtin.inverse_transpose(m)
}
matrix3_inverse_transpose_f64 :: proc(m: Matrix3f64) -> (inverse_transpose: Matrix3f64) {
- adjoint := matrix3_adjoint(m)
- determinant := matrix3_determinant(m)
- inv_determinant := 1.0 / determinant
- for i in 0..<3 {
- for j in 0..<3 {
- inverse_transpose[i][j] = adjoint[i][j] * inv_determinant
- }
- }
- return
+ return builtin.inverse_transpose(m)
}
matrix3_inverse_transpose :: proc{
matrix3_inverse_transpose_f16,
@@ -1409,21 +1386,21 @@ matrix3_inverse_transpose :: proc{
matrix3_scale_f16 :: proc(s: Vector3f16) -> (m: Matrix3f16) {
- m[0][0] = s[0]
- m[1][1] = s[1]
- m[2][2] = s[2]
+ m[0, 0] = s[0]
+ m[1, 1] = s[1]
+ m[2, 2] = s[2]
return m
}
matrix3_scale_f32 :: proc(s: Vector3f32) -> (m: Matrix3f32) {
- m[0][0] = s[0]
- m[1][1] = s[1]
- m[2][2] = s[2]
+ m[0, 0] = s[0]
+ m[1, 1] = s[1]
+ m[2, 2] = s[2]
return m
}
matrix3_scale_f64 :: proc(s: Vector3f64) -> (m: Matrix3f64) {
- m[0][0] = s[0]
- m[1][1] = s[1]
- m[2][2] = s[2]
+ m[0, 0] = s[0]
+ m[1, 1] = s[1]
+ m[2, 2] = s[2]
return m
}
matrix3_scale :: proc{
@@ -1440,17 +1417,17 @@ matrix3_rotate_f16 :: proc(angle_radians: f16, v: Vector3f16) -> (rot: Matrix3f1
a := normalize(v)
t := a * (1-c)
- rot[0][0] = c + t[0]*a[0]
- rot[0][1] = 0 + t[0]*a[1] + s*a[2]
- rot[0][2] = 0 + t[0]*a[2] - s*a[1]
+ rot[0, 0] = c + t[0]*a[0]
+ rot[1, 0] = 0 + t[0]*a[1] + s*a[2]
+ rot[2, 0] = 0 + t[0]*a[2] - s*a[1]
- rot[1][0] = 0 + t[1]*a[0] - s*a[2]
- rot[1][1] = c + t[1]*a[1]
- rot[1][2] = 0 + t[1]*a[2] + s*a[0]
+ rot[0, 1] = 0 + t[1]*a[0] - s*a[2]
+ rot[1, 1] = c + t[1]*a[1]
+ rot[2, 1] = 0 + t[1]*a[2] + s*a[0]
- rot[2][0] = 0 + t[2]*a[0] + s*a[1]
- rot[2][1] = 0 + t[2]*a[1] - s*a[0]
- rot[2][2] = c + t[2]*a[2]
+ rot[0, 2] = 0 + t[2]*a[0] + s*a[1]
+ rot[1, 2] = 0 + t[2]*a[1] - s*a[0]
+ rot[2, 2] = c + t[2]*a[2]
return rot
}
@@ -1461,17 +1438,17 @@ matrix3_rotate_f32 :: proc(angle_radians: f32, v: Vector3f32) -> (rot: Matrix3f3
a := normalize(v)
t := a * (1-c)
- rot[0][0] = c + t[0]*a[0]
- rot[0][1] = 0 + t[0]*a[1] + s*a[2]
- rot[0][2] = 0 + t[0]*a[2] - s*a[1]
+ rot[0, 0] = c + t[0]*a[0]
+ rot[1, 0] = 0 + t[0]*a[1] + s*a[2]
+ rot[2, 0] = 0 + t[0]*a[2] - s*a[1]
- rot[1][0] = 0 + t[1]*a[0] - s*a[2]
- rot[1][1] = c + t[1]*a[1]
- rot[1][2] = 0 + t[1]*a[2] + s*a[0]
+ rot[0, 1] = 0 + t[1]*a[0] - s*a[2]
+ rot[1, 1] = c + t[1]*a[1]
+ rot[2, 1] = 0 + t[1]*a[2] + s*a[0]
- rot[2][0] = 0 + t[2]*a[0] + s*a[1]
- rot[2][1] = 0 + t[2]*a[1] - s*a[0]
- rot[2][2] = c + t[2]*a[2]
+ rot[0, 2] = 0 + t[2]*a[0] + s*a[1]
+ rot[1, 2] = 0 + t[2]*a[1] - s*a[0]
+ rot[2, 2] = c + t[2]*a[2]
return rot
}
@@ -1482,17 +1459,17 @@ matrix3_rotate_f64 :: proc(angle_radians: f64, v: Vector3f64) -> (rot: Matrix3f6
a := normalize(v)
t := a * (1-c)
- rot[0][0] = c + t[0]*a[0]
- rot[0][1] = 0 + t[0]*a[1] + s*a[2]
- rot[0][2] = 0 + t[0]*a[2] - s*a[1]
+ rot[0, 0] = c + t[0]*a[0]
+ rot[1, 0] = 0 + t[0]*a[1] + s*a[2]
+ rot[2, 0] = 0 + t[0]*a[2] - s*a[1]
- rot[1][0] = 0 + t[1]*a[0] - s*a[2]
- rot[1][1] = c + t[1]*a[1]
- rot[1][2] = 0 + t[1]*a[2] + s*a[0]
+ rot[0, 1] = 0 + t[1]*a[0] - s*a[2]
+ rot[1, 1] = c + t[1]*a[1]
+ rot[2, 1] = 0 + t[1]*a[2] + s*a[0]
- rot[2][0] = 0 + t[2]*a[0] + s*a[1]
- rot[2][1] = 0 + t[2]*a[1] - s*a[0]
- rot[2][2] = c + t[2]*a[2]
+ rot[0, 2] = 0 + t[2]*a[0] + s*a[1]
+ rot[1, 2] = 0 + t[2]*a[1] - s*a[0]
+ rot[2, 2] = c + t[2]*a[2]
return rot
}
@@ -1508,9 +1485,9 @@ matrix3_look_at_f16 :: proc(eye, centre, up: Vector3f16) -> Matrix3f16 {
s := normalize(cross(f, up))
u := cross(s, f)
return Matrix3f16{
- {+s.x, +u.x, -f.x},
- {+s.y, +u.y, -f.y},
- {+s.z, +u.z, -f.z},
+ +s.x, +s.y, +s.z,
+ +u.x, +u.y, +u.z,
+ -f.x, -f.y, -f.z,
}
}
matrix3_look_at_f32 :: proc(eye, centre, up: Vector3f32) -> Matrix3f32 {
@@ -1518,9 +1495,9 @@ matrix3_look_at_f32 :: proc(eye, centre, up: Vector3f32) -> Matrix3f32 {
s := normalize(cross(f, up))
u := cross(s, f)
return Matrix3f32{
- {+s.x, +u.x, -f.x},
- {+s.y, +u.y, -f.y},
- {+s.z, +u.z, -f.z},
+ +s.x, +s.y, +s.z,
+ +u.x, +u.y, +u.z,
+ -f.x, -f.y, -f.z,
}
}
matrix3_look_at_f64 :: proc(eye, centre, up: Vector3f64) -> Matrix3f64 {
@@ -1528,9 +1505,9 @@ matrix3_look_at_f64 :: proc(eye, centre, up: Vector3f64) -> Matrix3f64 {
s := normalize(cross(f, up))
u := cross(s, f)
return Matrix3f64{
- {+s.x, +u.x, -f.x},
- {+s.y, +u.y, -f.y},
- {+s.z, +u.z, -f.z},
+ +s.x, +s.y, +s.z,
+ +u.x, +u.y, +u.z,
+ -f.x, -f.y, -f.z,
}
}
matrix3_look_at :: proc{
@@ -1551,19 +1528,19 @@ matrix4_from_quaternion_f16 :: proc(q: Quaternionf16) -> (m: Matrix4f16) {
qwy := q.w * q.y
qwz := q.w * q.z
- m[0][0] = 1 - 2 * (qyy + qzz)
- m[0][1] = 2 * (qxy + qwz)
- m[0][2] = 2 * (qxz - qwy)
+ m[0, 0] = 1 - 2 * (qyy + qzz)
+ m[1, 0] = 2 * (qxy + qwz)
+ m[2, 0] = 2 * (qxz - qwy)
- m[1][0] = 2 * (qxy - qwz)
- m[1][1] = 1 - 2 * (qxx + qzz)
- m[1][2] = 2 * (qyz + qwx)
+ m[0, 1] = 2 * (qxy - qwz)
+ m[1, 1] = 1 - 2 * (qxx + qzz)
+ m[2, 1] = 2 * (qyz + qwx)
- m[2][0] = 2 * (qxz + qwy)
- m[2][1] = 2 * (qyz - qwx)
- m[2][2] = 1 - 2 * (qxx + qyy)
+ m[0, 2] = 2 * (qxz + qwy)
+ m[1, 2] = 2 * (qyz - qwx)
+ m[2, 2] = 1 - 2 * (qxx + qyy)
- m[3][3] = 1
+ m[3, 3] = 1
return m
}
@@ -1578,19 +1555,19 @@ matrix4_from_quaternion_f32 :: proc(q: Quaternionf32) -> (m: Matrix4f32) {
qwy := q.w * q.y
qwz := q.w * q.z
- m[0][0] = 1 - 2 * (qyy + qzz)
- m[0][1] = 2 * (qxy + qwz)
- m[0][2] = 2 * (qxz - qwy)
+ m[0, 0] = 1 - 2 * (qyy + qzz)
+ m[1, 0] = 2 * (qxy + qwz)
+ m[2, 0] = 2 * (qxz - qwy)
- m[1][0] = 2 * (qxy - qwz)
- m[1][1] = 1 - 2 * (qxx + qzz)
- m[1][2] = 2 * (qyz + qwx)
+ m[0, 1] = 2 * (qxy - qwz)
+ m[1, 1] = 1 - 2 * (qxx + qzz)
+ m[2, 1] = 2 * (qyz + qwx)
- m[2][0] = 2 * (qxz + qwy)
- m[2][1] = 2 * (qyz - qwx)
- m[2][2] = 1 - 2 * (qxx + qyy)
+ m[0, 2] = 2 * (qxz + qwy)
+ m[1, 2] = 2 * (qyz - qwx)
+ m[2, 2] = 1 - 2 * (qxx + qyy)
- m[3][3] = 1
+ m[3, 3] = 1
return m
}
@@ -1605,19 +1582,19 @@ matrix4_from_quaternion_f64 :: proc(q: Quaternionf64) -> (m: Matrix4f64) {
qwy := q.w * q.y
qwz := q.w * q.z
- m[0][0] = 1 - 2 * (qyy + qzz)
- m[0][1] = 2 * (qxy + qwz)
- m[0][2] = 2 * (qxz - qwy)
+ m[0, 0] = 1 - 2 * (qyy + qzz)
+ m[1, 0] = 2 * (qxy + qwz)
+ m[2, 0] = 2 * (qxz - qwy)
- m[1][0] = 2 * (qxy - qwz)
- m[1][1] = 1 - 2 * (qxx + qzz)
- m[1][2] = 2 * (qyz + qwx)
+ m[0, 1] = 2 * (qxy - qwz)
+ m[1, 1] = 1 - 2 * (qxx + qzz)
+ m[2, 1] = 2 * (qyz + qwx)
- m[2][0] = 2 * (qxz + qwy)
- m[2][1] = 2 * (qyz - qwx)
- m[2][2] = 1 - 2 * (qxx + qyy)
+ m[0, 2] = 2 * (qxz + qwy)
+ m[1, 2] = 2 * (qyz - qwx)
+ m[2, 2] = 1 - 2 * (qxx + qyy)
- m[3][3] = 1
+ m[3, 3] = 1
return m
}
@@ -1992,10 +1969,10 @@ matrix4_look_at_f16 :: proc(eye, centre, up: Vector3f16, flip_z_axis := true) ->
fe := dot(f, eye)
return {
- {+s.x, +u.x, -f.x, 0},
- {+s.y, +u.y, -f.y, 0},
- {+s.z, +u.z, -f.z, 0},
- {-dot(s, eye), -dot(u, eye), +fe if flip_z_axis else -fe, 1},
+ +s.x, +s.y, +s.z, -dot(s, eye),
+ +u.x, +u.y, +u.z, -dot(u, eye),
+ -f.x, -f.y, -f.z, +fe if flip_z_axis else -fe,
+ 0, 0, 0, 1,
}
}
matrix4_look_at_f32 :: proc(eye, centre, up: Vector3f32, flip_z_axis := true) -> (m: Matrix4f32) {
@@ -2006,10 +1983,10 @@ matrix4_look_at_f32 :: proc(eye, centre, up: Vector3f32, flip_z_axis := true) ->
fe := dot(f, eye)
return {
- {+s.x, +u.x, -f.x, 0},
- {+s.y, +u.y, -f.y, 0},
- {+s.z, +u.z, -f.z, 0},
- {-dot(s, eye), -dot(u, eye), +fe if flip_z_axis else -fe, 1},
+ +s.x, +s.y, +s.z, -dot(s, eye),
+ +u.x, +u.y, +u.z, -dot(u, eye),
+ -f.x, -f.y, -f.z, +fe if flip_z_axis else -fe,
+ 0, 0, 0, 1,
}
}
matrix4_look_at_f64 :: proc(eye, centre, up: Vector3f64, flip_z_axis := true) -> (m: Matrix4f64) {
@@ -2020,10 +1997,10 @@ matrix4_look_at_f64 :: proc(eye, centre, up: Vector3f64, flip_z_axis := true) ->
fe := dot(f, eye)
return {
- {+s.x, +u.x, -f.x, 0},
- {+s.y, +u.y, -f.y, 0},
- {+s.z, +u.z, -f.z, 0},
- {-dot(s, eye), -dot(u, eye), +fe if flip_z_axis else -fe, 1},
+ +s.x, +s.y, +s.z, -dot(s, eye),
+ +u.x, +u.y, +u.z, -dot(u, eye),
+ -f.x, -f.y, -f.z, +fe if flip_z_axis else -fe,
+ 0, 0, 0, 1,
}
}
matrix4_look_at :: proc{
@@ -2041,10 +2018,10 @@ matrix4_look_at_from_fru_f16 :: proc(eye, f, r, u: Vector3f16, flip_z_axis := tr
fe := dot(f, eye)
return {
- {+s.x, +u.x, -f.x, 0},
- {+s.y, +u.y, -f.y, 0},
- {+s.z, +u.z, -f.z, 0},
- {-dot(s, eye), -dot(u, eye), +fe if flip_z_axis else -fe, 1},
+ +s.x, +s.y, +s.z, -dot(s, eye),
+ +u.x, +u.y, +u.z, -dot(u, eye),
+ -f.x, -f.y, -f.z, +fe if flip_z_axis else -fe,
+ 0, 0, 0, 1,
}
}
matrix4_look_at_from_fru_f32 :: proc(eye, f, r, u: Vector3f32, flip_z_axis := true) -> (m: Matrix4f32) {
@@ -2055,10 +2032,10 @@ matrix4_look_at_from_fru_f32 :: proc(eye, f, r, u: Vector3f32, flip_z_axis := tr
fe := dot(f, eye)
return {
- {+s.x, +u.x, -f.x, 0},
- {+s.y, +u.y, -f.y, 0},
- {+s.z, +u.z, -f.z, 0},
- {-dot(s, eye), -dot(u, eye), +fe if flip_z_axis else -fe, 1},
+ +s.x, +s.y, +s.z, -dot(s, eye),
+ +u.x, +u.y, +u.z, -dot(u, eye),
+ -f.x, -f.y, -f.z, +fe if flip_z_axis else -fe,
+ 0, 0, 0, 1,
}
}
matrix4_look_at_from_fru_f64 :: proc(eye, f, r, u: Vector3f64, flip_z_axis := true) -> (m: Matrix4f64) {
@@ -2069,10 +2046,10 @@ matrix4_look_at_from_fru_f64 :: proc(eye, f, r, u: Vector3f64, flip_z_axis := tr
fe := dot(f, eye)
return {
- {+s.x, +u.x, -f.x, 0},
- {+s.y, +u.y, -f.y, 0},
- {+s.z, +u.z, -f.z, 0},
- {-dot(s, eye), -dot(u, eye), +fe if flip_z_axis else -fe, 1},
+ +s.x, +s.y, +s.z, -dot(s, eye),
+ +u.x, +u.y, +u.z, -dot(u, eye),
+ -f.x, -f.y, -f.z, +fe if flip_z_axis else -fe,
+ 0, 0, 0, 1,
}
}
matrix4_look_at_from_fru :: proc{
@@ -2084,11 +2061,11 @@ matrix4_look_at_from_fru :: proc{
matrix4_perspective_f16 :: proc(fovy, aspect, near, far: f16, flip_z_axis := true) -> (m: Matrix4f16) {
tan_half_fovy := math.tan(0.5 * fovy)
- m[0][0] = 1 / (aspect*tan_half_fovy)
- m[1][1] = 1 / (tan_half_fovy)
- m[2][2] = +(far + near) / (far - near)
- m[2][3] = +1
- m[3][2] = -2*far*near / (far - near)
+ m[0, 0] = 1 / (aspect*tan_half_fovy)
+ m[1, 1] = 1 / (tan_half_fovy)
+ m[2, 2] = +(far + near) / (far - near)
+ m[3, 2] = +1
+ m[2, 3] = -2*far*near / (far - near)
if flip_z_axis {
m[2] = -m[2]
@@ -2098,11 +2075,11 @@ matrix4_perspective_f16 :: proc(fovy, aspect, near, far: f16, flip_z_axis := tru
}
matrix4_perspective_f32 :: proc(fovy, aspect, near, far: f32, flip_z_axis := true) -> (m: Matrix4f32) {
tan_half_fovy := math.tan(0.5 * fovy)
- m[0][0] = 1 / (aspect*tan_half_fovy)
- m[1][1] = 1 / (tan_half_fovy)
- m[2][2] = +(far + near) / (far - near)
- m[2][3] = +1
- m[3][2] = -2*far*near / (far - near)
+ m[0, 0] = 1 / (aspect*tan_half_fovy)
+ m[1, 1] = 1 / (tan_half_fovy)
+ m[2, 2] = +(far + near) / (far - near)
+ m[3, 2] = +1
+ m[2, 3] = -2*far*near / (far - near)
if flip_z_axis {
m[2] = -m[2]
@@ -2112,11 +2089,11 @@ matrix4_perspective_f32 :: proc(fovy, aspect, near, far: f32, flip_z_axis := tru
}
matrix4_perspective_f64 :: proc(fovy, aspect, near, far: f64, flip_z_axis := true) -> (m: Matrix4f64) {
tan_half_fovy := math.tan(0.5 * fovy)
- m[0][0] = 1 / (aspect*tan_half_fovy)
- m[1][1] = 1 / (tan_half_fovy)
- m[2][2] = +(far + near) / (far - near)
- m[2][3] = +1
- m[3][2] = -2*far*near / (far - near)
+ m[0, 0] = 1 / (aspect*tan_half_fovy)
+ m[1, 1] = 1 / (tan_half_fovy)
+ m[2, 2] = +(far + near) / (far - near)
+ m[3, 2] = +1
+ m[2, 3] = -2*far*near / (far - near)
if flip_z_axis {
m[2] = -m[2]
@@ -2133,13 +2110,13 @@ matrix4_perspective :: proc{
matrix_ortho3d_f16 :: proc(left, right, bottom, top, near, far: f16, flip_z_axis := true) -> (m: Matrix4f16) {
- m[0][0] = +2 / (right - left)
- m[1][1] = +2 / (top - bottom)
- m[2][2] = +2 / (far - near)
- m[3][0] = -(right + left) / (right - left)
- m[3][1] = -(top + bottom) / (top - bottom)
- m[3][2] = -(far + near) / (far- near)
- m[3][3] = 1
+ m[0, 0] = +2 / (right - left)
+ m[1, 1] = +2 / (top - bottom)
+ m[2, 2] = +2 / (far - near)
+ m[0, 3] = -(right + left) / (right - left)
+ m[1, 3] = -(top + bottom) / (top - bottom)
+ m[2, 3] = -(far + near) / (far- near)
+ m[3, 3] = 1
if flip_z_axis {
m[2] = -m[2]
@@ -2148,13 +2125,13 @@ matrix_ortho3d_f16 :: proc(left, right, bottom, top, near, far: f16, flip_z_axis
return
}
matrix_ortho3d_f32 :: proc(left, right, bottom, top, near, far: f32, flip_z_axis := true) -> (m: Matrix4f32) {
- m[0][0] = +2 / (right - left)
- m[1][1] = +2 / (top - bottom)
- m[2][2] = +2 / (far - near)
- m[3][0] = -(right + left) / (right - left)
- m[3][1] = -(top + bottom) / (top - bottom)
- m[3][2] = -(far + near) / (far- near)
- m[3][3] = 1
+ m[0, 0] = +2 / (right - left)
+ m[1, 1] = +2 / (top - bottom)
+ m[2, 2] = +2 / (far - near)
+ m[0, 3] = -(right + left) / (right - left)
+ m[1, 3] = -(top + bottom) / (top - bottom)
+ m[2, 3] = -(far + near) / (far- near)
+ m[3, 3] = 1
if flip_z_axis {
m[2] = -m[2]
@@ -2163,13 +2140,13 @@ matrix_ortho3d_f32 :: proc(left, right, bottom, top, near, far: f32, flip_z_axis
return
}
matrix_ortho3d_f64 :: proc(left, right, bottom, top, near, far: f64, flip_z_axis := true) -> (m: Matrix4f64) {
- m[0][0] = +2 / (right - left)
- m[1][1] = +2 / (top - bottom)
- m[2][2] = +2 / (far - near)
- m[3][0] = -(right + left) / (right - left)
- m[3][1] = -(top + bottom) / (top - bottom)
- m[3][2] = -(far + near) / (far- near)
- m[3][3] = 1
+ m[0, 0] = +2 / (right - left)
+ m[1, 1] = +2 / (top - bottom)
+ m[2, 2] = +2 / (far - near)
+ m[0, 3] = -(right + left) / (right - left)
+ m[1, 3] = -(top + bottom) / (top - bottom)
+ m[2, 3] = -(far + near) / (far- near)
+ m[3, 3] = 1
if flip_z_axis {
m[2] = -m[2]
@@ -2187,11 +2164,11 @@ matrix_ortho3d :: proc{
matrix4_infinite_perspective_f16 :: proc(fovy, aspect, near: f16, flip_z_axis := true) -> (m: Matrix4f16) {
tan_half_fovy := math.tan(0.5 * fovy)
- m[0][0] = 1 / (aspect*tan_half_fovy)
- m[1][1] = 1 / (tan_half_fovy)
- m[2][2] = +1
- m[2][3] = +1
- m[3][2] = -2*near
+ m[0, 0] = 1 / (aspect*tan_half_fovy)
+ m[1, 1] = 1 / (tan_half_fovy)
+ m[2, 2] = +1
+ m[3, 2] = +1
+ m[2, 3] = -2*near
if flip_z_axis {
m[2] = -m[2]
@@ -2201,11 +2178,11 @@ matrix4_infinite_perspective_f16 :: proc(fovy, aspect, near: f16, flip_z_axis :=
}
matrix4_infinite_perspective_f32 :: proc(fovy, aspect, near: f32, flip_z_axis := true) -> (m: Matrix4f32) {
tan_half_fovy := math.tan(0.5 * fovy)
- m[0][0] = 1 / (aspect*tan_half_fovy)
- m[1][1] = 1 / (tan_half_fovy)
- m[2][2] = +1
- m[2][3] = +1
- m[3][2] = -2*near
+ m[0, 0] = 1 / (aspect*tan_half_fovy)
+ m[1, 1] = 1 / (tan_half_fovy)
+ m[2, 2] = +1
+ m[3, 2] = +1
+ m[2, 3] = -2*near
if flip_z_axis {
m[2] = -m[2]
@@ -2215,11 +2192,11 @@ matrix4_infinite_perspective_f32 :: proc(fovy, aspect, near: f32, flip_z_axis :=
}
matrix4_infinite_perspective_f64 :: proc(fovy, aspect, near: f64, flip_z_axis := true) -> (m: Matrix4f64) {
tan_half_fovy := math.tan(0.5 * fovy)
- m[0][0] = 1 / (aspect*tan_half_fovy)
- m[1][1] = 1 / (tan_half_fovy)
- m[2][2] = +1
- m[2][3] = +1
- m[3][2] = -2*near
+ m[0, 0] = 1 / (aspect*tan_half_fovy)
+ m[1, 1] = 1 / (tan_half_fovy)
+ m[2, 2] = +1
+ m[3, 2] = +1
+ m[2, 3] = -2*near
if flip_z_axis {
m[2] = -m[2]
@@ -2236,18 +2213,18 @@ matrix4_infinite_perspective :: proc{
matrix2_from_scalar_f16 :: proc(f: f16) -> (m: Matrix2f16) {
- m[0][0], m[0][1] = f, 0
- m[1][0], m[1][1] = 0, f
+ m[0, 0], m[1, 0] = f, 0
+ m[0, 1], m[1, 1] = 0, f
return
}
matrix2_from_scalar_f32 :: proc(f: f32) -> (m: Matrix2f32) {
- m[0][0], m[0][1] = f, 0
- m[1][0], m[1][1] = 0, f
+ m[0, 0], m[1, 0] = f, 0
+ m[0, 1], m[1, 1] = 0, f
return
}
matrix2_from_scalar_f64 :: proc(f: f64) -> (m: Matrix2f64) {
- m[0][0], m[0][1] = f, 0
- m[1][0], m[1][1] = 0, f
+ m[0, 0], m[1, 0] = f, 0
+ m[0, 1], m[1, 1] = 0, f
return
}
matrix2_from_scalar :: proc{
@@ -2258,21 +2235,21 @@ matrix2_from_scalar :: proc{
matrix3_from_scalar_f16 :: proc(f: f16) -> (m: Matrix3f16) {
- m[0][0], m[0][1], m[0][2] = f, 0, 0
- m[1][0], m[1][1], m[1][2] = 0, f, 0
- m[2][0], m[2][1], m[2][2] = 0, 0, f
+ m[0, 0], m[1, 0], m[2, 0] = f, 0, 0
+ m[0, 1], m[1, 1], m[2, 1] = 0, f, 0
+ m[0, 2], m[1, 2], m[2, 2] = 0, 0, f
return
}
matrix3_from_scalar_f32 :: proc(f: f32) -> (m: Matrix3f32) {
- m[0][0], m[0][1], m[0][2] = f, 0, 0
- m[1][0], m[1][1], m[1][2] = 0, f, 0
- m[2][0], m[2][1], m[2][2] = 0, 0, f
+ m[0, 0], m[1, 0], m[2, 0] = f, 0, 0
+ m[0, 1], m[1, 1], m[2, 1] = 0, f, 0
+ m[0, 2], m[1, 2], m[2, 2] = 0, 0, f
return
}
matrix3_from_scalar_f64 :: proc(f: f64) -> (m: Matrix3f64) {
- m[0][0], m[0][1], m[0][2] = f, 0, 0
- m[1][0], m[1][1], m[1][2] = 0, f, 0
- m[2][0], m[2][1], m[2][2] = 0, 0, f
+ m[0, 0], m[1, 0], m[2, 0] = f, 0, 0
+ m[0, 1], m[1, 1], m[2, 1] = 0, f, 0
+ m[0, 2], m[1, 2], m[2, 2] = 0, 0, f
return
}
matrix3_from_scalar :: proc{
@@ -2283,24 +2260,24 @@ matrix3_from_scalar :: proc{
matrix4_from_scalar_f16 :: proc(f: f16) -> (m: Matrix4f16) {
- m[0][0], m[0][1], m[0][2], m[0][3] = f, 0, 0, 0
- m[1][0], m[1][1], m[1][2], m[1][3] = 0, f, 0, 0
- m[2][0], m[2][1], m[2][2], m[2][3] = 0, 0, f, 0
- m[3][0], m[3][1], m[3][2], m[3][3] = 0, 0, 0, f
+ m[0, 0], m[1, 0], m[2, 0], m[3, 0] = f, 0, 0, 0
+ m[0, 1], m[1, 1], m[2, 1], m[3, 1] = 0, f, 0, 0
+ m[0, 2], m[1, 2], m[2, 2], m[3, 2] = 0, 0, f, 0
+ m[0, 3], m[1, 3], m[2, 3], m[3, 3] = 0, 0, 0, f
return
}
matrix4_from_scalar_f32 :: proc(f: f32) -> (m: Matrix4f32) {
- m[0][0], m[0][1], m[0][2], m[0][3] = f, 0, 0, 0
- m[1][0], m[1][1], m[1][2], m[1][3] = 0, f, 0, 0
- m[2][0], m[2][1], m[2][2], m[2][3] = 0, 0, f, 0
- m[3][0], m[3][1], m[3][2], m[3][3] = 0, 0, 0, f
+ m[0, 0], m[1, 0], m[2, 0], m[3, 0] = f, 0, 0, 0
+ m[0, 1], m[1, 1], m[2, 1], m[3, 1] = 0, f, 0, 0
+ m[0, 2], m[1, 2], m[2, 2], m[3, 2] = 0, 0, f, 0
+ m[0, 3], m[1, 3], m[2, 3], m[3, 3] = 0, 0, 0, f
return
}
matrix4_from_scalar_f64 :: proc(f: f64) -> (m: Matrix4f64) {
- m[0][0], m[0][1], m[0][2], m[0][3] = f, 0, 0, 0
- m[1][0], m[1][1], m[1][2], m[1][3] = 0, f, 0, 0
- m[2][0], m[2][1], m[2][2], m[2][3] = 0, 0, f, 0
- m[3][0], m[3][1], m[3][2], m[3][3] = 0, 0, 0, f
+ m[0, 0], m[1, 0], m[2, 0], m[3, 0] = f, 0, 0, 0
+ m[0, 1], m[1, 1], m[2, 1], m[3, 1] = 0, f, 0, 0
+ m[0, 2], m[1, 2], m[2, 2], m[3, 2] = 0, 0, f, 0
+ m[0, 3], m[1, 3], m[2, 3], m[3, 3] = 0, 0, 0, f
return
}
matrix4_from_scalar :: proc{
@@ -2311,18 +2288,18 @@ matrix4_from_scalar :: proc{
matrix2_from_matrix3_f16 :: proc(m: Matrix3f16) -> (r: Matrix2f16) {
- r[0][0], r[0][1] = m[0][0], m[0][1]
- r[1][0], r[1][1] = m[1][0], m[1][1]
+ r[0, 0], r[1, 0] = m[0, 0], m[1, 0]
+ r[0, 1], r[1, 1] = m[0, 1], m[1, 1]
return
}
matrix2_from_matrix3_f32 :: proc(m: Matrix3f32) -> (r: Matrix2f32) {
- r[0][0], r[0][1] = m[0][0], m[0][1]
- r[1][0], r[1][1] = m[1][0], m[1][1]
+ r[0, 0], r[1, 0] = m[0, 0], m[1, 0]
+ r[0, 1], r[1, 1] = m[0, 1], m[1, 1]
return
}
matrix2_from_matrix3_f64 :: proc(m: Matrix3f64) -> (r: Matrix2f64) {
- r[0][0], r[0][1] = m[0][0], m[0][1]
- r[1][0], r[1][1] = m[1][0], m[1][1]
+ r[0, 0], r[1, 0] = m[0, 0], m[1, 0]
+ r[0, 1], r[1, 1] = m[0, 1], m[1, 1]
return
}
matrix2_from_matrix3 :: proc{
@@ -2333,18 +2310,18 @@ matrix2_from_matrix3 :: proc{
matrix2_from_matrix4_f16 :: proc(m: Matrix4f16) -> (r: Matrix2f16) {
- r[0][0], r[0][1] = m[0][0], m[0][1]
- r[1][0], r[1][1] = m[1][0], m[1][1]
+ r[0, 0], r[1, 0] = m[0, 0], m[1, 0]
+ r[0, 1], r[1, 1] = m[0, 1], m[1, 1]
return
}
matrix2_from_matrix4_f32 :: proc(m: Matrix4f32) -> (r: Matrix2f32) {
- r[0][0], r[0][1] = m[0][0], m[0][1]
- r[1][0], r[1][1] = m[1][0], m[1][1]
+ r[0, 0], r[1, 0] = m[0, 0], m[1, 0]
+ r[0, 1], r[1, 1] = m[0, 1], m[1, 1]
return
}
matrix2_from_matrix4_f64 :: proc(m: Matrix4f64) -> (r: Matrix2f64) {
- r[0][0], r[0][1] = m[0][0], m[0][1]
- r[1][0], r[1][1] = m[1][0], m[1][1]
+ r[0, 0], r[1, 0] = m[0, 0], m[1, 0]
+ r[0, 1], r[1, 1] = m[0, 1], m[1, 1]
return
}
matrix2_from_matrix4 :: proc{
@@ -2355,21 +2332,21 @@ matrix2_from_matrix4 :: proc{
matrix3_from_matrix2_f16 :: proc(m: Matrix2f16) -> (r: Matrix3f16) {
- r[0][0], r[0][1], r[0][2] = m[0][0], m[0][1], 0
- r[1][0], r[1][1], r[1][2] = m[1][0], m[1][1], 0
- r[2][0], r[2][1], r[2][2] = 0, 0, 1
+ r[0, 0], r[1, 0], r[2, 0] = m[0, 0], m[1, 0], 0
+ r[0, 1], r[1, 1], r[2, 1] = m[0, 1], m[1, 1], 0
+ r[0, 2], r[1, 2], r[2, 2] = 0, 0, 1
return
}
matrix3_from_matrix2_f32 :: proc(m: Matrix2f32) -> (r: Matrix3f32) {
- r[0][0], r[0][1], r[0][2] = m[0][0], m[0][1], 0
- r[1][0], r[1][1], r[1][2] = m[1][0], m[1][1], 0
- r[2][0], r[2][1], r[2][2] = 0, 0, 1
+ r[0, 0], r[1, 0], r[2, 0] = m[0, 0], m[1, 0], 0
+ r[0, 1], r[1, 1], r[2, 1] = m[0, 1], m[1, 1], 0
+ r[0, 2], r[1, 2], r[2, 2] = 0, 0, 1
return
}
matrix3_from_matrix2_f64 :: proc(m: Matrix2f64) -> (r: Matrix3f64) {
- r[0][0], r[0][1], r[0][2] = m[0][0], m[0][1], 0
- r[1][0], r[1][1], r[1][2] = m[1][0], m[1][1], 0
- r[2][0], r[2][1], r[2][2] = 0, 0, 1
+ r[0, 0], r[1, 0], r[2, 0] = m[0, 0], m[1, 0], 0
+ r[0, 1], r[1, 1], r[2, 1] = m[0, 1], m[1, 1], 0
+ r[0, 2], r[1, 2], r[2, 2] = 0, 0, 1
return
}
matrix3_from_matrix2 :: proc{
@@ -2380,21 +2357,21 @@ matrix3_from_matrix2 :: proc{
matrix3_from_matrix4_f16 :: proc(m: Matrix4f16) -> (r: Matrix3f16) {
- r[0][0], r[0][1], r[0][2] = m[0][0], m[0][1], m[0][2]
- r[1][0], r[1][1], r[1][2] = m[1][0], m[1][1], m[1][2]
- r[2][0], r[2][1], r[2][2] = m[2][0], m[2][1], m[2][2]
+ r[0, 0], r[1, 0], r[2, 0] = m[0, 0], m[1, 0], m[2, 0]
+ r[0, 1], r[1, 1], r[2, 1] = m[0, 1], m[1, 1], m[2, 1]
+ r[0, 2], r[1, 2], r[2, 2] = m[0, 2], m[1, 2], m[2, 2]
return
}
matrix3_from_matrix4_f32 :: proc(m: Matrix4f32) -> (r: Matrix3f32) {
- r[0][0], r[0][1], r[0][2] = m[0][0], m[0][1], m[0][2]
- r[1][0], r[1][1], r[1][2] = m[1][0], m[1][1], m[1][2]
- r[2][0], r[2][1], r[2][2] = m[2][0], m[2][1], m[2][2]
+ r[0, 0], r[1, 0], r[2, 0] = m[0, 0], m[1, 0], m[2, 0]
+ r[0, 1], r[1, 1], r[2, 1] = m[0, 1], m[1, 1], m[2, 1]
+ r[0, 2], r[1, 2], r[2, 2] = m[0, 2], m[1, 2], m[2, 2]
return
}
matrix3_from_matrix4_f64 :: proc(m: Matrix4f64) -> (r: Matrix3f64) {
- r[0][0], r[0][1], r[0][2] = m[0][0], m[0][1], m[0][2]
- r[1][0], r[1][1], r[1][2] = m[1][0], m[1][1], m[1][2]
- r[2][0], r[2][1], r[2][2] = m[2][0], m[2][1], m[2][2]
+ r[0, 0], r[1, 0], r[2, 0] = m[0, 0], m[1, 0], m[2, 0]
+ r[0, 1], r[1, 1], r[2, 1] = m[0, 1], m[1, 1], m[2, 1]
+ r[0, 2], r[1, 2], r[2, 2] = m[0, 2], m[1, 2], m[2, 2]
return
}
matrix3_from_matrix4 :: proc{
@@ -2405,24 +2382,24 @@ matrix3_from_matrix4 :: proc{
matrix4_from_matrix2_f16 :: proc(m: Matrix2f16) -> (r: Matrix4f16) {
- r[0][0], r[0][1], r[0][2], r[0][3] = m[0][0], m[0][1], 0, 0
- r[1][0], r[1][1], r[1][2], r[1][3] = m[1][0], m[1][1], 0, 0
- r[2][0], r[2][1], r[2][2], r[2][3] = 0, 0, 1, 0
- r[3][0], r[3][1], r[3][2], r[3][3] = 0, 0, 0, 1
+ r[0, 0], r[1, 0], r[2, 0], r[3, 0] = m[0, 0], m[1, 0], 0, 0
+ r[0, 1], r[1, 1], r[2, 1], r[3, 1] = m[0, 1], m[1, 1], 0, 0
+ r[0, 2], r[1, 2], r[2, 2], r[3, 2] = 0, 0, 1, 0
+ r[0, 3], r[1, 3], r[2, 3], r[3, 3] = 0, 0, 0, 1
return
}
matrix4_from_matrix2_f32 :: proc(m: Matrix2f32) -> (r: Matrix4f32) {
- r[0][0], r[0][1], r[0][2], r[0][3] = m[0][0], m[0][1], 0, 0
- r[1][0], r[1][1], r[1][2], r[1][3] = m[1][0], m[1][1], 0, 0
- r[2][0], r[2][1], r[2][2], r[2][3] = 0, 0, 1, 0
- r[3][0], r[3][1], r[3][2], r[3][3] = 0, 0, 0, 1
+ r[0, 0], r[1, 0], r[2, 0], r[3, 0] = m[0, 0], m[1, 0], 0, 0
+ r[0, 1], r[1, 1], r[2, 1], r[3, 1] = m[0, 1], m[1, 1], 0, 0
+ r[0, 2], r[1, 2], r[2, 2], r[3, 2] = 0, 0, 1, 0
+ r[0, 3], r[1, 3], r[2, 3], r[3, 3] = 0, 0, 0, 1
return
}
matrix4_from_matrix2_f64 :: proc(m: Matrix2f64) -> (r: Matrix4f64) {
- r[0][0], r[0][1], r[0][2], r[0][3] = m[0][0], m[0][1], 0, 0
- r[1][0], r[1][1], r[1][2], r[1][3] = m[1][0], m[1][1], 0, 0
- r[2][0], r[2][1], r[2][2], r[2][3] = 0, 0, 1, 0
- r[3][0], r[3][1], r[3][2], r[3][3] = 0, 0, 0, 1
+ r[0, 0], r[1, 0], r[2, 0], r[3, 0] = m[0, 0], m[1, 0], 0, 0
+ r[0, 1], r[1, 1], r[2, 1], r[3, 1] = m[0, 1], m[1, 1], 0, 0
+ r[0, 2], r[1, 2], r[2, 2], r[3, 2] = 0, 0, 1, 0
+ r[0, 3], r[1, 3], r[2, 3], r[3, 3] = 0, 0, 0, 1
return
}
matrix4_from_matrix2 :: proc{
@@ -2433,24 +2410,24 @@ matrix4_from_matrix2 :: proc{
matrix4_from_matrix3_f16 :: proc(m: Matrix3f16) -> (r: Matrix4f16) {
- r[0][0], r[0][1], r[0][2], r[0][3] = m[0][0], m[0][1], m[0][2], 0
- r[1][0], r[1][1], r[1][2], r[1][3] = m[1][0], m[1][1], m[1][2], 0
- r[2][0], r[2][1], r[2][2], r[2][3] = m[2][0], m[2][1], m[2][2], 0
- r[3][0], r[3][1], r[3][2], r[3][3] = 0, 0, 0, 1
+ r[0, 0], r[1, 0], r[2, 0], r[3, 0] = m[0, 0], m[1, 0], m[2, 0], 0
+ r[0, 1], r[1, 1], r[2, 1], r[3, 1] = m[0, 1], m[1, 1], m[2, 1], 0
+ r[0, 2], r[1, 2], r[2, 2], r[3, 2] = m[0, 2], m[1, 2], m[2, 2], 0
+ r[0, 3], r[1, 3], r[2, 3], r[3, 3] = 0, 0, 0, 1
return
}
matrix4_from_matrix3_f32 :: proc(m: Matrix3f32) -> (r: Matrix4f32) {
- r[0][0], r[0][1], r[0][2], r[0][3] = m[0][0], m[0][1], m[0][2], 0
- r[1][0], r[1][1], r[1][2], r[1][3] = m[1][0], m[1][1], m[1][2], 0
- r[2][0], r[2][1], r[2][2], r[2][3] = m[2][0], m[2][1], m[2][2], 0
- r[3][0], r[3][1], r[3][2], r[3][3] = 0, 0, 0, 1
+ r[0, 0], r[1, 0], r[2, 0], r[3, 0] = m[0, 0], m[1, 0], m[2, 0], 0
+ r[0, 1], r[1, 1], r[2, 1], r[3, 1] = m[0, 1], m[1, 1], m[2, 1], 0
+ r[0, 2], r[1, 2], r[2, 2], r[3, 2] = m[0, 2], m[1, 2], m[2, 2], 0
+ r[0, 3], r[1, 3], r[2, 3], r[3, 3] = 0, 0, 0, 1
return
}
matrix4_from_matrix3_f64 :: proc(m: Matrix3f64) -> (r: Matrix4f64) {
- r[0][0], r[0][1], r[0][2], r[0][3] = m[0][0], m[0][1], m[0][2], 0
- r[1][0], r[1][1], r[1][2], r[1][3] = m[1][0], m[1][1], m[1][2], 0
- r[2][0], r[2][1], r[2][2], r[2][3] = m[2][0], m[2][1], m[2][2], 0
- r[3][0], r[3][1], r[3][2], r[3][3] = 0, 0, 0, 1
+ r[0, 0], r[1, 0], r[2, 0], r[3, 0] = m[0, 0], m[1, 0], m[2, 0], 0
+ r[0, 1], r[1, 1], r[2, 1], r[3, 1] = m[0, 1], m[1, 1], m[2, 1], 0
+ r[0, 2], r[1, 2], r[2, 2], r[3, 2] = m[0, 2], m[1, 2], m[2, 2], 0
+ r[0, 3], r[1, 3], r[2, 3], r[3, 3] = 0, 0, 0, 1
return
}
matrix4_from_matrix3 :: proc{
diff --git a/core/math/linalg/specific_euler_angles_f16.odin b/core/math/linalg/specific_euler_angles_f16.odin
index d0fb1beb3..9e21c7f97 100644
--- a/core/math/linalg/specific_euler_angles_f16.odin
+++ b/core/math/linalg/specific_euler_angles_f16.odin
@@ -212,29 +212,29 @@ euler_angles_zxy_from_quaternion_f16 :: proc(q: Quaternionf16) -> (t1, t2, t3: f
matrix3_from_euler_angle_x_f16 :: proc(angle_x: f16) -> (m: Matrix3f16) {
cos_x, sin_x := math.cos(angle_x), math.sin(angle_x)
- m[0][0] = 1
- m[1][1] = +cos_x
- m[2][1] = +sin_x
- m[1][2] = -sin_x
- m[2][2] = +cos_x
+ m[0, 0] = 1
+ m[1, 1] = +cos_x
+ m[1, 2] = +sin_x
+ m[2, 1] = -sin_x
+ m[2, 2] = +cos_x
return
}
matrix3_from_euler_angle_y_f16 :: proc(angle_y: f16) -> (m: Matrix3f16) {
cos_y, sin_y := math.cos(angle_y), math.sin(angle_y)
- m[0][0] = +cos_y
- m[2][0] = -sin_y
- m[1][1] = 1
- m[0][2] = +sin_y
- m[2][2] = +cos_y
+ m[0, 0] = +cos_y
+ m[0, 2] = -sin_y
+ m[1, 1] = 1
+ m[2, 0] = +sin_y
+ m[2, 2] = +cos_y
return
}
matrix3_from_euler_angle_z_f16 :: proc(angle_z: f16) -> (m: Matrix3f16) {
cos_z, sin_z := math.cos(angle_z), math.sin(angle_z)
- m[0][0] = +cos_z
- m[1][0] = +sin_z
- m[1][1] = +cos_z
- m[0][1] = -sin_z
- m[2][2] = 1
+ m[0, 0] = +cos_z
+ m[0, 1] = +sin_z
+ m[1, 1] = +cos_z
+ m[1, 0] = -sin_z
+ m[2, 2] = 1
return
}
@@ -242,31 +242,31 @@ matrix3_from_euler_angle_z_f16 :: proc(angle_z: f16) -> (m: Matrix3f16) {
matrix3_from_derived_euler_angle_x_f16 :: proc(angle_x: f16, angular_velocity_x: f16) -> (m: Matrix3f16) {
cos_x := math.cos(angle_x) * angular_velocity_x
sin_x := math.sin(angle_x) * angular_velocity_x
- m[0][0] = 1
- m[1][1] = +cos_x
- m[2][1] = +sin_x
- m[1][2] = -sin_x
- m[2][2] = +cos_x
+ m[0, 0] = 1
+ m[1, 1] = +cos_x
+ m[1, 2] = +sin_x
+ m[2, 1] = -sin_x
+ m[2, 2] = +cos_x
return
}
matrix3_from_derived_euler_angle_y_f16 :: proc(angle_y: f16, angular_velocity_y: f16) -> (m: Matrix3f16) {
cos_y := math.cos(angle_y) * angular_velocity_y
sin_y := math.sin(angle_y) * angular_velocity_y
- m[0][0] = +cos_y
- m[2][0] = -sin_y
- m[1][1] = 1
- m[0][2] = +sin_y
- m[2][2] = +cos_y
+ m[0, 0] = +cos_y
+ m[0, 2] = -sin_y
+ m[1, 1] = 1
+ m[2, 0] = +sin_y
+ m[2, 2] = +cos_y
return
}
matrix3_from_derived_euler_angle_z_f16 :: proc(angle_z: f16, angular_velocity_z: f16) -> (m: Matrix3f16) {
cos_z := math.cos(angle_z) * angular_velocity_z
sin_z := math.sin(angle_z) * angular_velocity_z
- m[0][0] = +cos_z
- m[1][0] = +sin_z
- m[1][1] = +cos_z
- m[0][1] = -sin_z
- m[2][2] = 1
+ m[0, 0] = +cos_z
+ m[0, 1] = +sin_z
+ m[1, 1] = +cos_z
+ m[1, 0] = -sin_z
+ m[2, 2] = 1
return
}
@@ -274,14 +274,14 @@ matrix3_from_derived_euler_angle_z_f16 :: proc(angle_z: f16, angular_velocity_z:
matrix3_from_euler_angles_xy_f16 :: proc(angle_x, angle_y: f16) -> (m: Matrix3f16) {
cos_x, sin_x := math.cos(angle_x), math.sin(angle_x)
cos_y, sin_y := math.cos(angle_y), math.sin(angle_y)
- m[0][0] = cos_y
- m[1][0] = -sin_x * - sin_y
- m[2][0] = -cos_x * - sin_y
- m[1][1] = cos_x
- m[2][1] = sin_x
- m[0][2] = sin_y
- m[1][2] = -sin_x * cos_y
- m[2][2] = cos_x * cos_y
+ m[0, 0] = cos_y
+ m[0, 1] = -sin_x * - sin_y
+ m[0, 2] = -cos_x * - sin_y
+ m[1, 1] = cos_x
+ m[1, 2] = sin_x
+ m[2, 0] = sin_y
+ m[2, 1] = -sin_x * cos_y
+ m[2, 2] = cos_x * cos_y
return
}
@@ -289,14 +289,14 @@ matrix3_from_euler_angles_xy_f16 :: proc(angle_x, angle_y: f16) -> (m: Matrix3f1
matrix3_from_euler_angles_yx_f16 :: proc(angle_y, angle_x: f16) -> (m: Matrix3f16) {
cos_x, sin_x := math.cos(angle_x), math.sin(angle_x)
cos_y, sin_y := math.cos(angle_y), math.sin(angle_y)
- m[0][0] = cos_y
- m[2][0] = -sin_y
- m[0][1] = sin_y*sin_x
- m[1][1] = cos_x
- m[2][1] = cos_y*sin_x
- m[0][2] = sin_y*cos_x
- m[1][2] = -sin_x
- m[2][2] = cos_y*cos_x
+ m[0, 0] = cos_y
+ m[0, 2] = -sin_y
+ m[1, 0] = sin_y*sin_x
+ m[1, 1] = cos_x
+ m[1, 2] = cos_y*sin_x
+ m[2, 0] = sin_y*cos_x
+ m[2, 1] = -sin_x
+ m[2, 2] = cos_y*cos_x
return
}
@@ -322,15 +322,15 @@ matrix3_from_euler_angles_xyz_f16 :: proc(t1, t2, t3: f16) -> (m: Matrix3f16) {
s2 := math.sin(-t2)
s3 := math.sin(-t3)
- m[0][0] = c2 * c3
- m[0][1] =-c1 * s3 + s1 * s2 * c3
- m[0][2] = s1 * s3 + c1 * s2 * c3
- m[1][0] = c2 * s3
- m[1][1] = c1 * c3 + s1 * s2 * s3
- m[1][2] =-s1 * c3 + c1 * s2 * s3
- m[2][0] =-s2
- m[2][1] = s1 * c2
- m[2][2] = c1 * c2
+ m[0, 0] = c2 * c3
+ m[1, 0] =-c1 * s3 + s1 * s2 * c3
+ m[2, 0] = s1 * s3 + c1 * s2 * c3
+ m[0, 1] = c2 * s3
+ m[1, 1] = c1 * c3 + s1 * s2 * s3
+ m[2, 1] =-s1 * c3 + c1 * s2 * s3
+ m[0, 2] =-s2
+ m[1, 2] = s1 * c2
+ m[2, 2] = c1 * c2
return
}
@@ -342,15 +342,15 @@ matrix3_from_euler_angles_yxz_f16 :: proc(yaw, pitch, roll: f16) -> (m: Matrix3f
cb := math.cos(roll)
sb := math.sin(roll)
- m[0][0] = ch * cb + sh * sp * sb
- m[0][1] = sb * cp
- m[0][2] = -sh * cb + ch * sp * sb
- m[1][0] = -ch * sb + sh * sp * cb
- m[1][1] = cb * cp
- m[1][2] = sb * sh + ch * sp * cb
- m[2][0] = sh * cp
- m[2][1] = -sp
- m[2][2] = ch * cp
+ m[0, 0] = ch * cb + sh * sp * sb
+ m[1, 0] = sb * cp
+ m[2, 0] = -sh * cb + ch * sp * sb
+ m[0, 1] = -ch * sb + sh * sp * cb
+ m[1, 1] = cb * cp
+ m[2, 1] = sb * sh + ch * sp * cb
+ m[0, 2] = sh * cp
+ m[1, 2] = -sp
+ m[2, 2] = ch * cp
return
}
@@ -362,15 +362,15 @@ matrix3_from_euler_angles_xzx_f16 :: proc(t1, t2, t3: f16) -> (m: Matrix3f16) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c2
- m[0][1] = c1 * s2
- m[0][2] = s1 * s2
- m[1][0] =-c3 * s2
- m[1][1] = c1 * c2 * c3 - s1 * s3
- m[1][2] = c1 * s3 + c2 * c3 * s1
- m[2][0] = s2 * s3
- m[2][1] =-c3 * s1 - c1 * c2 * s3
- m[2][2] = c1 * c3 - c2 * s1 * s3
+ m[0, 0] = c2
+ m[1, 0] = c1 * s2
+ m[2, 0] = s1 * s2
+ m[0, 1] =-c3 * s2
+ m[1, 1] = c1 * c2 * c3 - s1 * s3
+ m[2, 1] = c1 * s3 + c2 * c3 * s1
+ m[0, 2] = s2 * s3
+ m[1, 2] =-c3 * s1 - c1 * c2 * s3
+ m[2, 2] = c1 * c3 - c2 * s1 * s3
return
}
@@ -382,15 +382,15 @@ matrix3_from_euler_angles_xyx_f16 :: proc(t1, t2, t3: f16) -> (m: Matrix3f16) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c2
- m[0][1] = s1 * s2
- m[0][2] =-c1 * s2
- m[1][0] = s2 * s3
- m[1][1] = c1 * c3 - c2 * s1 * s3
- m[1][2] = c3 * s1 + c1 * c2 * s3
- m[2][0] = c3 * s2
- m[2][1] =-c1 * s3 - c2 * c3 * s1
- m[2][2] = c1 * c2 * c3 - s1 * s3
+ m[0, 0] = c2
+ m[1, 0] = s1 * s2
+ m[2, 0] =-c1 * s2
+ m[0, 1] = s2 * s3
+ m[1, 1] = c1 * c3 - c2 * s1 * s3
+ m[2, 1] = c3 * s1 + c1 * c2 * s3
+ m[0, 2] = c3 * s2
+ m[1, 2] =-c1 * s3 - c2 * c3 * s1
+ m[2, 2] = c1 * c2 * c3 - s1 * s3
return
}
@@ -402,15 +402,15 @@ matrix3_from_euler_angles_yxy_f16 :: proc(t1, t2, t3: f16) -> (m: Matrix3f16) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c3 - c2 * s1 * s3
- m[0][1] = s2* s3
- m[0][2] =-c3 * s1 - c1 * c2 * s3
- m[1][0] = s1 * s2
- m[1][1] = c2
- m[1][2] = c1 * s2
- m[2][0] = c1 * s3 + c2 * c3 * s1
- m[2][1] =-c3 * s2
- m[2][2] = c1 * c2 * c3 - s1 * s3
+ m[0, 0] = c1 * c3 - c2 * s1 * s3
+ m[1, 0] = s2* s3
+ m[2, 0] =-c3 * s1 - c1 * c2 * s3
+ m[0, 1] = s1 * s2
+ m[1, 1] = c2
+ m[2, 1] = c1 * s2
+ m[0, 2] = c1 * s3 + c2 * c3 * s1
+ m[1, 2] =-c3 * s2
+ m[2, 2] = c1 * c2 * c3 - s1 * s3
return
}
@@ -422,15 +422,15 @@ matrix3_from_euler_angles_yzy_f16 :: proc(t1, t2, t3: f16) -> (m: Matrix3f16) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c2 * c3 - s1 * s3
- m[0][1] = c3 * s2
- m[0][2] =-c1 * s3 - c2 * c3 * s1
- m[1][0] =-c1 * s2
- m[1][1] = c2
- m[1][2] = s1 * s2
- m[2][0] = c3 * s1 + c1 * c2 * s3
- m[2][1] = s2 * s3
- m[2][2] = c1 * c3 - c2 * s1 * s3
+ m[0, 0] = c1 * c2 * c3 - s1 * s3
+ m[1, 0] = c3 * s2
+ m[2, 0] =-c1 * s3 - c2 * c3 * s1
+ m[0, 1] =-c1 * s2
+ m[1, 1] = c2
+ m[2, 1] = s1 * s2
+ m[0, 2] = c3 * s1 + c1 * c2 * s3
+ m[1, 2] = s2 * s3
+ m[2, 2] = c1 * c3 - c2 * s1 * s3
return
}
@@ -442,15 +442,15 @@ matrix3_from_euler_angles_zyz_f16 :: proc(t1, t2, t3: f16) -> (m: Matrix3f16) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c2 * c3 - s1 * s3
- m[0][1] = c1 * s3 + c2 * c3 * s1
- m[0][2] =-c3 * s2
- m[1][0] =-c3 * s1 - c1 * c2 * s3
- m[1][1] = c1 * c3 - c2 * s1 * s3
- m[1][2] = s2 * s3
- m[2][0] = c1 * s2
- m[2][1] = s1 * s2
- m[2][2] = c2
+ m[0, 0] = c1 * c2 * c3 - s1 * s3
+ m[1, 0] = c1 * s3 + c2 * c3 * s1
+ m[2, 0] =-c3 * s2
+ m[0, 1] =-c3 * s1 - c1 * c2 * s3
+ m[1, 1] = c1 * c3 - c2 * s1 * s3
+ m[2, 1] = s2 * s3
+ m[0, 2] = c1 * s2
+ m[1, 2] = s1 * s2
+ m[2, 2] = c2
return
}
@@ -462,15 +462,15 @@ matrix3_from_euler_angles_zxz_f16 :: proc(t1, t2, t3: f16) -> (m: Matrix3f16) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c3 - c2 * s1 * s3
- m[0][1] = c3 * s1 + c1 * c2 * s3
- m[0][2] = s2 *s3
- m[1][0] =-c1 * s3 - c2 * c3 * s1
- m[1][1] = c1 * c2 * c3 - s1 * s3
- m[1][2] = c3 * s2
- m[2][0] = s1 * s2
- m[2][1] =-c1 * s2
- m[2][2] = c2
+ m[0, 0] = c1 * c3 - c2 * s1 * s3
+ m[1, 0] = c3 * s1 + c1 * c2 * s3
+ m[2, 0] = s2 *s3
+ m[0, 1] =-c1 * s3 - c2 * c3 * s1
+ m[1, 1] = c1 * c2 * c3 - s1 * s3
+ m[2, 1] = c3 * s2
+ m[0, 2] = s1 * s2
+ m[1, 2] =-c1 * s2
+ m[2, 2] = c2
return
}
@@ -483,15 +483,15 @@ matrix3_from_euler_angles_xzy_f16 :: proc(t1, t2, t3: f16) -> (m: Matrix3f16) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c2 * c3
- m[0][1] = s1 * s3 + c1 * c3 * s2
- m[0][2] = c3 * s1 * s2 - c1 * s3
- m[1][0] =-s2
- m[1][1] = c1 * c2
- m[1][2] = c2 * s1
- m[2][0] = c2 * s3
- m[2][1] = c1 * s2 * s3 - c3 * s1
- m[2][2] = c1 * c3 + s1 * s2 *s3
+ m[0, 0] = c2 * c3
+ m[1, 0] = s1 * s3 + c1 * c3 * s2
+ m[2, 0] = c3 * s1 * s2 - c1 * s3
+ m[0, 1] =-s2
+ m[1, 1] = c1 * c2
+ m[2, 1] = c2 * s1
+ m[0, 2] = c2 * s3
+ m[1, 2] = c1 * s2 * s3 - c3 * s1
+ m[2, 2] = c1 * c3 + s1 * s2 *s3
return
}
@@ -503,15 +503,15 @@ matrix3_from_euler_angles_yzx_f16 :: proc(t1, t2, t3: f16) -> (m: Matrix3f16) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c2
- m[0][1] = s2
- m[0][2] =-c2 * s1
- m[1][0] = s1 * s3 - c1 * c3 * s2
- m[1][1] = c2 * c3
- m[1][2] = c1 * s3 + c3 * s1 * s2
- m[2][0] = c3 * s1 + c1 * s2 * s3
- m[2][1] =-c2 * s3
- m[2][2] = c1 * c3 - s1 * s2 * s3
+ m[0, 0] = c1 * c2
+ m[1, 0] = s2
+ m[2, 0] =-c2 * s1
+ m[0, 1] = s1 * s3 - c1 * c3 * s2
+ m[1, 1] = c2 * c3
+ m[2, 1] = c1 * s3 + c3 * s1 * s2
+ m[0, 2] = c3 * s1 + c1 * s2 * s3
+ m[1, 2] =-c2 * s3
+ m[2, 2] = c1 * c3 - s1 * s2 * s3
return
}
@@ -523,15 +523,15 @@ matrix3_from_euler_angles_zyx_f16 :: proc(t1, t2, t3: f16) -> (m: Matrix3f16) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c2
- m[0][1] = c2 * s1
- m[0][2] =-s2
- m[1][0] = c1 * s2 * s3 - c3 * s1
- m[1][1] = c1 * c3 + s1 * s2 * s3
- m[1][2] = c2 * s3
- m[2][0] = s1 * s3 + c1 * c3 * s2
- m[2][1] = c3 * s1 * s2 - c1 * s3
- m[2][2] = c2 * c3
+ m[0, 0] = c1 * c2
+ m[1, 0] = c2 * s1
+ m[2, 0] =-s2
+ m[0, 1] = c1 * s2 * s3 - c3 * s1
+ m[1, 1] = c1 * c3 + s1 * s2 * s3
+ m[2, 1] = c2 * s3
+ m[0, 2] = s1 * s3 + c1 * c3 * s2
+ m[1, 2] = c3 * s1 * s2 - c1 * s3
+ m[2, 2] = c2 * c3
return
}
@@ -543,15 +543,15 @@ matrix3_from_euler_angles_zxy_f16 :: proc(t1, t2, t3: f16) -> (m: Matrix3f16) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c3 - s1 * s2 * s3
- m[0][1] = c3 * s1 + c1 * s2 * s3
- m[0][2] =-c2 * s3
- m[1][0] =-c2 * s1
- m[1][1] = c1 * c2
- m[1][2] = s2
- m[2][0] = c1 * s3 + c3 * s1 * s2
- m[2][1] = s1 * s3 - c1 * c3 * s2
- m[2][2] = c2 * c3
+ m[0, 0] = c1 * c3 - s1 * s2 * s3
+ m[1, 0] = c3 * s1 + c1 * s2 * s3
+ m[2, 0] =-c2 * s3
+ m[0, 1] =-c2 * s1
+ m[1, 1] = c1 * c2
+ m[2, 1] = s2
+ m[0, 2] = c1 * s3 + c3 * s1 * s2
+ m[1, 2] = s1 * s3 - c1 * c3 * s2
+ m[2, 2] = c2 * c3
return
}
@@ -564,25 +564,25 @@ matrix3_from_yaw_pitch_roll_f16 :: proc(yaw, pitch, roll: f16) -> (m: Matrix3f16
cb := math.cos(roll)
sb := math.sin(roll)
- m[0][0] = ch * cb + sh * sp * sb
- m[0][1] = sb * cp
- m[0][2] = -sh * cb + ch * sp * sb
- m[1][0] = -ch * sb + sh * sp * cb
- m[1][1] = cb * cp
- m[1][2] = sb * sh + ch * sp * cb
- m[2][0] = sh * cp
- m[2][1] = -sp
- m[2][2] = ch * cp
+ m[0, 0] = ch * cb + sh * sp * sb
+ m[1, 0] = sb * cp
+ m[2, 0] = -sh * cb + ch * sp * sb
+ m[0, 1] = -ch * sb + sh * sp * cb
+ m[1, 1] = cb * cp
+ m[2, 1] = sb * sh + ch * sp * cb
+ m[0, 2] = sh * cp
+ m[1, 2] = -sp
+ m[2, 2] = ch * cp
return m
}
euler_angles_xyz_from_matrix3_f16 :: proc(m: Matrix3f16) -> (t1, t2, t3: f16) {
- T1 := math.atan2(m[2][1], m[2][2])
- C2 := math.sqrt(m[0][0]*m[0][0] + m[1][0]*m[1][0])
- T2 := math.atan2(-m[2][0], C2)
+ T1 := math.atan2(m[1, 2], m[2, 2])
+ C2 := math.sqrt(m[0, 0]*m[0, 0] + m[0, 1]*m[0, 1])
+ T2 := math.atan2(-m[0, 2], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[0][2] - C1*m[0][1], C1*m[1][1] - S1*m[1][2])
+ T3 := math.atan2(S1*m[2, 0] - C1*m[1, 0], C1*m[1, 1] - S1*m[2, 1])
t1 = -T1
t2 = -T2
t3 = -T3
@@ -590,12 +590,12 @@ euler_angles_xyz_from_matrix3_f16 :: proc(m: Matrix3f16) -> (t1, t2, t3: f16) {
}
euler_angles_yxz_from_matrix3_f16 :: proc(m: Matrix3f16) -> (t1, t2, t3: f16) {
- T1 := math.atan2(m[2][0], m[2][2])
- C2 := math.sqrt(m[0][1]*m[0][1] + m[1][1]*m[1][1])
- T2 := math.atan2(-m[2][1], C2)
+ T1 := math.atan2(m[0, 2], m[2, 2])
+ C2 := math.sqrt(m[1, 0]*m[1, 0] + m[1, 1]*m[1, 1])
+ T2 := math.atan2(-m[1, 2], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[1][2] - C1*m[1][0], C1*m[0][0] - S1*m[0][2])
+ T3 := math.atan2(S1*m[2, 1] - C1*m[0, 1], C1*m[0, 0] - S1*m[2, 0])
t1 = T1
t2 = T2
t3 = T3
@@ -603,12 +603,12 @@ euler_angles_yxz_from_matrix3_f16 :: proc(m: Matrix3f16) -> (t1, t2, t3: f16) {
}
euler_angles_xzx_from_matrix3_f16 :: proc(m: Matrix3f16) -> (t1, t2, t3: f16) {
- T1 := math.atan2(m[0][2], m[0][1])
- S2 := math.sqrt(m[1][0]*m[1][0] + m[2][0]*m[2][0])
- T2 := math.atan2(S2, m[0][0])
+ T1 := math.atan2(m[2, 0], m[1, 0])
+ S2 := math.sqrt(m[0, 1]*m[0, 1] + m[0, 2]*m[0, 2])
+ T2 := math.atan2(S2, m[0, 0])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(C1*m[1][2] - S1*m[1][1], C1*m[2][2] - S1*m[2][1])
+ T3 := math.atan2(C1*m[2, 1] - S1*m[1, 1], C1*m[2, 2] - S1*m[1, 2])
t1 = T1
t2 = T2
t3 = T3
@@ -616,12 +616,12 @@ euler_angles_xzx_from_matrix3_f16 :: proc(m: Matrix3f16) -> (t1, t2, t3: f16) {
}
euler_angles_xyx_from_matrix3_f16 :: proc(m: Matrix3f16) -> (t1, t2, t3: f16) {
- T1 := math.atan2(m[0][1], -m[0][2])
- S2 := math.sqrt(m[1][0]*m[1][0] + m[2][0]*m[2][0])
- T2 := math.atan2(S2, m[0][0])
+ T1 := math.atan2(m[1, 0], -m[2, 0])
+ S2 := math.sqrt(m[0, 1]*m[0, 1] + m[0, 2]*m[0, 2])
+ T2 := math.atan2(S2, m[0, 0])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(-C1*m[2][1] - S1*m[2][2], C1*m[1][1] + S1*m[1][2])
+ T3 := math.atan2(-C1*m[1, 2] - S1*m[2, 2], C1*m[1, 1] + S1*m[2, 1])
t1 = T1
t2 = T2
t3 = T3
@@ -629,12 +629,12 @@ euler_angles_xyx_from_matrix3_f16 :: proc(m: Matrix3f16) -> (t1, t2, t3: f16) {
}
euler_angles_yxy_from_matrix3_f16 :: proc(m: Matrix3f16) -> (t1, t2, t3: f16) {
- T1 := math.atan2(m[1][0], m[1][2])
- S2 := math.sqrt(m[0][1]*m[0][1] + m[2][1]*m[2][1])
- T2 := math.atan2(S2, m[1][1])
+ T1 := math.atan2(m[0, 1], m[2, 1])
+ S2 := math.sqrt(m[1, 0]*m[1, 0] + m[1, 2]*m[1, 2])
+ T2 := math.atan2(S2, m[1, 1])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(C1*m[2][0] - S1*m[2][2], C1*m[0][0] - S1*m[0][2])
+ T3 := math.atan2(C1*m[0, 2] - S1*m[2, 2], C1*m[0, 0] - S1*m[2, 0])
t1 = T1
t2 = T2
t3 = T3
@@ -642,24 +642,24 @@ euler_angles_yxy_from_matrix3_f16 :: proc(m: Matrix3f16) -> (t1, t2, t3: f16) {
}
euler_angles_yzy_from_matrix3_f16 :: proc(m: Matrix3f16) -> (t1, t2, t3: f16) {
- T1 := math.atan2(m[1][2], -m[1][0])
- S2 := math.sqrt(m[0][1]*m[0][1] + m[2][1]*m[2][1])
- T2 := math.atan2(S2, m[1][1])
+ T1 := math.atan2(m[2, 1], -m[0, 1])
+ S2 := math.sqrt(m[1, 0]*m[1, 0] + m[1, 2]*m[1, 2])
+ T2 := math.atan2(S2, m[1, 1])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(-S1*m[0][0] - C1*m[0][2], S1*m[2][0] + C1*m[2][2])
+ T3 := math.atan2(-S1*m[0, 0] - C1*m[2, 0], S1*m[0, 2] + C1*m[2, 2])
t1 = T1
t2 = T2
t3 = T3
return
}
euler_angles_zyz_from_matrix3_f16 :: proc(m: Matrix3f16) -> (t1, t2, t3: f16) {
- T1 := math.atan2(m[2][1], m[2][0])
- S2 := math.sqrt(m[0][2]*m[0][2] + m[1][2]*m[1][2])
- T2 := math.atan2(S2, m[2][2])
+ T1 := math.atan2(m[1, 2], m[0, 2])
+ S2 := math.sqrt(m[2, 0]*m[2, 0] + m[2, 1]*m[2, 1])
+ T2 := math.atan2(S2, m[2, 2])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(C1*m[0][1] - S1*m[0][0], C1*m[1][1] - S1*m[1][0])
+ T3 := math.atan2(C1*m[1, 0] - S1*m[0, 0], C1*m[1, 1] - S1*m[0, 1])
t1 = T1
t2 = T2
t3 = T3
@@ -667,12 +667,12 @@ euler_angles_zyz_from_matrix3_f16 :: proc(m: Matrix3f16) -> (t1, t2, t3: f16) {
}
euler_angles_zxz_from_matrix3_f16 :: proc(m: Matrix3f16) -> (t1, t2, t3: f16) {
- T1 := math.atan2(m[2][0], -m[2][1])
- S2 := math.sqrt(m[0][2]*m[0][2] + m[1][2]*m[1][2])
- T2 := math.atan2(S2, m[2][2])
+ T1 := math.atan2(m[0, 2], -m[1, 2])
+ S2 := math.sqrt(m[2, 0]*m[2, 0] + m[2, 1]*m[2, 1])
+ T2 := math.atan2(S2, m[2, 2])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(-C1*m[1][0] - S1*m[1][1], C1*m[0][0] + S1*m[0][1])
+ T3 := math.atan2(-C1*m[0, 1] - S1*m[1, 1], C1*m[0, 0] + S1*m[1, 0])
t1 = T1
t2 = T2
t3 = T3
@@ -680,12 +680,12 @@ euler_angles_zxz_from_matrix3_f16 :: proc(m: Matrix3f16) -> (t1, t2, t3: f16) {
}
euler_angles_xzy_from_matrix3_f16 :: proc(m: Matrix3f16) -> (t1, t2, t3: f16) {
- T1 := math.atan2(m[1][2], m[1][1])
- C2 := math.sqrt(m[0][0]*m[0][0] + m[2][0]*m[2][0])
- T2 := math.atan2(-m[1][0], C2)
+ T1 := math.atan2(m[2, 1], m[1, 1])
+ C2 := math.sqrt(m[0, 0]*m[0, 0] + m[0, 2]*m[0, 2])
+ T2 := math.atan2(-m[0, 1], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[0][1] - C1*m[0][2], C1*m[2][2] - S1*m[2][1])
+ T3 := math.atan2(S1*m[1, 0] - C1*m[2, 0], C1*m[2, 2] - S1*m[1, 2])
t1 = T1
t2 = T2
t3 = T3
@@ -693,12 +693,12 @@ euler_angles_xzy_from_matrix3_f16 :: proc(m: Matrix3f16) -> (t1, t2, t3: f16) {
}
euler_angles_yzx_from_matrix3_f16 :: proc(m: Matrix3f16) -> (t1, t2, t3: f16) {
- T1 := math.atan2(-m[0][2], m[0][0])
- C2 := math.sqrt(m[1][1]*m[1][1] + m[2][1]*m[2][1])
- T2 := math.atan2(m[0][1], C2)
+ T1 := math.atan2(-m[2, 0], m[0, 0])
+ C2 := math.sqrt(m[1, 1]*m[1, 1] + m[1, 2]*m[1, 2])
+ T2 := math.atan2(m[1, 0], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[1][0] + C1*m[1][2], S1*m[2][0] + C1*m[2][2])
+ T3 := math.atan2(S1*m[0, 1] + C1*m[2, 1], S1*m[0, 2] + C1*m[2, 2])
t1 = T1
t2 = T2
t3 = T3
@@ -706,12 +706,12 @@ euler_angles_yzx_from_matrix3_f16 :: proc(m: Matrix3f16) -> (t1, t2, t3: f16) {
}
euler_angles_zyx_from_matrix3_f16 :: proc(m: Matrix3f16) -> (t1, t2, t3: f16) {
- T1 := math.atan2(m[0][1], m[0][0])
- C2 := math.sqrt(m[1][2]*m[1][2] + m[2][2]*m[2][2])
- T2 := math.atan2(-m[0][2], C2)
+ T1 := math.atan2(m[1, 0], m[0, 0])
+ C2 := math.sqrt(m[2, 1]*m[2, 1] + m[2, 2]*m[2, 2])
+ T2 := math.atan2(-m[2, 0], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[2][0] - C1*m[2][1], C1*m[1][1] - S1*m[1][0])
+ T3 := math.atan2(S1*m[0, 2] - C1*m[1, 2], C1*m[1, 1] - S1*m[0, 1])
t1 = T1
t2 = T2
t3 = T3
@@ -719,12 +719,12 @@ euler_angles_zyx_from_matrix3_f16 :: proc(m: Matrix3f16) -> (t1, t2, t3: f16) {
}
euler_angles_zxy_from_matrix3_f16 :: proc(m: Matrix3f16) -> (t1, t2, t3: f16) {
- T1 := math.atan2(-m[1][0], m[1][1])
- C2 := math.sqrt(m[0][2]*m[0][2] + m[2][2]*m[2][2])
- T2 := math.atan2(m[1][2], C2)
+ T1 := math.atan2(-m[0, 1], m[1, 1])
+ C2 := math.sqrt(m[2, 0]*m[2, 0] + m[2, 2]*m[2, 2])
+ T2 := math.atan2(m[2, 1], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(C1*m[2][0] + S1*m[2][1], C1*m[0][0] + S1*m[0][1])
+ T3 := math.atan2(C1*m[0, 2] + S1*m[1, 2], C1*m[0, 0] + S1*m[1, 0])
t1 = T1
t2 = T2
t3 = T3
@@ -737,32 +737,32 @@ euler_angles_zxy_from_matrix3_f16 :: proc(m: Matrix3f16) -> (t1, t2, t3: f16) {
matrix4_from_euler_angle_x_f16 :: proc(angle_x: f16) -> (m: Matrix4f16) {
cos_x, sin_x := math.cos(angle_x), math.sin(angle_x)
- m[0][0] = 1
- m[1][1] = +cos_x
- m[2][1] = +sin_x
- m[1][2] = -sin_x
- m[2][2] = +cos_x
- m[3][3] = 1
+ m[0, 0] = 1
+ m[1, 1] = +cos_x
+ m[1, 2] = +sin_x
+ m[2, 1] = -sin_x
+ m[2, 2] = +cos_x
+ m[3, 3] = 1
return
}
matrix4_from_euler_angle_y_f16 :: proc(angle_y: f16) -> (m: Matrix4f16) {
cos_y, sin_y := math.cos(angle_y), math.sin(angle_y)
- m[0][0] = +cos_y
- m[2][0] = -sin_y
- m[1][1] = 1
- m[0][2] = +sin_y
- m[2][2] = +cos_y
- m[3][3] = 1
+ m[0, 0] = +cos_y
+ m[0, 2] = -sin_y
+ m[1, 1] = 1
+ m[2, 0] = +sin_y
+ m[2, 2] = +cos_y
+ m[3, 3] = 1
return
}
matrix4_from_euler_angle_z_f16 :: proc(angle_z: f16) -> (m: Matrix4f16) {
cos_z, sin_z := math.cos(angle_z), math.sin(angle_z)
- m[0][0] = +cos_z
- m[1][0] = +sin_z
- m[1][1] = +cos_z
- m[0][1] = -sin_z
- m[2][2] = 1
- m[3][3] = 1
+ m[0, 0] = +cos_z
+ m[0, 1] = +sin_z
+ m[1, 1] = +cos_z
+ m[1, 0] = -sin_z
+ m[2, 2] = 1
+ m[3, 3] = 1
return
}
@@ -770,34 +770,34 @@ matrix4_from_euler_angle_z_f16 :: proc(angle_z: f16) -> (m: Matrix4f16) {
matrix4_from_derived_euler_angle_x_f16 :: proc(angle_x: f16, angular_velocity_x: f16) -> (m: Matrix4f16) {
cos_x := math.cos(angle_x) * angular_velocity_x
sin_x := math.sin(angle_x) * angular_velocity_x
- m[0][0] = 1
- m[1][1] = +cos_x
- m[2][1] = +sin_x
- m[1][2] = -sin_x
- m[2][2] = +cos_x
- m[3][3] = 1
+ m[0, 0] = 1
+ m[1, 1] = +cos_x
+ m[1, 2] = +sin_x
+ m[2, 1] = -sin_x
+ m[2, 2] = +cos_x
+ m[3, 3] = 1
return
}
matrix4_from_derived_euler_angle_y_f16 :: proc(angle_y: f16, angular_velocity_y: f16) -> (m: Matrix4f16) {
cos_y := math.cos(angle_y) * angular_velocity_y
sin_y := math.sin(angle_y) * angular_velocity_y
- m[0][0] = +cos_y
- m[2][0] = -sin_y
- m[1][1] = 1
- m[0][2] = +sin_y
- m[2][2] = +cos_y
- m[3][3] = 1
+ m[0, 0] = +cos_y
+ m[0, 2] = -sin_y
+ m[1, 1] = 1
+ m[2, 0] = +sin_y
+ m[2, 2] = +cos_y
+ m[3, 3] = 1
return
}
matrix4_from_derived_euler_angle_z_f16 :: proc(angle_z: f16, angular_velocity_z: f16) -> (m: Matrix4f16) {
cos_z := math.cos(angle_z) * angular_velocity_z
sin_z := math.sin(angle_z) * angular_velocity_z
- m[0][0] = +cos_z
- m[1][0] = +sin_z
- m[1][1] = +cos_z
- m[0][1] = -sin_z
- m[2][2] = 1
- m[3][3] = 1
+ m[0, 0] = +cos_z
+ m[0, 1] = +sin_z
+ m[1, 1] = +cos_z
+ m[1, 0] = -sin_z
+ m[2, 2] = 1
+ m[3, 3] = 1
return
}
@@ -805,15 +805,15 @@ matrix4_from_derived_euler_angle_z_f16 :: proc(angle_z: f16, angular_velocity_z:
matrix4_from_euler_angles_xy_f16 :: proc(angle_x, angle_y: f16) -> (m: Matrix4f16) {
cos_x, sin_x := math.cos(angle_x), math.sin(angle_x)
cos_y, sin_y := math.cos(angle_y), math.sin(angle_y)
- m[0][0] = cos_y
- m[1][0] = -sin_x * - sin_y
- m[2][0] = -cos_x * - sin_y
- m[1][1] = cos_x
- m[2][1] = sin_x
- m[0][2] = sin_y
- m[1][2] = -sin_x * cos_y
- m[2][2] = cos_x * cos_y
- m[3][3] = 1
+ m[0, 0] = cos_y
+ m[0, 1] = -sin_x * - sin_y
+ m[0, 2] = -cos_x * - sin_y
+ m[1, 1] = cos_x
+ m[1, 2] = sin_x
+ m[2, 0] = sin_y
+ m[2, 1] = -sin_x * cos_y
+ m[2, 2] = cos_x * cos_y
+ m[3, 3] = 1
return
}
@@ -821,15 +821,15 @@ matrix4_from_euler_angles_xy_f16 :: proc(angle_x, angle_y: f16) -> (m: Matrix4f1
matrix4_from_euler_angles_yx_f16 :: proc(angle_y, angle_x: f16) -> (m: Matrix4f16) {
cos_x, sin_x := math.cos(angle_x), math.sin(angle_x)
cos_y, sin_y := math.cos(angle_y), math.sin(angle_y)
- m[0][0] = cos_y
- m[2][0] = -sin_y
- m[0][1] = sin_y*sin_x
- m[1][1] = cos_x
- m[2][1] = cos_y*sin_x
- m[0][2] = sin_y*cos_x
- m[1][2] = -sin_x
- m[2][2] = cos_y*cos_x
- m[3][3] = 1
+ m[0, 0] = cos_y
+ m[0, 2] = -sin_y
+ m[1, 0] = sin_y*sin_x
+ m[1, 1] = cos_x
+ m[1, 2] = cos_y*sin_x
+ m[2, 0] = sin_y*cos_x
+ m[2, 1] = -sin_x
+ m[2, 2] = cos_y*cos_x
+ m[3, 3] = 1
return
}
@@ -855,22 +855,22 @@ matrix4_from_euler_angles_xyz_f16 :: proc(t1, t2, t3: f16) -> (m: Matrix4f16) {
s2 := math.sin(-t2)
s3 := math.sin(-t3)
- m[0][0] = c2 * c3
- m[0][1] =-c1 * s3 + s1 * s2 * c3
- m[0][2] = s1 * s3 + c1 * s2 * c3
- m[0][3] = 0
- m[1][0] = c2 * s3
- m[1][1] = c1 * c3 + s1 * s2 * s3
- m[1][2] =-s1 * c3 + c1 * s2 * s3
- m[1][3] = 0
- m[2][0] =-s2
- m[2][1] = s1 * c2
- m[2][2] = c1 * c2
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c2 * c3
+ m[1, 0] =-c1 * s3 + s1 * s2 * c3
+ m[2, 0] = s1 * s3 + c1 * s2 * c3
+ m[3, 0] = 0
+ m[0, 1] = c2 * s3
+ m[1, 1] = c1 * c3 + s1 * s2 * s3
+ m[2, 1] =-s1 * c3 + c1 * s2 * s3
+ m[3, 1] = 0
+ m[0, 2] =-s2
+ m[1, 2] = s1 * c2
+ m[2, 2] = c1 * c2
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -882,22 +882,22 @@ matrix4_from_euler_angles_yxz_f16 :: proc(yaw, pitch, roll: f16) -> (m: Matrix4f
cb := math.cos(roll)
sb := math.sin(roll)
- m[0][0] = ch * cb + sh * sp * sb
- m[0][1] = sb * cp
- m[0][2] = -sh * cb + ch * sp * sb
- m[0][3] = 0
- m[1][0] = -ch * sb + sh * sp * cb
- m[1][1] = cb * cp
- m[1][2] = sb * sh + ch * sp * cb
- m[1][3] = 0
- m[2][0] = sh * cp
- m[2][1] = -sp
- m[2][2] = ch * cp
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = ch * cb + sh * sp * sb
+ m[1, 0] = sb * cp
+ m[2, 0] = -sh * cb + ch * sp * sb
+ m[3, 0] = 0
+ m[0, 1] = -ch * sb + sh * sp * cb
+ m[1, 1] = cb * cp
+ m[2, 1] = sb * sh + ch * sp * cb
+ m[3, 1] = 0
+ m[0, 2] = sh * cp
+ m[1, 2] = -sp
+ m[2, 2] = ch * cp
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -909,22 +909,22 @@ matrix4_from_euler_angles_xzx_f16 :: proc(t1, t2, t3: f16) -> (m: Matrix4f16) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c2
- m[0][1] = c1 * s2
- m[0][2] = s1 * s2
- m[0][3] = 0
- m[1][0] =-c3 * s2
- m[1][1] = c1 * c2 * c3 - s1 * s3
- m[1][2] = c1 * s3 + c2 * c3 * s1
- m[1][3] = 0
- m[2][0] = s2 * s3
- m[2][1] =-c3 * s1 - c1 * c2 * s3
- m[2][2] = c1 * c3 - c2 * s1 * s3
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c2
+ m[1, 0] = c1 * s2
+ m[2, 0] = s1 * s2
+ m[3, 0] = 0
+ m[0, 1] =-c3 * s2
+ m[1, 1] = c1 * c2 * c3 - s1 * s3
+ m[2, 1] = c1 * s3 + c2 * c3 * s1
+ m[3, 1] = 0
+ m[0, 2] = s2 * s3
+ m[1, 2] =-c3 * s1 - c1 * c2 * s3
+ m[2, 2] = c1 * c3 - c2 * s1 * s3
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -936,22 +936,22 @@ matrix4_from_euler_angles_xyx_f16 :: proc(t1, t2, t3: f16) -> (m: Matrix4f16) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c2
- m[0][1] = s1 * s2
- m[0][2] =-c1 * s2
- m[0][3] = 0
- m[1][0] = s2 * s3
- m[1][1] = c1 * c3 - c2 * s1 * s3
- m[1][2] = c3 * s1 + c1 * c2 * s3
- m[1][3] = 0
- m[2][0] = c3 * s2
- m[2][1] =-c1 * s3 - c2 * c3 * s1
- m[2][2] = c1 * c2 * c3 - s1 * s3
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c2
+ m[1, 0] = s1 * s2
+ m[2, 0] =-c1 * s2
+ m[3, 0] = 0
+ m[0, 1] = s2 * s3
+ m[1, 1] = c1 * c3 - c2 * s1 * s3
+ m[2, 1] = c3 * s1 + c1 * c2 * s3
+ m[3, 1] = 0
+ m[0, 2] = c3 * s2
+ m[1, 2] =-c1 * s3 - c2 * c3 * s1
+ m[2, 2] = c1 * c2 * c3 - s1 * s3
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -963,22 +963,22 @@ matrix4_from_euler_angles_yxy_f16 :: proc(t1, t2, t3: f16) -> (m: Matrix4f16) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c3 - c2 * s1 * s3
- m[0][1] = s2* s3
- m[0][2] =-c3 * s1 - c1 * c2 * s3
- m[0][3] = 0
- m[1][0] = s1 * s2
- m[1][1] = c2
- m[1][2] = c1 * s2
- m[1][3] = 0
- m[2][0] = c1 * s3 + c2 * c3 * s1
- m[2][1] =-c3 * s2
- m[2][2] = c1 * c2 * c3 - s1 * s3
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c1 * c3 - c2 * s1 * s3
+ m[1, 0] = s2* s3
+ m[2, 0] =-c3 * s1 - c1 * c2 * s3
+ m[3, 0] = 0
+ m[0, 1] = s1 * s2
+ m[1, 1] = c2
+ m[2, 1] = c1 * s2
+ m[3, 1] = 0
+ m[0, 2] = c1 * s3 + c2 * c3 * s1
+ m[1, 2] =-c3 * s2
+ m[2, 2] = c1 * c2 * c3 - s1 * s3
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -990,22 +990,22 @@ matrix4_from_euler_angles_yzy_f16 :: proc(t1, t2, t3: f16) -> (m: Matrix4f16) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c2 * c3 - s1 * s3
- m[0][1] = c3 * s2
- m[0][2] =-c1 * s3 - c2 * c3 * s1
- m[0][3] = 0
- m[1][0] =-c1 * s2
- m[1][1] = c2
- m[1][2] = s1 * s2
- m[1][3] = 0
- m[2][0] = c3 * s1 + c1 * c2 * s3
- m[2][1] = s2 * s3
- m[2][2] = c1 * c3 - c2 * s1 * s3
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c1 * c2 * c3 - s1 * s3
+ m[1, 0] = c3 * s2
+ m[2, 0] =-c1 * s3 - c2 * c3 * s1
+ m[3, 0] = 0
+ m[0, 1] =-c1 * s2
+ m[1, 1] = c2
+ m[2, 1] = s1 * s2
+ m[3, 1] = 0
+ m[0, 2] = c3 * s1 + c1 * c2 * s3
+ m[1, 2] = s2 * s3
+ m[2, 2] = c1 * c3 - c2 * s1 * s3
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -1017,22 +1017,22 @@ matrix4_from_euler_angles_zyz_f16 :: proc(t1, t2, t3: f16) -> (m: Matrix4f16) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c2 * c3 - s1 * s3
- m[0][1] = c1 * s3 + c2 * c3 * s1
- m[0][2] =-c3 * s2
- m[0][3] = 0
- m[1][0] =-c3 * s1 - c1 * c2 * s3
- m[1][1] = c1 * c3 - c2 * s1 * s3
- m[1][2] = s2 * s3
- m[1][3] = 0
- m[2][0] = c1 * s2
- m[2][1] = s1 * s2
- m[2][2] = c2
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c1 * c2 * c3 - s1 * s3
+ m[1, 0] = c1 * s3 + c2 * c3 * s1
+ m[2, 0] =-c3 * s2
+ m[3, 0] = 0
+ m[0, 1] =-c3 * s1 - c1 * c2 * s3
+ m[1, 1] = c1 * c3 - c2 * s1 * s3
+ m[2, 1] = s2 * s3
+ m[3, 1] = 0
+ m[0, 2] = c1 * s2
+ m[1, 2] = s1 * s2
+ m[2, 2] = c2
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -1044,22 +1044,22 @@ matrix4_from_euler_angles_zxz_f16 :: proc(t1, t2, t3: f16) -> (m: Matrix4f16) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c3 - c2 * s1 * s3
- m[0][1] = c3 * s1 + c1 * c2 * s3
- m[0][2] = s2 *s3
- m[0][3] = 0
- m[1][0] =-c1 * s3 - c2 * c3 * s1
- m[1][1] = c1 * c2 * c3 - s1 * s3
- m[1][2] = c3 * s2
- m[1][3] = 0
- m[2][0] = s1 * s2
- m[2][1] =-c1 * s2
- m[2][2] = c2
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c1 * c3 - c2 * s1 * s3
+ m[1, 0] = c3 * s1 + c1 * c2 * s3
+ m[2, 0] = s2 *s3
+ m[3, 0] = 0
+ m[0, 1] =-c1 * s3 - c2 * c3 * s1
+ m[1, 1] = c1 * c2 * c3 - s1 * s3
+ m[2, 1] = c3 * s2
+ m[3, 1] = 0
+ m[0, 2] = s1 * s2
+ m[1, 2] =-c1 * s2
+ m[2, 2] = c2
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -1072,22 +1072,22 @@ matrix4_from_euler_angles_xzy_f16 :: proc(t1, t2, t3: f16) -> (m: Matrix4f16) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c2 * c3
- m[0][1] = s1 * s3 + c1 * c3 * s2
- m[0][2] = c3 * s1 * s2 - c1 * s3
- m[0][3] = 0
- m[1][0] =-s2
- m[1][1] = c1 * c2
- m[1][2] = c2 * s1
- m[1][3] = 0
- m[2][0] = c2 * s3
- m[2][1] = c1 * s2 * s3 - c3 * s1
- m[2][2] = c1 * c3 + s1 * s2 *s3
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c2 * c3
+ m[1, 0] = s1 * s3 + c1 * c3 * s2
+ m[2, 0] = c3 * s1 * s2 - c1 * s3
+ m[3, 0] = 0
+ m[0, 1] =-s2
+ m[1, 1] = c1 * c2
+ m[2, 1] = c2 * s1
+ m[3, 1] = 0
+ m[0, 2] = c2 * s3
+ m[1, 2] = c1 * s2 * s3 - c3 * s1
+ m[2, 2] = c1 * c3 + s1 * s2 *s3
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -1099,22 +1099,22 @@ matrix4_from_euler_angles_yzx_f16 :: proc(t1, t2, t3: f16) -> (m: Matrix4f16) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c2
- m[0][1] = s2
- m[0][2] =-c2 * s1
- m[0][3] = 0
- m[1][0] = s1 * s3 - c1 * c3 * s2
- m[1][1] = c2 * c3
- m[1][2] = c1 * s3 + c3 * s1 * s2
- m[1][3] = 0
- m[2][0] = c3 * s1 + c1 * s2 * s3
- m[2][1] =-c2 * s3
- m[2][2] = c1 * c3 - s1 * s2 * s3
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c1 * c2
+ m[1, 0] = s2
+ m[2, 0] =-c2 * s1
+ m[3, 0] = 0
+ m[0, 1] = s1 * s3 - c1 * c3 * s2
+ m[1, 1] = c2 * c3
+ m[2, 1] = c1 * s3 + c3 * s1 * s2
+ m[3, 1] = 0
+ m[0, 2] = c3 * s1 + c1 * s2 * s3
+ m[1, 2] =-c2 * s3
+ m[2, 2] = c1 * c3 - s1 * s2 * s3
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -1126,22 +1126,22 @@ matrix4_from_euler_angles_zyx_f16 :: proc(t1, t2, t3: f16) -> (m: Matrix4f16) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c2
- m[0][1] = c2 * s1
- m[0][2] =-s2
- m[0][3] = 0
- m[1][0] = c1 * s2 * s3 - c3 * s1
- m[1][1] = c1 * c3 + s1 * s2 * s3
- m[1][2] = c2 * s3
- m[1][3] = 0
- m[2][0] = s1 * s3 + c1 * c3 * s2
- m[2][1] = c3 * s1 * s2 - c1 * s3
- m[2][2] = c2 * c3
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c1 * c2
+ m[1, 0] = c2 * s1
+ m[2, 0] =-s2
+ m[3, 0] = 0
+ m[0, 1] = c1 * s2 * s3 - c3 * s1
+ m[1, 1] = c1 * c3 + s1 * s2 * s3
+ m[2, 1] = c2 * s3
+ m[3, 1] = 0
+ m[0, 2] = s1 * s3 + c1 * c3 * s2
+ m[1, 2] = c3 * s1 * s2 - c1 * s3
+ m[2, 2] = c2 * c3
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -1153,22 +1153,22 @@ matrix4_from_euler_angles_zxy_f16 :: proc(t1, t2, t3: f16) -> (m: Matrix4f16) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c3 - s1 * s2 * s3
- m[0][1] = c3 * s1 + c1 * s2 * s3
- m[0][2] =-c2 * s3
- m[0][3] = 0
- m[1][0] =-c2 * s1
- m[1][1] = c1 * c2
- m[1][2] = s2
- m[1][3] = 0
- m[2][0] = c1 * s3 + c3 * s1 * s2
- m[2][1] = s1 * s3 - c1 * c3 * s2
- m[2][2] = c2 * c3
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c1 * c3 - s1 * s2 * s3
+ m[1, 0] = c3 * s1 + c1 * s2 * s3
+ m[2, 0] =-c2 * s3
+ m[3, 0] = 0
+ m[0, 1] =-c2 * s1
+ m[1, 1] = c1 * c2
+ m[2, 1] = s2
+ m[3, 1] = 0
+ m[0, 2] = c1 * s3 + c3 * s1 * s2
+ m[1, 2] = s1 * s3 - c1 * c3 * s2
+ m[2, 2] = c2 * c3
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -1181,32 +1181,32 @@ matrix4_from_yaw_pitch_roll_f16 :: proc(yaw, pitch, roll: f16) -> (m: Matrix4f16
cb := math.cos(roll)
sb := math.sin(roll)
- m[0][0] = ch * cb + sh * sp * sb
- m[0][1] = sb * cp
- m[0][2] = -sh * cb + ch * sp * sb
- m[0][3] = 0
- m[1][0] = -ch * sb + sh * sp * cb
- m[1][1] = cb * cp
- m[1][2] = sb * sh + ch * sp * cb
- m[1][3] = 0
- m[2][0] = sh * cp
- m[2][1] = -sp
- m[2][2] = ch * cp
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = ch * cb + sh * sp * sb
+ m[1, 0] = sb * cp
+ m[2, 0] = -sh * cb + ch * sp * sb
+ m[3, 0] = 0
+ m[0, 1] = -ch * sb + sh * sp * cb
+ m[1, 1] = cb * cp
+ m[2, 1] = sb * sh + ch * sp * cb
+ m[3, 1] = 0
+ m[0, 2] = sh * cp
+ m[1, 2] = -sp
+ m[2, 2] = ch * cp
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return m
}
euler_angles_xyz_from_matrix4_f16 :: proc(m: Matrix4f16) -> (t1, t2, t3: f16) {
- T1 := math.atan2(m[2][1], m[2][2])
- C2 := math.sqrt(m[0][0]*m[0][0] + m[1][0]*m[1][0])
- T2 := math.atan2(-m[2][0], C2)
+ T1 := math.atan2(m[1, 2], m[2, 2])
+ C2 := math.sqrt(m[0, 0]*m[0, 0] + m[0, 1]*m[0, 1])
+ T2 := math.atan2(-m[0, 2], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[0][2] - C1*m[0][1], C1*m[1][1] - S1*m[1][2])
+ T3 := math.atan2(S1*m[2, 0] - C1*m[1, 0], C1*m[1, 1] - S1*m[2, 1])
t1 = -T1
t2 = -T2
t3 = -T3
@@ -1214,12 +1214,12 @@ euler_angles_xyz_from_matrix4_f16 :: proc(m: Matrix4f16) -> (t1, t2, t3: f16) {
}
euler_angles_yxz_from_matrix4_f16 :: proc(m: Matrix4f16) -> (t1, t2, t3: f16) {
- T1 := math.atan2(m[2][0], m[2][2])
- C2 := math.sqrt(m[0][1]*m[0][1] + m[1][1]*m[1][1])
- T2 := math.atan2(-m[2][1], C2)
+ T1 := math.atan2(m[0, 2], m[2, 2])
+ C2 := math.sqrt(m[1, 0]*m[1, 0] + m[1, 1]*m[1, 1])
+ T2 := math.atan2(-m[1, 2], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[1][2] - C1*m[1][0], C1*m[0][0] - S1*m[0][2])
+ T3 := math.atan2(S1*m[2, 1] - C1*m[0, 1], C1*m[0, 0] - S1*m[2, 0])
t1 = T1
t2 = T2
t3 = T3
@@ -1227,12 +1227,12 @@ euler_angles_yxz_from_matrix4_f16 :: proc(m: Matrix4f16) -> (t1, t2, t3: f16) {
}
euler_angles_xzx_from_matrix4_f16 :: proc(m: Matrix4f16) -> (t1, t2, t3: f16) {
- T1 := math.atan2(m[0][2], m[0][1])
- S2 := math.sqrt(m[1][0]*m[1][0] + m[2][0]*m[2][0])
- T2 := math.atan2(S2, m[0][0])
+ T1 := math.atan2(m[2, 0], m[1, 0])
+ S2 := math.sqrt(m[0, 1]*m[0, 1] + m[0, 2]*m[0, 2])
+ T2 := math.atan2(S2, m[0, 0])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(C1*m[1][2] - S1*m[1][1], C1*m[2][2] - S1*m[2][1])
+ T3 := math.atan2(C1*m[2, 1] - S1*m[1, 1], C1*m[2, 2] - S1*m[1, 2])
t1 = T1
t2 = T2
t3 = T3
@@ -1240,12 +1240,12 @@ euler_angles_xzx_from_matrix4_f16 :: proc(m: Matrix4f16) -> (t1, t2, t3: f16) {
}
euler_angles_xyx_from_matrix4_f16 :: proc(m: Matrix4f16) -> (t1, t2, t3: f16) {
- T1 := math.atan2(m[0][1], -m[0][2])
- S2 := math.sqrt(m[1][0]*m[1][0] + m[2][0]*m[2][0])
- T2 := math.atan2(S2, m[0][0])
+ T1 := math.atan2(m[1, 0], -m[2, 0])
+ S2 := math.sqrt(m[0, 1]*m[0, 1] + m[0, 2]*m[0, 2])
+ T2 := math.atan2(S2, m[0, 0])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(-C1*m[2][1] - S1*m[2][2], C1*m[1][1] + S1*m[1][2])
+ T3 := math.atan2(-C1*m[1, 2] - S1*m[2, 2], C1*m[1, 1] + S1*m[2, 1])
t1 = T1
t2 = T2
t3 = T3
@@ -1253,12 +1253,12 @@ euler_angles_xyx_from_matrix4_f16 :: proc(m: Matrix4f16) -> (t1, t2, t3: f16) {
}
euler_angles_yxy_from_matrix4_f16 :: proc(m: Matrix4f16) -> (t1, t2, t3: f16) {
- T1 := math.atan2(m[1][0], m[1][2])
- S2 := math.sqrt(m[0][1]*m[0][1] + m[2][1]*m[2][1])
- T2 := math.atan2(S2, m[1][1])
+ T1 := math.atan2(m[0, 1], m[2, 1])
+ S2 := math.sqrt(m[1, 0]*m[1, 0] + m[1, 2]*m[1, 2])
+ T2 := math.atan2(S2, m[1, 1])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(C1*m[2][0] - S1*m[2][2], C1*m[0][0] - S1*m[0][2])
+ T3 := math.atan2(C1*m[0, 2] - S1*m[2, 2], C1*m[0, 0] - S1*m[2, 0])
t1 = T1
t2 = T2
t3 = T3
@@ -1266,24 +1266,24 @@ euler_angles_yxy_from_matrix4_f16 :: proc(m: Matrix4f16) -> (t1, t2, t3: f16) {
}
euler_angles_yzy_from_matrix4_f16 :: proc(m: Matrix4f16) -> (t1, t2, t3: f16) {
- T1 := math.atan2(m[1][2], -m[1][0])
- S2 := math.sqrt(m[0][1]*m[0][1] + m[2][1]*m[2][1])
- T2 := math.atan2(S2, m[1][1])
+ T1 := math.atan2(m[2, 1], -m[0, 1])
+ S2 := math.sqrt(m[1, 0]*m[1, 0] + m[1, 2]*m[1, 2])
+ T2 := math.atan2(S2, m[1, 1])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(-S1*m[0][0] - C1*m[0][2], S1*m[2][0] + C1*m[2][2])
+ T3 := math.atan2(-S1*m[0, 0] - C1*m[2, 0], S1*m[0, 2] + C1*m[2, 2])
t1 = T1
t2 = T2
t3 = T3
return
}
euler_angles_zyz_from_matrix4_f16 :: proc(m: Matrix4f16) -> (t1, t2, t3: f16) {
- T1 := math.atan2(m[2][1], m[2][0])
- S2 := math.sqrt(m[0][2]*m[0][2] + m[1][2]*m[1][2])
- T2 := math.atan2(S2, m[2][2])
+ T1 := math.atan2(m[1, 2], m[0, 2])
+ S2 := math.sqrt(m[2, 0]*m[2, 0] + m[2, 1]*m[2, 1])
+ T2 := math.atan2(S2, m[2, 2])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(C1*m[0][1] - S1*m[0][0], C1*m[1][1] - S1*m[1][0])
+ T3 := math.atan2(C1*m[1, 0] - S1*m[0, 0], C1*m[1, 1] - S1*m[0, 1])
t1 = T1
t2 = T2
t3 = T3
@@ -1291,12 +1291,12 @@ euler_angles_zyz_from_matrix4_f16 :: proc(m: Matrix4f16) -> (t1, t2, t3: f16) {
}
euler_angles_zxz_from_matrix4_f16 :: proc(m: Matrix4f16) -> (t1, t2, t3: f16) {
- T1 := math.atan2(m[2][0], -m[2][1])
- S2 := math.sqrt(m[0][2]*m[0][2] + m[1][2]*m[1][2])
- T2 := math.atan2(S2, m[2][2])
+ T1 := math.atan2(m[0, 2], -m[1, 2])
+ S2 := math.sqrt(m[2, 0]*m[2, 0] + m[2, 1]*m[2, 1])
+ T2 := math.atan2(S2, m[2, 2])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(-C1*m[1][0] - S1*m[1][1], C1*m[0][0] + S1*m[0][1])
+ T3 := math.atan2(-C1*m[0, 1] - S1*m[1, 1], C1*m[0, 0] + S1*m[1, 0])
t1 = T1
t2 = T2
t3 = T3
@@ -1304,12 +1304,12 @@ euler_angles_zxz_from_matrix4_f16 :: proc(m: Matrix4f16) -> (t1, t2, t3: f16) {
}
euler_angles_xzy_from_matrix4_f16 :: proc(m: Matrix4f16) -> (t1, t2, t3: f16) {
- T1 := math.atan2(m[1][2], m[1][1])
- C2 := math.sqrt(m[0][0]*m[0][0] + m[2][0]*m[2][0])
- T2 := math.atan2(-m[1][0], C2)
+ T1 := math.atan2(m[2, 1], m[1, 1])
+ C2 := math.sqrt(m[0, 0]*m[0, 0] + m[0, 2]*m[0, 2])
+ T2 := math.atan2(-m[0, 1], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[0][1] - C1*m[0][2], C1*m[2][2] - S1*m[2][1])
+ T3 := math.atan2(S1*m[1, 0] - C1*m[2, 0], C1*m[2, 2] - S1*m[1, 2])
t1 = T1
t2 = T2
t3 = T3
@@ -1317,12 +1317,12 @@ euler_angles_xzy_from_matrix4_f16 :: proc(m: Matrix4f16) -> (t1, t2, t3: f16) {
}
euler_angles_yzx_from_matrix4_f16 :: proc(m: Matrix4f16) -> (t1, t2, t3: f16) {
- T1 := math.atan2(-m[0][2], m[0][0])
- C2 := math.sqrt(m[1][1]*m[1][1] + m[2][1]*m[2][1])
- T2 := math.atan2(m[0][1], C2)
+ T1 := math.atan2(-m[2, 0], m[0, 0])
+ C2 := math.sqrt(m[1, 1]*m[1, 1] + m[1, 2]*m[1, 2])
+ T2 := math.atan2(m[1, 0], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[1][0] + C1*m[1][2], S1*m[2][0] + C1*m[2][2])
+ T3 := math.atan2(S1*m[0, 1] + C1*m[2, 1], S1*m[0, 2] + C1*m[2, 2])
t1 = T1
t2 = T2
t3 = T3
@@ -1330,12 +1330,12 @@ euler_angles_yzx_from_matrix4_f16 :: proc(m: Matrix4f16) -> (t1, t2, t3: f16) {
}
euler_angles_zyx_from_matrix4_f16 :: proc(m: Matrix4f16) -> (t1, t2, t3: f16) {
- T1 := math.atan2(m[0][1], m[0][0])
- C2 := math.sqrt(m[1][2]*m[1][2] + m[2][2]*m[2][2])
- T2 := math.atan2(-m[0][2], C2)
+ T1 := math.atan2(m[1, 0], m[0, 0])
+ C2 := math.sqrt(m[2, 1]*m[2, 1] + m[2, 2]*m[2, 2])
+ T2 := math.atan2(-m[2, 0], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[2][0] - C1*m[2][1], C1*m[1][1] - S1*m[1][0])
+ T3 := math.atan2(S1*m[0, 2] - C1*m[1, 2], C1*m[1, 1] - S1*m[0, 1])
t1 = T1
t2 = T2
t3 = T3
@@ -1343,12 +1343,12 @@ euler_angles_zyx_from_matrix4_f16 :: proc(m: Matrix4f16) -> (t1, t2, t3: f16) {
}
euler_angles_zxy_from_matrix4_f16 :: proc(m: Matrix4f16) -> (t1, t2, t3: f16) {
- T1 := math.atan2(-m[1][0], m[1][1])
- C2 := math.sqrt(m[0][2]*m[0][2] + m[2][2]*m[2][2])
- T2 := math.atan2(m[1][2], C2)
+ T1 := math.atan2(-m[0, 1], m[1, 1])
+ C2 := math.sqrt(m[2, 0]*m[2, 0] + m[2, 2]*m[2, 2])
+ T2 := math.atan2(m[2, 1], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(C1*m[2][0] + S1*m[2][1], C1*m[0][0] + S1*m[0][1])
+ T3 := math.atan2(C1*m[0, 2] + S1*m[1, 2], C1*m[0, 0] + S1*m[1, 0])
t1 = T1
t2 = T2
t3 = T3
diff --git a/core/math/linalg/specific_euler_angles_f32.odin b/core/math/linalg/specific_euler_angles_f32.odin
index 6ae1b0fa0..80e19ce85 100644
--- a/core/math/linalg/specific_euler_angles_f32.odin
+++ b/core/math/linalg/specific_euler_angles_f32.odin
@@ -212,29 +212,29 @@ euler_angles_zxy_from_quaternion_f32 :: proc(q: Quaternionf32) -> (t1, t2, t3: f
matrix3_from_euler_angle_x_f32 :: proc(angle_x: f32) -> (m: Matrix3f32) {
cos_x, sin_x := math.cos(angle_x), math.sin(angle_x)
- m[0][0] = 1
- m[1][1] = +cos_x
- m[2][1] = +sin_x
- m[1][2] = -sin_x
- m[2][2] = +cos_x
+ m[0, 0] = 1
+ m[1, 1] = +cos_x
+ m[1, 2] = +sin_x
+ m[2, 1] = -sin_x
+ m[2, 2] = +cos_x
return
}
matrix3_from_euler_angle_y_f32 :: proc(angle_y: f32) -> (m: Matrix3f32) {
cos_y, sin_y := math.cos(angle_y), math.sin(angle_y)
- m[0][0] = +cos_y
- m[2][0] = -sin_y
- m[1][1] = 1
- m[0][2] = +sin_y
- m[2][2] = +cos_y
+ m[0, 0] = +cos_y
+ m[0, 2] = -sin_y
+ m[1, 1] = 1
+ m[2, 0] = +sin_y
+ m[2, 2] = +cos_y
return
}
matrix3_from_euler_angle_z_f32 :: proc(angle_z: f32) -> (m: Matrix3f32) {
cos_z, sin_z := math.cos(angle_z), math.sin(angle_z)
- m[0][0] = +cos_z
- m[1][0] = +sin_z
- m[1][1] = +cos_z
- m[0][1] = -sin_z
- m[2][2] = 1
+ m[0, 0] = +cos_z
+ m[0, 1] = +sin_z
+ m[1, 1] = +cos_z
+ m[1, 0] = -sin_z
+ m[2, 2] = 1
return
}
@@ -242,31 +242,31 @@ matrix3_from_euler_angle_z_f32 :: proc(angle_z: f32) -> (m: Matrix3f32) {
matrix3_from_derived_euler_angle_x_f32 :: proc(angle_x: f32, angular_velocity_x: f32) -> (m: Matrix3f32) {
cos_x := math.cos(angle_x) * angular_velocity_x
sin_x := math.sin(angle_x) * angular_velocity_x
- m[0][0] = 1
- m[1][1] = +cos_x
- m[2][1] = +sin_x
- m[1][2] = -sin_x
- m[2][2] = +cos_x
+ m[0, 0] = 1
+ m[1, 1] = +cos_x
+ m[1, 2] = +sin_x
+ m[2, 1] = -sin_x
+ m[2, 2] = +cos_x
return
}
matrix3_from_derived_euler_angle_y_f32 :: proc(angle_y: f32, angular_velocity_y: f32) -> (m: Matrix3f32) {
cos_y := math.cos(angle_y) * angular_velocity_y
sin_y := math.sin(angle_y) * angular_velocity_y
- m[0][0] = +cos_y
- m[2][0] = -sin_y
- m[1][1] = 1
- m[0][2] = +sin_y
- m[2][2] = +cos_y
+ m[0, 0] = +cos_y
+ m[0, 2] = -sin_y
+ m[1, 1] = 1
+ m[2, 0] = +sin_y
+ m[2, 2] = +cos_y
return
}
matrix3_from_derived_euler_angle_z_f32 :: proc(angle_z: f32, angular_velocity_z: f32) -> (m: Matrix3f32) {
cos_z := math.cos(angle_z) * angular_velocity_z
sin_z := math.sin(angle_z) * angular_velocity_z
- m[0][0] = +cos_z
- m[1][0] = +sin_z
- m[1][1] = +cos_z
- m[0][1] = -sin_z
- m[2][2] = 1
+ m[0, 0] = +cos_z
+ m[0, 1] = +sin_z
+ m[1, 1] = +cos_z
+ m[1, 0] = -sin_z
+ m[2, 2] = 1
return
}
@@ -274,14 +274,14 @@ matrix3_from_derived_euler_angle_z_f32 :: proc(angle_z: f32, angular_velocity_z:
matrix3_from_euler_angles_xy_f32 :: proc(angle_x, angle_y: f32) -> (m: Matrix3f32) {
cos_x, sin_x := math.cos(angle_x), math.sin(angle_x)
cos_y, sin_y := math.cos(angle_y), math.sin(angle_y)
- m[0][0] = cos_y
- m[1][0] = -sin_x * - sin_y
- m[2][0] = -cos_x * - sin_y
- m[1][1] = cos_x
- m[2][1] = sin_x
- m[0][2] = sin_y
- m[1][2] = -sin_x * cos_y
- m[2][2] = cos_x * cos_y
+ m[0, 0] = cos_y
+ m[0, 1] = -sin_x * - sin_y
+ m[0, 2] = -cos_x * - sin_y
+ m[1, 1] = cos_x
+ m[1, 2] = sin_x
+ m[2, 0] = sin_y
+ m[2, 1] = -sin_x * cos_y
+ m[2, 2] = cos_x * cos_y
return
}
@@ -289,14 +289,14 @@ matrix3_from_euler_angles_xy_f32 :: proc(angle_x, angle_y: f32) -> (m: Matrix3f3
matrix3_from_euler_angles_yx_f32 :: proc(angle_y, angle_x: f32) -> (m: Matrix3f32) {
cos_x, sin_x := math.cos(angle_x), math.sin(angle_x)
cos_y, sin_y := math.cos(angle_y), math.sin(angle_y)
- m[0][0] = cos_y
- m[2][0] = -sin_y
- m[0][1] = sin_y*sin_x
- m[1][1] = cos_x
- m[2][1] = cos_y*sin_x
- m[0][2] = sin_y*cos_x
- m[1][2] = -sin_x
- m[2][2] = cos_y*cos_x
+ m[0, 0] = cos_y
+ m[0, 2] = -sin_y
+ m[1, 0] = sin_y*sin_x
+ m[1, 1] = cos_x
+ m[1, 2] = cos_y*sin_x
+ m[2, 0] = sin_y*cos_x
+ m[2, 1] = -sin_x
+ m[2, 2] = cos_y*cos_x
return
}
@@ -322,15 +322,15 @@ matrix3_from_euler_angles_xyz_f32 :: proc(t1, t2, t3: f32) -> (m: Matrix3f32) {
s2 := math.sin(-t2)
s3 := math.sin(-t3)
- m[0][0] = c2 * c3
- m[0][1] =-c1 * s3 + s1 * s2 * c3
- m[0][2] = s1 * s3 + c1 * s2 * c3
- m[1][0] = c2 * s3
- m[1][1] = c1 * c3 + s1 * s2 * s3
- m[1][2] =-s1 * c3 + c1 * s2 * s3
- m[2][0] =-s2
- m[2][1] = s1 * c2
- m[2][2] = c1 * c2
+ m[0, 0] = c2 * c3
+ m[1, 0] =-c1 * s3 + s1 * s2 * c3
+ m[2, 0] = s1 * s3 + c1 * s2 * c3
+ m[0, 1] = c2 * s3
+ m[1, 1] = c1 * c3 + s1 * s2 * s3
+ m[2, 1] =-s1 * c3 + c1 * s2 * s3
+ m[0, 2] =-s2
+ m[1, 2] = s1 * c2
+ m[2, 2] = c1 * c2
return
}
@@ -342,15 +342,15 @@ matrix3_from_euler_angles_yxz_f32 :: proc(yaw, pitch, roll: f32) -> (m: Matrix3f
cb := math.cos(roll)
sb := math.sin(roll)
- m[0][0] = ch * cb + sh * sp * sb
- m[0][1] = sb * cp
- m[0][2] = -sh * cb + ch * sp * sb
- m[1][0] = -ch * sb + sh * sp * cb
- m[1][1] = cb * cp
- m[1][2] = sb * sh + ch * sp * cb
- m[2][0] = sh * cp
- m[2][1] = -sp
- m[2][2] = ch * cp
+ m[0, 0] = ch * cb + sh * sp * sb
+ m[1, 0] = sb * cp
+ m[2, 0] = -sh * cb + ch * sp * sb
+ m[0, 1] = -ch * sb + sh * sp * cb
+ m[1, 1] = cb * cp
+ m[2, 1] = sb * sh + ch * sp * cb
+ m[0, 2] = sh * cp
+ m[1, 2] = -sp
+ m[2, 2] = ch * cp
return
}
@@ -362,15 +362,15 @@ matrix3_from_euler_angles_xzx_f32 :: proc(t1, t2, t3: f32) -> (m: Matrix3f32) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c2
- m[0][1] = c1 * s2
- m[0][2] = s1 * s2
- m[1][0] =-c3 * s2
- m[1][1] = c1 * c2 * c3 - s1 * s3
- m[1][2] = c1 * s3 + c2 * c3 * s1
- m[2][0] = s2 * s3
- m[2][1] =-c3 * s1 - c1 * c2 * s3
- m[2][2] = c1 * c3 - c2 * s1 * s3
+ m[0, 0] = c2
+ m[1, 0] = c1 * s2
+ m[2, 0] = s1 * s2
+ m[0, 1] =-c3 * s2
+ m[1, 1] = c1 * c2 * c3 - s1 * s3
+ m[2, 1] = c1 * s3 + c2 * c3 * s1
+ m[0, 2] = s2 * s3
+ m[1, 2] =-c3 * s1 - c1 * c2 * s3
+ m[2, 2] = c1 * c3 - c2 * s1 * s3
return
}
@@ -382,15 +382,15 @@ matrix3_from_euler_angles_xyx_f32 :: proc(t1, t2, t3: f32) -> (m: Matrix3f32) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c2
- m[0][1] = s1 * s2
- m[0][2] =-c1 * s2
- m[1][0] = s2 * s3
- m[1][1] = c1 * c3 - c2 * s1 * s3
- m[1][2] = c3 * s1 + c1 * c2 * s3
- m[2][0] = c3 * s2
- m[2][1] =-c1 * s3 - c2 * c3 * s1
- m[2][2] = c1 * c2 * c3 - s1 * s3
+ m[0, 0] = c2
+ m[1, 0] = s1 * s2
+ m[2, 0] =-c1 * s2
+ m[0, 1] = s2 * s3
+ m[1, 1] = c1 * c3 - c2 * s1 * s3
+ m[2, 1] = c3 * s1 + c1 * c2 * s3
+ m[0, 2] = c3 * s2
+ m[1, 2] =-c1 * s3 - c2 * c3 * s1
+ m[2, 2] = c1 * c2 * c3 - s1 * s3
return
}
@@ -402,15 +402,15 @@ matrix3_from_euler_angles_yxy_f32 :: proc(t1, t2, t3: f32) -> (m: Matrix3f32) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c3 - c2 * s1 * s3
- m[0][1] = s2* s3
- m[0][2] =-c3 * s1 - c1 * c2 * s3
- m[1][0] = s1 * s2
- m[1][1] = c2
- m[1][2] = c1 * s2
- m[2][0] = c1 * s3 + c2 * c3 * s1
- m[2][1] =-c3 * s2
- m[2][2] = c1 * c2 * c3 - s1 * s3
+ m[0, 0] = c1 * c3 - c2 * s1 * s3
+ m[1, 0] = s2* s3
+ m[2, 0] =-c3 * s1 - c1 * c2 * s3
+ m[0, 1] = s1 * s2
+ m[1, 1] = c2
+ m[2, 1] = c1 * s2
+ m[0, 2] = c1 * s3 + c2 * c3 * s1
+ m[1, 2] =-c3 * s2
+ m[2, 2] = c1 * c2 * c3 - s1 * s3
return
}
@@ -422,15 +422,15 @@ matrix3_from_euler_angles_yzy_f32 :: proc(t1, t2, t3: f32) -> (m: Matrix3f32) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c2 * c3 - s1 * s3
- m[0][1] = c3 * s2
- m[0][2] =-c1 * s3 - c2 * c3 * s1
- m[1][0] =-c1 * s2
- m[1][1] = c2
- m[1][2] = s1 * s2
- m[2][0] = c3 * s1 + c1 * c2 * s3
- m[2][1] = s2 * s3
- m[2][2] = c1 * c3 - c2 * s1 * s3
+ m[0, 0] = c1 * c2 * c3 - s1 * s3
+ m[1, 0] = c3 * s2
+ m[2, 0] =-c1 * s3 - c2 * c3 * s1
+ m[0, 1] =-c1 * s2
+ m[1, 1] = c2
+ m[2, 1] = s1 * s2
+ m[0, 2] = c3 * s1 + c1 * c2 * s3
+ m[1, 2] = s2 * s3
+ m[2, 2] = c1 * c3 - c2 * s1 * s3
return
}
@@ -442,15 +442,15 @@ matrix3_from_euler_angles_zyz_f32 :: proc(t1, t2, t3: f32) -> (m: Matrix3f32) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c2 * c3 - s1 * s3
- m[0][1] = c1 * s3 + c2 * c3 * s1
- m[0][2] =-c3 * s2
- m[1][0] =-c3 * s1 - c1 * c2 * s3
- m[1][1] = c1 * c3 - c2 * s1 * s3
- m[1][2] = s2 * s3
- m[2][0] = c1 * s2
- m[2][1] = s1 * s2
- m[2][2] = c2
+ m[0, 0] = c1 * c2 * c3 - s1 * s3
+ m[1, 0] = c1 * s3 + c2 * c3 * s1
+ m[2, 0] =-c3 * s2
+ m[0, 1] =-c3 * s1 - c1 * c2 * s3
+ m[1, 1] = c1 * c3 - c2 * s1 * s3
+ m[2, 1] = s2 * s3
+ m[0, 2] = c1 * s2
+ m[1, 2] = s1 * s2
+ m[2, 2] = c2
return
}
@@ -462,15 +462,15 @@ matrix3_from_euler_angles_zxz_f32 :: proc(t1, t2, t3: f32) -> (m: Matrix3f32) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c3 - c2 * s1 * s3
- m[0][1] = c3 * s1 + c1 * c2 * s3
- m[0][2] = s2 *s3
- m[1][0] =-c1 * s3 - c2 * c3 * s1
- m[1][1] = c1 * c2 * c3 - s1 * s3
- m[1][2] = c3 * s2
- m[2][0] = s1 * s2
- m[2][1] =-c1 * s2
- m[2][2] = c2
+ m[0, 0] = c1 * c3 - c2 * s1 * s3
+ m[1, 0] = c3 * s1 + c1 * c2 * s3
+ m[2, 0] = s2 *s3
+ m[0, 1] =-c1 * s3 - c2 * c3 * s1
+ m[1, 1] = c1 * c2 * c3 - s1 * s3
+ m[2, 1] = c3 * s2
+ m[0, 2] = s1 * s2
+ m[1, 2] =-c1 * s2
+ m[2, 2] = c2
return
}
@@ -483,15 +483,15 @@ matrix3_from_euler_angles_xzy_f32 :: proc(t1, t2, t3: f32) -> (m: Matrix3f32) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c2 * c3
- m[0][1] = s1 * s3 + c1 * c3 * s2
- m[0][2] = c3 * s1 * s2 - c1 * s3
- m[1][0] =-s2
- m[1][1] = c1 * c2
- m[1][2] = c2 * s1
- m[2][0] = c2 * s3
- m[2][1] = c1 * s2 * s3 - c3 * s1
- m[2][2] = c1 * c3 + s1 * s2 *s3
+ m[0, 0] = c2 * c3
+ m[1, 0] = s1 * s3 + c1 * c3 * s2
+ m[2, 0] = c3 * s1 * s2 - c1 * s3
+ m[0, 1] =-s2
+ m[1, 1] = c1 * c2
+ m[2, 1] = c2 * s1
+ m[0, 2] = c2 * s3
+ m[1, 2] = c1 * s2 * s3 - c3 * s1
+ m[2, 2] = c1 * c3 + s1 * s2 *s3
return
}
@@ -503,15 +503,15 @@ matrix3_from_euler_angles_yzx_f32 :: proc(t1, t2, t3: f32) -> (m: Matrix3f32) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c2
- m[0][1] = s2
- m[0][2] =-c2 * s1
- m[1][0] = s1 * s3 - c1 * c3 * s2
- m[1][1] = c2 * c3
- m[1][2] = c1 * s3 + c3 * s1 * s2
- m[2][0] = c3 * s1 + c1 * s2 * s3
- m[2][1] =-c2 * s3
- m[2][2] = c1 * c3 - s1 * s2 * s3
+ m[0, 0] = c1 * c2
+ m[1, 0] = s2
+ m[2, 0] =-c2 * s1
+ m[0, 1] = s1 * s3 - c1 * c3 * s2
+ m[1, 1] = c2 * c3
+ m[2, 1] = c1 * s3 + c3 * s1 * s2
+ m[0, 2] = c3 * s1 + c1 * s2 * s3
+ m[1, 2] =-c2 * s3
+ m[2, 2] = c1 * c3 - s1 * s2 * s3
return
}
@@ -523,15 +523,15 @@ matrix3_from_euler_angles_zyx_f32 :: proc(t1, t2, t3: f32) -> (m: Matrix3f32) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c2
- m[0][1] = c2 * s1
- m[0][2] =-s2
- m[1][0] = c1 * s2 * s3 - c3 * s1
- m[1][1] = c1 * c3 + s1 * s2 * s3
- m[1][2] = c2 * s3
- m[2][0] = s1 * s3 + c1 * c3 * s2
- m[2][1] = c3 * s1 * s2 - c1 * s3
- m[2][2] = c2 * c3
+ m[0, 0] = c1 * c2
+ m[1, 0] = c2 * s1
+ m[2, 0] =-s2
+ m[0, 1] = c1 * s2 * s3 - c3 * s1
+ m[1, 1] = c1 * c3 + s1 * s2 * s3
+ m[2, 1] = c2 * s3
+ m[0, 2] = s1 * s3 + c1 * c3 * s2
+ m[1, 2] = c3 * s1 * s2 - c1 * s3
+ m[2, 2] = c2 * c3
return
}
@@ -543,15 +543,15 @@ matrix3_from_euler_angles_zxy_f32 :: proc(t1, t2, t3: f32) -> (m: Matrix3f32) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c3 - s1 * s2 * s3
- m[0][1] = c3 * s1 + c1 * s2 * s3
- m[0][2] =-c2 * s3
- m[1][0] =-c2 * s1
- m[1][1] = c1 * c2
- m[1][2] = s2
- m[2][0] = c1 * s3 + c3 * s1 * s2
- m[2][1] = s1 * s3 - c1 * c3 * s2
- m[2][2] = c2 * c3
+ m[0, 0] = c1 * c3 - s1 * s2 * s3
+ m[1, 0] = c3 * s1 + c1 * s2 * s3
+ m[2, 0] =-c2 * s3
+ m[0, 1] =-c2 * s1
+ m[1, 1] = c1 * c2
+ m[2, 1] = s2
+ m[0, 2] = c1 * s3 + c3 * s1 * s2
+ m[1, 2] = s1 * s3 - c1 * c3 * s2
+ m[2, 2] = c2 * c3
return
}
@@ -564,25 +564,25 @@ matrix3_from_yaw_pitch_roll_f32 :: proc(yaw, pitch, roll: f32) -> (m: Matrix3f32
cb := math.cos(roll)
sb := math.sin(roll)
- m[0][0] = ch * cb + sh * sp * sb
- m[0][1] = sb * cp
- m[0][2] = -sh * cb + ch * sp * sb
- m[1][0] = -ch * sb + sh * sp * cb
- m[1][1] = cb * cp
- m[1][2] = sb * sh + ch * sp * cb
- m[2][0] = sh * cp
- m[2][1] = -sp
- m[2][2] = ch * cp
+ m[0, 0] = ch * cb + sh * sp * sb
+ m[1, 0] = sb * cp
+ m[2, 0] = -sh * cb + ch * sp * sb
+ m[0, 1] = -ch * sb + sh * sp * cb
+ m[1, 1] = cb * cp
+ m[2, 1] = sb * sh + ch * sp * cb
+ m[0, 2] = sh * cp
+ m[1, 2] = -sp
+ m[2, 2] = ch * cp
return m
}
euler_angles_xyz_from_matrix3_f32 :: proc(m: Matrix3f32) -> (t1, t2, t3: f32) {
- T1 := math.atan2(m[2][1], m[2][2])
- C2 := math.sqrt(m[0][0]*m[0][0] + m[1][0]*m[1][0])
- T2 := math.atan2(-m[2][0], C2)
+ T1 := math.atan2(m[1, 2], m[2, 2])
+ C2 := math.sqrt(m[0, 0]*m[0, 0] + m[0, 1]*m[0, 1])
+ T2 := math.atan2(-m[0, 2], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[0][2] - C1*m[0][1], C1*m[1][1] - S1*m[1][2])
+ T3 := math.atan2(S1*m[2, 0] - C1*m[1, 0], C1*m[1, 1] - S1*m[2, 1])
t1 = -T1
t2 = -T2
t3 = -T3
@@ -590,12 +590,12 @@ euler_angles_xyz_from_matrix3_f32 :: proc(m: Matrix3f32) -> (t1, t2, t3: f32) {
}
euler_angles_yxz_from_matrix3_f32 :: proc(m: Matrix3f32) -> (t1, t2, t3: f32) {
- T1 := math.atan2(m[2][0], m[2][2])
- C2 := math.sqrt(m[0][1]*m[0][1] + m[1][1]*m[1][1])
- T2 := math.atan2(-m[2][1], C2)
+ T1 := math.atan2(m[0, 2], m[2, 2])
+ C2 := math.sqrt(m[1, 0]*m[1, 0] + m[1, 1]*m[1, 1])
+ T2 := math.atan2(-m[1, 2], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[1][2] - C1*m[1][0], C1*m[0][0] - S1*m[0][2])
+ T3 := math.atan2(S1*m[2, 1] - C1*m[0, 1], C1*m[0, 0] - S1*m[2, 0])
t1 = T1
t2 = T2
t3 = T3
@@ -603,12 +603,12 @@ euler_angles_yxz_from_matrix3_f32 :: proc(m: Matrix3f32) -> (t1, t2, t3: f32) {
}
euler_angles_xzx_from_matrix3_f32 :: proc(m: Matrix3f32) -> (t1, t2, t3: f32) {
- T1 := math.atan2(m[0][2], m[0][1])
- S2 := math.sqrt(m[1][0]*m[1][0] + m[2][0]*m[2][0])
- T2 := math.atan2(S2, m[0][0])
+ T1 := math.atan2(m[2, 0], m[1, 0])
+ S2 := math.sqrt(m[0, 1]*m[0, 1] + m[0, 2]*m[0, 2])
+ T2 := math.atan2(S2, m[0, 0])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(C1*m[1][2] - S1*m[1][1], C1*m[2][2] - S1*m[2][1])
+ T3 := math.atan2(C1*m[2, 1] - S1*m[1, 1], C1*m[2, 2] - S1*m[1, 2])
t1 = T1
t2 = T2
t3 = T3
@@ -616,12 +616,12 @@ euler_angles_xzx_from_matrix3_f32 :: proc(m: Matrix3f32) -> (t1, t2, t3: f32) {
}
euler_angles_xyx_from_matrix3_f32 :: proc(m: Matrix3f32) -> (t1, t2, t3: f32) {
- T1 := math.atan2(m[0][1], -m[0][2])
- S2 := math.sqrt(m[1][0]*m[1][0] + m[2][0]*m[2][0])
- T2 := math.atan2(S2, m[0][0])
+ T1 := math.atan2(m[1, 0], -m[2, 0])
+ S2 := math.sqrt(m[0, 1]*m[0, 1] + m[0, 2]*m[0, 2])
+ T2 := math.atan2(S2, m[0, 0])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(-C1*m[2][1] - S1*m[2][2], C1*m[1][1] + S1*m[1][2])
+ T3 := math.atan2(-C1*m[1, 2] - S1*m[2, 2], C1*m[1, 1] + S1*m[2, 1])
t1 = T1
t2 = T2
t3 = T3
@@ -629,12 +629,12 @@ euler_angles_xyx_from_matrix3_f32 :: proc(m: Matrix3f32) -> (t1, t2, t3: f32) {
}
euler_angles_yxy_from_matrix3_f32 :: proc(m: Matrix3f32) -> (t1, t2, t3: f32) {
- T1 := math.atan2(m[1][0], m[1][2])
- S2 := math.sqrt(m[0][1]*m[0][1] + m[2][1]*m[2][1])
- T2 := math.atan2(S2, m[1][1])
+ T1 := math.atan2(m[0, 1], m[2, 1])
+ S2 := math.sqrt(m[1, 0]*m[1, 0] + m[1, 2]*m[1, 2])
+ T2 := math.atan2(S2, m[1, 1])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(C1*m[2][0] - S1*m[2][2], C1*m[0][0] - S1*m[0][2])
+ T3 := math.atan2(C1*m[0, 2] - S1*m[2, 2], C1*m[0, 0] - S1*m[2, 0])
t1 = T1
t2 = T2
t3 = T3
@@ -642,24 +642,24 @@ euler_angles_yxy_from_matrix3_f32 :: proc(m: Matrix3f32) -> (t1, t2, t3: f32) {
}
euler_angles_yzy_from_matrix3_f32 :: proc(m: Matrix3f32) -> (t1, t2, t3: f32) {
- T1 := math.atan2(m[1][2], -m[1][0])
- S2 := math.sqrt(m[0][1]*m[0][1] + m[2][1]*m[2][1])
- T2 := math.atan2(S2, m[1][1])
+ T1 := math.atan2(m[2, 1], -m[0, 1])
+ S2 := math.sqrt(m[1, 0]*m[1, 0] + m[1, 2]*m[1, 2])
+ T2 := math.atan2(S2, m[1, 1])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(-S1*m[0][0] - C1*m[0][2], S1*m[2][0] + C1*m[2][2])
+ T3 := math.atan2(-S1*m[0, 0] - C1*m[2, 0], S1*m[0, 2] + C1*m[2, 2])
t1 = T1
t2 = T2
t3 = T3
return
}
euler_angles_zyz_from_matrix3_f32 :: proc(m: Matrix3f32) -> (t1, t2, t3: f32) {
- T1 := math.atan2(m[2][1], m[2][0])
- S2 := math.sqrt(m[0][2]*m[0][2] + m[1][2]*m[1][2])
- T2 := math.atan2(S2, m[2][2])
+ T1 := math.atan2(m[1, 2], m[0, 2])
+ S2 := math.sqrt(m[2, 0]*m[2, 0] + m[2, 1]*m[2, 1])
+ T2 := math.atan2(S2, m[2, 2])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(C1*m[0][1] - S1*m[0][0], C1*m[1][1] - S1*m[1][0])
+ T3 := math.atan2(C1*m[1, 0] - S1*m[0, 0], C1*m[1, 1] - S1*m[0, 1])
t1 = T1
t2 = T2
t3 = T3
@@ -667,12 +667,12 @@ euler_angles_zyz_from_matrix3_f32 :: proc(m: Matrix3f32) -> (t1, t2, t3: f32) {
}
euler_angles_zxz_from_matrix3_f32 :: proc(m: Matrix3f32) -> (t1, t2, t3: f32) {
- T1 := math.atan2(m[2][0], -m[2][1])
- S2 := math.sqrt(m[0][2]*m[0][2] + m[1][2]*m[1][2])
- T2 := math.atan2(S2, m[2][2])
+ T1 := math.atan2(m[0, 2], -m[1, 2])
+ S2 := math.sqrt(m[2, 0]*m[2, 0] + m[2, 1]*m[2, 1])
+ T2 := math.atan2(S2, m[2, 2])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(-C1*m[1][0] - S1*m[1][1], C1*m[0][0] + S1*m[0][1])
+ T3 := math.atan2(-C1*m[0, 1] - S1*m[1, 1], C1*m[0, 0] + S1*m[1, 0])
t1 = T1
t2 = T2
t3 = T3
@@ -680,12 +680,12 @@ euler_angles_zxz_from_matrix3_f32 :: proc(m: Matrix3f32) -> (t1, t2, t3: f32) {
}
euler_angles_xzy_from_matrix3_f32 :: proc(m: Matrix3f32) -> (t1, t2, t3: f32) {
- T1 := math.atan2(m[1][2], m[1][1])
- C2 := math.sqrt(m[0][0]*m[0][0] + m[2][0]*m[2][0])
- T2 := math.atan2(-m[1][0], C2)
+ T1 := math.atan2(m[2, 1], m[1, 1])
+ C2 := math.sqrt(m[0, 0]*m[0, 0] + m[0, 2]*m[0, 2])
+ T2 := math.atan2(-m[0, 1], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[0][1] - C1*m[0][2], C1*m[2][2] - S1*m[2][1])
+ T3 := math.atan2(S1*m[1, 0] - C1*m[2, 0], C1*m[2, 2] - S1*m[1, 2])
t1 = T1
t2 = T2
t3 = T3
@@ -693,12 +693,12 @@ euler_angles_xzy_from_matrix3_f32 :: proc(m: Matrix3f32) -> (t1, t2, t3: f32) {
}
euler_angles_yzx_from_matrix3_f32 :: proc(m: Matrix3f32) -> (t1, t2, t3: f32) {
- T1 := math.atan2(-m[0][2], m[0][0])
- C2 := math.sqrt(m[1][1]*m[1][1] + m[2][1]*m[2][1])
- T2 := math.atan2(m[0][1], C2)
+ T1 := math.atan2(-m[2, 0], m[0, 0])
+ C2 := math.sqrt(m[1, 1]*m[1, 1] + m[1, 2]*m[1, 2])
+ T2 := math.atan2(m[1, 0], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[1][0] + C1*m[1][2], S1*m[2][0] + C1*m[2][2])
+ T3 := math.atan2(S1*m[0, 1] + C1*m[2, 1], S1*m[0, 2] + C1*m[2, 2])
t1 = T1
t2 = T2
t3 = T3
@@ -706,12 +706,12 @@ euler_angles_yzx_from_matrix3_f32 :: proc(m: Matrix3f32) -> (t1, t2, t3: f32) {
}
euler_angles_zyx_from_matrix3_f32 :: proc(m: Matrix3f32) -> (t1, t2, t3: f32) {
- T1 := math.atan2(m[0][1], m[0][0])
- C2 := math.sqrt(m[1][2]*m[1][2] + m[2][2]*m[2][2])
- T2 := math.atan2(-m[0][2], C2)
+ T1 := math.atan2(m[1, 0], m[0, 0])
+ C2 := math.sqrt(m[2, 1]*m[2, 1] + m[2, 2]*m[2, 2])
+ T2 := math.atan2(-m[2, 0], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[2][0] - C1*m[2][1], C1*m[1][1] - S1*m[1][0])
+ T3 := math.atan2(S1*m[0, 2] - C1*m[1, 2], C1*m[1, 1] - S1*m[0, 1])
t1 = T1
t2 = T2
t3 = T3
@@ -719,12 +719,12 @@ euler_angles_zyx_from_matrix3_f32 :: proc(m: Matrix3f32) -> (t1, t2, t3: f32) {
}
euler_angles_zxy_from_matrix3_f32 :: proc(m: Matrix3f32) -> (t1, t2, t3: f32) {
- T1 := math.atan2(-m[1][0], m[1][1])
- C2 := math.sqrt(m[0][2]*m[0][2] + m[2][2]*m[2][2])
- T2 := math.atan2(m[1][2], C2)
+ T1 := math.atan2(-m[0, 1], m[1, 1])
+ C2 := math.sqrt(m[2, 0]*m[2, 0] + m[2, 2]*m[2, 2])
+ T2 := math.atan2(m[2, 1], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(C1*m[2][0] + S1*m[2][1], C1*m[0][0] + S1*m[0][1])
+ T3 := math.atan2(C1*m[0, 2] + S1*m[1, 2], C1*m[0, 0] + S1*m[1, 0])
t1 = T1
t2 = T2
t3 = T3
@@ -737,32 +737,32 @@ euler_angles_zxy_from_matrix3_f32 :: proc(m: Matrix3f32) -> (t1, t2, t3: f32) {
matrix4_from_euler_angle_x_f32 :: proc(angle_x: f32) -> (m: Matrix4f32) {
cos_x, sin_x := math.cos(angle_x), math.sin(angle_x)
- m[0][0] = 1
- m[1][1] = +cos_x
- m[2][1] = +sin_x
- m[1][2] = -sin_x
- m[2][2] = +cos_x
- m[3][3] = 1
+ m[0, 0] = 1
+ m[1, 1] = +cos_x
+ m[1, 2] = +sin_x
+ m[2, 1] = -sin_x
+ m[2, 2] = +cos_x
+ m[3, 3] = 1
return
}
matrix4_from_euler_angle_y_f32 :: proc(angle_y: f32) -> (m: Matrix4f32) {
cos_y, sin_y := math.cos(angle_y), math.sin(angle_y)
- m[0][0] = +cos_y
- m[2][0] = -sin_y
- m[1][1] = 1
- m[0][2] = +sin_y
- m[2][2] = +cos_y
- m[3][3] = 1
+ m[0, 0] = +cos_y
+ m[0, 2] = -sin_y
+ m[1, 1] = 1
+ m[2, 0] = +sin_y
+ m[2, 2] = +cos_y
+ m[3, 3] = 1
return
}
matrix4_from_euler_angle_z_f32 :: proc(angle_z: f32) -> (m: Matrix4f32) {
cos_z, sin_z := math.cos(angle_z), math.sin(angle_z)
- m[0][0] = +cos_z
- m[1][0] = +sin_z
- m[1][1] = +cos_z
- m[0][1] = -sin_z
- m[2][2] = 1
- m[3][3] = 1
+ m[0, 0] = +cos_z
+ m[0, 1] = +sin_z
+ m[1, 1] = +cos_z
+ m[1, 0] = -sin_z
+ m[2, 2] = 1
+ m[3, 3] = 1
return
}
@@ -770,34 +770,34 @@ matrix4_from_euler_angle_z_f32 :: proc(angle_z: f32) -> (m: Matrix4f32) {
matrix4_from_derived_euler_angle_x_f32 :: proc(angle_x: f32, angular_velocity_x: f32) -> (m: Matrix4f32) {
cos_x := math.cos(angle_x) * angular_velocity_x
sin_x := math.sin(angle_x) * angular_velocity_x
- m[0][0] = 1
- m[1][1] = +cos_x
- m[2][1] = +sin_x
- m[1][2] = -sin_x
- m[2][2] = +cos_x
- m[3][3] = 1
+ m[0, 0] = 1
+ m[1, 1] = +cos_x
+ m[1, 2] = +sin_x
+ m[2, 1] = -sin_x
+ m[2, 2] = +cos_x
+ m[3, 3] = 1
return
}
matrix4_from_derived_euler_angle_y_f32 :: proc(angle_y: f32, angular_velocity_y: f32) -> (m: Matrix4f32) {
cos_y := math.cos(angle_y) * angular_velocity_y
sin_y := math.sin(angle_y) * angular_velocity_y
- m[0][0] = +cos_y
- m[2][0] = -sin_y
- m[1][1] = 1
- m[0][2] = +sin_y
- m[2][2] = +cos_y
- m[3][3] = 1
+ m[0, 0] = +cos_y
+ m[0, 2] = -sin_y
+ m[1, 1] = 1
+ m[2, 0] = +sin_y
+ m[2, 2] = +cos_y
+ m[3, 3] = 1
return
}
matrix4_from_derived_euler_angle_z_f32 :: proc(angle_z: f32, angular_velocity_z: f32) -> (m: Matrix4f32) {
cos_z := math.cos(angle_z) * angular_velocity_z
sin_z := math.sin(angle_z) * angular_velocity_z
- m[0][0] = +cos_z
- m[1][0] = +sin_z
- m[1][1] = +cos_z
- m[0][1] = -sin_z
- m[2][2] = 1
- m[3][3] = 1
+ m[0, 0] = +cos_z
+ m[0, 1] = +sin_z
+ m[1, 1] = +cos_z
+ m[1, 0] = -sin_z
+ m[2, 2] = 1
+ m[3, 3] = 1
return
}
@@ -805,15 +805,15 @@ matrix4_from_derived_euler_angle_z_f32 :: proc(angle_z: f32, angular_velocity_z:
matrix4_from_euler_angles_xy_f32 :: proc(angle_x, angle_y: f32) -> (m: Matrix4f32) {
cos_x, sin_x := math.cos(angle_x), math.sin(angle_x)
cos_y, sin_y := math.cos(angle_y), math.sin(angle_y)
- m[0][0] = cos_y
- m[1][0] = -sin_x * - sin_y
- m[2][0] = -cos_x * - sin_y
- m[1][1] = cos_x
- m[2][1] = sin_x
- m[0][2] = sin_y
- m[1][2] = -sin_x * cos_y
- m[2][2] = cos_x * cos_y
- m[3][3] = 1
+ m[0, 0] = cos_y
+ m[0, 1] = -sin_x * - sin_y
+ m[0, 2] = -cos_x * - sin_y
+ m[1, 1] = cos_x
+ m[1, 2] = sin_x
+ m[2, 0] = sin_y
+ m[2, 1] = -sin_x * cos_y
+ m[2, 2] = cos_x * cos_y
+ m[3, 3] = 1
return
}
@@ -821,15 +821,15 @@ matrix4_from_euler_angles_xy_f32 :: proc(angle_x, angle_y: f32) -> (m: Matrix4f3
matrix4_from_euler_angles_yx_f32 :: proc(angle_y, angle_x: f32) -> (m: Matrix4f32) {
cos_x, sin_x := math.cos(angle_x), math.sin(angle_x)
cos_y, sin_y := math.cos(angle_y), math.sin(angle_y)
- m[0][0] = cos_y
- m[2][0] = -sin_y
- m[0][1] = sin_y*sin_x
- m[1][1] = cos_x
- m[2][1] = cos_y*sin_x
- m[0][2] = sin_y*cos_x
- m[1][2] = -sin_x
- m[2][2] = cos_y*cos_x
- m[3][3] = 1
+ m[0, 0] = cos_y
+ m[0, 2] = -sin_y
+ m[1, 0] = sin_y*sin_x
+ m[1, 1] = cos_x
+ m[1, 2] = cos_y*sin_x
+ m[2, 0] = sin_y*cos_x
+ m[2, 1] = -sin_x
+ m[2, 2] = cos_y*cos_x
+ m[3, 3] = 1
return
}
@@ -855,22 +855,22 @@ matrix4_from_euler_angles_xyz_f32 :: proc(t1, t2, t3: f32) -> (m: Matrix4f32) {
s2 := math.sin(-t2)
s3 := math.sin(-t3)
- m[0][0] = c2 * c3
- m[0][1] =-c1 * s3 + s1 * s2 * c3
- m[0][2] = s1 * s3 + c1 * s2 * c3
- m[0][3] = 0
- m[1][0] = c2 * s3
- m[1][1] = c1 * c3 + s1 * s2 * s3
- m[1][2] =-s1 * c3 + c1 * s2 * s3
- m[1][3] = 0
- m[2][0] =-s2
- m[2][1] = s1 * c2
- m[2][2] = c1 * c2
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c2 * c3
+ m[1, 0] =-c1 * s3 + s1 * s2 * c3
+ m[2, 0] = s1 * s3 + c1 * s2 * c3
+ m[3, 0] = 0
+ m[0, 1] = c2 * s3
+ m[1, 1] = c1 * c3 + s1 * s2 * s3
+ m[2, 1] =-s1 * c3 + c1 * s2 * s3
+ m[3, 1] = 0
+ m[0, 2] =-s2
+ m[1, 2] = s1 * c2
+ m[2, 2] = c1 * c2
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -882,22 +882,22 @@ matrix4_from_euler_angles_yxz_f32 :: proc(yaw, pitch, roll: f32) -> (m: Matrix4f
cb := math.cos(roll)
sb := math.sin(roll)
- m[0][0] = ch * cb + sh * sp * sb
- m[0][1] = sb * cp
- m[0][2] = -sh * cb + ch * sp * sb
- m[0][3] = 0
- m[1][0] = -ch * sb + sh * sp * cb
- m[1][1] = cb * cp
- m[1][2] = sb * sh + ch * sp * cb
- m[1][3] = 0
- m[2][0] = sh * cp
- m[2][1] = -sp
- m[2][2] = ch * cp
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = ch * cb + sh * sp * sb
+ m[1, 0] = sb * cp
+ m[2, 0] = -sh * cb + ch * sp * sb
+ m[3, 0] = 0
+ m[0, 1] = -ch * sb + sh * sp * cb
+ m[1, 1] = cb * cp
+ m[2, 1] = sb * sh + ch * sp * cb
+ m[3, 1] = 0
+ m[0, 2] = sh * cp
+ m[1, 2] = -sp
+ m[2, 2] = ch * cp
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -909,22 +909,22 @@ matrix4_from_euler_angles_xzx_f32 :: proc(t1, t2, t3: f32) -> (m: Matrix4f32) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c2
- m[0][1] = c1 * s2
- m[0][2] = s1 * s2
- m[0][3] = 0
- m[1][0] =-c3 * s2
- m[1][1] = c1 * c2 * c3 - s1 * s3
- m[1][2] = c1 * s3 + c2 * c3 * s1
- m[1][3] = 0
- m[2][0] = s2 * s3
- m[2][1] =-c3 * s1 - c1 * c2 * s3
- m[2][2] = c1 * c3 - c2 * s1 * s3
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c2
+ m[1, 0] = c1 * s2
+ m[2, 0] = s1 * s2
+ m[3, 0] = 0
+ m[0, 1] =-c3 * s2
+ m[1, 1] = c1 * c2 * c3 - s1 * s3
+ m[2, 1] = c1 * s3 + c2 * c3 * s1
+ m[3, 1] = 0
+ m[0, 2] = s2 * s3
+ m[1, 2] =-c3 * s1 - c1 * c2 * s3
+ m[2, 2] = c1 * c3 - c2 * s1 * s3
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -936,22 +936,22 @@ matrix4_from_euler_angles_xyx_f32 :: proc(t1, t2, t3: f32) -> (m: Matrix4f32) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c2
- m[0][1] = s1 * s2
- m[0][2] =-c1 * s2
- m[0][3] = 0
- m[1][0] = s2 * s3
- m[1][1] = c1 * c3 - c2 * s1 * s3
- m[1][2] = c3 * s1 + c1 * c2 * s3
- m[1][3] = 0
- m[2][0] = c3 * s2
- m[2][1] =-c1 * s3 - c2 * c3 * s1
- m[2][2] = c1 * c2 * c3 - s1 * s3
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c2
+ m[1, 0] = s1 * s2
+ m[2, 0] =-c1 * s2
+ m[3, 0] = 0
+ m[0, 1] = s2 * s3
+ m[1, 1] = c1 * c3 - c2 * s1 * s3
+ m[2, 1] = c3 * s1 + c1 * c2 * s3
+ m[3, 1] = 0
+ m[0, 2] = c3 * s2
+ m[1, 2] =-c1 * s3 - c2 * c3 * s1
+ m[2, 2] = c1 * c2 * c3 - s1 * s3
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -963,22 +963,22 @@ matrix4_from_euler_angles_yxy_f32 :: proc(t1, t2, t3: f32) -> (m: Matrix4f32) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c3 - c2 * s1 * s3
- m[0][1] = s2* s3
- m[0][2] =-c3 * s1 - c1 * c2 * s3
- m[0][3] = 0
- m[1][0] = s1 * s2
- m[1][1] = c2
- m[1][2] = c1 * s2
- m[1][3] = 0
- m[2][0] = c1 * s3 + c2 * c3 * s1
- m[2][1] =-c3 * s2
- m[2][2] = c1 * c2 * c3 - s1 * s3
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c1 * c3 - c2 * s1 * s3
+ m[1, 0] = s2* s3
+ m[2, 0] =-c3 * s1 - c1 * c2 * s3
+ m[3, 0] = 0
+ m[0, 1] = s1 * s2
+ m[1, 1] = c2
+ m[2, 1] = c1 * s2
+ m[3, 1] = 0
+ m[0, 2] = c1 * s3 + c2 * c3 * s1
+ m[1, 2] =-c3 * s2
+ m[2, 2] = c1 * c2 * c3 - s1 * s3
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -990,22 +990,22 @@ matrix4_from_euler_angles_yzy_f32 :: proc(t1, t2, t3: f32) -> (m: Matrix4f32) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c2 * c3 - s1 * s3
- m[0][1] = c3 * s2
- m[0][2] =-c1 * s3 - c2 * c3 * s1
- m[0][3] = 0
- m[1][0] =-c1 * s2
- m[1][1] = c2
- m[1][2] = s1 * s2
- m[1][3] = 0
- m[2][0] = c3 * s1 + c1 * c2 * s3
- m[2][1] = s2 * s3
- m[2][2] = c1 * c3 - c2 * s1 * s3
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c1 * c2 * c3 - s1 * s3
+ m[1, 0] = c3 * s2
+ m[2, 0] =-c1 * s3 - c2 * c3 * s1
+ m[3, 0] = 0
+ m[0, 1] =-c1 * s2
+ m[1, 1] = c2
+ m[2, 1] = s1 * s2
+ m[3, 1] = 0
+ m[0, 2] = c3 * s1 + c1 * c2 * s3
+ m[1, 2] = s2 * s3
+ m[2, 2] = c1 * c3 - c2 * s1 * s3
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -1017,22 +1017,22 @@ matrix4_from_euler_angles_zyz_f32 :: proc(t1, t2, t3: f32) -> (m: Matrix4f32) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c2 * c3 - s1 * s3
- m[0][1] = c1 * s3 + c2 * c3 * s1
- m[0][2] =-c3 * s2
- m[0][3] = 0
- m[1][0] =-c3 * s1 - c1 * c2 * s3
- m[1][1] = c1 * c3 - c2 * s1 * s3
- m[1][2] = s2 * s3
- m[1][3] = 0
- m[2][0] = c1 * s2
- m[2][1] = s1 * s2
- m[2][2] = c2
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c1 * c2 * c3 - s1 * s3
+ m[1, 0] = c1 * s3 + c2 * c3 * s1
+ m[2, 0] =-c3 * s2
+ m[3, 0] = 0
+ m[0, 1] =-c3 * s1 - c1 * c2 * s3
+ m[1, 1] = c1 * c3 - c2 * s1 * s3
+ m[2, 1] = s2 * s3
+ m[3, 1] = 0
+ m[0, 2] = c1 * s2
+ m[1, 2] = s1 * s2
+ m[2, 2] = c2
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -1044,22 +1044,22 @@ matrix4_from_euler_angles_zxz_f32 :: proc(t1, t2, t3: f32) -> (m: Matrix4f32) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c3 - c2 * s1 * s3
- m[0][1] = c3 * s1 + c1 * c2 * s3
- m[0][2] = s2 *s3
- m[0][3] = 0
- m[1][0] =-c1 * s3 - c2 * c3 * s1
- m[1][1] = c1 * c2 * c3 - s1 * s3
- m[1][2] = c3 * s2
- m[1][3] = 0
- m[2][0] = s1 * s2
- m[2][1] =-c1 * s2
- m[2][2] = c2
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c1 * c3 - c2 * s1 * s3
+ m[1, 0] = c3 * s1 + c1 * c2 * s3
+ m[2, 0] = s2 *s3
+ m[3, 0] = 0
+ m[0, 1] =-c1 * s3 - c2 * c3 * s1
+ m[1, 1] = c1 * c2 * c3 - s1 * s3
+ m[2, 1] = c3 * s2
+ m[3, 1] = 0
+ m[0, 2] = s1 * s2
+ m[1, 2] =-c1 * s2
+ m[2, 2] = c2
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -1072,22 +1072,22 @@ matrix4_from_euler_angles_xzy_f32 :: proc(t1, t2, t3: f32) -> (m: Matrix4f32) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c2 * c3
- m[0][1] = s1 * s3 + c1 * c3 * s2
- m[0][2] = c3 * s1 * s2 - c1 * s3
- m[0][3] = 0
- m[1][0] =-s2
- m[1][1] = c1 * c2
- m[1][2] = c2 * s1
- m[1][3] = 0
- m[2][0] = c2 * s3
- m[2][1] = c1 * s2 * s3 - c3 * s1
- m[2][2] = c1 * c3 + s1 * s2 *s3
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c2 * c3
+ m[1, 0] = s1 * s3 + c1 * c3 * s2
+ m[2, 0] = c3 * s1 * s2 - c1 * s3
+ m[3, 0] = 0
+ m[0, 1] =-s2
+ m[1, 1] = c1 * c2
+ m[2, 1] = c2 * s1
+ m[3, 1] = 0
+ m[0, 2] = c2 * s3
+ m[1, 2] = c1 * s2 * s3 - c3 * s1
+ m[2, 2] = c1 * c3 + s1 * s2 *s3
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -1099,22 +1099,22 @@ matrix4_from_euler_angles_yzx_f32 :: proc(t1, t2, t3: f32) -> (m: Matrix4f32) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c2
- m[0][1] = s2
- m[0][2] =-c2 * s1
- m[0][3] = 0
- m[1][0] = s1 * s3 - c1 * c3 * s2
- m[1][1] = c2 * c3
- m[1][2] = c1 * s3 + c3 * s1 * s2
- m[1][3] = 0
- m[2][0] = c3 * s1 + c1 * s2 * s3
- m[2][1] =-c2 * s3
- m[2][2] = c1 * c3 - s1 * s2 * s3
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c1 * c2
+ m[1, 0] = s2
+ m[2, 0] =-c2 * s1
+ m[3, 0] = 0
+ m[0, 1] = s1 * s3 - c1 * c3 * s2
+ m[1, 1] = c2 * c3
+ m[2, 1] = c1 * s3 + c3 * s1 * s2
+ m[3, 1] = 0
+ m[0, 2] = c3 * s1 + c1 * s2 * s3
+ m[1, 2] =-c2 * s3
+ m[2, 2] = c1 * c3 - s1 * s2 * s3
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -1126,22 +1126,22 @@ matrix4_from_euler_angles_zyx_f32 :: proc(t1, t2, t3: f32) -> (m: Matrix4f32) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c2
- m[0][1] = c2 * s1
- m[0][2] =-s2
- m[0][3] = 0
- m[1][0] = c1 * s2 * s3 - c3 * s1
- m[1][1] = c1 * c3 + s1 * s2 * s3
- m[1][2] = c2 * s3
- m[1][3] = 0
- m[2][0] = s1 * s3 + c1 * c3 * s2
- m[2][1] = c3 * s1 * s2 - c1 * s3
- m[2][2] = c2 * c3
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c1 * c2
+ m[1, 0] = c2 * s1
+ m[2, 0] =-s2
+ m[3, 0] = 0
+ m[0, 1] = c1 * s2 * s3 - c3 * s1
+ m[1, 1] = c1 * c3 + s1 * s2 * s3
+ m[2, 1] = c2 * s3
+ m[3, 1] = 0
+ m[0, 2] = s1 * s3 + c1 * c3 * s2
+ m[1, 2] = c3 * s1 * s2 - c1 * s3
+ m[2, 2] = c2 * c3
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -1153,22 +1153,22 @@ matrix4_from_euler_angles_zxy_f32 :: proc(t1, t2, t3: f32) -> (m: Matrix4f32) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c3 - s1 * s2 * s3
- m[0][1] = c3 * s1 + c1 * s2 * s3
- m[0][2] =-c2 * s3
- m[0][3] = 0
- m[1][0] =-c2 * s1
- m[1][1] = c1 * c2
- m[1][2] = s2
- m[1][3] = 0
- m[2][0] = c1 * s3 + c3 * s1 * s2
- m[2][1] = s1 * s3 - c1 * c3 * s2
- m[2][2] = c2 * c3
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c1 * c3 - s1 * s2 * s3
+ m[1, 0] = c3 * s1 + c1 * s2 * s3
+ m[2, 0] =-c2 * s3
+ m[3, 0] = 0
+ m[0, 1] =-c2 * s1
+ m[1, 1] = c1 * c2
+ m[2, 1] = s2
+ m[3, 1] = 0
+ m[0, 2] = c1 * s3 + c3 * s1 * s2
+ m[1, 2] = s1 * s3 - c1 * c3 * s2
+ m[2, 2] = c2 * c3
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -1181,32 +1181,32 @@ matrix4_from_yaw_pitch_roll_f32 :: proc(yaw, pitch, roll: f32) -> (m: Matrix4f32
cb := math.cos(roll)
sb := math.sin(roll)
- m[0][0] = ch * cb + sh * sp * sb
- m[0][1] = sb * cp
- m[0][2] = -sh * cb + ch * sp * sb
- m[0][3] = 0
- m[1][0] = -ch * sb + sh * sp * cb
- m[1][1] = cb * cp
- m[1][2] = sb * sh + ch * sp * cb
- m[1][3] = 0
- m[2][0] = sh * cp
- m[2][1] = -sp
- m[2][2] = ch * cp
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = ch * cb + sh * sp * sb
+ m[1, 0] = sb * cp
+ m[2, 0] = -sh * cb + ch * sp * sb
+ m[3, 0] = 0
+ m[0, 1] = -ch * sb + sh * sp * cb
+ m[1, 1] = cb * cp
+ m[2, 1] = sb * sh + ch * sp * cb
+ m[3, 1] = 0
+ m[0, 2] = sh * cp
+ m[1, 2] = -sp
+ m[2, 2] = ch * cp
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return m
}
euler_angles_xyz_from_matrix4_f32 :: proc(m: Matrix4f32) -> (t1, t2, t3: f32) {
- T1 := math.atan2(m[2][1], m[2][2])
- C2 := math.sqrt(m[0][0]*m[0][0] + m[1][0]*m[1][0])
- T2 := math.atan2(-m[2][0], C2)
+ T1 := math.atan2(m[1, 2], m[2, 2])
+ C2 := math.sqrt(m[0, 0]*m[0, 0] + m[0, 1]*m[0, 1])
+ T2 := math.atan2(-m[0, 2], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[0][2] - C1*m[0][1], C1*m[1][1] - S1*m[1][2])
+ T3 := math.atan2(S1*m[2, 0] - C1*m[1, 0], C1*m[1, 1] - S1*m[2, 1])
t1 = -T1
t2 = -T2
t3 = -T3
@@ -1214,12 +1214,12 @@ euler_angles_xyz_from_matrix4_f32 :: proc(m: Matrix4f32) -> (t1, t2, t3: f32) {
}
euler_angles_yxz_from_matrix4_f32 :: proc(m: Matrix4f32) -> (t1, t2, t3: f32) {
- T1 := math.atan2(m[2][0], m[2][2])
- C2 := math.sqrt(m[0][1]*m[0][1] + m[1][1]*m[1][1])
- T2 := math.atan2(-m[2][1], C2)
+ T1 := math.atan2(m[0, 2], m[2, 2])
+ C2 := math.sqrt(m[1, 0]*m[1, 0] + m[1, 1]*m[1, 1])
+ T2 := math.atan2(-m[1, 2], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[1][2] - C1*m[1][0], C1*m[0][0] - S1*m[0][2])
+ T3 := math.atan2(S1*m[2, 1] - C1*m[0, 1], C1*m[0, 0] - S1*m[2, 0])
t1 = T1
t2 = T2
t3 = T3
@@ -1227,12 +1227,12 @@ euler_angles_yxz_from_matrix4_f32 :: proc(m: Matrix4f32) -> (t1, t2, t3: f32) {
}
euler_angles_xzx_from_matrix4_f32 :: proc(m: Matrix4f32) -> (t1, t2, t3: f32) {
- T1 := math.atan2(m[0][2], m[0][1])
- S2 := math.sqrt(m[1][0]*m[1][0] + m[2][0]*m[2][0])
- T2 := math.atan2(S2, m[0][0])
+ T1 := math.atan2(m[2, 0], m[1, 0])
+ S2 := math.sqrt(m[0, 1]*m[0, 1] + m[0, 2]*m[0, 2])
+ T2 := math.atan2(S2, m[0, 0])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(C1*m[1][2] - S1*m[1][1], C1*m[2][2] - S1*m[2][1])
+ T3 := math.atan2(C1*m[2, 1] - S1*m[1, 1], C1*m[2, 2] - S1*m[1, 2])
t1 = T1
t2 = T2
t3 = T3
@@ -1240,12 +1240,12 @@ euler_angles_xzx_from_matrix4_f32 :: proc(m: Matrix4f32) -> (t1, t2, t3: f32) {
}
euler_angles_xyx_from_matrix4_f32 :: proc(m: Matrix4f32) -> (t1, t2, t3: f32) {
- T1 := math.atan2(m[0][1], -m[0][2])
- S2 := math.sqrt(m[1][0]*m[1][0] + m[2][0]*m[2][0])
- T2 := math.atan2(S2, m[0][0])
+ T1 := math.atan2(m[1, 0], -m[2, 0])
+ S2 := math.sqrt(m[0, 1]*m[0, 1] + m[0, 2]*m[0, 2])
+ T2 := math.atan2(S2, m[0, 0])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(-C1*m[2][1] - S1*m[2][2], C1*m[1][1] + S1*m[1][2])
+ T3 := math.atan2(-C1*m[1, 2] - S1*m[2, 2], C1*m[1, 1] + S1*m[2, 1])
t1 = T1
t2 = T2
t3 = T3
@@ -1253,12 +1253,12 @@ euler_angles_xyx_from_matrix4_f32 :: proc(m: Matrix4f32) -> (t1, t2, t3: f32) {
}
euler_angles_yxy_from_matrix4_f32 :: proc(m: Matrix4f32) -> (t1, t2, t3: f32) {
- T1 := math.atan2(m[1][0], m[1][2])
- S2 := math.sqrt(m[0][1]*m[0][1] + m[2][1]*m[2][1])
- T2 := math.atan2(S2, m[1][1])
+ T1 := math.atan2(m[0, 1], m[2, 1])
+ S2 := math.sqrt(m[1, 0]*m[1, 0] + m[1, 2]*m[1, 2])
+ T2 := math.atan2(S2, m[1, 1])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(C1*m[2][0] - S1*m[2][2], C1*m[0][0] - S1*m[0][2])
+ T3 := math.atan2(C1*m[0, 2] - S1*m[2, 2], C1*m[0, 0] - S1*m[2, 0])
t1 = T1
t2 = T2
t3 = T3
@@ -1266,24 +1266,24 @@ euler_angles_yxy_from_matrix4_f32 :: proc(m: Matrix4f32) -> (t1, t2, t3: f32) {
}
euler_angles_yzy_from_matrix4_f32 :: proc(m: Matrix4f32) -> (t1, t2, t3: f32) {
- T1 := math.atan2(m[1][2], -m[1][0])
- S2 := math.sqrt(m[0][1]*m[0][1] + m[2][1]*m[2][1])
- T2 := math.atan2(S2, m[1][1])
+ T1 := math.atan2(m[2, 1], -m[0, 1])
+ S2 := math.sqrt(m[1, 0]*m[1, 0] + m[1, 2]*m[1, 2])
+ T2 := math.atan2(S2, m[1, 1])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(-S1*m[0][0] - C1*m[0][2], S1*m[2][0] + C1*m[2][2])
+ T3 := math.atan2(-S1*m[0, 0] - C1*m[2, 0], S1*m[0, 2] + C1*m[2, 2])
t1 = T1
t2 = T2
t3 = T3
return
}
euler_angles_zyz_from_matrix4_f32 :: proc(m: Matrix4f32) -> (t1, t2, t3: f32) {
- T1 := math.atan2(m[2][1], m[2][0])
- S2 := math.sqrt(m[0][2]*m[0][2] + m[1][2]*m[1][2])
- T2 := math.atan2(S2, m[2][2])
+ T1 := math.atan2(m[1, 2], m[0, 2])
+ S2 := math.sqrt(m[2, 0]*m[2, 0] + m[2, 1]*m[2, 1])
+ T2 := math.atan2(S2, m[2, 2])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(C1*m[0][1] - S1*m[0][0], C1*m[1][1] - S1*m[1][0])
+ T3 := math.atan2(C1*m[1, 0] - S1*m[0, 0], C1*m[1, 1] - S1*m[0, 1])
t1 = T1
t2 = T2
t3 = T3
@@ -1291,12 +1291,12 @@ euler_angles_zyz_from_matrix4_f32 :: proc(m: Matrix4f32) -> (t1, t2, t3: f32) {
}
euler_angles_zxz_from_matrix4_f32 :: proc(m: Matrix4f32) -> (t1, t2, t3: f32) {
- T1 := math.atan2(m[2][0], -m[2][1])
- S2 := math.sqrt(m[0][2]*m[0][2] + m[1][2]*m[1][2])
- T2 := math.atan2(S2, m[2][2])
+ T1 := math.atan2(m[0, 2], -m[1, 2])
+ S2 := math.sqrt(m[2, 0]*m[2, 0] + m[2, 1]*m[2, 1])
+ T2 := math.atan2(S2, m[2, 2])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(-C1*m[1][0] - S1*m[1][1], C1*m[0][0] + S1*m[0][1])
+ T3 := math.atan2(-C1*m[0, 1] - S1*m[1, 1], C1*m[0, 0] + S1*m[1, 0])
t1 = T1
t2 = T2
t3 = T3
@@ -1304,12 +1304,12 @@ euler_angles_zxz_from_matrix4_f32 :: proc(m: Matrix4f32) -> (t1, t2, t3: f32) {
}
euler_angles_xzy_from_matrix4_f32 :: proc(m: Matrix4f32) -> (t1, t2, t3: f32) {
- T1 := math.atan2(m[1][2], m[1][1])
- C2 := math.sqrt(m[0][0]*m[0][0] + m[2][0]*m[2][0])
- T2 := math.atan2(-m[1][0], C2)
+ T1 := math.atan2(m[2, 1], m[1, 1])
+ C2 := math.sqrt(m[0, 0]*m[0, 0] + m[0, 2]*m[0, 2])
+ T2 := math.atan2(-m[0, 1], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[0][1] - C1*m[0][2], C1*m[2][2] - S1*m[2][1])
+ T3 := math.atan2(S1*m[1, 0] - C1*m[2, 0], C1*m[2, 2] - S1*m[1, 2])
t1 = T1
t2 = T2
t3 = T3
@@ -1317,12 +1317,12 @@ euler_angles_xzy_from_matrix4_f32 :: proc(m: Matrix4f32) -> (t1, t2, t3: f32) {
}
euler_angles_yzx_from_matrix4_f32 :: proc(m: Matrix4f32) -> (t1, t2, t3: f32) {
- T1 := math.atan2(-m[0][2], m[0][0])
- C2 := math.sqrt(m[1][1]*m[1][1] + m[2][1]*m[2][1])
- T2 := math.atan2(m[0][1], C2)
+ T1 := math.atan2(-m[2, 0], m[0, 0])
+ C2 := math.sqrt(m[1, 1]*m[1, 1] + m[1, 2]*m[1, 2])
+ T2 := math.atan2(m[1, 0], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[1][0] + C1*m[1][2], S1*m[2][0] + C1*m[2][2])
+ T3 := math.atan2(S1*m[0, 1] + C1*m[2, 1], S1*m[0, 2] + C1*m[2, 2])
t1 = T1
t2 = T2
t3 = T3
@@ -1330,12 +1330,12 @@ euler_angles_yzx_from_matrix4_f32 :: proc(m: Matrix4f32) -> (t1, t2, t3: f32) {
}
euler_angles_zyx_from_matrix4_f32 :: proc(m: Matrix4f32) -> (t1, t2, t3: f32) {
- T1 := math.atan2(m[0][1], m[0][0])
- C2 := math.sqrt(m[1][2]*m[1][2] + m[2][2]*m[2][2])
- T2 := math.atan2(-m[0][2], C2)
+ T1 := math.atan2(m[1, 0], m[0, 0])
+ C2 := math.sqrt(m[2, 1]*m[2, 1] + m[2, 2]*m[2, 2])
+ T2 := math.atan2(-m[2, 0], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[2][0] - C1*m[2][1], C1*m[1][1] - S1*m[1][0])
+ T3 := math.atan2(S1*m[0, 2] - C1*m[1, 2], C1*m[1, 1] - S1*m[0, 1])
t1 = T1
t2 = T2
t3 = T3
@@ -1343,12 +1343,12 @@ euler_angles_zyx_from_matrix4_f32 :: proc(m: Matrix4f32) -> (t1, t2, t3: f32) {
}
euler_angles_zxy_from_matrix4_f32 :: proc(m: Matrix4f32) -> (t1, t2, t3: f32) {
- T1 := math.atan2(-m[1][0], m[1][1])
- C2 := math.sqrt(m[0][2]*m[0][2] + m[2][2]*m[2][2])
- T2 := math.atan2(m[1][2], C2)
+ T1 := math.atan2(-m[0, 1], m[1, 1])
+ C2 := math.sqrt(m[2, 0]*m[2, 0] + m[2, 2]*m[2, 2])
+ T2 := math.atan2(m[2, 1], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(C1*m[2][0] + S1*m[2][1], C1*m[0][0] + S1*m[0][1])
+ T3 := math.atan2(C1*m[0, 2] + S1*m[1, 2], C1*m[0, 0] + S1*m[1, 0])
t1 = T1
t2 = T2
t3 = T3
diff --git a/core/math/linalg/specific_euler_angles_f64.odin b/core/math/linalg/specific_euler_angles_f64.odin
index efaddd651..2f8f758b0 100644
--- a/core/math/linalg/specific_euler_angles_f64.odin
+++ b/core/math/linalg/specific_euler_angles_f64.odin
@@ -212,29 +212,29 @@ euler_angles_zxy_from_quaternion_f64 :: proc(q: Quaternionf64) -> (t1, t2, t3: f
matrix3_from_euler_angle_x_f64 :: proc(angle_x: f64) -> (m: Matrix3f64) {
cos_x, sin_x := math.cos(angle_x), math.sin(angle_x)
- m[0][0] = 1
- m[1][1] = +cos_x
- m[2][1] = +sin_x
- m[1][2] = -sin_x
- m[2][2] = +cos_x
+ m[0, 0] = 1
+ m[1, 1] = +cos_x
+ m[1, 2] = +sin_x
+ m[2, 1] = -sin_x
+ m[2, 2] = +cos_x
return
}
matrix3_from_euler_angle_y_f64 :: proc(angle_y: f64) -> (m: Matrix3f64) {
cos_y, sin_y := math.cos(angle_y), math.sin(angle_y)
- m[0][0] = +cos_y
- m[2][0] = -sin_y
- m[1][1] = 1
- m[0][2] = +sin_y
- m[2][2] = +cos_y
+ m[0, 0] = +cos_y
+ m[0, 2] = -sin_y
+ m[1, 1] = 1
+ m[2, 0] = +sin_y
+ m[2, 2] = +cos_y
return
}
matrix3_from_euler_angle_z_f64 :: proc(angle_z: f64) -> (m: Matrix3f64) {
cos_z, sin_z := math.cos(angle_z), math.sin(angle_z)
- m[0][0] = +cos_z
- m[1][0] = +sin_z
- m[1][1] = +cos_z
- m[0][1] = -sin_z
- m[2][2] = 1
+ m[0, 0] = +cos_z
+ m[0, 1] = +sin_z
+ m[1, 1] = +cos_z
+ m[1, 0] = -sin_z
+ m[2, 2] = 1
return
}
@@ -242,31 +242,31 @@ matrix3_from_euler_angle_z_f64 :: proc(angle_z: f64) -> (m: Matrix3f64) {
matrix3_from_derived_euler_angle_x_f64 :: proc(angle_x: f64, angular_velocity_x: f64) -> (m: Matrix3f64) {
cos_x := math.cos(angle_x) * angular_velocity_x
sin_x := math.sin(angle_x) * angular_velocity_x
- m[0][0] = 1
- m[1][1] = +cos_x
- m[2][1] = +sin_x
- m[1][2] = -sin_x
- m[2][2] = +cos_x
+ m[0, 0] = 1
+ m[1, 1] = +cos_x
+ m[1, 2] = +sin_x
+ m[2, 1] = -sin_x
+ m[2, 2] = +cos_x
return
}
matrix3_from_derived_euler_angle_y_f64 :: proc(angle_y: f64, angular_velocity_y: f64) -> (m: Matrix3f64) {
cos_y := math.cos(angle_y) * angular_velocity_y
sin_y := math.sin(angle_y) * angular_velocity_y
- m[0][0] = +cos_y
- m[2][0] = -sin_y
- m[1][1] = 1
- m[0][2] = +sin_y
- m[2][2] = +cos_y
+ m[0, 0] = +cos_y
+ m[0, 2] = -sin_y
+ m[1, 1] = 1
+ m[2, 0] = +sin_y
+ m[2, 2] = +cos_y
return
}
matrix3_from_derived_euler_angle_z_f64 :: proc(angle_z: f64, angular_velocity_z: f64) -> (m: Matrix3f64) {
cos_z := math.cos(angle_z) * angular_velocity_z
sin_z := math.sin(angle_z) * angular_velocity_z
- m[0][0] = +cos_z
- m[1][0] = +sin_z
- m[1][1] = +cos_z
- m[0][1] = -sin_z
- m[2][2] = 1
+ m[0, 0] = +cos_z
+ m[0, 1] = +sin_z
+ m[1, 1] = +cos_z
+ m[1, 0] = -sin_z
+ m[2, 2] = 1
return
}
@@ -274,14 +274,14 @@ matrix3_from_derived_euler_angle_z_f64 :: proc(angle_z: f64, angular_velocity_z:
matrix3_from_euler_angles_xy_f64 :: proc(angle_x, angle_y: f64) -> (m: Matrix3f64) {
cos_x, sin_x := math.cos(angle_x), math.sin(angle_x)
cos_y, sin_y := math.cos(angle_y), math.sin(angle_y)
- m[0][0] = cos_y
- m[1][0] = -sin_x * - sin_y
- m[2][0] = -cos_x * - sin_y
- m[1][1] = cos_x
- m[2][1] = sin_x
- m[0][2] = sin_y
- m[1][2] = -sin_x * cos_y
- m[2][2] = cos_x * cos_y
+ m[0, 0] = cos_y
+ m[0, 1] = -sin_x * - sin_y
+ m[0, 2] = -cos_x * - sin_y
+ m[1, 1] = cos_x
+ m[1, 2] = sin_x
+ m[2, 0] = sin_y
+ m[2, 1] = -sin_x * cos_y
+ m[2, 2] = cos_x * cos_y
return
}
@@ -289,14 +289,14 @@ matrix3_from_euler_angles_xy_f64 :: proc(angle_x, angle_y: f64) -> (m: Matrix3f6
matrix3_from_euler_angles_yx_f64 :: proc(angle_y, angle_x: f64) -> (m: Matrix3f64) {
cos_x, sin_x := math.cos(angle_x), math.sin(angle_x)
cos_y, sin_y := math.cos(angle_y), math.sin(angle_y)
- m[0][0] = cos_y
- m[2][0] = -sin_y
- m[0][1] = sin_y*sin_x
- m[1][1] = cos_x
- m[2][1] = cos_y*sin_x
- m[0][2] = sin_y*cos_x
- m[1][2] = -sin_x
- m[2][2] = cos_y*cos_x
+ m[0, 0] = cos_y
+ m[0, 2] = -sin_y
+ m[1, 0] = sin_y*sin_x
+ m[1, 1] = cos_x
+ m[1, 2] = cos_y*sin_x
+ m[2, 0] = sin_y*cos_x
+ m[2, 1] = -sin_x
+ m[2, 2] = cos_y*cos_x
return
}
@@ -322,15 +322,15 @@ matrix3_from_euler_angles_xyz_f64 :: proc(t1, t2, t3: f64) -> (m: Matrix3f64) {
s2 := math.sin(-t2)
s3 := math.sin(-t3)
- m[0][0] = c2 * c3
- m[0][1] =-c1 * s3 + s1 * s2 * c3
- m[0][2] = s1 * s3 + c1 * s2 * c3
- m[1][0] = c2 * s3
- m[1][1] = c1 * c3 + s1 * s2 * s3
- m[1][2] =-s1 * c3 + c1 * s2 * s3
- m[2][0] =-s2
- m[2][1] = s1 * c2
- m[2][2] = c1 * c2
+ m[0, 0] = c2 * c3
+ m[1, 0] =-c1 * s3 + s1 * s2 * c3
+ m[2, 0] = s1 * s3 + c1 * s2 * c3
+ m[0, 1] = c2 * s3
+ m[1, 1] = c1 * c3 + s1 * s2 * s3
+ m[2, 1] =-s1 * c3 + c1 * s2 * s3
+ m[0, 2] =-s2
+ m[1, 2] = s1 * c2
+ m[2, 2] = c1 * c2
return
}
@@ -342,15 +342,15 @@ matrix3_from_euler_angles_yxz_f64 :: proc(yaw, pitch, roll: f64) -> (m: Matrix3f
cb := math.cos(roll)
sb := math.sin(roll)
- m[0][0] = ch * cb + sh * sp * sb
- m[0][1] = sb * cp
- m[0][2] = -sh * cb + ch * sp * sb
- m[1][0] = -ch * sb + sh * sp * cb
- m[1][1] = cb * cp
- m[1][2] = sb * sh + ch * sp * cb
- m[2][0] = sh * cp
- m[2][1] = -sp
- m[2][2] = ch * cp
+ m[0, 0] = ch * cb + sh * sp * sb
+ m[1, 0] = sb * cp
+ m[2, 0] = -sh * cb + ch * sp * sb
+ m[0, 1] = -ch * sb + sh * sp * cb
+ m[1, 1] = cb * cp
+ m[2, 1] = sb * sh + ch * sp * cb
+ m[0, 2] = sh * cp
+ m[1, 2] = -sp
+ m[2, 2] = ch * cp
return
}
@@ -362,15 +362,15 @@ matrix3_from_euler_angles_xzx_f64 :: proc(t1, t2, t3: f64) -> (m: Matrix3f64) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c2
- m[0][1] = c1 * s2
- m[0][2] = s1 * s2
- m[1][0] =-c3 * s2
- m[1][1] = c1 * c2 * c3 - s1 * s3
- m[1][2] = c1 * s3 + c2 * c3 * s1
- m[2][0] = s2 * s3
- m[2][1] =-c3 * s1 - c1 * c2 * s3
- m[2][2] = c1 * c3 - c2 * s1 * s3
+ m[0, 0] = c2
+ m[1, 0] = c1 * s2
+ m[2, 0] = s1 * s2
+ m[0, 1] =-c3 * s2
+ m[1, 1] = c1 * c2 * c3 - s1 * s3
+ m[2, 1] = c1 * s3 + c2 * c3 * s1
+ m[0, 2] = s2 * s3
+ m[1, 2] =-c3 * s1 - c1 * c2 * s3
+ m[2, 2] = c1 * c3 - c2 * s1 * s3
return
}
@@ -382,15 +382,15 @@ matrix3_from_euler_angles_xyx_f64 :: proc(t1, t2, t3: f64) -> (m: Matrix3f64) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c2
- m[0][1] = s1 * s2
- m[0][2] =-c1 * s2
- m[1][0] = s2 * s3
- m[1][1] = c1 * c3 - c2 * s1 * s3
- m[1][2] = c3 * s1 + c1 * c2 * s3
- m[2][0] = c3 * s2
- m[2][1] =-c1 * s3 - c2 * c3 * s1
- m[2][2] = c1 * c2 * c3 - s1 * s3
+ m[0, 0] = c2
+ m[1, 0] = s1 * s2
+ m[2, 0] =-c1 * s2
+ m[0, 1] = s2 * s3
+ m[1, 1] = c1 * c3 - c2 * s1 * s3
+ m[2, 1] = c3 * s1 + c1 * c2 * s3
+ m[0, 2] = c3 * s2
+ m[1, 2] =-c1 * s3 - c2 * c3 * s1
+ m[2, 2] = c1 * c2 * c3 - s1 * s3
return
}
@@ -402,15 +402,15 @@ matrix3_from_euler_angles_yxy_f64 :: proc(t1, t2, t3: f64) -> (m: Matrix3f64) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c3 - c2 * s1 * s3
- m[0][1] = s2* s3
- m[0][2] =-c3 * s1 - c1 * c2 * s3
- m[1][0] = s1 * s2
- m[1][1] = c2
- m[1][2] = c1 * s2
- m[2][0] = c1 * s3 + c2 * c3 * s1
- m[2][1] =-c3 * s2
- m[2][2] = c1 * c2 * c3 - s1 * s3
+ m[0, 0] = c1 * c3 - c2 * s1 * s3
+ m[1, 0] = s2* s3
+ m[2, 0] =-c3 * s1 - c1 * c2 * s3
+ m[0, 1] = s1 * s2
+ m[1, 1] = c2
+ m[2, 1] = c1 * s2
+ m[0, 2] = c1 * s3 + c2 * c3 * s1
+ m[1, 2] =-c3 * s2
+ m[2, 2] = c1 * c2 * c3 - s1 * s3
return
}
@@ -422,15 +422,15 @@ matrix3_from_euler_angles_yzy_f64 :: proc(t1, t2, t3: f64) -> (m: Matrix3f64) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c2 * c3 - s1 * s3
- m[0][1] = c3 * s2
- m[0][2] =-c1 * s3 - c2 * c3 * s1
- m[1][0] =-c1 * s2
- m[1][1] = c2
- m[1][2] = s1 * s2
- m[2][0] = c3 * s1 + c1 * c2 * s3
- m[2][1] = s2 * s3
- m[2][2] = c1 * c3 - c2 * s1 * s3
+ m[0, 0] = c1 * c2 * c3 - s1 * s3
+ m[1, 0] = c3 * s2
+ m[2, 0] =-c1 * s3 - c2 * c3 * s1
+ m[0, 1] =-c1 * s2
+ m[1, 1] = c2
+ m[2, 1] = s1 * s2
+ m[0, 2] = c3 * s1 + c1 * c2 * s3
+ m[1, 2] = s2 * s3
+ m[2, 2] = c1 * c3 - c2 * s1 * s3
return
}
@@ -442,15 +442,15 @@ matrix3_from_euler_angles_zyz_f64 :: proc(t1, t2, t3: f64) -> (m: Matrix3f64) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c2 * c3 - s1 * s3
- m[0][1] = c1 * s3 + c2 * c3 * s1
- m[0][2] =-c3 * s2
- m[1][0] =-c3 * s1 - c1 * c2 * s3
- m[1][1] = c1 * c3 - c2 * s1 * s3
- m[1][2] = s2 * s3
- m[2][0] = c1 * s2
- m[2][1] = s1 * s2
- m[2][2] = c2
+ m[0, 0] = c1 * c2 * c3 - s1 * s3
+ m[1, 0] = c1 * s3 + c2 * c3 * s1
+ m[2, 0] =-c3 * s2
+ m[0, 1] =-c3 * s1 - c1 * c2 * s3
+ m[1, 1] = c1 * c3 - c2 * s1 * s3
+ m[2, 1] = s2 * s3
+ m[0, 2] = c1 * s2
+ m[1, 2] = s1 * s2
+ m[2, 2] = c2
return
}
@@ -462,15 +462,15 @@ matrix3_from_euler_angles_zxz_f64 :: proc(t1, t2, t3: f64) -> (m: Matrix3f64) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c3 - c2 * s1 * s3
- m[0][1] = c3 * s1 + c1 * c2 * s3
- m[0][2] = s2 *s3
- m[1][0] =-c1 * s3 - c2 * c3 * s1
- m[1][1] = c1 * c2 * c3 - s1 * s3
- m[1][2] = c3 * s2
- m[2][0] = s1 * s2
- m[2][1] =-c1 * s2
- m[2][2] = c2
+ m[0, 0] = c1 * c3 - c2 * s1 * s3
+ m[1, 0] = c3 * s1 + c1 * c2 * s3
+ m[2, 0] = s2 *s3
+ m[0, 1] =-c1 * s3 - c2 * c3 * s1
+ m[1, 1] = c1 * c2 * c3 - s1 * s3
+ m[2, 1] = c3 * s2
+ m[0, 2] = s1 * s2
+ m[1, 2] =-c1 * s2
+ m[2, 2] = c2
return
}
@@ -483,15 +483,15 @@ matrix3_from_euler_angles_xzy_f64 :: proc(t1, t2, t3: f64) -> (m: Matrix3f64) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c2 * c3
- m[0][1] = s1 * s3 + c1 * c3 * s2
- m[0][2] = c3 * s1 * s2 - c1 * s3
- m[1][0] =-s2
- m[1][1] = c1 * c2
- m[1][2] = c2 * s1
- m[2][0] = c2 * s3
- m[2][1] = c1 * s2 * s3 - c3 * s1
- m[2][2] = c1 * c3 + s1 * s2 *s3
+ m[0, 0] = c2 * c3
+ m[1, 0] = s1 * s3 + c1 * c3 * s2
+ m[2, 0] = c3 * s1 * s2 - c1 * s3
+ m[0, 1] =-s2
+ m[1, 1] = c1 * c2
+ m[2, 1] = c2 * s1
+ m[0, 2] = c2 * s3
+ m[1, 2] = c1 * s2 * s3 - c3 * s1
+ m[2, 2] = c1 * c3 + s1 * s2 *s3
return
}
@@ -503,15 +503,15 @@ matrix3_from_euler_angles_yzx_f64 :: proc(t1, t2, t3: f64) -> (m: Matrix3f64) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c2
- m[0][1] = s2
- m[0][2] =-c2 * s1
- m[1][0] = s1 * s3 - c1 * c3 * s2
- m[1][1] = c2 * c3
- m[1][2] = c1 * s3 + c3 * s1 * s2
- m[2][0] = c3 * s1 + c1 * s2 * s3
- m[2][1] =-c2 * s3
- m[2][2] = c1 * c3 - s1 * s2 * s3
+ m[0, 0] = c1 * c2
+ m[1, 0] = s2
+ m[2, 0] =-c2 * s1
+ m[0, 1] = s1 * s3 - c1 * c3 * s2
+ m[1, 1] = c2 * c3
+ m[2, 1] = c1 * s3 + c3 * s1 * s2
+ m[0, 2] = c3 * s1 + c1 * s2 * s3
+ m[1, 2] =-c2 * s3
+ m[2, 2] = c1 * c3 - s1 * s2 * s3
return
}
@@ -523,15 +523,15 @@ matrix3_from_euler_angles_zyx_f64 :: proc(t1, t2, t3: f64) -> (m: Matrix3f64) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c2
- m[0][1] = c2 * s1
- m[0][2] =-s2
- m[1][0] = c1 * s2 * s3 - c3 * s1
- m[1][1] = c1 * c3 + s1 * s2 * s3
- m[1][2] = c2 * s3
- m[2][0] = s1 * s3 + c1 * c3 * s2
- m[2][1] = c3 * s1 * s2 - c1 * s3
- m[2][2] = c2 * c3
+ m[0, 0] = c1 * c2
+ m[1, 0] = c2 * s1
+ m[2, 0] =-s2
+ m[0, 1] = c1 * s2 * s3 - c3 * s1
+ m[1, 1] = c1 * c3 + s1 * s2 * s3
+ m[2, 1] = c2 * s3
+ m[0, 2] = s1 * s3 + c1 * c3 * s2
+ m[1, 2] = c3 * s1 * s2 - c1 * s3
+ m[2, 2] = c2 * c3
return
}
@@ -543,15 +543,15 @@ matrix3_from_euler_angles_zxy_f64 :: proc(t1, t2, t3: f64) -> (m: Matrix3f64) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c3 - s1 * s2 * s3
- m[0][1] = c3 * s1 + c1 * s2 * s3
- m[0][2] =-c2 * s3
- m[1][0] =-c2 * s1
- m[1][1] = c1 * c2
- m[1][2] = s2
- m[2][0] = c1 * s3 + c3 * s1 * s2
- m[2][1] = s1 * s3 - c1 * c3 * s2
- m[2][2] = c2 * c3
+ m[0, 0] = c1 * c3 - s1 * s2 * s3
+ m[1, 0] = c3 * s1 + c1 * s2 * s3
+ m[2, 0] =-c2 * s3
+ m[0, 1] =-c2 * s1
+ m[1, 1] = c1 * c2
+ m[2, 1] = s2
+ m[0, 2] = c1 * s3 + c3 * s1 * s2
+ m[1, 2] = s1 * s3 - c1 * c3 * s2
+ m[2, 2] = c2 * c3
return
}
@@ -564,25 +564,25 @@ matrix3_from_yaw_pitch_roll_f64 :: proc(yaw, pitch, roll: f64) -> (m: Matrix3f64
cb := math.cos(roll)
sb := math.sin(roll)
- m[0][0] = ch * cb + sh * sp * sb
- m[0][1] = sb * cp
- m[0][2] = -sh * cb + ch * sp * sb
- m[1][0] = -ch * sb + sh * sp * cb
- m[1][1] = cb * cp
- m[1][2] = sb * sh + ch * sp * cb
- m[2][0] = sh * cp
- m[2][1] = -sp
- m[2][2] = ch * cp
+ m[0, 0] = ch * cb + sh * sp * sb
+ m[1, 0] = sb * cp
+ m[2, 0] = -sh * cb + ch * sp * sb
+ m[0, 1] = -ch * sb + sh * sp * cb
+ m[1, 1] = cb * cp
+ m[2, 1] = sb * sh + ch * sp * cb
+ m[0, 2] = sh * cp
+ m[1, 2] = -sp
+ m[2, 2] = ch * cp
return m
}
euler_angles_xyz_from_matrix3_f64 :: proc(m: Matrix3f64) -> (t1, t2, t3: f64) {
- T1 := math.atan2(m[2][1], m[2][2])
- C2 := math.sqrt(m[0][0]*m[0][0] + m[1][0]*m[1][0])
- T2 := math.atan2(-m[2][0], C2)
+ T1 := math.atan2(m[1, 2], m[2, 2])
+ C2 := math.sqrt(m[0, 0]*m[0, 0] + m[0, 1]*m[0, 1])
+ T2 := math.atan2(-m[0, 2], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[0][2] - C1*m[0][1], C1*m[1][1] - S1*m[1][2])
+ T3 := math.atan2(S1*m[2, 0] - C1*m[1, 0], C1*m[1, 1] - S1*m[2, 1])
t1 = -T1
t2 = -T2
t3 = -T3
@@ -590,12 +590,12 @@ euler_angles_xyz_from_matrix3_f64 :: proc(m: Matrix3f64) -> (t1, t2, t3: f64) {
}
euler_angles_yxz_from_matrix3_f64 :: proc(m: Matrix3f64) -> (t1, t2, t3: f64) {
- T1 := math.atan2(m[2][0], m[2][2])
- C2 := math.sqrt(m[0][1]*m[0][1] + m[1][1]*m[1][1])
- T2 := math.atan2(-m[2][1], C2)
+ T1 := math.atan2(m[0, 2], m[2, 2])
+ C2 := math.sqrt(m[1, 0]*m[1, 0] + m[1, 1]*m[1, 1])
+ T2 := math.atan2(-m[1, 2], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[1][2] - C1*m[1][0], C1*m[0][0] - S1*m[0][2])
+ T3 := math.atan2(S1*m[2, 1] - C1*m[0, 1], C1*m[0, 0] - S1*m[2, 0])
t1 = T1
t2 = T2
t3 = T3
@@ -603,12 +603,12 @@ euler_angles_yxz_from_matrix3_f64 :: proc(m: Matrix3f64) -> (t1, t2, t3: f64) {
}
euler_angles_xzx_from_matrix3_f64 :: proc(m: Matrix3f64) -> (t1, t2, t3: f64) {
- T1 := math.atan2(m[0][2], m[0][1])
- S2 := math.sqrt(m[1][0]*m[1][0] + m[2][0]*m[2][0])
- T2 := math.atan2(S2, m[0][0])
+ T1 := math.atan2(m[2, 0], m[1, 0])
+ S2 := math.sqrt(m[0, 1]*m[0, 1] + m[0, 2]*m[0, 2])
+ T2 := math.atan2(S2, m[0, 0])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(C1*m[1][2] - S1*m[1][1], C1*m[2][2] - S1*m[2][1])
+ T3 := math.atan2(C1*m[2, 1] - S1*m[1, 1], C1*m[2, 2] - S1*m[1, 2])
t1 = T1
t2 = T2
t3 = T3
@@ -616,12 +616,12 @@ euler_angles_xzx_from_matrix3_f64 :: proc(m: Matrix3f64) -> (t1, t2, t3: f64) {
}
euler_angles_xyx_from_matrix3_f64 :: proc(m: Matrix3f64) -> (t1, t2, t3: f64) {
- T1 := math.atan2(m[0][1], -m[0][2])
- S2 := math.sqrt(m[1][0]*m[1][0] + m[2][0]*m[2][0])
- T2 := math.atan2(S2, m[0][0])
+ T1 := math.atan2(m[1, 0], -m[2, 0])
+ S2 := math.sqrt(m[0, 1]*m[0, 1] + m[0, 2]*m[0, 2])
+ T2 := math.atan2(S2, m[0, 0])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(-C1*m[2][1] - S1*m[2][2], C1*m[1][1] + S1*m[1][2])
+ T3 := math.atan2(-C1*m[1, 2] - S1*m[2, 2], C1*m[1, 1] + S1*m[2, 1])
t1 = T1
t2 = T2
t3 = T3
@@ -629,12 +629,12 @@ euler_angles_xyx_from_matrix3_f64 :: proc(m: Matrix3f64) -> (t1, t2, t3: f64) {
}
euler_angles_yxy_from_matrix3_f64 :: proc(m: Matrix3f64) -> (t1, t2, t3: f64) {
- T1 := math.atan2(m[1][0], m[1][2])
- S2 := math.sqrt(m[0][1]*m[0][1] + m[2][1]*m[2][1])
- T2 := math.atan2(S2, m[1][1])
+ T1 := math.atan2(m[0, 1], m[2, 1])
+ S2 := math.sqrt(m[1, 0]*m[1, 0] + m[1, 2]*m[1, 2])
+ T2 := math.atan2(S2, m[1, 1])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(C1*m[2][0] - S1*m[2][2], C1*m[0][0] - S1*m[0][2])
+ T3 := math.atan2(C1*m[0, 2] - S1*m[2, 2], C1*m[0, 0] - S1*m[2, 0])
t1 = T1
t2 = T2
t3 = T3
@@ -642,24 +642,24 @@ euler_angles_yxy_from_matrix3_f64 :: proc(m: Matrix3f64) -> (t1, t2, t3: f64) {
}
euler_angles_yzy_from_matrix3_f64 :: proc(m: Matrix3f64) -> (t1, t2, t3: f64) {
- T1 := math.atan2(m[1][2], -m[1][0])
- S2 := math.sqrt(m[0][1]*m[0][1] + m[2][1]*m[2][1])
- T2 := math.atan2(S2, m[1][1])
+ T1 := math.atan2(m[2, 1], -m[0, 1])
+ S2 := math.sqrt(m[1, 0]*m[1, 0] + m[1, 2]*m[1, 2])
+ T2 := math.atan2(S2, m[1, 1])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(-S1*m[0][0] - C1*m[0][2], S1*m[2][0] + C1*m[2][2])
+ T3 := math.atan2(-S1*m[0, 0] - C1*m[2, 0], S1*m[0, 2] + C1*m[2, 2])
t1 = T1
t2 = T2
t3 = T3
return
}
euler_angles_zyz_from_matrix3_f64 :: proc(m: Matrix3f64) -> (t1, t2, t3: f64) {
- T1 := math.atan2(m[2][1], m[2][0])
- S2 := math.sqrt(m[0][2]*m[0][2] + m[1][2]*m[1][2])
- T2 := math.atan2(S2, m[2][2])
+ T1 := math.atan2(m[1, 2], m[0, 2])
+ S2 := math.sqrt(m[2, 0]*m[2, 0] + m[2, 1]*m[2, 1])
+ T2 := math.atan2(S2, m[2, 2])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(C1*m[0][1] - S1*m[0][0], C1*m[1][1] - S1*m[1][0])
+ T3 := math.atan2(C1*m[1, 0] - S1*m[0, 0], C1*m[1, 1] - S1*m[0, 1])
t1 = T1
t2 = T2
t3 = T3
@@ -667,12 +667,12 @@ euler_angles_zyz_from_matrix3_f64 :: proc(m: Matrix3f64) -> (t1, t2, t3: f64) {
}
euler_angles_zxz_from_matrix3_f64 :: proc(m: Matrix3f64) -> (t1, t2, t3: f64) {
- T1 := math.atan2(m[2][0], -m[2][1])
- S2 := math.sqrt(m[0][2]*m[0][2] + m[1][2]*m[1][2])
- T2 := math.atan2(S2, m[2][2])
+ T1 := math.atan2(m[0, 2], -m[1, 2])
+ S2 := math.sqrt(m[2, 0]*m[2, 0] + m[2, 1]*m[2, 1])
+ T2 := math.atan2(S2, m[2, 2])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(-C1*m[1][0] - S1*m[1][1], C1*m[0][0] + S1*m[0][1])
+ T3 := math.atan2(-C1*m[0, 1] - S1*m[1, 1], C1*m[0, 0] + S1*m[1, 0])
t1 = T1
t2 = T2
t3 = T3
@@ -680,12 +680,12 @@ euler_angles_zxz_from_matrix3_f64 :: proc(m: Matrix3f64) -> (t1, t2, t3: f64) {
}
euler_angles_xzy_from_matrix3_f64 :: proc(m: Matrix3f64) -> (t1, t2, t3: f64) {
- T1 := math.atan2(m[1][2], m[1][1])
- C2 := math.sqrt(m[0][0]*m[0][0] + m[2][0]*m[2][0])
- T2 := math.atan2(-m[1][0], C2)
+ T1 := math.atan2(m[2, 1], m[1, 1])
+ C2 := math.sqrt(m[0, 0]*m[0, 0] + m[0, 2]*m[0, 2])
+ T2 := math.atan2(-m[0, 1], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[0][1] - C1*m[0][2], C1*m[2][2] - S1*m[2][1])
+ T3 := math.atan2(S1*m[1, 0] - C1*m[2, 0], C1*m[2, 2] - S1*m[1, 2])
t1 = T1
t2 = T2
t3 = T3
@@ -693,12 +693,12 @@ euler_angles_xzy_from_matrix3_f64 :: proc(m: Matrix3f64) -> (t1, t2, t3: f64) {
}
euler_angles_yzx_from_matrix3_f64 :: proc(m: Matrix3f64) -> (t1, t2, t3: f64) {
- T1 := math.atan2(-m[0][2], m[0][0])
- C2 := math.sqrt(m[1][1]*m[1][1] + m[2][1]*m[2][1])
- T2 := math.atan2(m[0][1], C2)
+ T1 := math.atan2(-m[2, 0], m[0, 0])
+ C2 := math.sqrt(m[1, 1]*m[1, 1] + m[1, 2]*m[1, 2])
+ T2 := math.atan2(m[1, 0], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[1][0] + C1*m[1][2], S1*m[2][0] + C1*m[2][2])
+ T3 := math.atan2(S1*m[0, 1] + C1*m[2, 1], S1*m[0, 2] + C1*m[2, 2])
t1 = T1
t2 = T2
t3 = T3
@@ -706,12 +706,12 @@ euler_angles_yzx_from_matrix3_f64 :: proc(m: Matrix3f64) -> (t1, t2, t3: f64) {
}
euler_angles_zyx_from_matrix3_f64 :: proc(m: Matrix3f64) -> (t1, t2, t3: f64) {
- T1 := math.atan2(m[0][1], m[0][0])
- C2 := math.sqrt(m[1][2]*m[1][2] + m[2][2]*m[2][2])
- T2 := math.atan2(-m[0][2], C2)
+ T1 := math.atan2(m[1, 0], m[0, 0])
+ C2 := math.sqrt(m[2, 1]*m[2, 1] + m[2, 2]*m[2, 2])
+ T2 := math.atan2(-m[2, 0], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[2][0] - C1*m[2][1], C1*m[1][1] - S1*m[1][0])
+ T3 := math.atan2(S1*m[0, 2] - C1*m[1, 2], C1*m[1, 1] - S1*m[0, 1])
t1 = T1
t2 = T2
t3 = T3
@@ -719,12 +719,12 @@ euler_angles_zyx_from_matrix3_f64 :: proc(m: Matrix3f64) -> (t1, t2, t3: f64) {
}
euler_angles_zxy_from_matrix3_f64 :: proc(m: Matrix3f64) -> (t1, t2, t3: f64) {
- T1 := math.atan2(-m[1][0], m[1][1])
- C2 := math.sqrt(m[0][2]*m[0][2] + m[2][2]*m[2][2])
- T2 := math.atan2(m[1][2], C2)
+ T1 := math.atan2(-m[0, 1], m[1, 1])
+ C2 := math.sqrt(m[2, 0]*m[2, 0] + m[2, 2]*m[2, 2])
+ T2 := math.atan2(m[2, 1], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(C1*m[2][0] + S1*m[2][1], C1*m[0][0] + S1*m[0][1])
+ T3 := math.atan2(C1*m[0, 2] + S1*m[1, 2], C1*m[0, 0] + S1*m[1, 0])
t1 = T1
t2 = T2
t3 = T3
@@ -737,32 +737,32 @@ euler_angles_zxy_from_matrix3_f64 :: proc(m: Matrix3f64) -> (t1, t2, t3: f64) {
matrix4_from_euler_angle_x_f64 :: proc(angle_x: f64) -> (m: Matrix4f64) {
cos_x, sin_x := math.cos(angle_x), math.sin(angle_x)
- m[0][0] = 1
- m[1][1] = +cos_x
- m[2][1] = +sin_x
- m[1][2] = -sin_x
- m[2][2] = +cos_x
- m[3][3] = 1
+ m[0, 0] = 1
+ m[1, 1] = +cos_x
+ m[1, 2] = +sin_x
+ m[2, 1] = -sin_x
+ m[2, 2] = +cos_x
+ m[3, 3] = 1
return
}
matrix4_from_euler_angle_y_f64 :: proc(angle_y: f64) -> (m: Matrix4f64) {
cos_y, sin_y := math.cos(angle_y), math.sin(angle_y)
- m[0][0] = +cos_y
- m[2][0] = -sin_y
- m[1][1] = 1
- m[0][2] = +sin_y
- m[2][2] = +cos_y
- m[3][3] = 1
+ m[0, 0] = +cos_y
+ m[0, 2] = -sin_y
+ m[1, 1] = 1
+ m[2, 0] = +sin_y
+ m[2, 2] = +cos_y
+ m[3, 3] = 1
return
}
matrix4_from_euler_angle_z_f64 :: proc(angle_z: f64) -> (m: Matrix4f64) {
cos_z, sin_z := math.cos(angle_z), math.sin(angle_z)
- m[0][0] = +cos_z
- m[1][0] = +sin_z
- m[1][1] = +cos_z
- m[0][1] = -sin_z
- m[2][2] = 1
- m[3][3] = 1
+ m[0, 0] = +cos_z
+ m[0, 1] = +sin_z
+ m[1, 1] = +cos_z
+ m[1, 0] = -sin_z
+ m[2, 2] = 1
+ m[3, 3] = 1
return
}
@@ -770,34 +770,34 @@ matrix4_from_euler_angle_z_f64 :: proc(angle_z: f64) -> (m: Matrix4f64) {
matrix4_from_derived_euler_angle_x_f64 :: proc(angle_x: f64, angular_velocity_x: f64) -> (m: Matrix4f64) {
cos_x := math.cos(angle_x) * angular_velocity_x
sin_x := math.sin(angle_x) * angular_velocity_x
- m[0][0] = 1
- m[1][1] = +cos_x
- m[2][1] = +sin_x
- m[1][2] = -sin_x
- m[2][2] = +cos_x
- m[3][3] = 1
+ m[0, 0] = 1
+ m[1, 1] = +cos_x
+ m[1, 2] = +sin_x
+ m[2, 1] = -sin_x
+ m[2, 2] = +cos_x
+ m[3, 3] = 1
return
}
matrix4_from_derived_euler_angle_y_f64 :: proc(angle_y: f64, angular_velocity_y: f64) -> (m: Matrix4f64) {
cos_y := math.cos(angle_y) * angular_velocity_y
sin_y := math.sin(angle_y) * angular_velocity_y
- m[0][0] = +cos_y
- m[2][0] = -sin_y
- m[1][1] = 1
- m[0][2] = +sin_y
- m[2][2] = +cos_y
- m[3][3] = 1
+ m[0, 0] = +cos_y
+ m[0, 2] = -sin_y
+ m[1, 1] = 1
+ m[2, 0] = +sin_y
+ m[2, 2] = +cos_y
+ m[3, 3] = 1
return
}
matrix4_from_derived_euler_angle_z_f64 :: proc(angle_z: f64, angular_velocity_z: f64) -> (m: Matrix4f64) {
cos_z := math.cos(angle_z) * angular_velocity_z
sin_z := math.sin(angle_z) * angular_velocity_z
- m[0][0] = +cos_z
- m[1][0] = +sin_z
- m[1][1] = +cos_z
- m[0][1] = -sin_z
- m[2][2] = 1
- m[3][3] = 1
+ m[0, 0] = +cos_z
+ m[0, 1] = +sin_z
+ m[1, 1] = +cos_z
+ m[1, 0] = -sin_z
+ m[2, 2] = 1
+ m[3, 3] = 1
return
}
@@ -805,15 +805,15 @@ matrix4_from_derived_euler_angle_z_f64 :: proc(angle_z: f64, angular_velocity_z:
matrix4_from_euler_angles_xy_f64 :: proc(angle_x, angle_y: f64) -> (m: Matrix4f64) {
cos_x, sin_x := math.cos(angle_x), math.sin(angle_x)
cos_y, sin_y := math.cos(angle_y), math.sin(angle_y)
- m[0][0] = cos_y
- m[1][0] = -sin_x * - sin_y
- m[2][0] = -cos_x * - sin_y
- m[1][1] = cos_x
- m[2][1] = sin_x
- m[0][2] = sin_y
- m[1][2] = -sin_x * cos_y
- m[2][2] = cos_x * cos_y
- m[3][3] = 1
+ m[0, 0] = cos_y
+ m[0, 1] = -sin_x * - sin_y
+ m[0, 2] = -cos_x * - sin_y
+ m[1, 1] = cos_x
+ m[1, 2] = sin_x
+ m[2, 0] = sin_y
+ m[2, 1] = -sin_x * cos_y
+ m[2, 2] = cos_x * cos_y
+ m[3, 3] = 1
return
}
@@ -821,15 +821,15 @@ matrix4_from_euler_angles_xy_f64 :: proc(angle_x, angle_y: f64) -> (m: Matrix4f6
matrix4_from_euler_angles_yx_f64 :: proc(angle_y, angle_x: f64) -> (m: Matrix4f64) {
cos_x, sin_x := math.cos(angle_x), math.sin(angle_x)
cos_y, sin_y := math.cos(angle_y), math.sin(angle_y)
- m[0][0] = cos_y
- m[2][0] = -sin_y
- m[0][1] = sin_y*sin_x
- m[1][1] = cos_x
- m[2][1] = cos_y*sin_x
- m[0][2] = sin_y*cos_x
- m[1][2] = -sin_x
- m[2][2] = cos_y*cos_x
- m[3][3] = 1
+ m[0, 0] = cos_y
+ m[0, 2] = -sin_y
+ m[1, 0] = sin_y*sin_x
+ m[1, 1] = cos_x
+ m[1, 2] = cos_y*sin_x
+ m[2, 0] = sin_y*cos_x
+ m[2, 1] = -sin_x
+ m[2, 2] = cos_y*cos_x
+ m[3, 3] = 1
return
}
@@ -855,22 +855,22 @@ matrix4_from_euler_angles_xyz_f64 :: proc(t1, t2, t3: f64) -> (m: Matrix4f64) {
s2 := math.sin(-t2)
s3 := math.sin(-t3)
- m[0][0] = c2 * c3
- m[0][1] =-c1 * s3 + s1 * s2 * c3
- m[0][2] = s1 * s3 + c1 * s2 * c3
- m[0][3] = 0
- m[1][0] = c2 * s3
- m[1][1] = c1 * c3 + s1 * s2 * s3
- m[1][2] =-s1 * c3 + c1 * s2 * s3
- m[1][3] = 0
- m[2][0] =-s2
- m[2][1] = s1 * c2
- m[2][2] = c1 * c2
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c2 * c3
+ m[1, 0] =-c1 * s3 + s1 * s2 * c3
+ m[2, 0] = s1 * s3 + c1 * s2 * c3
+ m[3, 0] = 0
+ m[0, 1] = c2 * s3
+ m[1, 1] = c1 * c3 + s1 * s2 * s3
+ m[2, 1] =-s1 * c3 + c1 * s2 * s3
+ m[3, 1] = 0
+ m[0, 2] =-s2
+ m[1, 2] = s1 * c2
+ m[2, 2] = c1 * c2
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -882,22 +882,22 @@ matrix4_from_euler_angles_yxz_f64 :: proc(yaw, pitch, roll: f64) -> (m: Matrix4f
cb := math.cos(roll)
sb := math.sin(roll)
- m[0][0] = ch * cb + sh * sp * sb
- m[0][1] = sb * cp
- m[0][2] = -sh * cb + ch * sp * sb
- m[0][3] = 0
- m[1][0] = -ch * sb + sh * sp * cb
- m[1][1] = cb * cp
- m[1][2] = sb * sh + ch * sp * cb
- m[1][3] = 0
- m[2][0] = sh * cp
- m[2][1] = -sp
- m[2][2] = ch * cp
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = ch * cb + sh * sp * sb
+ m[1, 0] = sb * cp
+ m[2, 0] = -sh * cb + ch * sp * sb
+ m[3, 0] = 0
+ m[0, 1] = -ch * sb + sh * sp * cb
+ m[1, 1] = cb * cp
+ m[2, 1] = sb * sh + ch * sp * cb
+ m[3, 1] = 0
+ m[0, 2] = sh * cp
+ m[1, 2] = -sp
+ m[2, 2] = ch * cp
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -909,22 +909,22 @@ matrix4_from_euler_angles_xzx_f64 :: proc(t1, t2, t3: f64) -> (m: Matrix4f64) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c2
- m[0][1] = c1 * s2
- m[0][2] = s1 * s2
- m[0][3] = 0
- m[1][0] =-c3 * s2
- m[1][1] = c1 * c2 * c3 - s1 * s3
- m[1][2] = c1 * s3 + c2 * c3 * s1
- m[1][3] = 0
- m[2][0] = s2 * s3
- m[2][1] =-c3 * s1 - c1 * c2 * s3
- m[2][2] = c1 * c3 - c2 * s1 * s3
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c2
+ m[1, 0] = c1 * s2
+ m[2, 0] = s1 * s2
+ m[3, 0] = 0
+ m[0, 1] =-c3 * s2
+ m[1, 1] = c1 * c2 * c3 - s1 * s3
+ m[2, 1] = c1 * s3 + c2 * c3 * s1
+ m[3, 1] = 0
+ m[0, 2] = s2 * s3
+ m[1, 2] =-c3 * s1 - c1 * c2 * s3
+ m[2, 2] = c1 * c3 - c2 * s1 * s3
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -936,22 +936,22 @@ matrix4_from_euler_angles_xyx_f64 :: proc(t1, t2, t3: f64) -> (m: Matrix4f64) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c2
- m[0][1] = s1 * s2
- m[0][2] =-c1 * s2
- m[0][3] = 0
- m[1][0] = s2 * s3
- m[1][1] = c1 * c3 - c2 * s1 * s3
- m[1][2] = c3 * s1 + c1 * c2 * s3
- m[1][3] = 0
- m[2][0] = c3 * s2
- m[2][1] =-c1 * s3 - c2 * c3 * s1
- m[2][2] = c1 * c2 * c3 - s1 * s3
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c2
+ m[1, 0] = s1 * s2
+ m[2, 0] =-c1 * s2
+ m[3, 0] = 0
+ m[0, 1] = s2 * s3
+ m[1, 1] = c1 * c3 - c2 * s1 * s3
+ m[2, 1] = c3 * s1 + c1 * c2 * s3
+ m[3, 1] = 0
+ m[0, 2] = c3 * s2
+ m[1, 2] =-c1 * s3 - c2 * c3 * s1
+ m[2, 2] = c1 * c2 * c3 - s1 * s3
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -963,22 +963,22 @@ matrix4_from_euler_angles_yxy_f64 :: proc(t1, t2, t3: f64) -> (m: Matrix4f64) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c3 - c2 * s1 * s3
- m[0][1] = s2* s3
- m[0][2] =-c3 * s1 - c1 * c2 * s3
- m[0][3] = 0
- m[1][0] = s1 * s2
- m[1][1] = c2
- m[1][2] = c1 * s2
- m[1][3] = 0
- m[2][0] = c1 * s3 + c2 * c3 * s1
- m[2][1] =-c3 * s2
- m[2][2] = c1 * c2 * c3 - s1 * s3
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c1 * c3 - c2 * s1 * s3
+ m[1, 0] = s2* s3
+ m[2, 0] =-c3 * s1 - c1 * c2 * s3
+ m[3, 0] = 0
+ m[0, 1] = s1 * s2
+ m[1, 1] = c2
+ m[2, 1] = c1 * s2
+ m[3, 1] = 0
+ m[0, 2] = c1 * s3 + c2 * c3 * s1
+ m[1, 2] =-c3 * s2
+ m[2, 2] = c1 * c2 * c3 - s1 * s3
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -990,22 +990,22 @@ matrix4_from_euler_angles_yzy_f64 :: proc(t1, t2, t3: f64) -> (m: Matrix4f64) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c2 * c3 - s1 * s3
- m[0][1] = c3 * s2
- m[0][2] =-c1 * s3 - c2 * c3 * s1
- m[0][3] = 0
- m[1][0] =-c1 * s2
- m[1][1] = c2
- m[1][2] = s1 * s2
- m[1][3] = 0
- m[2][0] = c3 * s1 + c1 * c2 * s3
- m[2][1] = s2 * s3
- m[2][2] = c1 * c3 - c2 * s1 * s3
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c1 * c2 * c3 - s1 * s3
+ m[1, 0] = c3 * s2
+ m[2, 0] =-c1 * s3 - c2 * c3 * s1
+ m[3, 0] = 0
+ m[0, 1] =-c1 * s2
+ m[1, 1] = c2
+ m[2, 1] = s1 * s2
+ m[3, 1] = 0
+ m[0, 2] = c3 * s1 + c1 * c2 * s3
+ m[1, 2] = s2 * s3
+ m[2, 2] = c1 * c3 - c2 * s1 * s3
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -1017,22 +1017,22 @@ matrix4_from_euler_angles_zyz_f64 :: proc(t1, t2, t3: f64) -> (m: Matrix4f64) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c2 * c3 - s1 * s3
- m[0][1] = c1 * s3 + c2 * c3 * s1
- m[0][2] =-c3 * s2
- m[0][3] = 0
- m[1][0] =-c3 * s1 - c1 * c2 * s3
- m[1][1] = c1 * c3 - c2 * s1 * s3
- m[1][2] = s2 * s3
- m[1][3] = 0
- m[2][0] = c1 * s2
- m[2][1] = s1 * s2
- m[2][2] = c2
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c1 * c2 * c3 - s1 * s3
+ m[1, 0] = c1 * s3 + c2 * c3 * s1
+ m[2, 0] =-c3 * s2
+ m[3, 0] = 0
+ m[0, 1] =-c3 * s1 - c1 * c2 * s3
+ m[1, 1] = c1 * c3 - c2 * s1 * s3
+ m[2, 1] = s2 * s3
+ m[3, 1] = 0
+ m[0, 2] = c1 * s2
+ m[1, 2] = s1 * s2
+ m[2, 2] = c2
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -1044,22 +1044,22 @@ matrix4_from_euler_angles_zxz_f64 :: proc(t1, t2, t3: f64) -> (m: Matrix4f64) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c3 - c2 * s1 * s3
- m[0][1] = c3 * s1 + c1 * c2 * s3
- m[0][2] = s2 *s3
- m[0][3] = 0
- m[1][0] =-c1 * s3 - c2 * c3 * s1
- m[1][1] = c1 * c2 * c3 - s1 * s3
- m[1][2] = c3 * s2
- m[1][3] = 0
- m[2][0] = s1 * s2
- m[2][1] =-c1 * s2
- m[2][2] = c2
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c1 * c3 - c2 * s1 * s3
+ m[1, 0] = c3 * s1 + c1 * c2 * s3
+ m[2, 0] = s2 *s3
+ m[3, 0] = 0
+ m[0, 1] =-c1 * s3 - c2 * c3 * s1
+ m[1, 1] = c1 * c2 * c3 - s1 * s3
+ m[2, 1] = c3 * s2
+ m[3, 1] = 0
+ m[0, 2] = s1 * s2
+ m[1, 2] =-c1 * s2
+ m[2, 2] = c2
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -1072,22 +1072,22 @@ matrix4_from_euler_angles_xzy_f64 :: proc(t1, t2, t3: f64) -> (m: Matrix4f64) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c2 * c3
- m[0][1] = s1 * s3 + c1 * c3 * s2
- m[0][2] = c3 * s1 * s2 - c1 * s3
- m[0][3] = 0
- m[1][0] =-s2
- m[1][1] = c1 * c2
- m[1][2] = c2 * s1
- m[1][3] = 0
- m[2][0] = c2 * s3
- m[2][1] = c1 * s2 * s3 - c3 * s1
- m[2][2] = c1 * c3 + s1 * s2 *s3
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c2 * c3
+ m[1, 0] = s1 * s3 + c1 * c3 * s2
+ m[2, 0] = c3 * s1 * s2 - c1 * s3
+ m[3, 0] = 0
+ m[0, 1] =-s2
+ m[1, 1] = c1 * c2
+ m[2, 1] = c2 * s1
+ m[3, 1] = 0
+ m[0, 2] = c2 * s3
+ m[1, 2] = c1 * s2 * s3 - c3 * s1
+ m[2, 2] = c1 * c3 + s1 * s2 *s3
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -1099,22 +1099,22 @@ matrix4_from_euler_angles_yzx_f64 :: proc(t1, t2, t3: f64) -> (m: Matrix4f64) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c2
- m[0][1] = s2
- m[0][2] =-c2 * s1
- m[0][3] = 0
- m[1][0] = s1 * s3 - c1 * c3 * s2
- m[1][1] = c2 * c3
- m[1][2] = c1 * s3 + c3 * s1 * s2
- m[1][3] = 0
- m[2][0] = c3 * s1 + c1 * s2 * s3
- m[2][1] =-c2 * s3
- m[2][2] = c1 * c3 - s1 * s2 * s3
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c1 * c2
+ m[1, 0] = s2
+ m[2, 0] =-c2 * s1
+ m[3, 0] = 0
+ m[0, 1] = s1 * s3 - c1 * c3 * s2
+ m[1, 1] = c2 * c3
+ m[2, 1] = c1 * s3 + c3 * s1 * s2
+ m[3, 1] = 0
+ m[0, 2] = c3 * s1 + c1 * s2 * s3
+ m[1, 2] =-c2 * s3
+ m[2, 2] = c1 * c3 - s1 * s2 * s3
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -1126,22 +1126,22 @@ matrix4_from_euler_angles_zyx_f64 :: proc(t1, t2, t3: f64) -> (m: Matrix4f64) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c2
- m[0][1] = c2 * s1
- m[0][2] =-s2
- m[0][3] = 0
- m[1][0] = c1 * s2 * s3 - c3 * s1
- m[1][1] = c1 * c3 + s1 * s2 * s3
- m[1][2] = c2 * s3
- m[1][3] = 0
- m[2][0] = s1 * s3 + c1 * c3 * s2
- m[2][1] = c3 * s1 * s2 - c1 * s3
- m[2][2] = c2 * c3
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c1 * c2
+ m[1, 0] = c2 * s1
+ m[2, 0] =-s2
+ m[3, 0] = 0
+ m[0, 1] = c1 * s2 * s3 - c3 * s1
+ m[1, 1] = c1 * c3 + s1 * s2 * s3
+ m[2, 1] = c2 * s3
+ m[3, 1] = 0
+ m[0, 2] = s1 * s3 + c1 * c3 * s2
+ m[1, 2] = c3 * s1 * s2 - c1 * s3
+ m[2, 2] = c2 * c3
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -1153,22 +1153,22 @@ matrix4_from_euler_angles_zxy_f64 :: proc(t1, t2, t3: f64) -> (m: Matrix4f64) {
c3 := math.cos(t3)
s3 := math.sin(t3)
- m[0][0] = c1 * c3 - s1 * s2 * s3
- m[0][1] = c3 * s1 + c1 * s2 * s3
- m[0][2] =-c2 * s3
- m[0][3] = 0
- m[1][0] =-c2 * s1
- m[1][1] = c1 * c2
- m[1][2] = s2
- m[1][3] = 0
- m[2][0] = c1 * s3 + c3 * s1 * s2
- m[2][1] = s1 * s3 - c1 * c3 * s2
- m[2][2] = c2 * c3
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = c1 * c3 - s1 * s2 * s3
+ m[1, 0] = c3 * s1 + c1 * s2 * s3
+ m[2, 0] =-c2 * s3
+ m[3, 0] = 0
+ m[0, 1] =-c2 * s1
+ m[1, 1] = c1 * c2
+ m[2, 1] = s2
+ m[3, 1] = 0
+ m[0, 2] = c1 * s3 + c3 * s1 * s2
+ m[1, 2] = s1 * s3 - c1 * c3 * s2
+ m[2, 2] = c2 * c3
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return
}
@@ -1181,32 +1181,32 @@ matrix4_from_yaw_pitch_roll_f64 :: proc(yaw, pitch, roll: f64) -> (m: Matrix4f64
cb := math.cos(roll)
sb := math.sin(roll)
- m[0][0] = ch * cb + sh * sp * sb
- m[0][1] = sb * cp
- m[0][2] = -sh * cb + ch * sp * sb
- m[0][3] = 0
- m[1][0] = -ch * sb + sh * sp * cb
- m[1][1] = cb * cp
- m[1][2] = sb * sh + ch * sp * cb
- m[1][3] = 0
- m[2][0] = sh * cp
- m[2][1] = -sp
- m[2][2] = ch * cp
- m[2][3] = 0
- m[3][0] = 0
- m[3][1] = 0
- m[3][2] = 0
- m[3][3] = 1
+ m[0, 0] = ch * cb + sh * sp * sb
+ m[1, 0] = sb * cp
+ m[2, 0] = -sh * cb + ch * sp * sb
+ m[3, 0] = 0
+ m[0, 1] = -ch * sb + sh * sp * cb
+ m[1, 1] = cb * cp
+ m[2, 1] = sb * sh + ch * sp * cb
+ m[3, 1] = 0
+ m[0, 2] = sh * cp
+ m[1, 2] = -sp
+ m[2, 2] = ch * cp
+ m[3, 2] = 0
+ m[0, 3] = 0
+ m[1, 3] = 0
+ m[2, 3] = 0
+ m[3, 3] = 1
return m
}
euler_angles_xyz_from_matrix4_f64 :: proc(m: Matrix4f64) -> (t1, t2, t3: f64) {
- T1 := math.atan2(m[2][1], m[2][2])
- C2 := math.sqrt(m[0][0]*m[0][0] + m[1][0]*m[1][0])
- T2 := math.atan2(-m[2][0], C2)
+ T1 := math.atan2(m[1, 2], m[2, 2])
+ C2 := math.sqrt(m[0, 0]*m[0, 0] + m[0, 1]*m[0, 1])
+ T2 := math.atan2(-m[0, 2], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[0][2] - C1*m[0][1], C1*m[1][1] - S1*m[1][2])
+ T3 := math.atan2(S1*m[2, 0] - C1*m[1, 0], C1*m[1, 1] - S1*m[2, 1])
t1 = -T1
t2 = -T2
t3 = -T3
@@ -1214,12 +1214,12 @@ euler_angles_xyz_from_matrix4_f64 :: proc(m: Matrix4f64) -> (t1, t2, t3: f64) {
}
euler_angles_yxz_from_matrix4_f64 :: proc(m: Matrix4f64) -> (t1, t2, t3: f64) {
- T1 := math.atan2(m[2][0], m[2][2])
- C2 := math.sqrt(m[0][1]*m[0][1] + m[1][1]*m[1][1])
- T2 := math.atan2(-m[2][1], C2)
+ T1 := math.atan2(m[0, 2], m[2, 2])
+ C2 := math.sqrt(m[1, 0]*m[1, 0] + m[1, 1]*m[1, 1])
+ T2 := math.atan2(-m[1, 2], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[1][2] - C1*m[1][0], C1*m[0][0] - S1*m[0][2])
+ T3 := math.atan2(S1*m[2, 1] - C1*m[0, 1], C1*m[0, 0] - S1*m[2, 0])
t1 = T1
t2 = T2
t3 = T3
@@ -1227,12 +1227,12 @@ euler_angles_yxz_from_matrix4_f64 :: proc(m: Matrix4f64) -> (t1, t2, t3: f64) {
}
euler_angles_xzx_from_matrix4_f64 :: proc(m: Matrix4f64) -> (t1, t2, t3: f64) {
- T1 := math.atan2(m[0][2], m[0][1])
- S2 := math.sqrt(m[1][0]*m[1][0] + m[2][0]*m[2][0])
- T2 := math.atan2(S2, m[0][0])
+ T1 := math.atan2(m[2, 0], m[1, 0])
+ S2 := math.sqrt(m[0, 1]*m[0, 1] + m[0, 2]*m[0, 2])
+ T2 := math.atan2(S2, m[0, 0])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(C1*m[1][2] - S1*m[1][1], C1*m[2][2] - S1*m[2][1])
+ T3 := math.atan2(C1*m[2, 1] - S1*m[1, 1], C1*m[2, 2] - S1*m[1, 2])
t1 = T1
t2 = T2
t3 = T3
@@ -1240,12 +1240,12 @@ euler_angles_xzx_from_matrix4_f64 :: proc(m: Matrix4f64) -> (t1, t2, t3: f64) {
}
euler_angles_xyx_from_matrix4_f64 :: proc(m: Matrix4f64) -> (t1, t2, t3: f64) {
- T1 := math.atan2(m[0][1], -m[0][2])
- S2 := math.sqrt(m[1][0]*m[1][0] + m[2][0]*m[2][0])
- T2 := math.atan2(S2, m[0][0])
+ T1 := math.atan2(m[1, 0], -m[2, 0])
+ S2 := math.sqrt(m[0, 1]*m[0, 1] + m[0, 2]*m[0, 2])
+ T2 := math.atan2(S2, m[0, 0])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(-C1*m[2][1] - S1*m[2][2], C1*m[1][1] + S1*m[1][2])
+ T3 := math.atan2(-C1*m[1, 2] - S1*m[2, 2], C1*m[1, 1] + S1*m[2, 1])
t1 = T1
t2 = T2
t3 = T3
@@ -1253,12 +1253,12 @@ euler_angles_xyx_from_matrix4_f64 :: proc(m: Matrix4f64) -> (t1, t2, t3: f64) {
}
euler_angles_yxy_from_matrix4_f64 :: proc(m: Matrix4f64) -> (t1, t2, t3: f64) {
- T1 := math.atan2(m[1][0], m[1][2])
- S2 := math.sqrt(m[0][1]*m[0][1] + m[2][1]*m[2][1])
- T2 := math.atan2(S2, m[1][1])
+ T1 := math.atan2(m[0, 1], m[2, 1])
+ S2 := math.sqrt(m[1, 0]*m[1, 0] + m[1, 2]*m[1, 2])
+ T2 := math.atan2(S2, m[1, 1])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(C1*m[2][0] - S1*m[2][2], C1*m[0][0] - S1*m[0][2])
+ T3 := math.atan2(C1*m[0, 2] - S1*m[2, 2], C1*m[0, 0] - S1*m[2, 0])
t1 = T1
t2 = T2
t3 = T3
@@ -1266,24 +1266,24 @@ euler_angles_yxy_from_matrix4_f64 :: proc(m: Matrix4f64) -> (t1, t2, t3: f64) {
}
euler_angles_yzy_from_matrix4_f64 :: proc(m: Matrix4f64) -> (t1, t2, t3: f64) {
- T1 := math.atan2(m[1][2], -m[1][0])
- S2 := math.sqrt(m[0][1]*m[0][1] + m[2][1]*m[2][1])
- T2 := math.atan2(S2, m[1][1])
+ T1 := math.atan2(m[2, 1], -m[0, 1])
+ S2 := math.sqrt(m[1, 0]*m[1, 0] + m[1, 2]*m[1, 2])
+ T2 := math.atan2(S2, m[1, 1])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(-S1*m[0][0] - C1*m[0][2], S1*m[2][0] + C1*m[2][2])
+ T3 := math.atan2(-S1*m[0, 0] - C1*m[2, 0], S1*m[0, 2] + C1*m[2, 2])
t1 = T1
t2 = T2
t3 = T3
return
}
euler_angles_zyz_from_matrix4_f64 :: proc(m: Matrix4f64) -> (t1, t2, t3: f64) {
- T1 := math.atan2(m[2][1], m[2][0])
- S2 := math.sqrt(m[0][2]*m[0][2] + m[1][2]*m[1][2])
- T2 := math.atan2(S2, m[2][2])
+ T1 := math.atan2(m[1, 2], m[0, 2])
+ S2 := math.sqrt(m[2, 0]*m[2, 0] + m[2, 1]*m[2, 1])
+ T2 := math.atan2(S2, m[2, 2])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(C1*m[0][1] - S1*m[0][0], C1*m[1][1] - S1*m[1][0])
+ T3 := math.atan2(C1*m[1, 0] - S1*m[0, 0], C1*m[1, 1] - S1*m[0, 1])
t1 = T1
t2 = T2
t3 = T3
@@ -1291,12 +1291,12 @@ euler_angles_zyz_from_matrix4_f64 :: proc(m: Matrix4f64) -> (t1, t2, t3: f64) {
}
euler_angles_zxz_from_matrix4_f64 :: proc(m: Matrix4f64) -> (t1, t2, t3: f64) {
- T1 := math.atan2(m[2][0], -m[2][1])
- S2 := math.sqrt(m[0][2]*m[0][2] + m[1][2]*m[1][2])
- T2 := math.atan2(S2, m[2][2])
+ T1 := math.atan2(m[0, 2], -m[1, 2])
+ S2 := math.sqrt(m[2, 0]*m[2, 0] + m[2, 1]*m[2, 1])
+ T2 := math.atan2(S2, m[2, 2])
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(-C1*m[1][0] - S1*m[1][1], C1*m[0][0] + S1*m[0][1])
+ T3 := math.atan2(-C1*m[0, 1] - S1*m[1, 1], C1*m[0, 0] + S1*m[1, 0])
t1 = T1
t2 = T2
t3 = T3
@@ -1304,12 +1304,12 @@ euler_angles_zxz_from_matrix4_f64 :: proc(m: Matrix4f64) -> (t1, t2, t3: f64) {
}
euler_angles_xzy_from_matrix4_f64 :: proc(m: Matrix4f64) -> (t1, t2, t3: f64) {
- T1 := math.atan2(m[1][2], m[1][1])
- C2 := math.sqrt(m[0][0]*m[0][0] + m[2][0]*m[2][0])
- T2 := math.atan2(-m[1][0], C2)
+ T1 := math.atan2(m[2, 1], m[1, 1])
+ C2 := math.sqrt(m[0, 0]*m[0, 0] + m[0, 2]*m[0, 2])
+ T2 := math.atan2(-m[0, 1], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[0][1] - C1*m[0][2], C1*m[2][2] - S1*m[2][1])
+ T3 := math.atan2(S1*m[1, 0] - C1*m[2, 0], C1*m[2, 2] - S1*m[1, 2])
t1 = T1
t2 = T2
t3 = T3
@@ -1317,12 +1317,12 @@ euler_angles_xzy_from_matrix4_f64 :: proc(m: Matrix4f64) -> (t1, t2, t3: f64) {
}
euler_angles_yzx_from_matrix4_f64 :: proc(m: Matrix4f64) -> (t1, t2, t3: f64) {
- T1 := math.atan2(-m[0][2], m[0][0])
- C2 := math.sqrt(m[1][1]*m[1][1] + m[2][1]*m[2][1])
- T2 := math.atan2(m[0][1], C2)
+ T1 := math.atan2(-m[2, 0], m[0, 0])
+ C2 := math.sqrt(m[1, 1]*m[1, 1] + m[1, 2]*m[1, 2])
+ T2 := math.atan2(m[1, 0], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[1][0] + C1*m[1][2], S1*m[2][0] + C1*m[2][2])
+ T3 := math.atan2(S1*m[0, 1] + C1*m[2, 1], S1*m[0, 2] + C1*m[2, 2])
t1 = T1
t2 = T2
t3 = T3
@@ -1330,12 +1330,12 @@ euler_angles_yzx_from_matrix4_f64 :: proc(m: Matrix4f64) -> (t1, t2, t3: f64) {
}
euler_angles_zyx_from_matrix4_f64 :: proc(m: Matrix4f64) -> (t1, t2, t3: f64) {
- T1 := math.atan2(m[0][1], m[0][0])
- C2 := math.sqrt(m[1][2]*m[1][2] + m[2][2]*m[2][2])
- T2 := math.atan2(-m[0][2], C2)
+ T1 := math.atan2(m[1, 0], m[0, 0])
+ C2 := math.sqrt(m[2, 1]*m[2, 1] + m[2, 2]*m[2, 2])
+ T2 := math.atan2(-m[2, 0], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(S1*m[2][0] - C1*m[2][1], C1*m[1][1] - S1*m[1][0])
+ T3 := math.atan2(S1*m[0, 2] - C1*m[1, 2], C1*m[1, 1] - S1*m[0, 1])
t1 = T1
t2 = T2
t3 = T3
@@ -1343,12 +1343,12 @@ euler_angles_zyx_from_matrix4_f64 :: proc(m: Matrix4f64) -> (t1, t2, t3: f64) {
}
euler_angles_zxy_from_matrix4_f64 :: proc(m: Matrix4f64) -> (t1, t2, t3: f64) {
- T1 := math.atan2(-m[1][0], m[1][1])
- C2 := math.sqrt(m[0][2]*m[0][2] + m[2][2]*m[2][2])
- T2 := math.atan2(m[1][2], C2)
+ T1 := math.atan2(-m[0, 1], m[1, 1])
+ C2 := math.sqrt(m[2, 0]*m[2, 0] + m[2, 2]*m[2, 2])
+ T2 := math.atan2(m[2, 1], C2)
S1 := math.sin(T1)
C1 := math.cos(T1)
- T3 := math.atan2(C1*m[2][0] + S1*m[2][1], C1*m[0][0] + S1*m[0][1])
+ T3 := math.atan2(C1*m[0, 2] + S1*m[1, 2], C1*m[0, 0] + S1*m[1, 0])
t1 = T1
t2 = T2
t3 = T3
diff --git a/core/math/linalg/swizzle.odin b/core/math/linalg/swizzle.odin
index f035a5276..ada4aebcf 100644
--- a/core/math/linalg/swizzle.odin
+++ b/core/math/linalg/swizzle.odin
@@ -1,5 +1,10 @@
package linalg
+/*
+ These procedures are to allow for swizzling with non-compile (runtime) known components
+*/
+
+
Scalar_Components :: enum u8 {
x = 0,
r = 0,
From 80bd1eb615ba83727a57173a391ae2bb710f6533 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Mon, 10 Jan 2022 12:19:49 +0000
Subject: [PATCH 072/710] Fix polymorphic matrix element with a minor hack
---
core/math/linalg/general.odin | 8 ++++----
src/check_type.cpp | 11 +++++++++++
2 files changed, 15 insertions(+), 4 deletions(-)
diff --git a/core/math/linalg/general.odin b/core/math/linalg/general.odin
index b0572c0d3..9f22fa45e 100644
--- a/core/math/linalg/general.odin
+++ b/core/math/linalg/general.odin
@@ -287,10 +287,10 @@ array_cast :: proc(v: $A/[$N]$T, $Elem_Type: typeid) -> (w: [N]Elem_Type) #no_bo
return
}
-matrix_cast :: proc(v: $A/[$M][$N]$T, $Elem_Type: typeid) -> (w: [M][N]Elem_Type) #no_bounds_check {
- for i in 0.. (w: matrix[M, N]Elem_Type) #no_bounds_check {
+ for j in 0..elem);
+ if (e && e->kind == Entity_TypeName && e->TypeName.is_type_alias) {
+ // HACK TODO(bill): This is to allow polymorphic parameters for matrix elements
+ // proc($T: typeid) -> matrix[2, 2]T
+ //
+ // THIS IS NEEDS TO BE FIXED AND NOT USE THIS HACK
+ goto type_assign;
+ }
+ }
gbString s = type_to_string(elem);
error(column.expr, "Matrix elements types are limited to integers, floats, and complex, got %s", s);
gb_string_free(s);
}
+type_assign:;
*type = alloc_type_matrix(elem, row_count, column_count, generic_row, generic_column);
From cb1080d56c7a5d26f344a41ea6553bf154509072 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Mon, 10 Jan 2022 13:31:34 +0000
Subject: [PATCH 073/710] Fix `check_procedure_bodies` to allow multiple
threads caused by a typo
---
src/checker.cpp | 1 -
1 file changed, 1 deletion(-)
diff --git a/src/checker.cpp b/src/checker.cpp
index 667146eda..c270e8210 100644
--- a/src/checker.cpp
+++ b/src/checker.cpp
@@ -4941,7 +4941,6 @@ void check_procedure_bodies(Checker *c) {
if (!build_context.threaded_checker) {
worker_count = 0;
}
- worker_count = 0;
if (worker_count == 0) {
auto *this_queue = &c->procs_to_check_queue;
From 6f3e450c502a8d05653ffcd98c74e2e933a34d1d Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Mon, 10 Jan 2022 14:03:36 +0000
Subject: [PATCH 074/710] Move error handling code to a separate file
---
src/error.cpp | 411 +++++++++++++++++++++++++++++++++++++++++++++
src/tokenizer.cpp | 415 +---------------------------------------------
2 files changed, 413 insertions(+), 413 deletions(-)
create mode 100644 src/error.cpp
diff --git a/src/error.cpp b/src/error.cpp
new file mode 100644
index 000000000..1496b4775
--- /dev/null
+++ b/src/error.cpp
@@ -0,0 +1,411 @@
+struct ErrorCollector {
+ TokenPos prev;
+ std::atomic count;
+ std::atomic warning_count;
+ std::atomic in_block;
+ BlockingMutex mutex;
+ BlockingMutex error_out_mutex;
+ BlockingMutex string_mutex;
+ RecursiveMutex block_mutex;
+
+ Array error_buffer;
+ Array errors;
+};
+
+gb_global ErrorCollector global_error_collector;
+
+#define MAX_ERROR_COLLECTOR_COUNT (36)
+
+
+bool any_errors(void) {
+ return global_error_collector.count.load() != 0;
+}
+
+void init_global_error_collector(void) {
+ mutex_init(&global_error_collector.mutex);
+ mutex_init(&global_error_collector.block_mutex);
+ mutex_init(&global_error_collector.error_out_mutex);
+ mutex_init(&global_error_collector.string_mutex);
+ array_init(&global_error_collector.errors, heap_allocator());
+ array_init(&global_error_collector.error_buffer, heap_allocator());
+ array_init(&global_file_path_strings, heap_allocator(), 1, 4096);
+ array_init(&global_files, heap_allocator(), 1, 4096);
+}
+
+
+bool set_file_path_string(i32 index, String const &path) {
+ bool ok = false;
+ GB_ASSERT(index >= 0);
+ mutex_lock(&global_error_collector.string_mutex);
+
+ if (index >= global_file_path_strings.count) {
+ array_resize(&global_file_path_strings, index+1);
+ }
+ String prev = global_file_path_strings[index];
+ if (prev.len == 0) {
+ global_file_path_strings[index] = path;
+ ok = true;
+ }
+
+ mutex_unlock(&global_error_collector.string_mutex);
+ return ok;
+}
+
+bool thread_safe_set_ast_file_from_id(i32 index, AstFile *file) {
+ bool ok = false;
+ GB_ASSERT(index >= 0);
+ mutex_lock(&global_error_collector.string_mutex);
+
+ if (index >= global_files.count) {
+ array_resize(&global_files, index+1);
+ }
+ AstFile *prev = global_files[index];
+ if (prev == nullptr) {
+ global_files[index] = file;
+ ok = true;
+ }
+
+ mutex_unlock(&global_error_collector.string_mutex);
+ return ok;
+}
+
+String get_file_path_string(i32 index) {
+ GB_ASSERT(index >= 0);
+ mutex_lock(&global_error_collector.string_mutex);
+
+ String path = {};
+ if (index < global_file_path_strings.count) {
+ path = global_file_path_strings[index];
+ }
+
+ mutex_unlock(&global_error_collector.string_mutex);
+ return path;
+}
+
+AstFile *thread_safe_get_ast_file_from_id(i32 index) {
+ GB_ASSERT(index >= 0);
+ mutex_lock(&global_error_collector.string_mutex);
+
+ AstFile *file = nullptr;
+ if (index < global_files.count) {
+ file = global_files[index];
+ }
+
+ mutex_unlock(&global_error_collector.string_mutex);
+ return file;
+}
+
+
+
+void begin_error_block(void) {
+ mutex_lock(&global_error_collector.block_mutex);
+ global_error_collector.in_block.store(true);
+}
+
+void end_error_block(void) {
+ if (global_error_collector.error_buffer.count > 0) {
+ isize n = global_error_collector.error_buffer.count;
+ u8 *text = gb_alloc_array(permanent_allocator(), u8, n+1);
+ gb_memmove(text, global_error_collector.error_buffer.data, n);
+ text[n] = 0;
+ String s = {text, n};
+ array_add(&global_error_collector.errors, s);
+ global_error_collector.error_buffer.count = 0;
+ }
+
+ global_error_collector.in_block.store(false);
+ mutex_unlock(&global_error_collector.block_mutex);
+}
+
+#define ERROR_BLOCK() begin_error_block(); defer (end_error_block())
+
+
+#define ERROR_OUT_PROC(name) void name(char const *fmt, va_list va)
+typedef ERROR_OUT_PROC(ErrorOutProc);
+
+ERROR_OUT_PROC(default_error_out_va) {
+ gbFile *f = gb_file_get_standard(gbFileStandard_Error);
+
+ char buf[4096] = {};
+ isize len = gb_snprintf_va(buf, gb_size_of(buf), fmt, va);
+ isize n = len-1;
+ if (global_error_collector.in_block) {
+ isize cap = global_error_collector.error_buffer.count + n;
+ array_reserve(&global_error_collector.error_buffer, cap);
+ u8 *data = global_error_collector.error_buffer.data + global_error_collector.error_buffer.count;
+ gb_memmove(data, buf, n);
+ global_error_collector.error_buffer.count += n;
+ } else {
+ mutex_lock(&global_error_collector.error_out_mutex);
+ {
+ u8 *text = gb_alloc_array(permanent_allocator(), u8, n+1);
+ gb_memmove(text, buf, n);
+ text[n] = 0;
+ array_add(&global_error_collector.errors, make_string(text, n));
+ }
+ mutex_unlock(&global_error_collector.error_out_mutex);
+
+ }
+ gb_file_write(f, buf, n);
+}
+
+
+ErrorOutProc *error_out_va = default_error_out_va;
+
+// NOTE: defined in build_settings.cpp
+bool global_warnings_as_errors(void);
+bool global_ignore_warnings(void);
+bool show_error_line(void);
+gbString get_file_line_as_string(TokenPos const &pos, i32 *offset);
+
+void error_out(char const *fmt, ...) {
+ va_list va;
+ va_start(va, fmt);
+ error_out_va(fmt, va);
+ va_end(va);
+}
+
+
+bool show_error_on_line(TokenPos const &pos, TokenPos end) {
+ if (!show_error_line()) {
+ return false;
+ }
+
+ i32 offset = 0;
+ gbString the_line = get_file_line_as_string(pos, &offset);
+ defer (gb_string_free(the_line));
+
+ if (the_line != nullptr) {
+ String line = make_string(cast(u8 const *)the_line, gb_string_length(the_line));
+
+ // TODO(bill): This assumes ASCII
+
+ enum {
+ MAX_LINE_LENGTH = 76,
+ MAX_TAB_WIDTH = 8,
+ ELLIPSIS_PADDING = 8
+ };
+
+ error_out("\n\t");
+ if (line.len+MAX_TAB_WIDTH+ELLIPSIS_PADDING > MAX_LINE_LENGTH) {
+ i32 const half_width = MAX_LINE_LENGTH/2;
+ i32 left = cast(i32)(offset);
+ i32 right = cast(i32)(line.len - offset);
+ left = gb_min(left, half_width);
+ right = gb_min(right, half_width);
+
+ line.text += offset-left;
+ line.len -= offset+right-left;
+
+ line = string_trim_whitespace(line);
+
+ offset = left + ELLIPSIS_PADDING/2;
+
+ error_out("... %.*s ...", LIT(line));
+ } else {
+ error_out("%.*s", LIT(line));
+ }
+ error_out("\n\t");
+
+ for (i32 i = 0; i < offset; i++) {
+ error_out(" ");
+ }
+ error_out("^");
+ if (end.file_id == pos.file_id) {
+ if (end.line > pos.line) {
+ for (i32 i = offset; i < line.len; i++) {
+ error_out("~");
+ }
+ } else if (end.line == pos.line && end.column > pos.column) {
+ i32 length = gb_min(end.offset - pos.offset, cast(i32)(line.len-offset));
+ for (i32 i = 1; i < length-1; i++) {
+ error_out("~");
+ }
+ if (length > 1) {
+ error_out("^");
+ }
+ }
+ }
+
+ error_out("\n\n");
+ return true;
+ }
+ return false;
+}
+
+void error_va(TokenPos const &pos, TokenPos end, char const *fmt, va_list va) {
+ global_error_collector.count.fetch_add(1);
+
+ mutex_lock(&global_error_collector.mutex);
+ // NOTE(bill): Duplicate error, skip it
+ if (pos.line == 0) {
+ error_out("Error: %s\n", gb_bprintf_va(fmt, va));
+ } else if (global_error_collector.prev != pos) {
+ global_error_collector.prev = pos;
+ error_out("%s %s\n",
+ token_pos_to_string(pos),
+ gb_bprintf_va(fmt, va));
+ show_error_on_line(pos, end);
+ }
+ mutex_unlock(&global_error_collector.mutex);
+ if (global_error_collector.count > MAX_ERROR_COLLECTOR_COUNT) {
+ gb_exit(1);
+ }
+}
+
+void warning_va(TokenPos const &pos, TokenPos end, char const *fmt, va_list va) {
+ if (global_warnings_as_errors()) {
+ error_va(pos, end, fmt, va);
+ return;
+ }
+ global_error_collector.warning_count.fetch_add(1);
+ mutex_lock(&global_error_collector.mutex);
+ if (!global_ignore_warnings()) {
+ // NOTE(bill): Duplicate error, skip it
+ if (pos.line == 0) {
+ error_out("Warning: %s\n", gb_bprintf_va(fmt, va));
+ } else if (global_error_collector.prev != pos) {
+ global_error_collector.prev = pos;
+ error_out("%s Warning: %s\n",
+ token_pos_to_string(pos),
+ gb_bprintf_va(fmt, va));
+ show_error_on_line(pos, end);
+ }
+ }
+ mutex_unlock(&global_error_collector.mutex);
+}
+
+
+void error_line_va(char const *fmt, va_list va) {
+ error_out_va(fmt, va);
+}
+
+void error_no_newline_va(TokenPos const &pos, char const *fmt, va_list va) {
+ mutex_lock(&global_error_collector.mutex);
+ global_error_collector.count++;
+ // NOTE(bill): Duplicate error, skip it
+ if (pos.line == 0) {
+ error_out("Error: %s", gb_bprintf_va(fmt, va));
+ } else if (global_error_collector.prev != pos) {
+ global_error_collector.prev = pos;
+ error_out("%s %s",
+ token_pos_to_string(pos),
+ gb_bprintf_va(fmt, va));
+ }
+ mutex_unlock(&global_error_collector.mutex);
+ if (global_error_collector.count > MAX_ERROR_COLLECTOR_COUNT) {
+ gb_exit(1);
+ }
+}
+
+
+void syntax_error_va(TokenPos const &pos, TokenPos end, char const *fmt, va_list va) {
+ mutex_lock(&global_error_collector.mutex);
+ global_error_collector.count++;
+ // NOTE(bill): Duplicate error, skip it
+ if (global_error_collector.prev != pos) {
+ global_error_collector.prev = pos;
+ error_out("%s Syntax Error: %s\n",
+ token_pos_to_string(pos),
+ gb_bprintf_va(fmt, va));
+ show_error_on_line(pos, end);
+ } else if (pos.line == 0) {
+ error_out("Syntax Error: %s\n", gb_bprintf_va(fmt, va));
+ }
+
+ mutex_unlock(&global_error_collector.mutex);
+ if (global_error_collector.count > MAX_ERROR_COLLECTOR_COUNT) {
+ gb_exit(1);
+ }
+}
+
+void syntax_warning_va(TokenPos const &pos, TokenPos end, char const *fmt, va_list va) {
+ if (global_warnings_as_errors()) {
+ syntax_error_va(pos, end, fmt, va);
+ return;
+ }
+ mutex_lock(&global_error_collector.mutex);
+ global_error_collector.warning_count++;
+ if (!global_ignore_warnings()) {
+ // NOTE(bill): Duplicate error, skip it
+ if (global_error_collector.prev != pos) {
+ global_error_collector.prev = pos;
+ error_out("%s Syntax Warning: %s\n",
+ token_pos_to_string(pos),
+ gb_bprintf_va(fmt, va));
+ show_error_on_line(pos, end);
+ } else if (pos.line == 0) {
+ error_out("Warning: %s\n", gb_bprintf_va(fmt, va));
+ }
+ }
+ mutex_unlock(&global_error_collector.mutex);
+}
+
+
+
+void warning(Token const &token, char const *fmt, ...) {
+ va_list va;
+ va_start(va, fmt);
+ warning_va(token.pos, {}, fmt, va);
+ va_end(va);
+}
+
+void error(Token const &token, char const *fmt, ...) {
+ va_list va;
+ va_start(va, fmt);
+ error_va(token.pos, {}, fmt, va);
+ va_end(va);
+}
+
+void error(TokenPos pos, char const *fmt, ...) {
+ va_list va;
+ va_start(va, fmt);
+ Token token = {};
+ token.pos = pos;
+ error_va(pos, {}, fmt, va);
+ va_end(va);
+}
+
+void error_line(char const *fmt, ...) {
+ va_list va;
+ va_start(va, fmt);
+ error_line_va(fmt, va);
+ va_end(va);
+}
+
+
+void syntax_error(Token const &token, char const *fmt, ...) {
+ va_list va;
+ va_start(va, fmt);
+ syntax_error_va(token.pos, {}, fmt, va);
+ va_end(va);
+}
+
+void syntax_error(TokenPos pos, char const *fmt, ...) {
+ va_list va;
+ va_start(va, fmt);
+ syntax_error_va(pos, {}, fmt, va);
+ va_end(va);
+}
+
+void syntax_warning(Token const &token, char const *fmt, ...) {
+ va_list va;
+ va_start(va, fmt);
+ syntax_warning_va(token.pos, {}, fmt, va);
+ va_end(va);
+}
+
+
+void compiler_error(char const *fmt, ...) {
+ va_list va;
+
+ va_start(va, fmt);
+ gb_printf_err("Internal Compiler Error: %s\n",
+ gb_bprintf_va(fmt, va));
+ va_end(va);
+ gb_exit(1);
+}
+
+
+
+
diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp
index 624aea2aa..20815fd16 100644
--- a/src/tokenizer.cpp
+++ b/src/tokenizer.cpp
@@ -264,419 +264,6 @@ bool token_is_newline(Token const &tok) {
return tok.kind == Token_Semicolon && tok.string == "\n";
}
-
-struct ErrorCollector {
- TokenPos prev;
- std::atomic count;
- std::atomic warning_count;
- std::atomic in_block;
- BlockingMutex mutex;
- BlockingMutex error_out_mutex;
- BlockingMutex string_mutex;
- RecursiveMutex block_mutex;
-
- Array error_buffer;
- Array errors;
-};
-
-gb_global ErrorCollector global_error_collector;
-
-#define MAX_ERROR_COLLECTOR_COUNT (36)
-
-
-bool any_errors(void) {
- return global_error_collector.count.load() != 0;
-}
-
-void init_global_error_collector(void) {
- mutex_init(&global_error_collector.mutex);
- mutex_init(&global_error_collector.block_mutex);
- mutex_init(&global_error_collector.error_out_mutex);
- mutex_init(&global_error_collector.string_mutex);
- array_init(&global_error_collector.errors, heap_allocator());
- array_init(&global_error_collector.error_buffer, heap_allocator());
- array_init(&global_file_path_strings, heap_allocator(), 1, 4096);
- array_init(&global_files, heap_allocator(), 1, 4096);
-}
-
-
-bool set_file_path_string(i32 index, String const &path) {
- bool ok = false;
- GB_ASSERT(index >= 0);
- mutex_lock(&global_error_collector.string_mutex);
-
- if (index >= global_file_path_strings.count) {
- array_resize(&global_file_path_strings, index+1);
- }
- String prev = global_file_path_strings[index];
- if (prev.len == 0) {
- global_file_path_strings[index] = path;
- ok = true;
- }
-
- mutex_unlock(&global_error_collector.string_mutex);
- return ok;
-}
-
-bool thread_safe_set_ast_file_from_id(i32 index, AstFile *file) {
- bool ok = false;
- GB_ASSERT(index >= 0);
- mutex_lock(&global_error_collector.string_mutex);
-
- if (index >= global_files.count) {
- array_resize(&global_files, index+1);
- }
- AstFile *prev = global_files[index];
- if (prev == nullptr) {
- global_files[index] = file;
- ok = true;
- }
-
- mutex_unlock(&global_error_collector.string_mutex);
- return ok;
-}
-
-String get_file_path_string(i32 index) {
- GB_ASSERT(index >= 0);
- mutex_lock(&global_error_collector.string_mutex);
-
- String path = {};
- if (index < global_file_path_strings.count) {
- path = global_file_path_strings[index];
- }
-
- mutex_unlock(&global_error_collector.string_mutex);
- return path;
-}
-
-AstFile *thread_safe_get_ast_file_from_id(i32 index) {
- GB_ASSERT(index >= 0);
- mutex_lock(&global_error_collector.string_mutex);
-
- AstFile *file = nullptr;
- if (index < global_files.count) {
- file = global_files[index];
- }
-
- mutex_unlock(&global_error_collector.string_mutex);
- return file;
-}
-
-
-
-void begin_error_block(void) {
- mutex_lock(&global_error_collector.block_mutex);
- global_error_collector.in_block.store(true);
-}
-
-void end_error_block(void) {
- if (global_error_collector.error_buffer.count > 0) {
- isize n = global_error_collector.error_buffer.count;
- u8 *text = gb_alloc_array(permanent_allocator(), u8, n+1);
- gb_memmove(text, global_error_collector.error_buffer.data, n);
- text[n] = 0;
- String s = {text, n};
- array_add(&global_error_collector.errors, s);
- global_error_collector.error_buffer.count = 0;
- }
-
- global_error_collector.in_block.store(false);
- mutex_unlock(&global_error_collector.block_mutex);
-}
-
-#define ERROR_BLOCK() begin_error_block(); defer (end_error_block())
-
-
-#define ERROR_OUT_PROC(name) void name(char const *fmt, va_list va)
-typedef ERROR_OUT_PROC(ErrorOutProc);
-
-ERROR_OUT_PROC(default_error_out_va) {
- gbFile *f = gb_file_get_standard(gbFileStandard_Error);
-
- char buf[4096] = {};
- isize len = gb_snprintf_va(buf, gb_size_of(buf), fmt, va);
- isize n = len-1;
- if (global_error_collector.in_block) {
- isize cap = global_error_collector.error_buffer.count + n;
- array_reserve(&global_error_collector.error_buffer, cap);
- u8 *data = global_error_collector.error_buffer.data + global_error_collector.error_buffer.count;
- gb_memmove(data, buf, n);
- global_error_collector.error_buffer.count += n;
- } else {
- mutex_lock(&global_error_collector.error_out_mutex);
- {
- u8 *text = gb_alloc_array(permanent_allocator(), u8, n+1);
- gb_memmove(text, buf, n);
- text[n] = 0;
- array_add(&global_error_collector.errors, make_string(text, n));
- }
- mutex_unlock(&global_error_collector.error_out_mutex);
-
- }
- gb_file_write(f, buf, n);
-}
-
-
-ErrorOutProc *error_out_va = default_error_out_va;
-
-// NOTE: defined in build_settings.cpp
-bool global_warnings_as_errors(void);
-bool global_ignore_warnings(void);
-bool show_error_line(void);
-gbString get_file_line_as_string(TokenPos const &pos, i32 *offset);
-
-void error_out(char const *fmt, ...) {
- va_list va;
- va_start(va, fmt);
- error_out_va(fmt, va);
- va_end(va);
-}
-
-
-bool show_error_on_line(TokenPos const &pos, TokenPos end) {
- if (!show_error_line()) {
- return false;
- }
-
- i32 offset = 0;
- gbString the_line = get_file_line_as_string(pos, &offset);
- defer (gb_string_free(the_line));
-
- if (the_line != nullptr) {
- String line = make_string(cast(u8 const *)the_line, gb_string_length(the_line));
-
- // TODO(bill): This assumes ASCII
-
- enum {
- MAX_LINE_LENGTH = 76,
- MAX_TAB_WIDTH = 8,
- ELLIPSIS_PADDING = 8
- };
-
- error_out("\n\t");
- if (line.len+MAX_TAB_WIDTH+ELLIPSIS_PADDING > MAX_LINE_LENGTH) {
- i32 const half_width = MAX_LINE_LENGTH/2;
- i32 left = cast(i32)(offset);
- i32 right = cast(i32)(line.len - offset);
- left = gb_min(left, half_width);
- right = gb_min(right, half_width);
-
- line.text += offset-left;
- line.len -= offset+right-left;
-
- line = string_trim_whitespace(line);
-
- offset = left + ELLIPSIS_PADDING/2;
-
- error_out("... %.*s ...", LIT(line));
- } else {
- error_out("%.*s", LIT(line));
- }
- error_out("\n\t");
-
- for (i32 i = 0; i < offset; i++) {
- error_out(" ");
- }
- error_out("^");
- if (end.file_id == pos.file_id) {
- if (end.line > pos.line) {
- for (i32 i = offset; i < line.len; i++) {
- error_out("~");
- }
- } else if (end.line == pos.line && end.column > pos.column) {
- i32 length = gb_min(end.offset - pos.offset, cast(i32)(line.len-offset));
- for (i32 i = 1; i < length-1; i++) {
- error_out("~");
- }
- if (length > 1) {
- error_out("^");
- }
- }
- }
-
- error_out("\n\n");
- return true;
- }
- return false;
-}
-
-void error_va(TokenPos const &pos, TokenPos end, char const *fmt, va_list va) {
- global_error_collector.count.fetch_add(1);
-
- mutex_lock(&global_error_collector.mutex);
- // NOTE(bill): Duplicate error, skip it
- if (pos.line == 0) {
- error_out("Error: %s\n", gb_bprintf_va(fmt, va));
- } else if (global_error_collector.prev != pos) {
- global_error_collector.prev = pos;
- error_out("%s %s\n",
- token_pos_to_string(pos),
- gb_bprintf_va(fmt, va));
- show_error_on_line(pos, end);
- }
- mutex_unlock(&global_error_collector.mutex);
- if (global_error_collector.count > MAX_ERROR_COLLECTOR_COUNT) {
- gb_exit(1);
- }
-}
-
-void warning_va(TokenPos const &pos, TokenPos end, char const *fmt, va_list va) {
- if (global_warnings_as_errors()) {
- error_va(pos, end, fmt, va);
- return;
- }
- global_error_collector.warning_count.fetch_add(1);
- mutex_lock(&global_error_collector.mutex);
- if (!global_ignore_warnings()) {
- // NOTE(bill): Duplicate error, skip it
- if (pos.line == 0) {
- error_out("Warning: %s\n", gb_bprintf_va(fmt, va));
- } else if (global_error_collector.prev != pos) {
- global_error_collector.prev = pos;
- error_out("%s Warning: %s\n",
- token_pos_to_string(pos),
- gb_bprintf_va(fmt, va));
- show_error_on_line(pos, end);
- }
- }
- mutex_unlock(&global_error_collector.mutex);
-}
-
-
-void error_line_va(char const *fmt, va_list va) {
- error_out_va(fmt, va);
-}
-
-void error_no_newline_va(TokenPos const &pos, char const *fmt, va_list va) {
- mutex_lock(&global_error_collector.mutex);
- global_error_collector.count++;
- // NOTE(bill): Duplicate error, skip it
- if (pos.line == 0) {
- error_out("Error: %s", gb_bprintf_va(fmt, va));
- } else if (global_error_collector.prev != pos) {
- global_error_collector.prev = pos;
- error_out("%s %s",
- token_pos_to_string(pos),
- gb_bprintf_va(fmt, va));
- }
- mutex_unlock(&global_error_collector.mutex);
- if (global_error_collector.count > MAX_ERROR_COLLECTOR_COUNT) {
- gb_exit(1);
- }
-}
-
-
-void syntax_error_va(TokenPos const &pos, TokenPos end, char const *fmt, va_list va) {
- mutex_lock(&global_error_collector.mutex);
- global_error_collector.count++;
- // NOTE(bill): Duplicate error, skip it
- if (global_error_collector.prev != pos) {
- global_error_collector.prev = pos;
- error_out("%s Syntax Error: %s\n",
- token_pos_to_string(pos),
- gb_bprintf_va(fmt, va));
- show_error_on_line(pos, end);
- } else if (pos.line == 0) {
- error_out("Syntax Error: %s\n", gb_bprintf_va(fmt, va));
- }
-
- mutex_unlock(&global_error_collector.mutex);
- if (global_error_collector.count > MAX_ERROR_COLLECTOR_COUNT) {
- gb_exit(1);
- }
-}
-
-void syntax_warning_va(TokenPos const &pos, TokenPos end, char const *fmt, va_list va) {
- if (global_warnings_as_errors()) {
- syntax_error_va(pos, end, fmt, va);
- return;
- }
- mutex_lock(&global_error_collector.mutex);
- global_error_collector.warning_count++;
- if (!global_ignore_warnings()) {
- // NOTE(bill): Duplicate error, skip it
- if (global_error_collector.prev != pos) {
- global_error_collector.prev = pos;
- error_out("%s Syntax Warning: %s\n",
- token_pos_to_string(pos),
- gb_bprintf_va(fmt, va));
- show_error_on_line(pos, end);
- } else if (pos.line == 0) {
- error_out("Warning: %s\n", gb_bprintf_va(fmt, va));
- }
- }
- mutex_unlock(&global_error_collector.mutex);
-}
-
-
-
-void warning(Token const &token, char const *fmt, ...) {
- va_list va;
- va_start(va, fmt);
- warning_va(token.pos, {}, fmt, va);
- va_end(va);
-}
-
-void error(Token const &token, char const *fmt, ...) {
- va_list va;
- va_start(va, fmt);
- error_va(token.pos, {}, fmt, va);
- va_end(va);
-}
-
-void error(TokenPos pos, char const *fmt, ...) {
- va_list va;
- va_start(va, fmt);
- Token token = {};
- token.pos = pos;
- error_va(pos, {}, fmt, va);
- va_end(va);
-}
-
-void error_line(char const *fmt, ...) {
- va_list va;
- va_start(va, fmt);
- error_line_va(fmt, va);
- va_end(va);
-}
-
-
-void syntax_error(Token const &token, char const *fmt, ...) {
- va_list va;
- va_start(va, fmt);
- syntax_error_va(token.pos, {}, fmt, va);
- va_end(va);
-}
-
-void syntax_error(TokenPos pos, char const *fmt, ...) {
- va_list va;
- va_start(va, fmt);
- syntax_error_va(pos, {}, fmt, va);
- va_end(va);
-}
-
-void syntax_warning(Token const &token, char const *fmt, ...) {
- va_list va;
- va_start(va, fmt);
- syntax_warning_va(token.pos, {}, fmt, va);
- va_end(va);
-}
-
-
-void compiler_error(char const *fmt, ...) {
- va_list va;
-
- va_start(va, fmt);
- gb_printf_err("Internal Compiler Error: %s\n",
- gb_bprintf_va(fmt, va));
- va_end(va);
- gb_exit(1);
-}
-
-
-
-
-
gb_inline bool token_is_literal(TokenKind t) {
return gb_is_between(t, Token__LiteralBegin+1, Token__LiteralEnd-1);
}
@@ -695,6 +282,8 @@ gb_inline bool token_is_shift(TokenKind t) {
gb_inline void print_token(Token t) { gb_printf("%.*s\n", LIT(t.string)); }
+#include "error.cpp"
+
enum TokenizerInitError {
TokenizerInit_None,
From 7cc265e14ce3ec08a5908d31441000bdcb4ac645 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Mon, 10 Jan 2022 14:50:28 +0000
Subject: [PATCH 075/710] Add mutex guards for signature scopes
---
src/array.cpp | 4 ++++
src/check_decl.cpp | 2 +-
src/check_expr.cpp | 2 ++
src/check_stmt.cpp | 2 +-
src/checker.cpp | 2 +-
src/common_memory.cpp | 36 +++++++++++++++++++++++++-----------
src/threading.cpp | 34 ++++++++++++++++++++++++++++++++++
7 files changed, 68 insertions(+), 14 deletions(-)
diff --git a/src/array.cpp b/src/array.cpp
index c41125c6d..ac3727978 100644
--- a/src/array.cpp
+++ b/src/array.cpp
@@ -77,15 +77,19 @@ template Slice slice_from_array(Array const &a);
template
Slice slice_make(gbAllocator const &allocator, isize count) {
+ GB_ASSERT(count >= 0);
Slice s = {};
s.data = gb_alloc_array(allocator, T, count);
+ GB_ASSERT(s.data != nullptr);
s.count = count;
return s;
}
template
void slice_init(Slice *s, gbAllocator const &allocator, isize count) {
+ GB_ASSERT(count >= 0);
s->data = gb_alloc_array(allocator, T, count);
+ GB_ASSERT(s->data != nullptr);
s->count = count;
}
diff --git a/src/check_decl.cpp b/src/check_decl.cpp
index 42f68203c..55ad67abf 100644
--- a/src/check_decl.cpp
+++ b/src/check_decl.cpp
@@ -1286,7 +1286,7 @@ void check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *decl, Type *ty
using_entities.allocator = heap_allocator();
defer (array_free(&using_entities));
- {
+ MUTEX_GUARD_BLOCK(ctx->scope->mutex) {
if (type->Proc.param_count > 0) {
TypeTuple *params = &type->Proc.params->Tuple;
for_array(i, params->variables) {
diff --git a/src/check_expr.cpp b/src/check_expr.cpp
index cfffffd9f..1162cefee 100644
--- a/src/check_expr.cpp
+++ b/src/check_expr.cpp
@@ -4021,10 +4021,12 @@ void check_did_you_mean_scope(String const &name, Scope *scope) {
DidYouMeanAnswers d = did_you_mean_make(heap_allocator(), scope->elements.entries.count, name);
defer (did_you_mean_destroy(&d));
+ mutex_lock(&scope->mutex);
for_array(i, scope->elements.entries) {
Entity *e = scope->elements.entries[i].value;
did_you_mean_append(&d, e->token.string);
}
+ mutex_unlock(&scope->mutex);
check_did_you_mean_print(&d);
}
diff --git a/src/check_stmt.cpp b/src/check_stmt.cpp
index 396388629..94b7561c7 100644
--- a/src/check_stmt.cpp
+++ b/src/check_stmt.cpp
@@ -607,7 +607,7 @@ bool check_using_stmt_entity(CheckerContext *ctx, AstUsingStmt *us, Ast *expr, b
case Entity_ImportName: {
Scope *scope = e->ImportName.scope;
- for_array(i, scope->elements.entries) {
+ MUTEX_GUARD_BLOCK(scope->mutex) for_array(i, scope->elements.entries) {
String name = scope->elements.entries[i].key.string;
Entity *decl = scope->elements.entries[i].value;
if (!is_entity_exported(decl)) continue;
diff --git a/src/checker.cpp b/src/checker.cpp
index c270e8210..58c71a176 100644
--- a/src/checker.cpp
+++ b/src/checker.cpp
@@ -622,7 +622,7 @@ void check_scope_usage(Checker *c, Scope *scope) {
Array vetted_entities = {};
array_init(&vetted_entities, heap_allocator());
- for_array(i, scope->elements.entries) {
+ MUTEX_GUARD_BLOCK(scope->mutex) for_array(i, scope->elements.entries) {
Entity *e = scope->elements.entries[i].value;
if (e == nullptr) continue;
VettedEntity ve = {};
diff --git a/src/common_memory.cpp b/src/common_memory.cpp
index 2d7a7a246..096c35b5c 100644
--- a/src/common_memory.cpp
+++ b/src/common_memory.cpp
@@ -325,18 +325,32 @@ GB_ALLOCATOR_PROC(heap_allocator_proc) {
// TODO(bill): Throughly test!
switch (type) {
#if defined(GB_COMPILER_MSVC)
- case gbAllocation_Alloc: {
- isize aligned_size = align_formula_isize(size, alignment);
- // TODO(bill): Make sure this is aligned correctly
- ptr = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, aligned_size);
- } break;
- case gbAllocation_Free:
- HeapFree(GetProcessHeap(), 0, old_memory);
+ case gbAllocation_Alloc:
+ if (size == 0) {
+ return NULL;
+ } else {
+ isize aligned_size = align_formula_isize(size, alignment);
+ // TODO(bill): Make sure this is aligned correctly
+ ptr = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, aligned_size);
+ }
+ break;
+ case gbAllocation_Free:
+ if (old_memory != nullptr) {
+ HeapFree(GetProcessHeap(), 0, old_memory);
+ }
+ break;
+ case gbAllocation_Resize:
+ if (old_memory != nullptr && size > 0) {
+ isize aligned_size = align_formula_isize(size, alignment);
+ ptr = HeapReAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, old_memory, aligned_size);
+ } else if (old_memory != nullptr) {
+ HeapFree(GetProcessHeap(), 0, old_memory);
+ } else if (size != 0) {
+ isize aligned_size = align_formula_isize(size, alignment);
+ // TODO(bill): Make sure this is aligned correctly
+ ptr = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, aligned_size);
+ }
break;
- case gbAllocation_Resize: {
- isize aligned_size = align_formula_isize(size, alignment);
- ptr = HeapReAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, old_memory, aligned_size);
- } break;
#elif defined(GB_SYSTEM_LINUX)
// TODO(bill): *nix version that's decent
case gbAllocation_Alloc: {
diff --git a/src/threading.cpp b/src/threading.cpp
index b318e4ff1..e848bba00 100644
--- a/src/threading.cpp
+++ b/src/threading.cpp
@@ -68,6 +68,40 @@ void yield_thread(void);
void yield_process(void);
+struct MutexGuard {
+ MutexGuard() = delete;
+ MutexGuard(MutexGuard const &) = delete;
+
+ MutexGuard(BlockingMutex *bm) : bm{bm} {
+ mutex_lock(this->bm);
+ }
+ MutexGuard(RecursiveMutex *rm) : rm{rm} {
+ mutex_lock(this->rm);
+ }
+ MutexGuard(BlockingMutex &bm) : bm{&bm} {
+ mutex_lock(this->bm);
+ }
+ MutexGuard(RecursiveMutex &rm) : rm{&rm} {
+ mutex_lock(this->rm);
+ }
+ ~MutexGuard() {
+ if (this->bm) {
+ mutex_unlock(this->bm);
+ } else if (this->rm) {
+ mutex_unlock(this->rm);
+ }
+ }
+
+ operator bool() const { return true; }
+
+ BlockingMutex *bm;
+ RecursiveMutex *rm;
+};
+
+#define MUTEX_GUARD_BLOCK(m) if (MutexGuard GB_DEFER_3(_mutex_guard_) = m)
+#define MUTEX_GUARD(m) MutexGuard GB_DEFER_3(_mutex_guard_) = m
+
+
#if defined(GB_SYSTEM_WINDOWS)
struct BlockingMutex {
SRWLOCK srwlock;
From 32ec1162bf467359ed47ba0bd4e74ec0c7fbd167 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Mon, 10 Jan 2022 14:52:47 +0000
Subject: [PATCH 076/710] Use more `{}` ctor
---
src/threading.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/threading.cpp b/src/threading.cpp
index e848bba00..50d0dfed1 100644
--- a/src/threading.cpp
+++ b/src/threading.cpp
@@ -98,8 +98,8 @@ struct MutexGuard {
RecursiveMutex *rm;
};
-#define MUTEX_GUARD_BLOCK(m) if (MutexGuard GB_DEFER_3(_mutex_guard_) = m)
-#define MUTEX_GUARD(m) MutexGuard GB_DEFER_3(_mutex_guard_) = m
+#define MUTEX_GUARD_BLOCK(m) if (MutexGuard GB_DEFER_3(_mutex_guard_){m})
+#define MUTEX_GUARD(m) MutexGuard GB_DEFER_3(_mutex_guard_){m}
#if defined(GB_SYSTEM_WINDOWS)
From 8f91e9307c6ea7a243001efb2ecb135d37587301 Mon Sep 17 00:00:00 2001
From: Dale Weiler
Date: Mon, 10 Jan 2022 17:57:33 -0500
Subject: [PATCH 077/710] shared library fixes
---
src/llvm_backend.cpp | 8 ++++++-
src/main.cpp | 51 ++++++++++++++++++++++----------------------
2 files changed, 32 insertions(+), 27 deletions(-)
diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp
index 5acd2a80f..1a657e47b 100644
--- a/src/llvm_backend.cpp
+++ b/src/llvm_backend.cpp
@@ -1238,12 +1238,18 @@ void lb_generate_code(lbGenerator *gen) {
// NOTE(bill, 2021-05-04): Target machines must be unique to each module because they are not thread safe
auto target_machines = array_make(permanent_allocator(), gen->modules.entries.count);
+ // NOTE(dweiler): Dynamic libraries require position-independent code.
+ LLVMRelocMode reloc_mode = LLVMRelocDefault;
+ if (build_context.build_mode == BuildMode_DynamicLibrary) {
+ reloc_mode = LLVMRelocPIC;
+ }
+
for_array(i, gen->modules.entries) {
target_machines[i] = LLVMCreateTargetMachine(
target, target_triple, llvm_cpu,
llvm_features,
code_gen_level,
- LLVMRelocDefault,
+ reloc_mode,
code_mode);
LLVMSetModuleDataLayout(gen->modules.entries[i].value->mod, LLVMCreateTargetDataLayout(target_machines[i]));
}
diff --git a/src/main.cpp b/src/main.cpp
index 36b30112f..0e8894ed0 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -432,40 +432,39 @@ i32 linker_stage(lbGenerator *gen) {
// typically executable files on *NIX systems don't have extensions.
String output_ext = {};
gbString link_settings = gb_string_make_reserve(heap_allocator(), 32);
- char const *linker;
+
+ // NOTE(dweiler): We use clang as a frontend for the linker as there are
+ // other runtime and compiler support libraries that need to be linked in
+ // very specific orders such as libgcc_s, ld-linux-so, unwind, etc.
+ // These are not always typically inside /lib, /lib64, or /usr versions
+ // of that, e.g libgcc.a is in /usr/lib/gcc/{version}, and can vary on
+ // the distribution of Linux even. The gcc or clang specs is the only
+ // reliable way to query this information to call ld directly.
if (build_context.build_mode == BuildMode_DynamicLibrary) {
- // NOTE(tetra, 2020-11-06): __$startup_runtime must be called at DLL load time.
- // Clang, for some reason, won't let us pass the '-init' flag that lets us do this,
- // so use ld instead.
- // :UseLDForShared
- linker = "ld";
+ // NOTE(dweiler): Let the frontend know we're building a shared library
+ // so it doesn't generate symbols which cannot be relocated.
+ link_settings = gb_string_appendc(link_settings, "-shared ");
+
+ // NOTE(dweiler): __$startup_runtime must be called at initialization
+ // time of the shared object, we can pass -init to the linker by using
+ // a comma separated list of arguments to -Wl.
+ //
+ // This previously used ld but ld cannot actually build a shared library
+ // correctly this way since all the other dependencies provided implicitly
+ // by the compiler frontend are still needed and most of the command
+ // line arguments prepared previously are incompatible with ld.
+ //
// Shared libraries are .dylib on MacOS and .so on Linux.
#if defined(GB_SYSTEM_OSX)
output_ext = STR_LIT(".dylib");
- link_settings = gb_string_appendc(link_settings, "-init '___$startup_runtime' ");
- link_settings = gb_string_appendc(link_settings, "-dylib -dynamic ");
+ link_settings = gb_string_appendc(link_settings, "-Wl,-init,'___$startup_runtime' ");
#else
output_ext = STR_LIT(".so");
- link_settings = gb_string_appendc(link_settings, "-init '__$startup_runtime' ");
- link_settings = gb_string_appendc(link_settings, "-shared ");
+ link_settings = gb_string_appendc(link_settings, "-Wl,-init,'__$startup_runtime' ");
#endif
} else {
- #if defined(GB_SYSTEM_OSX)
- linker = "ld";
- #else
- // TODO(zangent): Figure out how to make ld work on Linux.
- // It probably has to do with including the entire CRT, but
- // that's quite a complicated issue to solve while remaining distro-agnostic.
- // Clang can figure out linker flags for us, and that's good enough _for now_.
- linker = "clang -Wno-unused-command-line-argument";
- #endif
- }
-
- if (build_context.metrics.os == TargetOs_linux) {
link_settings = gb_string_appendc(link_settings, "-no-pie ");
}
-
-
if (build_context.out_filepath.len > 0) {
//NOTE(thebirk): We have a custom -out arguments, so we should use the extension from that
isize pos = string_extension_position(build_context.out_filepath);
@@ -475,7 +474,7 @@ i32 linker_stage(lbGenerator *gen) {
}
result = system_exec_command_line_app("ld-link",
- "%s %s -o \"%.*s%.*s\" %s "
+ "clang -Wunused-command-line-argument %s -o \"%.*s%.*s\" %s "
" %s "
" %.*s "
" %.*s "
@@ -492,7 +491,7 @@ i32 linker_stage(lbGenerator *gen) {
// This points the linker to where the entry point is
" -e _main "
#endif
- , linker, object_files, LIT(output_base), LIT(output_ext),
+ , object_files, LIT(output_base), LIT(output_ext),
#if defined(GB_SYSTEM_OSX)
"-lSystem -lm -syslibroot /Library/Developer/CommandLineTools/SDKs/MacOSX.sdk -L/usr/local/lib",
#else
From 4334dbe69ac7b2e03d327c93ff559042c03db427 Mon Sep 17 00:00:00 2001
From: Dale Weiler
Date: Mon, 10 Jan 2022 18:00:38 -0500
Subject: [PATCH 078/710] disable this warning
---
src/main.cpp | 124 +++++++++++++++++++++++++--------------------------
1 file changed, 62 insertions(+), 62 deletions(-)
diff --git a/src/main.cpp b/src/main.cpp
index 0e8894ed0..444ab44f0 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -74,16 +74,16 @@ i32 system_exec_command_line_app(char const *name, char const *fmt, ...) {
isize cmd_len = 0;
va_list va;
i32 exit_code = 0;
-
+
va_start(va, fmt);
cmd_len = gb_snprintf_va(cmd_line, cmd_cap-1, fmt, va);
va_end(va);
-
+
#if defined(GB_SYSTEM_WINDOWS)
STARTUPINFOW start_info = {gb_size_of(STARTUPINFOW)};
PROCESS_INFORMATION pi = {0};
String16 wcmd = {};
-
+
start_info.dwFlags = STARTF_USESTDHANDLES | STARTF_USESHOWWINDOW;
start_info.wShowWindow = SW_SHOW;
start_info.hStdInput = GetStdHandle(STD_INPUT_HANDLE);
@@ -118,11 +118,11 @@ i32 system_exec_command_line_app(char const *name, char const *fmt, ...) {
}
exit_code = system(cmd_line);
#endif
-
+
if (exit_code) {
exit(exit_code);
}
-
+
return exit_code;
}
@@ -137,7 +137,7 @@ i32 linker_stage(lbGenerator *gen) {
if (is_arch_wasm()) {
timings_start_section(timings, str_lit("wasm-ld"));
-
+
#if defined(GB_SYSTEM_WINDOWS)
result = system_exec_command_line_app("wasm-ld",
"\"%.*s\\bin\\wasm-ld\" \"%.*s.wasm.o\" -o \"%.*s.wasm\" %.*s %.*s",
@@ -211,12 +211,12 @@ i32 linker_stage(lbGenerator *gen) {
add_path(find_result.windows_sdk_ucrt_library_path);
add_path(find_result.vs_library_path);
}
-
-
+
+
StringSet libs = {};
string_set_init(&libs, heap_allocator(), 64);
defer (string_set_destroy(&libs));
-
+
StringSet asm_files = {};
string_set_init(&asm_files, heap_allocator(), 64);
defer (string_set_destroy(&asm_files));
@@ -241,13 +241,13 @@ i32 linker_stage(lbGenerator *gen) {
string_set_add(&libs, lib);
}
}
-
+
for_array(i, libs.entries) {
String lib = libs.entries[i].value;
lib_str = gb_string_append_fmt(lib_str, " \"%.*s\"", LIT(lib));
}
-
-
+
+
if (build_context.build_mode == BuildMode_DynamicLibrary) {
output_ext = "dll";
link_settings = gb_string_append_fmt(link_settings, " /DLL");
@@ -268,7 +268,7 @@ i32 linker_stage(lbGenerator *gen) {
if (build_context.ODIN_DEBUG) {
link_settings = gb_string_append_fmt(link_settings, " /DEBUG");
}
-
+
for_array(i, asm_files.entries) {
String asm_file = asm_files.entries[i].value;
String obj_file = concatenate_strings(permanent_allocator(), asm_file, str_lit(".obj"));
@@ -283,7 +283,7 @@ i32 linker_stage(lbGenerator *gen) {
LIT(obj_file),
LIT(build_context.extra_assembler_flags)
);
-
+
if (result) {
return result;
}
@@ -305,7 +305,7 @@ i32 linker_stage(lbGenerator *gen) {
LIT(output_base),
LIT(build_context.resource_filepath)
);
-
+
if (result) {
return result;
}
@@ -340,11 +340,11 @@ i32 linker_stage(lbGenerator *gen) {
lib_str
);
}
-
+
if (result) {
return result;
}
-
+
} else { // lld
result = system_exec_command_line_app("msvc-lld-link",
"\"%.*s\\bin\\lld-link\" %s -OUT:\"%.*s.%s\" %s "
@@ -360,7 +360,7 @@ i32 linker_stage(lbGenerator *gen) {
LIT(build_context.extra_linker_flags),
lib_str
);
-
+
if (result) {
return result;
}
@@ -474,7 +474,7 @@ i32 linker_stage(lbGenerator *gen) {
}
result = system_exec_command_line_app("ld-link",
- "clang -Wunused-command-line-argument %s -o \"%.*s%.*s\" %s "
+ "clang -Wno-unused-command-line-argument %s -o \"%.*s%.*s\" %s "
" %s "
" %.*s "
" %.*s "
@@ -501,7 +501,7 @@ i32 linker_stage(lbGenerator *gen) {
LIT(build_context.link_flags),
LIT(build_context.extra_linker_flags),
link_settings);
-
+
if (result) {
return result;
}
@@ -513,7 +513,7 @@ i32 linker_stage(lbGenerator *gen) {
result = system_exec_command_line_app("dsymutil",
"dsymutil %.*s%.*s", LIT(output_base), LIT(output_ext)
);
-
+
if (result) {
return result;
}
@@ -674,9 +674,9 @@ enum BuildFlagKind {
BuildFlag_IgnoreWarnings,
BuildFlag_WarningsAsErrors,
BuildFlag_VerboseErrors,
-
+
// internal use only
- BuildFlag_InternalIgnoreLazy,
+ BuildFlag_InternalIgnoreLazy,
#if defined(GB_SYSTEM_WINDOWS)
BuildFlag_IgnoreVsSearch,
@@ -826,7 +826,7 @@ bool parse_build_flags(Array args) {
add_flag(&build_flags, BuildFlag_IgnoreWarnings, str_lit("ignore-warnings"), BuildFlagParam_None, Command_all);
add_flag(&build_flags, BuildFlag_WarningsAsErrors, str_lit("warnings-as-errors"), BuildFlagParam_None, Command_all);
add_flag(&build_flags, BuildFlag_VerboseErrors, str_lit("verbose-errors"), BuildFlagParam_None, Command_all);
-
+
add_flag(&build_flags, BuildFlag_InternalIgnoreLazy, str_lit("internal-ignore-lazy"), BuildFlagParam_None, Command_all);
#if defined(GB_SYSTEM_WINDOWS)
@@ -1355,7 +1355,7 @@ bool parse_build_flags(Array args) {
GB_ASSERT(value.kind == ExactValue_String);
build_context.extra_linker_flags = value.value_string;
break;
- case BuildFlag_ExtraAssemblerFlags:
+ case BuildFlag_ExtraAssemblerFlags:
GB_ASSERT(value.kind == ExactValue_String);
build_context.extra_assembler_flags = value.value_string;
break;
@@ -1817,7 +1817,7 @@ void show_timings(Checker *c, Timings *t) {
void remove_temp_files(lbGenerator *gen) {
if (build_context.keep_temp_files) return;
-
+
TIME_SECTION("remove keep temp files");
for_array(i, gen->output_temp_paths) {
@@ -1866,7 +1866,7 @@ void print_show_help(String const arg0, String const &command) {
} else if (command == "strip-semicolon") {
print_usage_line(1, "strip-semicolon");
print_usage_line(2, "parse and type check .odin file(s) and then remove unneeded semicolons from the entire project");
- }
+ }
bool doc = command == "doc";
bool build = command == "build";
@@ -2071,7 +2071,7 @@ void print_show_help(String const arg0, String const &command) {
print_usage_line(1, "-extra-linker-flags:");
print_usage_line(2, "Adds extra linker specific flags in a string");
print_usage_line(0, "");
-
+
print_usage_line(1, "-extra-assembler-flags:");
print_usage_line(2, "Adds extra assembler specific flags in a string");
print_usage_line(0, "");
@@ -2097,7 +2097,7 @@ void print_show_help(String const arg0, String const &command) {
print_usage_line(1, "-strict-style");
print_usage_line(2, "Errs on unneeded tokens, such as unneeded semicolons");
print_usage_line(0, "");
-
+
print_usage_line(1, "-strict-style-init-only");
print_usage_line(2, "Errs on unneeded tokens, such as unneeded semicolons, only on the initial project");
print_usage_line(0, "");
@@ -2262,7 +2262,7 @@ gbFileError write_file_with_stripped_tokens(gbFile *f, AstFile *file, i64 *writt
}
written += to_write;
prev_offset = token_pos_end(*token).offset;
- }
+ }
if (token->flags & TokenFlag_Replace) {
if (token->kind == Token_Ellipsis) {
if (!gb_file_write(f, "..=", 3)) {
@@ -2281,7 +2281,7 @@ gbFileError write_file_with_stripped_tokens(gbFile *f, AstFile *file, i64 *writt
}
written += to_write;
}
-
+
if (written_) *written_ = written;
return err;
}
@@ -2292,14 +2292,14 @@ int strip_semicolons(Parser *parser) {
AstPackage *pkg = parser->packages[i];
file_count += pkg->files.count;
}
-
+
auto generated_files = array_make(permanent_allocator(), 0, file_count);
-
+
for_array(i, parser->packages) {
AstPackage *pkg = parser->packages[i];
for_array(j, pkg->files) {
AstFile *file = pkg->files[j];
-
+
bool nothing_to_change = true;
for_array(i, file->tokens) {
Token *token = &file->tokens[i];
@@ -2308,29 +2308,29 @@ int strip_semicolons(Parser *parser) {
break;
}
}
-
+
if (nothing_to_change) {
continue;
}
-
+
String old_fullpath = copy_string(permanent_allocator(), file->fullpath);
-
+
// assumes .odin extension
String fullpath_base = substring(old_fullpath, 0, old_fullpath.len-5);
-
+
String old_fullpath_backup = concatenate_strings(permanent_allocator(), fullpath_base, str_lit("~backup.odin-temp"));
String new_fullpath = concatenate_strings(permanent_allocator(), fullpath_base, str_lit("~temp.odin-temp"));
-
+
array_add(&generated_files, StripSemicolonFile{old_fullpath, old_fullpath_backup, new_fullpath, file});
}
}
-
+
gb_printf_err("File count to be stripped of unneeded tokens: %td\n", generated_files.count);
-
-
+
+
isize generated_count = 0;
bool failed = false;
-
+
for_array(i, generated_files) {
auto *file = &generated_files[i];
char const *filename = cast(char const *)file->new_fullpath.text;
@@ -2338,15 +2338,15 @@ int strip_semicolons(Parser *parser) {
defer (if (err != gbFileError_None) {
failed = true;
});
-
- gbFile f = {};
+
+ gbFile f = {};
err = gb_file_create(&f, filename);
if (err) {
break;
}
defer (err = gb_file_close(&f));
generated_count += 1;
-
+
i64 written = 0;
defer (err = gb_file_truncate(&f, written));
@@ -2367,23 +2367,23 @@ int strip_semicolons(Parser *parser) {
}
return 1;
}
-
+
isize overwritten_files = 0;
-
+
for_array(i, generated_files) {
auto *file = &generated_files[i];
-
+
char const *old_fullpath = cast(char const *)file->old_fullpath.text;
char const *old_fullpath_backup = cast(char const *)file->old_fullpath_backup.text;
char const *new_fullpath = cast(char const *)file->new_fullpath.text;
-
+
debugf("Copy '%s' to '%s'\n", old_fullpath, old_fullpath_backup);
if (!gb_file_copy(old_fullpath, old_fullpath_backup, false)) {
gb_printf_err("failed to copy '%s' to '%s'\n", old_fullpath, old_fullpath_backup);
failed = true;
break;
}
-
+
debugf("Copy '%s' to '%s'\n", new_fullpath, old_fullpath);
if (!gb_file_copy(new_fullpath, old_fullpath, false)) {
@@ -2400,19 +2400,19 @@ int strip_semicolons(Parser *parser) {
if (!gb_file_remove(old_fullpath_backup)) {
gb_printf_err("failed to remove '%s'\n", old_fullpath_backup);
}
-
+
overwritten_files++;
}
-
+
if (!build_context.keep_temp_files) {
for_array(i, generated_files) {
auto *file = &generated_files[i];
char const *filename = nullptr;
filename = cast(char const *)file->new_fullpath.text;
-
+
debugf("Remove '%s'\n", filename);
GB_ASSERT_MSG(gb_file_remove(filename), "unable to delete file %s", filename);
-
+
filename = cast(char const *)file->old_fullpath_backup.text;
debugf("Remove '%s'\n", filename);
if (gb_file_exists(filename) && !gb_file_remove(filename)) {
@@ -2423,10 +2423,10 @@ int strip_semicolons(Parser *parser) {
}
}
}
-
+
gb_printf_err("Files stripped of unneeded token: %td\n", generated_files.count);
-
-
+
+
return cast(int)failed;
}
@@ -2442,7 +2442,7 @@ int main(int arg_count, char const **arg_ptr) {
defer (timings_destroy(&global_timings));
MAIN_TIME_SECTION("initialization");
-
+
virtual_memory_init();
mutex_init(&fullpath_mutex);
mutex_init(&hash_exact_value_mutex);
@@ -2617,7 +2617,7 @@ int main(int arg_count, char const **arg_ptr) {
init_global_thread_pool();
defer (thread_pool_destroy(&global_thread_pool));
-
+
init_universal();
// TODO(bill): prevent compiling without a linker
@@ -2649,7 +2649,7 @@ int main(int arg_count, char const **arg_ptr) {
if (any_errors()) {
return 1;
}
-
+
if (build_context.command_kind == Command_strip_semicolon) {
return strip_semicolons(parser);
}
@@ -2703,7 +2703,7 @@ int main(int arg_count, char const **arg_ptr) {
}
remove_temp_files(gen);
-
+
if (build_context.show_timings) {
show_timings(checker, &global_timings);
}
From 847b05013f71c69a4123fe5a4606c88039b716a3 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Tue, 11 Jan 2022 10:56:07 +0000
Subject: [PATCH 079/710] Disable `DEFAULT_TO_THREADED_CHECKER` until race
condition is found
---
src/bug_report.cpp | 26 +++++++++++++-------------
src/build_settings.cpp | 2 +-
src/check_decl.cpp | 9 ++++-----
src/checker.cpp | 10 +++++-----
src/checker.hpp | 2 +-
5 files changed, 24 insertions(+), 25 deletions(-)
diff --git a/src/bug_report.cpp b/src/bug_report.cpp
index 27e7fcf9a..9a1cb2254 100644
--- a/src/bug_report.cpp
+++ b/src/bug_report.cpp
@@ -140,7 +140,7 @@ void report_windows_product_type(DWORD ProductType) {
break;
default:
- gb_printf("Unknown Edition (%08x)", ProductType);
+ gb_printf("Unknown Edition (%08x)", cast(unsigned)ProductType);
}
}
#endif
@@ -316,14 +316,14 @@ void print_bug_report_help() {
}
if (false) {
- gb_printf("dwMajorVersion: %d\n", osvi.dwMajorVersion);
- gb_printf("dwMinorVersion: %d\n", osvi.dwMinorVersion);
- gb_printf("dwBuildNumber: %d\n", osvi.dwBuildNumber);
- gb_printf("dwPlatformId: %d\n", osvi.dwPlatformId);
- gb_printf("wServicePackMajor: %d\n", osvi.wServicePackMajor);
- gb_printf("wServicePackMinor: %d\n", osvi.wServicePackMinor);
- gb_printf("wSuiteMask: %d\n", osvi.wSuiteMask);
- gb_printf("wProductType: %d\n", osvi.wProductType);
+ gb_printf("dwMajorVersion: %u\n", cast(unsigned)osvi.dwMajorVersion);
+ gb_printf("dwMinorVersion: %u\n", cast(unsigned)osvi.dwMinorVersion);
+ gb_printf("dwBuildNumber: %u\n", cast(unsigned)osvi.dwBuildNumber);
+ gb_printf("dwPlatformId: %u\n", cast(unsigned)osvi.dwPlatformId);
+ gb_printf("wServicePackMajor: %u\n", cast(unsigned)osvi.wServicePackMajor);
+ gb_printf("wServicePackMinor: %u\n", cast(unsigned)osvi.wServicePackMinor);
+ gb_printf("wSuiteMask: %u\n", cast(unsigned)osvi.wSuiteMask);
+ gb_printf("wProductType: %u\n", cast(unsigned)osvi.wProductType);
}
gb_printf("Windows ");
@@ -441,18 +441,18 @@ void print_bug_report_help() {
TEXT("DisplayVersion"),
RRF_RT_REG_SZ,
ValueType,
- &DisplayVersion,
+ DisplayVersion,
&ValueSize
);
if (status == 0x0) {
- gb_printf(" (version: %s)", &DisplayVersion);
+ gb_printf(" (version: %s)", DisplayVersion);
}
/*
Now print build number.
*/
- gb_printf(", build %d", osvi.dwBuildNumber);
+ gb_printf(", build %u", cast(unsigned)osvi.dwBuildNumber);
ValueSize = sizeof(UBR);
status = RegGetValue(
@@ -466,7 +466,7 @@ void print_bug_report_help() {
);
if (status == 0x0) {
- gb_printf(".%d", UBR);
+ gb_printf(".%u", cast(unsigned)UBR);
}
gb_printf("\n");
}
diff --git a/src/build_settings.cpp b/src/build_settings.cpp
index b8d50898d..ccae0fcf0 100644
--- a/src/build_settings.cpp
+++ b/src/build_settings.cpp
@@ -5,7 +5,7 @@
// #if defined(GB_SYSTEM_WINDOWS)
-#define DEFAULT_TO_THREADED_CHECKER
+// #define DEFAULT_TO_THREADED_CHECKER
// #endif
enum TargetOsKind {
diff --git a/src/check_decl.cpp b/src/check_decl.cpp
index 55ad67abf..3f7d2f33d 100644
--- a/src/check_decl.cpp
+++ b/src/check_decl.cpp
@@ -1286,7 +1286,7 @@ void check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *decl, Type *ty
using_entities.allocator = heap_allocator();
defer (array_free(&using_entities));
- MUTEX_GUARD_BLOCK(ctx->scope->mutex) {
+ {
if (type->Proc.param_count > 0) {
TypeTuple *params = &type->Proc.params->Tuple;
for_array(i, params->variables) {
@@ -1303,7 +1303,7 @@ void check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *decl, Type *ty
if (t->kind == Type_Struct) {
Scope *scope = t->Struct.scope;
GB_ASSERT(scope != nullptr);
- for_array(i, scope->elements.entries) {
+ MUTEX_GUARD_BLOCK(scope->mutex) for_array(i, scope->elements.entries) {
Entity *f = scope->elements.entries[i].value;
if (f->kind == Entity_Variable) {
Entity *uvar = alloc_entity_using_variable(e, f->token, f->type, nullptr);
@@ -1321,11 +1321,10 @@ void check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *decl, Type *ty
}
}
-
- for_array(i, using_entities) {
+ MUTEX_GUARD_BLOCK(ctx->scope->mutex) for_array(i, using_entities) {
Entity *e = using_entities[i].e;
Entity *uvar = using_entities[i].uvar;
- Entity *prev = scope_insert(ctx->scope, uvar);
+ Entity *prev = scope_insert(ctx->scope, uvar, false);
if (prev != nullptr) {
error(e->token, "Namespace collision while 'using' procedure argument '%.*s' of: %.*s", LIT(e->token.string), LIT(prev->token.string));
error_line("%.*s != %.*s\n", LIT(uvar->token.string), LIT(prev->token.string));
diff --git a/src/checker.cpp b/src/checker.cpp
index 58c71a176..b4c5d0c72 100644
--- a/src/checker.cpp
+++ b/src/checker.cpp
@@ -446,7 +446,7 @@ Entity *scope_lookup(Scope *s, String const &name) {
-Entity *scope_insert_with_name(Scope *s, String const &name, Entity *entity) {
+Entity *scope_insert_with_name(Scope *s, String const &name, Entity *entity, bool use_mutex=true) {
if (name == "") {
return nullptr;
}
@@ -454,8 +454,8 @@ Entity *scope_insert_with_name(Scope *s, String const &name, Entity *entity) {
Entity **found = nullptr;
Entity *result = nullptr;
- mutex_lock(&s->mutex);
- defer (mutex_unlock(&s->mutex));
+ if (use_mutex) mutex_lock(&s->mutex);
+ defer (if (use_mutex) mutex_unlock(&s->mutex));
found = string_map_get(&s->elements, key);
@@ -485,9 +485,9 @@ end:;
return result;
}
-Entity *scope_insert(Scope *s, Entity *entity) {
+Entity *scope_insert(Scope *s, Entity *entity, bool use_mutex) {
String name = entity->token.string;
- return scope_insert_with_name(s, name, entity);
+ return scope_insert_with_name(s, name, entity, use_mutex);
}
diff --git a/src/checker.hpp b/src/checker.hpp
index 74435c1d4..db59ebce7 100644
--- a/src/checker.hpp
+++ b/src/checker.hpp
@@ -423,7 +423,7 @@ Entity *entity_of_node(Ast *expr);
Entity *scope_lookup_current(Scope *s, String const &name);
Entity *scope_lookup (Scope *s, String const &name);
void scope_lookup_parent (Scope *s, String const &name, Scope **scope_, Entity **entity_);
-Entity *scope_insert (Scope *s, Entity *entity);
+Entity *scope_insert (Scope *s, Entity *entity, bool use_mutex=true);
void add_type_and_value (CheckerInfo *i, Ast *expression, AddressingMode mode, Type *type, ExactValue value);
From 7e4067c44ceb21b4ca0ce89e501df1bf9de106b7 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Wed, 12 Jan 2022 19:18:54 +0000
Subject: [PATCH 080/710] Begin work to move entry point code to Odin itself
rather than in C++ side
---
core/intrinsics/intrinsics.odin | 5 ++
core/runtime/procs_windows_amd64.odin | 2 +-
src/check_builtin.cpp | 6 +++
src/check_decl.cpp | 36 +++++++------
src/checker.cpp | 74 +++++++++++++++------------
src/checker.hpp | 3 ++
src/checker_builtin_procs.hpp | 7 ++-
src/llvm_backend.cpp | 15 ++++--
src/llvm_backend_proc.cpp | 8 +++
9 files changed, 100 insertions(+), 56 deletions(-)
diff --git a/core/intrinsics/intrinsics.odin b/core/intrinsics/intrinsics.odin
index 2da7a7439..803b04d17 100644
--- a/core/intrinsics/intrinsics.odin
+++ b/core/intrinsics/intrinsics.odin
@@ -197,3 +197,8 @@ type_field_index_of :: proc($T: typeid, $name: string) -> uintptr ---
type_equal_proc :: proc($T: typeid) -> (equal: proc "contextless" (rawptr, rawptr) -> bool) where type_is_comparable(T) ---
type_hasher_proc :: proc($T: typeid) -> (hasher: proc "contextless" (data: rawptr, seed: uintptr) -> uintptr) where type_is_comparable(T) ---
+
+
+// Internal compiler use only
+
+__entry_point :: proc() ---
\ No newline at end of file
diff --git a/core/runtime/procs_windows_amd64.odin b/core/runtime/procs_windows_amd64.odin
index 273bb57b2..e430357be 100644
--- a/core/runtime/procs_windows_amd64.odin
+++ b/core/runtime/procs_windows_amd64.odin
@@ -22,4 +22,4 @@ windows_trap_type_assertion :: proc "contextless" () -> ! {
when ODIN_NO_CRT {
@(require)
foreign import crt_lib "procs_windows_amd64.asm"
-}
\ No newline at end of file
+}
diff --git a/src/check_builtin.cpp b/src/check_builtin.cpp
index dc8c209c9..82ad6d161 100644
--- a/src/check_builtin.cpp
+++ b/src/check_builtin.cpp
@@ -219,6 +219,12 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
GB_PANIC("Implement built-in procedure: %.*s", LIT(builtin_name));
break;
+ case BuiltinProc___entry_point:
+ operand->mode = Addressing_NoValue;
+ operand->type = nullptr;
+ mpmc_enqueue(&c->info->intrinsics_entry_point_usage, call);
+ break;
+
case BuiltinProc_DIRECTIVE: {
ast_node(bd, BasicDirective, ce->proc);
String name = bd->name.string;
diff --git a/src/check_decl.cpp b/src/check_decl.cpp
index 3f7d2f33d..f9bc17ba4 100644
--- a/src/check_decl.cpp
+++ b/src/check_decl.cpp
@@ -777,21 +777,23 @@ void check_proc_decl(CheckerContext *ctx, Entity *e, DeclInfo *d) {
if (e->pkg != nullptr && e->token.string == "main") {
- if (pt->param_count != 0 ||
- pt->result_count != 0) {
- gbString str = type_to_string(proc_type);
- error(e->token, "Procedure type of 'main' was expected to be 'proc()', got %s", str);
- gb_string_free(str);
- }
- if (pt->calling_convention != default_calling_convention()) {
- error(e->token, "Procedure 'main' cannot have a custom calling convention");
- }
- pt->calling_convention = default_calling_convention();
- if (e->pkg->kind == Package_Init) {
- if (ctx->info->entry_point != nullptr) {
- error(e->token, "Redeclaration of the entry pointer procedure 'main'");
- } else {
- ctx->info->entry_point = e;
+ if (e->pkg->kind != Package_Runtime) {
+ if (pt->param_count != 0 ||
+ pt->result_count != 0) {
+ gbString str = type_to_string(proc_type);
+ error(e->token, "Procedure type of 'main' was expected to be 'proc()', got %s", str);
+ gb_string_free(str);
+ }
+ if (pt->calling_convention != default_calling_convention()) {
+ error(e->token, "Procedure 'main' cannot have a custom calling convention");
+ }
+ pt->calling_convention = default_calling_convention();
+ if (e->pkg->kind == Package_Init) {
+ if (ctx->info->entry_point != nullptr) {
+ error(e->token, "Redeclaration of the entry pointer procedure 'main'");
+ } else {
+ ctx->info->entry_point = e;
+ }
}
}
}
@@ -924,7 +926,9 @@ void check_proc_decl(CheckerContext *ctx, Entity *e, DeclInfo *d) {
"\tother at %s",
LIT(name), token_pos_to_string(pos));
} else if (name == "main") {
- error(d->proc_lit, "The link name 'main' is reserved for internal use");
+ if (d->entity->pkg->kind != Package_Runtime) {
+ error(d->proc_lit, "The link name 'main' is reserved for internal use");
+ }
} else {
string_map_set(fp, key, e);
}
diff --git a/src/checker.cpp b/src/checker.cpp
index b4c5d0c72..42575f88d 100644
--- a/src/checker.cpp
+++ b/src/checker.cpp
@@ -823,6 +823,7 @@ void init_universal(void) {
add_global_bool_constant("ODIN_NO_CRT", bc->no_crt);
add_global_bool_constant("ODIN_USE_SEPARATE_MODULES", bc->use_separate_modules);
add_global_bool_constant("ODIN_TEST", bc->command_kind == Command_test);
+ add_global_bool_constant("ODIN_NO_ENTRY_POINT", bc->no_entry_point);
// Builtin Procedures
@@ -941,6 +942,8 @@ void init_checker_info(CheckerInfo *i) {
mutex_init(&i->foreign_mutex);
semaphore_init(&i->collect_semaphore);
+
+ mpmc_init(&i->intrinsics_entry_point_usage, a, 1<<10); // just waste some memory here, even if it probably never used
}
void destroy_checker_info(CheckerInfo *i) {
@@ -1228,7 +1231,7 @@ void add_type_and_value(CheckerInfo *i, Ast *expr, AddressingMode mode, Type *ty
while (prev_expr != expr) {
prev_expr = expr;
expr->tav.mode = mode;
- if (type != nullptr && expr->tav.type != nullptr &&
+ if (type != nullptr && expr->tav.type != nullptr &&
is_type_any(type) && is_type_untyped(expr->tav.type)) {
// ignore
} else {
@@ -1424,7 +1427,7 @@ bool could_entity_be_lazy(Entity *e, DeclInfo *d) {
return false;
} else if (name == "linkage") {
return false;
- }
+ }
}
}
}
@@ -1704,7 +1707,7 @@ void add_type_info_type_internal(CheckerContext *c, Type *t) {
add_type_info_type_internal(c, bt->RelativeSlice.slice_type);
add_type_info_type_internal(c, bt->RelativeSlice.base_integer);
break;
-
+
case Type_Matrix:
add_type_info_type_internal(c, bt->Matrix.elem);
break;
@@ -1919,7 +1922,7 @@ void add_min_dep_type_info(Checker *c, Type *t) {
add_min_dep_type_info(c, bt->RelativeSlice.slice_type);
add_min_dep_type_info(c, bt->RelativeSlice.base_integer);
break;
-
+
case Type_Matrix:
add_min_dep_type_info(c, bt->Matrix.elem);
break;
@@ -2020,7 +2023,7 @@ void generate_minimum_dependency_set(Checker *c, Entity *start) {
str_lit("__init_context"),
str_lit("__type_info_of"),
str_lit("cstring_to_string"),
- str_lit("_cleanup_runtime"),
+ str_lit("_cleanup_runtime"),
// Pseudo-CRT required procedures
str_lit("memset"),
@@ -2047,7 +2050,7 @@ void generate_minimum_dependency_set(Checker *c, Entity *start) {
str_lit("gnu_h2f_ieee"),
str_lit("gnu_f2h_ieee"),
str_lit("extendhfsf2"),
-
+
// WASM Specific
str_lit("__ashlti3"),
str_lit("__multi3"),
@@ -2119,25 +2122,25 @@ void generate_minimum_dependency_set(Checker *c, Entity *start) {
if (e->flags & EntityFlag_Init) {
Type *t = base_type(e->type);
GB_ASSERT(t->kind == Type_Proc);
-
+
bool is_init = true;
-
+
if (t->Proc.param_count != 0 || t->Proc.result_count != 0) {
gbString str = type_to_string(t);
error(e->token, "@(init) procedures must have a signature type with no parameters nor results, got %s", str);
gb_string_free(str);
is_init = false;
}
-
+
if ((e->scope->flags & (ScopeFlag_File|ScopeFlag_Pkg)) == 0) {
error(e->token, "@(init) procedures must be declared at the file scope");
is_init = false;
}
-
+
if (is_init) {
add_dependency_to_set(c, e);
array_add(&c->info.init_procedures, e);
- }
+ }
}
break;
}
@@ -3677,11 +3680,6 @@ void check_single_global_entity(Checker *c, Entity *e, DeclInfo *d) {
error(e->token, "'main' is reserved as the entry point procedure in the initial scope");
return;
}
- } else if (pkg->kind == Package_Runtime) {
- if (e->token.string == "main") {
- error(e->token, "'main' is reserved as the entry point procedure in the initial scope");
- return;
- }
}
check_entity_decl(ctx, e, d, nullptr);
@@ -3841,7 +3839,7 @@ void add_import_dependency_node(Checker *c, Ast *decl, PtrMap generate_import_dependency_graph(Checker *c) {
- PtrMap M = {};
+ PtrMap M = {};
map_init(&M, heap_allocator(), 2*c->parser->packages.count);
defer (map_destroy(&M));
@@ -4121,11 +4119,11 @@ void check_add_foreign_import_decl(CheckerContext *ctx, Ast *decl) {
mpmc_enqueue(&ctx->info->required_foreign_imports_through_force_queue, e);
add_entity_use(ctx, nullptr, e);
}
-
+
if (has_asm_extension(fullpath)) {
if (build_context.metrics.arch != TargetArch_amd64 ||
build_context.metrics.os != TargetOs_windows) {
- error(decl, "Assembly files are not yet supported on this platform: %.*s_%.*s",
+ error(decl, "Assembly files are not yet supported on this platform: %.*s_%.*s",
LIT(target_os_names[build_context.metrics.os]), LIT(target_arch_names[build_context.metrics.arch]));
}
}
@@ -4327,7 +4325,7 @@ void check_with_workers(Checker *c, WorkerTaskProc *proc, isize total_count) {
if (!build_context.threaded_checker) {
worker_count = 0;
}
-
+
semaphore_post(&c->info.collect_semaphore, cast(i32)thread_count);
if (worker_count == 0) {
ThreadProcCheckerSection section_all = {};
@@ -4351,7 +4349,7 @@ void check_with_workers(Checker *c, WorkerTaskProc *proc, isize total_count) {
}
GB_ASSERT(remaining_count <= 0);
-
+
for (isize i = 0; i < thread_count; i++) {
global_thread_pool_add_task(proc, thread_data+i);
}
@@ -4750,11 +4748,11 @@ bool check_proc_info(Checker *c, ProcInfo *pi, UntypedExprInfoMap *untyped, Proc
ctx.decl = pi->decl;
ctx.procs_to_check_queue = procs_to_check_queue;
GB_ASSERT(procs_to_check_queue != nullptr);
-
+
GB_ASSERT(pi->type->kind == Type_Proc);
TypeProc *pt = &pi->type->Proc;
String name = pi->token.string;
-
+
if (pt->is_polymorphic && !pt->is_poly_specialized) {
Token token = pi->token;
if (pi->poly_def_node != nullptr) {
@@ -4832,7 +4830,7 @@ void check_unchecked_bodies(Checker *c) {
if (pi->body == nullptr) {
continue;
}
-
+
debugf("unchecked: %.*s\n", LIT(e->token.string));
mpmc_enqueue(&c->procs_to_check_queue, pi);
}
@@ -4943,14 +4941,14 @@ void check_procedure_bodies(Checker *c) {
}
if (worker_count == 0) {
auto *this_queue = &c->procs_to_check_queue;
-
+
UntypedExprInfoMap untyped = {};
map_init(&untyped, heap_allocator());
-
+
for (ProcInfo *pi = nullptr; mpmc_dequeue(this_queue, &pi); /**/) {
consume_proc_info_queue(c, pi, this_queue, &untyped);
}
-
+
map_destroy(&untyped);
debugf("Total Procedure Bodies Checked: %td\n", total_bodies_checked.load(std::memory_order_relaxed));
@@ -4994,7 +4992,7 @@ void check_procedure_bodies(Checker *c) {
GB_ASSERT(total_queued == original_queue_count);
semaphore_post(&c->procs_to_check_semaphore, cast(i32)thread_count);
-
+
for (isize i = 0; i < thread_count; i++) {
global_thread_pool_add_task(thread_proc_body, thread_data+i);
}
@@ -5031,7 +5029,7 @@ void check_deferred_procedures(Checker *c) {
Entity *dst = src->Procedure.deferred_procedure.entity;
GB_ASSERT(dst != nullptr);
GB_ASSERT(dst->kind == Entity_Procedure);
-
+
char const *attribute = "deferred_none";
switch (dst_kind) {
case DeferredProcedure_none:
@@ -5232,7 +5230,7 @@ GB_COMPARE_PROC(init_procedures_cmp) {
cmp = 0;
return cmp;
}
-
+
if (x->pkg != y->pkg) {
isize order_x = x->pkg ? x->pkg->order : 0;
isize order_y = y->pkg ? y->pkg->order : 0;
@@ -5246,14 +5244,14 @@ GB_COMPARE_PROC(init_procedures_cmp) {
String fullpath_y = y->file ? y->file->fullpath : (String{});
String file_x = filename_from_path(fullpath_x);
String file_y = filename_from_path(fullpath_y);
-
+
cmp = string_compare(file_x, file_y);
if (cmp) {
return cmp;
}
}
-
+
cmp = u64_cmp(x->order_in_src, y->order_in_src);
if (cmp) {
return cmp;
@@ -5433,9 +5431,19 @@ void check_parsed_files(Checker *c) {
TIME_SECTION("sanity checks");
GB_ASSERT(c->info.entity_queue.count.load(std::memory_order_relaxed) == 0);
GB_ASSERT(c->info.definition_queue.count.load(std::memory_order_relaxed) == 0);
-
+
TIME_SECTION("sort init procedures");
check_sort_init_procedures(c);
+ if (c->info.intrinsics_entry_point_usage.count > 0) {
+ TIME_SECTION("check intrinsics.__entry_point usage");
+ Ast *node = nullptr;
+ while (mpmc_dequeue(&c->info.intrinsics_entry_point_usage, &node)) {
+ if (c->info.entry_point == nullptr && node != nullptr) {
+ warning(node, "usage of intrinsics.__entry_point will be a no-op");
+ }
+ }
+ }
+
TIME_SECTION("type check finish");
}
diff --git a/src/checker.hpp b/src/checker.hpp
index db59ebce7..9a8753efd 100644
--- a/src/checker.hpp
+++ b/src/checker.hpp
@@ -338,6 +338,9 @@ struct CheckerInfo {
MPMCQueue required_global_variable_queue;
MPMCQueue required_foreign_imports_through_force_queue;
+ MPMCQueue intrinsics_entry_point_usage;
+
+
};
struct CheckerContext {
diff --git a/src/checker_builtin_procs.hpp b/src/checker_builtin_procs.hpp
index abd9fc6ca..e8f5174c0 100644
--- a/src/checker_builtin_procs.hpp
+++ b/src/checker_builtin_procs.hpp
@@ -250,6 +250,8 @@ BuiltinProc__type_simple_boolean_end,
BuiltinProc__type_end,
+ BuiltinProc___entry_point,
+
BuiltinProc_COUNT,
};
gb_global BuiltinProc builtin_procs[BuiltinProc_COUNT] = {
@@ -497,6 +499,9 @@ gb_global BuiltinProc builtin_procs[BuiltinProc_COUNT] = {
{STR_LIT("type_equal_proc"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics},
{STR_LIT("type_hasher_proc"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics},
-
+
+
{STR_LIT(""), 0, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
+
+ {STR_LIT("__entry_point"), 0, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
};
diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp
index 1a657e47b..b42ea8211 100644
--- a/src/llvm_backend.cpp
+++ b/src/llvm_backend.cpp
@@ -873,7 +873,7 @@ lbProcedure *lb_create_main_procedure(lbModule *m, lbProcedure *startup_runtime)
} else {
if (m->info->entry_point != nullptr) {
lbValue entry_point = lb_find_procedure_value_from_entity(m, m->info->entry_point);
- lb_emit_call(p, entry_point, {});
+ lb_emit_call(p, entry_point, {}, ProcInlining_no_inline);
}
}
@@ -1408,6 +1408,7 @@ void lb_generate_code(lbGenerator *gen) {
Entity *entry_point = info->entry_point;
bool has_dll_main = false;
bool has_win_main = false;
+ bool already_has_entry_point = false;
for_array(i, info->entities) {
Entity *e = info->entities[i];
@@ -1425,7 +1426,9 @@ void lb_generate_code(lbGenerator *gen) {
if (e->Procedure.is_export ||
(e->Procedure.link_name.len > 0) ||
((e->scope->flags&ScopeFlag_File) && e->Procedure.link_name.len > 0)) {
- if (!has_dll_main && name == "DllMain") {
+ if (name == "main" || name == "DllMain" || name == "WinMain" || name == "mainCRTStartup") {
+ already_has_entry_point = true;
+ } else if (!has_dll_main && name == "DllMain") {
has_dll_main = true;
} else if (!has_win_main && name == "WinMain") {
has_win_main = true;
@@ -1643,9 +1646,11 @@ void lb_generate_code(lbGenerator *gen) {
}
- if (!(build_context.build_mode == BuildMode_DynamicLibrary && !has_dll_main)) {
- TIME_SECTION("LLVM main");
- lb_create_main_procedure(default_module, startup_runtime);
+ if (!already_has_entry_point) {
+ if (!(build_context.build_mode == BuildMode_DynamicLibrary && !has_dll_main)) {
+ TIME_SECTION("LLVM main");
+ lb_create_main_procedure(default_module, startup_runtime);
+ }
}
for_array(j, gen->modules.entries) {
diff --git a/src/llvm_backend_proc.cpp b/src/llvm_backend_proc.cpp
index 50aa5f6db..10b8a093f 100644
--- a/src/llvm_backend_proc.cpp
+++ b/src/llvm_backend_proc.cpp
@@ -1965,6 +1965,14 @@ lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValue const &tv,
return res;
}
+ case BuiltinProc___entry_point:
+ if (p->module->info->entry_point) {
+ lbValue entry_point = lb_find_procedure_value_from_entity(p->module, p->module->info->entry_point);
+ GB_ASSERT(entry_point.value != nullptr);
+ lb_emit_call(p, entry_point, {});
+ }
+ return {};
+
case BuiltinProc_syscall:
{
unsigned arg_count = cast(unsigned)ce->args.count;
From 5ec93677a04f17f38117f0cf301d9a72036a04e7 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Wed, 12 Jan 2022 19:27:49 +0000
Subject: [PATCH 081/710] Correct look for entry point in llvm backend (Windows
only currently)
---
src/llvm_backend.cpp | 19 +++++--------------
1 file changed, 5 insertions(+), 14 deletions(-)
diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp
index b42ea8211..0b4c674ac 100644
--- a/src/llvm_backend.cpp
+++ b/src/llvm_backend.cpp
@@ -1406,19 +1406,15 @@ void lb_generate_code(lbGenerator *gen) {
isize global_variable_max_count = 0;
Entity *entry_point = info->entry_point;
- bool has_dll_main = false;
- bool has_win_main = false;
bool already_has_entry_point = false;
for_array(i, info->entities) {
Entity *e = info->entities[i];
String name = e->token.string;
- bool is_global = e->pkg != nullptr;
-
if (e->kind == Entity_Variable) {
global_variable_max_count++;
- } else if (e->kind == Entity_Procedure && !is_global) {
+ } else if (e->kind == Entity_Procedure) {
if ((e->scope->flags&ScopeFlag_Init) && name == "main") {
GB_ASSERT(e == entry_point);
// entry_point = e;
@@ -1426,12 +1422,9 @@ void lb_generate_code(lbGenerator *gen) {
if (e->Procedure.is_export ||
(e->Procedure.link_name.len > 0) ||
((e->scope->flags&ScopeFlag_File) && e->Procedure.link_name.len > 0)) {
- if (name == "main" || name == "DllMain" || name == "WinMain" || name == "mainCRTStartup") {
+ String link_name = e->Procedure.link_name;
+ if (link_name == "main" || link_name == "DllMain" || link_name == "WinMain" || link_name == "mainCRTStartup") {
already_has_entry_point = true;
- } else if (!has_dll_main && name == "DllMain") {
- has_dll_main = true;
- } else if (!has_win_main && name == "WinMain") {
- has_win_main = true;
}
}
}
@@ -1647,10 +1640,8 @@ void lb_generate_code(lbGenerator *gen) {
if (!already_has_entry_point) {
- if (!(build_context.build_mode == BuildMode_DynamicLibrary && !has_dll_main)) {
- TIME_SECTION("LLVM main");
- lb_create_main_procedure(default_module, startup_runtime);
- }
+ TIME_SECTION("LLVM main");
+ lb_create_main_procedure(default_module, startup_runtime);
}
for_array(j, gen->modules.entries) {
From 774951e8c0bfca3f9cdd445a003134d8f315ccd4 Mon Sep 17 00:00:00 2001
From: CiD-
Date: Wed, 12 Jan 2022 14:36:18 -0500
Subject: [PATCH 082/710] os_linux additions + libc to syscalls
---
core/os/os_linux.odin | 277 +++++++++++++++++++++++++++++++++++-------
1 file changed, 231 insertions(+), 46 deletions(-)
diff --git a/core/os/os_linux.odin b/core/os/os_linux.odin
index 260a051ce..6b2fda1e3 100644
--- a/core/os/os_linux.odin
+++ b/core/os/os_linux.odin
@@ -266,33 +266,142 @@ X_OK :: 1 // Test for execute permission
W_OK :: 2 // Test for write permission
R_OK :: 4 // Test for read permission
+AT_FDCWD :: -100
+AT_REMOVEDIR :: uintptr(0x200)
+AT_SYMLINK_NOFOLLOW :: uintptr(0x100)
+
+_unix_open :: proc(path: cstring, flags: int, mode: int = 0o000) -> Handle {
+ when ODIN_ARCH != "arm64" {
+ res := int(intrinsics.syscall(unix.SYS_open, uintptr(rawptr(path)), uintptr(flags), uintptr(mode)))
+ } else { // NOTE: arm64 does not have open
+ res := int(intrinsics.syscall(unix.SYS_openat, uintptr(AT_FDCWD), uintptr(rawptr(path)), uintptr(flags), uintptr(mode)))
+ }
+ return -1 if res < 0 else Handle(res)
+}
+
+_unix_close :: proc(fd: Handle) -> int {
+ return int(intrinsics.syscall(unix.SYS_close, uintptr(fd)))
+}
+
+_unix_read :: proc(fd: Handle, buf: rawptr, size: uint) -> int {
+ return int(intrinsics.syscall(unix.SYS_read, uintptr(fd), uintptr(buf), uintptr(size)))
+}
+
+_unix_write :: proc(fd: Handle, buf: rawptr, size: uint) -> int {
+ return int(intrinsics.syscall(unix.SYS_write, uintptr(fd), uintptr(buf), uintptr(size)))
+}
+
+_unix_seek :: proc(fd: Handle, offset: i64, whence: int) -> i64 {
+ when ODIN_ARCH == "amd64" || ODIN_ARCH == "arm64" {
+ return i64(intrinsics.syscall(unix.SYS_lseek, uintptr(fd), uintptr(offset), uintptr(whence)))
+ } else {
+ low := uintptr(offset & 0xFFFFFFFF)
+ high := uintptr(offset >> 32)
+ result: i64
+ res := i64(intrinsics.syscall(unix.SYS__llseek, uintptr(fd), high, low, &result, uintptr(whence)))
+ return -1 if res < 0 else result
+ }
+}
+
+_unix_stat :: proc(path: cstring, stat: ^OS_Stat) -> int {
+ when ODIN_ARCH == "amd64" {
+ return int(intrinsics.syscall(unix.SYS_stat, uintptr(rawptr(path)), uintptr(stat)))
+ } else when ODIN_ARCH != "arm64" {
+ return int(intrinsics.syscall(unix.SYS_stat64, uintptr(rawptr(path)), uintptr(stat)))
+ } else { // NOTE: arm64 does not have stat
+ return int(intrinsics.syscall(unix.SYS_fstatat, uintptr(AT_FDCWD), uintptr(rawptr(path)), uintptr(stat), 0))
+ }
+}
+
+_unix_fstat :: proc(fd: Handle, stat: ^OS_Stat) -> int {
+ when ODIN_ARCH == "amd64" || ODIN_ARCH == "arm64" {
+ return int(intrinsics.syscall(unix.SYS_fstat, uintptr(fd), uintptr(stat)))
+ } else {
+ return int(intrinsics.syscall(unix.SYS_fstat64, uintptr(fd), uintptr(stat)))
+ }
+}
+
+_unix_lstat :: proc(path: cstring, stat: ^OS_Stat) -> int {
+ when ODIN_ARCH == "amd64" {
+ return int(intrinsics.syscall(unix.SYS_lstat, uintptr(rawptr(path)), uintptr(stat)))
+ } else when ODIN_ARCH != "arm64" {
+ return int(intrinsics.syscall(unix.SYS_lstat64, uintptr(rawptr(path)), uintptr(stat)))
+ } else { // NOTE: arm64 does not have any lstat
+ return int(intrinsics.syscall(unix.SYS_fstatat, uintptr(AT_FDCWD), uintptr(rawptr(path)), uintptr(stat), AT_SYMLINK_NOFOLLOW))
+ }
+}
+
+_unix_readlink :: proc(path: cstring, buf: rawptr, bufsiz: uint) -> int {
+ when ODIN_ARCH != "arm64" {
+ return int(intrinsics.syscall(unix.SYS_readlink, uintptr(rawptr(path)), uintptr(buf), uintptr(bufsiz)))
+ } else { // NOTE: arm64 does not have readlink
+ return int(intrinsics.syscall(unix.SYS_readlinkat, uintptr(AT_FDCWD), uintptr(rawptr(path)), uintptr(buf), uintptr(bufsiz)))
+ }
+}
+
+_unix_access :: proc(path: cstring, mask: int) -> int {
+ when ODIN_ARCH != "arm64" {
+ return int(intrinsics.syscall(unix.SYS_access, uintptr(rawptr(path)), uintptr(mask)))
+ } else { // NOTE: arm64 does not have access
+ return int(intrinsics.syscall(unix.SYS_faccessat, uintptr(AT_FDCWD), uintptr(rawptr(path)), uintptr(mask)))
+ }
+}
+
+_unix_getcwd :: proc(buf: rawptr, size: uint) -> int {
+ return int(intrinsics.syscall(unix.SYS_getcwd, uintptr(buf), uintptr(size)))
+}
+
+_unix_chdir :: proc(path: cstring) -> int {
+ return int(intrinsics.syscall(unix.SYS_chdir, uintptr(rawptr(path))))
+}
+
+_unix_rename :: proc(old, new: cstring) -> int {
+ when ODIN_ARCH != "arm64" {
+ return int(intrinsics.syscall(unix.SYS_rename, uintptr(rawptr(old)), uintptr(rawptr(new))))
+ } else { // NOTE: arm64 does not have rename
+ return int(intrinsics.syscall(unix.SYS_renameat, uintptr(AT_FDCWD), uintptr(rawptr(old)), uintptr(rawptr(new))))
+ }
+}
+
+_unix_unlink :: proc(path: cstring) -> int {
+ when ODIN_ARCH != "arm64" {
+ return int(intrinsics.syscall(unix.SYS_unlink, uintptr(rawptr(path))))
+ } else { // NOTE: arm64 does not have unlink
+ return int(intrinsics.syscall(unix.SYS_unlinkat, uintptr(AT_FDCWD), uintptr(rawptr(path)), 0))
+ }
+}
+
+_unix_rmdir :: proc(path: cstring) -> int {
+ when ODIN_ARCH != "arm64" {
+ return int(intrinsics.syscall(unix.SYS_rmdir, uintptr(rawptr(path))))
+ } else { // NOTE: arm64 does not have rmdir
+ return int(intrinsics.syscall(unix.SYS_unlinkat, uintptr(AT_FDCWD), uintptr(rawptr(path)), AT_REMOVEDIR))
+ }
+}
+
+_unix_mkdir :: proc(path: cstring, mode: u32) -> int {
+ when ODIN_ARCH != "arm64" {
+ return int(intrinsics.syscall(unix.SYS_mkdir, uintptr(rawptr(path)), uintptr(mode)))
+ } else { // NOTE: arm64 does not have mkdir
+ return int(intrinsics.syscall(unix.SYS_mkdirat, uintptr(AT_FDCWD), uintptr(rawptr(path)), uintptr(mode)))
+ }
+}
+
foreign libc {
@(link_name="__errno_location") __errno_location :: proc() -> ^int ---
- @(link_name="open") _unix_open :: proc(path: cstring, flags: c.int, mode: c.int) -> Handle ---
- @(link_name="close") _unix_close :: proc(fd: Handle) -> c.int ---
- @(link_name="read") _unix_read :: proc(fd: Handle, buf: rawptr, size: c.size_t) -> c.ssize_t ---
- @(link_name="write") _unix_write :: proc(fd: Handle, buf: rawptr, size: c.size_t) -> c.ssize_t ---
- @(link_name="lseek64") _unix_seek :: proc(fd: Handle, offset: i64, whence: c.int) -> i64 ---
- @(link_name="gettid") _unix_gettid :: proc() -> u64 ---
@(link_name="getpagesize") _unix_getpagesize :: proc() -> c.int ---
- @(link_name="stat64") _unix_stat :: proc(path: cstring, stat: ^OS_Stat) -> c.int ---
- @(link_name="lstat") _unix_lstat :: proc(path: cstring, stat: ^OS_Stat) -> c.int ---
- @(link_name="fstat") _unix_fstat :: proc(fd: Handle, stat: ^OS_Stat) -> c.int ---
@(link_name="fdopendir") _unix_fdopendir :: proc(fd: Handle) -> Dir ---
@(link_name="closedir") _unix_closedir :: proc(dirp: Dir) -> c.int ---
@(link_name="rewinddir") _unix_rewinddir :: proc(dirp: Dir) ---
@(link_name="readdir_r") _unix_readdir_r :: proc(dirp: Dir, entry: ^Dirent, result: ^^Dirent) -> c.int ---
- @(link_name="readlink") _unix_readlink :: proc(path: cstring, buf: ^byte, bufsiz: c.size_t) -> c.ssize_t ---
- @(link_name="access") _unix_access :: proc(path: cstring, mask: c.int) -> c.int ---
@(link_name="malloc") _unix_malloc :: proc(size: c.size_t) -> rawptr ---
@(link_name="calloc") _unix_calloc :: proc(num, size: c.size_t) -> rawptr ---
@(link_name="free") _unix_free :: proc(ptr: rawptr) ---
@(link_name="realloc") _unix_realloc :: proc(ptr: rawptr, size: c.size_t) -> rawptr ---
+
@(link_name="getenv") _unix_getenv :: proc(cstring) -> cstring ---
- @(link_name="getcwd") _unix_getcwd :: proc(buf: cstring, len: c.size_t) -> cstring ---
- @(link_name="chdir") _unix_chdir :: proc(buf: cstring) -> c.int ---
@(link_name="realpath") _unix_realpath :: proc(path: cstring, resolved_path: rawptr) -> rawptr ---
@(link_name="exit") _unix_exit :: proc(status: c.int) -> ! ---
@@ -308,51 +417,57 @@ is_path_separator :: proc(r: rune) -> bool {
return r == '/'
}
+// determine errno from syscall return value
+@private
+_get_errno :: proc(res: int) -> Errno {
+ if res < 0 && res > -4096 {
+ return Errno(-res)
+ }
+ return 0
+}
+
+// get errno from libc
get_last_error :: proc() -> int {
return __errno_location()^
}
open :: proc(path: string, flags: int = O_RDONLY, mode: int = 0) -> (Handle, Errno) {
cstr := strings.clone_to_cstring(path)
- handle := _unix_open(cstr, c.int(flags), c.int(mode))
- delete(cstr)
- if handle == -1 {
- return INVALID_HANDLE, Errno(get_last_error())
+ handle := _unix_open(cstr, flags, mode)
+ defer delete(cstr)
+ if handle < 0 {
+ return INVALID_HANDLE, _get_errno(int(handle))
}
return handle, ERROR_NONE
}
close :: proc(fd: Handle) -> Errno {
- result := _unix_close(fd)
- if result == -1 {
- return Errno(get_last_error())
- }
- return ERROR_NONE
+ return _get_errno(_unix_close(fd))
}
read :: proc(fd: Handle, data: []byte) -> (int, Errno) {
bytes_read := _unix_read(fd, &data[0], c.size_t(len(data)))
- if bytes_read == -1 {
- return -1, Errno(get_last_error())
+ if bytes_read < 0 {
+ return -1, _get_errno(bytes_read)
}
- return int(bytes_read), ERROR_NONE
+ return bytes_read, ERROR_NONE
}
write :: proc(fd: Handle, data: []byte) -> (int, Errno) {
if len(data) == 0 {
return 0, ERROR_NONE
}
- bytes_written := _unix_write(fd, &data[0], c.size_t(len(data)))
- if bytes_written == -1 {
- return -1, Errno(get_last_error())
+ bytes_written := _unix_write(fd, &data[0], uint(len(data)))
+ if bytes_written < 0 {
+ return -1, _get_errno(bytes_written)
}
return int(bytes_written), ERROR_NONE
}
seek :: proc(fd: Handle, offset: i64, whence: int) -> (i64, Errno) {
- res := _unix_seek(fd, offset, c.int(whence))
- if res == -1 {
- return -1, Errno(get_last_error())
+ res := _unix_seek(fd, offset, whence)
+ if res < 0 {
+ return -1, _get_errno(int(res))
}
return res, ERROR_NONE
}
@@ -365,6 +480,75 @@ file_size :: proc(fd: Handle) -> (i64, Errno) {
return max(s.size, 0), ERROR_NONE
}
+rename :: proc(old_path, new_path: string) -> Errno {
+ old_path_cstr := strings.clone_to_cstring(old_path, context.temp_allocator)
+ new_path_cstr := strings.clone_to_cstring(new_path, context.temp_allocator)
+ return _get_errno(_unix_rename(old_path_cstr, new_path_cstr))
+}
+
+remove :: proc(path: string) -> Errno {
+ path_cstr := strings.clone_to_cstring(path, context.temp_allocator)
+ return _get_errno(_unix_unlink(path_cstr))
+}
+
+make_directory :: proc(path: string, mode: u32 = 0o775) -> Errno {
+ path_cstr := strings.clone_to_cstring(path, context.temp_allocator)
+ return _get_errno(_unix_mkdir(path_cstr, mode))
+}
+
+remove_directory :: proc(path: string) -> Errno {
+ path_cstr := strings.clone_to_cstring(path, context.temp_allocator)
+ return _get_errno(_unix_rmdir(path_cstr))
+}
+
+is_file_handle :: proc(fd: Handle) -> bool {
+ s, err := _fstat(fd)
+ if err != ERROR_NONE {
+ return false
+ }
+ return S_ISREG(s.mode)
+}
+
+is_file_path :: proc(path: string, follow_links: bool = true) -> bool {
+ s: OS_Stat
+ err: Errno
+ if follow_links {
+ s, err = _stat(path)
+ } else {
+ s, err = _lstat(path)
+ }
+ if err != ERROR_NONE {
+ return false
+ }
+ return S_ISREG(s.mode)
+}
+
+
+is_dir_handle :: proc(fd: Handle) -> bool {
+ s, err := _fstat(fd)
+ if err != ERROR_NONE {
+ return false
+ }
+ return S_ISDIR(s.mode)
+}
+
+is_dir_path :: proc(path: string, follow_links: bool = true) -> bool {
+ s: OS_Stat
+ err: Errno
+ if follow_links {
+ s, err = _stat(path)
+ } else {
+ s, err = _lstat(path)
+ }
+ if err != ERROR_NONE {
+ return false
+ }
+ return S_ISDIR(s.mode)
+}
+
+is_file :: proc {is_file_path, is_file_handle}
+is_dir :: proc {is_dir_path, is_dir_handle}
+
// NOTE(bill): Uses startup to initialize it
@@ -401,8 +585,8 @@ _stat :: proc(path: string) -> (OS_Stat, Errno) {
s: OS_Stat
result := _unix_stat(cstr, &s)
- if result == -1 {
- return s, Errno(get_last_error())
+ if result < 0 {
+ return s, _get_errno(result)
}
return s, ERROR_NONE
}
@@ -414,8 +598,8 @@ _lstat :: proc(path: string) -> (OS_Stat, Errno) {
s: OS_Stat
result := _unix_lstat(cstr, &s)
- if result == -1 {
- return s, Errno(get_last_error())
+ if result < 0 {
+ return s, _get_errno(result)
}
return s, ERROR_NONE
}
@@ -424,8 +608,8 @@ _lstat :: proc(path: string) -> (OS_Stat, Errno) {
_fstat :: proc(fd: Handle) -> (OS_Stat, Errno) {
s: OS_Stat
result := _unix_fstat(fd, &s)
- if result == -1 {
- return s, Errno(get_last_error())
+ if result < 0 {
+ return s, _get_errno(result)
}
return s, ERROR_NONE
}
@@ -530,9 +714,9 @@ absolute_path_from_relative :: proc(rel: string) -> (path: string, err: Errno) {
access :: proc(path: string, mask: int) -> (bool, Errno) {
cstr := strings.clone_to_cstring(path)
defer delete(cstr)
- result := _unix_access(cstr, c.int(mask))
- if result == -1 {
- return false, Errno(get_last_error())
+ result := _unix_access(cstr, mask)
+ if result < 0 {
+ return false, _get_errno(result)
}
return true, ERROR_NONE
}
@@ -567,11 +751,12 @@ get_current_directory :: proc() -> string {
page_size := get_page_size()
buf := make([dynamic]u8, page_size)
for {
- #no_bounds_check cwd := _unix_getcwd(cstring(&buf[0]), c.size_t(len(buf)))
- if cwd != nil {
- return string(cwd)
+ #no_bounds_check res := _unix_getcwd(&buf[0], uint(len(buf)))
+
+ if res >= 0 {
+ return strings.string_from_nul_terminated_ptr(&buf[0], len(buf))
}
- if Errno(get_last_error()) != ERANGE {
+ if _get_errno(res) != ERANGE {
return ""
}
resize(&buf, len(buf)+page_size)
@@ -582,8 +767,8 @@ get_current_directory :: proc() -> string {
set_current_directory :: proc(path: string) -> (err: Errno) {
cstr := strings.clone_to_cstring(path, context.temp_allocator)
res := _unix_chdir(cstr)
- if res == -1 {
- return Errno(get_last_error())
+ if res < 0 {
+ return _get_errno(res)
}
return ERROR_NONE
}
From 8eaafd5242bb66f1daca84776a5e8560f65a3aa9 Mon Sep 17 00:00:00 2001
From: CiD-
Date: Wed, 12 Jan 2022 14:51:49 -0500
Subject: [PATCH 083/710] check correct errno in _readlink
---
core/os/os_linux.odin | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/core/os/os_linux.odin b/core/os/os_linux.odin
index 6b2fda1e3..1c796f1b8 100644
--- a/core/os/os_linux.odin
+++ b/core/os/os_linux.odin
@@ -666,9 +666,9 @@ _readlink :: proc(path: string) -> (string, Errno) {
buf := make([]byte, bufsz)
for {
rc := _unix_readlink(path_cstr, &(buf[0]), bufsz)
- if rc == -1 {
+ if rc < 0 {
delete(buf)
- return "", Errno(get_last_error())
+ return "", _get_errno(rc)
} else if rc == int(bufsz) {
// NOTE(laleksic, 2021-01-21): Any cleaner way to resize the slice?
bufsz *= 2
From fb0a3ab7c14d4bc3b821cef723ec6ea3e956c956 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Wed, 12 Jan 2022 20:07:17 +0000
Subject: [PATCH 084/710] Correct linkage for entry point procedures on Windows
---
src/checker.cpp | 8 +++++++-
src/llvm_backend.cpp | 23 +++++++++++------------
src/llvm_backend_proc.cpp | 2 +-
3 files changed, 19 insertions(+), 14 deletions(-)
diff --git a/src/checker.cpp b/src/checker.cpp
index 42575f88d..f261c8f4a 100644
--- a/src/checker.cpp
+++ b/src/checker.cpp
@@ -2113,11 +2113,15 @@ void generate_minimum_dependency_set(Checker *c, Entity *start) {
case Entity_Variable:
if (e->Variable.is_export) {
add_dependency_to_set(c, e);
+ } else if (e->flags & EntityFlag_Require) {
+ add_dependency_to_set(c, e);
}
break;
case Entity_Procedure:
if (e->Procedure.is_export) {
add_dependency_to_set(c, e);
+ } else if (e->flags & EntityFlag_Require) {
+ add_dependency_to_set(c, e);
}
if (e->flags & EntityFlag_Init) {
Type *t = base_type(e->type);
@@ -5440,7 +5444,9 @@ void check_parsed_files(Checker *c) {
Ast *node = nullptr;
while (mpmc_dequeue(&c->info.intrinsics_entry_point_usage, &node)) {
if (c->info.entry_point == nullptr && node != nullptr) {
- warning(node, "usage of intrinsics.__entry_point will be a no-op");
+ if (node->file()->pkg->kind != Package_Runtime) {
+ warning(node, "usage of intrinsics.__entry_point will be a no-op");
+ }
}
}
}
diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp
index 0b4c674ac..1c3cf86ac 100644
--- a/src/llvm_backend.cpp
+++ b/src/llvm_backend.cpp
@@ -1405,7 +1405,6 @@ void lb_generate_code(lbGenerator *gen) {
isize global_variable_max_count = 0;
- Entity *entry_point = info->entry_point;
bool already_has_entry_point = false;
for_array(i, info->entities) {
@@ -1416,14 +1415,17 @@ void lb_generate_code(lbGenerator *gen) {
global_variable_max_count++;
} else if (e->kind == Entity_Procedure) {
if ((e->scope->flags&ScopeFlag_Init) && name == "main") {
- GB_ASSERT(e == entry_point);
- // entry_point = e;
+ GB_ASSERT(e == info->entry_point);
}
if (e->Procedure.is_export ||
(e->Procedure.link_name.len > 0) ||
((e->scope->flags&ScopeFlag_File) && e->Procedure.link_name.len > 0)) {
String link_name = e->Procedure.link_name;
- if (link_name == "main" || link_name == "DllMain" || link_name == "WinMain" || link_name == "mainCRTStartup") {
+ if (link_name == "main" ||
+ link_name == "DllMain" ||
+ link_name == "WinMain" ||
+ link_name == "wWinMain" ||
+ link_name == "mainCRTStartup") {
already_has_entry_point = true;
}
}
@@ -1562,6 +1564,11 @@ void lb_generate_code(lbGenerator *gen) {
}
}
+ TIME_SECTION("LLVM Runtime Type Information Creation");
+ lbProcedure *startup_type_info = lb_create_startup_type_info(default_module);
+
+ TIME_SECTION("LLVM Runtime Startup Creation (Global Variables)");
+ lbProcedure *startup_runtime = lb_create_startup_runtime(default_module, startup_type_info, global_variables);
TIME_SECTION("LLVM Global Procedures and Types");
for_array(i, info->entities) {
@@ -1621,14 +1628,6 @@ void lb_generate_code(lbGenerator *gen) {
}
}
-
- TIME_SECTION("LLVM Runtime Type Information Creation");
- lbProcedure *startup_type_info = lb_create_startup_type_info(default_module);
-
- TIME_SECTION("LLVM Runtime Startup Creation (Global Variables)");
- lbProcedure *startup_runtime = lb_create_startup_runtime(default_module, startup_type_info, global_variables);
-
-
TIME_SECTION("LLVM Procedure Generation");
for_array(j, gen->modules.entries) {
lbModule *m = gen->modules.entries[j].value;
diff --git a/src/llvm_backend_proc.cpp b/src/llvm_backend_proc.cpp
index 10b8a093f..c52572588 100644
--- a/src/llvm_backend_proc.cpp
+++ b/src/llvm_backend_proc.cpp
@@ -304,7 +304,7 @@ lbProcedure *lb_create_procedure(lbModule *m, Entity *entity, bool ignore_body)
lbProcedure *lb_create_dummy_procedure(lbModule *m, String link_name, Type *type) {
{
lbValue *found = string_map_get(&m->members, link_name);
- GB_ASSERT(found == nullptr);
+ GB_ASSERT_MSG(found == nullptr, "failed to create dummy procedure for: %.*s", LIT(link_name));
}
lbProcedure *p = gb_alloc_item(permanent_allocator(), lbProcedure);
From f1521aa980da5753a6ba6ea951d1cb2ebfd0e66a Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Wed, 12 Jan 2022 20:10:23 +0000
Subject: [PATCH 085/710] Add proc_windows.odin for custom entry points
---
core/runtime/proc_windows.odin | 44 ++++++++++++++++++++++++++++++++++
1 file changed, 44 insertions(+)
create mode 100644 core/runtime/proc_windows.odin
diff --git a/core/runtime/proc_windows.odin b/core/runtime/proc_windows.odin
new file mode 100644
index 000000000..ba3a2b9d8
--- /dev/null
+++ b/core/runtime/proc_windows.odin
@@ -0,0 +1,44 @@
+//+private
+//+build windows
+package runtime
+
+import "core:intrinsics"
+
+when ODIN_BUILD_MODE == "dynamic" {
+ @(link_name="DllMain", linkage="strong", require)
+ DllMain :: proc "stdcall" (hinstDLL: rawptr, fdwReason: u32, lpReserved: rawptr) -> b32 {
+ context = default_context()
+ switch fdwReason {
+ case 1: // DLL_PROCESS_ATTACH
+ #force_no_inline _startup_runtime()
+ intrinsics.__entry_point()
+ case 0: // DLL_PROCESS_DETACH
+ #force_no_inline _cleanup_runtime()
+ case 2: // DLL_THREAD_ATTACH
+ break
+ case 3: // DLL_THREAD_DETACH
+ break
+ }
+ return true
+ }
+} else when !ODIN_TEST && !ODIN_NO_ENTRY_POINT {
+ when ODIN_ARCH == "386" || ODIN_NO_CRT {
+ @(link_name="mainCRTStartup", linkage="strong", require)
+ mainCRTStartup :: proc "stdcall" () -> i32 {
+ context = default_context()
+ #force_no_inline _startup_runtime()
+ intrinsics.__entry_point()
+ #force_no_inline _cleanup_runtime()
+ return 0
+ }
+ } else {
+ @(link_name="main", linkage="strong", require)
+ main :: proc "c" (argc: i32, argv: [^]cstring) -> i32 {
+ context = default_context()
+ #force_no_inline _startup_runtime()
+ intrinsics.__entry_point()
+ #force_no_inline _cleanup_runtime()
+ return 0
+ }
+ }
+}
\ No newline at end of file
From 75b7f2b9feada3cf6ed8aab5142e078d7b07ed59 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Wed, 12 Jan 2022 20:13:38 +0000
Subject: [PATCH 086/710] Correct `-init` for *nix to be a different procedure
---
core/runtime/proc_unix.odin | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)
create mode 100644 core/runtime/proc_unix.odin
diff --git a/core/runtime/proc_unix.odin b/core/runtime/proc_unix.odin
new file mode 100644
index 000000000..7e0d1055a
--- /dev/null
+++ b/core/runtime/proc_unix.odin
@@ -0,0 +1,18 @@
+//+private
+//+build linux, darwin
+package runtime
+
+import "core:intrinsics"
+
+when ODIN_BUILD_MODE == "dynamic" {
+ @(link_name="_odin_entry_point", linkage="strong", require)
+ _odin_entry_point :: proc "c" () {
+ context = default_context()
+ #force_no_inline _startup_runtime()
+ }
+ @(link_name="_odin_exit_point", linkage="strong", require)
+ _odin_exit_point :: proc "c" () {
+ context = default_context()
+ #force_no_inline _cleanup_runtime()
+ }
+}
\ No newline at end of file
From 6209b02bf9d8dc05c34754beded39c4b80f1d8c6 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Wed, 12 Jan 2022 20:16:04 +0000
Subject: [PATCH 087/710] Add `intrinsics._entry_point` call to
`_odin_entry_point`
---
core/runtime/proc_unix.odin | 1 +
1 file changed, 1 insertion(+)
diff --git a/core/runtime/proc_unix.odin b/core/runtime/proc_unix.odin
index 7e0d1055a..cb3199ad1 100644
--- a/core/runtime/proc_unix.odin
+++ b/core/runtime/proc_unix.odin
@@ -9,6 +9,7 @@ when ODIN_BUILD_MODE == "dynamic" {
_odin_entry_point :: proc "c" () {
context = default_context()
#force_no_inline _startup_runtime()
+ intrinsics._odin_entry_point()
}
@(link_name="_odin_exit_point", linkage="strong", require)
_odin_exit_point :: proc "c" () {
From 7df93ea5044bcc4cb5d9bb15a511b449225b2182 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Wed, 12 Jan 2022 20:16:46 +0000
Subject: [PATCH 088/710] Initialize `runtime.args__` through `main`
---
core/runtime/proc_windows.odin | 1 +
1 file changed, 1 insertion(+)
diff --git a/core/runtime/proc_windows.odin b/core/runtime/proc_windows.odin
index ba3a2b9d8..ef8b0c529 100644
--- a/core/runtime/proc_windows.odin
+++ b/core/runtime/proc_windows.odin
@@ -34,6 +34,7 @@ when ODIN_BUILD_MODE == "dynamic" {
} else {
@(link_name="main", linkage="strong", require)
main :: proc "c" (argc: i32, argv: [^]cstring) -> i32 {
+ args__ = argv[:argc]
context = default_context()
#force_no_inline _startup_runtime()
intrinsics.__entry_point()
From e30f16b1f3fa5630c43c62f5602e26aa8d55e53b Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Wed, 12 Jan 2022 20:17:30 +0000
Subject: [PATCH 089/710] Correct `-init` for *nix
---
src/main.cpp | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/src/main.cpp b/src/main.cpp
index 444ab44f0..9b2c5d5de 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -445,7 +445,7 @@ i32 linker_stage(lbGenerator *gen) {
// so it doesn't generate symbols which cannot be relocated.
link_settings = gb_string_appendc(link_settings, "-shared ");
- // NOTE(dweiler): __$startup_runtime must be called at initialization
+ // NOTE(dweiler): _odin_entry_point must be called at initialization
// time of the shared object, we can pass -init to the linker by using
// a comma separated list of arguments to -Wl.
//
@@ -457,10 +457,10 @@ i32 linker_stage(lbGenerator *gen) {
// Shared libraries are .dylib on MacOS and .so on Linux.
#if defined(GB_SYSTEM_OSX)
output_ext = STR_LIT(".dylib");
- link_settings = gb_string_appendc(link_settings, "-Wl,-init,'___$startup_runtime' ");
+ link_settings = gb_string_appendc(link_settings, "-Wl,-init,'__odin_entry_point' ");
#else
output_ext = STR_LIT(".so");
- link_settings = gb_string_appendc(link_settings, "-Wl,-init,'__$startup_runtime' ");
+ link_settings = gb_string_appendc(link_settings, "-Wl,-init,'__odin_entry_point' ");
#endif
} else {
link_settings = gb_string_appendc(link_settings, "-no-pie ");
From 3def94505eb72dee3ce46047d8e7820cad8d0c8f Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Wed, 12 Jan 2022 20:28:11 +0000
Subject: [PATCH 090/710] Add `dynamic` to error message for `-build-mode`
---
src/main.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/main.cpp b/src/main.cpp
index 9b2c5d5de..35b3d713b 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -1280,7 +1280,7 @@ bool parse_build_flags(Array args) {
} else {
gb_printf_err("Unknown build mode '%.*s'\n", LIT(str));
gb_printf_err("Valid build modes:\n");
- gb_printf_err("\tdll, shared\n");
+ gb_printf_err("\tdll, shared, dynamic\n");
gb_printf_err("\tobj, object\n");
gb_printf_err("\texe\n");
gb_printf_err("\tasm, assembly, assembler\n");
From 8f038118420ecbca072017b4f0379603688389c4 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Wed, 12 Jan 2022 20:30:34 +0000
Subject: [PATCH 091/710] Fix typo
---
core/runtime/proc_unix.odin | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/core/runtime/proc_unix.odin b/core/runtime/proc_unix.odin
index cb3199ad1..2a62c8d8f 100644
--- a/core/runtime/proc_unix.odin
+++ b/core/runtime/proc_unix.odin
@@ -9,7 +9,7 @@ when ODIN_BUILD_MODE == "dynamic" {
_odin_entry_point :: proc "c" () {
context = default_context()
#force_no_inline _startup_runtime()
- intrinsics._odin_entry_point()
+ intrinsics.__entry_point()
}
@(link_name="_odin_exit_point", linkage="strong", require)
_odin_exit_point :: proc "c" () {
From 80f175cdb0fe13b4bd83239373f890c155b1ecf5 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Wed, 12 Jan 2022 20:40:34 +0000
Subject: [PATCH 092/710] Add empty `main` dynamic builds for *nix systems
---
core/runtime/proc_unix.odin | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/core/runtime/proc_unix.odin b/core/runtime/proc_unix.odin
index 2a62c8d8f..119ce3718 100644
--- a/core/runtime/proc_unix.odin
+++ b/core/runtime/proc_unix.odin
@@ -16,4 +16,8 @@ when ODIN_BUILD_MODE == "dynamic" {
context = default_context()
#force_no_inline _cleanup_runtime()
}
+ @(link_name="main", linkage="strong", require)
+ main :: proc(argc: i32, argv: [^]cstring) -> i32 {
+ return 0
+ }
}
\ No newline at end of file
From c9bc7596240ac3c18d48c0545f478e08b0c2a23e Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Wed, 12 Jan 2022 23:04:31 +0000
Subject: [PATCH 093/710] Correct calling convention
---
core/runtime/proc_unix.odin | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/core/runtime/proc_unix.odin b/core/runtime/proc_unix.odin
index 119ce3718..eb656f39b 100644
--- a/core/runtime/proc_unix.odin
+++ b/core/runtime/proc_unix.odin
@@ -17,7 +17,7 @@ when ODIN_BUILD_MODE == "dynamic" {
#force_no_inline _cleanup_runtime()
}
@(link_name="main", linkage="strong", require)
- main :: proc(argc: i32, argv: [^]cstring) -> i32 {
+ main :: proc "c" (argc: i32, argv: [^]cstring) -> i32 {
return 0
}
}
\ No newline at end of file
From ee260986a9bb5dcf1eed24425313fae95c33f187 Mon Sep 17 00:00:00 2001
From: Dale Weiler
Date: Thu, 13 Jan 2022 00:19:04 -0500
Subject: [PATCH 094/710] more fixes
---
src/main.cpp | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/src/main.cpp b/src/main.cpp
index 35b3d713b..9aa9bd2ac 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -446,8 +446,9 @@ i32 linker_stage(lbGenerator *gen) {
link_settings = gb_string_appendc(link_settings, "-shared ");
// NOTE(dweiler): _odin_entry_point must be called at initialization
- // time of the shared object, we can pass -init to the linker by using
- // a comma separated list of arguments to -Wl.
+ // time of the shared object, similarly, _odin_exit_point must be called
+ // at deinitialization. We can pass both -init and -fini to the linker by
+ // using a comma separated list of arguments to -Wl.
//
// This previously used ld but ld cannot actually build a shared library
// correctly this way since all the other dependencies provided implicitly
@@ -457,11 +458,11 @@ i32 linker_stage(lbGenerator *gen) {
// Shared libraries are .dylib on MacOS and .so on Linux.
#if defined(GB_SYSTEM_OSX)
output_ext = STR_LIT(".dylib");
- link_settings = gb_string_appendc(link_settings, "-Wl,-init,'__odin_entry_point' ");
#else
output_ext = STR_LIT(".so");
- link_settings = gb_string_appendc(link_settings, "-Wl,-init,'__odin_entry_point' ");
#endif
+ link_settings = gb_string_appendc(link_settings, "-Wl,-init,'_odin_entry_point' ");
+ link_settings = gb_string_appendc(link_settings, "-Wl,-fini,'_odin_exit_point' ");
} else {
link_settings = gb_string_appendc(link_settings, "-no-pie ");
}
From 315a08f33f0d3ae9c16377c5a077733824d301d4 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Thu, 13 Jan 2022 12:04:42 +0000
Subject: [PATCH 095/710] Add `main` to proc_unix.odin
---
core/runtime/proc_unix.odin | 16 +++++++++++++---
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/core/runtime/proc_unix.odin b/core/runtime/proc_unix.odin
index eb656f39b..38116c0f9 100644
--- a/core/runtime/proc_unix.odin
+++ b/core/runtime/proc_unix.odin
@@ -1,17 +1,17 @@
//+private
-//+build linux, darwin
+//+build linux, darwin, freebsd
package runtime
import "core:intrinsics"
when ODIN_BUILD_MODE == "dynamic" {
- @(link_name="_odin_entry_point", linkage="strong", require)
+ @(link_name="_odin_entry_point", linkage="strong", require, link_section=".init")
_odin_entry_point :: proc "c" () {
context = default_context()
#force_no_inline _startup_runtime()
intrinsics.__entry_point()
}
- @(link_name="_odin_exit_point", linkage="strong", require)
+ @(link_name="_odin_exit_point", linkage="strong", require, link_section=".fini")
_odin_exit_point :: proc "c" () {
context = default_context()
#force_no_inline _cleanup_runtime()
@@ -20,4 +20,14 @@ when ODIN_BUILD_MODE == "dynamic" {
main :: proc "c" (argc: i32, argv: [^]cstring) -> i32 {
return 0
}
+} else when !ODIN_TEST && !ODIN_NO_ENTRY_POINT {
+ @(link_name="main", linkage="strong", require)
+ main :: proc "c" (argc: i32, argv: [^]cstring) -> i32 {
+ args__ = argv[:argc]
+ context = default_context()
+ #force_no_inline _startup_runtime()
+ intrinsics.__entry_point()
+ #force_no_inline _cleanup_runtime()
+ return 0
+ }
}
\ No newline at end of file
From b33ca6651e442c2ff6122cbdadedec782cb376c9 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Thu, 13 Jan 2022 12:05:22 +0000
Subject: [PATCH 096/710] Rename `proc_*` to `entry_*`
---
core/runtime/{proc_unix.odin => entry_unix.odin} | 0
core/runtime/{proc_windows.odin => entry_windows.odin} | 0
2 files changed, 0 insertions(+), 0 deletions(-)
rename core/runtime/{proc_unix.odin => entry_unix.odin} (100%)
rename core/runtime/{proc_windows.odin => entry_windows.odin} (100%)
diff --git a/core/runtime/proc_unix.odin b/core/runtime/entry_unix.odin
similarity index 100%
rename from core/runtime/proc_unix.odin
rename to core/runtime/entry_unix.odin
diff --git a/core/runtime/proc_windows.odin b/core/runtime/entry_windows.odin
similarity index 100%
rename from core/runtime/proc_windows.odin
rename to core/runtime/entry_windows.odin
From e15f71466005ff84c6dc97cbfbd10d57f1193e7a Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Thu, 13 Jan 2022 15:18:47 +0000
Subject: [PATCH 097/710] Define wasm `_start` entry point in Odin code
---
core/runtime/entry_wasm.odin | 19 +++++++++++++++++++
1 file changed, 19 insertions(+)
create mode 100644 core/runtime/entry_wasm.odin
diff --git a/core/runtime/entry_wasm.odin b/core/runtime/entry_wasm.odin
new file mode 100644
index 000000000..125abc756
--- /dev/null
+++ b/core/runtime/entry_wasm.odin
@@ -0,0 +1,19 @@
+//+private
+//+build wasm32, wasm64
+package runtime
+
+import "core:intrinsics"
+
+when !ODIN_TEST && !ODIN_NO_ENTRY_POINT {
+ @(link_name="_start", linkage="strong", require, export)
+ _start :: proc "c" () {
+ context = default_context()
+ #force_no_inline _startup_runtime()
+ intrinsics.__entry_point()
+ }
+ @(link_name="_end", linkage="strong", require, export)
+ _end :: proc "c" () {
+ context = default_context()
+ #force_no_inline _cleanup_runtime()
+ }
+}
\ No newline at end of file
From 6cf5371d7d55038c0d0789c240813e1be08c0107 Mon Sep 17 00:00:00 2001
From: CiD-
Date: Fri, 14 Jan 2022 10:17:49 -0500
Subject: [PATCH 098/710] fix push_back and pop_front
---
core/container/queue/queue.odin | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/core/container/queue/queue.odin b/core/container/queue/queue.odin
index ff1e85fbd..feca6934c 100644
--- a/core/container/queue/queue.odin
+++ b/core/container/queue/queue.odin
@@ -86,7 +86,8 @@ push_back :: proc(q: ^$Q/Queue($T), elem: T) -> bool {
if space(q^) == 0 {
_grow(q) or_return
}
- q.data[q.len] = elem
+ idx := (q.offset+uint(q.len))%builtin.len(q.data)
+ q.data[idx] = elem
q.len += 1
return true
}
@@ -126,6 +127,7 @@ pop_back_safe :: proc(q: ^$Q/Queue($T)) -> (elem: T, ok: bool) {
pop_front :: proc(q: ^$Q/Queue($T), loc := #caller_location) -> (elem: T) {
assert(condition=q.len > 0, loc=loc)
elem = q.data[q.offset]
+ q.offset = (q.offset+1)%builtin.len(q.data)
q.len -= 1
return
}
@@ -133,6 +135,7 @@ pop_front :: proc(q: ^$Q/Queue($T), loc := #caller_location) -> (elem: T) {
pop_front_safe :: proc(q: ^$Q/Queue($T)) -> (elem: T, ok: bool) {
if q.len > 0 {
elem = q.data[q.offset]
+ q.offset = (q.offset+1)%builtin.len(q.data)
q.len -= 1
ok = true
}
From c6ed3fa4b5aca2a5e2b444c6b9cd4f8d2ca8f3e1 Mon Sep 17 00:00:00 2001
From: oskarnp
Date: Fri, 14 Jan 2022 10:43:33 -0500
Subject: [PATCH 099/710] Fix invalid linker flags passed to clang on macOS
---
src/main.cpp | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/src/main.cpp b/src/main.cpp
index 9aa9bd2ac..fe56d451f 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -485,16 +485,16 @@ i32 linker_stage(lbGenerator *gen) {
// NOTE: If you change this (although this minimum is as low as you can go with Odin working)
// make sure to also change the 'mtriple' param passed to 'opt'
#if defined(GB_CPU_ARM)
- " -macosx_version_min 11.0.0 "
+ " -mmacosx-version-min=11.0.0 "
#else
- " -macosx_version_min 10.8.0 "
+ " -mmacosx-version-min=10.8.0 "
#endif
// This points the linker to where the entry point is
" -e _main "
#endif
, object_files, LIT(output_base), LIT(output_ext),
#if defined(GB_SYSTEM_OSX)
- "-lSystem -lm -syslibroot /Library/Developer/CommandLineTools/SDKs/MacOSX.sdk -L/usr/local/lib",
+ "-lSystem -lm -Wl,-syslibroot /Library/Developer/CommandLineTools/SDKs/MacOSX.sdk -L/usr/local/lib",
#else
"-lc -lm",
#endif
From 6aa80ee8e4fc8f96c24e9881a5833f14c86eff05 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Sat, 15 Jan 2022 15:38:09 +0000
Subject: [PATCH 100/710] Correct `_start` as an entry point
---
src/llvm_backend.cpp | 17 ++++++++++-------
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp
index 1c3cf86ac..7a7f20f8d 100644
--- a/src/llvm_backend.cpp
+++ b/src/llvm_backend.cpp
@@ -1420,13 +1420,16 @@ void lb_generate_code(lbGenerator *gen) {
if (e->Procedure.is_export ||
(e->Procedure.link_name.len > 0) ||
((e->scope->flags&ScopeFlag_File) && e->Procedure.link_name.len > 0)) {
- String link_name = e->Procedure.link_name;
- if (link_name == "main" ||
- link_name == "DllMain" ||
- link_name == "WinMain" ||
- link_name == "wWinMain" ||
- link_name == "mainCRTStartup") {
- already_has_entry_point = true;
+ String link_name = e->Procedure.link_name;
+ if (e->pkg->kind == Package_Runtime) {
+ if (link_name == "main" ||
+ link_name == "DllMain" ||
+ link_name == "WinMain" ||
+ link_name == "wWinMain" ||
+ link_name == "mainCRTStartup" ||
+ link_name == "_start") {
+ already_has_entry_point = true;
+ }
}
}
}
From a390ef41f803587ebdf404cd0f10c19a0a651994 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Sat, 15 Jan 2022 15:55:01 +0000
Subject: [PATCH 101/710] Fix swizzle logic within `lb_build_assign_stmt_array`
---
src/llvm_backend_stmt.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp
index 20b444058..04e7e0d03 100644
--- a/src/llvm_backend_stmt.cpp
+++ b/src/llvm_backend_stmt.cpp
@@ -1766,6 +1766,8 @@ void lb_build_for_stmt(lbProcedure *p, Ast *node) {
}
void lb_build_assign_stmt_array(lbProcedure *p, TokenKind op, lbAddr const &lhs, lbValue const &value) {
+ GB_ASSERT(op != Token_Eq);
+
Type *lhs_type = lb_addr_type(lhs);
Type *array_type = base_type(lhs_type);
GB_ASSERT(is_type_array_like(array_type));
@@ -1795,7 +1797,6 @@ void lb_build_assign_stmt_array(lbProcedure *p, TokenKind op, lbAddr const &lhs,
}
indices[index_count++] = index;
}
- gb_sort_array(indices, index_count, gb_i32_cmp(0));
lbValue lhs_ptrs[4] = {};
lbValue x_loads[4] = {};
@@ -1840,7 +1841,6 @@ void lb_build_assign_stmt_array(lbProcedure *p, TokenKind op, lbAddr const &lhs,
}
indices[index_count++] = index;
}
- gb_sort_array(indices.data, index_count, gb_i32_cmp(0));
lbValue lhs_ptrs[4] = {};
lbValue x_loads[4] = {};
From 7501cc2f17315af368abad8d89f669e414c6cd9e Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Sat, 15 Jan 2022 16:01:23 +0000
Subject: [PATCH 102/710] Remove dead code
---
src/llvm_backend_stmt.cpp | 48 ---------------------------------------
1 file changed, 48 deletions(-)
diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp
index 04e7e0d03..5882b71ae 100644
--- a/src/llvm_backend_stmt.cpp
+++ b/src/llvm_backend_stmt.cpp
@@ -1868,11 +1868,7 @@ void lb_build_assign_stmt_array(lbProcedure *p, TokenKind op, lbAddr const &lhs,
lbValue x = lb_addr_get_ptr(p, lhs);
-
-
if (inline_array_arith) {
- #if 1
- #if 1
unsigned n = cast(unsigned)count;
auto lhs_ptrs = slice_make(temporary_allocator(), n);
@@ -1896,50 +1892,6 @@ void lb_build_assign_stmt_array(lbProcedure *p, TokenKind op, lbAddr const &lhs,
for (unsigned i = 0; i < n; i++) {
lb_emit_store(p, lhs_ptrs[i], ops[i]);
}
-
- #else
- lbValue y = lb_address_from_load_or_generate_local(p, rhs);
-
- unsigned n = cast(unsigned)count;
-
- auto lhs_ptrs = slice_make(temporary_allocator(), n);
- auto rhs_ptrs = slice_make(temporary_allocator(), n);
- auto x_loads = slice_make(temporary_allocator(), n);
- auto y_loads = slice_make(temporary_allocator(), n);
- auto ops = slice_make(temporary_allocator(), n);
-
- for (unsigned i = 0; i < n; i++) {
- lhs_ptrs[i] = lb_emit_array_epi(p, x, i);
- }
- for (unsigned i = 0; i < n; i++) {
- rhs_ptrs[i] = lb_emit_array_epi(p, y, i);
- }
- for (unsigned i = 0; i < n; i++) {
- x_loads[i] = lb_emit_load(p, lhs_ptrs[i]);
- }
- for (unsigned i = 0; i < n; i++) {
- y_loads[i] = lb_emit_load(p, rhs_ptrs[i]);
- }
- for (unsigned i = 0; i < n; i++) {
- ops[i] = lb_emit_arith(p, op, x_loads[i], y_loads[i], elem_type);
- }
- for (unsigned i = 0; i < n; i++) {
- lb_emit_store(p, lhs_ptrs[i], ops[i]);
- }
- #endif
- #else
- lbValue y = lb_address_from_load_or_generate_local(p, rhs);
-
- for (i64 i = 0; i < count; i++) {
- lbValue a_ptr = lb_emit_array_epi(p, x, i);
- lbValue b_ptr = lb_emit_array_epi(p, y, i);
-
- lbValue a = lb_emit_load(p, a_ptr);
- lbValue b = lb_emit_load(p, b_ptr);
- lbValue c = lb_emit_arith(p, op, a, b, elem_type);
- lb_emit_store(p, a_ptr, c);
- }
- #endif
} else {
lbValue y = lb_address_from_load_or_generate_local(p, rhs);
From 79f32d7b71f8ca00fa347ed0ab393d0d8c02111b Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Sat, 15 Jan 2022 16:03:37 +0000
Subject: [PATCH 103/710] Remove unused lbDefer kind
---
src/llvm_backend.hpp | 3 ---
src/llvm_backend_stmt.cpp | 4 ----
2 files changed, 7 deletions(-)
diff --git a/src/llvm_backend.hpp b/src/llvm_backend.hpp
index e70b1f84c..45e58cacf 100644
--- a/src/llvm_backend.hpp
+++ b/src/llvm_backend.hpp
@@ -204,7 +204,6 @@ enum lbDeferExitKind {
enum lbDeferKind {
lbDefer_Node,
- lbDefer_Instr,
lbDefer_Proc,
};
@@ -215,8 +214,6 @@ struct lbDefer {
lbBlock * block;
union {
Ast *stmt;
- // NOTE(bill): 'instr' will be copied every time to create a new one
- lbValue instr;
struct {
lbValue deferred;
Array result_as_args;
diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp
index 5882b71ae..3375ceda9 100644
--- a/src/llvm_backend_stmt.cpp
+++ b/src/llvm_backend_stmt.cpp
@@ -2172,10 +2172,6 @@ void lb_build_defer_stmt(lbProcedure *p, lbDefer const &d) {
lb_start_block(p, b);
if (d.kind == lbDefer_Node) {
lb_build_stmt(p, d.stmt);
- } else if (d.kind == lbDefer_Instr) {
- // NOTE(bill): Need to make a new copy
- LLVMValueRef instr = LLVMInstructionClone(d.instr.value);
- LLVMInsertIntoBuilder(p->builder, instr);
} else if (d.kind == lbDefer_Proc) {
lb_emit_call(p, d.proc.deferred, d.proc.result_as_args);
}
From 9ecbadd457a6a647c88c9b9670e9a9154fb33e5d Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Sat, 15 Jan 2022 16:16:11 +0000
Subject: [PATCH 104/710] Simplify procedure parameters callee logic
---
src/llvm_backend.hpp | 1 -
src/llvm_backend_proc.cpp | 53 ++++++++++++++-------------------------
2 files changed, 19 insertions(+), 35 deletions(-)
diff --git a/src/llvm_backend.hpp b/src/llvm_backend.hpp
index 45e58cacf..49f675a49 100644
--- a/src/llvm_backend.hpp
+++ b/src/llvm_backend.hpp
@@ -267,7 +267,6 @@ struct lbProcedure {
bool is_done;
lbAddr return_ptr;
- Array params;
Array defer_stmts;
Array blocks;
Array branch_blocks;
diff --git a/src/llvm_backend_proc.cpp b/src/llvm_backend_proc.cpp
index c52572588..eccf9b360 100644
--- a/src/llvm_backend_proc.cpp
+++ b/src/llvm_backend_proc.cpp
@@ -107,7 +107,6 @@ lbProcedure *lb_create_procedure(lbModule *m, Entity *entity, bool ignore_body)
gbAllocator a = heap_allocator();
p->children.allocator = a;
- p->params.allocator = a;
p->defer_stmts.allocator = a;
p->blocks.allocator = a;
p->branch_blocks.allocator = a;
@@ -323,7 +322,6 @@ lbProcedure *lb_create_dummy_procedure(lbModule *m, String link_name, Type *type
gbAllocator a = permanent_allocator();
p->children.allocator = a;
- p->params.allocator = a;
p->defer_stmts.allocator = a;
p->blocks.allocator = a;
p->branch_blocks.allocator = a;
@@ -478,42 +476,29 @@ void lb_begin_procedure_body(lbProcedure *p) {
if (arg_type->kind == lbArg_Ignore) {
continue;
} else if (arg_type->kind == lbArg_Direct) {
- lbParamPasskind kind = lbParamPass_Value;
- LLVMTypeRef param_type = lb_type(p->module, e->type);
- if (param_type != arg_type->type) {
- kind = lbParamPass_BitCast;
+ if (e->token.string.len != 0 && !is_blank_ident(e->token.string)) {
+ LLVMTypeRef param_type = lb_type(p->module, e->type);
+ LLVMValueRef value = LLVMGetParam(p->value, param_offset+param_index);
+
+ value = OdinLLVMBuildTransmute(p, value, param_type);
+
+ lbValue param = {};
+ param.value = value;
+ param.type = e->type;
+
+ lbValue ptr = lb_address_from_load_or_generate_local(p, param);
+ lb_add_entity(p->module, e, ptr);
}
- LLVMValueRef value = LLVMGetParam(p->value, param_offset+param_index);
-
- value = OdinLLVMBuildTransmute(p, value, param_type);
-
- lbValue param = {};
- param.value = value;
- param.type = e->type;
- array_add(&p->params, param);
-
- if (e->token.string.len != 0) {
- lbAddr l = lb_add_local(p, e->type, e, false, param_index);
- lb_addr_store(p, l, param);
- }
-
- param_index += 1;
} else if (arg_type->kind == lbArg_Indirect) {
- LLVMValueRef value_ptr = LLVMGetParam(p->value, param_offset+param_index);
- LLVMValueRef value = LLVMBuildLoad(p->builder, value_ptr, "");
+ if (e->token.string.len != 0 && !is_blank_ident(e->token.string)) {
+ lbValue ptr = {};
+ ptr.value = LLVMGetParam(p->value, param_offset+param_index);
+ ptr.type = alloc_type_pointer(e->type);
- lbValue param = {};
- param.value = value;
- param.type = e->type;
- array_add(&p->params, param);
-
- lbValue ptr = {};
- ptr.value = value_ptr;
- ptr.type = alloc_type_pointer(e->type);
-
- lb_add_entity(p->module, e, ptr);
- param_index += 1;
+ lb_add_entity(p->module, e, ptr);
+ }
}
+ param_index += 1;
}
}
From 51dcbc80c3f0ef8755a0dc07b257ca93b264c2cc Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Sat, 15 Jan 2022 16:26:14 +0000
Subject: [PATCH 105/710] Add `LLVMAddMergedLoadStoreMotionPass` on `-debug
-opt:0`
---
src/llvm_backend_opt.cpp | 16 ++++++----------
1 file changed, 6 insertions(+), 10 deletions(-)
diff --git a/src/llvm_backend_opt.cpp b/src/llvm_backend_opt.cpp
index 5b8468799..8f1c7ad59 100644
--- a/src/llvm_backend_opt.cpp
+++ b/src/llvm_backend_opt.cpp
@@ -57,17 +57,13 @@ LLVMBool lb_must_preserve_predicate_callback(LLVMValueRef value, void *user_data
void lb_basic_populate_function_pass_manager(LLVMPassManagerRef fpm, i32 optimization_level) {
if (optimization_level == 0 && build_context.ODIN_DEBUG) {
- return;
+ LLVMAddMergedLoadStoreMotionPass(fpm);
+ } else {
+ LLVMAddPromoteMemoryToRegisterPass(fpm);
+ LLVMAddMergedLoadStoreMotionPass(fpm);
+ LLVM_ADD_CONSTANT_VALUE_PASS(fpm);
+ LLVMAddEarlyCSEPass(fpm);
}
- LLVMAddPromoteMemoryToRegisterPass(fpm);
- LLVMAddMergedLoadStoreMotionPass(fpm);
- LLVM_ADD_CONSTANT_VALUE_PASS(fpm);
- LLVMAddEarlyCSEPass(fpm);
-
- // LLVM_ADD_CONSTANT_VALUE_PASS(fpm);
- // LLVMAddMergedLoadStoreMotionPass(fpm);
- // LLVMAddPromoteMemoryToRegisterPass(fpm);
- // LLVMAddCFGSimplificationPass(fpm);
}
void lb_populate_function_pass_manager(lbModule *m, LLVMPassManagerRef fpm, bool ignore_memcpy_pass, i32 optimization_level) {
From 6c4867081985a8dd1cccb3e5c503d72807e4ee87 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Sat, 15 Jan 2022 17:34:35 +0000
Subject: [PATCH 106/710] Make `ODIN_BUILD_MODE` an enum type
---
core/runtime/core.odin | 13 ++++
core/runtime/entry_unix.odin | 2 +-
core/runtime/entry_windows.odin | 2 +-
core/runtime/internal.odin | 2 +-
src/build_settings.cpp | 23 +------
src/check_expr.cpp | 112 ++++++++++++++++++--------------
src/checker.cpp | 64 +++++++++++++++++-
7 files changed, 144 insertions(+), 74 deletions(-)
diff --git a/core/runtime/core.odin b/core/runtime/core.odin
index be30eef02..91b6bf5ca 100644
--- a/core/runtime/core.odin
+++ b/core/runtime/core.odin
@@ -386,6 +386,19 @@ Raw_Cstring :: struct {
}
+/*
+ // Defined internally by the compiler
+ Odin_Build_Mode_Type :: enum int {
+ Executable,
+ Dynamic,
+ Object,
+ Assembly,
+ LLVM_IR,
+ }
+*/
+Odin_Build_Mode_Type :: type_of(ODIN_BUILD_MODE)
+
+
/////////////////////////////
// Init Startup Procedures //
/////////////////////////////
diff --git a/core/runtime/entry_unix.odin b/core/runtime/entry_unix.odin
index 38116c0f9..67d2cbcb7 100644
--- a/core/runtime/entry_unix.odin
+++ b/core/runtime/entry_unix.odin
@@ -4,7 +4,7 @@ package runtime
import "core:intrinsics"
-when ODIN_BUILD_MODE == "dynamic" {
+when ODIN_BUILD_MODE == .Dynamic {
@(link_name="_odin_entry_point", linkage="strong", require, link_section=".init")
_odin_entry_point :: proc "c" () {
context = default_context()
diff --git a/core/runtime/entry_windows.odin b/core/runtime/entry_windows.odin
index ef8b0c529..97a5bebe6 100644
--- a/core/runtime/entry_windows.odin
+++ b/core/runtime/entry_windows.odin
@@ -4,7 +4,7 @@ package runtime
import "core:intrinsics"
-when ODIN_BUILD_MODE == "dynamic" {
+when ODIN_BUILD_MODE == .Dynamic {
@(link_name="DllMain", linkage="strong", require)
DllMain :: proc "stdcall" (hinstDLL: rawptr, fdwReason: u32, lpReserved: rawptr) -> b32 {
context = default_context()
diff --git a/core/runtime/internal.odin b/core/runtime/internal.odin
index 6498c4db7..7b283a132 100644
--- a/core/runtime/internal.odin
+++ b/core/runtime/internal.odin
@@ -8,7 +8,7 @@ IS_WASM :: ODIN_ARCH == "wasm32" || ODIN_ARCH == "wasm64"
@(private)
RUNTIME_LINKAGE :: "strong" when (
(ODIN_USE_SEPARATE_MODULES ||
- ODIN_BUILD_MODE == "dynamic" ||
+ ODIN_BUILD_MODE == .Dynamic ||
!ODIN_NO_CRT) &&
!IS_WASM) else "internal"
RUNTIME_REQUIRE :: true
diff --git a/src/build_settings.cpp b/src/build_settings.cpp
index ccae0fcf0..bafa93042 100644
--- a/src/build_settings.cpp
+++ b/src/build_settings.cpp
@@ -119,6 +119,8 @@ enum BuildModeKind {
BuildMode_Object,
BuildMode_Assembly,
BuildMode_LLVM_IR,
+
+ BuildMode_COUNT,
};
enum CommandKind : u32 {
@@ -172,10 +174,9 @@ struct BuildContext {
String ODIN_VENDOR; // compiler vendor
String ODIN_VERSION; // compiler version
String ODIN_ROOT; // Odin ROOT
- String ODIN_BUILD_MODE;
bool ODIN_DEBUG; // Odin in debug mode
bool ODIN_DISABLE_ASSERT; // Whether the default 'assert' et al is disabled in code or not
- bool ODIN_DEFAULT_TO_NIL_ALLOCATOR; // Whether the default allocator is a "nil" allocator or not (i.e. it does nothing)
+bool ODIN_DEFAULT_TO_NIL_ALLOCATOR; // Whether the default allocator is a "nil" allocator or not (i.e. it does nothing)
TargetEndianKind endian_kind;
@@ -855,24 +856,6 @@ void init_build_context(TargetMetrics *cross_target) {
bc->ODIN_VENDOR = str_lit("odin");
bc->ODIN_VERSION = ODIN_VERSION;
bc->ODIN_ROOT = odin_root_dir();
- switch (bc->build_mode) {
- default:
- case BuildMode_Executable:
- bc->ODIN_BUILD_MODE = str_lit("executable");
- break;
- case BuildMode_DynamicLibrary:
- bc->ODIN_BUILD_MODE = str_lit("dynamic");
- break;
- case BuildMode_Object:
- bc->ODIN_BUILD_MODE = str_lit("object");
- break;
- case BuildMode_Assembly:
- bc->ODIN_BUILD_MODE = str_lit("assembly");
- break;
- case BuildMode_LLVM_IR:
- bc->ODIN_BUILD_MODE = str_lit("llvm-ir");
- break;
- }
bc->copy_file_contents = true;
diff --git a/src/check_expr.cpp b/src/check_expr.cpp
index 1162cefee..8667d8734 100644
--- a/src/check_expr.cpp
+++ b/src/check_expr.cpp
@@ -119,6 +119,58 @@ void check_or_else_split_types(CheckerContext *c, Operand *x, String const &name
void check_or_else_expr_no_value_error(CheckerContext *c, String const &name, Operand const &x, Type *type_hint);
void check_or_return_split_types(CheckerContext *c, Operand *x, String const &name, Type **left_type_, Type **right_type_);
+
+void check_did_you_mean_print(DidYouMeanAnswers *d, char const *prefix = "") {
+ auto results = did_you_mean_results(d);
+ if (results.count != 0) {
+ error_line("\tSuggestion: Did you mean?\n");
+ for_array(i, results) {
+ String const &target = results[i].target;
+ error_line("\t\t%s%.*s\n", prefix, LIT(target));
+ // error_line("\t\t%.*s %td\n", LIT(target), results[i].distance);
+ }
+ }
+}
+
+void check_did_you_mean_type(String const &name, Array const &fields, char const *prefix = "") {
+ ERROR_BLOCK();
+
+ DidYouMeanAnswers d = did_you_mean_make(heap_allocator(), fields.count, name);
+ defer (did_you_mean_destroy(&d));
+
+ for_array(i, fields) {
+ did_you_mean_append(&d, fields[i]->token.string);
+ }
+ check_did_you_mean_print(&d, prefix);
+}
+
+void check_did_you_mean_type(String const &name, Slice const &fields, char const *prefix = "") {
+ ERROR_BLOCK();
+
+ DidYouMeanAnswers d = did_you_mean_make(heap_allocator(), fields.count, name);
+ defer (did_you_mean_destroy(&d));
+
+ for_array(i, fields) {
+ did_you_mean_append(&d, fields[i]->token.string);
+ }
+ check_did_you_mean_print(&d, prefix);
+}
+
+void check_did_you_mean_scope(String const &name, Scope *scope, char const *prefix = "") {
+ ERROR_BLOCK();
+
+ DidYouMeanAnswers d = did_you_mean_make(heap_allocator(), scope->elements.entries.count, name);
+ defer (did_you_mean_destroy(&d));
+
+ mutex_lock(&scope->mutex);
+ for_array(i, scope->elements.entries) {
+ Entity *e = scope->elements.entries[i].value;
+ did_you_mean_append(&d, e->token.string);
+ }
+ mutex_unlock(&scope->mutex);
+ check_did_you_mean_print(&d, prefix);
+}
+
Entity *entity_from_expr(Ast *expr) {
expr = unparen_expr(expr);
switch (expr->kind) {
@@ -3361,7 +3413,17 @@ void convert_untyped_error(CheckerContext *c, Operand *operand, Type *target_typ
}
}
}
+ ERROR_BLOCK();
+
error(operand->expr, "Cannot convert untyped value '%s' to '%s' from '%s'%s", expr_str, type_str, from_type_str, extra_text);
+ if (operand->value.kind == ExactValue_String) {
+ String key = operand->value.value_string;
+ if (is_type_string(operand->type) && is_type_enum(target_type)) {
+ gb_printf_err("HERE!\n");
+ Type *et = base_type(target_type);
+ check_did_you_mean_type(key, et->Enum.fields, ".");
+ }
+ }
gb_string_free(from_type_str);
gb_string_free(type_str);
@@ -3979,56 +4041,6 @@ ExactValue get_constant_field(CheckerContext *c, Operand const *operand, Selecti
if (success_) *success_ = true;
return empty_exact_value;
}
-void check_did_you_mean_print(DidYouMeanAnswers *d) {
- auto results = did_you_mean_results(d);
- if (results.count != 0) {
- error_line("\tSuggestion: Did you mean?\n");
- for_array(i, results) {
- String const &target = results[i].target;
- error_line("\t\t%.*s\n", LIT(target));
- // error_line("\t\t%.*s %td\n", LIT(target), results[i].distance);
- }
- }
-}
-
-void check_did_you_mean_type(String const &name, Array const &fields) {
- ERROR_BLOCK();
-
- DidYouMeanAnswers d = did_you_mean_make(heap_allocator(), fields.count, name);
- defer (did_you_mean_destroy(&d));
-
- for_array(i, fields) {
- did_you_mean_append(&d, fields[i]->token.string);
- }
- check_did_you_mean_print(&d);
-}
-
-void check_did_you_mean_type(String const &name, Slice const &fields) {
- ERROR_BLOCK();
-
- DidYouMeanAnswers d = did_you_mean_make(heap_allocator(), fields.count, name);
- defer (did_you_mean_destroy(&d));
-
- for_array(i, fields) {
- did_you_mean_append(&d, fields[i]->token.string);
- }
- check_did_you_mean_print(&d);
-}
-
-void check_did_you_mean_scope(String const &name, Scope *scope) {
- ERROR_BLOCK();
-
- DidYouMeanAnswers d = did_you_mean_make(heap_allocator(), scope->elements.entries.count, name);
- defer (did_you_mean_destroy(&d));
-
- mutex_lock(&scope->mutex);
- for_array(i, scope->elements.entries) {
- Entity *e = scope->elements.entries[i].value;
- did_you_mean_append(&d, e->token.string);
- }
- mutex_unlock(&scope->mutex);
- check_did_you_mean_print(&d);
-}
Type *determine_swizzle_array_type(Type *original_type, Type *type_hint, isize new_count) {
Type *array_type = base_type(type_deref(original_type));
diff --git a/src/checker.cpp b/src/checker.cpp
index f261c8f4a..c3dcd1d11 100644
--- a/src/checker.cpp
+++ b/src/checker.cpp
@@ -780,6 +780,54 @@ AstPackage *create_builtin_package(char const *name) {
return pkg;
}
+struct GlobalEnumValue {
+ char const *name;
+ i64 value;
+};
+
+Slice add_global_enum_type(String const &type_name, GlobalEnumValue *values, isize value_count) {
+ Scope *scope = create_scope(nullptr, builtin_pkg->scope);
+ Entity *e = alloc_entity_type_name(scope, make_token_ident(type_name), nullptr, EntityState_Resolved);
+
+ Type *enum_type = alloc_type_enum();
+ Type *named_type = alloc_type_named(type_name, enum_type, e);
+ set_base_type(named_type, enum_type);
+ enum_type->Enum.base_type = t_int;
+ enum_type->Enum.scope = scope;
+
+ auto fields = array_make(permanent_allocator(), value_count);
+ for (isize i = 0; i < value_count; i++) {
+ i64 value = values[i].value;
+ Entity *e = alloc_entity_constant(scope, make_token_ident(values[i].name), named_type, exact_value_i64(value));
+ e->flags |= EntityFlag_Visited;
+ e->state = EntityState_Resolved;
+ fields[i] = e;
+
+ Entity *ie = scope_insert(scope, e);
+ GB_ASSERT(ie == nullptr);
+ }
+
+
+ enum_type->Enum.fields = fields;
+ enum_type->Enum.min_value_index = 0;
+ enum_type->Enum.max_value_index = value_count-1;
+ enum_type->Enum.min_value = &enum_type->Enum.fields[enum_type->Enum.min_value_index]->Constant.value;
+ enum_type->Enum.max_value = &enum_type->Enum.fields[enum_type->Enum.max_value_index]->Constant.value;
+
+ return slice_from_array(fields);
+}
+void add_global_enum_constant(Slice const &fields, char const *name, i64 value) {
+ for (Entity *field : fields) {
+ GB_ASSERT(field->kind == Entity_Constant);
+ if (value == exact_value_to_i64(field->Constant.value)) {
+ add_global_constant(name, field->type, field->Constant.value);
+ return;
+ }
+ }
+ GB_PANIC("Unfound enum value for global constant: %s %lld", name, cast(long long)value);
+}
+
+
void init_universal(void) {
BuildContext *bc = &build_context;
@@ -815,7 +863,21 @@ void init_universal(void) {
add_global_string_constant("ODIN_VERSION", bc->ODIN_VERSION);
add_global_string_constant("ODIN_ROOT", bc->ODIN_ROOT);
- add_global_string_constant("ODIN_BUILD_MODE", bc->ODIN_BUILD_MODE);
+ {
+ GlobalEnumValue values[BuildMode_COUNT] = {
+ {"Executable", BuildMode_Executable},
+ {"Dynamic", BuildMode_DynamicLibrary},
+ {"Object", BuildMode_Object},
+ {"Assembly", BuildMode_Assembly},
+ {"LLVM_IR", BuildMode_LLVM_IR},
+ };
+
+ auto fields = add_global_enum_type(str_lit("Odin_Build_Mode_Type"), values, gb_count_of(values));
+ add_global_enum_constant(fields, "ODIN_BUILD_MODE", bc->build_mode);
+ }
+
+ // add_global_string_constant("ODIN_BUILD_MODE", bc->ODIN_BUILD_MODE);
+
add_global_bool_constant("ODIN_DEBUG", bc->ODIN_DEBUG);
add_global_bool_constant("ODIN_DISABLE_ASSERT", bc->ODIN_DISABLE_ASSERT);
add_global_bool_constant("ODIN_DEFAULT_TO_NIL_ALLOCATOR", bc->ODIN_DEFAULT_TO_NIL_ALLOCATOR);
From 29ebe0c3c92724f74392687ee859a0c2db503b0d Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Sat, 15 Jan 2022 17:40:00 +0000
Subject: [PATCH 107/710] Rename architecture `386` to `i386`
---
core/c/c.odin | 2 +-
core/crypto/_fiat/field_poly1305/field.odin | 2 +-
core/crypto/chacha20/chacha20.odin | 2 +-
core/runtime/entry_windows.odin | 2 +-
...ndows_386.odin => procs_windows_i386.odin} | 0
core/sys/cpu/cpu_x86.odin | 2 +-
core/sys/unix/syscalls_linux.odin | 2 +-
src/build_settings.cpp | 30 +++++++++----------
src/check_builtin.cpp | 2 +-
src/llvm_abi.cpp | 2 +-
src/llvm_backend.cpp | 4 +--
src/llvm_backend_expr.cpp | 2 +-
src/llvm_backend_proc.cpp | 4 +--
src/llvm_backend_utility.cpp | 2 +-
src/microsoft_craziness.h | 10 +++----
15 files changed, 34 insertions(+), 34 deletions(-)
rename core/runtime/{procs_windows_386.odin => procs_windows_i386.odin} (100%)
diff --git a/core/c/c.odin b/core/c/c.odin
index 139d9920a..d0b8e377f 100644
--- a/core/c/c.odin
+++ b/core/c/c.odin
@@ -48,7 +48,7 @@ int_least64_t :: builtin.i64
uint_least64_t :: builtin.u64
// Same on Windows, Linux, and FreeBSD
-when ODIN_ARCH == "386" || ODIN_ARCH == "amd64" {
+when ODIN_ARCH == "i386" || ODIN_ARCH == "amd64" {
int_fast8_t :: builtin.i8
uint_fast8_t :: builtin.u8
int_fast16_t :: builtin.i32
diff --git a/core/crypto/_fiat/field_poly1305/field.odin b/core/crypto/_fiat/field_poly1305/field.odin
index bfb7cf1f9..4ed8acbff 100644
--- a/core/crypto/_fiat/field_poly1305/field.odin
+++ b/core/crypto/_fiat/field_poly1305/field.odin
@@ -22,7 +22,7 @@ fe_from_bytes :: #force_inline proc (out1: ^Tight_Field_Element, arg1: []byte, a
assert(len(arg1) == 16)
- when ODIN_ARCH == "386" || ODIN_ARCH == "amd64" {
+ when ODIN_ARCH == "i386" || ODIN_ARCH == "amd64" {
// While it may be unwise to do deserialization here on our
// own when fiat-crypto provides equivalent functionality,
// doing it this way provides a little under 3x performance
diff --git a/core/crypto/chacha20/chacha20.odin b/core/crypto/chacha20/chacha20.odin
index f6f551692..e32dacb2c 100644
--- a/core/crypto/chacha20/chacha20.odin
+++ b/core/crypto/chacha20/chacha20.odin
@@ -346,7 +346,7 @@ _do_blocks :: proc (ctx: ^Context, dst, src: []byte, nr_blocks: int) {
// Until dedicated assembly can be written leverage the fact that
// the callers of this routine ensure that src/dst are valid.
- when ODIN_ARCH == "386" || ODIN_ARCH == "amd64" {
+ when ODIN_ARCH == "i386" || ODIN_ARCH == "amd64" {
// util.PUT_U32_LE/util.U32_LE are not required on little-endian
// systems that also happen to not be strict about aligned
// memory access.
diff --git a/core/runtime/entry_windows.odin b/core/runtime/entry_windows.odin
index 97a5bebe6..35a6bb421 100644
--- a/core/runtime/entry_windows.odin
+++ b/core/runtime/entry_windows.odin
@@ -22,7 +22,7 @@ when ODIN_BUILD_MODE == .Dynamic {
return true
}
} else when !ODIN_TEST && !ODIN_NO_ENTRY_POINT {
- when ODIN_ARCH == "386" || ODIN_NO_CRT {
+ when ODIN_ARCH == "i386" || ODIN_NO_CRT {
@(link_name="mainCRTStartup", linkage="strong", require)
mainCRTStartup :: proc "stdcall" () -> i32 {
context = default_context()
diff --git a/core/runtime/procs_windows_386.odin b/core/runtime/procs_windows_i386.odin
similarity index 100%
rename from core/runtime/procs_windows_386.odin
rename to core/runtime/procs_windows_i386.odin
diff --git a/core/sys/cpu/cpu_x86.odin b/core/sys/cpu/cpu_x86.odin
index 8f3560a87..146822e61 100644
--- a/core/sys/cpu/cpu_x86.odin
+++ b/core/sys/cpu/cpu_x86.odin
@@ -1,4 +1,4 @@
-//+build 386, amd64
+//+build i386, amd64
package sys_cpu
_cache_line_size :: 64;
diff --git a/core/sys/unix/syscalls_linux.odin b/core/sys/unix/syscalls_linux.odin
index 25c5ed0a1..3dc3d2c74 100644
--- a/core/sys/unix/syscalls_linux.odin
+++ b/core/sys/unix/syscalls_linux.odin
@@ -675,7 +675,7 @@ when ODIN_ARCH == "amd64" {
SYS_landlock_create_ruleset : uintptr : 444
SYS_landlock_add_rule : uintptr : 445
SYS_landlock_restrict_self : uintptr : 446
-} else when ODIN_ARCH == "386" {
+} else when ODIN_ARCH == "i386" {
SYS_restart_syscall : uintptr : 0
SYS_exit : uintptr : 1
SYS_fork : uintptr : 2
diff --git a/src/build_settings.cpp b/src/build_settings.cpp
index bafa93042..5e4534517 100644
--- a/src/build_settings.cpp
+++ b/src/build_settings.cpp
@@ -29,7 +29,7 @@ enum TargetArchKind {
TargetArch_Invalid,
TargetArch_amd64,
- TargetArch_386,
+ TargetArch_i386,
TargetArch_arm64,
TargetArch_wasm32,
TargetArch_wasm64,
@@ -63,7 +63,7 @@ String target_os_names[TargetOs_COUNT] = {
String target_arch_names[TargetArch_COUNT] = {
str_lit(""),
str_lit("amd64"),
- str_lit("386"),
+ str_lit("i386"),
str_lit("arm64"),
str_lit("wasm32"),
str_lit("wasm64"),
@@ -269,9 +269,9 @@ bool global_ignore_warnings(void) {
}
-gb_global TargetMetrics target_windows_386 = {
+gb_global TargetMetrics target_windows_i386 = {
TargetOs_windows,
- TargetArch_386,
+ TargetArch_i386,
4,
8,
str_lit("i386-pc-windows-msvc"),
@@ -285,9 +285,9 @@ gb_global TargetMetrics target_windows_amd64 = {
str_lit("e-m:w-i64:64-f80:128-n8:16:32:64-S128"),
};
-gb_global TargetMetrics target_linux_386 = {
+gb_global TargetMetrics target_linux_i386 = {
TargetOs_linux,
- TargetArch_386,
+ TargetArch_i386,
4,
8,
str_lit("i386-pc-linux-gnu"),
@@ -328,9 +328,9 @@ gb_global TargetMetrics target_darwin_arm64 = {
str_lit("e-m:o-i64:64-i128:128-n32:64-S128"), // TODO(bill): Is this correct?
};
-gb_global TargetMetrics target_freebsd_386 = {
+gb_global TargetMetrics target_freebsd_i386 = {
TargetOs_freebsd,
- TargetArch_386,
+ TargetArch_i386,
4,
8,
str_lit("i386-unknown-freebsd-elf"),
@@ -401,12 +401,12 @@ gb_global NamedTargetMetrics named_targets[] = {
{ str_lit("darwin_amd64"), &target_darwin_amd64 },
{ str_lit("darwin_arm64"), &target_darwin_arm64 },
{ str_lit("essence_amd64"), &target_essence_amd64 },
- { str_lit("linux_386"), &target_linux_386 },
+ { str_lit("linux_i386"), &target_linux_i386 },
{ str_lit("linux_amd64"), &target_linux_amd64 },
{ str_lit("linux_arm64"), &target_linux_arm64 },
- { str_lit("windows_386"), &target_windows_386 },
+ { str_lit("windows_i386"), &target_windows_i386 },
{ str_lit("windows_amd64"), &target_windows_amd64 },
- { str_lit("freebsd_386"), &target_freebsd_386 },
+ { str_lit("freebsd_i386"), &target_freebsd_i386 },
{ str_lit("freebsd_amd64"), &target_freebsd_amd64 },
{ str_lit("freestanding_wasm32"), &target_freestanding_wasm32 },
{ str_lit("wasi_wasm32"), &target_wasi_wasm32 },
@@ -879,13 +879,13 @@ void init_build_context(TargetMetrics *cross_target) {
#endif
#else
#if defined(GB_SYSTEM_WINDOWS)
- metrics = &target_windows_386;
+ metrics = &target_windows_i386;
#elif defined(GB_SYSTEM_OSX)
#error "Build Error: Unsupported architecture"
#elif defined(GB_SYSTEM_FREEBSD)
- metrics = &target_freebsd_386;
+ metrics = &target_freebsd_i386;
#else
- metrics = &target_linux_386;
+ metrics = &target_linux_i386;
#endif
#endif
@@ -932,7 +932,7 @@ void init_build_context(TargetMetrics *cross_target) {
bc->link_flags = str_lit("-arch x86-64 ");
break;
}
- } else if (bc->metrics.arch == TargetArch_386) {
+ } else if (bc->metrics.arch == TargetArch_i386) {
switch (bc->metrics.os) {
case TargetOs_windows:
bc->link_flags = str_lit("/machine:x86 ");
diff --git a/src/check_builtin.cpp b/src/check_builtin.cpp
index 82ad6d161..a42741976 100644
--- a/src/check_builtin.cpp
+++ b/src/check_builtin.cpp
@@ -3225,7 +3225,7 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
case TargetOs_essence:
case TargetOs_freebsd:
switch (build_context.metrics.arch) {
- case TargetArch_386:
+ case TargetArch_i386:
case TargetArch_amd64:
case TargetArch_arm64:
max_arg_count = 7;
diff --git a/src/llvm_abi.cpp b/src/llvm_abi.cpp
index 42f05bb27..310df6639 100644
--- a/src/llvm_abi.cpp
+++ b/src/llvm_abi.cpp
@@ -1193,7 +1193,7 @@ LB_ABI_INFO(lb_get_abi_info) {
} else {
return lbAbiAmd64SysV::abi_info(c, arg_types, arg_count, return_type, return_is_defined, calling_convention);
}
- case TargetArch_386:
+ case TargetArch_i386:
return lbAbi386::abi_info(c, arg_types, arg_count, return_type, return_is_defined, calling_convention);
case TargetArch_arm64:
return lbAbiArm64::abi_info(c, arg_types, arg_count, return_type, return_is_defined, calling_convention);
diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp
index 7a7f20f8d..63fb5d4e9 100644
--- a/src/llvm_backend.cpp
+++ b/src/llvm_backend.cpp
@@ -783,7 +783,7 @@ lbProcedure *lb_create_main_procedure(lbModule *m, lbProcedure *startup_runtime)
params->Tuple.variables[1] = alloc_entity_param(nullptr, make_token_ident("fdwReason"), t_u32, false, true);
params->Tuple.variables[2] = alloc_entity_param(nullptr, make_token_ident("lpReserved"), t_rawptr, false, true);
call_cleanup = false;
- } else if (build_context.metrics.os == TargetOs_windows && (build_context.metrics.arch == TargetArch_386 || build_context.no_crt)) {
+ } else if (build_context.metrics.os == TargetOs_windows && (build_context.metrics.arch == TargetArch_i386 || build_context.no_crt)) {
name = str_lit("mainCRTStartup");
} else if (is_arch_wasm()) {
name = str_lit("_start");
@@ -1140,7 +1140,7 @@ void lb_generate_code(lbGenerator *gen) {
switch (build_context.metrics.arch) {
case TargetArch_amd64:
- case TargetArch_386:
+ case TargetArch_i386:
LLVMInitializeX86TargetInfo();
LLVMInitializeX86Target();
LLVMInitializeX86TargetMC();
diff --git a/src/llvm_backend_expr.cpp b/src/llvm_backend_expr.cpp
index 0b2f6b3fd..1f0ed6434 100644
--- a/src/llvm_backend_expr.cpp
+++ b/src/llvm_backend_expr.cpp
@@ -508,7 +508,7 @@ bool lb_is_matrix_simdable(Type *t) {
case TargetArch_arm64:
// TODO(bill): determine when this is fine
return true;
- case TargetArch_386:
+ case TargetArch_i386:
case TargetArch_wasm32:
case TargetArch_wasm64:
return false;
diff --git a/src/llvm_backend_proc.cpp b/src/llvm_backend_proc.cpp
index eccf9b360..2a6eb6bb3 100644
--- a/src/llvm_backend_proc.cpp
+++ b/src/llvm_backend_proc.cpp
@@ -1361,7 +1361,7 @@ lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValue const &tv,
}
case BuiltinProc_cpu_relax:
- if (build_context.metrics.arch == TargetArch_386 ||
+ if (build_context.metrics.arch == TargetArch_i386 ||
build_context.metrics.arch == TargetArch_amd64) {
LLVMTypeRef func_type = LLVMFunctionType(LLVMVoidTypeInContext(p->module->ctx), nullptr, 0, false);
LLVMValueRef the_asm = llvm_get_inline_asm(func_type, str_lit("pause"), {});
@@ -2018,7 +2018,7 @@ lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValue const &tv,
inline_asm = llvm_get_inline_asm(func_type, make_string_c(asm_string), make_string_c(constraints));
}
break;
- case TargetArch_386:
+ case TargetArch_i386:
{
GB_ASSERT(arg_count <= 7);
diff --git a/src/llvm_backend_utility.cpp b/src/llvm_backend_utility.cpp
index 3fe96459f..5b1b11b44 100644
--- a/src/llvm_backend_utility.cpp
+++ b/src/llvm_backend_utility.cpp
@@ -1494,7 +1494,7 @@ lbValue lb_emit_mul_add(lbProcedure *p, lbValue a, lbValue b, lbValue c, Type *t
case TargetArch_arm64:
// possible
break;
- case TargetArch_386:
+ case TargetArch_i386:
case TargetArch_wasm32:
case TargetArch_wasm64:
is_possible = false;
diff --git a/src/microsoft_craziness.h b/src/microsoft_craziness.h
index 02f14dda3..b4f815284 100644
--- a/src/microsoft_craziness.h
+++ b/src/microsoft_craziness.h
@@ -460,7 +460,7 @@ bool find_visual_studio_by_fighting_through_microsoft_craziness(Find_Result *res
wchar_t *library_path = nullptr;
if (build_context.metrics.arch == TargetArch_amd64) {
library_path = concat(bstr_inst_path, L"\\VC\\Tools\\MSVC\\", version, L"\\lib\\x64\\");
- } else if (build_context.metrics.arch == TargetArch_386) {
+ } else if (build_context.metrics.arch == TargetArch_i386) {
library_path = concat(bstr_inst_path, L"\\VC\\Tools\\MSVC\\", version, L"\\lib\\x86\\");
} else {
continue;
@@ -472,7 +472,7 @@ bool find_visual_studio_by_fighting_through_microsoft_craziness(Find_Result *res
wchar_t *link_exe_path = nullptr;
if (build_context.metrics.arch == TargetArch_amd64) {
link_exe_path = concat(bstr_inst_path, L"\\VC\\Tools\\MSVC\\", version, L"\\bin\\Hostx64\\x64\\");
- } else if (build_context.metrics.arch == TargetArch_386) {
+ } else if (build_context.metrics.arch == TargetArch_i386) {
link_exe_path = concat(bstr_inst_path, L"\\VC\\Tools\\MSVC\\", version, L"\\bin\\Hostx86\\x86\\");
} else {
continue;
@@ -529,7 +529,7 @@ bool find_visual_studio_by_fighting_through_microsoft_craziness(Find_Result *res
if (build_context.metrics.arch == TargetArch_amd64) {
lib_path = concat(buffer, L"VC\\Lib\\amd64\\");
- } else if (build_context.metrics.arch == TargetArch_386) {
+ } else if (build_context.metrics.arch == TargetArch_i386) {
lib_path = concat(buffer, L"VC\\Lib\\");
} else {
continue;
@@ -542,7 +542,7 @@ bool find_visual_studio_by_fighting_through_microsoft_craziness(Find_Result *res
if (os_file_exists(vcruntime_filename)) {
if (build_context.metrics.arch == TargetArch_amd64) {
result->vs_exe_path = concat(buffer, L"VC\\bin\\");
- } else if (build_context.metrics.arch == TargetArch_386) {
+ } else if (build_context.metrics.arch == TargetArch_i386) {
// result->vs_exe_path = concat(buffer, L"VC\\bin\\amd64_x86\\");
result->vs_exe_path = concat(buffer, L"VC\\bin\\x86_amd64\\");
} else {
@@ -573,7 +573,7 @@ Find_Result find_visual_studio_and_windows_sdk() {
if (build_context.metrics.arch == TargetArch_amd64) {
result.windows_sdk_um_library_path = concat(result.windows_sdk_root, L"um\\x64\\");
result.windows_sdk_ucrt_library_path = concat(result.windows_sdk_root, L"ucrt\\x64\\");
- } else if (build_context.metrics.arch == TargetArch_386) {
+ } else if (build_context.metrics.arch == TargetArch_i386) {
result.windows_sdk_um_library_path = concat(result.windows_sdk_root, L"um\\x86\\");
result.windows_sdk_ucrt_library_path = concat(result.windows_sdk_root, L"ucrt\\x86\\");
}
From 3f59c45740403e538a23f50c2fe4cd25e815531b Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Sat, 15 Jan 2022 17:42:10 +0000
Subject: [PATCH 108/710] Remove `main` creation in llvm_backend.cpp and have
it done purely in the runtime package (partial bootstrapping)
---
src/llvm_backend.cpp | 7 +------
1 file changed, 1 insertion(+), 6 deletions(-)
diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp
index 63fb5d4e9..304effb7f 100644
--- a/src/llvm_backend.cpp
+++ b/src/llvm_backend.cpp
@@ -1572,6 +1572,7 @@ void lb_generate_code(lbGenerator *gen) {
TIME_SECTION("LLVM Runtime Startup Creation (Global Variables)");
lbProcedure *startup_runtime = lb_create_startup_runtime(default_module, startup_type_info, global_variables);
+ gb_unused(startup_runtime);
TIME_SECTION("LLVM Global Procedures and Types");
for_array(i, info->entities) {
@@ -1640,12 +1641,6 @@ void lb_generate_code(lbGenerator *gen) {
}
}
-
- if (!already_has_entry_point) {
- TIME_SECTION("LLVM main");
- lb_create_main_procedure(default_module, startup_runtime);
- }
-
for_array(j, gen->modules.entries) {
lbModule *m = gen->modules.entries[j].value;
for_array(i, m->missing_procedures_to_check) {
From f0529535e02ab175bca3f7ff8c3bc2112d949236 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Sat, 15 Jan 2022 17:53:18 +0000
Subject: [PATCH 109/710] `ODIN_ENDIAN` changed to an enum constant;
`ODIN_ENUM_STRING` is the new string version of the old constant
---
core/crypto/_sha3/_sha3.odin | 4 ++--
core/crypto/haval/haval.odin | 2 +-
core/encoding/json/marshal.odin | 4 ++--
core/fmt/fmt.odin | 4 ++--
core/hash/xxhash/streaming.odin | 2 +-
core/image/png/example.odin | 2 +-
core/image/png/png.odin | 2 +-
core/math/bits/bits.odin | 32 ++++++++++++++++----------------
core/runtime/core.odin | 10 ++++++++++
core/runtime/udivmod128.odin | 2 +-
src/build_settings.cpp | 20 +++++++++-----------
src/checker.cpp | 15 +++++++++++++--
vendor/sdl2/sdl_audio.odin | 2 +-
vendor/sdl2/sdl_pixels.odin | 8 ++++----
14 files changed, 64 insertions(+), 45 deletions(-)
diff --git a/core/crypto/_sha3/_sha3.odin b/core/crypto/_sha3/_sha3.odin
index 76e09bf24..9846aca42 100644
--- a/core/crypto/_sha3/_sha3.odin
+++ b/core/crypto/_sha3/_sha3.odin
@@ -52,7 +52,7 @@ keccakf :: proc "contextless" (st: ^[25]u64) {
t: u64 = ---
bc: [5]u64 = ---
- when ODIN_ENDIAN != "little" {
+ when ODIN_ENDIAN != .Little {
v: uintptr = ---
for i = 0; i < 25; i += 1 {
v := uintptr(&st[i])
@@ -98,7 +98,7 @@ keccakf :: proc "contextless" (st: ^[25]u64) {
st[0] ~= keccakf_rndc[r]
}
- when ODIN_ENDIAN != "little" {
+ when ODIN_ENDIAN != .Little {
for i = 0; i < 25; i += 1 {
v = uintptr(&st[i])
t = st[i]
diff --git a/core/crypto/haval/haval.odin b/core/crypto/haval/haval.odin
index f95ea344d..442a348e9 100644
--- a/core/crypto/haval/haval.odin
+++ b/core/crypto/haval/haval.odin
@@ -1332,7 +1332,7 @@ update :: proc(ctx: ^Haval_Context, data: []byte) {
}
ctx.count[1] += str_len >> 29
- when ODIN_ENDIAN == "little" {
+ when ODIN_ENDIAN == .Little {
if rmd_len + str_len >= 128 {
copy(util.slice_to_bytes(ctx.block[:])[rmd_len:], data[:fill_len])
block(ctx, ctx.rounds)
diff --git a/core/encoding/json/marshal.odin b/core/encoding/json/marshal.odin
index adbcb95be..aa1c1559c 100644
--- a/core/encoding/json/marshal.odin
+++ b/core/encoding/json/marshal.odin
@@ -285,8 +285,8 @@ marshal_to_writer :: proc(w: io.Writer, v: any) -> (err: Marshal_Error) {
case runtime.Type_Info_Integer:
switch info.endianness {
case .Platform: return false
- case .Little: return ODIN_ENDIAN != "little"
- case .Big: return ODIN_ENDIAN != "big"
+ case .Little: return ODIN_ENDIAN != .Little
+ case .Big: return ODIN_ENDIAN != .Big
}
}
return false
diff --git a/core/fmt/fmt.odin b/core/fmt/fmt.odin
index a9ff6ca47..2cc192c12 100644
--- a/core/fmt/fmt.odin
+++ b/core/fmt/fmt.odin
@@ -1092,8 +1092,8 @@ fmt_bit_set :: proc(fi: ^Info, v: any, name: string = "") {
case runtime.Type_Info_Integer:
switch info.endianness {
case .Platform: return false
- case .Little: return ODIN_ENDIAN != "little"
- case .Big: return ODIN_ENDIAN != "big"
+ case .Little: return ODIN_ENDIAN != .Little
+ case .Big: return ODIN_ENDIAN != .Big
}
}
return false
diff --git a/core/hash/xxhash/streaming.odin b/core/hash/xxhash/streaming.odin
index 737e37eae..d6df1089f 100644
--- a/core/hash/xxhash/streaming.odin
+++ b/core/hash/xxhash/streaming.odin
@@ -96,7 +96,7 @@ XXH3_128_canonical_from_hash :: proc(hash: XXH128_hash_t) -> (canonical: XXH128_
#assert(size_of(XXH128_canonical) == size_of(XXH128_hash_t))
t := hash
- when ODIN_ENDIAN == "little" {
+ when ODIN_ENDIAN == .Little {
t.high = byte_swap(t.high)
t.low = byte_swap(t.low)
}
diff --git a/core/image/png/example.odin b/core/image/png/example.odin
index 5e7dca4c8..f4eb5128e 100644
--- a/core/image/png/example.odin
+++ b/core/image/png/example.odin
@@ -189,7 +189,7 @@ write_image_as_ppm :: proc(filename: string, image: ^image.Image) -> (success: b
img := image
// PBM 16-bit images are big endian
- when ODIN_ENDIAN == "little" {
+ when ODIN_ENDIAN == .Little {
if img.depth == 16 {
// The pixel components are in Big Endian. Let's byteswap back.
input := mem.slice_data_cast([]u16, img.pixels.buf[:])
diff --git a/core/image/png/png.odin b/core/image/png/png.odin
index f77bf7519..da76a4588 100644
--- a/core/image/png/png.odin
+++ b/core/image/png/png.odin
@@ -1611,7 +1611,7 @@ defilter :: proc(img: ^Image, filter_bytes: ^bytes.Buffer, header: ^image.PNG_IH
}
}
}
- when ODIN_ENDIAN == "little" {
+ when ODIN_ENDIAN == .Little {
if img.depth == 16 {
// The pixel components are in Big Endian. Let's byteswap.
input := mem.slice_data_cast([]u16be, img.pixels.buf[:])
diff --git a/core/math/bits/bits.odin b/core/math/bits/bits.odin
index bff984cc7..850e8038a 100644
--- a/core/math/bits/bits.odin
+++ b/core/math/bits/bits.odin
@@ -69,29 +69,29 @@ rotate_left :: proc(x: uint, k: int) -> uint {
}
from_be_u8 :: proc(i: u8) -> u8 { return i }
-from_be_u16 :: proc(i: u16) -> u16 { when ODIN_ENDIAN == "big" { return i } else { return byte_swap(i) } }
-from_be_u32 :: proc(i: u32) -> u32 { when ODIN_ENDIAN == "big" { return i } else { return byte_swap(i) } }
-from_be_u64 :: proc(i: u64) -> u64 { when ODIN_ENDIAN == "big" { return i } else { return byte_swap(i) } }
-from_be_uint :: proc(i: uint) -> uint { when ODIN_ENDIAN == "big" { return i } else { return byte_swap(i) } }
+from_be_u16 :: proc(i: u16) -> u16 { when ODIN_ENDIAN == .Big { return i } else { return byte_swap(i) } }
+from_be_u32 :: proc(i: u32) -> u32 { when ODIN_ENDIAN == .Big { return i } else { return byte_swap(i) } }
+from_be_u64 :: proc(i: u64) -> u64 { when ODIN_ENDIAN == .Big { return i } else { return byte_swap(i) } }
+from_be_uint :: proc(i: uint) -> uint { when ODIN_ENDIAN == .Big { return i } else { return byte_swap(i) } }
from_le_u8 :: proc(i: u8) -> u8 { return i }
-from_le_u16 :: proc(i: u16) -> u16 { when ODIN_ENDIAN == "little" { return i } else { return byte_swap(i) } }
-from_le_u32 :: proc(i: u32) -> u32 { when ODIN_ENDIAN == "little" { return i } else { return byte_swap(i) } }
-from_le_u64 :: proc(i: u64) -> u64 { when ODIN_ENDIAN == "little" { return i } else { return byte_swap(i) } }
-from_le_uint :: proc(i: uint) -> uint { when ODIN_ENDIAN == "little" { return i } else { return byte_swap(i) } }
+from_le_u16 :: proc(i: u16) -> u16 { when ODIN_ENDIAN == .Little { return i } else { return byte_swap(i) } }
+from_le_u32 :: proc(i: u32) -> u32 { when ODIN_ENDIAN == .Little { return i } else { return byte_swap(i) } }
+from_le_u64 :: proc(i: u64) -> u64 { when ODIN_ENDIAN == .Little { return i } else { return byte_swap(i) } }
+from_le_uint :: proc(i: uint) -> uint { when ODIN_ENDIAN == .Little { return i } else { return byte_swap(i) } }
to_be_u8 :: proc(i: u8) -> u8 { return i }
-to_be_u16 :: proc(i: u16) -> u16 { when ODIN_ENDIAN == "big" { return i } else { return byte_swap(i) } }
-to_be_u32 :: proc(i: u32) -> u32 { when ODIN_ENDIAN == "big" { return i } else { return byte_swap(i) } }
-to_be_u64 :: proc(i: u64) -> u64 { when ODIN_ENDIAN == "big" { return i } else { return byte_swap(i) } }
-to_be_uint :: proc(i: uint) -> uint { when ODIN_ENDIAN == "big" { return i } else { return byte_swap(i) } }
+to_be_u16 :: proc(i: u16) -> u16 { when ODIN_ENDIAN == .Big { return i } else { return byte_swap(i) } }
+to_be_u32 :: proc(i: u32) -> u32 { when ODIN_ENDIAN == .Big { return i } else { return byte_swap(i) } }
+to_be_u64 :: proc(i: u64) -> u64 { when ODIN_ENDIAN == .Big { return i } else { return byte_swap(i) } }
+to_be_uint :: proc(i: uint) -> uint { when ODIN_ENDIAN == .Big { return i } else { return byte_swap(i) } }
to_le_u8 :: proc(i: u8) -> u8 { return i }
-to_le_u16 :: proc(i: u16) -> u16 { when ODIN_ENDIAN == "little" { return i } else { return byte_swap(i) } }
-to_le_u32 :: proc(i: u32) -> u32 { when ODIN_ENDIAN == "little" { return i } else { return byte_swap(i) } }
-to_le_u64 :: proc(i: u64) -> u64 { when ODIN_ENDIAN == "little" { return i } else { return byte_swap(i) } }
-to_le_uint :: proc(i: uint) -> uint { when ODIN_ENDIAN == "little" { return i } else { return byte_swap(i) } }
+to_le_u16 :: proc(i: u16) -> u16 { when ODIN_ENDIAN == .Little { return i } else { return byte_swap(i) } }
+to_le_u32 :: proc(i: u32) -> u32 { when ODIN_ENDIAN == .Little { return i } else { return byte_swap(i) } }
+to_le_u64 :: proc(i: u64) -> u64 { when ODIN_ENDIAN == .Little { return i } else { return byte_swap(i) } }
+to_le_uint :: proc(i: uint) -> uint { when ODIN_ENDIAN == .Little { return i } else { return byte_swap(i) } }
diff --git a/core/runtime/core.odin b/core/runtime/core.odin
index 91b6bf5ca..424650828 100644
--- a/core/runtime/core.odin
+++ b/core/runtime/core.odin
@@ -398,6 +398,16 @@ Raw_Cstring :: struct {
*/
Odin_Build_Mode_Type :: type_of(ODIN_BUILD_MODE)
+/*
+ // Defined internally by the compiler
+ Odin_Endian_Type :: enum int {
+ Unknown,
+ Little,
+ Big,
+ }
+*/
+Odin_Endian_Type :: type_of(ODIN_ENDIAN)
+
/////////////////////////////
// Init Startup Procedures //
diff --git a/core/runtime/udivmod128.odin b/core/runtime/udivmod128.odin
index 1fd1b5f84..87ef73c2c 100644
--- a/core/runtime/udivmod128.odin
+++ b/core/runtime/udivmod128.odin
@@ -11,7 +11,7 @@ udivmod128 :: proc "c" (a, b: u128, rem: ^u128) -> u128 {
q, r: [2]u64
sr: u32 = 0
- low :: 1 when ODIN_ENDIAN == "big" else 0
+ low :: 1 when ODIN_ENDIAN == .Big else 0
high :: 1 - low
U64_BITS :: 8*size_of(u64)
U128_BITS :: 8*size_of(u128)
diff --git a/src/build_settings.cpp b/src/build_settings.cpp
index 5e4534517..b4a934ec8 100644
--- a/src/build_settings.cpp
+++ b/src/build_settings.cpp
@@ -170,7 +170,6 @@ struct BuildContext {
// Constants
String ODIN_OS; // target operating system
String ODIN_ARCH; // target architecture
- String ODIN_ENDIAN; // target endian
String ODIN_VENDOR; // compiler vendor
String ODIN_VERSION; // compiler version
String ODIN_ROOT; // Odin ROOT
@@ -269,7 +268,7 @@ bool global_ignore_warnings(void) {
}
-gb_global TargetMetrics target_windows_i386 = {
+gb_global TargetMetrics target_windows_386 = {
TargetOs_windows,
TargetArch_i386,
4,
@@ -285,7 +284,7 @@ gb_global TargetMetrics target_windows_amd64 = {
str_lit("e-m:w-i64:64-f80:128-n8:16:32:64-S128"),
};
-gb_global TargetMetrics target_linux_i386 = {
+gb_global TargetMetrics target_linux_386 = {
TargetOs_linux,
TargetArch_i386,
4,
@@ -328,7 +327,7 @@ gb_global TargetMetrics target_darwin_arm64 = {
str_lit("e-m:o-i64:64-i128:128-n32:64-S128"), // TODO(bill): Is this correct?
};
-gb_global TargetMetrics target_freebsd_i386 = {
+gb_global TargetMetrics target_freebsd_386 = {
TargetOs_freebsd,
TargetArch_i386,
4,
@@ -401,12 +400,12 @@ gb_global NamedTargetMetrics named_targets[] = {
{ str_lit("darwin_amd64"), &target_darwin_amd64 },
{ str_lit("darwin_arm64"), &target_darwin_arm64 },
{ str_lit("essence_amd64"), &target_essence_amd64 },
- { str_lit("linux_i386"), &target_linux_i386 },
+ { str_lit("linux_386"), &target_linux_386 },
{ str_lit("linux_amd64"), &target_linux_amd64 },
{ str_lit("linux_arm64"), &target_linux_arm64 },
- { str_lit("windows_i386"), &target_windows_i386 },
+ { str_lit("windows_386"), &target_windows_386 },
{ str_lit("windows_amd64"), &target_windows_amd64 },
- { str_lit("freebsd_i386"), &target_freebsd_i386 },
+ { str_lit("freebsd_386"), &target_freebsd_386 },
{ str_lit("freebsd_amd64"), &target_freebsd_amd64 },
{ str_lit("freestanding_wasm32"), &target_freestanding_wasm32 },
{ str_lit("wasi_wasm32"), &target_wasi_wasm32 },
@@ -879,13 +878,13 @@ void init_build_context(TargetMetrics *cross_target) {
#endif
#else
#if defined(GB_SYSTEM_WINDOWS)
- metrics = &target_windows_i386;
+ metrics = &target_windows_386;
#elif defined(GB_SYSTEM_OSX)
#error "Build Error: Unsupported architecture"
#elif defined(GB_SYSTEM_FREEBSD)
- metrics = &target_freebsd_i386;
+ metrics = &target_freebsd_386;
#else
- metrics = &target_linux_i386;
+ metrics = &target_linux_386;
#endif
#endif
@@ -904,7 +903,6 @@ void init_build_context(TargetMetrics *cross_target) {
bc->metrics = *metrics;
bc->ODIN_OS = target_os_names[metrics->os];
bc->ODIN_ARCH = target_arch_names[metrics->arch];
- bc->ODIN_ENDIAN = target_endian_names[target_endians[metrics->arch]];
bc->endian_kind = target_endians[metrics->arch];
bc->word_size = metrics->word_size;
bc->max_align = metrics->max_align;
diff --git a/src/checker.cpp b/src/checker.cpp
index c3dcd1d11..ddb73d33e 100644
--- a/src/checker.cpp
+++ b/src/checker.cpp
@@ -858,7 +858,6 @@ void init_universal(void) {
// TODO(bill): Set through flags in the compiler
add_global_string_constant("ODIN_OS", bc->ODIN_OS);
add_global_string_constant("ODIN_ARCH", bc->ODIN_ARCH);
- add_global_string_constant("ODIN_ENDIAN", bc->ODIN_ENDIAN);
add_global_string_constant("ODIN_VENDOR", bc->ODIN_VENDOR);
add_global_string_constant("ODIN_VERSION", bc->ODIN_VERSION);
add_global_string_constant("ODIN_ROOT", bc->ODIN_ROOT);
@@ -876,7 +875,19 @@ void init_universal(void) {
add_global_enum_constant(fields, "ODIN_BUILD_MODE", bc->build_mode);
}
- // add_global_string_constant("ODIN_BUILD_MODE", bc->ODIN_BUILD_MODE);
+ add_global_string_constant("ODIN_ENDIAN_STRING", target_endian_names[target_endians[bc->metrics.arch]]);
+ {
+ GlobalEnumValue values[TargetEndian_COUNT] = {
+ {"Unknown", TargetEndian_Invalid},
+
+ {"Little", TargetEndian_Little},
+ {"Big", TargetEndian_Big},
+ };
+
+ auto fields = add_global_enum_type(str_lit("Odin_Endian_Type"), values, gb_count_of(values));
+ add_global_enum_constant(fields, "ODIN_ENDIAN", target_endians[bc->metrics.arch]);
+ }
+
add_global_bool_constant("ODIN_DEBUG", bc->ODIN_DEBUG);
add_global_bool_constant("ODIN_DISABLE_ASSERT", bc->ODIN_DISABLE_ASSERT);
diff --git a/vendor/sdl2/sdl_audio.odin b/vendor/sdl2/sdl_audio.odin
index e108e31c6..2c5b7fedb 100644
--- a/vendor/sdl2/sdl_audio.odin
+++ b/vendor/sdl2/sdl_audio.odin
@@ -59,7 +59,7 @@ AUDIO_F32LSB :: 0x8120 /**< 32-bit floating point samples */
AUDIO_F32MSB :: 0x9120 /**< As above, but big-endian byte order */
AUDIO_F32 :: AUDIO_F32LSB
-when ODIN_ENDIAN == "little" {
+when ODIN_ENDIAN == .Little {
AUDIO_U16SYS :: AUDIO_U16LSB
AUDIO_S16SYS :: AUDIO_S16LSB
AUDIO_S32SYS :: AUDIO_S32LSB
diff --git a/vendor/sdl2/sdl_pixels.odin b/vendor/sdl2/sdl_pixels.odin
index a8503c621..22f6db440 100644
--- a/vendor/sdl2/sdl_pixels.odin
+++ b/vendor/sdl2/sdl_pixels.odin
@@ -156,10 +156,10 @@ PixelFormatEnum :: enum u32 {
ARGB2101010 = 1<<28 | PIXELTYPE_PACKED32<<24 | PACKEDORDER_ARGB<<20 | PACKEDLAYOUT_2101010<<16 | 32<<8 | 4<<0,
/* Aliases for RGBA byte arrays of color data, for the current platform */
- RGBA32 = RGBA8888 when ODIN_ENDIAN == "big" else ABGR8888,
- ARGB32 = ARGB8888 when ODIN_ENDIAN == "big" else BGRA8888,
- BGRA32 = BGRA8888 when ODIN_ENDIAN == "big" else ARGB8888,
- ABGR32 = ABGR8888 when ODIN_ENDIAN == "big" else RGBA8888,
+ RGBA32 = RGBA8888 when ODIN_ENDIAN == .Big else ABGR8888,
+ ARGB32 = ARGB8888 when ODIN_ENDIAN == .Big else BGRA8888,
+ BGRA32 = BGRA8888 when ODIN_ENDIAN == .Big else ARGB8888,
+ ABGR32 = ABGR8888 when ODIN_ENDIAN == .Big else RGBA8888,
YV12 = /**< Planar mode: Y + V + U (3 planes) */
'Y'<<24 | 'V'<<16 | '1'<<8 | '2'<<0,
From d57ec4a11d217c906a1ed18161b0dc6beba02089 Mon Sep 17 00:00:00 2001
From: Daniel Gavin
Date: Sun, 16 Jan 2022 13:20:12 +0100
Subject: [PATCH 110/710] Fix return stmt when it's one-lined (check for close
brace).
---
core/odin/parser/parser.odin | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/core/odin/parser/parser.odin b/core/odin/parser/parser.odin
index e8c2c848d..0366e70d2 100644
--- a/core/odin/parser/parser.odin
+++ b/core/odin/parser/parser.odin
@@ -1312,7 +1312,7 @@ parse_stmt :: proc(p: ^Parser) -> ^ast.Stmt {
}
results: [dynamic]^ast.Expr
- for p.curr_tok.kind != .Semicolon {
+ for p.curr_tok.kind != .Semicolon && p.curr_tok.kind != .Close_Brace {
result := parse_expr(p, false)
append(&results, result)
if p.curr_tok.kind != .Comma ||
From 2a325b3da0fc7b6bf41418aee022cc71f0137bfb Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Sun, 16 Jan 2022 12:25:39 +0000
Subject: [PATCH 111/710] Update `ODIN_ENDIAN` usage
---
tests/core/image/test_core_image.odin | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/core/image/test_core_image.odin b/tests/core/image/test_core_image.odin
index 155b69298..23a7c2561 100644
--- a/tests/core/image/test_core_image.odin
+++ b/tests/core/image/test_core_image.odin
@@ -1767,7 +1767,7 @@ write_image_as_ppm :: proc(filename: string, image: ^image.Image) -> (success: b
img := image
// PBM 16-bit images are big endian
- when ODIN_ENDIAN == "little" {
+ when ODIN_ENDIAN == .Little {
if img.depth == 16 {
// The pixel components are in Big Endian. Let's byteswap back.
input := mem.slice_data_cast([]u16, img.pixels.buf[:])
From d4ea02a877cffbb35a19ba6f8eababe1b217c0bf Mon Sep 17 00:00:00 2001
From: Jesse Stiller
Date: Mon, 17 Jan 2022 12:19:06 +1000
Subject: [PATCH 112/710] Extraneous parameters in hlsl/glsl.saturate removed
This is a breaking change to anyone who used glsl/hlsl.saturate functions prior, but the y and z parameters never were used and do not conform to how the saturate function works in HLSL: https://docs.microsoft.com/en-us/windows/win32/direct3dhlsl/dx-graphics-hlsl-saturate
Note however GLSL does not contain a saturate function, but removing it does not accomplish anything good IMO.
---
core/math/linalg/glsl/linalg_glsl.odin | 33 +++++++++++++-------------
core/math/linalg/hlsl/linalg_hlsl.odin | 32 ++++++++++++-------------
2 files changed, 32 insertions(+), 33 deletions(-)
diff --git a/core/math/linalg/glsl/linalg_glsl.odin b/core/math/linalg/glsl/linalg_glsl.odin
index 053182794..7bc68b964 100644
--- a/core/math/linalg/glsl/linalg_glsl.odin
+++ b/core/math/linalg/glsl/linalg_glsl.odin
@@ -693,23 +693,22 @@ saturate :: proc{
saturate_uvec3,
saturate_uvec4,
}
-saturate_i32 :: proc "c" (x, y, z: i32) -> i32 { return builtin.clamp(x, 0, 1) }
-saturate_u32 :: proc "c" (x, y, z: u32) -> u32 { return builtin.clamp(x, 0, 1) }
-saturate_f32 :: proc "c" (x, y, z: f32) -> f32 { return builtin.clamp(x, 0, 1) }
-saturate_f64 :: proc "c" (x, y, z: f64) -> f64 { return builtin.clamp(x, 0, 1) }
-saturate_vec2 :: proc "c" (x, y, z: vec2) -> vec2 { return {builtin.clamp(x.x, 0, 1), builtin.clamp(x.y, 0, 1)} }
-saturate_vec3 :: proc "c" (x, y, z: vec3) -> vec3 { return {builtin.clamp(x.x, 0, 1), builtin.clamp(x.y, 0, 1), builtin.clamp(x.z, 0, 1)} }
-saturate_vec4 :: proc "c" (x, y, z: vec4) -> vec4 { return {builtin.clamp(x.x, 0, 1), builtin.clamp(x.y, 0, 1), builtin.clamp(x.z, 0, 1), builtin.clamp(x.w, 0, 1)} }
-saturate_dvec2 :: proc "c" (x, y, z: dvec2) -> dvec2 { return {builtin.clamp(x.x, 0, 1), builtin.clamp(x.y, 0, 1)} }
-saturate_dvec3 :: proc "c" (x, y, z: dvec3) -> dvec3 { return {builtin.clamp(x.x, 0, 1), builtin.clamp(x.y, 0, 1), builtin.clamp(x.z, 0, 1)} }
-saturate_dvec4 :: proc "c" (x, y, z: dvec4) -> dvec4 { return {builtin.clamp(x.x, 0, 1), builtin.clamp(x.y, 0, 1), builtin.clamp(x.z, 0, 1), builtin.clamp(x.w, 0, 1)} }
-saturate_ivec2 :: proc "c" (x, y, z: ivec2) -> ivec2 { return {builtin.clamp(x.x, 0, 1), builtin.clamp(x.y, 0, 1)} }
-saturate_ivec3 :: proc "c" (x, y, z: ivec3) -> ivec3 { return {builtin.clamp(x.x, 0, 1), builtin.clamp(x.y, 0, 1), builtin.clamp(x.z, 0, 1)} }
-saturate_ivec4 :: proc "c" (x, y, z: ivec4) -> ivec4 { return {builtin.clamp(x.x, 0, 1), builtin.clamp(x.y, 0, 1), builtin.clamp(x.z, 0, 1), builtin.clamp(x.w, 0, 1)} }
-saturate_uvec2 :: proc "c" (x, y, z: uvec2) -> uvec2 { return {builtin.clamp(x.x, 0, 1), builtin.clamp(x.y, 0, 1)} }
-saturate_uvec3 :: proc "c" (x, y, z: uvec3) -> uvec3 { return {builtin.clamp(x.x, 0, 1), builtin.clamp(x.y, 0, 1), builtin.clamp(x.z, 0, 1)} }
-saturate_uvec4 :: proc "c" (x, y, z: uvec4) -> uvec4 { return {builtin.clamp(x.x, 0, 1), builtin.clamp(x.y, 0, 1), builtin.clamp(x.z, 0, 1), builtin.clamp(x.w, 0, 1)} }
-
+saturate_i32 :: proc "c" (v: i32) -> i32 { return builtin.clamp(v, 0, 1) }
+saturate_u32 :: proc "c" (v: u32) -> u32 { return builtin.clamp(v, 0, 1) }
+saturate_f32 :: proc "c" (v: f32) -> f32 { return builtin.clamp(v, 0, 1) }
+saturate_f64 :: proc "c" (v: f64) -> f64 { return builtin.clamp(v, 0, 1) }
+saturate_vec2 :: proc "c" (v: vec2) -> vec2 { return {builtin.clamp(v.x, 0, 1), builtin.clamp(v.y, 0, 1)} }
+saturate_vec3 :: proc "c" (v: vec3) -> vec3 { return {builtin.clamp(v.x, 0, 1), builtin.clamp(v.y, 0, 1), builtin.clamp(v.z, 0, 1)} }
+saturate_vec4 :: proc "c" (v: vec4) -> vec4 { return {builtin.clamp(v.x, 0, 1), builtin.clamp(v.y, 0, 1), builtin.clamp(v.z, 0, 1), builtin.clamp(v.w, 0, 1)} }
+saturate_dvec2 :: proc "c" (v: dvec2) -> dvec2 { return {builtin.clamp(v.x, 0, 1), builtin.clamp(v.y, 0, 1)} }
+saturate_dvec3 :: proc "c" (v: dvec3) -> dvec3 { return {builtin.clamp(v.x, 0, 1), builtin.clamp(v.y, 0, 1), builtin.clamp(v.z, 0, 1)} }
+saturate_dvec4 :: proc "c" (v: dvec4) -> dvec4 { return {builtin.clamp(v.x, 0, 1), builtin.clamp(v.y, 0, 1), builtin.clamp(v.z, 0, 1), builtin.clamp(v.w, 0, 1)} }
+saturate_ivec2 :: proc "c" (v: ivec2) -> ivec2 { return {builtin.clamp(v.x, 0, 1), builtin.clamp(v.y, 0, 1)} }
+saturate_ivec3 :: proc "c" (v: ivec3) -> ivec3 { return {builtin.clamp(v.x, 0, 1), builtin.clamp(v.y, 0, 1), builtin.clamp(v.z, 0, 1)} }
+saturate_ivec4 :: proc "c" (v: ivec4) -> ivec4 { return {builtin.clamp(v.x, 0, 1), builtin.clamp(v.y, 0, 1), builtin.clamp(v.z, 0, 1), builtin.clamp(v.w, 0, 1)} }
+saturate_uvec2 :: proc "c" (v: uvec2) -> uvec2 { return {builtin.clamp(v.x, 0, 1), builtin.clamp(v.y, 0, 1)} }
+saturate_uvec3 :: proc "c" (v: uvec3) -> uvec3 { return {builtin.clamp(v.x, 0, 1), builtin.clamp(v.y, 0, 1), builtin.clamp(v.z, 0, 1)} }
+saturate_uvec4 :: proc "c" (v: uvec4) -> uvec4 { return {builtin.clamp(v.x, 0, 1), builtin.clamp(v.y, 0, 1), builtin.clamp(v.z, 0, 1), builtin.clamp(v.w, 0, 1)} }
mix :: proc{
mix_f32,
diff --git a/core/math/linalg/hlsl/linalg_hlsl.odin b/core/math/linalg/hlsl/linalg_hlsl.odin
index 0eb8413a9..4391975ba 100644
--- a/core/math/linalg/hlsl/linalg_hlsl.odin
+++ b/core/math/linalg/hlsl/linalg_hlsl.odin
@@ -772,22 +772,22 @@ saturate :: proc{
saturate_uint3,
saturate_uint4,
}
-saturate_int :: proc "c" (x, y, z: int) -> int { return builtin.clamp(x, 0, 1) }
-saturate_uint :: proc "c" (x, y, z: uint) -> uint { return builtin.clamp(x, 0, 1) }
-saturate_float :: proc "c" (x, y, z: float) -> float { return builtin.clamp(x, 0, 1) }
-saturate_double :: proc "c" (x, y, z: double) -> double { return builtin.clamp(x, 0, 1) }
-saturate_float2 :: proc "c" (x, y, z: float2) -> float2 { return {builtin.clamp(x.x, 0, 1), builtin.clamp(x.y, 0, 1)} }
-saturate_float3 :: proc "c" (x, y, z: float3) -> float3 { return {builtin.clamp(x.x, 0, 1), builtin.clamp(x.y, 0, 1), builtin.clamp(x.z, 0, 1)} }
-saturate_float4 :: proc "c" (x, y, z: float4) -> float4 { return {builtin.clamp(x.x, 0, 1), builtin.clamp(x.y, 0, 1), builtin.clamp(x.z, 0, 1), builtin.clamp(x.w, 0, 1)} }
-saturate_double2 :: proc "c" (x, y, z: double2) -> double2 { return {builtin.clamp(x.x, 0, 1), builtin.clamp(x.y, 0, 1)} }
-saturate_double3 :: proc "c" (x, y, z: double3) -> double3 { return {builtin.clamp(x.x, 0, 1), builtin.clamp(x.y, 0, 1), builtin.clamp(x.z, 0, 1)} }
-saturate_double4 :: proc "c" (x, y, z: double4) -> double4 { return {builtin.clamp(x.x, 0, 1), builtin.clamp(x.y, 0, 1), builtin.clamp(x.z, 0, 1), builtin.clamp(x.w, 0, 1)} }
-saturate_int2 :: proc "c" (x, y, z: int2) -> int2 { return {builtin.clamp(x.x, 0, 1), builtin.clamp(x.y, 0, 1)} }
-saturate_int3 :: proc "c" (x, y, z: int3) -> int3 { return {builtin.clamp(x.x, 0, 1), builtin.clamp(x.y, 0, 1), builtin.clamp(x.z, 0, 1)} }
-saturate_int4 :: proc "c" (x, y, z: int4) -> int4 { return {builtin.clamp(x.x, 0, 1), builtin.clamp(x.y, 0, 1), builtin.clamp(x.z, 0, 1), builtin.clamp(x.w, 0, 1)} }
-saturate_uint2 :: proc "c" (x, y, z: uint2) -> uint2 { return {builtin.clamp(x.x, 0, 1), builtin.clamp(x.y, 0, 1)} }
-saturate_uint3 :: proc "c" (x, y, z: uint3) -> uint3 { return {builtin.clamp(x.x, 0, 1), builtin.clamp(x.y, 0, 1), builtin.clamp(x.z, 0, 1)} }
-saturate_uint4 :: proc "c" (x, y, z: uint4) -> uint4 { return {builtin.clamp(x.x, 0, 1), builtin.clamp(x.y, 0, 1), builtin.clamp(x.z, 0, 1), builtin.clamp(x.w, 0, 1)} }
+saturate_int :: proc "c" (v: int) -> int { return builtin.clamp(v, 0, 1) }
+saturate_uint :: proc "c" (v: uint) -> uint { return builtin.clamp(v, 0, 1) }
+saturate_float :: proc "c" (v: float) -> float { return builtin.clamp(v, 0, 1) }
+saturate_double :: proc "c" (v: double) -> double { return builtin.clamp(v, 0, 1) }
+saturate_float2 :: proc "c" (v: float2) -> float2 { return {builtin.clamp(v.x, 0, 1), builtin.clamp(v.y, 0, 1)} }
+saturate_float3 :: proc "c" (v: float3) -> float3 { return {builtin.clamp(v.x, 0, 1), builtin.clamp(v.y, 0, 1), builtin.clamp(v.z, 0, 1)} }
+saturate_float4 :: proc "c" (v: float4) -> float4 { return {builtin.clamp(v.x, 0, 1), builtin.clamp(v.y, 0, 1), builtin.clamp(v.z, 0, 1), builtin.clamp(v.w, 0, 1)} }
+saturate_double2 :: proc "c" (v: double2) -> double2 { return {builtin.clamp(v.x, 0, 1), builtin.clamp(v.y, 0, 1)} }
+saturate_double3 :: proc "c" (v: double3) -> double3 { return {builtin.clamp(v.x, 0, 1), builtin.clamp(v.y, 0, 1), builtin.clamp(v.z, 0, 1)} }
+saturate_double4 :: proc "c" (v: double4) -> double4 { return {builtin.clamp(v.x, 0, 1), builtin.clamp(v.y, 0, 1), builtin.clamp(v.z, 0, 1), builtin.clamp(v.w, 0, 1)} }
+saturate_int2 :: proc "c" (v: int2) -> int2 { return {builtin.clamp(v.x, 0, 1), builtin.clamp(v.y, 0, 1)} }
+saturate_int3 :: proc "c" (v: int3) -> int3 { return {builtin.clamp(v.x, 0, 1), builtin.clamp(v.y, 0, 1), builtin.clamp(v.z, 0, 1)} }
+saturate_int4 :: proc "c" (v: int4) -> int4 { return {builtin.clamp(v.x, 0, 1), builtin.clamp(v.y, 0, 1), builtin.clamp(v.z, 0, 1), builtin.clamp(v.w, 0, 1)} }
+saturate_uint2 :: proc "c" (v: uint2) -> uint2 { return {builtin.clamp(v.x, 0, 1), builtin.clamp(v.y, 0, 1)} }
+saturate_uint3 :: proc "c" (v: uint3) -> uint3 { return {builtin.clamp(v.x, 0, 1), builtin.clamp(v.y, 0, 1), builtin.clamp(v.z, 0, 1)} }
+saturate_uint4 :: proc "c" (v: uint4) -> uint4 { return {builtin.clamp(v.x, 0, 1), builtin.clamp(v.y, 0, 1), builtin.clamp(v.z, 0, 1), builtin.clamp(v.w, 0, 1)} }
lerp :: proc{
From 1d293749c2f0751c112e91f1337a83cbc3c5cb04 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Mon, 17 Jan 2022 11:38:15 +0000
Subject: [PATCH 113/710] Move `core:path` to `core:path/slashpath`
This is to reduce the confusion that occurs between that package and the `core:path/filepath` package
---
core/path/path_error.odin | 5 +++++
core/path/{ => slashpath}/match.odin | 2 +-
core/path/{ => slashpath}/path.odin | 4 ++--
3 files changed, 8 insertions(+), 3 deletions(-)
create mode 100644 core/path/path_error.odin
rename core/path/{ => slashpath}/match.odin (99%)
rename core/path/{ => slashpath}/path.odin (94%)
diff --git a/core/path/path_error.odin b/core/path/path_error.odin
new file mode 100644
index 000000000..2be0b4cf4
--- /dev/null
+++ b/core/path/path_error.odin
@@ -0,0 +1,5 @@
+package path
+
+#panic(
+`core:path/slashpath - for paths separated by forward slashes, e.g. paths in URLs, this does not deal with OS-specific paths
+core:path/filepath - uses either forward slashes or backslashes depending on the operating system, deals with Windows/NT paths with volume letters or backslashes (on the related platforms)`)
diff --git a/core/path/match.odin b/core/path/slashpath/match.odin
similarity index 99%
rename from core/path/match.odin
rename to core/path/slashpath/match.odin
index 0bea4f6e7..09e774275 100644
--- a/core/path/match.odin
+++ b/core/path/slashpath/match.odin
@@ -1,4 +1,4 @@
-package path
+package slashpath
import "core:strings"
import "core:unicode/utf8"
diff --git a/core/path/path.odin b/core/path/slashpath/path.odin
similarity index 94%
rename from core/path/path.odin
rename to core/path/slashpath/path.odin
index 186176b42..8ac10e655 100644
--- a/core/path/path.odin
+++ b/core/path/slashpath/path.odin
@@ -1,9 +1,9 @@
-// The path package is only to be used for paths separated by forward slashes,
+// The slashpath package is only to be used for paths separated by forward slashes,
// e.g. paths in URLs
//
// This package does not deal with Windows/NT paths with volume letters or backslashes
// To manipulate operating system specific paths, use the path/filepath package
-package path
+package slashpath
import "core:strings"
From 95620aaf2aced0290133dcbfa96a1920b3ff7578 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Mon, 17 Jan 2022 11:48:15 +0000
Subject: [PATCH 114/710] Update examples/all
---
examples/all/all_main.odin | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/examples/all/all_main.odin b/examples/all/all_main.odin
index a88cc273e..c24238602 100644
--- a/examples/all/all_main.odin
+++ b/examples/all/all_main.odin
@@ -40,7 +40,7 @@ import odin_parser "core:odin/parser"
import odin_printer "core:odin/printer"
import odin_tokenizer "core:odin/tokenizer"
import os "core:os"
-import path "core:path"
+import slashpath "core:path/slashpath"
import filepath "core:path/filepath"
import reflect "core:reflect"
import runtime "core:runtime"
@@ -97,7 +97,7 @@ _ :: odin_parser
_ :: odin_printer
_ :: odin_tokenizer
_ :: os
-_ :: path
+_ :: slashpath
_ :: filepath
_ :: reflect
_ :: runtime
From cd6898439e9de6fce42555e2bab5c27e206dbcde Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Mon, 17 Jan 2022 12:17:13 +0000
Subject: [PATCH 115/710] Comment out `link_section` on procedures
---
core/runtime/entry_unix.odin | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/core/runtime/entry_unix.odin b/core/runtime/entry_unix.odin
index 67d2cbcb7..dd1e06625 100644
--- a/core/runtime/entry_unix.odin
+++ b/core/runtime/entry_unix.odin
@@ -5,13 +5,13 @@ package runtime
import "core:intrinsics"
when ODIN_BUILD_MODE == .Dynamic {
- @(link_name="_odin_entry_point", linkage="strong", require, link_section=".init")
+ @(link_name="_odin_entry_point", linkage="strong", require/*, link_section=".init"*/)
_odin_entry_point :: proc "c" () {
context = default_context()
#force_no_inline _startup_runtime()
intrinsics.__entry_point()
}
- @(link_name="_odin_exit_point", linkage="strong", require, link_section=".fini")
+ @(link_name="_odin_exit_point", linkage="strong", require/*, link_section=".fini"*/)
_odin_exit_point :: proc "c" () {
context = default_context()
#force_no_inline _cleanup_runtime()
From 686dbb4421824f17164443b2538b587e91d400a5 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Mon, 17 Jan 2022 14:43:42 +0000
Subject: [PATCH 116/710] Correct odin doc comment printing
---
src/docs.cpp | 28 ++++++++++++++++++++++------
src/docs_writer.cpp | 5 +++--
2 files changed, 25 insertions(+), 8 deletions(-)
diff --git a/src/docs.cpp b/src/docs.cpp
index 8d65cb83a..3ea3cce1b 100644
--- a/src/docs.cpp
+++ b/src/docs.cpp
@@ -67,6 +67,14 @@ GB_COMPARE_PROC(cmp_ast_package_by_name) {
#include "docs_format.cpp"
#include "docs_writer.cpp"
+void print_doc_line(i32 indent, String const &data) {
+ while (indent --> 0) {
+ gb_printf("\t");
+ }
+ gb_file_write(gb_file_get_standard(gbFileStandard_Output), data.text, data.len);
+ gb_printf("\n");
+}
+
void print_doc_line(i32 indent, char const *fmt, ...) {
while (indent --> 0) {
gb_printf("\t");
@@ -86,6 +94,13 @@ void print_doc_line_no_newline(i32 indent, char const *fmt, ...) {
gb_printf_va(fmt, va);
va_end(va);
}
+void print_doc_line_no_newline(i32 indent, String const &data) {
+ while (indent --> 0) {
+ gb_printf("\t");
+ }
+ gb_file_write(gb_file_get_standard(gbFileStandard_Output), data.text, data.len);
+}
+
bool print_doc_comment_group_string(i32 indent, CommentGroup *g) {
if (g == nullptr) {
@@ -106,8 +121,9 @@ bool print_doc_comment_group_string(i32 indent, CommentGroup *g) {
String comment = g->list[i].string;
String original_comment = comment;
- bool slash_slash = comment[1] == '/';
+ bool slash_slash = false;
if (comment[1] == '/') {
+ slash_slash = true;
comment.text += 2;
comment.len -= 2;
} else if (comment[1] == '*') {
@@ -131,7 +147,7 @@ bool print_doc_comment_group_string(i32 indent, CommentGroup *g) {
}
if (slash_slash) {
- print_doc_line(indent, "%.*s", LIT(comment));
+ print_doc_line(indent, comment);
count += 1;
} else {
isize pos = 0;
@@ -143,7 +159,7 @@ bool print_doc_comment_group_string(i32 indent, CommentGroup *g) {
}
}
String line = substring(comment, pos, end);
- pos = end+1;
+ pos = end;
String trimmed_line = string_trim_whitespace(line);
if (trimmed_line.len == 0) {
if (count == 0) {
@@ -159,7 +175,7 @@ bool print_doc_comment_group_string(i32 indent, CommentGroup *g) {
line = substring(line, 2, line.len);
}
- print_doc_line(indent, "%.*s", LIT(line));
+ print_doc_line(indent, line);
count += 1;
}
}
@@ -263,7 +279,7 @@ void print_doc_package(CheckerInfo *info, AstPackage *pkg) {
}
GB_ASSERT(type_expr != nullptr || init_expr != nullptr);
- print_doc_line_no_newline(2, "%.*s", LIT(e->token.string));
+ print_doc_line_no_newline(2, e->token.string);
if (type_expr != nullptr) {
gbString t = expr_to_string(type_expr);
gb_printf(": %s ", t);
@@ -298,7 +314,7 @@ void print_doc_package(CheckerInfo *info, AstPackage *pkg) {
for_array(i, pkg->files) {
AstFile *f = pkg->files[i];
String filename = remove_directory_from_path(f->fullpath);
- print_doc_line(2, "%.*s", LIT(filename));
+ print_doc_line(2, filename);
}
}
diff --git a/src/docs_writer.cpp b/src/docs_writer.cpp
index 56ad0561e..94b43be99 100644
--- a/src/docs_writer.cpp
+++ b/src/docs_writer.cpp
@@ -292,8 +292,9 @@ bool odin_doc_append_comment_group_string(Array *buf, CommentGroup *g) {
String comment = g->list[i].string;
String original_comment = comment;
- bool slash_slash = comment[1] == '/';
+ bool slash_slash = false;
if (comment[1] == '/') {
+ slash_slash = true;
comment.text += 2;
comment.len -= 2;
} else if (comment[1] == '*') {
@@ -330,7 +331,7 @@ bool odin_doc_append_comment_group_string(Array *buf, CommentGroup *g) {
}
}
String line = substring(comment, pos, end);
- pos = end+1;
+ pos = end;
String trimmed_line = string_trim_whitespace(line);
if (trimmed_line.len == 0) {
if (count == 0) {
From 76ccce2942a0d527be5693ff5bedbf92a5de5eb2 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Mon, 17 Jan 2022 17:57:55 +0000
Subject: [PATCH 117/710] Begin work on a html doc printer
---
tools/odin-html-docs/odin_html_docs_main.odin | 609 ++++++++++++++++++
tools/odin-html-docs/style.css | 31 +
2 files changed, 640 insertions(+)
create mode 100644 tools/odin-html-docs/odin_html_docs_main.odin
create mode 100644 tools/odin-html-docs/style.css
diff --git a/tools/odin-html-docs/odin_html_docs_main.odin b/tools/odin-html-docs/odin_html_docs_main.odin
new file mode 100644
index 000000000..7c822e4a1
--- /dev/null
+++ b/tools/odin-html-docs/odin_html_docs_main.odin
@@ -0,0 +1,609 @@
+package odin_html_docs
+
+import doc "core:odin/doc-format"
+import "core:fmt"
+import "core:io"
+import "core:os"
+import "core:strings"
+import "core:path/slashpath"
+import "core:sort"
+import "core:slice"
+
+header: ^doc.Header
+files: []doc.File
+pkgs: []doc.Pkg
+entities: []doc.Entity
+types: []doc.Type
+
+pkgs_to_use: map[string]^doc.Pkg // trimmed path
+pkg_to_path: map[^doc.Pkg]string // trimmed path
+
+array :: proc(a: $A/doc.Array($T)) -> []T {
+ return doc.from_array(header, a)
+}
+str :: proc(s: $A/doc.String) -> string {
+ return doc.from_string(header, s)
+}
+
+errorf :: proc(format: string, args: ..any) -> ! {
+ fmt.eprintf("%s ", os.args[0])
+ fmt.eprintf(format, ..args)
+ fmt.eprintln()
+ os.exit(1)
+}
+
+common_prefix :: proc(strs: []string) -> string {
+ if len(strs) == 0 {
+ return ""
+ }
+ n := max(int)
+ for str in strs {
+ n = min(n, len(str))
+ }
+
+ prefix := strs[0][:n]
+ for str in strs[1:] {
+ for len(prefix) != 0 && str[:len(prefix)] != prefix {
+ prefix = prefix[:len(prefix)-1]
+ }
+ if len(prefix) == 0 {
+ break
+ }
+ }
+ return prefix
+}
+
+
+write_html_header :: proc(w: io.Writer, title: string) {
+ fmt.wprintf(w, `
+
+
+
+
+ %s
+
+
+
+
+
+
+`, title)
+ fmt.wprintln(w, "\n")
+ fmt.wprintln(w, "\n
Core Directory ")
+
+}
+
+write_html_footer :: proc(w: io.Writer) {
+ fmt.wprintf(w, "
\n\n")
+}
+
+main :: proc() {
+ if len(os.args) != 2 {
+ errorf("expected 1 .odin-doc file")
+ }
+ data, ok := os.read_entire_file(os.args[1])
+ if !ok {
+ errorf("unable to read file:", os.args[1])
+ }
+ err: doc.Reader_Error
+ header, err = doc.read_from_bytes(data)
+ switch err {
+ case .None:
+ case .Header_Too_Small:
+ errorf("file is too small for the file format")
+ case .Invalid_Magic:
+ errorf("invalid magic for the file format")
+ case .Data_Too_Small:
+ errorf("data is too small for the file format")
+ case .Invalid_Version:
+ errorf("invalid file format version")
+ }
+ files = array(header.files)
+ pkgs = array(header.pkgs)
+ entities = array(header.entities)
+ types = array(header.types)
+
+ fullpaths: [dynamic]string
+ defer delete(fullpaths)
+
+ for pkg in pkgs[1:] {
+ append(&fullpaths, str(pkg.fullpath))
+ }
+ path_prefix := common_prefix(fullpaths[:])
+
+ pkgs_to_use = make(map[string]^doc.Pkg)
+ for fullpath, i in fullpaths {
+ path := strings.trim_prefix(fullpath, path_prefix)
+ if strings.has_prefix(path, "core/") {
+ pkgs_to_use[strings.trim_prefix(path, "core/")] = &pkgs[i+1]
+ }
+ }
+ sort.map_entries_by_key(&pkgs_to_use)
+ for path, pkg in pkgs_to_use {
+ pkg_to_path[pkg] = path
+ }
+
+ b := strings.make_builder()
+ w := strings.to_writer(&b)
+ {
+ strings.reset_builder(&b)
+ write_html_header(w, "core library - pkg.odin-lang.org")
+ write_core_directory(w)
+ write_html_footer(w)
+ os.make_directory("core", 0)
+ os.write_entire_file("core/index.html", b.buf[:])
+ }
+
+ for path, pkg in pkgs_to_use {
+ strings.reset_builder(&b)
+ write_html_header(w, fmt.tprintf("package %s - pkg.odin-lang.org", path))
+ write_pkg(w, path, pkg)
+ write_html_footer(w)
+ os.make_directory(fmt.tprintf("core/%s", path), 0)
+ os.write_entire_file(fmt.tprintf("core/%s/index.html", path), b.buf[:])
+ }
+}
+
+
+write_core_directory :: proc(w: io.Writer) {
+ Node :: struct {
+ dir: string,
+ path: string,
+ name: string,
+ pkg: ^doc.Pkg,
+ next: ^Node,
+ first_child: ^Node,
+ }
+ add_child :: proc(parent: ^Node, child: ^Node) -> ^Node {
+ assert(parent != nil)
+ end := &parent.first_child
+ for end^ != nil {
+ end = &end^.next
+ }
+ child.next = end^
+ end^ = child
+ return child
+ }
+
+ root: Node
+ for path, pkg in pkgs_to_use {
+ dir, _, inner := strings.partition(path, "/")
+
+ node: ^Node = nil
+ for node = root.first_child; node != nil; node = node.next {
+ if node.dir == dir {
+ break
+ }
+ }
+ if inner == "" {
+ if node == nil {
+ add_child(&root, new_clone(Node{
+ dir = dir,
+ name = dir,
+ path = path,
+ pkg = pkg,
+ }))
+ } else {
+ node.dir = dir
+ node.name = dir
+ node.path = path
+ node.pkg = pkg
+ }
+ } else {
+ if node == nil {
+ node = add_child(&root, new_clone(Node{
+ dir = dir,
+ name = dir,
+ }))
+ }
+ assert(node != nil)
+ child := add_child(node, new_clone(Node{
+ dir = dir,
+ name = inner,
+ path = path,
+ pkg = pkg,
+ }))
+ }
+ }
+
+
+ fmt.wprintln(w, "Directories ")
+
+ fmt.wprintln(w, "\t")
+ fmt.wprintln(w, "\t\t")
+
+ for dir := root.first_child; dir != nil; dir = dir.next {
+ if dir.first_child != nil {
+ fmt.wprint(w, ``, dir.dir)
+ } else {
+ fmt.wprintf(w, ` `, dir.dir)
+ }
+
+ if dir.pkg != nil {
+ fmt.wprintf(w, `%s `, dir.path, dir.name)
+ } else {
+ fmt.wprintf(w, "%s", dir.name)
+ }
+ fmt.wprintf(w, " ")
+ if dir.pkg != nil {
+ line_doc, _, _ := strings.partition(str(dir.pkg.docs), "\n")
+ line_doc = strings.trim_space(line_doc)
+ if line_doc != "" {
+ fmt.wprintf(w, `%s `, line_doc)
+ }
+ }
+ fmt.wprintf(w, " \n")
+
+ for child := dir.first_child; child != nil; child = child.next {
+ assert(child.pkg != nil)
+ fmt.wprintf(w, ``, str(child.pkg.name))
+ fmt.wprintf(w, `%s `, child.path, child.name)
+ fmt.wprintf(w, " ")
+
+ line_doc, _, _ := strings.partition(str(child.pkg.docs), "\n")
+ line_doc = strings.trim_space(line_doc)
+ if line_doc != "" {
+ fmt.wprintf(w, `%s `, line_doc)
+ }
+
+ fmt.wprintf(w, " \n")
+ }
+ }
+
+ fmt.wprintln(w, "\t\t ")
+ fmt.wprintln(w, "\t
")
+}
+
+is_entity_blank :: proc(e: doc.Entity_Index) -> bool {
+ name := str(entities[e].name)
+ return name == "" || name == "_"
+}
+
+Write_Type_Flag :: enum {
+ Is_Results,
+ Variadic,
+}
+Write_Type_Flags :: distinct bit_set[Write_Type_Flag]
+
+write_type :: proc(w: io.Writer, pkg: doc.Pkg_Index, type: doc.Type, flags: Write_Type_Flags) {
+ type_entites := array(type.entities)
+ type_types := array(type.types)
+ switch type.kind {
+ case .Invalid:
+ // ignore
+ case .Basic:
+ type_flags := transmute(doc.Type_Flags_Basic)type.flags
+ if .Untyped in type_flags {
+ io.write_string(w, str(type.name))
+ } else {
+ fmt.wprintf(w, `%s `, str(type.name))
+ }
+ case .Named:
+ e := entities[type_entites[0]]
+ name := str(type.name)
+ fmt.wprintf(w, ``)
+ tn_pkg := files[e.pos.file].pkg
+ if tn_pkg != pkg {
+ fmt.wprintf(w, `%s.`, str(pkgs[pkg].name))
+ }
+ fmt.wprintf(w, `{1:s} `, pkg_to_path[&pkgs[tn_pkg]], name)
+ case .Generic:
+ name := str(type.name)
+ io.write_byte(w, '$')
+ io.write_string(w, name)
+ if len(array(type.types)) == 1 {
+ io.write_byte(w, '/')
+ write_type(w, pkg, types[type_types[0]], flags)
+ }
+ case .Pointer:
+ io.write_byte(w, '^')
+ write_type(w, pkg, types[type_types[0]], flags)
+ case .Array:
+ assert(type.elem_count_len == 1)
+ io.write_byte(w, '[')
+ io.write_uint(w, uint(type.elem_counts[0]))
+ io.write_byte(w, ']')
+ write_type(w, pkg, types[type_types[0]], flags)
+ case .Enumerated_Array:
+ io.write_byte(w, '[')
+ write_type(w, pkg, types[type_types[0]], flags)
+ io.write_byte(w, ']')
+ write_type(w, pkg, types[type_types[1]], flags)
+ case .Slice:
+ if .Variadic in flags {
+ io.write_string(w, "..")
+ } else {
+ io.write_string(w, "[]")
+ }
+ write_type(w, pkg, types[type_types[0]], flags - {.Variadic})
+ case .Dynamic_Array:
+ io.write_string(w, "[dynamic]")
+ write_type(w, pkg, types[type_types[0]], flags)
+ case .Map:
+ io.write_string(w, "map[")
+ write_type(w, pkg, types[type_types[0]], flags)
+ io.write_byte(w, ']')
+ write_type(w, pkg, types[type_types[1]], flags)
+ case .Struct:
+ type_flags := transmute(doc.Type_Flags_Struct)type.flags
+ io.write_string(w, "struct {}")
+ case .Union:
+ type_flags := transmute(doc.Type_Flags_Union)type.flags
+ io.write_string(w, "union {}")
+ case .Enum:
+ io.write_string(w, "enum {}")
+ case .Tuple:
+ entity_indices := type_entites
+ if len(entity_indices) == 0 {
+ return
+ }
+ require_parens := (.Is_Results in flags) && (len(entity_indices) > 1 || !is_entity_blank(entity_indices[0]))
+ if require_parens { io.write_byte(w, '(') }
+ for entity_index, i in entity_indices {
+ e := &entities[entity_index]
+ name := str(e.name)
+
+ if i > 0 {
+ io.write_string(w, ", ")
+ }
+ if .Param_Using in e.flags { io.write_string(w, "using ") }
+ if .Param_Const in e.flags { io.write_string(w, "#const ") }
+ if .Param_Auto_Cast in e.flags { io.write_string(w, "#auto_cast ") }
+ if .Param_CVararg in e.flags { io.write_string(w, "#c_vararg ") }
+ if .Param_No_Alias in e.flags { io.write_string(w, "#no_alias ") }
+ if .Param_Any_Int in e.flags { io.write_string(w, "#any_int ") }
+
+ if name != "" {
+ io.write_string(w, name)
+ io.write_string(w, ": ")
+ }
+ param_flags := flags - {.Is_Results}
+ if .Param_Ellipsis in e.flags {
+ param_flags += {.Variadic}
+ }
+ write_type(w, pkg, types[e.type], param_flags)
+ }
+ if require_parens { io.write_byte(w, ')') }
+
+ case .Proc:
+ type_flags := transmute(doc.Type_Flags_Proc)type.flags
+ io.write_string(w, "proc")
+ cc := str(type.calling_convention)
+ if cc != "" {
+ io.write_byte(w, ' ')
+ io.write_quoted_string(w, cc)
+ io.write_byte(w, ' ')
+ }
+ params := array(type.types)[0]
+ results := array(type.types)[1]
+ io.write_byte(w, '(')
+ write_type(w, pkg, types[params], flags)
+ io.write_byte(w, ')')
+ if results != 0 {
+ assert(.Diverging not_in type_flags)
+ io.write_string(w, " -> ")
+ write_type(w, pkg, types[results], flags+{.Is_Results})
+ }
+ if .Diverging in type_flags {
+ io.write_string(w, " -> !")
+ }
+ if .Optional_Ok in type_flags {
+ io.write_string(w, " #optional_ok")
+ }
+
+ case .Bit_Set:
+ type_flags := transmute(doc.Type_Flags_Bit_Set)type.flags
+ case .Simd_Vector:
+ io.write_string(w, "#simd[")
+ io.write_uint(w, uint(type.elem_counts[0]))
+ io.write_byte(w, ']')
+ case .SOA_Struct_Fixed:
+ io.write_string(w, "#soa[")
+ io.write_uint(w, uint(type.elem_counts[0]))
+ io.write_byte(w, ']')
+ case .SOA_Struct_Slice:
+ io.write_string(w, "#soa[]")
+ case .SOA_Struct_Dynamic:
+ io.write_string(w, "#soa[dynamic]")
+ case .Relative_Pointer:
+ io.write_string(w, "#relative(")
+ write_type(w, pkg, types[type_types[1]], flags)
+ io.write_string(w, ") ")
+ write_type(w, pkg, types[type_types[0]], flags)
+ case .Relative_Slice:
+ io.write_string(w, "#relative(")
+ write_type(w, pkg, types[type_types[1]], flags)
+ io.write_string(w, ") ")
+ write_type(w, pkg, types[type_types[0]], flags)
+ case .Multi_Pointer:
+ io.write_string(w, "[^]")
+ write_type(w, pkg, types[type_types[0]], flags)
+ case .Matrix:
+ io.write_string(w, "matrix[")
+ io.write_uint(w, uint(type.elem_counts[0]))
+ io.write_string(w, ", ")
+ io.write_uint(w, uint(type.elem_counts[1]))
+ io.write_string(w, "]")
+ write_type(w, pkg, types[type_types[0]], flags)
+ }
+}
+
+write_docs :: proc(w: io.Writer, pkg: ^doc.Pkg, docs: string) {
+ if docs == "" {
+ return
+ }
+ it := docs
+ was_code := true
+ was_paragraph := true
+ for line in strings.split_iterator(&it, "\n") {
+ if strings.has_prefix(line, "\t") {
+ if !was_code {
+ was_code = true;
+ fmt.wprint(w, ``)
+ }
+ fmt.wprintf(w, "%s\n", strings.trim_prefix(line, "\t"))
+ continue
+ } else if was_code {
+ was_code = false
+ fmt.wprintln(w, " ")
+ }
+ text := strings.trim_space(line)
+ if text == "" {
+ if was_paragraph {
+ was_paragraph = false
+ fmt.wprintln(w, "
")
+ }
+ continue
+ }
+ if !was_paragraph {
+ fmt.wprintln(w, "")
+ }
+ assert(!was_code)
+ was_paragraph = true
+ fmt.wprintln(w, text)
+ }
+ if was_code {
+ // assert(!was_paragraph, str(pkg.name))
+ was_code = false
+ fmt.wprintln(w, "")
+ } else if was_paragraph {
+ fmt.wprintln(w, "
")
+ }
+}
+
+write_pkg :: proc(w: io.Writer, path: string, pkg: ^doc.Pkg) {
+ fmt.wprintf(w, "package core:%s \n", path)
+ fmt.wprintln(w, "Documentation ")
+ docs := strings.trim_space(str(pkg.docs))
+ if docs != "" {
+ fmt.wprintln(w, "Overview ")
+ fmt.wprintln(w, "")
+ defer fmt.wprintln(w, "
")
+
+ write_docs(w, pkg, docs)
+ }
+
+ fmt.wprintln(w, "Index ")
+ fmt.wprintln(w, ``)
+ pkg_procs: [dynamic]^doc.Entity
+ pkg_proc_groups: [dynamic]^doc.Entity
+ pkg_types: [dynamic]^doc.Entity
+ pkg_vars: [dynamic]^doc.Entity
+ pkg_consts: [dynamic]^doc.Entity
+
+ for entity_index in array(pkg.entities) {
+ e := &entities[entity_index]
+ name := str(e.name)
+ if name == "" || name[0] == '_' {
+ continue
+ }
+ switch e.kind {
+ case .Invalid, .Import_Name, .Library_Name:
+ // ignore
+ case .Constant: append(&pkg_consts, e)
+ case .Variable: append(&pkg_vars, e)
+ case .Type_Name: append(&pkg_types, e)
+ case .Procedure: append(&pkg_procs, e)
+ case .Proc_Group: append(&pkg_proc_groups, e)
+ }
+ }
+
+ entity_key :: proc(e: ^doc.Entity) -> string {
+ return str(e.name)
+ }
+
+ slice.sort_by_key(pkg_procs[:], entity_key)
+ slice.sort_by_key(pkg_proc_groups[:], entity_key)
+ slice.sort_by_key(pkg_types[:], entity_key)
+ slice.sort_by_key(pkg_vars[:], entity_key)
+ slice.sort_by_key(pkg_consts[:], entity_key)
+
+ print_index :: proc(w: io.Writer, name: string, entities: []^doc.Entity) {
+ fmt.wprintf(w, "%s \n", name)
+ fmt.wprintln(w, ``)
+ fmt.wprintln(w, "")
+ for e in entities {
+ name := str(e.name)
+ fmt.wprintf(w, "{0:s} \n", name)
+ }
+ fmt.wprintln(w, " ")
+ fmt.wprintln(w, " ")
+ }
+
+
+ print_index(w, "Procedures", pkg_procs[:])
+ print_index(w, "Procedure Groups", pkg_proc_groups[:])
+ print_index(w, "Types", pkg_types[:])
+ print_index(w, "Variables", pkg_vars[:])
+ print_index(w, "Constants", pkg_consts[:])
+
+ fmt.wprintln(w, " ")
+
+
+ print_entity :: proc(w: io.Writer, e: ^doc.Entity) {
+ pkg := &pkgs[files[e.pos.file].pkg]
+ name := str(e.name)
+ fmt.wprintf(w, "{0:s} \n", name)
+ switch e.kind {
+ case .Invalid, .Import_Name, .Library_Name:
+ // ignore
+ case .Constant:
+ case .Variable:
+ case .Type_Name:
+ case .Procedure:
+ fmt.wprint(w, "")
+ fmt.wprintf(w, "%s :: ", name)
+ write_type(w, files[e.pos.file].pkg, types[e.type], nil)
+ where_clauses := array(e.where_clauses)
+ if len(where_clauses) != 0 {
+ io.write_string(w, " where ")
+ for clause, i in where_clauses {
+ if i > 0 {
+ io.write_string(w, ", ")
+ }
+ io.write_string(w, str(clause))
+ }
+ }
+
+ fmt.wprint(w, " {…}")
+ fmt.wprintln(w, " ")
+ case .Proc_Group:
+ }
+
+ write_docs(w, pkg, strings.trim_space(str(e.docs)))
+ }
+ print_entities :: proc(w: io.Writer, title: string, entities: []^doc.Entity) {
+ fmt.wprintf(w, "%s \n", title)
+ fmt.wprintln(w, ``)
+ for e in entities {
+ print_entity(w, e)
+ }
+ fmt.wprintln(w, " ")
+ }
+
+ print_entities(w, "Procedures", pkg_procs[:])
+ print_entities(w, "Procedure Groups", pkg_proc_groups[:])
+ print_entities(w, "Types", pkg_types[:])
+ print_entities(w, "Variables", pkg_vars[:])
+ print_entities(w, "Constants", pkg_consts[:])
+
+
+ fmt.wprintln(w, "Source Files ")
+ fmt.wprintln(w, "")
+ for file_index in array(pkg.files) {
+ file := files[file_index]
+ filename := slashpath.base(str(file.name))
+ fmt.wprintf(w, `%s `, path, filename, filename)
+ fmt.wprintln(w)
+ }
+ fmt.wprintln(w, " ")
+
+}
\ No newline at end of file
diff --git a/tools/odin-html-docs/style.css b/tools/odin-html-docs/style.css
new file mode 100644
index 000000000..7c23d0bc7
--- /dev/null
+++ b/tools/odin-html-docs/style.css
@@ -0,0 +1,31 @@
+.container {
+ max-width: 60em;
+ margin: 0 auto;
+ padding-left: 0.01em 1em;
+}
+
+.directory-pkg {
+ width: 20em;
+}
+
+.directory-child .pkg-name {
+ position: relative;
+ left: 2em;
+ width: 18em;
+}
+
+pre {
+ white-space: pre;
+ tab-size: 8;
+ background-color: #f8f8f8;
+ color: #202224;
+ border: 1px solid #c6c8ca;
+ border-radius: 0.25rem;
+ padding: 0.625rem;
+}
+
+.documentation pre a {
+ text-decoration: none;
+ font-weight: bold;
+ color: #00bfd5;
+}
\ No newline at end of file
From 97922406fec4296ce0732d1eefa0b9d7c943086f Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Mon, 17 Jan 2022 18:23:30 +0000
Subject: [PATCH 118/710] Improve printing for record types
---
tools/odin-html-docs/odin_html_docs_main.odin | 253 ++++++++++++++----
1 file changed, 200 insertions(+), 53 deletions(-)
diff --git a/tools/odin-html-docs/odin_html_docs_main.odin b/tools/odin-html-docs/odin_html_docs_main.odin
index 7c822e4a1..3cbc0d860 100644
--- a/tools/odin-html-docs/odin_html_docs_main.odin
+++ b/tools/odin-html-docs/odin_html_docs_main.odin
@@ -32,6 +32,17 @@ errorf :: proc(format: string, args: ..any) -> ! {
os.exit(1)
}
+base_type :: proc(t: doc.Type) -> doc.Type {
+ t := t
+ for {
+ if t.kind != .Named {
+ break
+ }
+ t = types[array(t.types)[0]]
+ }
+ return t
+}
+
common_prefix :: proc(strs: []string) -> string {
if len(strs) == 0 {
return ""
@@ -270,10 +281,74 @@ is_entity_blank :: proc(e: doc.Entity_Index) -> bool {
Write_Type_Flag :: enum {
Is_Results,
Variadic,
+ Allow_Indent,
}
Write_Type_Flags :: distinct bit_set[Write_Type_Flag]
+Type_Writer :: struct {
+ w: io.Writer,
+ pkg: doc.Pkg_Index,
+ indent: int,
+}
+
+write_type :: proc(using writer: ^Type_Writer, type: doc.Type, flags: Write_Type_Flags) {
+ write_param_entity :: proc(using writer: ^Type_Writer, e: ^doc.Entity, flags: Write_Type_Flags, name_width := 0) {
+ name := str(e.name)
+
+ if .Param_Using in e.flags { io.write_string(w, "using ") }
+ if .Param_Const in e.flags { io.write_string(w, "#const ") }
+ if .Param_Auto_Cast in e.flags { io.write_string(w, "#auto_cast ") }
+ if .Param_CVararg in e.flags { io.write_string(w, "#c_vararg ") }
+ if .Param_No_Alias in e.flags { io.write_string(w, "#no_alias ") }
+ if .Param_Any_Int in e.flags { io.write_string(w, "#any_int ") }
+
+ if name != "" {
+ io.write_string(w, name)
+ io.write_string(w, ": ")
+ }
+ padding := max(name_width-len(name), 0)
+ for _ in 0.. 0 {
+ io.write_string(w, ", ")
+ }
+ write_param_entity(writer, &entities[entity_index], flags)
+ }
+ io.write_byte(w, ')')
+ }
+ do_indent :: proc(using writer: ^Type_Writer, flags: Write_Type_Flags) {
+ if .Allow_Indent not_in flags {
+ return
+ }
+ for _ in 0.. (name_width: int) {
+ for entity_index in type_entites {
+ e := &entities[entity_index]
+ name := str(e.name)
+ name_width = max(len(name), name_width)
+ }
+ return
+ }
+
-write_type :: proc(w: io.Writer, pkg: doc.Pkg_Index, type: doc.Type, flags: Write_Type_Flags) {
type_entites := array(type.entities)
type_types := array(type.types)
switch type.kind {
@@ -301,75 +376,128 @@ write_type :: proc(w: io.Writer, pkg: doc.Pkg_Index, type: doc.Type, flags: Writ
io.write_string(w, name)
if len(array(type.types)) == 1 {
io.write_byte(w, '/')
- write_type(w, pkg, types[type_types[0]], flags)
+ write_type(writer, types[type_types[0]], flags)
}
case .Pointer:
io.write_byte(w, '^')
- write_type(w, pkg, types[type_types[0]], flags)
+ write_type(writer, types[type_types[0]], flags)
case .Array:
assert(type.elem_count_len == 1)
io.write_byte(w, '[')
io.write_uint(w, uint(type.elem_counts[0]))
io.write_byte(w, ']')
- write_type(w, pkg, types[type_types[0]], flags)
+ write_type(writer, types[type_types[0]], flags)
case .Enumerated_Array:
io.write_byte(w, '[')
- write_type(w, pkg, types[type_types[0]], flags)
+ write_type(writer, types[type_types[0]], flags)
io.write_byte(w, ']')
- write_type(w, pkg, types[type_types[1]], flags)
+ write_type(writer, types[type_types[1]], flags)
case .Slice:
if .Variadic in flags {
io.write_string(w, "..")
} else {
io.write_string(w, "[]")
}
- write_type(w, pkg, types[type_types[0]], flags - {.Variadic})
+ write_type(writer, types[type_types[0]], flags - {.Variadic})
case .Dynamic_Array:
io.write_string(w, "[dynamic]")
- write_type(w, pkg, types[type_types[0]], flags)
+ write_type(writer, types[type_types[0]], flags)
case .Map:
io.write_string(w, "map[")
- write_type(w, pkg, types[type_types[0]], flags)
+ write_type(writer, types[type_types[0]], flags)
io.write_byte(w, ']')
- write_type(w, pkg, types[type_types[1]], flags)
+ write_type(writer, types[type_types[1]], flags)
case .Struct:
type_flags := transmute(doc.Type_Flags_Struct)type.flags
- io.write_string(w, "struct {}")
+ io.write_string(w, "struct")
+ if .Polymorphic in type_flags {
+ write_poly_params(writer, type, flags)
+ }
+ if .Packed in type_flags { io.write_string(w, " #packed") }
+ if .Raw_Union in type_flags { io.write_string(w, " #raw_union") }
+ if custom_align := str(type.custom_align); custom_align != "" {
+ io.write_string(w, " #align")
+ io.write_string(w, custom_align)
+ }
+ io.write_string(w, " {")
+ do_newline(writer, flags)
+ indent += 1
+ name_width := calc_name_width(type_entites)
+
+ for entity_index in type_entites {
+ e := &entities[entity_index]
+ do_indent(writer, flags)
+ write_param_entity(writer, e, flags, name_width)
+ io.write_byte(w, ',')
+ do_newline(writer, flags)
+ }
+ indent -= 1
+ do_indent(writer, flags)
+ io.write_string(w, "}")
case .Union:
type_flags := transmute(doc.Type_Flags_Union)type.flags
- io.write_string(w, "union {}")
+ io.write_string(w, "union")
+ if .Polymorphic in type_flags {
+ write_poly_params(writer, type, flags)
+ }
+ if .No_Nil in type_flags { io.write_string(w, " #no_nil") }
+ if .Maybe in type_flags { io.write_string(w, " #maybe") }
+ if custom_align := str(type.custom_align); custom_align != "" {
+ io.write_string(w, " #align")
+ io.write_string(w, custom_align)
+ }
+ io.write_string(w, " {")
+ if len(type_types) > 1 {
+ do_newline(writer, flags)
+ indent += 1
+ for type_index in type_types {
+ do_indent(writer, flags)
+ write_type(writer, types[type_index], flags)
+ io.write_string(w, ", ")
+ do_newline(writer, flags)
+ }
+ indent -= 1
+ do_indent(writer, flags)
+ }
+ io.write_string(w, "}")
case .Enum:
- io.write_string(w, "enum {}")
+ io.write_string(w, "enum")
+ io.write_string(w, " {")
+ do_newline(writer, flags)
+ indent += 1
+
+ name_width := calc_name_width(type_entites)
+
+ for entity_index in type_entites {
+ e := &entities[entity_index]
+
+ do_indent(writer, flags)
+ io.write_string(w, str(e.name))
+
+ if init_string := str(e.init_string); init_string != "" {
+ for _ in 0.. 1 || !is_entity_blank(entity_indices[0]))
+ require_parens := (.Is_Results in flags) && (len(type_entites) > 1 || !is_entity_blank(type_entites[0]))
if require_parens { io.write_byte(w, '(') }
- for entity_index, i in entity_indices {
- e := &entities[entity_index]
- name := str(e.name)
-
+ for entity_index, i in type_entites {
if i > 0 {
io.write_string(w, ", ")
}
- if .Param_Using in e.flags { io.write_string(w, "using ") }
- if .Param_Const in e.flags { io.write_string(w, "#const ") }
- if .Param_Auto_Cast in e.flags { io.write_string(w, "#auto_cast ") }
- if .Param_CVararg in e.flags { io.write_string(w, "#c_vararg ") }
- if .Param_No_Alias in e.flags { io.write_string(w, "#no_alias ") }
- if .Param_Any_Int in e.flags { io.write_string(w, "#any_int ") }
-
- if name != "" {
- io.write_string(w, name)
- io.write_string(w, ": ")
- }
- param_flags := flags - {.Is_Results}
- if .Param_Ellipsis in e.flags {
- param_flags += {.Variadic}
- }
- write_type(w, pkg, types[e.type], param_flags)
+ write_param_entity(writer, &entities[entity_index], flags)
}
if require_parens { io.write_byte(w, ')') }
@@ -385,12 +513,12 @@ write_type :: proc(w: io.Writer, pkg: doc.Pkg_Index, type: doc.Type, flags: Writ
params := array(type.types)[0]
results := array(type.types)[1]
io.write_byte(w, '(')
- write_type(w, pkg, types[params], flags)
+ write_type(writer, types[params], flags)
io.write_byte(w, ')')
if results != 0 {
assert(.Diverging not_in type_flags)
io.write_string(w, " -> ")
- write_type(w, pkg, types[results], flags+{.Is_Results})
+ write_type(writer, types[results], flags+{.Is_Results})
}
if .Diverging in type_flags {
io.write_string(w, " -> !")
@@ -415,24 +543,24 @@ write_type :: proc(w: io.Writer, pkg: doc.Pkg_Index, type: doc.Type, flags: Writ
io.write_string(w, "#soa[dynamic]")
case .Relative_Pointer:
io.write_string(w, "#relative(")
- write_type(w, pkg, types[type_types[1]], flags)
+ write_type(writer, types[type_types[1]], flags)
io.write_string(w, ") ")
- write_type(w, pkg, types[type_types[0]], flags)
+ write_type(writer, types[type_types[0]], flags)
case .Relative_Slice:
io.write_string(w, "#relative(")
- write_type(w, pkg, types[type_types[1]], flags)
+ write_type(writer, types[type_types[1]], flags)
io.write_string(w, ") ")
- write_type(w, pkg, types[type_types[0]], flags)
+ write_type(writer, types[type_types[0]], flags)
case .Multi_Pointer:
io.write_string(w, "[^]")
- write_type(w, pkg, types[type_types[0]], flags)
+ write_type(writer, types[type_types[0]], flags)
case .Matrix:
io.write_string(w, "matrix[")
io.write_uint(w, uint(type.elem_counts[0]))
io.write_string(w, ", ")
io.write_uint(w, uint(type.elem_counts[1]))
io.write_string(w, "]")
- write_type(w, pkg, types[type_types[0]], flags)
+ write_type(writer, types[type_types[0]], flags)
}
}
@@ -529,12 +657,16 @@ write_pkg :: proc(w: io.Writer, path: string, pkg: ^doc.Pkg) {
print_index :: proc(w: io.Writer, name: string, entities: []^doc.Entity) {
fmt.wprintf(w, "%s \n", name)
fmt.wprintln(w, ``)
- fmt.wprintln(w, "")
- for e in entities {
- name := str(e.name)
- fmt.wprintf(w, "{0:s} \n", name)
+ if len(entities) == 0 {
+ io.write_string(w, "This section is empty.
\n")
+ } else {
+ fmt.wprintln(w, "")
+ for e in entities {
+ name := str(e.name)
+ fmt.wprintf(w, "{0:s} \n", name)
+ }
+ fmt.wprintln(w, " ")
}
- fmt.wprintln(w, " ")
fmt.wprintln(w, " ")
}
@@ -549,7 +681,13 @@ write_pkg :: proc(w: io.Writer, path: string, pkg: ^doc.Pkg) {
print_entity :: proc(w: io.Writer, e: ^doc.Entity) {
- pkg := &pkgs[files[e.pos.file].pkg]
+ pkg_index := files[e.pos.file].pkg
+ pkg := &pkgs[pkg_index]
+ writer := &Type_Writer{
+ w = w,
+ pkg = pkg_index,
+ }
+
name := str(e.name)
fmt.wprintf(w, "{0:s} \n", name)
switch e.kind {
@@ -558,10 +696,15 @@ write_pkg :: proc(w: io.Writer, path: string, pkg: ^doc.Pkg) {
case .Constant:
case .Variable:
case .Type_Name:
+ fmt.wprint(w, "")
+ fmt.wprintf(w, "%s :: ", name)
+ tn := base_type(types[e.type])
+ write_type(writer, tn, {.Allow_Indent})
+ fmt.wprintln(w, " ")
case .Procedure:
fmt.wprint(w, "")
fmt.wprintf(w, "%s :: ", name)
- write_type(w, files[e.pos.file].pkg, types[e.type], nil)
+ write_type(writer, types[e.type], nil)
where_clauses := array(e.where_clauses)
if len(where_clauses) != 0 {
io.write_string(w, " where ")
@@ -583,8 +726,12 @@ write_pkg :: proc(w: io.Writer, path: string, pkg: ^doc.Pkg) {
print_entities :: proc(w: io.Writer, title: string, entities: []^doc.Entity) {
fmt.wprintf(w, "%s \n", title)
fmt.wprintln(w, ``)
- for e in entities {
- print_entity(w, e)
+ if len(entities) == 0 {
+ io.write_string(w, "This section is empty.
\n")
+ } else {
+ for e in entities {
+ print_entity(w, e)
+ }
}
fmt.wprintln(w, " ")
}
From c85ac955f798fefd149a5eeaecabf0713210b152 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Mon, 17 Jan 2022 19:00:47 +0000
Subject: [PATCH 119/710] Simplify docs to hide the copyright
---
core/compress/common.odin | 3 +++
core/encoding/hxa/doc.odin | 12 ++++++------
core/fmt/doc.odin | 2 +-
core/image/common.odin | 2 ++
core/image/png/png.odin | 5 +++++
core/math/big/api.odin | 6 ++----
core/math/big/common.odin | 6 ++----
core/math/big/doc.odin | 28 ++++++++++++++++++++++++++++
core/math/big/helpers.odin | 6 ++----
core/math/big/internal.odin | 28 ++--------------------------
core/math/big/logical.odin | 2 ++
core/math/big/prime.odin | 2 ++
core/math/big/private.odin | 2 ++
core/math/big/public.odin | 2 ++
core/math/big/radix.odin | 2 ++
core/math/big/tune.odin | 2 ++
16 files changed, 65 insertions(+), 45 deletions(-)
create mode 100644 core/math/big/doc.odin
diff --git a/core/compress/common.odin b/core/compress/common.odin
index 41f292b6f..5f5ef2413 100644
--- a/core/compress/common.odin
+++ b/core/compress/common.odin
@@ -5,6 +5,9 @@
List of contributors:
Jeroen van Rijn: Initial implementation, optimization.
*/
+
+
+// package compress is a collection of utilities to aid with other compression packages
package compress
import "core:io"
diff --git a/core/encoding/hxa/doc.odin b/core/encoding/hxa/doc.odin
index 16b94a243..230d6ea66 100644
--- a/core/encoding/hxa/doc.odin
+++ b/core/encoding/hxa/doc.odin
@@ -27,7 +27,7 @@
// Construction history, or BSP trees would make the format too large to serve its purpose.
// The facilities of the formats to store meta data should make the format flexible enough
// for most uses. Adding HxA support should be something anyone can do in a days work.
-
+//
// Structure:
// ----------
// HxA is designed to be extremely simple to parse, and is therefore based around conventions. It has
@@ -45,17 +45,17 @@
// of a number of named layers. All layers in the stack have the same number of elements. Each layer
// describes one property of the primitive. Each layer can have multiple channels and each layer can
// store data of a different type.
-
+//
// HaX stores 3 kinds of nodes
// - Pixel data.
// - Polygon geometry data.
// - Meta data only.
-
+//
// Pixel Nodes stores pixels in a layer stack. A layer may store things like Albedo, Roughness,
// Reflectance, Light maps, Masks, Normal maps, and Displacement. Layers use the channels of the
// layers to store things like color. The length of the layer stack is determined by the type and
// dimensions stored in the
-
+//
// Geometry data is stored in 3 separate layer stacks for: vertex data, corner data and face data. The
// vertex data stores things like verities, blend shapes, weight maps, and vertex colors. The first
// layer in a vertex stack has to be a 3 channel layer named "position" describing the base position
@@ -63,7 +63,7 @@
// for things like UV, normals, and adjacency. The first layer in a corner stack has to be a 1 channel
// integer layer named "index" describing the vertices used to form polygons. The last value in each
// polygon has a negative - 1 index to indicate the end of the polygon.
-
+//
// Example:
// A quad and a tri with the vertex index:
// [0, 1, 2, 3] [1, 4, 2]
@@ -72,7 +72,7 @@
// The face stack stores values per face. the length of the face stack has to match the number of
// negative values in the index layer in the corner stack. The face stack can be used to store things
// like material index.
-
+//
// Storage
// -------
// All data is stored in little endian byte order with no padding. The layout mirrors the structs
diff --git a/core/fmt/doc.odin b/core/fmt/doc.odin
index 5984da950..668fc9bc6 100644
--- a/core/fmt/doc.odin
+++ b/core/fmt/doc.odin
@@ -64,6 +64,7 @@ If not present, the width is whatever is necessary to represent the value.
Precision is specified after the (optional) width followed by a period followed by a decimal number.
If no period is present, a default precision is used.
A period with no following number specifies a precision of 0.
+
Examples:
%f default width, default precision
%8f width 8, default precision
@@ -84,7 +85,6 @@ Other flags:
add leading 0z for dozenal (%#z)
add leading 0x or 0X for hexadecimal (%#x or %#X)
remove leading 0x for %p (%#p)
-
' ' (space) leave a space for elided sign in numbers (% d)
0 pad with leading zeros rather than spaces
diff --git a/core/image/common.odin b/core/image/common.odin
index 3ec8e15be..d72b770d5 100644
--- a/core/image/common.odin
+++ b/core/image/common.odin
@@ -6,6 +6,8 @@
Jeroen van Rijn: Initial implementation, optimization.
Ginger Bill: Cosmetic changes.
*/
+
+// package image implements a general 2D image library to be used with other image related packages
package image
import "core:bytes"
diff --git a/core/image/png/png.odin b/core/image/png/png.odin
index da76a4588..bff0afde3 100644
--- a/core/image/png/png.odin
+++ b/core/image/png/png.odin
@@ -6,6 +6,11 @@
Jeroen van Rijn: Initial implementation.
Ginger Bill: Cosmetic changes.
*/
+
+
+// package png implements a PNG image reader
+//
+// The PNG specification is at https://www.w3.org/TR/PNG/.
package png
import "core:compress"
diff --git a/core/math/big/api.odin b/core/math/big/api.odin
index c9be04da0..bf19e83b6 100644
--- a/core/math/big/api.odin
+++ b/core/math/big/api.odin
@@ -2,12 +2,10 @@
Copyright 2021 Jeroen van Rijn .
Made available under Odin's BSD-3 license.
- An arbitrary precision mathematics implementation in Odin.
- For the theoretical underpinnings, see Knuth's The Art of Computer Programming, Volume 2, section 4.3.
- The code started out as an idiomatic source port of libTomMath, which is in the public domain, with thanks.
-
This file collects public proc maps and their aliases.
*/
+
+
package math_big
/*
diff --git a/core/math/big/common.odin b/core/math/big/common.odin
index 31ad54b14..2b34a9163 100644
--- a/core/math/big/common.odin
+++ b/core/math/big/common.odin
@@ -1,11 +1,9 @@
/*
Copyright 2021 Jeroen van Rijn .
Made available under Odin's BSD-3 license.
-
- An arbitrary precision mathematics implementation in Odin.
- For the theoretical underpinnings, see Knuth's The Art of Computer Programming, Volume 2, section 4.3.
- The code started out as an idiomatic source port of libTomMath, which is in the public domain, with thanks.
*/
+
+
package math_big
import "core:intrinsics"
diff --git a/core/math/big/doc.odin b/core/math/big/doc.odin
new file mode 100644
index 000000000..f5e0900f5
--- /dev/null
+++ b/core/math/big/doc.odin
@@ -0,0 +1,28 @@
+/*
+A BigInt implementation in Odin.
+For the theoretical underpinnings, see Knuth's The Art of Computer Programming, Volume 2, section 4.3.
+The code started out as an idiomatic source port of libTomMath, which is in the public domain, with thanks.
+
+========================== Low-level routines ==========================
+
+IMPORTANT: `internal_*` procedures make certain assumptions about their input.
+
+The public functions that call them are expected to satisfy their sanity check requirements.
+This allows `internal_*` call `internal_*` without paying this overhead multiple times.
+
+Where errors can occur, they are of course still checked and returned as appropriate.
+
+When importing `math:core/big` to implement an involved algorithm of your own, you are welcome
+to use these procedures instead of their public counterparts.
+
+Most inputs and outputs are expected to be passed an initialized `Int`, for example.
+Exceptions include `quotient` and `remainder`, which are allowed to be `nil` when the calling code doesn't need them.
+
+Check the comments above each `internal_*` implementation to see what constraints it expects to have met.
+
+We pass the custom allocator to procedures by default using the pattern `context.allocator = allocator`.
+This way we don't have to add `, allocator` at the end of each call.
+
+TODO: Handle +/- Infinity and NaN.
+*/
+package math_big
diff --git a/core/math/big/helpers.odin b/core/math/big/helpers.odin
index 6d13d32bb..6c4b5dd01 100644
--- a/core/math/big/helpers.odin
+++ b/core/math/big/helpers.odin
@@ -1,11 +1,9 @@
/*
Copyright 2021 Jeroen van Rijn .
Made available under Odin's BSD-3 license.
-
- An arbitrary precision mathematics implementation in Odin.
- For the theoretical underpinnings, see Knuth's The Art of Computer Programming, Volume 2, section 4.3.
- The code started out as an idiomatic source port of libTomMath, which is in the public domain, with thanks.
*/
+
+
package math_big
import "core:intrinsics"
diff --git a/core/math/big/internal.odin b/core/math/big/internal.odin
index 437f6e5fc..5085898e5 100644
--- a/core/math/big/internal.odin
+++ b/core/math/big/internal.odin
@@ -2,33 +2,9 @@
/*
Copyright 2021 Jeroen van Rijn .
Made available under Odin's BSD-3 license.
-
- A BigInt implementation in Odin.
- For the theoretical underpinnings, see Knuth's The Art of Computer Programming, Volume 2, section 4.3.
- The code started out as an idiomatic source port of libTomMath, which is in the public domain, with thanks.
-
- ========================== Low-level routines ==========================
-
- IMPORTANT: `internal_*` procedures make certain assumptions about their input.
-
- The public functions that call them are expected to satisfy their sanity check requirements.
- This allows `internal_*` call `internal_*` without paying this overhead multiple times.
-
- Where errors can occur, they are of course still checked and returned as appropriate.
-
- When importing `math:core/big` to implement an involved algorithm of your own, you are welcome
- to use these procedures instead of their public counterparts.
-
- Most inputs and outputs are expected to be passed an initialized `Int`, for example.
- Exceptions include `quotient` and `remainder`, which are allowed to be `nil` when the calling code doesn't need them.
-
- Check the comments above each `internal_*` implementation to see what constraints it expects to have met.
-
- We pass the custom allocator to procedures by default using the pattern `context.allocator = allocator`.
- This way we don't have to add `, allocator` at the end of each call.
-
- TODO: Handle +/- Infinity and NaN.
*/
+
+
package math_big
import "core:mem"
diff --git a/core/math/big/logical.odin b/core/math/big/logical.odin
index e7e55cc47..b5de4cabf 100644
--- a/core/math/big/logical.odin
+++ b/core/math/big/logical.odin
@@ -8,6 +8,8 @@
This file contains logical operations like `and`, `or` and `xor`.
*/
+
+
package math_big
/*
diff --git a/core/math/big/prime.odin b/core/math/big/prime.odin
index eb0cd644c..3cce69675 100644
--- a/core/math/big/prime.odin
+++ b/core/math/big/prime.odin
@@ -8,6 +8,8 @@
This file contains prime finding operations.
*/
+
+
package math_big
import rnd "core:math/rand"
diff --git a/core/math/big/private.odin b/core/math/big/private.odin
index 9989a208a..419f2103f 100644
--- a/core/math/big/private.odin
+++ b/core/math/big/private.odin
@@ -15,6 +15,8 @@
These aren't exported for the same reasons.
*/
+
+
package math_big
import "core:intrinsics"
diff --git a/core/math/big/public.odin b/core/math/big/public.odin
index 2673a262f..3227d7bc4 100644
--- a/core/math/big/public.odin
+++ b/core/math/big/public.odin
@@ -8,6 +8,8 @@
This file contains basic arithmetic operations like `add`, `sub`, `mul`, `div`, ...
*/
+
+
package math_big
import "core:intrinsics"
diff --git a/core/math/big/radix.odin b/core/math/big/radix.odin
index 760c49d77..2b758dc35 100644
--- a/core/math/big/radix.odin
+++ b/core/math/big/radix.odin
@@ -12,6 +12,8 @@
- Use Barrett reduction for non-powers-of-two.
- Also look at extracting and splatting several digits at once.
*/
+
+
package math_big
import "core:intrinsics"
diff --git a/core/math/big/tune.odin b/core/math/big/tune.odin
index d67ff61b4..64a73b656 100644
--- a/core/math/big/tune.odin
+++ b/core/math/big/tune.odin
@@ -7,6 +7,8 @@
For the theoretical underpinnings, see Knuth's The Art of Computer Programming, Volume 2, section 4.3.
The code started out as an idiomatic source port of libTomMath, which is in the public domain, with thanks.
*/
+
+
package math_big
import "core:time"
From 8eda7567141316627973d0018bfb7e80ebdf90aa Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Mon, 17 Jan 2022 19:01:16 +0000
Subject: [PATCH 120/710] Add printing for constants, variables, types, and
procedure groups
---
tools/odin-html-docs/odin_html_docs_main.odin | 97 +++++++++++++++----
tools/odin-html-docs/style.css | 6 +-
2 files changed, 83 insertions(+), 20 deletions(-)
diff --git a/tools/odin-html-docs/odin_html_docs_main.odin b/tools/odin-html-docs/odin_html_docs_main.odin
index 3cbc0d860..d1e4f4432 100644
--- a/tools/odin-html-docs/odin_html_docs_main.odin
+++ b/tools/odin-html-docs/odin_html_docs_main.odin
@@ -114,27 +114,30 @@ main :: proc() {
entities = array(header.entities)
types = array(header.types)
- fullpaths: [dynamic]string
- defer delete(fullpaths)
+ {
+ fullpaths: [dynamic]string
+ defer delete(fullpaths)
- for pkg in pkgs[1:] {
- append(&fullpaths, str(pkg.fullpath))
- }
- path_prefix := common_prefix(fullpaths[:])
-
- pkgs_to_use = make(map[string]^doc.Pkg)
- for fullpath, i in fullpaths {
- path := strings.trim_prefix(fullpath, path_prefix)
- if strings.has_prefix(path, "core/") {
- pkgs_to_use[strings.trim_prefix(path, "core/")] = &pkgs[i+1]
+ for pkg in pkgs[1:] {
+ append(&fullpaths, str(pkg.fullpath))
+ }
+ path_prefix := common_prefix(fullpaths[:])
+
+ pkgs_to_use = make(map[string]^doc.Pkg)
+ for fullpath, i in fullpaths {
+ path := strings.trim_prefix(fullpath, path_prefix)
+ if strings.has_prefix(path, "core/") {
+ pkgs_to_use[strings.trim_prefix(path, "core/")] = &pkgs[i+1]
+ }
+ }
+ sort.map_entries_by_key(&pkgs_to_use)
+ for path, pkg in pkgs_to_use {
+ pkg_to_path[pkg] = path
}
- }
- sort.map_entries_by_key(&pkgs_to_use)
- for path, pkg in pkgs_to_use {
- pkg_to_path[pkg] = path
}
b := strings.make_builder()
+ defer strings.destroy_builder(&b)
w := strings.to_writer(&b)
{
strings.reset_builder(&b)
@@ -369,7 +372,7 @@ write_type :: proc(using writer: ^Type_Writer, type: doc.Type, flags: Write_Type
if tn_pkg != pkg {
fmt.wprintf(w, `%s.`, str(pkgs[pkg].name))
}
- fmt.wprintf(w, `{1:s} `, pkg_to_path[&pkgs[tn_pkg]], name)
+ fmt.wprintf(w, `{1:s} `, pkg_to_path[&pkgs[tn_pkg]], name)
case .Generic:
name := str(type.name)
io.write_byte(w, '$')
@@ -694,12 +697,49 @@ write_pkg :: proc(w: io.Writer, path: string, pkg: ^doc.Pkg) {
case .Invalid, .Import_Name, .Library_Name:
// ignore
case .Constant:
+ fmt.wprint(w, "")
+ the_type := types[e.type]
+ if the_type.kind == .Basic && .Untyped in (transmute(doc.Type_Flags_Basic)the_type.flags) {
+ fmt.wprintf(w, "%s :: ", name)
+ } else {
+ fmt.wprintf(w, "%s: ", name)
+ write_type(writer, the_type, {.Allow_Indent})
+ fmt.wprintf(w, " : ")
+ }
+
+ init_string := str(e.init_string)
+ assert(init_string != "")
+ io.write_string(w, init_string)
+ fmt.wprintln(w, " ")
case .Variable:
+ fmt.wprint(w, "")
+ fmt.wprintf(w, "%s: ", name)
+ write_type(writer, types[e.type], {.Allow_Indent})
+ init_string := str(e.init_string)
+ if init_string != "" {
+ io.write_string(w, " = ")
+ io.write_string(w, init_string)
+ }
+ fmt.wprintln(w, " ")
+
case .Type_Name:
fmt.wprint(w, "")
fmt.wprintf(w, "%s :: ", name)
- tn := base_type(types[e.type])
- write_type(writer, tn, {.Allow_Indent})
+ the_type := types[e.type]
+ type_to_print := the_type
+ if the_type.kind == .Named {
+ if e.pos != entities[array(the_type.entities)[0]].pos {
+ io.write_string(w, "distinct ")
+ } else {
+ bt := base_type(the_type)
+ #partial switch bt.kind {
+ case .Struct, .Union, .Proc, .Enum:
+ io.write_string(w, "distinct ")
+ type_to_print = bt
+ }
+ }
+ }
+ write_type(writer, type_to_print, {.Allow_Indent})
fmt.wprintln(w, " ")
case .Procedure:
fmt.wprint(w, "")
@@ -719,6 +759,25 @@ write_pkg :: proc(w: io.Writer, path: string, pkg: ^doc.Pkg) {
fmt.wprint(w, " {…}")
fmt.wprintln(w, " ")
case .Proc_Group:
+ fmt.wprint(w, "")
+ fmt.wprintf(w, "%s :: proc{{\n", name)
+ for entity_index in array(e.grouped_entities) {
+ this_proc := &entities[entity_index]
+ this_pkg := files[this_proc.pos.file].pkg
+ io.write_byte(w, '\t')
+ if this_pkg != pkg_index {
+ fmt.wprintf(w, "%s.", str(pkgs[this_pkg].name))
+ }
+ name := str(this_proc.name)
+ fmt.wprintf(w, ``, pkg_to_path[&pkgs[this_pkg]], name)
+ io.write_string(w, name)
+ io.write_string(w, ` `)
+ io.write_byte(w, ',')
+ io.write_byte(w, '\n')
+ }
+ fmt.wprintln(w, "}")
+ fmt.wprintln(w, " ")
+
}
write_docs(w, pkg, strings.trim_space(str(e.docs)))
diff --git a/tools/odin-html-docs/style.css b/tools/odin-html-docs/style.css
index 7c23d0bc7..1f334ad91 100644
--- a/tools/odin-html-docs/style.css
+++ b/tools/odin-html-docs/style.css
@@ -28,4 +28,8 @@ pre {
text-decoration: none;
font-weight: bold;
color: #00bfd5;
-}
\ No newline at end of file
+}
+
+pre a.code-procedure {
+ color: #079300;
+}
From 0d4642825fc0d9ad5009bb4e4cae467ee9900112 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Mon, 17 Jan 2022 19:07:25 +0000
Subject: [PATCH 121/710] Correct package docs parsing
---
src/parser.cpp | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/src/parser.cpp b/src/parser.cpp
index 5bf43cee9..7e7146244 100644
--- a/src/parser.cpp
+++ b/src/parser.cpp
@@ -5412,6 +5412,15 @@ bool parse_file(Parser *p, AstFile *f) {
if (f->package_token.kind != Token_package) {
return false;
}
+ if (docs != nullptr) {
+ TokenPos end = token_pos_end(docs->list[docs->list.count-1]);
+ if (end.line == f->package_token.pos.line || end.line+1 == f->package_token.pos.line) {
+ // Okay
+ } else {
+ docs = nullptr;
+ }
+ }
+
Token package_name = expect_token_after(f, Token_Ident, "package");
if (package_name.kind == Token_Ident) {
if (package_name.string == "_") {
From 2ca2dbcc923a005b95459b1ad4c6f5b5600fd17c Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Mon, 17 Jan 2022 19:23:24 +0000
Subject: [PATCH 122/710] Correct `distinct` printing
---
tools/odin-html-docs/odin_html_docs_main.odin | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/tools/odin-html-docs/odin_html_docs_main.odin b/tools/odin-html-docs/odin_html_docs_main.odin
index d1e4f4432..90010369d 100644
--- a/tools/odin-html-docs/odin_html_docs_main.odin
+++ b/tools/odin-html-docs/odin_html_docs_main.odin
@@ -370,7 +370,7 @@ write_type :: proc(using writer: ^Type_Writer, type: doc.Type, flags: Write_Type
fmt.wprintf(w, ``)
tn_pkg := files[e.pos.file].pkg
if tn_pkg != pkg {
- fmt.wprintf(w, `%s.`, str(pkgs[pkg].name))
+ fmt.wprintf(w, `%s.`, str(pkgs[tn_pkg].name))
}
fmt.wprintf(w, `{1:s} `, pkg_to_path[&pkgs[tn_pkg]], name)
case .Generic:
@@ -727,16 +727,16 @@ write_pkg :: proc(w: io.Writer, path: string, pkg: ^doc.Pkg) {
fmt.wprintf(w, "%s :: ", name)
the_type := types[e.type]
type_to_print := the_type
- if the_type.kind == .Named {
- if e.pos != entities[array(the_type.entities)[0]].pos {
- io.write_string(w, "distinct ")
- } else {
+ if the_type.kind == .Named && .Type_Alias not_in e.flags {
+ if e.pos == entities[array(the_type.entities)[0]].pos {
bt := base_type(the_type)
#partial switch bt.kind {
case .Struct, .Union, .Proc, .Enum:
+ // Okay
+ case:
io.write_string(w, "distinct ")
- type_to_print = bt
}
+ type_to_print = bt
}
}
write_type(writer, type_to_print, {.Allow_Indent})
From e9ae6e20e859eb68b01f3e55ca955a2e08cf446b Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Mon, 17 Jan 2022 20:50:40 +0000
Subject: [PATCH 123/710] Fix code, source code links, and add recursive make
directory
---
tools/odin-html-docs/odin_html_docs_main.odin | 179 ++++++++++++++----
tools/odin-html-docs/style.css | 32 +++-
2 files changed, 174 insertions(+), 37 deletions(-)
diff --git a/tools/odin-html-docs/odin_html_docs_main.odin b/tools/odin-html-docs/odin_html_docs_main.odin
index 90010369d..4260bd697 100644
--- a/tools/odin-html-docs/odin_html_docs_main.odin
+++ b/tools/odin-html-docs/odin_html_docs_main.odin
@@ -9,6 +9,8 @@ import "core:path/slashpath"
import "core:sort"
import "core:slice"
+GITHUB_CORE_URL :: "https://github.com/odin-lang/Odin/tree/master/core"
+
header: ^doc.Header
files: []doc.File
pkgs: []doc.Pkg
@@ -64,6 +66,18 @@ common_prefix :: proc(strs: []string) -> string {
return prefix
}
+recursive_make_directory :: proc(path: string, prefix := "") {
+ head, _, tail := strings.partition(path, "/")
+ path_to_make := head
+ if prefix != "" {
+ path_to_make = fmt.tprintf("%s/%s", prefix, head)
+ }
+ os.make_directory(path_to_make, 0)
+ if tail != "" {
+ recursive_make_directory(tail, path_to_make)
+ }
+}
+
write_html_header :: proc(w: io.Writer, title: string) {
fmt.wprintf(w, `
@@ -124,11 +138,21 @@ main :: proc() {
path_prefix := common_prefix(fullpaths[:])
pkgs_to_use = make(map[string]^doc.Pkg)
- for fullpath, i in fullpaths {
+ fullpath_loop: for fullpath, i in fullpaths {
path := strings.trim_prefix(fullpath, path_prefix)
- if strings.has_prefix(path, "core/") {
- pkgs_to_use[strings.trim_prefix(path, "core/")] = &pkgs[i+1]
+ if !strings.has_prefix(path, "core/") {
+ continue fullpath_loop
}
+ pkg := &pkgs[i+1]
+ if len(array(pkg.entities)) == 0 {
+ continue fullpath_loop
+ }
+ trimmed_path := strings.trim_prefix(path, "core/")
+ if strings.has_prefix(trimmed_path, "sys") {
+ continue fullpath_loop
+ }
+
+ pkgs_to_use[trimmed_path] = pkg
}
sort.map_entries_by_key(&pkgs_to_use)
for path, pkg in pkgs_to_use {
@@ -153,7 +177,7 @@ main :: proc() {
write_html_header(w, fmt.tprintf("package %s - pkg.odin-lang.org", path))
write_pkg(w, path, pkg)
write_html_footer(w)
- os.make_directory(fmt.tprintf("core/%s", path), 0)
+ recursive_make_directory(path, "core")
os.write_entire_file(fmt.tprintf("core/%s/index.html", path), b.buf[:])
}
}
@@ -304,20 +328,37 @@ write_type :: proc(using writer: ^Type_Writer, type: doc.Type, flags: Write_Type
if .Param_No_Alias in e.flags { io.write_string(w, "#no_alias ") }
if .Param_Any_Int in e.flags { io.write_string(w, "#any_int ") }
- if name != "" {
+ init_string := str(e.init_string)
+ switch init_string {
+ case "#caller_location":
+ assert(name != "")
io.write_string(w, name)
- io.write_string(w, ": ")
- }
- padding := max(name_width-len(name), 0)
- for _ in 0..`)
+ io.write_string(w, init_string)
+ io.write_string(w, ``)
- param_flags := flags - {.Is_Results}
- if .Param_Ellipsis in e.flags {
- param_flags += {.Variadic}
+ case:
+ if name != "" {
+ io.write_string(w, name)
+ io.write_string(w, ": ")
+ }
+ padding := max(name_width-len(name), 0)
+ for _ in 0.. ")
+ continue
}
text := strings.trim_space(line)
if text == "" {
@@ -604,8 +669,9 @@ write_docs :: proc(w: io.Writer, pkg: ^doc.Pkg, docs: string) {
if was_code {
// assert(!was_paragraph, str(pkg.name))
was_code = false
- fmt.wprintln(w, "")
- } else if was_paragraph {
+ fmt.wprintln(w, "")
+ }
+ if was_paragraph {
fmt.wprintln(w, "")
}
}
@@ -684,6 +750,21 @@ write_pkg :: proc(w: io.Writer, path: string, pkg: ^doc.Pkg) {
print_entity :: proc(w: io.Writer, e: ^doc.Entity) {
+ write_attributes :: proc(w: io.Writer, e: ^doc.Entity) {
+ for attr in array(e.attributes) {
+ io.write_string(w, "@(")
+ name := str(attr.name)
+ value := str(attr.value)
+ io.write_string(w, name)
+ if value != "" {
+ io.write_string(w, "=")
+ io.write_string(w, value)
+ }
+ io.write_string(w, ")\n")
+ }
+ }
+
+
pkg_index := files[e.pos.file].pkg
pkg := &pkgs[pkg_index]
writer := &Type_Writer{
@@ -692,7 +773,15 @@ write_pkg :: proc(w: io.Writer, path: string, pkg: ^doc.Pkg) {
}
name := str(e.name)
- fmt.wprintf(w, "{0:s} \n", name)
+ path := pkg_to_path[pkg]
+ filename := slashpath.base(str(files[e.pos.file].name))
+ fmt.wprintf(w, "\n")
+ defer if e.pos.file != 0 && e.pos.line > 0 {
+ src_url := fmt.tprintf("%s/%s/%s#L%d", GITHUB_CORE_URL, path, filename, e.pos.line)
+ fmt.wprintf(w, "Source: {0:s} ", src_url)
+ }
+
switch e.kind {
case .Invalid, .Import_Name, .Library_Name:
// ignore
@@ -713,6 +802,7 @@ write_pkg :: proc(w: io.Writer, path: string, pkg: ^doc.Pkg) {
fmt.wprintln(w, "")
case .Variable:
fmt.wprint(w, "")
+ write_attributes(w, e)
fmt.wprintf(w, "%s: ", name)
write_type(writer, types[e.type], {.Allow_Indent})
init_string := str(e.init_string)
@@ -804,12 +894,35 @@ write_pkg :: proc(w: io.Writer, path: string, pkg: ^doc.Pkg) {
fmt.wprintln(w, "Source Files ")
fmt.wprintln(w, " ")
- for file_index in array(pkg.files) {
+ any_hidden := false
+ source_file_loop: for file_index in array(pkg.files) {
file := files[file_index]
filename := slashpath.base(str(file.name))
- fmt.wprintf(w, `%s `, path, filename, filename)
+ switch {
+ case
+ strings.has_suffix(filename, "_windows.odin"),
+ strings.has_suffix(filename, "_darwin.odin"),
+ strings.has_suffix(filename, "_essence.odin"),
+ strings.has_suffix(filename, "_freebsd.odin"),
+ strings.has_suffix(filename, "_wasi.odin"),
+ strings.has_suffix(filename, "_js.odin"),
+ strings.has_suffix(filename, "_freestanding.odin"),
+
+ strings.has_suffix(filename, "_amd64.odin"),
+ strings.has_suffix(filename, "_i386.odin"),
+ strings.has_suffix(filename, "_arch64.odin"),
+ strings.has_suffix(filename, "_wasm32.odin"),
+ strings.has_suffix(filename, "_wasm64.odin"),
+ false:
+ any_hidden = true
+ continue source_file_loop
+ }
+ fmt.wprintf(w, `%s `, GITHUB_CORE_URL, path, filename, filename)
fmt.wprintln(w)
}
+ if any_hidden {
+ fmt.wprintln(w, "(hidden platform specific files) ")
+ }
fmt.wprintln(w, " ")
}
\ No newline at end of file
diff --git a/tools/odin-html-docs/style.css b/tools/odin-html-docs/style.css
index 1f334ad91..61cab3e8c 100644
--- a/tools/odin-html-docs/style.css
+++ b/tools/odin-html-docs/style.css
@@ -1,3 +1,8 @@
+* {
+ font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Helvetica,Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji";
+}
+
+
.container {
max-width: 60em;
margin: 0 auto;
@@ -15,8 +20,11 @@
}
pre {
- white-space: pre;
+ white-space: pre-wrap;
+ word-break: break-all;
+ word-wrap: break-word;
tab-size: 8;
+ font-family: Consolas,Liberation Mono,Menlo,monospace!important;
background-color: #f8f8f8;
color: #202224;
border: 1px solid #c6c8ca;
@@ -24,12 +32,28 @@ pre {
padding: 0.625rem;
}
-.documentation pre a {
+pre a {
+ font-family: Consolas,Liberation Mono,Menlo,monospace!important;
text-decoration: none;
- font-weight: bold;
+ /*font-weight: bold;*/
color: #00bfd5;
}
-pre a.code-procedure {
+.documentation pre a.code-procedure {
color: #079300;
}
+
+.documentation-source {
+ text-decoration: none;
+ color: #666666;
+}
+.documentation-source:hover {
+ text-decoration: underline;
+}
+
+a > .a-hidden {
+ opacity: 0;
+}
+a:hover > .a-hidden {
+ opacity: 100;
+}
\ No newline at end of file
From cafb6e5587d4d3f5211b945bcb9d949a3980aa89 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Mon, 17 Jan 2022 21:33:20 +0000
Subject: [PATCH 124/710] Correct `//+private` for `odin doc`
---
src/checker.cpp | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/src/checker.cpp b/src/checker.cpp
index ddb73d33e..44dc90c67 100644
--- a/src/checker.cpp
+++ b/src/checker.cpp
@@ -3446,6 +3446,13 @@ void check_collect_value_decl(CheckerContext *c, Ast *decl) {
}
}
+ if (entity_visibility_kind == EntityVisiblity_Public &&
+ (c->scope->flags&ScopeFlag_File) &&
+ c->scope->file &&
+ (c->scope->file->flags & AstFile_IsPrivate)) {
+ entity_visibility_kind = EntityVisiblity_PrivateToPackage;
+ }
+
if (entity_visibility_kind != EntityVisiblity_Public && !(c->scope->flags&ScopeFlag_File)) {
error(decl, "Attribute 'private' is not allowed on a non file scope entity");
}
From c7a9c8274fc212ec421d46c2c58f36afdc949898 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Mon, 17 Jan 2022 22:16:32 +0000
Subject: [PATCH 125/710] Improve type printing
---
tools/odin-html-docs/odin_html_docs_main.odin | 243 +++++++++++++-----
tools/odin-html-docs/style.css | 32 ++-
2 files changed, 205 insertions(+), 70 deletions(-)
diff --git a/tools/odin-html-docs/odin_html_docs_main.odin b/tools/odin-html-docs/odin_html_docs_main.odin
index 4260bd697..988c54d9d 100644
--- a/tools/odin-html-docs/odin_html_docs_main.odin
+++ b/tools/odin-html-docs/odin_html_docs_main.odin
@@ -45,6 +45,14 @@ base_type :: proc(t: doc.Type) -> doc.Type {
return t
}
+is_type_untyped :: proc(type: doc.Type) -> bool {
+ if type.kind == .Basic {
+ flags := transmute(doc.Type_Flags_Basic)type.flags
+ return .Untyped in flags
+ }
+ return false
+}
+
common_prefix :: proc(strs: []string) -> string {
if len(strs) == 0 {
return ""
@@ -275,7 +283,9 @@ write_core_directory :: proc(w: io.Writer) {
line_doc, _, _ := strings.partition(str(dir.pkg.docs), "\n")
line_doc = strings.trim_space(line_doc)
if line_doc != "" {
- fmt.wprintf(w, `%s `, line_doc)
+ io.write_string(w, ``)
+ write_doc_line(w, line_doc)
+ io.write_string(w, ` `)
}
}
fmt.wprintf(w, "\n")
@@ -289,7 +299,9 @@ write_core_directory :: proc(w: io.Writer) {
line_doc, _, _ := strings.partition(str(child.pkg.docs), "\n")
line_doc = strings.trim_space(line_doc)
if line_doc != "" {
- fmt.wprintf(w, `%s `, line_doc)
+ io.write_string(w, ``)
+ write_doc_line(w, line_doc)
+ io.write_string(w, ` `)
}
fmt.wprintf(w, "\n")
@@ -305,16 +317,31 @@ is_entity_blank :: proc(e: doc.Entity_Index) -> bool {
return name == "" || name == "_"
}
+write_where_clauses :: proc(w: io.Writer, where_clauses: []doc.String) {
+ if len(where_clauses) != 0 {
+ io.write_string(w, " where ")
+ for clause, i in where_clauses {
+ if i > 0 {
+ io.write_string(w, ", ")
+ }
+ io.write_string(w, str(clause))
+ }
+ }
+}
+
+
Write_Type_Flag :: enum {
Is_Results,
Variadic,
Allow_Indent,
+ Poly_Names,
}
Write_Type_Flags :: distinct bit_set[Write_Type_Flag]
Type_Writer :: struct {
w: io.Writer,
pkg: doc.Pkg_Index,
indent: int,
+ generic_scope: map[string]bool,
}
write_type :: proc(using writer: ^Type_Writer, type: doc.Type, flags: Write_Type_Flags) {
@@ -329,30 +356,65 @@ write_type :: proc(using writer: ^Type_Writer, type: doc.Type, flags: Write_Type
if .Param_Any_Int in e.flags { io.write_string(w, "#any_int ") }
init_string := str(e.init_string)
- switch init_string {
- case "#caller_location":
+ switch {
+ case init_string == "#caller_location":
assert(name != "")
io.write_string(w, name)
io.write_string(w, " := ")
io.write_string(w, ``)
io.write_string(w, init_string)
io.write_string(w, ` `)
-
+ case strings.has_prefix(init_string, "context."):
+ io.write_string(w, name)
+ io.write_string(w, " := ")
+ io.write_string(w, ``)
+ io.write_string(w, init_string)
+ io.write_string(w, ` `)
case:
- if name != "" {
- io.write_string(w, name)
- io.write_string(w, ": ")
- }
- padding := max(name_width-len(name), 0)
- for _ in 0.. 0 {
- io.write_string(w, ", ")
- }
- write_param_entity(writer, &entities[entity_index], flags)
+ if type.polymorphic_params != 0 {
+ io.write_byte(w, '(')
+ write_type(writer, types[type.polymorphic_params], flags+{.Poly_Names})
+ io.write_byte(w, ')')
}
- io.write_byte(w, ')')
+
+ write_where_clauses(w, array(type.where_clauses))
}
do_indent :: proc(using writer: ^Type_Writer, flags: Write_Type_Flags) {
if .Allow_Indent not_in flags {
@@ -400,7 +461,7 @@ write_type :: proc(using writer: ^Type_Writer, type: doc.Type, flags: Write_Type
// ignore
case .Basic:
type_flags := transmute(doc.Type_Flags_Basic)type.flags
- if .Untyped in type_flags {
+ if is_type_untyped(type) {
io.write_string(w, str(type.name))
} else {
fmt.wprintf(w, `%s `, str(type.name))
@@ -408,17 +469,23 @@ write_type :: proc(using writer: ^Type_Writer, type: doc.Type, flags: Write_Type
case .Named:
e := entities[type_entites[0]]
name := str(type.name)
- fmt.wprintf(w, ``)
tn_pkg := files[e.pos.file].pkg
if tn_pkg != pkg {
fmt.wprintf(w, `%s.`, str(pkgs[tn_pkg].name))
}
- fmt.wprintf(w, `{1:s} `, pkg_to_path[&pkgs[tn_pkg]], name)
+ if n := strings.contains_rune(name, '('); n >= 0 {
+ fmt.wprintf(w, `{1:s} `, pkg_to_path[&pkgs[tn_pkg]], name[:n])
+ io.write_string(w, name[n:])
+ } else {
+ fmt.wprintf(w, `{1:s} `, pkg_to_path[&pkgs[tn_pkg]], name)
+ }
case .Generic:
name := str(type.name)
- io.write_byte(w, '$')
+ if name not_in generic_scope {
+ io.write_byte(w, '$')
+ }
io.write_string(w, name)
- if len(array(type.types)) == 1 {
+ if name not_in generic_scope && len(array(type.types)) == 1 {
io.write_byte(w, '/')
write_type(writer, types[type_types[0]], flags)
}
@@ -454,9 +521,7 @@ write_type :: proc(using writer: ^Type_Writer, type: doc.Type, flags: Write_Type
case .Struct:
type_flags := transmute(doc.Type_Flags_Struct)type.flags
io.write_string(w, "struct")
- if .Polymorphic in type_flags {
- write_poly_params(writer, type, flags)
- }
+ write_poly_params(writer, type, flags)
if .Packed in type_flags { io.write_string(w, " #packed") }
if .Raw_Union in type_flags { io.write_string(w, " #raw_union") }
if custom_align := str(type.custom_align); custom_align != "" {
@@ -483,9 +548,7 @@ write_type :: proc(using writer: ^Type_Writer, type: doc.Type, flags: Write_Type
case .Union:
type_flags := transmute(doc.Type_Flags_Union)type.flags
io.write_string(w, "union")
- if .Polymorphic in type_flags {
- write_poly_params(writer, type, flags)
- }
+ write_poly_params(writer, type, flags)
if .No_Nil in type_flags { io.write_string(w, " #no_nil") }
if .Maybe in type_flags { io.write_string(w, " #maybe") }
if custom_align := str(type.custom_align); custom_align != "" {
@@ -631,6 +694,25 @@ write_type :: proc(using writer: ^Type_Writer, type: doc.Type, flags: Write_Type
}
}
+write_doc_line :: proc(w: io.Writer, text: string) {
+ text := text
+ for len(text) != 0 {
+ if strings.count(text, "`") >= 2 {
+ n := strings.index_byte(text, '`')
+ io.write_string(w, text[:n])
+ io.write_string(w, "")
+ remaining := text[n+1:]
+ m := strings.index_byte(remaining, '`')
+ io.write_string(w, remaining[:m])
+ io.write_string(w, "")
+ text = remaining[m+1:]
+ } else {
+ io.write_string(w, text)
+ return
+ }
+ }
+}
+
write_docs :: proc(w: io.Writer, pkg: ^doc.Pkg, docs: string) {
if docs == "" {
return
@@ -663,8 +745,11 @@ write_docs :: proc(w: io.Writer, pkg: ^doc.Pkg, docs: string) {
fmt.wprintln(w, "")
}
assert(!was_code)
+
was_paragraph = true
- fmt.wprintln(w, text)
+ write_doc_line(w, text)
+
+ io.write_byte(w, '\n')
}
if was_code {
// assert(!was_paragraph, str(pkg.name))
@@ -677,6 +762,24 @@ write_docs :: proc(w: io.Writer, pkg: ^doc.Pkg, docs: string) {
}
write_pkg :: proc(w: io.Writer, path: string, pkg: ^doc.Pkg) {
+ write_breadcrumbs :: proc(w: io.Writer, path: string) {
+ dirs := strings.split(path, "/")
+ io.write_string(w, "
\n")
+ for dir, i in dirs {
+ url := strings.join(dirs[:i+1], "/")
+ short_path := strings.join(dirs[1:i+1], "/")
+ if i == 0 || short_path in pkgs_to_use {
+ fmt.wprintf(w, "%s ", url, dir)
+ } else {
+ fmt.wprintf(w, "%s ", dir)
+ }
+ }
+ io.write_string(w, " \n")
+
+ }
+ write_breadcrumbs(w, fmt.tprintf("core/%s", path))
+
+
fmt.wprintf(w, "package core:%s \n", path)
fmt.wprintln(w, "Documentation ")
docs := strings.trim_space(str(pkg.docs))
@@ -723,7 +826,7 @@ write_pkg :: proc(w: io.Writer, path: string, pkg: ^doc.Pkg) {
slice.sort_by_key(pkg_vars[:], entity_key)
slice.sort_by_key(pkg_consts[:], entity_key)
- print_index :: proc(w: io.Writer, name: string, entities: []^doc.Entity) {
+ write_index :: proc(w: io.Writer, name: string, entities: []^doc.Entity) {
fmt.wprintf(w, "%s \n", name)
fmt.wprintln(w, ``)
if len(entities) == 0 {
@@ -740,16 +843,16 @@ write_pkg :: proc(w: io.Writer, path: string, pkg: ^doc.Pkg) {
}
- print_index(w, "Procedures", pkg_procs[:])
- print_index(w, "Procedure Groups", pkg_proc_groups[:])
- print_index(w, "Types", pkg_types[:])
- print_index(w, "Variables", pkg_vars[:])
- print_index(w, "Constants", pkg_consts[:])
+ write_index(w, "Procedures", pkg_procs[:])
+ write_index(w, "Procedure Groups", pkg_proc_groups[:])
+ write_index(w, "Types", pkg_types[:])
+ write_index(w, "Variables", pkg_vars[:])
+ write_index(w, "Constants", pkg_consts[:])
fmt.wprintln(w, " ")
- print_entity :: proc(w: io.Writer, e: ^doc.Entity) {
+ write_entity :: proc(w: io.Writer, e: ^doc.Entity) {
write_attributes :: proc(w: io.Writer, e: ^doc.Entity) {
for attr in array(e.attributes) {
io.write_string(w, "@(")
@@ -764,23 +867,24 @@ write_pkg :: proc(w: io.Writer, path: string, pkg: ^doc.Pkg) {
}
}
-
pkg_index := files[e.pos.file].pkg
pkg := &pkgs[pkg_index]
writer := &Type_Writer{
w = w,
pkg = pkg_index,
}
+ defer delete(writer.generic_scope)
name := str(e.name)
path := pkg_to_path[pkg]
filename := slashpath.base(str(files[e.pos.file].name))
- fmt.wprintf(w, "\n")
- defer if e.pos.file != 0 && e.pos.line > 0 {
+ fmt.wprintf(w, "{0:s}", name)
+ fmt.wprintf(w, " ¶ ")
+ if e.pos.file != 0 && e.pos.line > 0 {
src_url := fmt.tprintf("%s/%s/%s#L%d", GITHUB_CORE_URL, path, filename, e.pos.line)
- fmt.wprintf(w, "Source: {0:s} ", src_url)
+ fmt.wprintf(w, "", src_url)
}
+ fmt.wprintf(w, " \n")
switch e.kind {
case .Invalid, .Import_Name, .Library_Name:
@@ -788,7 +892,21 @@ write_pkg :: proc(w: io.Writer, path: string, pkg: ^doc.Pkg) {
case .Constant:
fmt.wprint(w, "")
the_type := types[e.type]
- if the_type.kind == .Basic && .Untyped in (transmute(doc.Type_Flags_Basic)the_type.flags) {
+
+ init_string := str(e.init_string)
+ assert(init_string != "")
+
+ ignore_type := true
+ if the_type.kind == .Basic && is_type_untyped(the_type) {
+ } else {
+ ignore_type = false
+ type_name := str(the_type.name)
+ if type_name != "" && strings.has_prefix(init_string, type_name) {
+ ignore_type = true
+ }
+ }
+
+ if ignore_type {
fmt.wprintf(w, "%s :: ", name)
} else {
fmt.wprintf(w, "%s: ", name)
@@ -796,8 +914,7 @@ write_pkg :: proc(w: io.Writer, path: string, pkg: ^doc.Pkg) {
fmt.wprintf(w, " : ")
}
- init_string := str(e.init_string)
- assert(init_string != "")
+
io.write_string(w, init_string)
fmt.wprintln(w, " ")
case .Variable:
@@ -835,17 +952,7 @@ write_pkg :: proc(w: io.Writer, path: string, pkg: ^doc.Pkg) {
fmt.wprint(w, "")
fmt.wprintf(w, "%s :: ", name)
write_type(writer, types[e.type], nil)
- where_clauses := array(e.where_clauses)
- if len(where_clauses) != 0 {
- io.write_string(w, " where ")
- for clause, i in where_clauses {
- if i > 0 {
- io.write_string(w, ", ")
- }
- io.write_string(w, str(clause))
- }
- }
-
+ write_where_clauses(w, array(e.where_clauses))
fmt.wprint(w, " {…}")
fmt.wprintln(w, " ")
case .Proc_Group:
@@ -872,24 +979,24 @@ write_pkg :: proc(w: io.Writer, path: string, pkg: ^doc.Pkg) {
write_docs(w, pkg, strings.trim_space(str(e.docs)))
}
- print_entities :: proc(w: io.Writer, title: string, entities: []^doc.Entity) {
+ write_entities :: proc(w: io.Writer, title: string, entities: []^doc.Entity) {
fmt.wprintf(w, "%s \n", title)
fmt.wprintln(w, ``)
if len(entities) == 0 {
io.write_string(w, "This section is empty.
\n")
} else {
for e in entities {
- print_entity(w, e)
+ write_entity(w, e)
}
}
fmt.wprintln(w, " ")
}
- print_entities(w, "Procedures", pkg_procs[:])
- print_entities(w, "Procedure Groups", pkg_proc_groups[:])
- print_entities(w, "Types", pkg_types[:])
- print_entities(w, "Variables", pkg_vars[:])
- print_entities(w, "Constants", pkg_consts[:])
+ write_entities(w, "Procedures", pkg_procs[:])
+ write_entities(w, "Procedure Groups", pkg_proc_groups[:])
+ write_entities(w, "Types", pkg_types[:])
+ write_entities(w, "Variables", pkg_vars[:])
+ write_entities(w, "Constants", pkg_consts[:])
fmt.wprintln(w, "Source Files ")
diff --git a/tools/odin-html-docs/style.css b/tools/odin-html-docs/style.css
index 61cab3e8c..cf43a7199 100644
--- a/tools/odin-html-docs/style.css
+++ b/tools/odin-html-docs/style.css
@@ -21,7 +21,7 @@
pre {
white-space: pre-wrap;
- word-break: break-all;
+ word-break: keep-all;
word-wrap: break-word;
tab-size: 8;
font-family: Consolas,Liberation Mono,Menlo,monospace!important;
@@ -44,10 +44,15 @@ pre a {
}
.documentation-source {
+ display: inline;
+ float: right;
+}
+
+.documentation-source a {
text-decoration: none;
color: #666666;
}
-.documentation-source:hover {
+.documentation-source a:hover {
text-decoration: underline;
}
@@ -56,4 +61,27 @@ a > .a-hidden {
}
a:hover > .a-hidden {
opacity: 100;
+}
+
+ul.documentation-breadcrumb {
+ list-style: none;
+}
+
+ul.documentation-breadcrumb li {
+ display: inline;
+}
+
+ul.documentation-breadcrumb li+li:before {
+ padding: 0.2rem;
+ color: black;
+ content: "/\00a0";
+}
+
+.code-inline {
+ font-family: Consolas,Liberation Mono,Menlo,monospace!important;
+ background-color: #f8f8f8;
+ color: #202224;
+ border: 1px solid #c6c8ca;
+ border-radius: 0.25rem;
+ padding: 0.125rem;
}
\ No newline at end of file
From fb01dfe04845a489760956cea4f0019e1464b2e3 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Mon, 17 Jan 2022 22:17:07 +0000
Subject: [PATCH 126/710] Improve docs_writer.cpp
---
core/math/big/doc.odin | 22 ----------------------
core/math/big/internal.odin | 24 +++++++++++++++++++++++-
core/math/big/tune.odin | 3 +--
src/docs_writer.cpp | 2 +-
src/types.cpp | 14 +++++---------
5 files changed, 30 insertions(+), 35 deletions(-)
diff --git a/core/math/big/doc.odin b/core/math/big/doc.odin
index f5e0900f5..0f9b88d01 100644
--- a/core/math/big/doc.odin
+++ b/core/math/big/doc.odin
@@ -2,27 +2,5 @@
A BigInt implementation in Odin.
For the theoretical underpinnings, see Knuth's The Art of Computer Programming, Volume 2, section 4.3.
The code started out as an idiomatic source port of libTomMath, which is in the public domain, with thanks.
-
-========================== Low-level routines ==========================
-
-IMPORTANT: `internal_*` procedures make certain assumptions about their input.
-
-The public functions that call them are expected to satisfy their sanity check requirements.
-This allows `internal_*` call `internal_*` without paying this overhead multiple times.
-
-Where errors can occur, they are of course still checked and returned as appropriate.
-
-When importing `math:core/big` to implement an involved algorithm of your own, you are welcome
-to use these procedures instead of their public counterparts.
-
-Most inputs and outputs are expected to be passed an initialized `Int`, for example.
-Exceptions include `quotient` and `remainder`, which are allowed to be `nil` when the calling code doesn't need them.
-
-Check the comments above each `internal_*` implementation to see what constraints it expects to have met.
-
-We pass the custom allocator to procedures by default using the pattern `context.allocator = allocator`.
-This way we don't have to add `, allocator` at the end of each call.
-
-TODO: Handle +/- Infinity and NaN.
*/
package math_big
diff --git a/core/math/big/internal.odin b/core/math/big/internal.odin
index 5085898e5..dbcd16509 100644
--- a/core/math/big/internal.odin
+++ b/core/math/big/internal.odin
@@ -1,10 +1,32 @@
-//+ignore
/*
Copyright 2021 Jeroen van Rijn .
Made available under Odin's BSD-3 license.
+
+ ========================== Low-level routines ==========================
+
+ IMPORTANT: `internal_*` procedures make certain assumptions about their input.
+
+ The public functions that call them are expected to satisfy their sanity check requirements.
+ This allows `internal_*` call `internal_*` without paying this overhead multiple times.
+
+ Where errors can occur, they are of course still checked and returned as appropriate.
+
+ When importing `math:core/big` to implement an involved algorithm of your own, you are welcome
+ to use these procedures instead of their public counterparts.
+
+ Most inputs and outputs are expected to be passed an initialized `Int`, for example.
+ Exceptions include `quotient` and `remainder`, which are allowed to be `nil` when the calling code doesn't need them.
+
+ Check the comments above each `internal_*` implementation to see what constraints it expects to have met.
+
+ We pass the custom allocator to procedures by default using the pattern `context.allocator = allocator`.
+ This way we don't have to add `, allocator` at the end of each call.
+
+ TODO: Handle +/- Infinity and NaN.
*/
+//+ignore
package math_big
import "core:mem"
diff --git a/core/math/big/tune.odin b/core/math/big/tune.odin
index 64a73b656..78a20c12b 100644
--- a/core/math/big/tune.odin
+++ b/core/math/big/tune.odin
@@ -1,4 +1,3 @@
-//+ignore
/*
Copyright 2021 Jeroen van Rijn .
Made available under Odin's BSD-3 license.
@@ -8,7 +7,7 @@
The code started out as an idiomatic source port of libTomMath, which is in the public domain, with thanks.
*/
-
+//+ignore
package math_big
import "core:time"
diff --git a/src/docs_writer.cpp b/src/docs_writer.cpp
index 94b43be99..762a2afe1 100644
--- a/src/docs_writer.cpp
+++ b/src/docs_writer.cpp
@@ -513,7 +513,7 @@ OdinDocTypeIndex odin_doc_type(OdinDocWriter *w, Type *type) {
break;
case Type_Generic:
doc_type.kind = OdinDocType_Generic;
- doc_type.name = odin_doc_write_string(w, type->Generic.name);
+ doc_type.name = odin_doc_write_string(w, type->Generic.entity->token.string);
if (type->Generic.specialized) {
doc_type.types = odin_doc_type_as_slice(w, type->Generic.specialized);
}
diff --git a/src/types.cpp b/src/types.cpp
index f621d4346..6162a5aa8 100644
--- a/src/types.cpp
+++ b/src/types.cpp
@@ -3933,7 +3933,7 @@ gbString write_type_to_string(gbString str, Type *type) {
str = gb_string_appendc(str, " = ");
str = write_exact_value_to_string(str, var->Constant.value);
} else {
- str = gb_string_appendc(str, "=");
+ str = gb_string_appendc(str, " := ");
str = write_exact_value_to_string(str, var->Constant.value);
}
continue;
@@ -3961,14 +3961,10 @@ gbString write_type_to_string(gbString str, Type *type) {
str = gb_string_appendc(str, "typeid/");
str = write_type_to_string(str, var->type);
} else {
- if (var->kind == Entity_TypeName) {
- str = gb_string_appendc(str, "$");
- str = gb_string_append_length(str, name.text, name.len);
- str = gb_string_appendc(str, "=");
- str = write_type_to_string(str, var->type);
- } else {
- str = gb_string_appendc(str, "typeid");
- }
+ str = gb_string_appendc(str, "$");
+ str = gb_string_append_length(str, name.text, name.len);
+ str = gb_string_appendc(str, "=");
+ str = write_type_to_string(str, var->type);
}
}
}
From 6b830f42b6a8baec77ee0c8d12333ca2ad4a296f Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Mon, 17 Jan 2022 23:48:46 +0000
Subject: [PATCH 127/710] Improve stylization with collapsible directories; Fix
name padding
---
tools/odin-html-docs/odin_html_docs_main.odin | 75 ++++++++++++++++---
tools/odin-html-docs/style.css | 63 ++++++++++++++--
2 files changed, 121 insertions(+), 17 deletions(-)
diff --git a/tools/odin-html-docs/odin_html_docs_main.odin b/tools/odin-html-docs/odin_html_docs_main.odin
index 988c54d9d..317d95a5a 100644
--- a/tools/odin-html-docs/odin_html_docs_main.odin
+++ b/tools/odin-html-docs/odin_html_docs_main.odin
@@ -107,6 +107,51 @@ write_html_header :: proc(w: io.Writer, title: string) {
}
write_html_footer :: proc(w: io.Writer) {
+ io.write_string(w, `
+
+`)
fmt.wprintf(w, "