Mirror of https://github.com/odin-lang/Odin.git (synced 2026-01-05 20:48:04 +00:00)

Merge branch 'master' into docs-simd

.github/workflows/ci.yml (vendored, 37 changes)
@@ -32,6 +32,8 @@ jobs:
gmake -C vendor/miniaudio/src
./odin check examples/all -vet -strict-style -disallow-do -target:netbsd_amd64
./odin check examples/all -vet -strict-style -disallow-do -target:netbsd_arm64
./odin check vendor/sdl3 -vet -strict-style -disallow-do -target:netbsd_amd64 -no-entry-point
./odin check vendor/sdl3 -vet -strict-style -disallow-do -target:netbsd_arm64 -no-entry-point
./odin test tests/core/normal.odin -file -all-packages -vet -strict-style -disallow-do -define:ODIN_TEST_FANCY=false -define:ODIN_TEST_FAIL_ON_BAD_MEMORY=true
./odin test tests/core/speed.odin -file -all-packages -vet -strict-style -disallow-do -o:speed -define:ODIN_TEST_FANCY=false -define:ODIN_TEST_FAIL_ON_BAD_MEMORY=true
./odin test tests/vendor -all-packages -vet -strict-style -disallow-do -define:ODIN_TEST_FANCY=false -define:ODIN_TEST_FAIL_ON_BAD_MEMORY=true
@@ -62,6 +64,7 @@ jobs:
gmake -C vendor/cgltf/src
gmake -C vendor/miniaudio/src
./odin check examples/all -vet -strict-style -disallow-do -target:freebsd_amd64
./odin check vendor/sdl3 -vet -strict-style -disallow-do -target:freebsd_amd64 -no-entry-point
./odin test tests/core/normal.odin -file -all-packages -vet -strict-style -disallow-do -define:ODIN_TEST_FANCY=false -define:ODIN_TEST_FAIL_ON_BAD_MEMORY=true
./odin test tests/core/speed.odin -file -all-packages -vet -strict-style -disallow-do -o:speed -define:ODIN_TEST_FANCY=false -define:ODIN_TEST_FAIL_ON_BAD_MEMORY=true
./odin test tests/vendor -all-packages -vet -strict-style -disallow-do -define:ODIN_TEST_FANCY=false -define:ODIN_TEST_FAIL_ON_BAD_MEMORY=true
@@ -117,7 +120,9 @@ jobs:
- name: Odin run -debug
run: ./odin run examples/demo -debug
- name: Odin check examples/all
run: ./odin check examples/all -strict-style
run: ./odin check examples/all -strict-style -vet -disallow-do
- name: Odin check vendor/sdl3
run: ./odin check vendor/sdl3 -strict-style -vet -disallow-do -no-entry-point
- name: Normal Core library tests
run: ./odin test tests/core/normal.odin -file -all-packages -vet -strict-style -disallow-do -define:ODIN_TEST_FANCY=false -define:ODIN_TEST_FAIL_ON_BAD_MEMORY=true
- name: Optimized Core library tests
@@ -146,6 +151,20 @@ jobs:
run: ./odin check examples/all -vet -strict-style -disallow-do -target:openbsd_amd64
if: matrix.os == 'ubuntu-latest'

- name: Odin check vendor/sdl3 for Linux i386
run: ./odin check vendor/sdl3 -vet -strict-style -disallow-do -no-entry-point -target:linux_i386
if: matrix.os == 'ubuntu-latest'
- name: Odin check vendor/sdl3 for Linux arm64
run: ./odin check vendor/sdl3 -vet -strict-style -disallow-do -no-entry-point -target:linux_arm64
if: matrix.os == 'ubuntu-latest'
- name: Odin check vendor/sdl3 for FreeBSD amd64
run: ./odin check vendor/sdl3 -vet -strict-style -disallow-do -no-entry-point -target:freebsd_amd64
if: matrix.os == 'ubuntu-latest'
- name: Odin check vendor/sdl3 for OpenBSD amd64
run: ./odin check vendor/sdl3 -vet -strict-style -disallow-do -no-entry-point -target:openbsd_amd64
if: matrix.os == 'ubuntu-latest'


- name: Run demo on WASI WASM32
run: |
./odin build examples/demo -target:wasi_wasm32 -vet -strict-style -disallow-do -out:demo.wasm
@@ -187,6 +206,11 @@ jobs:
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
odin check examples/all -vet -strict-style -disallow-do
- name: Odin check vendor/sdl3
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
odin check vendor/sdl3 -vet -strict-style -disallow-do -no-entry-point
- name: Core library tests
shell: cmd
run: |
@@ -208,6 +232,12 @@ jobs:
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
odin test tests/internal -all-packages -vet -strict-style -disallow-do -define:ODIN_TEST_FANCY=false -define:ODIN_TEST_FAIL_ON_BAD_MEMORY=true
- name: Check issues
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
cd tests/issues
call run.bat
- name: Check benchmarks
shell: cmd
run: |
@@ -260,9 +290,12 @@ jobs:
make -C vendor/cgltf/src
make -C vendor/miniaudio/src

- name: Odin check
- name: Odin check examples/all
run: ./odin check examples/all -target:linux_riscv64 -vet -strict-style -disallow-do

- name: Odin check vendor/sdl3
run: ./odin check vendor/sdl3 -target:linux_riscv64 -vet -strict-style -disallow-do -no-entry-point

- name: Install riscv64 toolchain and qemu
run: sudo apt-get install -y qemu-user qemu-user-static gcc-12-riscv64-linux-gnu libc6-riscv64-cross

@@ -76,6 +76,10 @@ Answers to common questions about Odin.

Documentation for all the official packages that are part of the [core](https://pkg.odin-lang.org/core/) and [vendor](https://pkg.odin-lang.org/vendor/) library collections.

#### [Examples](https://github.com/odin-lang/examples)

Examples on how to write idiomatic Odin code. Shows how to accomplish specific tasks in Odin, as well as how to use packages from `core` and `vendor`.

#### [Odin Documentation](https://odin-lang.org/docs/)

Documentation for the Odin language itself.

@@ -1,6 +1,8 @@
// This is purely for documentation
package builtin

import "base:runtime"

nil :: nil
false :: 0!=0
true :: 0==0

@@ -2,6 +2,8 @@
#+build ignore
package intrinsics

import "base:runtime"

// Package-Related
is_package_imported :: proc(package_name: string) -> bool ---

@@ -72,7 +74,7 @@ prefetch_write_instruction :: proc(address: rawptr, #const locality: i32 /* 0..=
prefetch_write_data :: proc(address: rawptr, #const locality: i32 /* 0..=3 */) ---

// Compiler Hints
expect :: proc(val, expected_val: T) -> T ---
expect :: proc(val, expected_val: $T) -> T ---

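As context for the `expect` compiler hint documented just above, here is a minimal usage sketch (illustrative only, not part of this diff; it assumes the intrinsic simply returns `val` while hinting the likely value to the optimizer):

	// Hypothetical example of using intrinsics.expect from ordinary Odin code.
	package expect_example

	import "base:intrinsics"

	process :: proc(n: int) -> int {
		// Hint that n is expected to be non-negative in the common case.
		if intrinsics.expect(n >= 0, true) {
			return n * 2
		}
		return 0
	}
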
// Linux and Darwin Only
syscall :: proc(id: uintptr, args: ..uintptr) -> uintptr ---
@@ -219,7 +221,7 @@ type_map_cell_info :: proc($T: typeid) -> ^runtime.Map_Cell_Info ---
type_convert_variants_to_pointers :: proc($T: typeid) -> typeid where type_is_union(T) ---
type_merge :: proc($U, $V: typeid) -> typeid where type_is_union(U), type_is_union(V) ---

type_has_shared_fields :: proc($U, $V: typeid) -> bool typeid where type_is_struct(U), type_is_struct(V) ---
type_has_shared_fields :: proc($U, $V: typeid) -> bool where type_is_struct(U), type_is_struct(V) ---

constant_utf16_cstring :: proc($literal: string) -> [^]u16 ---

@@ -283,6 +285,9 @@ simd_reduce_xor :: proc(a: #simd[N]T) -> T where type_is_integer(T) || t
simd_reduce_any :: proc(a: #simd[N]T) -> T where type_is_boolean(T) ---
simd_reduce_all :: proc(a: #simd[N]T) -> T where type_is_boolean(T) ---

simd_extract_lsbs :: proc(a: #simd[N]T) -> bit_set[0..<N] where type_is_integer(T) || type_is_boolean(T) ---
simd_extract_msbs :: proc(a: #simd[N]T) -> bit_set[0..<N] where type_is_integer(T) || type_is_boolean(T) ---


simd_gather :: proc(ptr: #simd[N]rawptr, val: #simd[N]T, mask: #simd[N]U) -> #simd[N]T where type_is_integer(U) || type_is_boolean(U) ---
simd_scatter :: proc(ptr: #simd[N]rawptr, val: #simd[N]T, mask: #simd[N]U) where type_is_integer(U) || type_is_boolean(U) ---

@@ -239,47 +239,6 @@ Type_Info :: struct {
	},
}

// NOTE(bill): This must match the compiler's
Typeid_Kind :: enum u8 {
	Invalid,
	Integer,
	Rune,
	Float,
	Complex,
	Quaternion,
	String,
	Boolean,
	Any,
	Type_Id,
	Pointer,
	Multi_Pointer,
	Procedure,
	Array,
	Enumerated_Array,
	Dynamic_Array,
	Slice,
	Tuple,
	Struct,
	Union,
	Enum,
	Map,
	Bit_Set,
	Simd_Vector,
	Matrix,
	Soa_Pointer,
	Bit_Field,
}
#assert(len(Typeid_Kind) < 32)

Typeid_Bit_Field :: bit_field uintptr {
	index:    uintptr | 8*size_of(uintptr) - 8,
	kind:     Typeid_Kind | 5, // Typeid_Kind
	named:    bool | 1,
	special:  bool | 1, // signed, cstring, etc
	reserved: bool | 1,
}
#assert(size_of(Typeid_Bit_Field) == size_of(uintptr))

// NOTE(bill): only the ones that are needed (not all types)
// This will be set by the compiler
type_table: []^Type_Info
@@ -483,10 +442,12 @@ Raw_Any :: struct {
	data: rawptr,
	id:   typeid,
}
#assert(size_of(Raw_Any) == size_of(any))

Raw_Cstring :: struct {
	data: [^]byte,
}
#assert(size_of(Raw_Cstring) == size_of(cstring))

Raw_Soa_Pointer :: struct {
	data: rawptr,
@@ -686,13 +647,16 @@ type_info_core :: proc "contextless" (info: ^Type_Info) -> ^Type_Info {
type_info_base_without_enum :: type_info_core

__type_info_of :: proc "contextless" (id: typeid) -> ^Type_Info #no_bounds_check {
	MASK :: 1<<(8*size_of(typeid) - 8) - 1
	data := transmute(uintptr)id
	n := int(data & MASK)
	if n < 0 || n >= len(type_table) {
		n = 0
	n := u64(len(type_table))
	i := transmute(u64)id % n
	for _ in 0..<n {
		ptr := type_table[i]
		if ptr != nil && ptr.id == id {
			return ptr
		}
		i = i+1 if i+1 < n else 0
	}
	return type_table[n]
	return type_table[0]
}

when !ODIN_NO_RTTI {

@@ -826,10 +826,12 @@ _resize_dynamic_array :: #force_inline proc(a: ^Raw_Dynamic_Array, size_of_elem,
		return nil
	}

	if should_zero && a.len < length {
		num_reused := min(a.cap, length) - a.len
		intrinsics.mem_zero(([^]byte)(a.data)[a.len*size_of_elem:], num_reused*size_of_elem)
	}

	if length <= a.cap {
		if should_zero && a.len < length {
			intrinsics.mem_zero(([^]byte)(a.data)[a.len*size_of_elem:], (length-a.len)*size_of_elem)
		}
		a.len = max(length, 0)
		return nil
	}
@@ -936,6 +938,32 @@ map_upsert :: proc(m: ^$T/map[$K]$V, key: K, value: V, loc := #caller_location)
	return
}

/*
Retrieves a pointer to the key and value for a possibly just inserted entry into the map.

If the `key` was not in the map `m`, an entry is inserted with the zero value and `just_inserted` will be `true`.
Otherwise the existing entry is left untouched and pointers to its key and value are returned.

If the map has to grow in order to insert the entry and the allocation fails, `err` is set and returned.

If `err` is `nil`, `key_ptr` and `value_ptr` are valid pointers and will not be `nil`.

WARN: User modification of the key pointed at by `key_ptr` should only be done if the new key is equal to (in hash) the old key.
If that is not the case you will corrupt the map.
*/
@(builtin, require_results)
map_entry :: proc(m: ^$T/map[$K]$V, key: K, loc := #caller_location) -> (key_ptr: ^K, value_ptr: ^V, just_inserted: bool, err: Allocator_Error) {
	key := key
	zero: V

	_key_ptr, _value_ptr: rawptr
	_key_ptr, _value_ptr, just_inserted, err = __dynamic_map_entry((^Raw_Map)(m), map_info(T), &key, &zero, loc)

	key_ptr = (^K)(_key_ptr)
	value_ptr = (^V)(_value_ptr)
	return
}

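To illustrate the `map_entry` documentation above, a short usage sketch (not part of this diff): it shows the "insert if missing, then update through the returned pointer" pattern the comment describes.

	// Hypothetical example of using the documented map_entry builtin.
	count_word :: proc(counts: ^map[string]int, word: string) {
		key_ptr, value_ptr, just_inserted, err := map_entry(counts, word)
		if err != nil {
			return // allocation failed while growing the map
		}
		// value_ptr points at the zero value when the key was just inserted,
		// otherwise at the existing entry; either way it is safe to update.
		value_ptr^ += 1
		_ = key_ptr
		_ = just_inserted
	}
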
@builtin
card :: proc "contextless" (s: $S/bit_set[$E; $U]) -> int {
@@ -964,6 +992,24 @@ assert :: proc(condition: bool, message := #caller_expression(condition), loc :=
	}
}

// Evaluates the condition and aborts the program iff the condition is
// false. This routine ignores `ODIN_DISABLE_ASSERT`, and will always
// execute.
@builtin
ensure :: proc(condition: bool, message := #caller_expression(condition), loc := #caller_location) {
	if !condition {
		@(cold)
		internal :: proc(message: string, loc: Source_Code_Location) {
			p := context.assertion_failure_proc
			if p == nil {
				p = default_assertion_failure_proc
			}
			p("unsatisfied ensure", message, loc)
		}
		internal(message, loc)
	}
}

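A small usage sketch for the `ensure` builtin documented above (illustrative, not part of this diff): unlike `assert`, it keeps running even when assertions are disabled, so it suits checks that must never be compiled out.

	// Hypothetical example: validate a value that must always hold.
	load_port :: proc(port: int) -> int {
		ensure(port > 0 && port < 65536, "port out of range")
		return port
	}
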
@builtin
panic :: proc(message: string, loc := #caller_location) -> ! {
	p := context.assertion_failure_proc
@@ -999,6 +1045,17 @@ assert_contextless :: proc "contextless" (condition: bool, message := #caller_ex
	}
}

@builtin
ensure_contextless :: proc "contextless" (condition: bool, message := #caller_expression(condition), loc := #caller_location) {
	if !condition {
		@(cold)
		internal :: proc "contextless" (message: string, loc: Source_Code_Location) {
			default_assertion_contextless_failure_proc("unsatisfied ensure", message, loc)
		}
		internal(message, loc)
	}
}

@builtin
panic_contextless :: proc "contextless" (message: string, loc := #caller_location) -> ! {
	default_assertion_contextless_failure_proc("panic", message, loc)

@@ -210,10 +210,24 @@ arena_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
	case size == 0:
		err = .Mode_Not_Implemented
		return
	case (uintptr(old_data) & uintptr(alignment-1) == 0) && size < old_size:
		// shrink data in-place
		data = old_data[:size]
		return
	case uintptr(old_data) & uintptr(alignment-1) == 0:
		if size < old_size {
			// shrink data in-place
			data = old_data[:size]
			return
		}

		if block := arena.curr_block; block != nil {
			start := uint(uintptr(old_memory)) - uint(uintptr(block.base))
			old_end := start + old_size
			new_end := start + size
			if start < old_end && old_end == block.used && new_end <= block.capacity {
				// grow data in-place, adjusting next allocation
				block.used = uint(new_end)
				data = block.base[start:new_end]
				return
			}
		}
	}

	new_memory := arena_alloc(arena, size, alignment, location) or_return
@@ -282,9 +296,10 @@ arena_temp_end :: proc(temp: Arena_Temp, loc := #caller_location) {

	if block := arena.curr_block; block != nil {
		assert(block.used >= temp.used, "out of order use of arena_temp_end", loc)
		amount_to_zero := min(block.used-temp.used, block.capacity-block.used)
		amount_to_zero := block.used-temp.used
		intrinsics.mem_zero(block.base[temp.used:], amount_to_zero)
		block.used = temp.used
		arena.total_used -= amount_to_zero
	}
}


@@ -158,21 +158,21 @@ map_cell_index_static :: #force_inline proc "contextless" (cells: [^]Map_Cell($T
} else when (N & (N - 1)) == 0 && N <= 8*size_of(uintptr) {
	// Likely case, N is a power of two because T is a power of two.

	// Unique case, no need to index data here since only one element.
	when N == 1 {
		return &cells[index].data[0]
	}

	// Compute the integer log 2 of N, this is the shift amount to index the
	// correct cell. Odin's intrinsics.count_leading_zeros does not produce a
	// constant, hence this approach. We only need to check up to N = 64.
	SHIFT :: 1 when N < 2 else
	         2 when N < 4 else
	         3 when N < 8 else
	         4 when N < 16 else
	         5 when N < 32 else 6
	SHIFT :: 1 when N == 2 else
	         2 when N == 4 else
	         3 when N == 8 else
	         4 when N == 16 else
	         5 when N == 32 else 6
	#assert(SHIFT <= MAP_CACHE_LINE_LOG2)
	// Unique case, no need to index data here since only one element.
	when N == 1 {
		return &cells[index >> SHIFT].data[0]
	} else {
		return &cells[index >> SHIFT].data[index & (N - 1)]
	}
	return &cells[index >> SHIFT].data[index & (N - 1)]
} else {
	// Least likely (and worst case), we pay for a division operation but we
	// assume the compiler does not actually generate a division. N will be in the
@@ -400,7 +400,7 @@ map_alloc_dynamic :: proc "odin" (info: ^Map_Info, log2_capacity: uintptr, alloc
// This procedure returns the address of the just inserted value, and will
// return 'nil' if there was no room to insert the entry
@(require_results)
map_insert_hash_dynamic :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, h: Map_Hash, ik: uintptr, iv: uintptr) -> (result: uintptr) {
map_insert_hash_dynamic_with_key :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, h: Map_Hash, ik: uintptr, iv: uintptr) -> (key: uintptr, result: uintptr) {
	h := h
	pos := map_desired_position(m^, h)
	distance := uintptr(0)
@@ -436,7 +436,11 @@ map_insert_hash_dynamic :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^
	intrinsics.mem_copy_non_overlapping(rawptr(v_dst), rawptr(v), size_of_v)
	hs[pos] = h

	return result if result != 0 else v_dst
	if result == 0 {
		key = k_dst
		result = v_dst
	}
	return
}

if map_hash_is_deleted(element_hash) {
@@ -444,13 +448,14 @@ map_insert_hash_dynamic :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^
}

if probe_distance := map_probe_distance(m^, element_hash, pos); distance > probe_distance {
	if result == 0 {
		result = map_cell_index_dynamic(vs, info.vs, pos)
	}

	kp := map_cell_index_dynamic(ks, info.ks, pos)
	vp := map_cell_index_dynamic(vs, info.vs, pos)

	if result == 0 {
		key = kp
		result = vp
	}

	intrinsics.mem_copy_non_overlapping(rawptr(tk), rawptr(k), size_of_k)
	intrinsics.mem_copy_non_overlapping(rawptr(k), rawptr(kp), size_of_k)
	intrinsics.mem_copy_non_overlapping(rawptr(kp), rawptr(tk), size_of_k)
@@ -491,7 +496,11 @@ map_insert_hash_dynamic :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^
	intrinsics.mem_copy_non_overlapping(rawptr(v_dst), rawptr(v), size_of_v)
	hs[pos] = h

	return result if result != 0 else v_dst
	if result == 0 {
		key = k_dst
		result = v_dst
	}
	return
}

k_src := map_cell_index_dynamic(ks, info.ks, la_pos)
@@ -501,6 +510,7 @@ map_insert_hash_dynamic :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^
if probe_distance < look_ahead {
	// probed can be made ideal while placing saved (ending condition)
	if result == 0 {
		key = k_dst
		result = v_dst
	}
	intrinsics.mem_copy_non_overlapping(rawptr(k_dst), rawptr(k), size_of_k)
@@ -550,6 +560,7 @@ map_insert_hash_dynamic :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^
} else {
	// place saved, save probed
	if result == 0 {
		key = k_dst
		result = v_dst
	}
	intrinsics.mem_copy_non_overlapping(rawptr(k_dst), rawptr(k), size_of_k)
@@ -568,6 +579,12 @@ map_insert_hash_dynamic :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^
	}
}

@(require_results)
map_insert_hash_dynamic :: #force_inline proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, h: Map_Hash, ik: uintptr, iv: uintptr) -> (result: uintptr) {
	_, result = map_insert_hash_dynamic_with_key(m, info, h, ik, iv)
	return
}

@(require_results)
map_grow_dynamic :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, loc := #caller_location) -> Allocator_Error {
	log2_capacity := map_log2_cap(m^)
@@ -941,6 +958,29 @@ __dynamic_map_set_extra :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^
	return nil, rawptr(result)
}

__dynamic_map_entry :: proc "odin" (#no_alias m: ^Raw_Map, #no_alias info: ^Map_Info, key: rawptr, zero: rawptr, loc := #caller_location) -> (key_ptr: rawptr, value_ptr: rawptr, just_inserted: bool, err: Allocator_Error) {
	hash := info.key_hasher(key, map_seed(m^))

	if key_ptr, value_ptr = __dynamic_map_get_key_and_value(m, info, hash, key); value_ptr != nil {
		return
	}

	has_grown: bool
	if err, has_grown = __dynamic_map_check_grow(m, info, loc); err != nil {
		return
	} else if has_grown {
		hash = info.key_hasher(key, map_seed(m^))
	}

	kp, vp := map_insert_hash_dynamic_with_key(m, info, hash, uintptr(key), uintptr(zero))
	key_ptr = rawptr(kp)
	value_ptr = rawptr(vp)

	m.len += 1
	just_inserted = true
	return
}


// IMPORTANT: USED WITHIN THE COMPILER
@(private)

@@ -1,6 +1,7 @@
#+private
package runtime

@(priority_index=-1e6)
foreign import "system:Foundation.framework"

import "base:intrinsics"

@@ -119,6 +119,7 @@ default_random_generator_proc :: proc(data: rawptr, mode: Random_Generator_Mode,
	}
}

@(require_results)
default_random_generator :: proc "contextless" (state: ^Default_Random_State = nil) -> Random_Generator {
	return {
		procedure = default_random_generator_proc,

@@ -19,7 +19,11 @@ if "%VSCMD_ARG_TGT_ARCH%" neq "x64" (
	)
)

for /f %%i in ('powershell get-date -format "{yyyyMMdd}"') do (
pushd misc
cl /nologo get-date.c
popd

for /f %%i in ('misc\get-date') do (
	set CURR_DATE_TIME=%%i
)
set curr_year=%CURR_DATE_TIME:~0,4%
@@ -58,7 +62,6 @@ set V4=0
set odin_version_full="%V1%.%V2%.%V3%.%V4%"
set odin_version_raw="dev-%V1%-%V2%"


set compiler_flags= -nologo -Oi -TP -fp:precise -Gm- -MP -FC -EHsc- -GR- -GF
rem Parse source code as utf-8 even on shift-jis and other codepages
rem See https://learn.microsoft.com/en-us/cpp/build/reference/utf-8-set-source-and-executable-character-sets-to-utf-8?view=msvc-170

@@ -9,7 +9,7 @@ set -eu
CPPFLAGS="$CPPFLAGS -DODIN_VERSION_RAW=\"dev-$(date +"%Y-%m")\""
CXXFLAGS="$CXXFLAGS -std=c++14"
DISABLED_WARNINGS="-Wno-switch -Wno-macro-redefined -Wno-unused-value"
LDFLAGS="$LDFLAGS -pthread -lm -lstdc++"
LDFLAGS="$LDFLAGS -pthread -lm"
OS_ARCH="$(uname -m)"
OS_NAME="$(uname -s)"

@@ -95,28 +95,28 @@ Darwin)
	;;
FreeBSD)
	CXXFLAGS="$CXXFLAGS $($LLVM_CONFIG --cxxflags --ldflags)"
	LDFLAGS="$LDFLAGS $($LLVM_CONFIG --libs core native --system-libs)"
	LDFLAGS="$LDFLAGS -lstdc++ $($LLVM_CONFIG --libs core native --system-libs)"
	;;
NetBSD)
	CXXFLAGS="$CXXFLAGS $($LLVM_CONFIG --cxxflags --ldflags)"
	LDFLAGS="$LDFLAGS $($LLVM_CONFIG --libs core native --system-libs)"
	LDFLAGS="$LDFLAGS -lstdc++ $($LLVM_CONFIG --libs core native --system-libs)"
	;;
Linux)
	CXXFLAGS="$CXXFLAGS $($LLVM_CONFIG --cxxflags --ldflags)"
	LDFLAGS="$LDFLAGS -ldl $($LLVM_CONFIG --libs core native --system-libs --libfiles)"
	LDFLAGS="$LDFLAGS -lstdc++ -ldl $($LLVM_CONFIG --libs core native --system-libs --libfiles)"
	# Copy libLLVM*.so into current directory for linking
	# NOTE: This is needed by the Linux release pipeline!
	# cp $(readlink -f $($LLVM_CONFIG --libfiles)) ./
	LDFLAGS="$LDFLAGS -Wl,-rpath=\$ORIGIN"
	;;
OpenBSD)
	CXXFLAGS="$CXXFLAGS $($LLVM_CONFIG --cxxflags --ldflags)"
	LDFLAGS="$LDFLAGS -liconv"
	CXXFLAGS="$CXXFLAGS -I/usr/local/include $($LLVM_CONFIG --cxxflags --ldflags)"
	LDFLAGS="$LDFLAGS -lstdc++ -L/usr/local/lib -liconv"
	LDFLAGS="$LDFLAGS $($LLVM_CONFIG --libs core native --system-libs)"
	;;
Haiku)
	CXXFLAGS="$CXXFLAGS $($LLVM_CONFIG --cxxflags --ldflags) -I/system/develop/headers/private/shared -I/system/develop/headers/private/kernel"
	LDFLAGS="$LDFLAGS -liconv"
	CXXFLAGS="$CXXFLAGS -D_GNU_SOURCE $($LLVM_CONFIG --cxxflags --ldflags) -I/system/develop/headers/private/shared -I/system/develop/headers/private/kernel"
	LDFLAGS="$LDFLAGS -lstdc++ -liconv"
	LDFLAGS="$LDFLAGS $($LLVM_CONFIG --libs core native --system-libs)"
	;;
*)

@@ -114,3 +114,5 @@ CHAR_BIT :: 8
va_list :: struct #align(16) {
	_: [4096]u8,
}

FILE :: struct {}

@@ -88,14 +88,15 @@ when ODIN_OS == .Haiku {
	_get_errno :: proc() -> ^int ---
}

@(private="file")
B_GENERAL_ERROR_BASE :: min(i32)
@(private="file")
B_POSIX_ERROR_BASE :: B_GENERAL_ERROR_BASE + 0x7000
_HAIKU_USE_POSITIVE_POSIX_ERRORS :: #config(HAIKU_USE_POSITIVE_POSIX_ERRORS, false)
_POSIX_ERROR_FACTOR :: -1 when _HAIKU_USE_POSITIVE_POSIX_ERRORS else 1

EDOM   :: B_POSIX_ERROR_BASE + 16
EILSEQ :: B_POSIX_ERROR_BASE + 38
ERANGE :: B_POSIX_ERROR_BASE + 17
@(private="file") _GENERAL_ERROR_BASE :: min(int)
@(private="file") _POSIX_ERROR_BASE :: _GENERAL_ERROR_BASE + 0x7000

EDOM   :: _POSIX_ERROR_FACTOR * (_POSIX_ERROR_BASE + 16)
EILSEQ :: _POSIX_ERROR_FACTOR * (_POSIX_ERROR_BASE + 38)
ERANGE :: _POSIX_ERROR_FACTOR * (_POSIX_ERROR_BASE + 17)
}

when ODIN_OS == .JS {

@@ -110,7 +110,7 @@ when ODIN_OS == .Windows {
	}
}

when ODIN_OS == .Darwin || ODIN_OS == .FreeBSD || ODIN_OS == .NetBSD || ODIN_OS == .OpenBSD || ODIN_OS == .Windows {
when ODIN_OS == .Darwin || ODIN_OS == .FreeBSD || ODIN_OS == .NetBSD || ODIN_OS == .OpenBSD || ODIN_OS == .Haiku || ODIN_OS == .Windows {

	LC_ALL     :: 0
	LC_COLLATE :: 1

@@ -1,5 +1,6 @@
package libc

import "core:c"
import "core:io"

when ODIN_OS == .Windows {
@@ -15,7 +16,7 @@ when ODIN_OS == .Windows {

// 7.21 Input/output

FILE :: struct {}
FILE :: c.FILE

Whence :: enum int {
	SET = SEEK_SET,

@@ -42,6 +42,21 @@ when ODIN_OS == .Linux {
	}
}

when ODIN_OS == .Haiku {
	RAND_MAX :: 0x7fffffff

	// GLIBC and MUSL only
	@(private="file")
	@(default_calling_convention="c")
	foreign libc {
		__ctype_get_mb_cur_max :: proc() -> ushort ---
	}

	MB_CUR_MAX :: #force_inline proc() -> size_t {
		return size_t(__ctype_get_mb_cur_max())
	}
}


when ODIN_OS == .Darwin || ODIN_OS == .FreeBSD || ODIN_OS == .OpenBSD {
	RAND_MAX :: 0x7fffffff

@@ -95,7 +95,7 @@ when ODIN_OS == .Linux || ODIN_OS == .FreeBSD || ODIN_OS == .Darwin || ODIN_OS =

time_t :: distinct i64

when ODIN_OS == .FreeBSD || ODIN_OS == .NetBSD {
when ODIN_OS == .FreeBSD || ODIN_OS == .NetBSD || ODIN_OS == .Haiku {
	clock_t :: distinct int32_t
} else {
	clock_t :: distinct long

@@ -46,8 +46,7 @@ init_with_contents :: proc(q: ^$Q/Queue($T), backing: []T) -> bool {
	cap = builtin.len(backing),
	allocator = {procedure=runtime.nil_allocator_proc, data=nil},
}
q.len = len(backing)
q.offset = len(backing)
q.len = builtin.len(backing)
return true
}


@@ -5,6 +5,7 @@
#+build !netbsd
#+build !darwin
#+build !js
#+build !wasi
package crypto

HAS_RAND_BYTES :: false

core/crypto/rand_wasi.odin (new file, 13 lines)
@@ -0,0 +1,13 @@
package crypto

import "core:fmt"
import "core:sys/wasm/wasi"

HAS_RAND_BYTES :: true

@(private)
_rand_bytes :: proc(dst: []byte) {
	if err := wasi.random_get(dst); err != nil {
		fmt.panicf("crypto: wasi.random_get failed: %v", err)
	}
}
@@ -13,6 +13,8 @@ _load_library :: proc(path: string, global_symbols: bool, allocator: runtime.All
flags := posix.RTLD_Flags{.NOW}
if global_symbols {
	flags += {.GLOBAL}
} else {
	flags += posix.RTLD_LOCAL
}

cpath := strings.clone_to_cstring(path, allocator)

@@ -1,148 +1,230 @@
package encoding_base32

// @note(zh): Encoding utility for Base32
// A secondary param can be used to supply a custom alphabet to
// @link(encode) and a matching decoding table to @link(decode).
// If none is supplied it just uses the standard Base32 alphabet.
// Incase your specific version does not use padding, you may
// truncate it from the encoded output.

ENC_TABLE := [32]byte {
	'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
	'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P',
	'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X',
	'Y', 'Z', '2', '3', '4', '5', '6', '7',
}

PADDING :: '='

DEC_TABLE := [?]u8 {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 26, 27, 28, 29, 30, 31, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 0, 0, 0, 0, 0,
	0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
}

encode :: proc(data: []byte, ENC_TBL := ENC_TABLE, allocator := context.allocator) -> string {
	out_length := (len(data) + 4) / 5 * 8
	out := make([]byte, out_length)
	_encode(out, data)
	return string(out)
}

@private
_encode :: proc(out, data: []byte, ENC_TBL := ENC_TABLE, allocator := context.allocator) {
	out := out
	data := data

	for len(data) > 0 {
		carry: byte
		switch len(data) {
		case:
			out[7] = ENC_TABLE[data[4] & 0x1f]
			carry = data[4] >> 5
			fallthrough
		case 4:
			out[6] = ENC_TABLE[carry | (data[3] << 3) & 0x1f]
			out[5] = ENC_TABLE[(data[3] >> 2) & 0x1f]
			carry = data[3] >> 7
			fallthrough
		case 3:
			out[4] = ENC_TABLE[carry | (data[2] << 1) & 0x1f]
			carry = (data[2] >> 4) & 0x1f
			fallthrough
		case 2:
			out[3] = ENC_TABLE[carry | (data[1] << 4) & 0x1f]
			out[2] = ENC_TABLE[(data[1] >> 1) & 0x1f]
			carry = (data[1] >> 6) & 0x1f
			fallthrough
		case 1:
			out[1] = ENC_TABLE[carry | (data[0] << 2) & 0x1f]
			out[0] = ENC_TABLE[data[0] >> 3]
		}

		if len(data) < 5 {
			out[7] = byte(PADDING)
			if len(data) < 4 {
				out[6] = byte(PADDING)
				out[5] = byte(PADDING)
				if len(data) < 3 {
					out[4] = byte(PADDING)
					if len(data) < 2 {
						out[3] = byte(PADDING)
						out[2] = byte(PADDING)
					}
				}
			}
			break
		}
		data = data[5:]
		out = out[8:]
	}
}

decode :: proc(data: string, DEC_TBL := DEC_TABLE, allocator := context.allocator) -> []byte #no_bounds_check{
	if len(data) == 0 {
		return nil
	}

	outi := 0
	data := data

	out := make([]byte, len(data) / 8 * 5, allocator)
	end := false
	for len(data) > 0 && !end {
		dbuf : [8]byte
		dlen := 8

		for j := 0; j < 8; {
			if len(data) == 0 {
				dlen, end = j, true
				break
			}
			input := data[0]
			data = data[1:]
			if input == byte(PADDING) && j >= 2 && len(data) < 8 {
				assert(!(len(data) + j < 8 - 1), "Corrupted input")
				for k := 0; k < 8-1-j; k +=1 {
					assert(len(data) < k || data[k] == byte(PADDING), "Corrupted input")
				}
				dlen, end = j, true
				assert(dlen != 1 && dlen != 3 && dlen != 6, "Corrupted input")
				break
			}
			dbuf[j] = DEC_TABLE[input]
			assert(dbuf[j] != 0xff, "Corrupted input")
			j += 1
		}

		switch dlen {
		case 8:
			out[outi + 4] = dbuf[6] << 5 | dbuf[7]
			fallthrough
		case 7:
			out[outi + 3] = dbuf[4] << 7 | dbuf[5] << 2 | dbuf[6] >> 3
			fallthrough
		case 5:
			out[outi + 2] = dbuf[3] << 4 | dbuf[4] >> 1
			fallthrough
		case 4:
			out[outi + 1] = dbuf[1] << 6 | dbuf[2] << 1 | dbuf[3] >> 4
			fallthrough
		case 2:
			out[outi + 0] = dbuf[0] << 3 | dbuf[1] >> 2
		}
		outi += 5
	}
	return out
}
// Base32 encoding/decoding implementation as specified in RFC 4648.
// [[ More; https://www.rfc-editor.org/rfc/rfc4648.html ]]
package encoding_base32

// @note(zh): Encoding utility for Base32
// A secondary param can be used to supply a custom alphabet to
// @link(encode) and a matching decoding table to @link(decode).
// If none is supplied it just uses the standard Base32 alphabet.
// In case your specific version does not use padding, you may
// truncate it from the encoded output.

// Error represents errors that can occur during base32 decoding operations.
// As per RFC 4648:
// - Section 3.3: Invalid character handling
// - Section 3.2: Padding requirements
// - Section 6: Base32 encoding specifics (including block size requirements)
Error :: enum {
	None,
	Invalid_Character, // Input contains characters outside the specified alphabet
	Invalid_Length,    // Input length is not valid for base32 (must be a multiple of 8 with proper padding)
	Malformed_Input,   // Input has improper structure (wrong padding position or incomplete groups)
}

Validate_Proc :: #type proc(c: byte) -> bool

@private
_validate_default :: proc(c: byte) -> bool {
	return (c >= 'A' && c <= 'Z') || (c >= '2' && c <= '7')
}

@(rodata)
ENC_TABLE := [32]byte {
	'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
	'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P',
	'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X',
	'Y', 'Z', '2', '3', '4', '5', '6', '7',
}

PADDING :: '='

@(rodata)
DEC_TABLE := [256]u8 {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 26, 27, 28, 29, 30, 31, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 0, 0, 0, 0, 0,
	0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
}

encode :: proc(data: []byte, ENC_TBL := ENC_TABLE, allocator := context.allocator) -> string {
	out_length := (len(data) + 4) / 5 * 8
	out := make([]byte, out_length, allocator)
	_encode(out, data, ENC_TBL)
	return string(out[:])
}

@private
_encode :: proc(out, data: []byte, ENC_TBL := ENC_TABLE, allocator := context.allocator) {
	out := out
	data := data

	for len(data) > 0 {
		carry: byte
		switch len(data) {
		case:
			out[7] = ENC_TBL[data[4] & 0x1f]
			carry = data[4] >> 5
			fallthrough
		case 4:
			out[6] = ENC_TBL[carry | (data[3] << 3) & 0x1f]
			out[5] = ENC_TBL[(data[3] >> 2) & 0x1f]
			carry = data[3] >> 7
			fallthrough
		case 3:
			out[4] = ENC_TBL[carry | (data[2] << 1) & 0x1f]
			carry = (data[2] >> 4) & 0x1f
			fallthrough
		case 2:
			out[3] = ENC_TBL[carry | (data[1] << 4) & 0x1f]
			out[2] = ENC_TBL[(data[1] >> 1) & 0x1f]
			carry = (data[1] >> 6) & 0x1f
			fallthrough
		case 1:
			out[1] = ENC_TBL[carry | (data[0] << 2) & 0x1f]
			out[0] = ENC_TBL[data[0] >> 3]
		}

		if len(data) < 5 {
			out[7] = byte(PADDING)
			if len(data) < 4 {
				out[6] = byte(PADDING)
				out[5] = byte(PADDING)
				if len(data) < 3 {
					out[4] = byte(PADDING)
					if len(data) < 2 {
						out[3] = byte(PADDING)
						out[2] = byte(PADDING)
					}
				}
			}
			break
		}
		data = data[5:]
		out = out[8:]
	}
}

@(optimization_mode="favor_size")
decode :: proc(
	data: string,
	DEC_TBL := DEC_TABLE,
	validate: Validate_Proc = _validate_default,
	allocator := context.allocator) -> (out: []byte, err: Error) {
	if len(data) == 0 {
		return nil, .None
	}

	// Check minimum length requirement first
	if len(data) < 2 {
		return nil, .Invalid_Length
	}

	// Validate characters using provided validation function
	for i := 0; i < len(data); i += 1 {
		c := data[i]
		if c == byte(PADDING) {
			break
		}
		if !validate(c) {
			return nil, .Invalid_Character
		}
	}

	// Validate padding and length
	data_len := len(data)
	padding_count := 0
	for i := data_len - 1; i >= 0; i -= 1 {
		if data[i] != byte(PADDING) {
			break
		}
		padding_count += 1
	}

	// Check for proper padding and length combinations
	if padding_count > 0 {
		// Verify no padding in the middle
		for i := 0; i < data_len - padding_count; i += 1 {
			if data[i] == byte(PADDING) {
				return nil, .Malformed_Input
			}
		}

		content_len := data_len - padding_count
		mod8 := content_len % 8
		required_padding: int
		switch mod8 {
		case 2: required_padding = 6 // 2 chars need 6 padding chars
		case 4: required_padding = 4 // 4 chars need 4 padding chars
		case 5: required_padding = 3 // 5 chars need 3 padding chars
		case 7: required_padding = 1 // 7 chars need 1 padding char
		case:   required_padding = 0
		}

		if required_padding > 0 {
			if padding_count != required_padding {
				return nil, .Malformed_Input
			}
		} else if mod8 != 0 {
			return nil, .Malformed_Input
		}
	} else {
		// No padding - must be multiple of 8
		if data_len % 8 != 0 {
			return nil, .Malformed_Input
		}
	}

	// Calculate decoded length: 5 bytes for every 8 input chars
	input_chars := data_len - padding_count
	out_len := input_chars * 5 / 8
	out = make([]byte, out_len, allocator)
	defer if err != .None {
		delete(out)
	}

	// Process input in 8-byte blocks
	outi := 0
	for i := 0; i < input_chars; i += 8 {
		buf: [8]byte
		block_size := min(8, input_chars - i)

		// Decode block
		for j := 0; j < block_size; j += 1 {
			buf[j] = DEC_TBL[data[i + j]]
		}

		// Convert to output bytes based on block size
		bytes_to_write := block_size * 5 / 8
		switch block_size {
		case 8:
			out[outi + 4] = (buf[6] << 5) | buf[7]
			fallthrough
		case 7:
			out[outi + 3] = (buf[4] << 7) | (buf[5] << 2) | (buf[6] >> 3)
			fallthrough
		case 5:
			out[outi + 2] = (buf[3] << 4) | (buf[4] >> 1)
			fallthrough
		case 4:
			out[outi + 1] = (buf[1] << 6) | (buf[2] << 1) | (buf[3] >> 4)
			fallthrough
		case 2:
			out[outi] = (buf[0] << 3) | (buf[1] >> 2)
		}
		outi += bytes_to_write
	}

	return
}

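A short usage sketch for the rewritten base32 API above (illustrative only, not part of this diff; it assumes the `encode`/`decode` signatures shown in the new file):

	// Hypothetical round-trip through the new error-returning decode.
	package base32_example

	import "core:encoding/base32"
	import "core:fmt"

	main :: proc() {
		input := "foobar"
		encoded := base32.encode(transmute([]byte)input)
		defer delete(encoded)

		decoded, err := base32.decode(encoded)
		if err != .None {
			fmt.eprintln("decode failed:", err)
			return
		}
		defer delete(decoded)
		fmt.println(string(decoded)) // expected to print "foobar"
	}
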
core/encoding/base32/base32_test.odin (new file, 227 lines)
@@ -0,0 +1,227 @@
package encoding_base32

import "core:testing"
import "core:bytes"

@(test)
test_base32_decode_valid :: proc(t: ^testing.T) {
	// RFC 4648 Section 10 - Test vectors
	cases := [?]struct {
		input, expected: string,
	}{
		{"", ""},
		{"MY======", "f"},
		{"MZXQ====", "fo"},
		{"MZXW6===", "foo"},
		{"MZXW6YQ=", "foob"},
		{"MZXW6YTB", "fooba"},
		{"MZXW6YTBOI======", "foobar"},
	}

	for c in cases {
		output, err := decode(c.input)
		if output != nil {
			defer delete(output)
		}
		testing.expect_value(t, err, Error.None)
		expected := transmute([]u8)c.expected
		if output != nil {
			testing.expect(t, bytes.equal(output, expected))
		} else {
			testing.expect(t, len(c.expected) == 0)
		}
	}
}

@(test)
test_base32_encode :: proc(t: ^testing.T) {
	// RFC 4648 Section 10 - Test vectors
	cases := [?]struct {
		input, expected: string,
	}{
		{"", ""},
		{"f", "MY======"},
		{"fo", "MZXQ===="},
		{"foo", "MZXW6==="},
		{"foob", "MZXW6YQ="},
		{"fooba", "MZXW6YTB"},
		{"foobar", "MZXW6YTBOI======"},
	}

	for c in cases {
		output := encode(transmute([]byte)c.input)
		defer delete(output)
		testing.expect(t, output == c.expected)
	}
}

@(test)
test_base32_decode_invalid :: proc(t: ^testing.T) {
	// Section 3.3 - Non-alphabet characters
	{
		// Characters outside alphabet
		input := "MZ1W6YTB" // '1' not in alphabet (A-Z, 2-7)
		output, err := decode(input)
		if output != nil {
			defer delete(output)
		}
		testing.expect_value(t, err, Error.Invalid_Character)
	}
	{
		// Lowercase not allowed
		input := "mzxq===="
		output, err := decode(input)
		if output != nil {
			defer delete(output)
		}
		testing.expect_value(t, err, Error.Invalid_Character)
	}

	// Section 3.2 - Padding requirements
	{
		// Padding must only be at end
		input := "MZ=Q===="
		output, err := decode(input)
		if output != nil {
			defer delete(output)
		}
		testing.expect_value(t, err, Error.Malformed_Input)
	}
	{
		// Missing padding
		input := "MZXQ" // Should be MZXQ====
		output, err := decode(input)
		if output != nil {
			defer delete(output)
		}
		testing.expect_value(t, err, Error.Malformed_Input)
	}
	{
		// Incorrect padding length
		input := "MZXQ=" // Needs 4 padding chars
		output, err := decode(input)
		if output != nil {
			defer delete(output)
		}
		testing.expect_value(t, err, Error.Malformed_Input)
	}
	{
		// Too much padding
		input := "MY=========" // Extra padding chars
		output, err := decode(input)
		if output != nil {
			defer delete(output)
		}
		testing.expect_value(t, err, Error.Malformed_Input)
	}

	// Section 6 - Base32 block size requirements
	{
		// Single character (invalid block)
		input := "M"
		output, err := decode(input)
		if output != nil {
			defer delete(output)
		}
		testing.expect_value(t, err, Error.Invalid_Length)
	}
}

@(test)
test_base32_roundtrip :: proc(t: ^testing.T) {
	cases := [?]string{
		"",
		"f",
		"fo",
		"foo",
		"foob",
		"fooba",
		"foobar",
	}

	for input in cases {
		encoded := encode(transmute([]byte)input)
		defer delete(encoded)
		decoded, err := decode(encoded)
		if decoded != nil {
			defer delete(decoded)
		}
		testing.expect_value(t, err, Error.None)
		testing.expect(t, bytes.equal(decoded, transmute([]byte)input))
	}
}

@(test)
test_base32_custom_alphabet :: proc(t: ^testing.T) {
	custom_enc_table := [32]byte{
		'0', '1', '2', '3', '4', '5', '6', '7',
		'8', '9', 'A', 'B', 'C', 'D', 'E', 'F',
		'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
		'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
	}

	custom_dec_table: [256]u8
	for i := 0; i < len(custom_enc_table); i += 1 {
		custom_dec_table[custom_enc_table[i]] = u8(i)
	}

	/*
	custom_dec_table := [256]u8{
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0x00-0x0f
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0x10-0x1f
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0x20-0x2f
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, // 0x30-0x3f ('0'-'9')
		0, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, // 0x40-0x4f ('A'-'O')
		25, 26, 27, 28, 29, 30, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0x50-0x5f ('P'-'V')
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0x60-0x6f
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0x70-0x7f
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0x80-0x8f
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0x90-0x9f
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0xa0-0xaf
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0xb0-0xbf
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0xc0-0xcf
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0xd0-0xdf
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0xe0-0xef
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0xf0-0xff
	}
	*/

	custom_validate :: proc(c: byte) -> bool {
		return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'V') || c == byte(PADDING)
	}

	cases := [?]struct {
		input: string,
		enc_expected: string,
	}{
		{"f", "CO======"},
		{"fo", "CPNG===="},
		{"foo", "CPNMU==="},
	}

	for c in cases {
		// Test encoding
		encoded := encode(transmute([]byte)c.input, custom_enc_table)
		defer delete(encoded)
		testing.expect(t, encoded == c.enc_expected)

		// Test decoding
		decoded, err := decode(encoded, custom_dec_table, custom_validate)
		defer if decoded != nil {
			delete(decoded)
		}

		testing.expect_value(t, err, Error.None)
		testing.expect(t, bytes.equal(decoded, transmute([]byte)c.input))
	}

	// Test invalid character detection
	{
		input := "WXY=====" // Contains chars not in our alphabet
		output, err := decode(input, custom_dec_table, custom_validate)
		if output != nil {
			delete(output)
		}
		testing.expect_value(t, err, Error.Invalid_Character)
	}
}
@@ -209,13 +209,23 @@ marshal_to_writer :: proc(w: io.Writer, v: any, opt: ^Marshal_Options) -> (err:
	opt_write_end(w, opt, ']') or_return

case runtime.Type_Info_Enumerated_Array:
	opt_write_start(w, opt, '[') or_return
	index_type := reflect.type_info_base(info.index)
	enum_type := index_type.variant.(reflect.Type_Info_Enum)

	opt_write_start(w, opt, '{') or_return
	for i in 0..<info.count {
		value := cast(runtime.Type_Info_Enum_Value)i
		index, found := slice.linear_search(enum_type.values, value)
		if !found {
			continue
		}

		opt_write_iteration(w, opt, i == 0) or_return
		opt_write_key(w, opt, enum_type.names[index]) or_return
		data := uintptr(v.data) + uintptr(i*info.elem_size)
		marshal_to_writer(w, any{rawptr(data), info.elem.id}, opt) or_return
	}
	opt_write_end(w, opt, ']') or_return
	opt_write_end(w, opt, '}') or_return

case runtime.Type_Info_Dynamic_Array:
	opt_write_start(w, opt, '[') or_return
@@ -667,4 +677,4 @@ cast_any_int_to_u128 :: proc(any_int_value: any) -> u128 {
	}

	return u
}
}

@@ -259,6 +259,7 @@ get_token :: proc(t: ^Tokenizer) -> (token: Token, err: Error) {
	skip_digits(t)
}
if t.r == 'e' || t.r == 'E' {
	token.kind = .Float
	switch r := next_rune(t); r {
	case '+', '-':
		next_rune(t)
@@ -485,7 +486,7 @@ is_valid_string_literal :: proc(str: string, spec: Specification) -> bool {
case '"':
	// okay
case '\'':
	if spec != .JSON {
	if spec == .JSON {
		return false
	}
	// okay

@@ -417,15 +417,15 @@ unmarshal_object :: proc(p: ^Parser, v: any, end_token: Token_Kind) -> (err: Unm
if .raw_union in t.flags {
	return UNSUPPORTED_TYPE
}


fields := reflect.struct_fields_zipped(ti.id)

struct_loop: for p.curr_token.kind != end_token {
	key := parse_object_key(p, p.allocator) or_return
	defer delete(key, p.allocator)

	unmarshal_expect_token(p, .Colon)

	fields := reflect.struct_fields_zipped(ti.id)

	field_test :: #force_inline proc "contextless" (field_used: [^]byte, offset: uintptr) -> bool {
		prev_set := field_used[offset/8] & byte(offset&7) != 0
		field_used[offset/8] |= byte(offset&7)
@@ -433,13 +433,13 @@ unmarshal_object :: proc(p: ^Parser, v: any, end_token: Token_Kind) -> (err: Unm
	}

	field_used_bytes := (reflect.size_of_typeid(ti.id)+7)/8
	field_used := intrinsics.alloca(field_used_bytes, 1)
	field_used := intrinsics.alloca(field_used_bytes + 1, 1) // + 1 to not overflow on size_of 0 types.
	intrinsics.mem_zero(field_used, field_used_bytes)

	use_field_idx := -1

	for field, field_idx in fields {
		tag_value := string(reflect.struct_tag_get(field.tag, "json"))
		tag_value := reflect.struct_tag_get(field.tag, "json")
		json_name, _ := json_name_from_tag_value(tag_value)
		if key == json_name {
			use_field_idx = field_idx
@@ -470,7 +470,7 @@ unmarshal_object :: proc(p: ^Parser, v: any, end_token: Token_Kind) -> (err: Unm
		}
	}

	if field.name == key {
	if field.name == key || (field.tag != "" && reflect.struct_tag_get(field.tag, "json") == key) {
		offset = field.offset
		type = field.type
		found = true

@@ -36,7 +36,7 @@ parse_or_exit :: proc(
	args = program_args[1:]
}

error := parse(model, args, style)
error := parse(model, args, style, true, true, allocator, loc)
if error != nil {
	stderr := os.stream_from_handle(os.stderr)


@@ -314,7 +314,29 @@ assertf :: proc(condition: bool, fmt: string, args: ..any, loc := #caller_locati
			p = runtime.default_assertion_failure_proc
		}
		message := tprintf(fmt, ..args)
		p("Runtime assertion", message, loc)
		p("runtime assertion", message, loc)
	}
	internal(loc, fmt, ..args)
}
}
// Runtime ensure with a formatted message
//
// Inputs:
// - condition: The boolean condition to be asserted
// - fmt: A format string with placeholders for the provided arguments
// - args: A variadic list of arguments to be formatted
// - loc: The location of the caller
//
ensuref :: proc(condition: bool, fmt: string, args: ..any, loc := #caller_location) {
	if !condition {
		@(cold)
		internal :: proc(loc: runtime.Source_Code_Location, fmt: string, args: ..any) {
			p := context.assertion_failure_proc
			if p == nil {
				p = runtime.default_assertion_failure_proc
			}
			message := tprintf(fmt, ..args)
			p("unsatisfied ensure", message, loc)
		}
		internal(loc, fmt, ..args)
	}
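A short usage sketch for `ensuref` as documented above (illustrative, not part of this diff; it assumes the procedure is exported from `core:fmt` alongside `assertf`/`panicf`):

	// Hypothetical example: formatted, never-disabled runtime check.
	// Assumes `import "core:fmt"`.
	check_header :: proc(size: int, expected: int) {
		fmt.ensuref(size == expected, "bad header size: got %d, want %d", size, expected)
	}
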
@@ -332,7 +354,7 @@ panicf :: proc(fmt: string, args: ..any, loc := #caller_location) -> ! {
		p = runtime.default_assertion_failure_proc
	}
	message := tprintf(fmt, ..args)
	p("Panic", message, loc)
	p("panic", message, loc)
}

// Creates a formatted C string
@@ -591,6 +613,10 @@ wprintf :: proc(w: io.Writer, fmt: string, args: ..any, flush := true, newline :
	i += 1
	width_index, _, index_ok := _arg_number(fmt, &i, len(args))

	if !index_ok {
		width_index, index_ok = error_check_arg(fi, false, unused_args^)
	}

	if index_ok {
		unused_args^ -= {width_index}

@@ -616,6 +642,10 @@ wprintf :: proc(w: io.Writer, fmt: string, args: ..any, flush := true, newline :
	i += 1
	precision_index, _, index_ok := _arg_number(fmt, &i, len(args))

	if !index_ok {
		precision_index, index_ok = error_check_arg(fi, false, unused_args^)
	}

	if index_ok {
		unused_args^ -= {precision_index}
		fi.prec, _, fi.prec_set = int_from_arg(args, precision_index)
@@ -1267,7 +1297,7 @@ fmt_rune :: proc(fi: ^Info, r: rune, verb: rune) {
case 'q', 'w':
	fi.n += io.write_quoted_rune(fi.writer, r)
case:
	fmt_int(fi, u64(r), false, 32, verb)
	fmt_int(fi, u64(u32(r)), false, 32, verb)
}
}
// Formats an integer value according to the specified formatting verb.
@@ -1357,9 +1387,9 @@ _pad :: proc(fi: ^Info, s: string) {
if fi.minus { // right pad
	io.write_string(fi.writer, s, &fi.n)
	fmt_write_padding(fi, width)
} else if !fi.space && s != "" && s[0] == '-' {
} else if !fi.space && s != "" && (s[0] == '-' || s[0] == '+') {
	// left pad accounting for zero pad of negative number
	io.write_byte(fi.writer, '-', &fi.n)
	io.write_byte(fi.writer, s[0], &fi.n)
	fmt_write_padding(fi, width)
	io.write_string(fi.writer, s[1:], &fi.n)
} else { // left pad

@@ -146,7 +146,7 @@ which_bytes :: proc(data: []byte) -> Which_File_Type {
case s[6:10] == "JFIF", s[6:10] == "Exif":
	return .JPEG
case s[:3] == "\xff\xd8\xff":
	switch s[4] {
	switch s[3] {
	case 0xdb, 0xee, 0xe1, 0xe0:
		return .JPEG
	}

@@ -396,132 +396,4 @@ exif :: proc(c: image.PNG_Chunk) -> (res: Exif, ok: bool) {
General helper functions
*/

compute_buffer_size :: image.compute_buffer_size

/*
PNG save helpers
*/

when false {

make_chunk :: proc(c: any, t: Chunk_Type) -> (res: Chunk) {

	data: []u8
	if v, ok := c.([]u8); ok {
		data = v
	} else {
		data = mem.any_to_bytes(c)
	}

	res.header.length = u32be(len(data))
	res.header.type = t
	res.data = data

	// CRC the type
	crc := hash.crc32(mem.any_to_bytes(res.header.type))
	// Extend the CRC with the data
	res.crc = u32be(hash.crc32(data, crc))
	return
}

write_chunk :: proc(fd: os.Handle, chunk: Chunk) {
	c := chunk
	// Write length + type
	os.write_ptr(fd, &c.header, 8)
	// Write data
	os.write_ptr(fd, mem.raw_data(c.data), int(c.header.length))
	// Write CRC32
	os.write_ptr(fd, &c.crc, 4)
}

write_image_as_png :: proc(filename: string, image: Image) -> (err: Error) {
	profiler.timed_proc()
	using image
	using os
	flags: int = O_WRONLY|O_CREATE|O_TRUNC

	if len(image.pixels) == 0 || len(image.pixels) < image.width * image.height * int(image.channels) {
		return .Invalid_Image_Dimensions
	}

	mode: int = 0
	when ODIN_OS == .Linux || ODIN_OS == .Darwin {
		// NOTE(justasd): 644 (owner read, write; group read; others read)
		mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH
	}

	fd, fderr := open(filename, flags, mode)
	if fderr != nil {
		return .Cannot_Open_File
	}
	defer close(fd)

	magic := Signature

	write_ptr(fd, &magic, 8)

	ihdr := IHDR{
		width = u32be(width),
		height = u32be(height),
		bit_depth = depth,
		compression_method = 0,
		filter_method = 0,
		interlace_method = .None,
	}

	switch channels {
	case 1: ihdr.color_type = Color_Type{}
	case 2: ihdr.color_type = Color_Type{.Alpha}
	case 3: ihdr.color_type = Color_Type{.Color}
	case 4: ihdr.color_type = Color_Type{.Color, .Alpha}
	case:// Unhandled
		return .Unknown_Color_Type
	}
	h := make_chunk(ihdr, .IHDR)
	write_chunk(fd, h)

	bytes_needed := width * height * int(channels) + height
	filter_bytes := mem.make_dynamic_array_len_cap([dynamic]u8, bytes_needed, bytes_needed, context.allocator)
	defer delete(filter_bytes)

	i := 0; j := 0
	// Add a filter byte 0 per pixel row
	for y := 0; y < height; y += 1 {
		filter_bytes[j] = 0; j += 1
		for x := 0; x < width; x += 1 {
			for z := 0; z < channels; z += 1 {
				filter_bytes[j+z] = image.pixels[i+z]
			}
			i += channels; j += channels
		}
	}
	assert(j == bytes_needed)

	a: []u8 = filter_bytes[:]

	out_buf: ^[dynamic]u8
	defer free(out_buf)

	ctx := zlib.ZLIB_Context{
		in_buf = &a,
		out_buf = out_buf,
	}
	err = zlib.write_zlib_stream_from_memory(&ctx)

	b: []u8
	if err == nil {
		b = ctx.out_buf[:]
	} else {
		return err
	}

	idat := make_chunk(b, .IDAT)

	write_chunk(fd, idat)

	iend := make_chunk([]u8{}, .IEND)
	write_chunk(fd, iend)

	return nil
}
}
compute_buffer_size :: image.compute_buffer_size
@@ -126,7 +126,7 @@ _i64_err :: #force_inline proc "contextless" (n: int, err: Error) -> (i64, Error
|
||||
}
|
||||
|
||||
|
||||
// read reads up to len(p) bytes into s. It returns the number of bytes read and any error if occurred.
|
||||
// read reads up to len(p) bytes into p. It returns the number of bytes read and any error if occurred.
|
||||
//
|
||||
// When read encounters an .EOF or error after successfully reading n > 0 bytes, it returns the number of
|
||||
// bytes read along with the error.
|
||||
@@ -142,7 +142,7 @@ read :: proc(s: Reader, p: []byte, n_read: ^int = nil) -> (n: int, err: Error) {
|
||||
return
|
||||
}
|
||||
|
||||
// write writes up to len(p) bytes into s. It returns the number of bytes written and any error if occurred.
|
||||
// write writes up to len(p) bytes into p. It returns the number of bytes written and any error if occurred.
|
||||
write :: proc(s: Writer, p: []byte, n_written: ^int = nil) -> (n: int, err: Error) {
|
||||
if s.procedure != nil {
|
||||
n64: i64
|
||||
|
||||
@@ -132,9 +132,13 @@ write_encoded_rune :: proc(w: Writer, r: rune, write_quote := true, n_written: ^
|
||||
		buf: [2]byte
		s := strconv.append_bits(buf[:], u64(r), 16, true, 64, strconv.digits, nil)
		switch len(s) {
		case 0: write_string(w, "00", &n) or_return
		case 1: write_byte(w, '0', &n) or_return
		case 2: write_string(w, s, &n) or_return
		case 0:
			write_string(w, "00", &n) or_return
		case 1:
			write_byte(w, '0', &n) or_return
			fallthrough
		case 2:
			write_string(w, s, &n) or_return
		}
	} else {
		write_rune(w, r, &n) or_return

@@ -37,30 +37,30 @@ File_Console_Logger_Data :: struct {
|
||||
	ident: string,
}

create_file_logger :: proc(h: os.Handle, lowest := Level.Debug, opt := Default_File_Logger_Opts, ident := "") -> Logger {
	data := new(File_Console_Logger_Data)
create_file_logger :: proc(h: os.Handle, lowest := Level.Debug, opt := Default_File_Logger_Opts, ident := "", allocator := context.allocator) -> Logger {
	data := new(File_Console_Logger_Data, allocator)
	data.file_handle = h
	data.ident = ident
	return Logger{file_console_logger_proc, data, lowest, opt}
}

destroy_file_logger :: proc(log: Logger) {
destroy_file_logger :: proc(log: Logger, allocator := context.allocator) {
	data := cast(^File_Console_Logger_Data)log.data
	if data.file_handle != os.INVALID_HANDLE {
		os.close(data.file_handle)
	}
	free(data)
	free(data, allocator)
}

create_console_logger :: proc(lowest := Level.Debug, opt := Default_Console_Logger_Opts, ident := "") -> Logger {
	data := new(File_Console_Logger_Data)
create_console_logger :: proc(lowest := Level.Debug, opt := Default_Console_Logger_Opts, ident := "", allocator := context.allocator) -> Logger {
	data := new(File_Console_Logger_Data, allocator)
	data.file_handle = os.INVALID_HANDLE
	data.ident = ident
	return Logger{file_console_logger_proc, data, lowest, opt}
}

destroy_console_logger :: proc(log: Logger) {
	free(log.data)
destroy_console_logger :: proc(log: Logger, allocator := context.allocator) {
	free(log.data, allocator)
}

file_console_logger_proc :: proc(logger_data: rawptr, level: Level, text: string, options: Options, location := #caller_location) {

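A minimal sketch of the new allocator parameters (assuming they land as shown above; pass the same allocator to the matching destroy call):

	import "core:log"

	logging_example :: proc() {
		logger := log.create_console_logger(allocator = context.allocator)
		defer log.destroy_console_logger(logger, context.allocator)
		context.logger = logger
		log.info("console logger backed by an explicit allocator")
	}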
@@ -115,7 +115,7 @@ panicf :: proc(fmt_str: string, args: ..any, location := #caller_location) -> !
|
||||
}
|
||||
|
||||
@(disabled=ODIN_DISABLE_ASSERT)
assert :: proc(condition: bool, message := "", loc := #caller_location) {
assert :: proc(condition: bool, message := #caller_expression(condition), loc := #caller_location) {
	if !condition {
		@(cold)
		internal :: proc(message: string, loc: runtime.Source_Code_Location) {
@@ -145,7 +145,38 @@ assertf :: proc(condition: bool, fmt_str: string, args: ..any, loc := #caller_lo
			}
			message := fmt.tprintf(fmt_str, ..args)
			log(.Fatal, message, location=loc)
			p("Runtime assertion", message, loc)
			p("runtime assertion", message, loc)
		}
		internal(loc, fmt_str, ..args)
	}
}

ensure :: proc(condition: bool, message := #caller_expression(condition), loc := #caller_location) {
	if !condition {
		@(cold)
		internal :: proc(message: string, loc: runtime.Source_Code_Location) {
			p := context.assertion_failure_proc
			if p == nil {
				p = runtime.default_assertion_failure_proc
			}
			log(.Fatal, message, location=loc)
			p("unsatisfied ensure", message, loc)
		}
		internal(message, loc)
	}
}

ensuref :: proc(condition: bool, fmt_str: string, args: ..any, loc := #caller_location) {
	if !condition {
		@(cold)
		internal :: proc(loc: runtime.Source_Code_Location, fmt_str: string, args: ..any) {
			p := context.assertion_failure_proc
			if p == nil {
				p = runtime.default_assertion_failure_proc
			}
			message := fmt.tprintf(fmt_str, ..args)
			log(.Fatal, message, location=loc)
			p("unsatisfied ensure", message, loc)
		}
		internal(loc, fmt_str, ..args)
	}

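A usage sketch for the new `ensure` variant (hedged: `#caller_expression(condition)` supplies the failing expression as the default message, so no message string is required):

	import "core:log"

	set_scale :: proc(scale: f32) {
		log.ensure(scale > 0) // on failure the report includes the expression text `scale > 0`
	}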
@@ -5,17 +5,17 @@ Multi_Logger_Data :: struct {
|
||||
	loggers: []Logger,
}

create_multi_logger :: proc(logs: ..Logger) -> Logger {
	data := new(Multi_Logger_Data)
	data.loggers = make([]Logger, len(logs))
create_multi_logger :: proc(logs: ..Logger, allocator := context.allocator) -> Logger {
	data := new(Multi_Logger_Data, allocator)
	data.loggers = make([]Logger, len(logs), allocator)
	copy(data.loggers, logs)
	return Logger{multi_logger_proc, data, Level.Debug, nil}
}

destroy_multi_logger :: proc(log: Logger) {
destroy_multi_logger :: proc(log: Logger, allocator := context.allocator) {
	data := (^Multi_Logger_Data)(log.data)
	delete(data.loggers)
	free(data)
	delete(data.loggers, allocator)
	free(data, allocator)
}

multi_logger_proc :: proc(logger_data: rawptr, level: Level, text: string,

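A combined sketch (assuming the allocator parameters above; the two loggers come from wherever the caller created them):

	import "core:log"

	make_combined_logger :: proc(file_logger, console_logger: log.Logger) -> log.Logger {
		// Both loggers receive every record; destroy with log.destroy_multi_logger and the same allocator.
		return log.create_multi_logger(file_logger, console_logger, allocator = context.allocator)
	}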
@@ -167,6 +167,18 @@ vector_triple_product :: proc "contextless" (a, b, c: $T/[$N]$E) -> T where IS_N
|
||||
length :: proc{vector_length, quaternion_length}
length2 :: proc{vector_length2, quaternion_length2}


@(require_results)
clamp_length :: proc "contextless" (v: $T/[$N]$E, a: E) -> T where IS_FLOAT(E) {
	if a <= 0 {
		return 0
	}

	m2 := length2(v)
	return v if (m2 <= a*a) else (v / sqrt(m2) * a) // returns original when m2 is 0
}


@(require_results)
projection :: proc "contextless" (x, normal: $T/[$N]$E) -> T where IS_NUMERIC(E) {
	return dot(x, normal) / dot(normal, normal) * normal
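A usage sketch for `clamp_length` above (values are illustrative):

	import "core:math/linalg"

	limit_velocity :: proc(v: [3]f32, max_speed: f32) -> [3]f32 {
		// Returns v unchanged when |v| <= max_speed, otherwise v scaled down to that length.
		return linalg.clamp_length(v, max_speed)
	}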
@@ -405,6 +417,13 @@ adjugate :: proc{
|
||||
matrix4x4_adjugate,
|
||||
}
|
||||
|
||||
cofactor :: proc{
|
||||
matrix1x1_cofactor,
|
||||
matrix2x2_cofactor,
|
||||
matrix3x3_cofactor,
|
||||
matrix4x4_cofactor,
|
||||
}
|
||||
|
||||
inverse_transpose :: proc{
|
||||
matrix1x1_inverse_transpose,
|
||||
matrix2x2_inverse_transpose,
|
||||
@@ -467,9 +486,9 @@ matrix3x3_determinant :: proc "contextless" (m: $M/matrix[3, 3]$T) -> (det: T) #
|
||||
}
|
||||
@(require_results)
matrix4x4_determinant :: proc "contextless" (m: $M/matrix[4, 4]$T) -> (det: T) #no_bounds_check {
	a := adjugate(m)
	c := cofactor(m)
	for i in 0..<4 {
		det += m[0, i] * a[0, i]
		det += m[0, i] * c[0, i]
	}
	return
}
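The switch from `adjugate` to `cofactor` matters because the adjugate is the transpose of the cofactor matrix; expanding the determinant along row 0 needs the cofactors of that row. A small check, assuming the `cofactor` overloads introduced above:

	import "core:math/linalg"

	determinant_by_cofactor_expansion :: proc(m: linalg.Matrix4f32) -> f32 {
		c := linalg.cofactor(m)
		det: f32
		for i in 0..<4 {
			det += m[0, i] * c[0, i]
		}
		return det // should agree with linalg.determinant(m)
	}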
@@ -485,6 +504,47 @@ matrix1x1_adjugate :: proc "contextless" (x: $M/matrix[1, 1]$T) -> (y: M) #no_bo
|
||||
|
||||
@(require_results)
|
||||
matrix2x2_adjugate :: proc "contextless" (x: $M/matrix[2, 2]$T) -> (y: M) #no_bounds_check {
|
||||
y[0, 0] = +x[1, 1]
|
||||
y[0, 1] = -x[0, 1]
|
||||
y[1, 0] = -x[1, 0]
|
||||
y[1, 1] = +x[0, 0]
|
||||
return
|
||||
}
|
||||
|
||||
@(require_results)
|
||||
matrix3x3_adjugate :: proc "contextless" (m: $M/matrix[3, 3]$T) -> (y: M) #no_bounds_check {
|
||||
y[0, 0] = +(m[1, 1] * m[2, 2] - m[2, 1] * m[1, 2])
|
||||
y[1, 0] = -(m[1, 0] * m[2, 2] - m[2, 0] * m[1, 2])
|
||||
y[2, 0] = +(m[1, 0] * m[2, 1] - m[2, 0] * m[1, 1])
|
||||
y[0, 1] = -(m[0, 1] * m[2, 2] - m[2, 1] * m[0, 2])
|
||||
y[1, 1] = +(m[0, 0] * m[2, 2] - m[2, 0] * m[0, 2])
|
||||
y[2, 1] = -(m[0, 0] * m[2, 1] - m[2, 0] * m[0, 1])
|
||||
y[0, 2] = +(m[0, 1] * m[1, 2] - m[1, 1] * m[0, 2])
|
||||
y[1, 2] = -(m[0, 0] * m[1, 2] - m[1, 0] * m[0, 2])
|
||||
y[2, 2] = +(m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1])
|
||||
return
|
||||
}
|
||||
|
||||
@(require_results)
|
||||
matrix4x4_adjugate :: proc "contextless" (x: $M/matrix[4, 4]$T) -> (y: M) #no_bounds_check {
|
||||
for i in 0..<4 {
|
||||
for j in 0..<4 {
|
||||
sign: T = 1 if (i + j) % 2 == 0 else -1
|
||||
y[i, j] = sign * matrix_minor(x, j, i)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@(require_results)
|
||||
matrix1x1_cofactor :: proc "contextless" (x: $M/matrix[1, 1]$T) -> (y: M) #no_bounds_check {
|
||||
y = x
|
||||
return
|
||||
}
|
||||
|
||||
@(require_results)
|
||||
matrix2x2_cofactor :: proc "contextless" (x: $M/matrix[2, 2]$T) -> (y: M) #no_bounds_check {
|
||||
y[0, 0] = +x[1, 1]
|
||||
y[0, 1] = -x[1, 0]
|
||||
y[1, 0] = -x[0, 1]
|
||||
@@ -493,7 +553,7 @@ matrix2x2_adjugate :: proc "contextless" (x: $M/matrix[2, 2]$T) -> (y: M) #no_bo
|
||||
}
|
||||
|
||||
@(require_results)
|
||||
matrix3x3_adjugate :: proc "contextless" (m: $M/matrix[3, 3]$T) -> (y: M) #no_bounds_check {
|
||||
matrix3x3_cofactor :: proc "contextless" (m: $M/matrix[3, 3]$T) -> (y: M) #no_bounds_check {
|
||||
y[0, 0] = +(m[1, 1] * m[2, 2] - m[2, 1] * m[1, 2])
|
||||
y[0, 1] = -(m[1, 0] * m[2, 2] - m[2, 0] * m[1, 2])
|
||||
y[0, 2] = +(m[1, 0] * m[2, 1] - m[2, 0] * m[1, 1])
|
||||
@@ -508,7 +568,7 @@ matrix3x3_adjugate :: proc "contextless" (m: $M/matrix[3, 3]$T) -> (y: M) #no_bo
|
||||
|
||||
|
||||
@(require_results)
|
||||
matrix4x4_adjugate :: proc "contextless" (x: $M/matrix[4, 4]$T) -> (y: M) #no_bounds_check {
|
||||
matrix4x4_cofactor :: proc "contextless" (x: $M/matrix[4, 4]$T) -> (y: M) #no_bounds_check {
|
||||
for i in 0..<4 {
|
||||
for j in 0..<4 {
|
||||
sign: T = 1 if (i + j) % 2 == 0 else -1
|
||||
@@ -544,19 +604,19 @@ matrix2x2_inverse_transpose :: proc "contextless" (x: $M/matrix[2, 2]$T) -> (y:
|
||||
|
||||
@(require_results)
|
||||
matrix3x3_inverse_transpose :: proc "contextless" (x: $M/matrix[3, 3]$T) -> (y: M) #no_bounds_check {
|
||||
a := adjugate(x)
|
||||
c := cofactor(x)
|
||||
d := determinant(x)
|
||||
when intrinsics.type_is_integer(T) {
|
||||
for i in 0..<3 {
|
||||
for j in 0..<3 {
|
||||
y[i, j] = a[i, j] / d
|
||||
y[i, j] = c[i, j] / d
|
||||
}
|
||||
}
|
||||
} else {
|
||||
id := 1/d
|
||||
for i in 0..<3 {
|
||||
for j in 0..<3 {
|
||||
y[i, j] = a[i, j] * id
|
||||
y[i, j] = c[i, j] * id
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -565,22 +625,22 @@ matrix3x3_inverse_transpose :: proc "contextless" (x: $M/matrix[3, 3]$T) -> (y:
|
||||
|
||||
@(require_results)
|
||||
matrix4x4_inverse_transpose :: proc "contextless" (x: $M/matrix[4, 4]$T) -> (y: M) #no_bounds_check {
|
||||
a := adjugate(x)
|
||||
c := cofactor(x)
|
||||
d: T
|
||||
for i in 0..<4 {
|
||||
d += x[0, i] * a[0, i]
|
||||
d += x[0, i] * c[0, i]
|
||||
}
|
||||
when intrinsics.type_is_integer(T) {
|
||||
for i in 0..<4 {
|
||||
for j in 0..<4 {
|
||||
y[i, j] = a[i, j] / d
|
||||
y[i, j] = c[i, j] / d
|
||||
}
|
||||
}
|
||||
} else {
|
||||
id := 1/d
|
||||
for i in 0..<4 {
|
||||
for j in 0..<4 {
|
||||
y[i, j] = a[i, j] * id
|
||||
y[i, j] = c[i, j] * id
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -613,19 +673,19 @@ matrix2x2_inverse :: proc "contextless" (x: $M/matrix[2, 2]$T) -> (y: M) #no_bou
|
||||
|
||||
@(require_results)
|
||||
matrix3x3_inverse :: proc "contextless" (x: $M/matrix[3, 3]$T) -> (y: M) #no_bounds_check {
|
||||
a := adjugate(x)
|
||||
c := cofactor(x)
|
||||
d := determinant(x)
|
||||
when intrinsics.type_is_integer(T) {
|
||||
for i in 0..<3 {
|
||||
for j in 0..<3 {
|
||||
y[i, j] = a[j, i] / d
|
||||
y[i, j] = c[j, i] / d
|
||||
}
|
||||
}
|
||||
} else {
|
||||
id := 1/d
|
||||
for i in 0..<3 {
|
||||
for j in 0..<3 {
|
||||
y[i, j] = a[j, i] * id
|
||||
y[i, j] = c[j, i] * id
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -634,22 +694,22 @@ matrix3x3_inverse :: proc "contextless" (x: $M/matrix[3, 3]$T) -> (y: M) #no_bou
|
||||
|
||||
@(require_results)
|
||||
matrix4x4_inverse :: proc "contextless" (x: $M/matrix[4, 4]$T) -> (y: M) #no_bounds_check {
|
||||
a := adjugate(x)
|
||||
c := cofactor(x)
|
||||
d: T
|
||||
for i in 0..<4 {
|
||||
d += x[0, i] * a[0, i]
|
||||
d += x[0, i] * c[0, i]
|
||||
}
|
||||
when intrinsics.type_is_integer(T) {
|
||||
for i in 0..<4 {
|
||||
for j in 0..<4 {
|
||||
y[i, j] = a[j, i] / d
|
||||
y[i, j] = c[j, i] / d
|
||||
}
|
||||
}
|
||||
} else {
|
||||
id := 1/d
|
||||
for i in 0..<4 {
|
||||
for j in 0..<4 {
|
||||
y[i, j] = a[j, i] * id
|
||||
y[i, j] = c[j, i] * id
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -473,6 +473,22 @@ floor :: proc{
|
||||
@(require_results) floor_dvec3 :: proc "c" (x: dvec3) -> dvec3 { return {floor(x.x), floor(x.y), floor(x.z)} }
|
||||
@(require_results) floor_dvec4 :: proc "c" (x: dvec4) -> dvec4 { return {floor(x.x), floor(x.y), floor(x.z), floor(x.w)} }
|
||||
|
||||
trunc :: proc{
|
||||
trunc_f32,
|
||||
trunc_f64,
|
||||
trunc_vec2,
|
||||
trunc_vec3,
|
||||
trunc_vec4,
|
||||
trunc_dvec2,
|
||||
trunc_dvec3,
|
||||
trunc_dvec4,
|
||||
}
|
||||
@(require_results) trunc_vec2 :: proc "c" (x: vec2) -> vec2 { return {trunc(x.x), trunc(x.y)} }
|
||||
@(require_results) trunc_vec3 :: proc "c" (x: vec3) -> vec3 { return {trunc(x.x), trunc(x.y), trunc(x.z)} }
|
||||
@(require_results) trunc_vec4 :: proc "c" (x: vec4) -> vec4 { return {trunc(x.x), trunc(x.y), trunc(x.z), trunc(x.w)} }
|
||||
@(require_results) trunc_dvec2 :: proc "c" (x: dvec2) -> dvec2 { return {trunc(x.x), trunc(x.y)} }
|
||||
@(require_results) trunc_dvec3 :: proc "c" (x: dvec3) -> dvec3 { return {trunc(x.x), trunc(x.y), trunc(x.z)} }
|
||||
@(require_results) trunc_dvec4 :: proc "c" (x: dvec4) -> dvec4 { return {trunc(x.x), trunc(x.y), trunc(x.z), trunc(x.w)} }
|
||||
|
||||
|
||||
round :: proc{
|
||||
@@ -1866,6 +1882,13 @@ adjugate :: proc{
|
||||
adjugate_matrix4x4,
|
||||
}
|
||||
|
||||
cofactor :: proc{
|
||||
cofactor_matrix1x1,
|
||||
cofactor_matrix2x2,
|
||||
cofactor_matrix3x3,
|
||||
cofactor_matrix4x4,
|
||||
}
|
||||
|
||||
inverse_transpose :: proc{
|
||||
inverse_transpose_matrix1x1,
|
||||
inverse_transpose_matrix2x2,
|
||||
@@ -1928,9 +1951,9 @@ determinant_matrix3x3 :: proc "contextless" (m: $M/matrix[3, 3]$T) -> (det: T) {
|
||||
}
|
||||
@(require_results)
|
||||
determinant_matrix4x4 :: proc "contextless" (m: $M/matrix[4, 4]$T) -> (det: T) {
|
||||
a := adjugate(m)
|
||||
c := cofactor(m)
|
||||
#no_bounds_check for i in 0..<4 {
|
||||
det += m[0, i] * a[0, i]
|
||||
det += m[0, i] * c[0, i]
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -1946,6 +1969,47 @@ adjugate_matrix1x1 :: proc "contextless" (x: $M/matrix[1, 1]$T) -> (y: M) {
|
||||
|
||||
@(require_results)
|
||||
adjugate_matrix2x2 :: proc "contextless" (x: $M/matrix[2, 2]$T) -> (y: M) {
|
||||
y[0, 0] = +x[1, 1]
|
||||
y[0, 1] = -x[0, 1]
|
||||
y[1, 0] = -x[1, 0]
|
||||
y[1, 1] = +x[0, 0]
|
||||
return
|
||||
}
|
||||
|
||||
@(require_results)
|
||||
adjugate_matrix3x3 :: proc "contextless" (m: $M/matrix[3, 3]$T) -> (y: M) {
|
||||
y[0, 0] = +(m[1, 1] * m[2, 2] - m[2, 1] * m[1, 2])
|
||||
y[1, 0] = -(m[1, 0] * m[2, 2] - m[2, 0] * m[1, 2])
|
||||
y[2, 0] = +(m[1, 0] * m[2, 1] - m[2, 0] * m[1, 1])
|
||||
y[0, 1] = -(m[0, 1] * m[2, 2] - m[2, 1] * m[0, 2])
|
||||
y[1, 1] = +(m[0, 0] * m[2, 2] - m[2, 0] * m[0, 2])
|
||||
y[2, 1] = -(m[0, 0] * m[2, 1] - m[2, 0] * m[0, 1])
|
||||
y[0, 2] = +(m[0, 1] * m[1, 2] - m[1, 1] * m[0, 2])
|
||||
y[1, 2] = -(m[0, 0] * m[1, 2] - m[1, 0] * m[0, 2])
|
||||
y[2, 2] = +(m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1])
|
||||
return
|
||||
}
|
||||
|
||||
@(require_results)
|
||||
adjugate_matrix4x4 :: proc "contextless" (x: $M/matrix[4, 4]$T) -> (y: M) {
|
||||
for i in 0..<4 {
|
||||
for j in 0..<4 {
|
||||
sign: T = 1 if (i + j) % 2 == 0 else -1
|
||||
y[i, j] = sign * matrix_minor(x, j, i)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@(require_results)
|
||||
cofactor_matrix1x1 :: proc "contextless" (x: $M/matrix[1, 1]$T) -> (y: M) {
|
||||
y = x
|
||||
return
|
||||
}
|
||||
|
||||
@(require_results)
|
||||
cofactor_matrix2x2 :: proc "contextless" (x: $M/matrix[2, 2]$T) -> (y: M) {
|
||||
y[0, 0] = +x[1, 1]
|
||||
y[0, 1] = -x[1, 0]
|
||||
y[1, 0] = -x[0, 1]
|
||||
@@ -1954,7 +2018,7 @@ adjugate_matrix2x2 :: proc "contextless" (x: $M/matrix[2, 2]$T) -> (y: M) {
|
||||
}
|
||||
|
||||
@(require_results)
|
||||
adjugate_matrix3x3 :: proc "contextless" (m: $M/matrix[3, 3]$T) -> (y: M) {
|
||||
cofactor_matrix3x3 :: proc "contextless" (m: $M/matrix[3, 3]$T) -> (y: M) {
|
||||
y[0, 0] = +(m[1, 1] * m[2, 2] - m[2, 1] * m[1, 2])
|
||||
y[0, 1] = -(m[1, 0] * m[2, 2] - m[2, 0] * m[1, 2])
|
||||
y[0, 2] = +(m[1, 0] * m[2, 1] - m[2, 0] * m[1, 1])
|
||||
@@ -1969,7 +2033,7 @@ adjugate_matrix3x3 :: proc "contextless" (m: $M/matrix[3, 3]$T) -> (y: M) {
|
||||
|
||||
|
||||
@(require_results)
|
||||
adjugate_matrix4x4 :: proc "contextless" (x: $M/matrix[4, 4]$T) -> (y: M) {
|
||||
cofactor_matrix4x4 :: proc "contextless" (x: $M/matrix[4, 4]$T) -> (y: M) {
|
||||
for i in 0..<4 {
|
||||
for j in 0..<4 {
|
||||
sign: T = 1 if (i + j) % 2 == 0 else -1
|
||||
@@ -2005,19 +2069,19 @@ inverse_transpose_matrix2x2 :: proc "contextless" (x: $M/matrix[2, 2]$T) -> (y:
|
||||
|
||||
@(require_results)
|
||||
inverse_transpose_matrix3x3 :: proc "contextless" (x: $M/matrix[3, 3]$T) -> (y: M) #no_bounds_check {
|
||||
a := adjugate(x)
|
||||
c := cofactor(x)
|
||||
d := determinant(x)
|
||||
when intrinsics.type_is_integer(T) {
|
||||
for i in 0..<3 {
|
||||
for j in 0..<3 {
|
||||
y[i, j] = a[i, j] / d
|
||||
y[i, j] = c[i, j] / d
|
||||
}
|
||||
}
|
||||
} else {
|
||||
id := 1/d
|
||||
for i in 0..<3 {
|
||||
for j in 0..<3 {
|
||||
y[i, j] = a[i, j] * id
|
||||
y[i, j] = c[i, j] * id
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -2026,22 +2090,22 @@ inverse_transpose_matrix3x3 :: proc "contextless" (x: $M/matrix[3, 3]$T) -> (y:
|
||||
|
||||
@(require_results)
|
||||
inverse_transpose_matrix4x4 :: proc "contextless" (x: $M/matrix[4, 4]$T) -> (y: M) #no_bounds_check {
|
||||
a := adjugate(x)
|
||||
c := cofactor(x)
|
||||
d: T
|
||||
for i in 0..<4 {
|
||||
d += x[0, i] * a[0, i]
|
||||
d += x[0, i] * c[0, i]
|
||||
}
|
||||
when intrinsics.type_is_integer(T) {
|
||||
for i in 0..<4 {
|
||||
for j in 0..<4 {
|
||||
y[i, j] = a[i, j] / d
|
||||
y[i, j] = c[i, j] / d
|
||||
}
|
||||
}
|
||||
} else {
|
||||
id := 1/d
|
||||
for i in 0..<4 {
|
||||
for j in 0..<4 {
|
||||
y[i, j] = a[i, j] * id
|
||||
y[i, j] = c[i, j] * id
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -2074,19 +2138,19 @@ inverse_matrix2x2 :: proc "contextless" (x: $M/matrix[2, 2]$T) -> (y: M) {
|
||||
|
||||
@(require_results)
|
||||
inverse_matrix3x3 :: proc "contextless" (x: $M/matrix[3, 3]$T) -> (y: M) #no_bounds_check {
|
||||
a := adjugate(x)
|
||||
c := cofactor(x)
|
||||
d := determinant(x)
|
||||
when intrinsics.type_is_integer(T) {
|
||||
for i in 0..<3 {
|
||||
for j in 0..<3 {
|
||||
y[i, j] = a[j, i] / d
|
||||
y[i, j] = c[j, i] / d
|
||||
}
|
||||
}
|
||||
} else {
|
||||
id := 1/d
|
||||
for i in 0..<3 {
|
||||
for j in 0..<3 {
|
||||
y[i, j] = a[j, i] * id
|
||||
y[i, j] = c[j, i] * id
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -2095,22 +2159,22 @@ inverse_matrix3x3 :: proc "contextless" (x: $M/matrix[3, 3]$T) -> (y: M) #no_bou
|
||||
|
||||
@(require_results)
|
||||
inverse_matrix4x4 :: proc "contextless" (x: $M/matrix[4, 4]$T) -> (y: M) #no_bounds_check {
|
||||
a := adjugate(x)
|
||||
c := cofactor(x)
|
||||
d: T
|
||||
for i in 0..<4 {
|
||||
d += x[0, i] * a[0, i]
|
||||
d += x[0, i] * c[0, i]
|
||||
}
|
||||
when intrinsics.type_is_integer(T) {
|
||||
for i in 0..<4 {
|
||||
for j in 0..<4 {
|
||||
y[i, j] = a[j, i] / d
|
||||
y[i, j] = c[j, i] / d
|
||||
}
|
||||
}
|
||||
} else {
|
||||
id := 1/d
|
||||
for i in 0..<4 {
|
||||
for j in 0..<4 {
|
||||
y[i, j] = a[j, i] * id
|
||||
y[i, j] = c[j, i] * id
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -23,6 +23,7 @@ import "core:math"
|
||||
@(require_results) exp2_f32 :: proc "c" (x: f32) -> f32 { return math.pow(f32(2), x) }
|
||||
@(require_results) sign_f32 :: proc "c" (x: f32) -> f32 { return math.sign(x) }
|
||||
@(require_results) floor_f32 :: proc "c" (x: f32) -> f32 { return math.floor(x) }
|
||||
@(require_results) trunc_f32 :: proc "c" (x: f32) -> f32 { return math.trunc(x) }
|
||||
@(require_results) round_f32 :: proc "c" (x: f32) -> f32 { return math.round(x) }
|
||||
@(require_results) ceil_f32 :: proc "c" (x: f32) -> f32 { return math.ceil(x) }
|
||||
@(require_results) mod_f32 :: proc "c" (x, y: f32) -> f32 { return math.mod(x, y) }
|
||||
@@ -55,6 +56,7 @@ fract_f32 :: proc "c" (x: f32) -> f32 {
|
||||
@(require_results) exp2_f64 :: proc "c" (x: f64) -> f64 { return math.pow(f64(2), x) }
|
||||
@(require_results) sign_f64 :: proc "c" (x: f64) -> f64 { return math.sign(x) }
|
||||
@(require_results) floor_f64 :: proc "c" (x: f64) -> f64 { return math.floor(x) }
|
||||
@(require_results) trunc_f64 :: proc "c" (x: f64) -> f64 { return math.trunc(x) }
|
||||
@(require_results) round_f64 :: proc "c" (x: f64) -> f64 { return math.round(x) }
|
||||
@(require_results) ceil_f64 :: proc "c" (x: f64) -> f64 { return math.ceil(x) }
|
||||
@(require_results) mod_f64 :: proc "c" (x, y: f64) -> f64 { return math.mod(x, y) }
|
||||
|
||||
@@ -1514,6 +1514,13 @@ adjugate :: proc{
|
||||
adjugate_matrix4x4,
|
||||
}
|
||||
|
||||
cofactor :: proc{
|
||||
cofactor_matrix1x1,
|
||||
cofactor_matrix2x2,
|
||||
cofactor_matrix3x3,
|
||||
cofactor_matrix4x4,
|
||||
}
|
||||
|
||||
inverse_transpose :: proc{
|
||||
inverse_transpose_matrix1x1,
|
||||
inverse_transpose_matrix2x2,
|
||||
@@ -1568,9 +1575,9 @@ determinant_matrix3x3 :: proc "contextless" (m: $M/matrix[3, 3]$T) -> (det: T) {
|
||||
}
|
||||
@(require_results)
|
||||
determinant_matrix4x4 :: proc "contextless" (m: $M/matrix[4, 4]$T) -> (det: T) {
|
||||
a := adjugate(m)
|
||||
c := cofactor(m)
|
||||
#no_bounds_check for i in 0..<4 {
|
||||
det += m[0, i] * a[0, i]
|
||||
det += m[0, i] * c[0, i]
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -1586,6 +1593,47 @@ adjugate_matrix1x1 :: proc "contextless" (x: $M/matrix[1, 1]$T) -> (y: M) {
|
||||
|
||||
@(require_results)
|
||||
adjugate_matrix2x2 :: proc "contextless" (x: $M/matrix[2, 2]$T) -> (y: M) {
|
||||
y[0, 0] = +x[1, 1]
|
||||
y[0, 1] = -x[0, 1]
|
||||
y[1, 0] = -x[1, 0]
|
||||
y[1, 1] = +x[0, 0]
|
||||
return
|
||||
}
|
||||
|
||||
@(require_results)
|
||||
adjugate_matrix3x3 :: proc "contextless" (m: $M/matrix[3, 3]$T) -> (y: M) {
|
||||
y[0, 0] = +(m[1, 1] * m[2, 2] - m[2, 1] * m[1, 2])
|
||||
y[1, 0] = -(m[1, 0] * m[2, 2] - m[2, 0] * m[1, 2])
|
||||
y[2, 0] = +(m[1, 0] * m[2, 1] - m[2, 0] * m[1, 1])
|
||||
y[0, 1] = -(m[0, 1] * m[2, 2] - m[2, 1] * m[0, 2])
|
||||
y[1, 1] = +(m[0, 0] * m[2, 2] - m[2, 0] * m[0, 2])
|
||||
y[2, 1] = -(m[0, 0] * m[2, 1] - m[2, 0] * m[0, 1])
|
||||
y[0, 2] = +(m[0, 1] * m[1, 2] - m[1, 1] * m[0, 2])
|
||||
y[1, 2] = -(m[0, 0] * m[1, 2] - m[1, 0] * m[0, 2])
|
||||
y[2, 2] = +(m[0, 0] * m[1, 1] - m[1, 0] * m[0, 1])
|
||||
return
|
||||
}
|
||||
|
||||
@(require_results)
|
||||
adjugate_matrix4x4 :: proc "contextless" (x: $M/matrix[4, 4]$T) -> (y: M) {
|
||||
for i in 0..<4 {
|
||||
for j in 0..<4 {
|
||||
sign: T = 1 if (i + j) % 2 == 0 else -1
|
||||
y[i, j] = sign * matrix_minor(x, j, i)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@(require_results)
|
||||
cofactor_matrix1x1 :: proc "contextless" (x: $M/matrix[1, 1]$T) -> (y: M) {
|
||||
y = x
|
||||
return
|
||||
}
|
||||
|
||||
@(require_results)
|
||||
cofactor_matrix2x2 :: proc "contextless" (x: $M/matrix[2, 2]$T) -> (y: M) {
|
||||
y[0, 0] = +x[1, 1]
|
||||
y[0, 1] = -x[1, 0]
|
||||
y[1, 0] = -x[0, 1]
|
||||
@@ -1594,7 +1642,7 @@ adjugate_matrix2x2 :: proc "contextless" (x: $M/matrix[2, 2]$T) -> (y: M) {
|
||||
}
|
||||
|
||||
@(require_results)
|
||||
adjugate_matrix3x3 :: proc "contextless" (m: $M/matrix[3, 3]$T) -> (y: M) {
|
||||
cofactor_matrix3x3 :: proc "contextless" (m: $M/matrix[3, 3]$T) -> (y: M) {
|
||||
y[0, 0] = +(m[1, 1] * m[2, 2] - m[2, 1] * m[1, 2])
|
||||
y[0, 1] = -(m[1, 0] * m[2, 2] - m[2, 0] * m[1, 2])
|
||||
y[0, 2] = +(m[1, 0] * m[2, 1] - m[2, 0] * m[1, 1])
|
||||
@@ -1609,7 +1657,7 @@ adjugate_matrix3x3 :: proc "contextless" (m: $M/matrix[3, 3]$T) -> (y: M) {
|
||||
|
||||
|
||||
@(require_results)
|
||||
adjugate_matrix4x4 :: proc "contextless" (x: $M/matrix[4, 4]$T) -> (y: M) {
|
||||
cofactor_matrix4x4 :: proc "contextless" (x: $M/matrix[4, 4]$T) -> (y: M) {
|
||||
for i in 0..<4 {
|
||||
for j in 0..<4 {
|
||||
sign: T = 1 if (i + j) % 2 == 0 else -1
|
||||
@@ -1645,19 +1693,19 @@ inverse_transpose_matrix2x2 :: proc "contextless" (x: $M/matrix[2, 2]$T) -> (y:
|
||||
|
||||
@(require_results)
|
||||
inverse_transpose_matrix3x3 :: proc "contextless" (x: $M/matrix[3, 3]$T) -> (y: M) #no_bounds_check {
|
||||
a := adjugate(x)
|
||||
c := cofactor(x)
|
||||
d := determinant(x)
|
||||
when intrinsics.type_is_integer(T) {
|
||||
for i in 0..<3 {
|
||||
for j in 0..<3 {
|
||||
y[i, j] = a[i, j] / d
|
||||
y[i, j] = c[i, j] / d
|
||||
}
|
||||
}
|
||||
} else {
|
||||
id := 1/d
|
||||
for i in 0..<3 {
|
||||
for j in 0..<3 {
|
||||
y[i, j] = a[i, j] * id
|
||||
y[i, j] = c[i, j] * id
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1666,22 +1714,22 @@ inverse_transpose_matrix3x3 :: proc "contextless" (x: $M/matrix[3, 3]$T) -> (y:
|
||||
|
||||
@(require_results)
|
||||
inverse_transpose_matrix4x4 :: proc "contextless" (x: $M/matrix[4, 4]$T) -> (y: M) #no_bounds_check {
|
||||
a := adjugate(x)
|
||||
c := cofactor(x)
|
||||
d: T
|
||||
for i in 0..<4 {
|
||||
d += x[0, i] * a[0, i]
|
||||
d += x[0, i] * c[0, i]
|
||||
}
|
||||
when intrinsics.type_is_integer(T) {
|
||||
for i in 0..<4 {
|
||||
for j in 0..<4 {
|
||||
y[i, j] = a[i, j] / d
|
||||
y[i, j] = c[i, j] / d
|
||||
}
|
||||
}
|
||||
} else {
|
||||
id := 1/d
|
||||
for i in 0..<4 {
|
||||
for j in 0..<4 {
|
||||
y[i, j] = a[i, j] * id
|
||||
y[i, j] = c[i, j] * id
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1714,19 +1762,19 @@ inverse_matrix2x2 :: proc "contextless" (x: $M/matrix[2, 2]$T) -> (y: M) {
|
||||
|
||||
@(require_results)
|
||||
inverse_matrix3x3 :: proc "contextless" (x: $M/matrix[3, 3]$T) -> (y: M) #no_bounds_check {
|
||||
a := adjugate(x)
|
||||
c := cofactor(x)
|
||||
d := determinant(x)
|
||||
when intrinsics.type_is_integer(T) {
|
||||
for i in 0..<3 {
|
||||
for j in 0..<3 {
|
||||
y[i, j] = a[j, i] / d
|
||||
y[i, j] = c[j, i] / d
|
||||
}
|
||||
}
|
||||
} else {
|
||||
id := 1/d
|
||||
for i in 0..<3 {
|
||||
for j in 0..<3 {
|
||||
y[i, j] = a[j, i] * id
|
||||
y[i, j] = c[j, i] * id
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1735,22 +1783,22 @@ inverse_matrix3x3 :: proc "contextless" (x: $M/matrix[3, 3]$T) -> (y: M) #no_bou
|
||||
|
||||
@(require_results)
|
||||
inverse_matrix4x4 :: proc "contextless" (x: $M/matrix[4, 4]$T) -> (y: M) #no_bounds_check {
|
||||
a := adjugate(x)
|
||||
c := cofactor(x)
|
||||
d: T
|
||||
for i in 0..<4 {
|
||||
d += x[0, i] * a[0, i]
|
||||
d += x[0, i] * c[0, i]
|
||||
}
|
||||
when intrinsics.type_is_integer(T) {
|
||||
for i in 0..<4 {
|
||||
for j in 0..<4 {
|
||||
y[i, j] = a[j, i] / d
|
||||
y[i, j] = c[j, i] / d
|
||||
}
|
||||
}
|
||||
} else {
|
||||
id := 1/d
|
||||
for i in 0..<4 {
|
||||
for j in 0..<4 {
|
||||
y[i, j] = a[j, i] * id
|
||||
y[i, j] = c[j, i] * id
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1207,8 +1207,8 @@ matrix2_inverse_f16 :: proc "contextless" (m: Matrix2f16) -> (c: Matrix2f16) #no
|
||||
d := m[0, 0]*m[1, 1] - m[0, 1]*m[1, 0]
|
||||
id := 1.0/d
|
||||
c[0, 0] = +m[1, 1] * id
|
||||
c[0, 1] = -m[1, 0] * id
|
||||
c[1, 0] = -m[0, 1] * id
|
||||
c[0, 1] = -m[0, 1] * id
|
||||
c[1, 0] = -m[1, 0] * id
|
||||
c[1, 1] = +m[0, 0] * id
|
||||
return c
|
||||
}
|
||||
@@ -1217,8 +1217,8 @@ matrix2_inverse_f32 :: proc "contextless" (m: Matrix2f32) -> (c: Matrix2f32) #no
|
||||
d := m[0, 0]*m[1, 1] - m[0, 1]*m[1, 0]
|
||||
id := 1.0/d
|
||||
c[0, 0] = +m[1, 1] * id
|
||||
c[0, 1] = -m[1, 0] * id
|
||||
c[1, 0] = -m[0, 1] * id
|
||||
c[0, 1] = -m[0, 1] * id
|
||||
c[1, 0] = -m[1, 0] * id
|
||||
c[1, 1] = +m[0, 0] * id
|
||||
return c
|
||||
}
|
||||
@@ -1227,8 +1227,8 @@ matrix2_inverse_f64 :: proc "contextless" (m: Matrix2f64) -> (c: Matrix2f64) #no
|
||||
d := m[0, 0]*m[1, 1] - m[0, 1]*m[1, 0]
|
||||
id := 1.0/d
|
||||
c[0, 0] = +m[1, 1] * id
|
||||
c[0, 1] = -m[1, 0] * id
|
||||
c[1, 0] = -m[0, 1] * id
|
||||
c[0, 1] = -m[0, 1] * id
|
||||
c[1, 0] = -m[1, 0] * id
|
||||
c[1, 1] = +m[0, 0] * id
|
||||
return c
|
||||
}
|
||||
|
||||
@@ -16,9 +16,9 @@ Generator_Query_Info :: runtime.Random_Generator_Query_Info
|
||||
Default_Random_State :: runtime.Default_Random_State
default_random_generator :: runtime.default_random_generator

@(require_results)
create :: proc(seed: u64) -> (state: Default_Random_State) {
	seed := seed
	runtime.default_random_generator(&state)
	runtime.default_random_generator_proc(&state, .Reset, ([^]byte)(&seed)[:size_of(seed)])
	return
}
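A usage sketch (assuming `rand.create` and the aliased `rand.default_random_generator` shown above):

	import "core:fmt"
	import "core:math/rand"

	seeded_random_example :: proc() {
		state := rand.create(42)
		context.random_generator = rand.default_random_generator(&state)
		fmt.println(rand.uint64()) // deterministic for a fixed seed
	}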
|
||||
@@ -29,30 +29,6 @@ Reset the seed used by the context.random_generator.
|
||||
Inputs:
|
||||
- seed: The seed value
|
||||
|
||||
Example:
|
||||
import "core:math/rand"
|
||||
import "core:fmt"
|
||||
|
||||
set_global_seed_example :: proc() {
|
||||
rand.set_global_seed(1)
|
||||
fmt.println(rand.uint64())
|
||||
}
|
||||
|
||||
Possible Output:
|
||||
|
||||
10
|
||||
*/
|
||||
@(deprecated="Prefer `rand.reset`")
|
||||
set_global_seed :: proc(seed: u64) {
|
||||
runtime.random_generator_reset_u64(context.random_generator, seed)
|
||||
}
|
||||
|
||||
/*
|
||||
Reset the seed used by the context.random_generator.
|
||||
|
||||
Inputs:
|
||||
- seed: The seed value
|
||||
|
||||
Example:
|
||||
import "core:math/rand"
|
||||
import "core:fmt"
|
||||
@@ -491,7 +467,7 @@ Example:
|
||||
Possible Output:
|
||||
|
||||
15.312
|
||||
673.130
|
||||
273.130
|
||||
|
||||
*/
|
||||
@(require_results) float32_range :: proc(low, high: f32, gen := context.random_generator) -> (val: f32) {
|
||||
|
||||
@@ -785,6 +785,27 @@ delete_map :: proc(
|
||||
return runtime.delete_map(m, loc)
|
||||
}
|
||||
|
||||
/*
|
||||
Free an SoA slice.
|
||||
*/
|
||||
delete_soa_slice :: proc(
|
||||
array: $T/#soa[]$E,
|
||||
allocator := context.allocator,
|
||||
loc := #caller_location,
|
||||
) -> Allocator_Error {
|
||||
return runtime.delete_soa_slice(array, allocator, loc)
|
||||
}
|
||||
|
||||
/*
|
||||
Free an SoA dynamic array.
|
||||
*/
|
||||
delete_soa_dynamic_array :: proc(
|
||||
array: $T/#soa[dynamic]$E,
|
||||
loc := #caller_location,
|
||||
) -> Allocator_Error {
|
||||
return runtime.delete_soa_dynamic_array(array, loc)
|
||||
}
|
||||
|
||||
/*
|
||||
Free.
|
||||
*/
|
||||
@@ -794,6 +815,8 @@ delete :: proc{
|
||||
delete_dynamic_array,
|
||||
delete_slice,
|
||||
delete_map,
|
||||
delete_soa_slice,
|
||||
delete_soa_dynamic_array,
|
||||
}
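A usage sketch for the new SoA overloads (hedged: it assumes `make` and `delete` now route to the SoA variants added above):

	Vector3 :: struct {x, y, z: f32}

	soa_slice_example :: proc() {
		positions, _ := make(#soa[]Vector3, 16)
		defer delete(positions) // resolves to delete_soa_slice
		positions.x[0] = 1
	}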
|
||||
|
||||
/*
|
||||
@@ -900,8 +923,7 @@ make_dynamic_array :: proc(
|
||||
Allocate a dynamic array with initial length.
|
||||
|
||||
This procedure creates a dynamic array of type `T`, with `allocator` as its
|
||||
backing allocator, and initial capacity of `0`, and initial length specified by
|
||||
`len`.
|
||||
backing allocator, and initial capacity and length specified by `len`.
|
||||
*/
|
||||
@(require_results)
|
||||
make_dynamic_array_len :: proc(
|
||||
@@ -910,7 +932,7 @@ make_dynamic_array_len :: proc(
|
||||
allocator := context.allocator,
|
||||
loc := #caller_location,
|
||||
) -> (T, Allocator_Error) {
|
||||
return runtime.make_dynamic_array_len_cap(T, len, len, allocator, loc)
|
||||
return runtime.make_dynamic_array_len(T, len, allocator, loc)
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -964,6 +986,71 @@ make_multi_pointer :: proc(
|
||||
return runtime.make_multi_pointer(T, len, allocator, loc)
|
||||
}
|
||||
|
||||
/*
|
||||
Allocate an SoA slice.
|
||||
|
||||
This procedure allocates an SoA slice of type `T` with length `len`, from an
|
||||
allocator specified by `allocator`, and returns the allocated SoA slice.
|
||||
*/
|
||||
@(require_results)
|
||||
make_soa_slice :: proc(
|
||||
$T: typeid/#soa[]$E,
|
||||
#any_int len: int,
|
||||
allocator := context.allocator,
|
||||
loc := #caller_location
|
||||
) -> (array: T, err: Allocator_Error) {
|
||||
return runtime.make_soa_slice(T, len, allocator, loc)
|
||||
}
|
||||
|
||||
/*
|
||||
Allocate an SoA dynamic array.
|
||||
|
||||
This procedure creates an SoA dynamic array of type `T`, with `allocator` as
|
||||
its backing allocator, and initial length and capacity of `0`.
|
||||
*/
|
||||
@(require_results)
|
||||
make_soa_dynamic_array :: proc(
|
||||
$T: typeid/#soa[dynamic]$E,
|
||||
allocator := context.allocator,
|
||||
loc := #caller_location
|
||||
) -> (array: T, err: Allocator_Error) {
|
||||
return runtime.make_soa_dynamic_array(T, allocator, loc)
|
||||
}
|
||||
|
||||
/*
|
||||
Allocate an SoA dynamic array with initial length.
|
||||
|
||||
This procedure creates an SoA dynamic array of type `T`, with `allocator` as its
|
||||
backing allocator, and initial capacity and length specified by `len`.
|
||||
*/
|
||||
@(require_results)
|
||||
make_soa_dynamic_array_len :: proc(
|
||||
$T: typeid/#soa[dynamic]$E,
|
||||
#any_int len: int,
|
||||
allocator := context.allocator,
|
||||
loc := #caller_location
|
||||
) -> (array: T, err: Allocator_Error) {
|
||||
return runtime.make_soa_dynamic_array_len(T, len, allocator, loc)
|
||||
}
|
||||
|
||||
/*
|
||||
Allocate an SoA dynamic array with initial length and capacity.
|
||||
|
||||
This procedure creates an SoA dynamic array of type `T`, with `allocator` as its
|
||||
backing allocator, and initial capacity specified by `cap`, and initial length
|
||||
specified by `len`.
|
||||
*/
|
||||
@(require_results)
|
||||
make_soa_dynamic_array_len_cap :: proc(
|
||||
$T: typeid/#soa[dynamic]$E,
|
||||
#any_int len: int,
|
||||
#any_int cap: int,
|
||||
allocator := context.allocator,
|
||||
loc := #caller_location
|
||||
) -> (array: T, err: Allocator_Error) {
|
||||
return runtime.make_soa_dynamic_array_len_cap(T, len, cap, allocator, loc)
|
||||
}
|
||||
|
||||
/*
|
||||
Allocate.
|
||||
*/
|
||||
@@ -974,6 +1061,10 @@ make :: proc{
|
||||
make_dynamic_array_len_cap,
|
||||
make_map,
|
||||
make_multi_pointer,
|
||||
make_soa_slice,
|
||||
make_soa_dynamic_array,
|
||||
make_soa_dynamic_array_len,
|
||||
make_soa_dynamic_array_len_cap,
|
||||
}
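And for the dynamic-array variants (a sketch; `append_soa` is the existing builtin for appending to #soa dynamic arrays):

	Particle :: struct {pos, vel: [3]f32}

	soa_dynamic_array_example :: proc() {
		particles, _ := make(#soa[dynamic]Particle, 0, 64) // length 0, capacity 64
		defer delete(particles) // resolves to delete_soa_dynamic_array
		append_soa(&particles, Particle{pos = {0, 0, 0}, vel = {1, 0, 0}})
	}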
|
||||
|
||||
/*
|
||||
|
||||
@@ -140,14 +140,6 @@ arena_init :: proc(a: ^Arena, data: []byte) {
|
||||
a.temp_count = 0
|
||||
}
|
||||
|
||||
@(deprecated="prefer 'mem.arena_init'")
|
||||
init_arena :: proc(a: ^Arena, data: []byte) {
|
||||
a.data = data
|
||||
a.offset = 0
|
||||
a.peak_used = 0
|
||||
a.temp_count = 0
|
||||
}
|
||||
|
||||
/*
|
||||
Allocate memory from an arena.
|
||||
|
||||
@@ -786,14 +778,6 @@ stack_init :: proc(s: ^Stack, data: []byte) {
|
||||
s.peak_used = 0
|
||||
}
|
||||
|
||||
@(deprecated="prefer 'mem.stack_init'")
|
||||
init_stack :: proc(s: ^Stack, data: []byte) {
|
||||
s.data = data
|
||||
s.prev_offset = 0
|
||||
s.curr_offset = 0
|
||||
s.peak_used = 0
|
||||
}
|
||||
|
||||
/*
|
||||
Allocate memory from stack.
|
||||
|
||||
@@ -1162,13 +1146,6 @@ small_stack_init :: proc(s: ^Small_Stack, data: []byte) {
|
||||
s.peak_used = 0
|
||||
}
|
||||
|
||||
@(deprecated="prefer 'small_stack_init'")
|
||||
init_small_stack :: proc(s: ^Small_Stack, data: []byte) {
|
||||
s.data = data
|
||||
s.offset = 0
|
||||
s.peak_used = 0
|
||||
}
|
||||
|
||||
/*
|
||||
Small stack allocator.
|
||||
|
||||
|
||||
@@ -16,8 +16,8 @@ a multipointer can be indexed, but does not have a definite length. A slice is
|
||||
a pointer that points to multiple objects equipped with the length, specifying
|
||||
the amount of objects a slice points to.
|
||||
|
||||
When object's values are read through a pointer, that operation is called a
|
||||
*load* operation. When memory is read through a pointer, that operation is
|
||||
When an object's values are read through a pointer, that operation is called a
|
||||
*load* operation. When memory is written to through a pointer, that operation is
|
||||
called a *store* operation. Both of these operations can be called a *memory
|
||||
access operation*.
|
||||
|
||||
|
||||
@@ -685,11 +685,4 @@ calc_padding_with_header :: proc "contextless" (ptr: uintptr, align: uintptr, he
|
||||
}
|
||||
}
|
||||
return int(padding)
|
||||
}
|
||||
|
||||
@(require_results, deprecated="prefer 'slice.clone'")
|
||||
clone_slice :: proc(slice: $T/[]$E, allocator := context.allocator, loc := #caller_location) -> (new_slice: T) {
|
||||
new_slice, _ = make(T, len(slice), allocator, loc)
|
||||
runtime.copy(new_slice, slice)
|
||||
return new_slice
|
||||
}
|
||||
}
|
||||
@@ -260,7 +260,7 @@ adjust_request_size :: proc(size, align: uint) -> (adjusted: uint) {
|
||||
|
||||
// aligned size must not exceed `BLOCK_SIZE_MAX`, or we'll go out of bounds on `sl_bitmap`.
|
||||
if aligned := align_up(size, align); aligned < BLOCK_SIZE_MAX {
|
||||
adjusted = min(aligned, BLOCK_SIZE_MAX)
|
||||
adjusted = max(aligned, BLOCK_SIZE_MIN)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -34,12 +34,18 @@ Tracking_Allocator_Bad_Free_Entry :: struct {
|
||||
location: runtime.Source_Code_Location,
|
||||
}
|
||||
|
||||
/*
|
||||
Callback type for when tracking allocator runs into a bad free.
|
||||
*/
|
||||
Tracking_Allocator_Bad_Free_Callback :: proc(t: ^Tracking_Allocator, memory: rawptr, location: runtime.Source_Code_Location)
|
||||
|
||||
/*
|
||||
Tracking allocator data.
|
||||
*/
|
||||
Tracking_Allocator :: struct {
|
||||
backing: Allocator,
|
||||
allocation_map: map[rawptr]Tracking_Allocator_Entry,
|
||||
bad_free_callback: Tracking_Allocator_Bad_Free_Callback,
|
||||
bad_free_array: [dynamic]Tracking_Allocator_Bad_Free_Entry,
|
||||
mutex: sync.Mutex,
|
||||
clear_on_free_all: bool,
|
||||
@@ -61,6 +67,7 @@ allocate the tracked data.
|
||||
tracking_allocator_init :: proc(t: ^Tracking_Allocator, backing_allocator: Allocator, internals_allocator := context.allocator) {
|
||||
t.backing = backing_allocator
|
||||
t.allocation_map.allocator = internals_allocator
|
||||
t.bad_free_callback = tracking_allocator_bad_free_callback_panic
|
||||
t.bad_free_array.allocator = internals_allocator
|
||||
if .Free_All in query_features(t.backing) {
|
||||
t.clear_on_free_all = true
|
||||
@@ -109,6 +116,33 @@ tracking_allocator_reset :: proc(t: ^Tracking_Allocator) {
|
||||
sync.mutex_unlock(&t.mutex)
|
||||
}
|
||||
|
||||
/*
|
||||
Default behavior for a bad free: Crash with error message that says where the
|
||||
bad free happened.
|
||||
|
||||
Override Tracking_Allocator.bad_free_callback to have something else happen. For
|
||||
example, you can use tracking_allocator_bad_free_callback_add_to_array to return
|
||||
the tracking allocator to the old behavior, where the bad_free_array was used.
|
||||
*/
|
||||
tracking_allocator_bad_free_callback_panic :: proc(t: ^Tracking_Allocator, memory: rawptr, location: runtime.Source_Code_Location) {
|
||||
runtime.print_caller_location(location)
|
||||
runtime.print_string(" Tracking allocator error: Bad free of pointer ")
|
||||
runtime.print_uintptr(uintptr(memory))
|
||||
runtime.print_string("\n")
|
||||
runtime.trap()
|
||||
}
|
||||
|
||||
/*
|
||||
Alternative behavior for a bad free: Store in `bad_free_array`. If you use this,
|
||||
then you must make sure to check Tracking_Allocator.bad_free_array at some point.
|
||||
*/
|
||||
tracking_allocator_bad_free_callback_add_to_array :: proc(t: ^Tracking_Allocator, memory: rawptr, location: runtime.Source_Code_Location) {
|
||||
append(&t.bad_free_array, Tracking_Allocator_Bad_Free_Entry {
|
||||
memory = memory,
|
||||
location = location,
|
||||
})
|
||||
}
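A minimal sketch of overriding the callback (using only the names introduced above):

	import "core:mem"

	install_tracking :: proc(track: ^mem.Tracking_Allocator) -> mem.Allocator {
		mem.tracking_allocator_init(track, context.allocator)
		// Opt back into the old behavior: record bad frees instead of crashing.
		track.bad_free_callback = mem.tracking_allocator_bad_free_callback_add_to_array
		return mem.tracking_allocator(track)
	}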
|
||||
|
||||
/*
|
||||
Tracking allocator.
|
||||
|
||||
@@ -116,8 +150,10 @@ The tracking allocator is an allocator wrapper that tracks memory allocations.
|
||||
This allocator stores all the allocations in a map. Whenever a pointer that's
|
||||
not inside of the map is freed, the `bad_free_array` entry is added.
|
||||
|
||||
An example of how to use the `Tracking_Allocator` to track subsequent allocations
|
||||
in your program and report leaks and bad frees:
|
||||
Here follows an example of how to use the `Tracking_Allocator` to track
|
||||
subsequent allocations in your program and report leaks. By default, the
|
||||
tracking allocator will crash on bad frees. You can override that behavior by
|
||||
overriding `track.bad_free_callback`.
|
||||
|
||||
Example:
|
||||
|
||||
@@ -137,9 +173,6 @@ Example:
|
||||
for _, leak in track.allocation_map {
|
||||
fmt.printf("%v leaked %m\n", leak.location, leak.size)
|
||||
}
|
||||
for bad_free in track.bad_free_array {
|
||||
fmt.printf("%v allocation %p was freed badly\n", bad_free.location, bad_free.memory)
|
||||
}
|
||||
}
|
||||
*/
|
||||
@(require_results)
|
||||
@@ -191,10 +224,9 @@ tracking_allocator_proc :: proc(
|
||||
}
|
||||
|
||||
if mode == .Free && old_memory != nil && old_memory not_in data.allocation_map {
|
||||
append(&data.bad_free_array, Tracking_Allocator_Bad_Free_Entry{
|
||||
memory = old_memory,
|
||||
location = loc,
|
||||
})
|
||||
if data.bad_free_callback != nil {
|
||||
data.bad_free_callback(data, old_memory, loc)
|
||||
}
|
||||
} else {
|
||||
result = data.backing.procedure(data.backing.data, mode, size, alignment, old_memory, old_size, loc) or_return
|
||||
}
|
||||
|
||||
@@ -204,8 +204,9 @@ arena_free_all :: proc(arena: ^Arena, loc := #caller_location) {
|
||||
}
|
||||
// Zero the first block's memory
|
||||
if arena.curr_block != nil {
|
||||
mem.zero(arena.curr_block.base, int(arena.curr_block.used))
|
||||
curr_block_used := int(arena.curr_block.used)
|
||||
arena.curr_block.used = 0
|
||||
mem.zero(arena.curr_block.base, curr_block_used)
|
||||
}
|
||||
arena.total_used = 0
|
||||
case .Static, .Buffer:
|
||||
@@ -327,10 +328,24 @@ arena_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode,
|
||||
case size == 0:
|
||||
err = .Mode_Not_Implemented
|
||||
return
|
||||
case (uintptr(old_data) & uintptr(alignment-1) == 0) && size < old_size:
|
||||
// shrink data in-place
|
||||
data = old_data[:size]
|
||||
return
|
||||
case uintptr(old_data) & uintptr(alignment-1) == 0:
|
||||
if size < old_size {
|
||||
// shrink data in-place
|
||||
data = old_data[:size]
|
||||
return
|
||||
}
|
||||
|
||||
if block := arena.curr_block; block != nil {
|
||||
start := uint(uintptr(old_memory)) - uint(uintptr(block.base))
|
||||
old_end := start + old_size
|
||||
new_end := start + size
|
||||
if start < old_end && old_end == block.used && new_end <= block.reserved {
|
||||
// grow data in-place, adjusting next allocation
|
||||
_ = alloc_from_memory_block(block, new_end - old_end, 1, default_commit_size=arena.default_commit_size) or_return
|
||||
data = block.base[start:new_end]
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
new_memory := arena_alloc(arena, size, alignment, location) or_return
|
||||
@@ -401,9 +416,10 @@ arena_temp_end :: proc(temp: Arena_Temp, loc := #caller_location) {
|
||||
|
||||
if block := arena.curr_block; block != nil {
|
||||
assert(block.used >= temp.used, "out of order use of arena_temp_end", loc)
|
||||
amount_to_zero := min(block.used-temp.used, block.reserved-block.used)
|
||||
amount_to_zero := block.used-temp.used
|
||||
mem.zero_slice(block.base[temp.used:][:amount_to_zero])
|
||||
block.used = temp.used
|
||||
arena.total_used -= amount_to_zero
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -95,6 +95,7 @@ Resolve_Error :: enum u32 {
|
||||
}
|
||||
|
||||
DNS_Error :: enum u32 {
|
||||
None = 0,
|
||||
Invalid_Hostname_Error = 1,
|
||||
Invalid_Hosts_Config_Error,
|
||||
Invalid_Resolv_Config_Error,
|
||||
@@ -147,6 +148,9 @@ IP6_Loopback :: IP6_Address{0, 0, 0, 0, 0, 0, 0, 1}
|
||||
IP4_Any := IP4_Address{}
|
||||
IP6_Any := IP6_Address{}
|
||||
|
||||
IP4_mDNS_Broadcast := Endpoint{address=IP4_Address{224, 0, 0, 251}, port=5353}
|
||||
IP6_mDNS_Broadcast := Endpoint{address=IP6_Address{65282, 0, 0, 0, 0, 0, 0, 251}, port = 5353}
|
||||
|
||||
Endpoint :: struct {
|
||||
address: Address,
|
||||
port: int,
|
||||
|
||||
@@ -7,10 +7,11 @@ package net
|
||||
*/
|
||||
|
||||
/*
|
||||
Copyright 2022 Tetralux <tetraluxonpc@gmail.com>
|
||||
Copyright 2022 Colin Davidson <colrdavidson@gmail.com>
|
||||
Copyright 2022 Jeroen van Rijn <nom@duclavier.com>.
|
||||
Copyright 2024 Feoramund <rune@swevencraft.org>.
|
||||
Copyright 2022 Tetralux <tetraluxonpc@gmail.com>
|
||||
Copyright 2022 Colin Davidson <colrdavidson@gmail.com>
|
||||
Copyright 2022 Jeroen van Rijn <nom@duclavier.com>.
|
||||
Copyright 2024 Feoramund <rune@swevencraft.org>.
|
||||
Copyright 2025 Christiano Haesbaert <haesbaert@haesbaert.org>.
|
||||
Made available under Odin's BSD-3 license.
|
||||
|
||||
List of contributors:
|
||||
@@ -18,12 +19,14 @@ package net
|
||||
Colin Davidson: Linux platform code, OSX platform code, Odin-native DNS resolver
|
||||
Jeroen van Rijn: Cross platform unification, code style, documentation
|
||||
Feoramund: FreeBSD platform code
|
||||
Haesbaert: Security fixes
|
||||
*/
|
||||
|
||||
import "core:mem"
|
||||
import "core:strings"
|
||||
import "core:time"
|
||||
import "core:os"
|
||||
import "core:math/rand"
|
||||
/*
|
||||
Default configuration for DNS resolution.
|
||||
*/
|
||||
@@ -50,9 +53,12 @@ init_dns_configuration :: proc() {
|
||||
dns_configuration.hosts_file, _ = replace_environment_path(dns_configuration.hosts_file)
|
||||
}
|
||||
|
||||
@(fini, private)
|
||||
destroy_dns_configuration :: proc() {
|
||||
delete(dns_configuration.resolv_conf)
|
||||
dns_configuration.resolv_conf = ""
|
||||
delete(dns_configuration.hosts_file)
|
||||
dns_configuration.hosts_file = ""
|
||||
}
|
||||
|
||||
dns_configuration := DEFAULT_DNS_CONFIGURATION
|
||||
@@ -132,7 +138,14 @@ resolve_ip4 :: proc(hostname_and_maybe_port: string) -> (ep4: Endpoint, err: Net
|
||||
return
|
||||
}
|
||||
case Host:
|
||||
recs, _ := get_dns_records_from_os(t.hostname, .IP4, context.temp_allocator)
|
||||
recs: []DNS_Record
|
||||
|
||||
if ODIN_OS != .Windows && strings.has_suffix(t.hostname, ".local") {
|
||||
recs, _ = get_dns_records_from_nameservers(t.hostname, .IP4, {IP4_mDNS_Broadcast}, nil, context.temp_allocator)
|
||||
} else {
|
||||
recs, _ = get_dns_records_from_os(t.hostname, .IP4, context.temp_allocator)
|
||||
}
|
||||
|
||||
if len(recs) == 0 {
|
||||
err = .Unable_To_Resolve
|
||||
return
|
||||
@@ -159,7 +172,14 @@ resolve_ip6 :: proc(hostname_and_maybe_port: string) -> (ep6: Endpoint, err: Net
|
||||
return t, nil
|
||||
}
|
||||
case Host:
|
||||
recs, _ := get_dns_records_from_os(t.hostname, .IP6, context.temp_allocator)
|
||||
recs: []DNS_Record
|
||||
|
||||
if ODIN_OS != .Windows && strings.has_suffix(t.hostname, ".local") {
|
||||
recs, _ = get_dns_records_from_nameservers(t.hostname, .IP6, {IP6_mDNS_Broadcast}, nil, context.temp_allocator)
|
||||
} else {
|
||||
recs, _ = get_dns_records_from_os(t.hostname, .IP6, context.temp_allocator)
|
||||
}
|
||||
|
||||
if len(recs) == 0 {
|
||||
err = .Unable_To_Resolve
|
||||
return
|
||||
@@ -210,7 +230,7 @@ get_dns_records_from_nameservers :: proc(hostname: string, type: DNS_Record_Type
|
||||
}
|
||||
|
||||
hdr := DNS_Header{
|
||||
id = 0,
|
||||
id = u16be(rand.uint32()),
|
||||
is_response = false,
|
||||
opcode = 0,
|
||||
is_authoritative = false,
|
||||
@@ -255,23 +275,23 @@ get_dns_records_from_nameservers :: proc(hostname: string, type: DNS_Record_Type
|
||||
return nil, .Connection_Error
|
||||
}
|
||||
|
||||
// recv_sz, _, recv_err := recv_udp(conn, dns_response_buf[:])
|
||||
// if recv_err == UDP_Recv_Error.Timeout {
|
||||
// continue
|
||||
// } else if recv_err != nil {
|
||||
// continue
|
||||
// }
|
||||
recv_sz, _ := recv_udp(conn, dns_response_buf[:]) or_continue
|
||||
recv_sz, src := recv_udp(conn, dns_response_buf[:]) or_continue
|
||||
if recv_sz == 0 {
|
||||
continue
|
||||
}
|
||||
if src != name_server {
|
||||
continue
|
||||
}
|
||||
|
||||
dns_response = dns_response_buf[:recv_sz]
|
||||
|
||||
rsp, _ok := parse_response(dns_response, type)
|
||||
rsp, xid, _ok := parse_response(dns_response, type)
|
||||
if !_ok {
|
||||
return nil, .Server_Error
|
||||
}
|
||||
if id != xid {
|
||||
continue
|
||||
}
|
||||
|
||||
if len(rsp) == 0 {
|
||||
continue
|
||||
@@ -525,18 +545,21 @@ decode_hostname :: proc(packet: []u8, start_idx: int, allocator := context.alloc
|
||||
return
|
||||
}
|
||||
|
||||
if packet[cur_idx] > 63 && packet[cur_idx] != 0xC0 {
|
||||
return
|
||||
}
|
||||
switch {
|
||||
|
||||
switch packet[cur_idx] {
|
||||
|
||||
// This is an offset to more data in the packet, jump to it
|
||||
case 0xC0:
|
||||
// A pointer is when the two higher bits are set.
|
||||
case packet[cur_idx] & 0xC0 == 0xC0:
|
||||
if len(packet[cur_idx:]) < 2 {
|
||||
return
|
||||
}
|
||||
pkt := packet[cur_idx:cur_idx+2]
|
||||
val := (^u16be)(raw_data(pkt))^
|
||||
offset := int(val & 0x3FFF)
|
||||
if offset > len(packet) {
|
||||
// RFC 9267 a ptr should only point backwards, enough to avoid infinity.
|
||||
// "The offset at which this octet is located must be smaller than the offset
|
||||
// at which the compression pointer is located". Still keep iteration_max to
|
||||
// avoid tiny jumps.
|
||||
if offset > len(packet) || offset >= cur_idx {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -547,6 +570,10 @@ decode_hostname :: proc(packet: []u8, start_idx: int, allocator := context.alloc
|
||||
level += 1
|
||||
}
|
||||
|
||||
// Validate label len
|
||||
case packet[cur_idx] > LABEL_MAX:
|
||||
return
|
||||
|
||||
// This is a label, insert it into the hostname
|
||||
case:
|
||||
label_size := int(packet[cur_idx])
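For reference, the pointer encoding that the checks above validate (a worked example with hypothetical bytes):

	// 0xC0 0x0C: the two high bits mark a compression pointer; the remaining 14 bits are the offset.
	pointer_offset_example :: proc() -> int {
		pkt := []u8{0xC0, 0x0C}
		val := (^u16be)(raw_data(pkt))^
		return int(val & 0x3FFF) // 12 -- per RFC 9267 it must point strictly backwards in the packet
	}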
|
||||
@@ -785,7 +812,7 @@ parse_record :: proc(packet: []u8, cur_off: ^int, filter: DNS_Record_Type = nil)
|
||||
- Data[]
|
||||
*/
|
||||
|
||||
parse_response :: proc(response: []u8, filter: DNS_Record_Type = nil, allocator := context.allocator) -> (records: []DNS_Record, ok: bool) {
|
||||
parse_response :: proc(response: []u8, filter: DNS_Record_Type = nil, allocator := context.allocator) -> (records: []DNS_Record, xid: u16be, ok: bool) {
|
||||
context.allocator = allocator
|
||||
|
||||
HEADER_SIZE_BYTES :: 12
|
||||
@@ -798,11 +825,13 @@ parse_response :: proc(response: []u8, filter: DNS_Record_Type = nil, allocator
|
||||
dns_hdr_chunks := mem.slice_data_cast([]u16be, response[:HEADER_SIZE_BYTES])
|
||||
hdr := unpack_dns_header(dns_hdr_chunks[0], dns_hdr_chunks[1])
|
||||
if !hdr.is_response {
|
||||
delete(_records)
|
||||
return
|
||||
}
|
||||
|
||||
question_count := int(dns_hdr_chunks[2])
|
||||
if question_count != 1 {
|
||||
delete(_records)
|
||||
return
|
||||
}
|
||||
answer_count := int(dns_hdr_chunks[3])
|
||||
@@ -854,6 +883,7 @@ parse_response :: proc(response: []u8, filter: DNS_Record_Type = nil, allocator
|
||||
append(&_records, rec)
|
||||
}
|
||||
}
|
||||
xid = hdr.id
|
||||
|
||||
return _records[:], true
|
||||
return _records[:], xid, true
|
||||
}
|
||||
|
||||
@@ -29,9 +29,14 @@ import win "core:sys/windows"
|
||||
_get_dns_records_os :: proc(hostname: string, type: DNS_Record_Type, allocator := context.allocator) -> (records: []DNS_Record, err: DNS_Error) {
|
||||
context.allocator = allocator
|
||||
|
||||
options := win.DNS_QUERY_OPTIONS{}
|
||||
if strings.has_suffix(hostname, ".local") {
|
||||
options = {.MULTICAST_ONLY, .MULTICAST_WAIT} // 0x00020500
|
||||
}
|
||||
|
||||
host_cstr := strings.clone_to_cstring(hostname, context.temp_allocator)
|
||||
rec: ^win.DNS_RECORD
|
||||
res := win.DnsQuery_UTF8(host_cstr, u16(type), 0, nil, &rec, nil)
|
||||
res := win.DnsQuery_UTF8(host_cstr, u16(type), options, nil, &rec, nil)
|
||||
|
||||
switch u32(res) {
|
||||
case 0:
|
||||
|
||||
@@ -34,23 +34,12 @@ any_socket_to_socket :: proc "contextless" (socket: Any_Socket) -> Socket {
	Expects both hostname and port to be present in the `hostname_and_port` parameter, either as:
	`a.host.name:9999`, or as `1.2.3.4:9999`, or IP6 equivalent.

	Calls `parse_hostname_or_endpoint` and `resolve`, then `dial_tcp_from_endpoint`.
	Calls `parse_hostname_or_endpoint` and `dial_tcp_from_host_or_endpoint`.
*/
dial_tcp_from_hostname_and_port_string :: proc(hostname_and_port: string, options := default_tcp_options) -> (socket: TCP_Socket, err: Network_Error) {
	target := parse_hostname_or_endpoint(hostname_and_port) or_return
	switch t in target {
	case Endpoint:
		return dial_tcp_from_endpoint(t, options)
	case Host:
		if t.port == 0 {
			return 0, .Port_Required
		}
		ep4, ep6 := resolve(t.hostname) or_return
		ep := ep4 if ep4.address != nil else ep6 // NOTE(tetra): We don't know what family the server uses, so we just default to IP4.
		ep.port = t.port
		return dial_tcp_from_endpoint(ep, options)
	}
	unreachable()

	return dial_tcp_from_host_or_endpoint(target, options)
}

/*
@@ -61,17 +50,39 @@ dial_tcp_from_hostname_and_port_string :: proc(hostname_and_port: string, option
*/
dial_tcp_from_hostname_with_port_override :: proc(hostname: string, port: int, options := default_tcp_options) -> (socket: TCP_Socket, err: Network_Error) {
	target := parse_hostname_or_endpoint(hostname) or_return
	switch &t in target {
	case Endpoint:
		t.port = port
	case Host:
		t.port = port
	}

	return dial_tcp_from_host_or_endpoint(target, options)
}

/*
	Expects the `host` as Host.
*/
dial_tcp_from_host :: proc(host: Host, options := default_tcp_options) -> (socket: TCP_Socket, err: Network_Error) {
	if host.port == 0 {
		return 0, .Port_Required
	}
	ep4, ep6 := resolve(host.hostname) or_return
	ep := ep4 if ep4.address != nil else ep6 // NOTE(tetra): We don't know what family the server uses, so we just default to IP4.
	ep.port = host.port
	return dial_tcp_from_endpoint(ep, options)
}

/*
	Expects the `target` as a `Host_Or_Endpoint`.
	Unwraps the underlying type and calls `dial_tcp_from_host` or `dial_tcp_from_endpoint`.
*/
dial_tcp_from_host_or_endpoint :: proc(target: Host_Or_Endpoint, options := default_tcp_options) -> (socket: TCP_Socket, err: Network_Error) {
	switch t in target {
	case Endpoint:
		return dial_tcp_from_endpoint({t.address, port}, options)
		return dial_tcp_from_endpoint(t, options)
	case Host:
		if port == 0 {
			return 0, .Port_Required
		}
		ep4, ep6 := resolve(t.hostname) or_return
		ep := ep4 if ep4.address != nil else ep6 // NOTE(tetra): We don't know what family the server uses, so we just default to IP4.
		ep.port = port
		return dial_tcp_from_endpoint(ep, options)
		return dial_tcp_from_host(t, options)
	}
	unreachable()
}
@@ -90,6 +101,8 @@ dial_tcp :: proc{
	dial_tcp_from_address_and_port,
	dial_tcp_from_hostname_and_port_string,
	dial_tcp_from_hostname_with_port_override,
	dial_tcp_from_host,
	dial_tcp_from_host_or_endpoint,
}

create_socket :: proc(family: Address_Family, protocol: Socket_Protocol) -> (socket: Any_Socket, err: Network_Error) {
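
Example (not part of the diff): a minimal sketch of dialing through the `dial_tcp` proc group after the refactor above. It assumes the public `core:net` names shown in the hunks (`dial_tcp`, `close`); the hostnames are placeholders.

	package dial_example

	import "core:fmt"
	import "core:net"

	main :: proc() {
		// A combined "host:port" string goes through dial_tcp_from_hostname_and_port_string.
		sock, err := net.dial_tcp("example.com:80")
		if err != nil {
			fmt.eprintln("dial failed:", err)
			return
		}
		defer net.close(sock)

		// A hostname plus a separate port uses dial_tcp_from_hostname_with_port_override,
		// which now also routes through dial_tcp_from_host_or_endpoint.
		sock2, err2 := net.dial_tcp("example.com", 443)
		if err2 != nil {
			fmt.eprintln("dial failed:", err2)
			return
		}
		defer net.close(sock2)
	}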
@@ -35,6 +35,7 @@ Socket_Option :: enum c.int {
	Send_Buffer_Size = c.int(linux.Socket_Option.SNDBUF),
	Receive_Timeout  = c.int(linux.Socket_Option.RCVTIMEO),
	Send_Timeout     = c.int(linux.Socket_Option.SNDTIMEO),
	Broadcast        = c.int(linux.Socket_Option.BROADCAST),
}

// Wrappers and unwrappers for system-native types
@@ -337,7 +338,8 @@ _set_option :: proc(sock: Any_Socket, option: Socket_Option, value: any, loc :=
	     .Reuse_Address,
	     .Keep_Alive,
	     .Out_Of_Bounds_Data_Inline,
	     .TCP_Nodelay:
	     .TCP_Nodelay,
	     .Broadcast:
		// TODO: verify whether these are options or not on Linux
		// .Broadcast, <-- yes
		// .Conditional_Accept,
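
Example (not part of the diff): a minimal sketch of enabling the newly wired-up `Broadcast` option on Linux. It assumes the public `core:net` API (`make_unbound_udp_socket`, `set_option`, `close`) mirrors the internal procs shown above.

	package broadcast_example

	import "core:fmt"
	import "core:net"

	main :: proc() {
		sock, err := net.make_unbound_udp_socket(.IP4)
		if err != nil {
			fmt.eprintln("socket creation failed:", err)
			return
		}
		defer net.close(sock)

		// With the mapping to SO_BROADCAST added above, this is now accepted by _set_option.
		if set_err := net.set_option(sock, .Broadcast, true); set_err != nil {
			fmt.eprintln("could not enable broadcast:", set_err)
		}
	}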
@@ -80,8 +80,9 @@ _dial_tcp_from_endpoint :: proc(endpoint: Endpoint, options := default_tcp_optio
|
||||
sockaddr := _endpoint_to_sockaddr(endpoint)
|
||||
res := win.connect(win.SOCKET(socket), &sockaddr, size_of(sockaddr))
|
||||
if res < 0 {
|
||||
err = Dial_Error(win.WSAGetLastError())
|
||||
close(socket)
|
||||
return {}, Dial_Error(win.WSAGetLastError())
|
||||
return {}, err
|
||||
}
|
||||
|
||||
if options.no_delay {
|
||||
|
||||
@@ -432,10 +432,13 @@ Range_Stmt :: struct {
	reverse: bool,
}

Inline_Range_Stmt :: struct {
Inline_Range_Stmt :: Unroll_Range_Stmt

Unroll_Range_Stmt :: struct {
	using node: Stmt,
	label: ^Expr,
	inline_pos: tokenizer.Pos,
	unroll_pos: tokenizer.Pos,
	args: []^Expr,
	for_pos: tokenizer.Pos,
	val0: ^Expr,
	val1: ^Expr,

@@ -242,8 +242,9 @@ clone_node :: proc(node: ^Node) -> ^Node {
		r.vals = clone(r.vals)
		r.expr = clone(r.expr)
		r.body = clone(r.body)
	case ^Inline_Range_Stmt:
	case ^Unroll_Range_Stmt:
		r.label = clone(r.label)
		r.args = clone(r.args)
		r.val0 = clone(r.val0)
		r.val1 = clone(r.val1)
		r.expr = clone(r.expr)
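
Example (not part of the diff): the kind of loop the renamed `Unroll_Range_Stmt` node represents. A minimal sketch assuming the `#unroll` range-loop directive; `core:odin/parser` would produce one `Unroll_Range_Stmt` for the loop below.

	package unroll_example

	import "core:fmt"

	main :: proc() {
		// Unrolled at compile time; previously parsed into Inline_Range_Stmt,
		// which is now an alias for Unroll_Range_Stmt.
		#unroll for i in 0..<4 {
			fmt.println(i)
		}
	}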
@@ -17,6 +17,9 @@ Build_Kind :: struct {
|
||||
arch: runtime.Odin_Arch_Types,
|
||||
}
|
||||
|
||||
// An empty build kind acts as a marker separating multiple `#+build` tag lines
|
||||
BUILD_KIND_NEWLINE_MARKER :: Build_Kind{}
|
||||
|
||||
File_Tags :: struct {
|
||||
build_project_name: [][]string,
|
||||
build: []Build_Kind,
|
||||
@@ -147,6 +150,11 @@ parse_file_tags :: proc(file: ast.File, allocator := context.allocator) -> (tags
|
||||
append(build_project_names, build_project_name_strings[index_start:])
|
||||
}
|
||||
case "build":
|
||||
|
||||
if len(build_kinds) > 0 {
|
||||
append(build_kinds, BUILD_KIND_NEWLINE_MARKER)
|
||||
}
|
||||
|
||||
kinds_loop: for {
|
||||
os_positive: runtime.Odin_OS_Types
|
||||
os_negative: runtime.Odin_OS_Types
|
||||
@@ -248,10 +256,20 @@ match_build_tags :: proc(file_tags: File_Tags, target: Build_Target) -> bool {
		project_name_correct ||= group_correct
	}

	os_and_arch_correct := len(file_tags.build) == 0
	os_and_arch_correct := true

	for kind in file_tags.build {
		os_and_arch_correct ||= target.os in kind.os && target.arch in kind.arch
	if len(file_tags.build) > 0 {
		os_and_arch_correct_line := false

		for kind in file_tags.build {
			if kind == BUILD_KIND_NEWLINE_MARKER {
				os_and_arch_correct &&= os_and_arch_correct_line
				os_and_arch_correct_line = false
			} else {
				os_and_arch_correct_line ||= target.os in kind.os && target.arch in kind.arch
			}
		}
		os_and_arch_correct &&= os_and_arch_correct_line
	}

	return !file_tags.ignore && project_name_correct && os_and_arch_correct
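
Worked example (not part of the diff): with the newline marker above, kinds on one `#+build` line are still ORed, but separate lines are now ANDed. Assuming both tag lines below are valid build tags, a file starting with them is only considered for targets satisfying (linux or darwin) AND amd64, rather than matching on any single kind:

	#+build linux, darwin
	#+build amd64
	package example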
@@ -1262,11 +1262,49 @@ parse_foreign_decl :: proc(p: ^Parser) -> ^ast.Decl {
|
||||
|
||||
|
||||
parse_unrolled_for_loop :: proc(p: ^Parser, inline_tok: tokenizer.Token) -> ^ast.Stmt {
|
||||
for_tok := expect_token(p, .For)
|
||||
val0, val1: ^ast.Expr
|
||||
in_tok: tokenizer.Token
|
||||
expr: ^ast.Expr
|
||||
body: ^ast.Stmt
|
||||
args: [dynamic]^ast.Expr
|
||||
|
||||
if allow_token(p, .Open_Paren) {
|
||||
p.expr_level += 1
|
||||
if p.curr_tok.kind == .Close_Paren {
|
||||
error(p, p.curr_tok.pos, "#unroll expected at least 1 argument, got 0")
|
||||
} else {
|
||||
args = make([dynamic]^ast.Expr)
|
||||
for p.curr_tok.kind != .Close_Paren &&
|
||||
p.curr_tok.kind != .EOF {
|
||||
arg := parse_value(p)
|
||||
|
||||
if p.curr_tok.kind == .Eq {
|
||||
eq := expect_token(p, .Eq)
|
||||
if arg != nil {
|
||||
if _, ok := arg.derived.(^ast.Ident); !ok {
|
||||
error(p, arg.pos, "expected an identifier for 'key=value'")
|
||||
}
|
||||
}
|
||||
value := parse_value(p)
|
||||
fv := ast.new(ast.Field_Value, arg.pos, value)
|
||||
fv.field = arg
|
||||
fv.sep = eq.pos
|
||||
fv.value = value
|
||||
|
||||
arg = fv
|
||||
}
|
||||
|
||||
append(&args, arg)
|
||||
|
||||
allow_token(p, .Comma) or_break
|
||||
}
|
||||
}
|
||||
|
||||
p.expr_level -= 1
|
||||
_ = expect_token_after(p, .Close_Paren, "#unroll")
|
||||
}
|
||||
|
||||
for_tok := expect_token(p, .For)
|
||||
|
||||
bad_stmt := false
|
||||
|
||||
@@ -1309,7 +1347,8 @@ parse_unrolled_for_loop :: proc(p: ^Parser, inline_tok: tokenizer.Token) -> ^ast
|
||||
}
|
||||
|
||||
range_stmt := ast.new(ast.Inline_Range_Stmt, inline_tok.pos, body)
|
||||
range_stmt.inline_pos = inline_tok.pos
|
||||
range_stmt.unroll_pos = inline_tok.pos
|
||||
range_stmt.args = args[:]
|
||||
range_stmt.for_pos = for_tok.pos
|
||||
range_stmt.val0 = val0
|
||||
range_stmt.val1 = val1
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#+build darwin, linux, netbsd, freebsd, openbsd
|
||||
#+build darwin, linux, netbsd, freebsd, openbsd, haiku
|
||||
package os
|
||||
|
||||
import "core:strings"
|
||||
|
||||
@@ -20,7 +20,7 @@ read_directory :: proc(f: ^File, n: int, allocator: runtime.Allocator) -> (files
|
||||
|
||||
TEMP_ALLOCATOR_GUARD()
|
||||
|
||||
it := read_directory_iterator_create(f) or_return
|
||||
it := read_directory_iterator_create(f)
|
||||
defer _read_directory_iterator_destroy(&it)
|
||||
|
||||
dfi := make([dynamic]File_Info, 0, size, temp_allocator())
|
||||
@@ -34,9 +34,14 @@ read_directory :: proc(f: ^File, n: int, allocator: runtime.Allocator) -> (files
|
||||
if n > 0 && index == n {
|
||||
break
|
||||
}
|
||||
|
||||
_ = read_directory_iterator_error(&it) or_break
|
||||
|
||||
append(&dfi, file_info_clone(fi, allocator) or_return)
|
||||
}
|
||||
|
||||
_ = read_directory_iterator_error(&it) or_return
|
||||
|
||||
return slice.clone(dfi[:], allocator)
|
||||
}
|
||||
|
||||
@@ -61,22 +66,129 @@ read_all_directory_by_path :: proc(path: string, allocator: runtime.Allocator) -
|
||||
|
||||
|
||||
Read_Directory_Iterator :: struct {
|
||||
f: ^File,
|
||||
f: ^File,
|
||||
err: struct {
|
||||
err: Error,
|
||||
path: [dynamic]byte,
|
||||
},
|
||||
index: int,
|
||||
impl: Read_Directory_Iterator_Impl,
|
||||
}
|
||||
|
||||
/*
|
||||
Creates a directory iterator with the given directory.
|
||||
|
||||
@(require_results)
|
||||
read_directory_iterator_create :: proc(f: ^File) -> (Read_Directory_Iterator, Error) {
|
||||
return _read_directory_iterator_create(f)
|
||||
For an example on how to use the iterator, see `read_directory_iterator`.
|
||||
*/
|
||||
read_directory_iterator_create :: proc(f: ^File) -> (it: Read_Directory_Iterator) {
|
||||
read_directory_iterator_init(&it, f)
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
Initialize a directory iterator with the given directory.
|
||||
|
||||
This procedure may be called on an existing iterator to reuse it for another directory.
|
||||
|
||||
For an example on how to use the iterator, see `read_directory_iterator`.
|
||||
*/
|
||||
read_directory_iterator_init :: proc(it: ^Read_Directory_Iterator, f: ^File) {
|
||||
it.err.err = nil
|
||||
it.err.path.allocator = file_allocator()
|
||||
clear(&it.err.path)
|
||||
|
||||
it.f = f
|
||||
it.index = 0
|
||||
|
||||
_read_directory_iterator_init(it, f)
|
||||
}
|
||||
|
||||
/*
|
||||
Destroys a directory iterator.
|
||||
*/
|
||||
read_directory_iterator_destroy :: proc(it: ^Read_Directory_Iterator) {
|
||||
if it == nil {
|
||||
return
|
||||
}
|
||||
|
||||
delete(it.err.path)
|
||||
|
||||
_read_directory_iterator_destroy(it)
|
||||
}
|
||||
|
||||
// NOTE(bill): `File_Info` does not need to deleted on each iteration. Any copies must be manually copied with `file_info_clone`
|
||||
/*
|
||||
Retrieve the last error that happened during iteration.
|
||||
*/
|
||||
@(require_results)
|
||||
read_directory_iterator_error :: proc(it: ^Read_Directory_Iterator) -> (path: string, err: Error) {
|
||||
return string(it.err.path[:]), it.err.err
|
||||
}
|
||||
|
||||
@(private)
|
||||
read_directory_iterator_set_error :: proc(it: ^Read_Directory_Iterator, path: string, err: Error) {
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
|
||||
resize(&it.err.path, len(path))
|
||||
copy(it.err.path[:], path)
|
||||
|
||||
it.err.err = err
|
||||
}
|
||||
|
||||
/*
|
||||
Returns the next file info entry for the iterator's directory.
|
||||
|
||||
The given `File_Info` is reused in subsequent calls so a copy (`file_info_clone`) has to be made to
|
||||
extend its lifetime.
|
||||
|
||||
Example:
|
||||
package main
|
||||
|
||||
import "core:fmt"
|
||||
import os "core:os/os2"
|
||||
|
||||
main :: proc() {
|
||||
f, oerr := os.open("core")
|
||||
ensure(oerr == nil)
|
||||
defer os.close(f)
|
||||
|
||||
it := os.read_directory_iterator_create(f)
|
||||
defer os.read_directory_iterator_destroy(&it)
|
||||
|
||||
for info in os.read_directory_iterator(&it) {
|
||||
// Optionally break on the first error:
|
||||
// Supports not doing this, and keeping it going with remaining items.
|
||||
// _ = os.read_directory_iterator_error(&it) or_break
|
||||
|
||||
// Handle error as we go:
|
||||
// Again, no need to do this as it will keep going with remaining items.
|
||||
if path, err := os.read_directory_iterator_error(&it); err != nil {
|
||||
fmt.eprintfln("failed reading %s: %s", path, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Or, do not handle errors during iteration, and just check the error at the end.
|
||||
|
||||
|
||||
fmt.printfln("%#v", info)
|
||||
}
|
||||
|
||||
// Handle error if one happened during iteration at the end:
|
||||
if path, err := os.read_directory_iterator_error(&it); err != nil {
|
||||
fmt.eprintfln("read directory failed at %s: %s", path, err)
|
||||
}
|
||||
}
|
||||
*/
|
||||
@(require_results)
|
||||
read_directory_iterator :: proc(it: ^Read_Directory_Iterator) -> (fi: File_Info, index: int, ok: bool) {
|
||||
if it.f == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if it.index == 0 && it.err.err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return _read_directory_iterator(it)
|
||||
}
|
||||
|
||||
@@ -1,20 +1,119 @@
|
||||
#+private
|
||||
package os2
|
||||
|
||||
import "core:sys/linux"
|
||||
|
||||
Read_Directory_Iterator_Impl :: struct {
|
||||
|
||||
prev_fi: File_Info,
|
||||
dirent_backing: []u8,
|
||||
dirent_buflen: int,
|
||||
dirent_off: int,
|
||||
}
|
||||
|
||||
|
||||
@(require_results)
|
||||
_read_directory_iterator :: proc(it: ^Read_Directory_Iterator) -> (fi: File_Info, index: int, ok: bool) {
|
||||
scan_entries :: proc(it: ^Read_Directory_Iterator, dfd: linux.Fd, entries: []u8, offset: ^int) -> (fd: linux.Fd, file_name: string) {
|
||||
for d in linux.dirent_iterate_buf(entries, offset) {
|
||||
file_name = linux.dirent_name(d)
|
||||
if file_name == "." || file_name == ".." {
|
||||
continue
|
||||
}
|
||||
|
||||
file_name_cstr := cstring(raw_data(file_name))
|
||||
entry_fd, errno := linux.openat(dfd, file_name_cstr, {.NOFOLLOW, .PATH})
|
||||
if errno == .NONE {
|
||||
return entry_fd, file_name
|
||||
} else {
|
||||
read_directory_iterator_set_error(it, file_name, _get_platform_error(errno))
|
||||
}
|
||||
}
|
||||
|
||||
return -1, ""
|
||||
}
|
||||
|
||||
index = it.index
|
||||
it.index += 1
|
||||
|
||||
dfd := linux.Fd(_fd(it.f))
|
||||
|
||||
entries := it.impl.dirent_backing[:it.impl.dirent_buflen]
|
||||
entry_fd, file_name := scan_entries(it, dfd, entries, &it.impl.dirent_off)
|
||||
|
||||
for entry_fd == -1 {
|
||||
if len(it.impl.dirent_backing) == 0 {
|
||||
it.impl.dirent_backing = make([]u8, 512, file_allocator())
|
||||
}
|
||||
|
||||
loop: for {
|
||||
buflen, errno := linux.getdents(linux.Fd(dfd), it.impl.dirent_backing[:])
|
||||
#partial switch errno {
|
||||
case .EINVAL:
|
||||
delete(it.impl.dirent_backing, file_allocator())
|
||||
n := len(it.impl.dirent_backing) * 2
|
||||
it.impl.dirent_backing = make([]u8, n, file_allocator())
|
||||
continue
|
||||
case .NONE:
|
||||
if buflen == 0 {
|
||||
return
|
||||
}
|
||||
it.impl.dirent_off = 0
|
||||
it.impl.dirent_buflen = buflen
|
||||
entries = it.impl.dirent_backing[:buflen]
|
||||
break loop
|
||||
case:
|
||||
read_directory_iterator_set_error(it, name(it.f), _get_platform_error(errno))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
entry_fd, file_name = scan_entries(it, dfd, entries, &it.impl.dirent_off)
|
||||
}
|
||||
defer linux.close(entry_fd)
|
||||
|
||||
// PERF: reuse the fullpath string like on posix and wasi.
|
||||
file_info_delete(it.impl.prev_fi, file_allocator())
|
||||
|
||||
err: Error
|
||||
fi, err = _fstat_internal(entry_fd, file_allocator())
|
||||
it.impl.prev_fi = fi
|
||||
|
||||
if err != nil {
|
||||
path, _ := _get_full_path(entry_fd, temp_allocator())
|
||||
read_directory_iterator_set_error(it, path, err)
|
||||
}
|
||||
|
||||
ok = true
|
||||
return
|
||||
}
|
||||
|
||||
@(require_results)
|
||||
_read_directory_iterator_create :: proc(f: ^File) -> (Read_Directory_Iterator, Error) {
|
||||
return {}, .Unsupported
|
||||
_read_directory_iterator_init :: proc(it: ^Read_Directory_Iterator, f: ^File) {
|
||||
// NOTE: Allow calling `init` to target a new directory with the same iterator.
|
||||
it.impl.dirent_buflen = 0
|
||||
it.impl.dirent_off = 0
|
||||
|
||||
if f == nil || f.impl == nil {
|
||||
read_directory_iterator_set_error(it, "", .Invalid_File)
|
||||
return
|
||||
}
|
||||
|
||||
stat: linux.Stat
|
||||
errno := linux.fstat(linux.Fd(fd(f)), &stat)
|
||||
if errno != .NONE {
|
||||
read_directory_iterator_set_error(it, name(f), _get_platform_error(errno))
|
||||
return
|
||||
}
|
||||
|
||||
if (stat.mode & linux.S_IFMT) != linux.S_IFDIR {
|
||||
read_directory_iterator_set_error(it, name(f), .Invalid_Dir)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
_read_directory_iterator_destroy :: proc(it: ^Read_Directory_Iterator) {
|
||||
if it == nil {
|
||||
return
|
||||
}
|
||||
|
||||
delete(it.impl.dirent_backing, file_allocator())
|
||||
file_info_delete(it.impl.prev_fi, file_allocator())
|
||||
}
|
||||
|
||||
@@ -6,7 +6,6 @@ import "core:sys/posix"
|
||||
|
||||
Read_Directory_Iterator_Impl :: struct {
|
||||
dir: posix.DIR,
|
||||
idx: int,
|
||||
fullpath: [dynamic]byte,
|
||||
}
|
||||
|
||||
@@ -14,14 +13,16 @@ Read_Directory_Iterator_Impl :: struct {
|
||||
_read_directory_iterator :: proc(it: ^Read_Directory_Iterator) -> (fi: File_Info, index: int, ok: bool) {
|
||||
fimpl := (^File_Impl)(it.f.impl)
|
||||
|
||||
index = it.impl.idx
|
||||
it.impl.idx += 1
|
||||
index = it.index
|
||||
it.index += 1
|
||||
|
||||
for {
|
||||
posix.set_errno(nil)
|
||||
entry := posix.readdir(it.impl.dir)
|
||||
if entry == nil {
|
||||
// NOTE(laytan): would be good to have an `error` field on the `Read_Directory_Iterator`
|
||||
// There isn't a way to know whether it failed or we are simply at the end.
|
||||
if errno := posix.errno(); errno != nil {
|
||||
read_directory_iterator_set_error(it, name(it.f), _get_platform_error(errno))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -31,16 +32,20 @@ _read_directory_iterator :: proc(it: ^Read_Directory_Iterator) -> (fi: File_Info
|
||||
}
|
||||
sname := string(cname)
|
||||
|
||||
stat: posix.stat_t
|
||||
if posix.fstatat(posix.dirfd(it.impl.dir), cname, &stat, { .SYMLINK_NOFOLLOW }) != .OK {
|
||||
// NOTE(laytan): would be good to have an `error` field on the `Read_Directory_Iterator`
|
||||
// There isn't a way to know whether it failed or we are simply at the end.
|
||||
n := len(fimpl.name)+1
|
||||
if err := non_zero_resize(&it.impl.fullpath, n+len(sname)); err != nil {
|
||||
read_directory_iterator_set_error(it, sname, err)
|
||||
ok = true
|
||||
return
|
||||
}
|
||||
copy(it.impl.fullpath[n:], sname)
|
||||
|
||||
n := len(fimpl.name)+1
|
||||
non_zero_resize(&it.impl.fullpath, n+len(sname))
|
||||
n += copy(it.impl.fullpath[n:], sname)
|
||||
stat: posix.stat_t
|
||||
if posix.fstatat(posix.dirfd(it.impl.dir), cname, &stat, { .SYMLINK_NOFOLLOW }) != .OK {
|
||||
read_directory_iterator_set_error(it, string(it.impl.fullpath[:]), _get_platform_error())
|
||||
ok = true
|
||||
return
|
||||
}
|
||||
|
||||
fi = internal_stat(stat, string(it.impl.fullpath[:]))
|
||||
ok = true
|
||||
@@ -48,34 +53,41 @@ _read_directory_iterator :: proc(it: ^Read_Directory_Iterator) -> (fi: File_Info
|
||||
}
|
||||
}
|
||||
|
||||
@(require_results)
|
||||
_read_directory_iterator_create :: proc(f: ^File) -> (iter: Read_Directory_Iterator, err: Error) {
|
||||
_read_directory_iterator_init :: proc(it: ^Read_Directory_Iterator, f: ^File) {
|
||||
if f == nil || f.impl == nil {
|
||||
err = .Invalid_File
|
||||
read_directory_iterator_set_error(it, "", .Invalid_File)
|
||||
return
|
||||
}
|
||||
|
||||
impl := (^File_Impl)(f.impl)
|
||||
|
||||
iter.f = f
|
||||
iter.impl.idx = 0
|
||||
// NOTE: Allow calling `init` to target a new directory with the same iterator.
|
||||
it.impl.fullpath.allocator = file_allocator()
|
||||
clear(&it.impl.fullpath)
|
||||
if err := reserve(&it.impl.fullpath, len(impl.name)+128); err != nil {
|
||||
read_directory_iterator_set_error(it, name(f), err)
|
||||
return
|
||||
}
|
||||
|
||||
iter.impl.fullpath.allocator = file_allocator()
|
||||
append(&iter.impl.fullpath, impl.name)
|
||||
append(&iter.impl.fullpath, "/")
|
||||
defer if err != nil { delete(iter.impl.fullpath) }
|
||||
append(&it.impl.fullpath, impl.name)
|
||||
append(&it.impl.fullpath, "/")
|
||||
|
||||
// `fdopendir` consumes the file descriptor so we need to `dup` it.
|
||||
dupfd := posix.dup(impl.fd)
|
||||
if dupfd == -1 {
|
||||
err = _get_platform_error()
|
||||
read_directory_iterator_set_error(it, name(f), _get_platform_error())
|
||||
return
|
||||
}
|
||||
defer if err != nil { posix.close(dupfd) }
|
||||
defer if it.err.err != nil { posix.close(dupfd) }
|
||||
|
||||
iter.impl.dir = posix.fdopendir(dupfd)
|
||||
if iter.impl.dir == nil {
|
||||
err = _get_platform_error()
|
||||
// NOTE: Allow calling `init` to target a new directory with the same iterator.
|
||||
if it.impl.dir != nil {
|
||||
posix.closedir(it.impl.dir)
|
||||
}
|
||||
|
||||
it.impl.dir = posix.fdopendir(dupfd)
|
||||
if it.impl.dir == nil {
|
||||
read_directory_iterator_set_error(it, name(f), _get_platform_error())
|
||||
return
|
||||
}
|
||||
|
||||
@@ -83,7 +95,7 @@ _read_directory_iterator_create :: proc(f: ^File) -> (iter: Read_Directory_Itera
|
||||
}
|
||||
|
||||
_read_directory_iterator_destroy :: proc(it: ^Read_Directory_Iterator) {
|
||||
if it == nil || it.impl.dir == nil {
|
||||
if it.impl.dir == nil {
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
core/os/os2/dir_walker.odin (new file, +230)
@@ -0,0 +1,230 @@
|
||||
package os2
|
||||
|
||||
import "core:container/queue"
|
||||
|
||||
/*
|
||||
A recursive directory walker.
|
||||
|
||||
Note that none of the fields should be accessed directly.
|
||||
*/
|
||||
Walker :: struct {
|
||||
todo: queue.Queue(string),
|
||||
skip_dir: bool,
|
||||
err: struct {
|
||||
path: [dynamic]byte,
|
||||
err: Error,
|
||||
},
|
||||
iter: Read_Directory_Iterator,
|
||||
}
|
||||
|
||||
walker_init_path :: proc(w: ^Walker, path: string) {
|
||||
cloned_path, err := clone_string(path, file_allocator())
|
||||
if err != nil {
|
||||
walker_set_error(w, path, err)
|
||||
return
|
||||
}
|
||||
|
||||
walker_clear(w)
|
||||
|
||||
if _, err = queue.push(&w.todo, cloned_path); err != nil {
|
||||
walker_set_error(w, cloned_path, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
walker_init_file :: proc(w: ^Walker, f: ^File) {
|
||||
handle, err := clone(f)
|
||||
if err != nil {
|
||||
path, _ := clone_string(name(f), file_allocator())
|
||||
walker_set_error(w, path, err)
|
||||
return
|
||||
}
|
||||
|
||||
walker_clear(w)
|
||||
|
||||
read_directory_iterator_init(&w.iter, handle)
|
||||
}
|
||||
|
||||
/*
|
||||
Initializes a walker, either using a path or a file pointer to a directory the walker will start at.
|
||||
|
||||
You are allowed to repeatedly call this to reuse it for later walks.
|
||||
|
||||
For an example on how to use the walker, see `walker_walk`.
|
||||
*/
|
||||
walker_init :: proc {
|
||||
walker_init_path,
|
||||
walker_init_file,
|
||||
}
|
||||
|
||||
@(require_results)
|
||||
walker_create_path :: proc(path: string) -> (w: Walker) {
|
||||
walker_init_path(&w, path)
|
||||
return
|
||||
}
|
||||
|
||||
@(require_results)
|
||||
walker_create_file :: proc(f: ^File) -> (w: Walker) {
|
||||
walker_init_file(&w, f)
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
Creates a walker, either using a path or a file pointer to a directory the walker will start at.
|
||||
|
||||
For an example on how to use the walker, see `walker_walk`.
|
||||
*/
|
||||
walker_create :: proc {
|
||||
walker_create_path,
|
||||
walker_create_file,
|
||||
}
|
||||
|
||||
/*
|
||||
Returns the last error that occurred during the walker's operations.
|
||||
|
||||
Can be called while iterating, or only at the end to check if anything failed.
|
||||
*/
|
||||
@(require_results)
|
||||
walker_error :: proc(w: ^Walker) -> (path: string, err: Error) {
|
||||
return string(w.err.path[:]), w.err.err
|
||||
}
|
||||
|
||||
@(private)
|
||||
walker_set_error :: proc(w: ^Walker, path: string, err: Error) {
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
|
||||
resize(&w.err.path, len(path))
|
||||
copy(w.err.path[:], path)
|
||||
|
||||
w.err.err = err
|
||||
}
|
||||
|
||||
@(private)
|
||||
walker_clear :: proc(w: ^Walker) {
|
||||
w.iter.f = nil
|
||||
w.skip_dir = false
|
||||
|
||||
w.err.path.allocator = file_allocator()
|
||||
clear(&w.err.path)
|
||||
|
||||
w.todo.data.allocator = file_allocator()
|
||||
for path in queue.pop_front_safe(&w.todo) {
|
||||
delete(path, file_allocator())
|
||||
}
|
||||
}
|
||||
|
||||
walker_destroy :: proc(w: ^Walker) {
|
||||
walker_clear(w)
|
||||
queue.destroy(&w.todo)
|
||||
delete(w.err.path)
|
||||
read_directory_iterator_destroy(&w.iter)
|
||||
}
|
||||
|
||||
// Marks the current directory to be skipped (not entered into).
|
||||
walker_skip_dir :: proc(w: ^Walker) {
|
||||
w.skip_dir = true
|
||||
}
|
||||
|
||||
/*
|
||||
Returns the next file info in the iterator, files are iterated in breadth-first order.
|
||||
|
||||
If an error occurred opening a directory, you may get zero'd info struct and
|
||||
`walker_error` will return the error.
|
||||
|
||||
Example:
|
||||
package main
|
||||
|
||||
import "core:fmt"
|
||||
import "core:strings"
|
||||
import os "core:os/os2"
|
||||
|
||||
main :: proc() {
|
||||
w := os.walker_create("core")
|
||||
defer os.walker_destroy(&w)
|
||||
|
||||
for info in os.walker_walk(&w) {
|
||||
// Optionally break on the first error:
|
||||
// _ = walker_error(&w) or_break
|
||||
|
||||
// Or, handle error as we go:
|
||||
if path, err := os.walker_error(&w); err != nil {
|
||||
fmt.eprintfln("failed walking %s: %s", path, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Or, do not handle errors during iteration, and just check the error at the end.
|
||||
|
||||
|
||||
|
||||
// Skip a directory:
|
||||
if strings.has_suffix(info.fullpath, ".git") {
|
||||
os.walker_skip_dir(&w)
|
||||
continue
|
||||
}
|
||||
|
||||
fmt.printfln("%#v", info)
|
||||
}
|
||||
|
||||
// Handle error if one happened during iteration at the end:
|
||||
if path, err := os.walker_error(&w); err != nil {
|
||||
fmt.eprintfln("failed walking %s: %v", path, err)
|
||||
}
|
||||
}
|
||||
*/
|
||||
@(require_results)
|
||||
walker_walk :: proc(w: ^Walker) -> (fi: File_Info, ok: bool) {
|
||||
if w.skip_dir {
|
||||
w.skip_dir = false
|
||||
if skip, sok := queue.pop_back_safe(&w.todo); sok {
|
||||
delete(skip, file_allocator())
|
||||
}
|
||||
}
|
||||
|
||||
if w.iter.f == nil {
|
||||
if queue.len(w.todo) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
next := queue.pop_front(&w.todo)
|
||||
|
||||
handle, err := open(next)
|
||||
if err != nil {
|
||||
walker_set_error(w, next, err)
|
||||
return {}, true
|
||||
}
|
||||
|
||||
read_directory_iterator_init(&w.iter, handle)
|
||||
|
||||
delete(next, file_allocator())
|
||||
}
|
||||
|
||||
info, _, iter_ok := read_directory_iterator(&w.iter)
|
||||
|
||||
if path, err := read_directory_iterator_error(&w.iter); err != nil {
|
||||
walker_set_error(w, path, err)
|
||||
}
|
||||
|
||||
if !iter_ok {
|
||||
close(w.iter.f)
|
||||
w.iter.f = nil
|
||||
return walker_walk(w)
|
||||
}
|
||||
|
||||
if info.type == .Directory {
|
||||
path, err := clone_string(info.fullpath, file_allocator())
|
||||
if err != nil {
|
||||
walker_set_error(w, "", err)
|
||||
return
|
||||
}
|
||||
|
||||
_, err = queue.push_back(&w.todo, path)
|
||||
if err != nil {
|
||||
walker_set_error(w, path, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return info, iter_ok
|
||||
}
|
||||
core/os/os2/dir_wasi.odin (new file, +124)
@@ -0,0 +1,124 @@
|
||||
#+private
|
||||
package os2
|
||||
|
||||
import "base:runtime"
|
||||
import "core:slice"
|
||||
import "base:intrinsics"
|
||||
import "core:sys/wasm/wasi"
|
||||
|
||||
Read_Directory_Iterator_Impl :: struct {
|
||||
fullpath: [dynamic]byte,
|
||||
buf: []byte,
|
||||
off: int,
|
||||
}
|
||||
|
||||
@(require_results)
|
||||
_read_directory_iterator :: proc(it: ^Read_Directory_Iterator) -> (fi: File_Info, index: int, ok: bool) {
|
||||
fimpl := (^File_Impl)(it.f.impl)
|
||||
|
||||
buf := it.impl.buf[it.impl.off:]
|
||||
|
||||
index = it.index
|
||||
it.index += 1
|
||||
|
||||
for {
|
||||
if len(buf) < size_of(wasi.dirent_t) {
|
||||
return
|
||||
}
|
||||
|
||||
entry := intrinsics.unaligned_load((^wasi.dirent_t)(raw_data(buf)))
|
||||
buf = buf[size_of(wasi.dirent_t):]
|
||||
|
||||
assert(len(buf) < int(entry.d_namlen))
|
||||
|
||||
name := string(buf[:entry.d_namlen])
|
||||
buf = buf[entry.d_namlen:]
|
||||
it.impl.off += size_of(wasi.dirent_t) + int(entry.d_namlen)
|
||||
|
||||
if name == "." || name == ".." {
|
||||
continue
|
||||
}
|
||||
|
||||
n := len(fimpl.name)+1
|
||||
if alloc_err := non_zero_resize(&it.impl.fullpath, n+len(name)); alloc_err != nil {
|
||||
read_directory_iterator_set_error(it, name, alloc_err)
|
||||
ok = true
|
||||
return
|
||||
}
|
||||
copy(it.impl.fullpath[n:], name)
|
||||
|
||||
stat, err := wasi.path_filestat_get(__fd(it.f), {}, name)
|
||||
if err != nil {
|
||||
// Can't stat, fill what we have from dirent.
|
||||
stat = {
|
||||
ino = entry.d_ino,
|
||||
filetype = entry.d_type,
|
||||
}
|
||||
read_directory_iterator_set_error(it, string(it.impl.fullpath[:]), _get_platform_error(err))
|
||||
}
|
||||
|
||||
fi = internal_stat(stat, string(it.impl.fullpath[:]))
|
||||
ok = true
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
_read_directory_iterator_init :: proc(it: ^Read_Directory_Iterator, f: ^File) {
|
||||
// NOTE: Allow calling `init` to target a new directory with the same iterator.
|
||||
it.impl.off = 0
|
||||
|
||||
if f == nil || f.impl == nil {
|
||||
read_directory_iterator_set_error(it, "", .Invalid_File)
|
||||
return
|
||||
}
|
||||
|
||||
impl := (^File_Impl)(f.impl)
|
||||
|
||||
buf: [dynamic]byte
|
||||
// NOTE: Allow calling `init` to target a new directory with the same iterator.
|
||||
if it.impl.buf != nil {
|
||||
buf = slice.into_dynamic(it.impl.buf)
|
||||
}
|
||||
buf.allocator = file_allocator()
|
||||
|
||||
defer if it.err.err != nil { delete(buf) }
|
||||
|
||||
for {
|
||||
if err := non_zero_resize(&buf, 512 if len(buf) == 0 else len(buf)*2); err != nil {
|
||||
read_directory_iterator_set_error(it, name(f), err)
|
||||
return
|
||||
}
|
||||
|
||||
n, err := wasi.fd_readdir(__fd(f), buf[:], 0)
|
||||
if err != nil {
|
||||
read_directory_iterator_set_error(it, name(f), _get_platform_error(err))
|
||||
return
|
||||
}
|
||||
|
||||
if n < len(buf) {
|
||||
non_zero_resize(&buf, n)
|
||||
break
|
||||
}
|
||||
|
||||
assert(n == len(buf))
|
||||
}
|
||||
it.impl.buf = buf[:]
|
||||
|
||||
// NOTE: Allow calling `init` to target a new directory with the same iterator.
|
||||
it.impl.fullpath.allocator = file_allocator()
|
||||
clear(&it.impl.fullpath)
|
||||
if err := reserve(&it.impl.fullpath, len(impl.name)+128); err != nil {
|
||||
read_directory_iterator_set_error(it, name(f), err)
|
||||
return
|
||||
}
|
||||
|
||||
append(&it.impl.fullpath, impl.name)
|
||||
append(&it.impl.fullpath, "/")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
_read_directory_iterator_destroy :: proc(it: ^Read_Directory_Iterator) {
|
||||
delete(it.impl.buf, file_allocator())
|
||||
delete(it.impl.fullpath)
|
||||
}
|
||||
@@ -44,16 +44,11 @@ Read_Directory_Iterator_Impl :: struct {
|
||||
path: string,
|
||||
prev_fi: File_Info,
|
||||
no_more_files: bool,
|
||||
index: int,
|
||||
}
|
||||
|
||||
|
||||
@(require_results)
|
||||
_read_directory_iterator :: proc(it: ^Read_Directory_Iterator) -> (fi: File_Info, index: int, ok: bool) {
|
||||
if it.f == nil {
|
||||
return
|
||||
}
|
||||
|
||||
TEMP_ALLOCATOR_GUARD()
|
||||
|
||||
for !it.impl.no_more_files {
|
||||
@@ -63,19 +58,21 @@ _read_directory_iterator :: proc(it: ^Read_Directory_Iterator) -> (fi: File_Info
|
||||
|
||||
fi, err = find_data_to_file_info(it.impl.path, &it.impl.find_data, file_allocator())
|
||||
if err != nil {
|
||||
read_directory_iterator_set_error(it, it.impl.path, err)
|
||||
return
|
||||
}
|
||||
|
||||
if fi.name != "" {
|
||||
it.impl.prev_fi = fi
|
||||
ok = true
|
||||
index = it.impl.index
|
||||
it.impl.index += 1
|
||||
index = it.index
|
||||
it.index += 1
|
||||
}
|
||||
|
||||
if !win32.FindNextFileW(it.impl.find_handle, &it.impl.find_data) {
|
||||
e := _get_platform_error()
|
||||
if pe, _ := is_platform_error(e); pe == i32(win32.ERROR_NO_MORE_FILES) {
|
||||
it.impl.no_more_files = true
|
||||
if pe, _ := is_platform_error(e); pe != i32(win32.ERROR_NO_MORE_FILES) {
|
||||
read_directory_iterator_set_error(it, it.impl.path, e)
|
||||
}
|
||||
it.impl.no_more_files = true
|
||||
}
|
||||
@@ -86,16 +83,27 @@ _read_directory_iterator :: proc(it: ^Read_Directory_Iterator) -> (fi: File_Info
|
||||
return
|
||||
}
|
||||
|
||||
@(require_results)
|
||||
_read_directory_iterator_create :: proc(f: ^File) -> (it: Read_Directory_Iterator, err: Error) {
|
||||
if f == nil {
|
||||
_read_directory_iterator_init :: proc(it: ^Read_Directory_Iterator, f: ^File) {
|
||||
it.impl.no_more_files = false
|
||||
|
||||
if f == nil || f.impl == nil {
|
||||
read_directory_iterator_set_error(it, "", .Invalid_File)
|
||||
return
|
||||
}
|
||||
|
||||
it.f = f
|
||||
impl := (^File_Impl)(f.impl)
|
||||
|
||||
// NOTE: Allow calling `init` to target a new directory with the same iterator - reset idx.
|
||||
if it.impl.find_handle != nil {
|
||||
win32.FindClose(it.impl.find_handle)
|
||||
}
|
||||
if it.impl.path != "" {
|
||||
delete(it.impl.path, file_allocator())
|
||||
}
|
||||
|
||||
if !is_directory(impl.name) {
|
||||
err = .Invalid_Dir
|
||||
read_directory_iterator_set_error(it, impl.name, .Invalid_Dir)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -118,14 +126,19 @@ _read_directory_iterator_create :: proc(f: ^File) -> (it: Read_Directory_Iterato
|
||||
|
||||
it.impl.find_handle = win32.FindFirstFileW(raw_data(wpath_search), &it.impl.find_data)
|
||||
if it.impl.find_handle == win32.INVALID_HANDLE_VALUE {
|
||||
err = _get_platform_error()
|
||||
read_directory_iterator_set_error(it, impl.name, _get_platform_error())
|
||||
return
|
||||
}
|
||||
defer if err != nil {
|
||||
defer if it.err.err != nil {
|
||||
win32.FindClose(it.impl.find_handle)
|
||||
}
|
||||
|
||||
it.impl.path = _cleanpath_from_buf(wpath, file_allocator()) or_return
|
||||
err: Error
|
||||
it.impl.path, err = _cleanpath_from_buf(wpath, file_allocator())
|
||||
if err != nil {
|
||||
read_directory_iterator_set_error(it, impl.name, err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -22,8 +22,8 @@ lookup_env :: proc(key: string, allocator: runtime.Allocator) -> (value: string,
}

// set_env sets the value of the environment variable named by the key
// Returns true on success, false on failure
set_env :: proc(key, value: string) -> bool {
// Returns Error on failure
set_env :: proc(key, value: string) -> Error {
	return _set_env(key, value)
}

@@ -41,7 +41,7 @@ clear_env :: proc() {
// environ returns a copy of strings representing the environment, in the form "key=value"
// NOTE: the slice of strings and the strings will be allocated using the supplied allocator
@(require_results)
environ :: proc(allocator: runtime.Allocator) -> []string {
environ :: proc(allocator: runtime.Allocator) -> ([]string, Error) {
	return _environ(allocator)
}

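
Example (not part of the diff): a minimal sketch of the new error-returning `set_env` and `environ` signatures shown above, assuming the `core:os/os2` package names from the hunks; `MY_VAR` is a placeholder.

	package env_example

	import "core:fmt"
	import os "core:os/os2"

	main :: proc() {
		// set_env now reports failure as an Error instead of a bool.
		if err := os.set_env("MY_VAR", "1"); err != nil {
			fmt.eprintln("set_env failed:", err)
		}

		// environ now returns (values, error); the slice and its strings are
		// owned by the caller via the supplied allocator.
		envs, err := os.environ(context.allocator)
		if err != nil {
			fmt.eprintln("environ failed:", err)
			return
		}
		defer {
			for e in envs {
				delete(e)
			}
			delete(envs)
		}

		for e in envs {
			fmt.println(e)
		}
	}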
@@ -54,7 +54,7 @@ _lookup_env :: proc(key: string, allocator: runtime.Allocator) -> (value: string
|
||||
return
|
||||
}
|
||||
|
||||
_set_env :: proc(key, v_new: string) -> bool {
|
||||
_set_env :: proc(key, v_new: string) -> Error {
|
||||
if _org_env_begin == 0 {
|
||||
_build_env()
|
||||
}
|
||||
@@ -63,7 +63,7 @@ _set_env :: proc(key, v_new: string) -> bool {
|
||||
kv_size := len(key) + len(v_new) + 2
|
||||
if v_curr, idx := _lookup(key); idx != NOT_FOUND {
|
||||
if v_curr == v_new {
|
||||
return true
|
||||
return nil
|
||||
}
|
||||
sync.mutex_lock(&_env_mutex)
|
||||
defer sync.mutex_unlock(&_env_mutex)
|
||||
@@ -76,9 +76,9 @@ _set_env :: proc(key, v_new: string) -> bool {
|
||||
// wasn't in the environment in the first place.
|
||||
k_addr, v_addr := _kv_addr_from_val(v_curr, key)
|
||||
if len(v_new) > len(v_curr) {
|
||||
k_addr = ([^]u8)(heap_resize(k_addr, kv_size))
|
||||
k_addr = ([^]u8)(runtime.heap_resize(k_addr, kv_size))
|
||||
if k_addr == nil {
|
||||
return false
|
||||
return .Out_Of_Memory
|
||||
}
|
||||
v_addr = &k_addr[len(key) + 1]
|
||||
}
|
||||
@@ -86,13 +86,13 @@ _set_env :: proc(key, v_new: string) -> bool {
|
||||
v_addr[len(v_new)] = 0
|
||||
|
||||
append(&_env, string(k_addr[:kv_size]))
|
||||
return true
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
k_addr := ([^]u8)(heap_alloc(kv_size))
|
||||
k_addr := ([^]u8)(runtime.heap_alloc(kv_size))
|
||||
if k_addr == nil {
|
||||
return false
|
||||
return .Out_Of_Memory
|
||||
}
|
||||
intrinsics.mem_copy_non_overlapping(k_addr, raw_data(key), len(key))
|
||||
k_addr[len(key)] = '='
|
||||
@@ -104,7 +104,7 @@ _set_env :: proc(key, v_new: string) -> bool {
|
||||
sync.mutex_lock(&_env_mutex)
|
||||
append(&_env, string(k_addr[:kv_size - 1]))
|
||||
sync.mutex_unlock(&_env_mutex)
|
||||
return true
|
||||
return nil
|
||||
}
|
||||
|
||||
_unset_env :: proc(key: string) -> bool {
|
||||
@@ -129,7 +129,7 @@ _unset_env :: proc(key: string) -> bool {
|
||||
// if we got this far, the environment variable
|
||||
// existed AND was allocated by us.
|
||||
k_addr, _ := _kv_addr_from_val(v, key)
|
||||
heap_free(k_addr)
|
||||
runtime.heap_free(k_addr)
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -139,7 +139,7 @@ _clear_env :: proc() {
|
||||
|
||||
for kv in _env {
|
||||
if !_is_in_org_env(kv) {
|
||||
heap_free(raw_data(kv))
|
||||
runtime.heap_free(raw_data(kv))
|
||||
}
|
||||
}
|
||||
clear(&_env)
|
||||
@@ -149,18 +149,26 @@ _clear_env :: proc() {
|
||||
_org_env_end = ~uintptr(0)
|
||||
}
|
||||
|
||||
_environ :: proc(allocator: runtime.Allocator) -> []string {
|
||||
_environ :: proc(allocator: runtime.Allocator) -> (environ: []string, err: Error) {
|
||||
if _org_env_begin == 0 {
|
||||
_build_env()
|
||||
}
|
||||
env := make([]string, len(_env), allocator)
|
||||
env := make([dynamic]string, 0, len(_env), allocator) or_return
|
||||
defer if err != nil {
|
||||
for e in env {
|
||||
delete(e, allocator)
|
||||
}
|
||||
delete(env)
|
||||
}
|
||||
|
||||
sync.mutex_lock(&_env_mutex)
|
||||
defer sync.mutex_unlock(&_env_mutex)
|
||||
for entry, i in _env {
|
||||
env[i], _ = clone_string(entry, allocator)
|
||||
for entry in _env {
|
||||
s := clone_string(entry, allocator) or_return
|
||||
append(&env, s)
|
||||
}
|
||||
return env
|
||||
environ = env[:]
|
||||
return
|
||||
}
|
||||
|
||||
// The entire environment is stored as 0 terminated strings,
|
||||
@@ -193,7 +201,7 @@ _build_env :: proc() {
|
||||
return
|
||||
}
|
||||
|
||||
_env = make(type_of(_env), heap_allocator())
|
||||
_env = make(type_of(_env), runtime.heap_allocator())
|
||||
cstring_env := _get_original_env()
|
||||
_org_env_begin = uintptr(rawptr(cstring_env[0]))
|
||||
for i := 0; cstring_env[i] != nil; i += 1 {
|
||||
|
||||
@@ -26,13 +26,15 @@ _lookup_env :: proc(key: string, allocator: runtime.Allocator) -> (value: string
|
||||
return
|
||||
}
|
||||
|
||||
_set_env :: proc(key, value: string) -> (ok: bool) {
|
||||
_set_env :: proc(key, value: string) -> (err: Error) {
|
||||
TEMP_ALLOCATOR_GUARD()
|
||||
|
||||
ckey := strings.clone_to_cstring(key, temp_allocator())
|
||||
cval := strings.clone_to_cstring(key, temp_allocator())
|
||||
ckey := strings.clone_to_cstring(key, temp_allocator()) or_return
|
||||
cval := strings.clone_to_cstring(key, temp_allocator()) or_return
|
||||
|
||||
ok = posix.setenv(ckey, cval, true) == .OK
|
||||
if posix.setenv(ckey, cval, true) != nil {
|
||||
err = _get_platform_error_from_errno()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -54,23 +56,23 @@ _clear_env :: proc() {
|
||||
}
|
||||
}
|
||||
|
||||
_environ :: proc(allocator: runtime.Allocator) -> (environ: []string) {
|
||||
_environ :: proc(allocator: runtime.Allocator) -> (environ: []string, err: Error) {
|
||||
n := 0
|
||||
for entry := posix.environ[0]; entry != nil; n, entry = n+1, posix.environ[n] {}
|
||||
|
||||
err: runtime.Allocator_Error
|
||||
if environ, err = make([]string, n, allocator); err != nil {
|
||||
// NOTE(laytan): is the environment empty or did allocation fail, how does the user know?
|
||||
return
|
||||
r := make([dynamic]string, 0, n, allocator) or_return
|
||||
defer if err != nil {
|
||||
for e in r {
|
||||
delete(e, allocator)
|
||||
}
|
||||
delete(r)
|
||||
}
|
||||
|
||||
for i, entry := 0, posix.environ[0]; entry != nil; i, entry = i+1, posix.environ[i] {
|
||||
if environ[i], err = strings.clone(string(entry), allocator); err != nil {
|
||||
// NOTE(laytan): is the entire environment returned or did allocation fail, how does the user know?
|
||||
return
|
||||
}
|
||||
append(&r, strings.clone(string(entry), allocator) or_return)
|
||||
}
|
||||
|
||||
environ = r[:]
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
core/os/os2/env_wasi.odin (new file, +159)
@@ -0,0 +1,159 @@
|
||||
#+private
|
||||
package os2
|
||||
|
||||
import "base:runtime"
|
||||
|
||||
import "core:strings"
|
||||
import "core:sync"
|
||||
import "core:sys/wasm/wasi"
|
||||
|
||||
g_env: map[string]string
|
||||
g_env_buf: []byte
|
||||
g_env_mutex: sync.RW_Mutex
|
||||
g_env_error: Error
|
||||
g_env_built: bool
|
||||
|
||||
build_env :: proc() -> (err: Error) {
|
||||
if g_env_built || g_env_error != nil {
|
||||
return g_env_error
|
||||
}
|
||||
|
||||
sync.guard(&g_env_mutex)
|
||||
|
||||
if g_env_built || g_env_error != nil {
|
||||
return g_env_error
|
||||
}
|
||||
|
||||
defer if err != nil {
|
||||
g_env_error = err
|
||||
}
|
||||
|
||||
num_envs, size_of_envs, _err := wasi.environ_sizes_get()
|
||||
if _err != nil {
|
||||
return _get_platform_error(_err)
|
||||
}
|
||||
|
||||
g_env = make(map[string]string, num_envs, file_allocator()) or_return
|
||||
defer if err != nil { delete(g_env) }
|
||||
|
||||
g_env_buf = make([]byte, size_of_envs, file_allocator()) or_return
|
||||
defer if err != nil { delete(g_env_buf, file_allocator()) }
|
||||
|
||||
TEMP_ALLOCATOR_GUARD()
|
||||
|
||||
envs := make([]cstring, num_envs, temp_allocator()) or_return
|
||||
|
||||
_err = wasi.environ_get(raw_data(envs), raw_data(g_env_buf))
|
||||
if _err != nil {
|
||||
return _get_platform_error(_err)
|
||||
}
|
||||
|
||||
for env in envs {
|
||||
key, _, value := strings.partition(string(env), "=")
|
||||
g_env[key] = value
|
||||
}
|
||||
|
||||
g_env_built = true
|
||||
return
|
||||
}
|
||||
|
||||
delete_string_if_not_original :: proc(str: string) {
|
||||
start := uintptr(raw_data(g_env_buf))
|
||||
end := start + uintptr(len(g_env_buf))
|
||||
ptr := uintptr(raw_data(str))
|
||||
if ptr < start || ptr > end {
|
||||
delete(str, file_allocator())
|
||||
}
|
||||
}
|
||||
|
||||
@(require_results)
|
||||
_lookup_env :: proc(key: string, allocator: runtime.Allocator) -> (value: string, found: bool) {
|
||||
if err := build_env(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
sync.shared_guard(&g_env_mutex)
|
||||
|
||||
value = g_env[key] or_return
|
||||
value, _ = clone_string(value, allocator)
|
||||
return
|
||||
}
|
||||
|
||||
@(require_results)
|
||||
_set_env :: proc(key, value: string) -> (err: Error) {
|
||||
build_env() or_return
|
||||
|
||||
sync.guard(&g_env_mutex)
|
||||
|
||||
defer if err != nil {
|
||||
delete_key(&g_env, key)
|
||||
}
|
||||
|
||||
key_ptr, value_ptr, just_inserted := map_entry(&g_env, key) or_return
|
||||
|
||||
if just_inserted {
|
||||
key_ptr^ = clone_string(key, file_allocator()) or_return
|
||||
defer if err != nil {
|
||||
delete(key_ptr^, file_allocator())
|
||||
}
|
||||
value_ptr^ = clone_string(value, file_allocator()) or_return
|
||||
return
|
||||
}
|
||||
|
||||
delete_string_if_not_original(value_ptr^)
|
||||
|
||||
value_ptr^ = clone_string(value, file_allocator()) or_return
|
||||
return
|
||||
}
|
||||
|
||||
@(require_results)
|
||||
_unset_env :: proc(key: string) -> bool {
|
||||
if err := build_env(); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
sync.guard(&g_env_mutex)
|
||||
|
||||
dkey, dval := delete_key(&g_env, key)
|
||||
delete_string_if_not_original(dkey)
|
||||
delete_string_if_not_original(dval)
|
||||
return true
|
||||
}
|
||||
|
||||
_clear_env :: proc() {
|
||||
sync.guard(&g_env_mutex)
|
||||
|
||||
for k, v in g_env {
|
||||
delete_string_if_not_original(k)
|
||||
delete_string_if_not_original(v)
|
||||
}
|
||||
|
||||
delete(g_env_buf, file_allocator())
|
||||
g_env_buf = {}
|
||||
|
||||
clear(&g_env)
|
||||
|
||||
g_env_built = true
|
||||
}
|
||||
|
||||
@(require_results)
|
||||
_environ :: proc(allocator: runtime.Allocator) -> (environ: []string, err: Error) {
|
||||
build_env() or_return
|
||||
|
||||
sync.shared_guard(&g_env_mutex)
|
||||
|
||||
envs := make([dynamic]string, 0, len(g_env), allocator) or_return
|
||||
defer if err != nil {
|
||||
for env in envs {
|
||||
delete(env, allocator)
|
||||
}
|
||||
delete(envs)
|
||||
}
|
||||
|
||||
for k, v in g_env {
|
||||
append(&envs, concatenate({k, "=", v}, allocator) or_return)
|
||||
}
|
||||
|
||||
environ = envs[:]
|
||||
return
|
||||
}
|
||||
@@ -36,12 +36,15 @@ _lookup_env :: proc(key: string, allocator: runtime.Allocator) -> (value: string
|
||||
return
|
||||
}
|
||||
|
||||
_set_env :: proc(key, value: string) -> bool {
|
||||
_set_env :: proc(key, value: string) -> Error {
|
||||
TEMP_ALLOCATOR_GUARD()
|
||||
k, _ := win32_utf8_to_wstring(key, temp_allocator())
|
||||
v, _ := win32_utf8_to_wstring(value, temp_allocator())
|
||||
k := win32_utf8_to_wstring(key, temp_allocator()) or_return
|
||||
v := win32_utf8_to_wstring(value, temp_allocator()) or_return
|
||||
|
||||
return bool(win32.SetEnvironmentVariableW(k, v))
|
||||
if !win32.SetEnvironmentVariableW(k, v) {
|
||||
return _get_platform_error()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
_unset_env :: proc(key: string) -> bool {
|
||||
@@ -52,7 +55,7 @@ _unset_env :: proc(key: string) -> bool {
|
||||
|
||||
_clear_env :: proc() {
|
||||
TEMP_ALLOCATOR_GUARD()
|
||||
envs := environ(temp_allocator())
|
||||
envs, _ := environ(temp_allocator())
|
||||
for env in envs {
|
||||
for j in 1..<len(env) {
|
||||
if env[j] == '=' {
|
||||
@@ -63,10 +66,10 @@ _clear_env :: proc() {
|
||||
}
|
||||
}
|
||||
|
||||
_environ :: proc(allocator: runtime.Allocator) -> []string {
|
||||
_environ :: proc(allocator: runtime.Allocator) -> (environ: []string, err: Error) {
|
||||
envs := win32.GetEnvironmentStringsW()
|
||||
if envs == nil {
|
||||
return nil
|
||||
return
|
||||
}
|
||||
defer win32.FreeEnvironmentStringsW(envs)
|
||||
|
||||
@@ -82,7 +85,13 @@ _environ :: proc(allocator: runtime.Allocator) -> []string {
|
||||
}
|
||||
}
|
||||
|
||||
r := make([dynamic]string, 0, n, allocator)
|
||||
r := make([dynamic]string, 0, n, allocator) or_return
|
||||
defer if err != nil {
|
||||
for e in r {
|
||||
delete(e, allocator)
|
||||
}
|
||||
delete(r)
|
||||
}
|
||||
for from, i, p := 0, 0, envs; true; i += 1 {
|
||||
c := ([^]u16)(p)[i]
|
||||
if c == 0 {
|
||||
@@ -90,12 +99,14 @@ _environ :: proc(allocator: runtime.Allocator) -> []string {
|
||||
break
|
||||
}
|
||||
w := ([^]u16)(p)[from:i]
|
||||
append(&r, win32_utf16_to_utf8(w, allocator) or_else "")
|
||||
s := win32_utf16_to_utf8(w, allocator) or_return
|
||||
append(&r, s)
|
||||
from = i + 1
|
||||
}
|
||||
}
|
||||
|
||||
return r[:]
|
||||
environ = r[:]
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -10,8 +10,12 @@ _error_string :: proc(errno: i32) -> string {
|
||||
return string(posix.strerror(posix.Errno(errno)))
|
||||
}
|
||||
|
||||
_get_platform_error :: proc() -> Error {
|
||||
#partial switch errno := posix.errno(); errno {
|
||||
_get_platform_error_from_errno :: proc() -> Error {
|
||||
return _get_platform_error_existing(posix.errno())
|
||||
}
|
||||
|
||||
_get_platform_error_existing :: proc(errno: posix.Errno) -> Error {
|
||||
#partial switch errno {
|
||||
case .EPERM:
|
||||
return .Permission_Denied
|
||||
case .EEXIST:
|
||||
@@ -32,3 +36,8 @@ _get_platform_error :: proc() -> Error {
|
||||
return Platform_Error(errno)
|
||||
}
|
||||
}
|
||||
|
||||
_get_platform_error :: proc{
|
||||
_get_platform_error_existing,
|
||||
_get_platform_error_from_errno,
|
||||
}
|
||||
|
||||
core/os/os2/errors_wasi.odin (new file, +47)
@@ -0,0 +1,47 @@
|
||||
#+private
|
||||
package os2
|
||||
|
||||
import "base:runtime"
|
||||
|
||||
import "core:slice"
|
||||
import "core:sys/wasm/wasi"
|
||||
|
||||
_Platform_Error :: wasi.errno_t
|
||||
|
||||
_error_string :: proc(errno: i32) -> string {
|
||||
e := wasi.errno_t(errno)
|
||||
if e == .NONE {
|
||||
return ""
|
||||
}
|
||||
|
||||
err := runtime.Type_Info_Enum_Value(e)
|
||||
|
||||
ti := &runtime.type_info_base(type_info_of(wasi.errno_t)).variant.(runtime.Type_Info_Enum)
|
||||
if idx, ok := slice.binary_search(ti.values, err); ok {
|
||||
return ti.names[idx]
|
||||
}
|
||||
return "<unknown platform error>"
|
||||
}
|
||||
|
||||
_get_platform_error :: proc(errno: wasi.errno_t) -> Error {
|
||||
#partial switch errno {
|
||||
case .PERM:
|
||||
return .Permission_Denied
|
||||
case .EXIST:
|
||||
return .Exist
|
||||
case .NOENT:
|
||||
return .Not_Exist
|
||||
case .TIMEDOUT:
|
||||
return .Timeout
|
||||
case .PIPE:
|
||||
return .Broken_Pipe
|
||||
case .BADF:
|
||||
return .Invalid_File
|
||||
case .NOMEM:
|
||||
return .Out_Of_Memory
|
||||
case .NOSYS:
|
||||
return .Unsupported
|
||||
case:
|
||||
return Platform_Error(errno)
|
||||
}
|
||||
}
|
||||
@@ -115,13 +115,18 @@ open :: proc(name: string, flags := File_Flags{.Read}, perm := 0o777) -> (^File,
|
||||
|
||||
@(require_results)
|
||||
new_file :: proc(handle: uintptr, name: string) -> ^File {
|
||||
file, err := _new_file(handle, name)
|
||||
file, err := _new_file(handle, name, file_allocator())
|
||||
if err != nil {
|
||||
panic(error_string(err))
|
||||
}
|
||||
return file
|
||||
}
|
||||
|
||||
@(require_results)
|
||||
clone :: proc(f: ^File) -> (^File, Error) {
|
||||
return _clone(f)
|
||||
}
|
||||
|
||||
@(require_results)
|
||||
fd :: proc(f: ^File) -> uintptr {
|
||||
return _fd(f)
|
||||
|
||||
@@ -7,6 +7,13 @@ import "core:time"
|
||||
import "core:sync"
|
||||
import "core:sys/linux"
|
||||
|
||||
// Most implementations will return EINVAL at some point when doing big writes.
// In practice a read/write call would probably never read/write these big buffers all at once,
// which is why the number of bytes is returned and why there are procs that will call this in a
// loop for you.
// We set a max of 1GB to keep alignment and to be safe.
MAX_RW :: 1 << 30
|
||||
|
||||
File_Impl :: struct {
|
||||
file: File,
|
||||
name: string,
|
||||
@@ -39,37 +46,23 @@ _stderr := File{
|
||||
|
||||
@init
|
||||
_standard_stream_init :: proc() {
|
||||
@static stdin_impl := File_Impl {
|
||||
name = "/proc/self/fd/0",
|
||||
fd = 0,
|
||||
new_std :: proc(impl: ^File_Impl, fd: linux.Fd, name: string) -> ^File {
|
||||
impl.file.impl = impl
|
||||
impl.fd = linux.Fd(fd)
|
||||
impl.allocator = runtime.nil_allocator()
|
||||
impl.name = name
|
||||
impl.file.stream = {
|
||||
data = impl,
|
||||
procedure = _file_stream_proc,
|
||||
}
|
||||
impl.file.fstat = _fstat
|
||||
return &impl.file
|
||||
}
|
||||
|
||||
@static stdout_impl := File_Impl {
|
||||
name = "/proc/self/fd/1",
|
||||
fd = 1,
|
||||
}
|
||||
|
||||
@static stderr_impl := File_Impl {
|
||||
name = "/proc/self/fd/2",
|
||||
fd = 2,
|
||||
}
|
||||
|
||||
stdin_impl.allocator = file_allocator()
|
||||
stdout_impl.allocator = file_allocator()
|
||||
stderr_impl.allocator = file_allocator()
|
||||
|
||||
_stdin.impl = &stdin_impl
|
||||
_stdout.impl = &stdout_impl
|
||||
_stderr.impl = &stderr_impl
|
||||
|
||||
// cannot define these initially because cyclic reference
|
||||
_stdin.stream.data = &stdin_impl
|
||||
_stdout.stream.data = &stdout_impl
|
||||
_stderr.stream.data = &stderr_impl
|
||||
|
||||
stdin = &_stdin
|
||||
stdout = &_stdout
|
||||
stderr = &_stderr
|
||||
@(static) files: [3]File_Impl
|
||||
stdin = new_std(&files[0], 0, "/proc/self/fd/0")
|
||||
stdout = new_std(&files[1], 1, "/proc/self/fd/1")
|
||||
stderr = new_std(&files[2], 2, "/proc/self/fd/2")
|
||||
}
|
||||
|
||||
_open :: proc(name: string, flags: File_Flags, perm: int) -> (f: ^File, err: Error) {
|
||||
@@ -80,6 +73,9 @@ _open :: proc(name: string, flags: File_Flags, perm: int) -> (f: ^File, err: Err
|
||||
// terminal would be incredibly rare. This has no effect on files while
|
||||
// allowing us to open serial devices.
|
||||
sys_flags: linux.Open_Flags = {.NOCTTY, .CLOEXEC}
|
||||
when size_of(rawptr) == 4 {
|
||||
sys_flags += {.LARGEFILE}
|
||||
}
|
||||
switch flags & (O_RDONLY|O_WRONLY|O_RDWR) {
|
||||
case O_RDONLY:
|
||||
case O_WRONLY: sys_flags += {.WRONLY}
|
||||
@@ -97,18 +93,18 @@ _open :: proc(name: string, flags: File_Flags, perm: int) -> (f: ^File, err: Err
|
||||
return nil, _get_platform_error(errno)
|
||||
}
|
||||
|
||||
return _new_file(uintptr(fd), name)
|
||||
return _new_file(uintptr(fd), name, file_allocator())
|
||||
}
|
||||
|
||||
_new_file :: proc(fd: uintptr, _: string = "") -> (f: ^File, err: Error) {
|
||||
impl := new(File_Impl, file_allocator()) or_return
|
||||
_new_file :: proc(fd: uintptr, _: string, allocator: runtime.Allocator) -> (f: ^File, err: Error) {
|
||||
impl := new(File_Impl, allocator) or_return
|
||||
defer if err != nil {
|
||||
free(impl, file_allocator())
|
||||
free(impl, allocator)
|
||||
}
|
||||
impl.file.impl = impl
|
||||
impl.fd = linux.Fd(fd)
|
||||
impl.allocator = file_allocator()
|
||||
impl.name = _get_full_path(impl.fd, file_allocator()) or_return
|
||||
impl.allocator = allocator
|
||||
impl.name = _get_full_path(impl.fd, impl.allocator) or_return
|
||||
impl.file.stream = {
|
||||
data = impl,
|
||||
procedure = _file_stream_proc,
|
||||
@@ -117,6 +113,23 @@ _new_file :: proc(fd: uintptr, _: string = "") -> (f: ^File, err: Error) {
|
||||
return &impl.file, nil
|
||||
}
|
||||
|
||||
_clone :: proc(f: ^File) -> (clone: ^File, err: Error) {
|
||||
if f == nil || f.impl == nil {
|
||||
return
|
||||
}
|
||||
|
||||
fd := (^File_Impl)(f.impl).fd
|
||||
|
||||
clonefd, errno := linux.dup(fd)
|
||||
if errno != nil {
|
||||
err = _get_platform_error(errno)
|
||||
return
|
||||
}
|
||||
defer if err != nil { linux.close(clonefd) }
|
||||
|
||||
return _new_file(uintptr(clonefd), "", file_allocator())
|
||||
}
|
||||
|
||||
|
||||
@(require_results)
|
||||
_open_buffered :: proc(name: string, buffer_size: uint, flags := File_Flags{.Read}, perm := 0o777) -> (f: ^File, err: Error) {
|
||||
@@ -190,10 +203,11 @@ _seek :: proc(f: ^File_Impl, offset: i64, whence: io.Seek_From) -> (ret: i64, er
|
||||
}
|
||||
|
||||
_read :: proc(f: ^File_Impl, p: []byte) -> (i64, Error) {
|
||||
if len(p) == 0 {
|
||||
if len(p) <= 0 {
|
||||
return 0, nil
|
||||
}
|
||||
n, errno := linux.read(f.fd, p[:])
|
||||
|
||||
n, errno := linux.read(f.fd, p[:min(len(p), MAX_RW)])
|
||||
if errno != .NONE {
|
||||
return -1, _get_platform_error(errno)
|
||||
}
|
||||
@@ -201,13 +215,13 @@ _read :: proc(f: ^File_Impl, p: []byte) -> (i64, Error) {
|
||||
}
|
||||
|
||||
_read_at :: proc(f: ^File_Impl, p: []byte, offset: i64) -> (i64, Error) {
|
||||
if len(p) == 0 {
|
||||
if len(p) <= 0 {
|
||||
return 0, nil
|
||||
}
|
||||
if offset < 0 {
|
||||
return 0, .Invalid_Offset
|
||||
}
|
||||
n, errno := linux.pread(f.fd, p[:], offset)
|
||||
n, errno := linux.pread(f.fd, p[:min(len(p), MAX_RW)], offset)
|
||||
if errno != .NONE {
|
||||
return -1, _get_platform_error(errno)
|
||||
}
|
||||
@@ -217,29 +231,42 @@ _read_at :: proc(f: ^File_Impl, p: []byte, offset: i64) -> (i64, Error) {
|
||||
return i64(n), nil
|
||||
}
|
||||
|
||||
_write :: proc(f: ^File_Impl, p: []byte) -> (i64, Error) {
|
||||
if len(p) == 0 {
|
||||
return 0, nil
|
||||
_write :: proc(f: ^File_Impl, p: []byte) -> (nt: i64, err: Error) {
|
||||
p := p
|
||||
for len(p) > 0 {
|
||||
n, errno := linux.write(f.fd, p[:min(len(p), MAX_RW)])
|
||||
if errno != .NONE {
|
||||
err = _get_platform_error(errno)
|
||||
return
|
||||
}
|
||||
|
||||
p = p[n:]
|
||||
nt += i64(n)
|
||||
}
|
||||
n, errno := linux.write(f.fd, p[:])
|
||||
if errno != .NONE {
|
||||
return -1, _get_platform_error(errno)
|
||||
}
|
||||
return i64(n), nil
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
_write_at :: proc(f: ^File_Impl, p: []byte, offset: i64) -> (i64, Error) {
|
||||
if len(p) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
_write_at :: proc(f: ^File_Impl, p: []byte, offset: i64) -> (nt: i64, err: Error) {
|
||||
if offset < 0 {
|
||||
return 0, .Invalid_Offset
|
||||
}
|
||||
n, errno := linux.pwrite(f.fd, p[:], offset)
|
||||
if errno != .NONE {
|
||||
return -1, _get_platform_error(errno)
|
||||
|
||||
p := p
|
||||
offset := offset
|
||||
for len(p) > 0 {
|
||||
n, errno := linux.pwrite(f.fd, p[:min(len(p), MAX_RW)], offset)
|
||||
if errno != .NONE {
|
||||
err = _get_platform_error(errno)
|
||||
return
|
||||
}
|
||||
|
||||
p = p[n:]
|
||||
nt += i64(n)
|
||||
offset += i64(n)
|
||||
}
|
||||
return i64(n), nil
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
_file_size :: proc(f: ^File_Impl) -> (n: i64, err: Error) {
|
||||
@@ -272,28 +299,12 @@ _truncate :: proc(f: ^File, size: i64) -> Error {
|
||||
}
|
||||
|
||||
_remove :: proc(name: string) -> Error {
|
||||
is_dir_fd :: proc(fd: linux.Fd) -> bool {
|
||||
s: linux.Stat
|
||||
if linux.fstat(fd, &s) != .NONE {
|
||||
return false
|
||||
}
|
||||
return linux.S_ISDIR(s.mode)
|
||||
}
|
||||
|
||||
TEMP_ALLOCATOR_GUARD()
|
||||
name_cstr := temp_cstring(name) or_return
|
||||
|
||||
fd, errno := linux.open(name_cstr, {.NOFOLLOW})
|
||||
#partial switch (errno) {
|
||||
case .ELOOP:
|
||||
/* symlink */
|
||||
case .NONE:
|
||||
defer linux.close(fd)
|
||||
if is_dir_fd(fd) {
|
||||
return _get_platform_error(linux.rmdir(name_cstr))
|
||||
}
|
||||
case:
|
||||
return _get_platform_error(errno)
|
||||
if fd, errno := linux.open(name_cstr, _OPENDIR_FLAGS + {.NOFOLLOW}); errno == .NONE {
|
||||
linux.close(fd)
|
||||
return _get_platform_error(linux.rmdir(name_cstr))
|
||||
}
|
||||
|
||||
return _get_platform_error(linux.unlink(name_cstr))
|
||||
|
||||
@@ -21,23 +21,29 @@ File_Impl :: struct {
|
||||
name: string,
|
||||
cname: cstring,
|
||||
fd: posix.FD,
|
||||
allocator: runtime.Allocator,
|
||||
}
|
||||
|
||||
@(init)
|
||||
init_std_files :: proc() {
|
||||
// NOTE: is this (paths) also the case on non darwin?
|
||||
new_std :: proc(impl: ^File_Impl, fd: posix.FD, name: cstring) -> ^File {
|
||||
impl.file.impl = impl
|
||||
impl.fd = fd
|
||||
impl.allocator = runtime.nil_allocator()
|
||||
impl.cname = name
|
||||
impl.name = string(name)
|
||||
impl.file.stream = {
|
||||
data = impl,
|
||||
procedure = _file_stream_proc,
|
||||
}
|
||||
impl.file.fstat = _fstat
|
||||
return &impl.file
|
||||
}
|
||||
|
||||
stdin = __new_file(posix.STDIN_FILENO)
|
||||
(^File_Impl)(stdin.impl).name = "/dev/stdin"
|
||||
(^File_Impl)(stdin.impl).cname = "/dev/stdin"
|
||||
|
||||
stdout = __new_file(posix.STDIN_FILENO)
|
||||
(^File_Impl)(stdout.impl).name = "/dev/stdout"
|
||||
(^File_Impl)(stdout.impl).cname = "/dev/stdout"
|
||||
|
||||
stderr = __new_file(posix.STDIN_FILENO)
|
||||
(^File_Impl)(stderr.impl).name = "/dev/stderr"
|
||||
(^File_Impl)(stderr.impl).cname = "/dev/stderr"
|
||||
@(static) files: [3]File_Impl
|
||||
stdin = new_std(&files[0], posix.STDIN_FILENO, "/dev/stdin")
|
||||
stdout = new_std(&files[1], posix.STDOUT_FILENO, "/dev/stdout")
|
||||
stderr = new_std(&files[2], posix.STDERR_FILENO, "/dev/stderr")
|
||||
}
|
||||
|
||||
_open :: proc(name: string, flags: File_Flags, perm: int) -> (f: ^File, err: Error) {
|
||||
@@ -72,10 +78,10 @@ _open :: proc(name: string, flags: File_Flags, perm: int) -> (f: ^File, err: Err
|
||||
return
|
||||
}
|
||||
|
||||
return _new_file(uintptr(fd), name)
|
||||
return _new_file(uintptr(fd), name, file_allocator())
|
||||
}
|
||||
|
||||
_new_file :: proc(handle: uintptr, name: string) -> (f: ^File, err: Error) {
|
||||
_new_file :: proc(handle: uintptr, name: string, allocator: runtime.Allocator) -> (f: ^File, err: Error) {
|
||||
if name == "" {
|
||||
err = .Invalid_Path
|
||||
return
|
||||
@@ -84,10 +90,10 @@ _new_file :: proc(handle: uintptr, name: string) -> (f: ^File, err: Error) {
|
||||
return
|
||||
}
|
||||
|
||||
crname := _posix_absolute_path(posix.FD(handle), name, file_allocator()) or_return
|
||||
crname := _posix_absolute_path(posix.FD(handle), name, allocator) or_return
|
||||
rname := string(crname)
|
||||
|
||||
f = __new_file(posix.FD(handle))
|
||||
f = __new_file(posix.FD(handle), allocator)
|
||||
impl := (^File_Impl)(f.impl)
|
||||
impl.name = rname
|
||||
impl.cname = crname
|
||||
@@ -95,10 +101,11 @@ _new_file :: proc(handle: uintptr, name: string) -> (f: ^File, err: Error) {
|
||||
return f, nil
|
||||
}
|
||||
|
||||
__new_file :: proc(handle: posix.FD) -> ^File {
|
||||
impl := new(File_Impl, file_allocator())
|
||||
__new_file :: proc(handle: posix.FD, allocator: runtime.Allocator) -> ^File {
|
||||
impl := new(File_Impl, allocator)
|
||||
impl.file.impl = impl
|
||||
impl.fd = posix.FD(handle)
|
||||
impl.allocator = allocator
|
||||
impl.file.stream = {
|
||||
data = impl,
|
||||
procedure = _file_stream_proc,
|
||||
@@ -107,6 +114,29 @@ __new_file :: proc(handle: posix.FD) -> ^File {
|
||||
return &impl.file
|
||||
}
|
||||
|
||||
_clone :: proc(f: ^File) -> (clone: ^File, err: Error) {
|
||||
if f == nil || f.impl == nil {
|
||||
err = .Invalid_Pointer
|
||||
return
|
||||
}
|
||||
|
||||
impl := (^File_Impl)(f.impl)
|
||||
|
||||
fd := posix.dup(impl.fd)
|
||||
if fd <= 0 {
|
||||
err = _get_platform_error()
|
||||
return
|
||||
}
|
||||
defer if err != nil { posix.close(fd) }
|
||||
|
||||
clone = __new_file(fd, file_allocator())
|
||||
clone_impl := (^File_Impl)(clone.impl)
|
||||
clone_impl.cname = clone_to_cstring(impl.name, file_allocator()) or_return
|
||||
clone_impl.name = string(clone_impl.cname)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
_close :: proc(f: ^File_Impl) -> (err: Error) {
|
||||
if f == nil { return nil }
|
||||
|
||||
@@ -114,8 +144,10 @@ _close :: proc(f: ^File_Impl) -> (err: Error) {
|
||||
err = _get_platform_error()
|
||||
}
|
||||
|
||||
delete(f.cname, file_allocator())
|
||||
free(f, file_allocator())
|
||||
allocator := f.allocator
|
||||
|
||||
delete(f.cname, allocator)
|
||||
free(f, allocator)
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
560
core/os/os2/file_wasi.odin
Normal file
@@ -0,0 +1,560 @@
|
||||
#+private
|
||||
package os2
|
||||
|
||||
import "base:runtime"
|
||||
|
||||
import "core:io"
|
||||
import "core:sys/wasm/wasi"
|
||||
import "core:time"
|
||||
|
||||
// NOTE: Don't know if there is a max in wasi.
|
||||
MAX_RW :: 1 << 30
|
||||
|
||||
File_Impl :: struct {
|
||||
file: File,
|
||||
name: string,
|
||||
fd: wasi.fd_t,
|
||||
allocator: runtime.Allocator,
|
||||
}
|
||||
|
||||
// WASI works with "preopened" directories: the environment provides directories
// (for example with `wasmtime --dir=. module.wasm`) and those given directories
// are the only ones accessible to the application.
//
// So in order to facilitate the `os` API (absolute paths etc.) we keep a list
// of the given directories and match them when needed (notably `os.open`).
Preopen :: struct {
	fd:     wasi.fd_t,
	prefix: string,
}
preopens: []Preopen
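A minimal, hypothetical sketch (not part of the commit) of how the matching described above would be used; the preopen setup (e.g. `wasmtime --dir=.`) and the path `data/config.txt` are assumptions for illustration:

example_preopen_lookup :: proc() {
	// Assumes the module was started with a preopened directory,
	// e.g. `wasmtime --dir=. module.wasm`.
	if dir_fd, relative, ok := match_preopen("data/config.txt"); ok {
		// dir_fd is the preopened directory descriptor and `relative` is the
		// path (relative to it) that would be handed to wasi.path_open.
		_ = dir_fd
		_ = relative
	}
}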
|
||||
|
||||
@(init)
|
||||
init_std_files :: proc() {
|
||||
new_std :: proc(impl: ^File_Impl, fd: wasi.fd_t, name: string) -> ^File {
|
||||
impl.file.impl = impl
|
||||
impl.allocator = runtime.nil_allocator()
|
||||
impl.fd = fd
|
||||
impl.name = string(name)
|
||||
impl.file.stream = {
|
||||
data = impl,
|
||||
procedure = _file_stream_proc,
|
||||
}
|
||||
impl.file.fstat = _fstat
|
||||
return &impl.file
|
||||
}
|
||||
|
||||
@(static) files: [3]File_Impl
|
||||
stdin = new_std(&files[0], 0, "/dev/stdin")
|
||||
stdout = new_std(&files[1], 1, "/dev/stdout")
|
||||
stderr = new_std(&files[2], 2, "/dev/stderr")
|
||||
}
|
||||
|
||||
@(init)
|
||||
init_preopens :: proc() {
|
||||
strip_prefixes :: proc(path: string) -> string {
|
||||
path := path
|
||||
loop: for len(path) > 0 {
|
||||
switch {
|
||||
case path[0] == '/':
|
||||
path = path[1:]
|
||||
case len(path) > 2 && path[0] == '.' && path[1] == '/':
|
||||
path = path[2:]
|
||||
case len(path) == 1 && path[0] == '.':
|
||||
path = path[1:]
|
||||
case:
|
||||
break loop
|
||||
}
|
||||
}
|
||||
return path
|
||||
}
|
||||
|
||||
n: int
|
||||
n_loop: for fd := wasi.fd_t(3); ; fd += 1 {
|
||||
_, err := wasi.fd_prestat_get(fd)
|
||||
#partial switch err {
|
||||
case .BADF: break n_loop
|
||||
case .SUCCESS: n += 1
|
||||
case:
|
||||
print_error(stderr, _get_platform_error(err), "unexpected error from wasi_prestat_get")
|
||||
break n_loop
|
||||
}
|
||||
}
|
||||
|
||||
alloc_err: runtime.Allocator_Error
|
||||
preopens, alloc_err = make([]Preopen, n, file_allocator())
|
||||
if alloc_err != nil {
|
||||
print_error(stderr, alloc_err, "could not allocate memory for wasi preopens")
|
||||
return
|
||||
}
|
||||
|
||||
loop: for &preopen, i in preopens {
|
||||
fd := wasi.fd_t(3 + i)
|
||||
|
||||
desc, err := wasi.fd_prestat_get(fd)
|
||||
assert(err == .SUCCESS)
|
||||
|
||||
switch desc.tag {
|
||||
case .DIR:
|
||||
buf: []byte
|
||||
buf, alloc_err = make([]byte, desc.dir.pr_name_len, file_allocator())
|
||||
if alloc_err != nil {
|
||||
print_error(stderr, alloc_err, "could not allocate memory for wasi preopen dir name")
|
||||
continue loop
|
||||
}
|
||||
|
||||
if err = wasi.fd_prestat_dir_name(fd, buf); err != .SUCCESS {
|
||||
print_error(stderr, _get_platform_error(err), "could not get filesystem preopen dir name")
|
||||
continue loop
|
||||
}
|
||||
|
||||
preopen.fd = fd
|
||||
preopen.prefix = strip_prefixes(string(buf))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@(require_results)
|
||||
match_preopen :: proc(path: string) -> (wasi.fd_t, string, bool) {
|
||||
@(require_results)
|
||||
prefix_matches :: proc(prefix, path: string) -> bool {
|
||||
// Empty is valid for any relative path.
|
||||
if len(prefix) == 0 && len(path) > 0 && path[0] != '/' {
|
||||
return true
|
||||
}
|
||||
|
||||
if len(path) < len(prefix) {
|
||||
return false
|
||||
}
|
||||
|
||||
if path[:len(prefix)] != prefix {
|
||||
return false
|
||||
}
|
||||
|
||||
// Only match on full components.
|
||||
i := len(prefix)
|
||||
for i > 0 && prefix[i-1] == '/' {
|
||||
i -= 1
|
||||
}
|
||||
return path[i] == '/'
|
||||
}
|
||||
|
||||
path := path
|
||||
if path == "" {
|
||||
return 0, "", false
|
||||
}
|
||||
|
||||
for len(path) > 0 && path[0] == '/' {
|
||||
path = path[1:]
|
||||
}
|
||||
|
||||
match: Preopen
|
||||
#reverse for preopen in preopens {
|
||||
if (match.fd == 0 || len(preopen.prefix) > len(match.prefix)) && prefix_matches(preopen.prefix, path) {
|
||||
match = preopen
|
||||
}
|
||||
}
|
||||
|
||||
if match.fd == 0 {
|
||||
return 0, "", false
|
||||
}
|
||||
|
||||
relative := path[len(match.prefix):]
|
||||
for len(relative) > 0 && relative[0] == '/' {
|
||||
relative = relative[1:]
|
||||
}
|
||||
|
||||
if len(relative) == 0 {
|
||||
relative = "."
|
||||
}
|
||||
|
||||
return match.fd, relative, true
|
||||
}
|
||||
|
||||
_open :: proc(name: string, flags: File_Flags, perm: int) -> (f: ^File, err: Error) {
|
||||
dir_fd, relative, ok := match_preopen(name)
|
||||
if !ok {
|
||||
return nil, .Invalid_Path
|
||||
}
|
||||
|
||||
oflags: wasi.oflags_t
|
||||
if .Create in flags { oflags += {.CREATE} }
|
||||
if .Excl in flags { oflags += {.EXCL} }
|
||||
if .Trunc in flags { oflags += {.TRUNC} }
|
||||
|
||||
fdflags: wasi.fdflags_t
|
||||
if .Append in flags { fdflags += {.APPEND} }
|
||||
if .Sync in flags { fdflags += {.SYNC} }
|
||||
|
||||
// NOTE: rights are adjusted to what this package's functions might want to call.
|
||||
rights: wasi.rights_t
|
||||
if .Read in flags { rights += {.FD_READ, .FD_FILESTAT_GET, .PATH_FILESTAT_GET} }
|
||||
if .Write in flags { rights += {.FD_WRITE, .FD_SYNC, .FD_FILESTAT_SET_SIZE, .FD_FILESTAT_SET_TIMES, .FD_SEEK} }
|
||||
|
||||
fd, fderr := wasi.path_open(dir_fd, {.SYMLINK_FOLLOW}, relative, oflags, rights, {}, fdflags)
|
||||
if fderr != nil {
|
||||
err = _get_platform_error(fderr)
|
||||
return
|
||||
}
|
||||
|
||||
return _new_file(uintptr(fd), name, file_allocator())
|
||||
}
|
||||
|
||||
_new_file :: proc(handle: uintptr, name: string, allocator: runtime.Allocator) -> (f: ^File, err: Error) {
|
||||
if name == "" {
|
||||
err = .Invalid_Path
|
||||
return
|
||||
}
|
||||
|
||||
impl := new(File_Impl, allocator) or_return
|
||||
defer if err != nil { free(impl, allocator) }
|
||||
|
||||
impl.allocator = allocator
|
||||
// NOTE: wasi doesn't really do full paths, as far as I can tell.
|
||||
impl.name = clone_string(name, allocator) or_return
|
||||
impl.fd = wasi.fd_t(handle)
|
||||
impl.file.impl = impl
|
||||
impl.file.stream = {
|
||||
data = impl,
|
||||
procedure = _file_stream_proc,
|
||||
}
|
||||
impl.file.fstat = _fstat
|
||||
|
||||
return &impl.file, nil
|
||||
}
|
||||
|
||||
_clone :: proc(f: ^File) -> (clone: ^File, err: Error) {
|
||||
if f == nil || f.impl == nil {
|
||||
return
|
||||
}
|
||||
|
||||
dir_fd, relative, ok := match_preopen(name(f))
|
||||
if !ok {
|
||||
return nil, .Invalid_Path
|
||||
}
|
||||
|
||||
fd, fderr := wasi.path_open(dir_fd, {.SYMLINK_FOLLOW}, relative, {}, {}, {}, {})
|
||||
if fderr != nil {
|
||||
err = _get_platform_error(fderr)
|
||||
return
|
||||
}
|
||||
defer if err != nil { wasi.fd_close(fd) }
|
||||
|
||||
fderr = wasi.fd_renumber((^File_Impl)(f.impl).fd, fd)
|
||||
if fderr != nil {
|
||||
err = _get_platform_error(fderr)
|
||||
return
|
||||
}
|
||||
|
||||
return _new_file(uintptr(fd), name(f), file_allocator())
|
||||
}
|
||||
|
||||
_close :: proc(f: ^File_Impl) -> (err: Error) {
|
||||
if errno := wasi.fd_close(f.fd); errno != nil {
|
||||
err = _get_platform_error(errno)
|
||||
}
|
||||
|
||||
delete(f.name, f.allocator)
|
||||
free(f, f.allocator)
|
||||
return
|
||||
}
|
||||
|
||||
_fd :: proc(f: ^File) -> uintptr {
|
||||
return uintptr(__fd(f))
|
||||
}
|
||||
|
||||
__fd :: proc(f: ^File) -> wasi.fd_t {
|
||||
if f != nil && f.impl != nil {
|
||||
return (^File_Impl)(f.impl).fd
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
_name :: proc(f: ^File) -> string {
|
||||
if f != nil && f.impl != nil {
|
||||
return (^File_Impl)(f.impl).name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
_sync :: proc(f: ^File) -> Error {
|
||||
return _get_platform_error(wasi.fd_sync(__fd(f)))
|
||||
}
|
||||
|
||||
_truncate :: proc(f: ^File, size: i64) -> Error {
|
||||
return _get_platform_error(wasi.fd_filestat_set_size(__fd(f), wasi.filesize_t(size)))
|
||||
}
|
||||
|
||||
_remove :: proc(name: string) -> Error {
|
||||
dir_fd, relative, ok := match_preopen(name)
|
||||
if !ok {
|
||||
return .Invalid_Path
|
||||
}
|
||||
|
||||
err := wasi.path_remove_directory(dir_fd, relative)
|
||||
if err == .NOTDIR {
|
||||
err = wasi.path_unlink_file(dir_fd, relative)
|
||||
}
|
||||
|
||||
return _get_platform_error(err)
|
||||
}
|
||||
|
||||
_rename :: proc(old_path, new_path: string) -> Error {
|
||||
src_dir_fd, src_relative, src_ok := match_preopen(old_path)
|
||||
if !src_ok {
|
||||
return .Invalid_Path
|
||||
}
|
||||
|
||||
new_dir_fd, new_relative, new_ok := match_preopen(new_path)
|
||||
if !new_ok {
|
||||
return .Invalid_Path
|
||||
}
|
||||
|
||||
return _get_platform_error(wasi.path_rename(src_dir_fd, src_relative, new_dir_fd, new_relative))
|
||||
}
|
||||
|
||||
_link :: proc(old_name, new_name: string) -> Error {
|
||||
src_dir_fd, src_relative, src_ok := match_preopen(old_name)
|
||||
if !src_ok {
|
||||
return .Invalid_Path
|
||||
}
|
||||
|
||||
new_dir_fd, new_relative, new_ok := match_preopen(new_name)
|
||||
if !new_ok {
|
||||
return .Invalid_Path
|
||||
}
|
||||
|
||||
return _get_platform_error(wasi.path_link(src_dir_fd, {.SYMLINK_FOLLOW}, src_relative, new_dir_fd, new_relative))
|
||||
}
|
||||
|
||||
_symlink :: proc(old_name, new_name: string) -> Error {
|
||||
src_dir_fd, src_relative, src_ok := match_preopen(old_name)
|
||||
if !src_ok {
|
||||
return .Invalid_Path
|
||||
}
|
||||
|
||||
new_dir_fd, new_relative, new_ok := match_preopen(new_name)
|
||||
if !new_ok {
|
||||
return .Invalid_Path
|
||||
}
|
||||
|
||||
if src_dir_fd != new_dir_fd {
|
||||
return .Invalid_Path
|
||||
}
|
||||
|
||||
return _get_platform_error(wasi.path_symlink(src_relative, src_dir_fd, new_relative))
|
||||
}
|
||||
|
||||
_read_link :: proc(name: string, allocator: runtime.Allocator) -> (s: string, err: Error) {
|
||||
dir_fd, relative, ok := match_preopen(name)
|
||||
if !ok {
|
||||
return "", .Invalid_Path
|
||||
}
|
||||
|
||||
n, _err := wasi.path_readlink(dir_fd, relative, nil)
|
||||
if _err != nil {
|
||||
err = _get_platform_error(_err)
|
||||
return
|
||||
}
|
||||
|
||||
buf := make([]byte, n, allocator) or_return
|
||||
|
||||
_, _err = wasi.path_readlink(dir_fd, relative, buf)
|
||||
s = string(buf)
|
||||
err = _get_platform_error(_err)
|
||||
return
|
||||
}
|
||||
|
||||
_chdir :: proc(name: string) -> Error {
|
||||
return .Unsupported
|
||||
}
|
||||
|
||||
_fchdir :: proc(f: ^File) -> Error {
|
||||
return .Unsupported
|
||||
}
|
||||
|
||||
_fchmod :: proc(f: ^File, mode: int) -> Error {
|
||||
return .Unsupported
|
||||
}
|
||||
|
||||
_chmod :: proc(name: string, mode: int) -> Error {
|
||||
return .Unsupported
|
||||
}
|
||||
|
||||
_fchown :: proc(f: ^File, uid, gid: int) -> Error {
|
||||
return .Unsupported
|
||||
}
|
||||
|
||||
_chown :: proc(name: string, uid, gid: int) -> Error {
|
||||
return .Unsupported
|
||||
}
|
||||
|
||||
_lchown :: proc(name: string, uid, gid: int) -> Error {
|
||||
return .Unsupported
|
||||
}
|
||||
|
||||
_chtimes :: proc(name: string, atime, mtime: time.Time) -> Error {
|
||||
dir_fd, relative, ok := match_preopen(name)
|
||||
if !ok {
|
||||
return .Invalid_Path
|
||||
}
|
||||
|
||||
_atime := wasi.timestamp_t(atime._nsec)
|
||||
_mtime := wasi.timestamp_t(mtime._nsec)
|
||||
|
||||
return _get_platform_error(wasi.path_filestat_set_times(dir_fd, {.SYMLINK_FOLLOW}, relative, _atime, _mtime, {.MTIM, .ATIM}))
|
||||
}
|
||||
|
||||
_fchtimes :: proc(f: ^File, atime, mtime: time.Time) -> Error {
|
||||
_atime := wasi.timestamp_t(atime._nsec)
|
||||
_mtime := wasi.timestamp_t(mtime._nsec)
|
||||
|
||||
return _get_platform_error(wasi.fd_filestat_set_times(__fd(f), _atime, _mtime, {.ATIM, .MTIM}))
|
||||
}
|
||||
|
||||
_exists :: proc(path: string) -> bool {
|
||||
dir_fd, relative, ok := match_preopen(path)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
_, err := wasi.path_filestat_get(dir_fd, {.SYMLINK_FOLLOW}, relative)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
_file_stream_proc :: proc(stream_data: rawptr, mode: io.Stream_Mode, p: []byte, offset: i64, whence: io.Seek_From) -> (n: i64, err: io.Error) {
|
||||
f := (^File_Impl)(stream_data)
|
||||
fd := f.fd
|
||||
|
||||
switch mode {
|
||||
case .Read:
|
||||
if len(p) <= 0 {
|
||||
return
|
||||
}
|
||||
|
||||
to_read := min(len(p), MAX_RW)
|
||||
_n, _err := wasi.fd_read(fd, {p[:to_read]})
|
||||
n = i64(_n)
|
||||
|
||||
if _err != nil {
|
||||
err = .Unknown
|
||||
} else if n == 0 {
|
||||
err = .EOF
|
||||
}
|
||||
|
||||
return
|
||||
|
||||
case .Read_At:
|
||||
if len(p) <= 0 {
|
||||
return
|
||||
}
|
||||
|
||||
if offset < 0 {
|
||||
err = .Invalid_Offset
|
||||
return
|
||||
}
|
||||
|
||||
to_read := min(len(p), MAX_RW)
|
||||
_n, _err := wasi.fd_pread(fd, {p[:to_read]}, wasi.filesize_t(offset))
|
||||
n = i64(_n)
|
||||
|
||||
if _err != nil {
|
||||
err = .Unknown
|
||||
} else if n == 0 {
|
||||
err = .EOF
|
||||
}
|
||||
|
||||
return
|
||||
|
||||
case .Write:
|
||||
p := p
|
||||
for len(p) > 0 {
|
||||
to_write := min(len(p), MAX_RW)
|
||||
_n, _err := wasi.fd_write(fd, {p[:to_write]})
|
||||
if _err != nil {
|
||||
err = .Unknown
|
||||
return
|
||||
}
|
||||
p = p[_n:]
|
||||
n += i64(_n)
|
||||
}
|
||||
return
|
||||
|
||||
case .Write_At:
|
||||
p := p
|
||||
offset := offset
|
||||
|
||||
if offset < 0 {
|
||||
err = .Invalid_Offset
|
||||
return
|
||||
}
|
||||
|
||||
for len(p) > 0 {
|
||||
to_write := min(len(p), MAX_RW)
|
||||
_n, _err := wasi.fd_pwrite(fd, {p[:to_write]}, wasi.filesize_t(offset))
|
||||
if _err != nil {
|
||||
err = .Unknown
|
||||
return
|
||||
}
|
||||
|
||||
p = p[_n:]
|
||||
n += i64(_n)
|
||||
offset += i64(_n)
|
||||
}
|
||||
return
|
||||
|
||||
case .Seek:
|
||||
#assert(int(wasi.whence_t.SET) == int(io.Seek_From.Start))
|
||||
#assert(int(wasi.whence_t.CUR) == int(io.Seek_From.Current))
|
||||
#assert(int(wasi.whence_t.END) == int(io.Seek_From.End))
|
||||
|
||||
switch whence {
|
||||
case .Start, .Current, .End:
|
||||
break
|
||||
case:
|
||||
err = .Invalid_Whence
|
||||
return
|
||||
}
|
||||
|
||||
_n, _err := wasi.fd_seek(fd, wasi.filedelta_t(offset), wasi.whence_t(whence))
|
||||
#partial switch _err {
|
||||
case .INVAL:
|
||||
err = .Invalid_Offset
|
||||
case:
|
||||
err = .Unknown
|
||||
case .SUCCESS:
|
||||
n = i64(_n)
|
||||
}
|
||||
return
|
||||
|
||||
case .Size:
|
||||
stat, _err := wasi.fd_filestat_get(fd)
|
||||
if _err != nil {
|
||||
err = .Unknown
|
||||
return
|
||||
}
|
||||
|
||||
n = i64(stat.size)
|
||||
return
|
||||
|
||||
case .Flush:
|
||||
ferr := _sync(&f.file)
|
||||
err = error_to_io_error(ferr)
|
||||
return
|
||||
|
||||
case .Close, .Destroy:
|
||||
ferr := _close(f)
|
||||
err = error_to_io_error(ferr)
|
||||
return
|
||||
|
||||
case .Query:
|
||||
return io.query_utility({.Read, .Read_At, .Write, .Write_At, .Seek, .Size, .Flush, .Close, .Destroy, .Query})
|
||||
|
||||
case:
|
||||
return 0, .Empty
|
||||
}
|
||||
}
|
||||
@@ -44,17 +44,38 @@ File_Impl :: struct {
|
||||
|
||||
@(init)
|
||||
init_std_files :: proc() {
|
||||
stdin = new_file(uintptr(win32.GetStdHandle(win32.STD_INPUT_HANDLE)), "<stdin>")
|
||||
stdout = new_file(uintptr(win32.GetStdHandle(win32.STD_OUTPUT_HANDLE)), "<stdout>")
|
||||
stderr = new_file(uintptr(win32.GetStdHandle(win32.STD_ERROR_HANDLE)), "<stderr>")
|
||||
}
|
||||
@(fini)
|
||||
fini_std_files :: proc() {
|
||||
_destroy((^File_Impl)(stdin.impl))
|
||||
_destroy((^File_Impl)(stdout.impl))
|
||||
_destroy((^File_Impl)(stderr.impl))
|
||||
}
|
||||
new_std :: proc(impl: ^File_Impl, code: u32, name: string) -> ^File {
|
||||
impl.file.impl = impl
|
||||
|
||||
impl.allocator = runtime.nil_allocator()
|
||||
impl.fd = win32.GetStdHandle(code)
|
||||
impl.name = name
|
||||
impl.wname = nil
|
||||
|
||||
handle := _handle(&impl.file)
|
||||
kind := File_Impl_Kind.File
|
||||
if m: u32; win32.GetConsoleMode(handle, &m) {
|
||||
kind = .Console
|
||||
}
|
||||
if win32.GetFileType(handle) == win32.FILE_TYPE_PIPE {
|
||||
kind = .Pipe
|
||||
}
|
||||
impl.kind = kind
|
||||
|
||||
impl.file.stream = {
|
||||
data = impl,
|
||||
procedure = _file_stream_proc,
|
||||
}
|
||||
impl.file.fstat = _fstat
|
||||
|
||||
return &impl.file
|
||||
}
|
||||
|
||||
@(static) files: [3]File_Impl
|
||||
stdin = new_std(&files[0], win32.STD_INPUT_HANDLE, "<stdin>")
|
||||
stdout = new_std(&files[1], win32.STD_OUTPUT_HANDLE, "<stdout>")
|
||||
stderr = new_std(&files[2], win32.STD_ERROR_HANDLE, "<stderr>")
|
||||
}
|
||||
|
||||
_handle :: proc(f: ^File) -> win32.HANDLE {
|
||||
return win32.HANDLE(_fd(f))
|
||||
@@ -132,21 +153,21 @@ _open_internal :: proc(name: string, flags: File_Flags, perm: int) -> (handle: u
|
||||
_open :: proc(name: string, flags: File_Flags, perm: int) -> (f: ^File, err: Error) {
|
||||
flags := flags if flags != nil else {.Read}
|
||||
handle := _open_internal(name, flags, perm) or_return
|
||||
return _new_file(handle, name)
|
||||
return _new_file(handle, name, file_allocator())
|
||||
}
|
||||
|
||||
_new_file :: proc(handle: uintptr, name: string) -> (f: ^File, err: Error) {
|
||||
_new_file :: proc(handle: uintptr, name: string, allocator: runtime.Allocator) -> (f: ^File, err: Error) {
|
||||
if handle == INVALID_HANDLE {
|
||||
return
|
||||
}
|
||||
impl := new(File_Impl, file_allocator()) or_return
|
||||
impl := new(File_Impl, allocator) or_return
|
||||
defer if err != nil {
|
||||
free(impl, file_allocator())
|
||||
free(impl, allocator)
|
||||
}
|
||||
|
||||
impl.file.impl = impl
|
||||
|
||||
impl.allocator = file_allocator()
|
||||
impl.allocator = allocator
|
||||
impl.fd = rawptr(handle)
|
||||
impl.name = clone_string(name, impl.allocator) or_return
|
||||
impl.wname = win32_utf8_to_wstring(name, impl.allocator) or_return
|
||||
@@ -180,7 +201,7 @@ _open_buffered :: proc(name: string, buffer_size: uint, flags := File_Flags{.Rea
|
||||
}
|
||||
|
||||
_new_file_buffered :: proc(handle: uintptr, name: string, buffer_size: uint) -> (f: ^File, err: Error) {
|
||||
f, err = _new_file(handle, name)
|
||||
f, err = _new_file(handle, name, file_allocator())
|
||||
if f != nil && err == nil {
|
||||
impl := (^File_Impl)(f.impl)
|
||||
impl.r_buf = make([]byte, buffer_size, file_allocator())
|
||||
@@ -189,6 +210,29 @@ _new_file_buffered :: proc(handle: uintptr, name: string, buffer_size: uint) ->
|
||||
return
|
||||
}
|
||||
|
||||
_clone :: proc(f: ^File) -> (clone: ^File, err: Error) {
|
||||
if f == nil || f.impl == nil {
|
||||
return
|
||||
}
|
||||
|
||||
clonefd: win32.HANDLE
|
||||
process := win32.GetCurrentProcess()
|
||||
if !win32.DuplicateHandle(
|
||||
process,
|
||||
win32.HANDLE(_fd(f)),
|
||||
process,
|
||||
&clonefd,
|
||||
0,
|
||||
false,
|
||||
win32.DUPLICATE_SAME_ACCESS,
|
||||
) {
|
||||
err = _get_platform_error()
|
||||
return
|
||||
}
|
||||
defer if err != nil { win32.CloseHandle(clonefd) }
|
||||
|
||||
return _new_file(uintptr(clonefd), name(f), file_allocator())
|
||||
}
|
||||
|
||||
_fd :: proc(f: ^File) -> uintptr {
|
||||
if f == nil || f.impl == nil {
|
||||
|
||||
@@ -1,726 +1,6 @@
|
||||
#+private
|
||||
package os2
|
||||
|
||||
import "core:sys/linux"
|
||||
import "core:sync"
|
||||
import "core:mem"
|
||||
|
||||
// NOTEs
//
// All allocations below DIRECT_MMAP_THRESHOLD exist inside of memory "Regions." A region
// consists of a Region_Header and the memory that will be divided into allocations to
// send to the user. The memory is an array of "Allocation_Headers" which are 8 bytes.
// Allocation_Headers are used to navigate the memory in the region. The "next" member of
// the Allocation_Header points to the next header, and the space between the headers
// can be used to send to the user. This space between is referred to as "blocks" in the
// code. The indexes in the header refer to these blocks instead of bytes. This allows us
// to index all the memory in the region with a u16.
//
// When an allocation request is made, it will use the first free block that can contain
// the entire request. If there is an excess number of blocks (as specified by the constant
// BLOCK_SEGMENT_THRESHOLD), this extra space will be segmented and left in the free_list.
//
// To keep the implementation simple, there can never exist 2 free blocks adjacent to each
// other. Any freeing will result in attempting to merge the blocks before and after the
// newly free'd blocks.
//
// Any request for a size above the DIRECT_MMAP_THRESHOLD will result in the allocation
// getting its own individual mmap. Individual mmaps will still get an Allocation_Header
// that contains the size, with the last bit set to 1 to indicate it is indeed a direct
// mmap allocation.

// Why not brk?
// glibc's malloc utilizes a mix of the brk and mmap system calls. This implementation
// does *not* utilize the brk system call to avoid possible conflicts with foreign C
// code. Even though we aren't directly using libc, there is nothing stopping the user
// from doing so.

// What's with all the #no_bounds_check?
// When memory is returned from mmap, it technically doesn't get written ... well ... anywhere
// until that region is written to by *you*. So, when a new region is created, we call mmap
// to get a pointer to some memory, and we claim that memory is a ^Region. Therefore, the
// region itself is never formally initialized by the compiler, as this would result in writing
// zeros to memory that we can already assume is 0. This would also have the effect of
// actually committing this data to memory whether it gets used or not.
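A short worked example (editorial addition; the concrete sizes are assumed) to make the block arithmetic concrete:

// e.g. with BLOCK_SIZE = size_of(Allocation_Header) = 8, a request for 20 bytes
// rounds up via _round_up_to_nearest(20, 8) to 24 bytes, i.e. 3 blocks (which also
// satisfies MINIMUM_BLOCK_COUNT). The allocation's header then stores
// next == idx + 3 + 1, so _get_block_count(header) == next - idx - 1 == 3, and the
// pointer handed to the user is the first block immediately after the header.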
|
||||
|
||||
|
||||
//
|
||||
// Some variables to play with
|
||||
//
|
||||
|
||||
// Minimum blocks used for any one allocation
|
||||
MINIMUM_BLOCK_COUNT :: 2
|
||||
|
||||
// Number of extra blocks beyond the requested amount where we would segment.
|
||||
// E.g. (blocks) |H0123456| 7 available
|
||||
// |H01H0123| Ask for 2, now 4 available
|
||||
BLOCK_SEGMENT_THRESHOLD :: 4
|
||||
|
||||
// Anything above this threshold will get its own memory map. Since regions
|
||||
// are indexed by 16 bit integers, this value should not surpass max(u16) * 6
|
||||
DIRECT_MMAP_THRESHOLD_USER :: int(max(u16))
|
||||
|
||||
// The point at which we convert direct mmap to region. This should be a decent
|
||||
// amount less than DIRECT_MMAP_THRESHOLD to avoid jumping in and out of regions.
|
||||
MMAP_TO_REGION_SHRINK_THRESHOLD :: DIRECT_MMAP_THRESHOLD - PAGE_SIZE * 4
|
||||
|
||||
// free_list is dynamic and is initialized in the beginning of the region memory
|
||||
// when the region is initialized. Once resized, it can be moved anywhere.
|
||||
FREE_LIST_DEFAULT_CAP :: 32
|
||||
|
||||
|
||||
//
|
||||
// Other constants that should not be touched
|
||||
//
|
||||
|
||||
// This universally seems to be 4096 outside of uncommon archs.
|
||||
PAGE_SIZE :: 4096
|
||||
|
||||
// just rounding up to nearest PAGE_SIZE
|
||||
DIRECT_MMAP_THRESHOLD :: (DIRECT_MMAP_THRESHOLD_USER-1) + PAGE_SIZE - (DIRECT_MMAP_THRESHOLD_USER-1) % PAGE_SIZE
|
||||
|
||||
// Regions must be big enough to hold DIRECT_MMAP_THRESHOLD - 1 as well
|
||||
// as end right on a page boundary so as to not waste space.
|
||||
SIZE_OF_REGION :: DIRECT_MMAP_THRESHOLD + 4 * int(PAGE_SIZE)
|
||||
|
||||
// size of user memory blocks
|
||||
BLOCK_SIZE :: size_of(Allocation_Header)
|
||||
|
||||
// number of allocation sections (call them blocks) of the region used for allocations
|
||||
BLOCKS_PER_REGION :: u16((SIZE_OF_REGION - size_of(Region_Header)) / BLOCK_SIZE)
|
||||
|
||||
// minimum amount of space that can be used by any individual allocation (includes header)
|
||||
MINIMUM_ALLOCATION :: (MINIMUM_BLOCK_COUNT * BLOCK_SIZE) + BLOCK_SIZE
|
||||
|
||||
// This is used as a boolean value for Region_Header.local_addr.
|
||||
CURRENTLY_ACTIVE :: (^^Region)(~uintptr(0))
|
||||
|
||||
FREE_LIST_ENTRIES_PER_BLOCK :: BLOCK_SIZE / size_of(u16)
|
||||
|
||||
MMAP_FLAGS : linux.Map_Flags : {.ANONYMOUS, .PRIVATE}
|
||||
MMAP_PROT : linux.Mem_Protection : {.READ, .WRITE}
|
||||
|
||||
@thread_local _local_region: ^Region
|
||||
global_regions: ^Region
|
||||
|
||||
|
||||
// There is no way of correctly setting the last bit of free_idx or
|
||||
// the last bit of requested, so we can safely use it as a flag to
|
||||
// determine if we are interacting with a direct mmap.
|
||||
REQUESTED_MASK :: 0x7FFFFFFFFFFFFFFF
|
||||
IS_DIRECT_MMAP :: 0x8000000000000000
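For illustration only (an editorial note; the 100 KiB figure is made up), the tag is applied and stripped this way by the direct-mmap paths later in the file:

// e.g. for a 100 KiB direct mmap:
//   alloc.requested = u64(102400) + IS_DIRECT_MMAP            // tag as direct mmap
//   size            = int(alloc.requested & REQUESTED_MASK)   // recovers 102400
//   is_direct       = alloc.requested & IS_DIRECT_MMAP != 0   // tests the tag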
|
||||
|
||||
// Special free_idx value that does not index the free_list.
|
||||
NOT_FREE :: 0x7FFF
|
||||
Allocation_Header :: struct #raw_union {
|
||||
using _: struct {
|
||||
// Block indices
|
||||
idx: u16,
|
||||
prev: u16,
|
||||
next: u16,
|
||||
free_idx: u16,
|
||||
},
|
||||
requested: u64,
|
||||
}
|
||||
|
||||
Region_Header :: struct #align(16) {
|
||||
next_region: ^Region, // points to next region in global_heap (linked list)
|
||||
local_addr: ^^Region, // tracks region ownership via address of _local_region
|
||||
reset_addr: ^^Region, // tracks old local addr for reset
|
||||
free_list: []u16,
|
||||
free_list_len: u16,
|
||||
free_blocks: u16, // number of free blocks in region (includes headers)
|
||||
last_used: u16, // farthest back block that has been used (need zeroing?)
|
||||
_reserved: u16,
|
||||
}
|
||||
|
||||
Region :: struct {
|
||||
hdr: Region_Header,
|
||||
memory: [BLOCKS_PER_REGION]Allocation_Header,
|
||||
}
|
||||
|
||||
_heap_allocator_proc :: proc(allocator_data: rawptr, mode: mem.Allocator_Mode,
|
||||
size, alignment: int,
|
||||
old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, mem.Allocator_Error) {
|
||||
//
|
||||
// NOTE(tetra, 2020-01-14): The heap doesn't respect alignment.
|
||||
// Instead, we overallocate by `alignment + size_of(rawptr) - 1`, and insert
|
||||
// padding. We also store the original pointer returned by heap_alloc right before
|
||||
// the pointer we return to the user.
|
||||
//
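// A worked example of this padding scheme (editorial addition; the concrete
// numbers are assumed): size = 100, alignment = 64 on a 64-bit target gives
// a = max(64, align_of(rawptr)) = 64 and space = 100 + 64 - 1 = 163, so
// heap_alloc is asked for space + size_of(rawptr) = 171 bytes. The returned
// pointer is advanced by 8 bytes, rounded up to the next multiple of 64, and
// the original heap_alloc pointer is written into the 8 bytes immediately
// before that rounded address so aligned_free/aligned_resize can recover it.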
|
||||
|
||||
aligned_alloc :: proc(size, alignment: int, old_ptr: rawptr = nil) -> ([]byte, mem.Allocator_Error) {
|
||||
a := max(alignment, align_of(rawptr))
|
||||
space := size + a - 1
|
||||
|
||||
allocated_mem: rawptr
|
||||
if old_ptr != nil {
|
||||
original_old_ptr := mem.ptr_offset((^rawptr)(old_ptr), -1)^
|
||||
allocated_mem = heap_resize(original_old_ptr, space+size_of(rawptr))
|
||||
} else {
|
||||
allocated_mem = heap_alloc(space+size_of(rawptr))
|
||||
}
|
||||
aligned_mem := rawptr(mem.ptr_offset((^u8)(allocated_mem), size_of(rawptr)))
|
||||
|
||||
ptr := uintptr(aligned_mem)
|
||||
aligned_ptr := (ptr - 1 + uintptr(a)) & -uintptr(a)
|
||||
diff := int(aligned_ptr - ptr)
|
||||
if (size + diff) > space || allocated_mem == nil {
|
||||
return nil, .Out_Of_Memory
|
||||
}
|
||||
|
||||
aligned_mem = rawptr(aligned_ptr)
|
||||
mem.ptr_offset((^rawptr)(aligned_mem), -1)^ = allocated_mem
|
||||
|
||||
return mem.byte_slice(aligned_mem, size), nil
|
||||
}
|
||||
|
||||
aligned_free :: proc(p: rawptr) {
|
||||
if p != nil {
|
||||
heap_free(mem.ptr_offset((^rawptr)(p), -1)^)
|
||||
}
|
||||
}
|
||||
|
||||
aligned_resize :: proc(p: rawptr, old_size: int, new_size: int, new_alignment: int) -> (new_memory: []byte, err: mem.Allocator_Error) {
|
||||
if p == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return aligned_alloc(new_size, new_alignment, p)
|
||||
}
|
||||
|
||||
switch mode {
|
||||
case .Alloc, .Alloc_Non_Zeroed:
|
||||
return aligned_alloc(size, alignment)
|
||||
|
||||
case .Free:
|
||||
aligned_free(old_memory)
|
||||
|
||||
case .Free_All:
|
||||
return nil, .Mode_Not_Implemented
|
||||
|
||||
case .Resize, .Resize_Non_Zeroed:
|
||||
if old_memory == nil {
|
||||
return aligned_alloc(size, alignment)
|
||||
}
|
||||
return aligned_resize(old_memory, old_size, size, alignment)
|
||||
|
||||
case .Query_Features:
|
||||
set := (^mem.Allocator_Mode_Set)(old_memory)
|
||||
if set != nil {
|
||||
set^ = {.Alloc, .Free, .Resize, .Query_Features}
|
||||
}
|
||||
return nil, nil
|
||||
|
||||
case .Query_Info:
|
||||
return nil, .Mode_Not_Implemented
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
heap_alloc :: proc(size: int) -> rawptr {
|
||||
if size >= DIRECT_MMAP_THRESHOLD {
|
||||
return _direct_mmap_alloc(size)
|
||||
}
|
||||
|
||||
// atomically check if the local region has been stolen
|
||||
if _local_region != nil {
|
||||
res := sync.atomic_compare_exchange_strong_explicit(
|
||||
&_local_region.hdr.local_addr,
|
||||
&_local_region,
|
||||
CURRENTLY_ACTIVE,
|
||||
.Acquire,
|
||||
.Relaxed,
|
||||
)
|
||||
if res != &_local_region {
|
||||
// At this point, the region has been stolen and res contains the unexpected value
|
||||
expected := res
|
||||
if res != CURRENTLY_ACTIVE {
|
||||
expected = res
|
||||
res = sync.atomic_compare_exchange_strong_explicit(
|
||||
&_local_region.hdr.local_addr,
|
||||
expected,
|
||||
CURRENTLY_ACTIVE,
|
||||
.Acquire,
|
||||
.Relaxed,
|
||||
)
|
||||
}
|
||||
if res != expected {
|
||||
_local_region = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
size := size
|
||||
size = _round_up_to_nearest(size, BLOCK_SIZE)
|
||||
blocks_needed := u16(max(MINIMUM_BLOCK_COUNT, size / BLOCK_SIZE))
|
||||
|
||||
// retrieve a region if new thread or stolen
|
||||
if _local_region == nil {
|
||||
_local_region, _ = _region_retrieve_with_space(blocks_needed)
|
||||
if _local_region == nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
defer sync.atomic_store_explicit(&_local_region.hdr.local_addr, &_local_region, .Release)
|
||||
|
||||
// At this point we have a usable region. Let's find the user some memory
|
||||
idx: u16
|
||||
local_region_idx := _region_get_local_idx()
|
||||
back_idx := -1
|
||||
infinite: for {
|
||||
for i := 0; i < int(_local_region.hdr.free_list_len); i += 1 {
|
||||
idx = _local_region.hdr.free_list[i]
|
||||
#no_bounds_check if _get_block_count(_local_region.memory[idx]) >= blocks_needed {
|
||||
break infinite
|
||||
}
|
||||
}
|
||||
sync.atomic_store_explicit(&_local_region.hdr.local_addr, &_local_region, .Release)
|
||||
_local_region, back_idx = _region_retrieve_with_space(blocks_needed, local_region_idx, back_idx)
|
||||
}
|
||||
user_ptr, used := _region_get_block(_local_region, idx, blocks_needed)
|
||||
|
||||
sync.atomic_sub_explicit(&_local_region.hdr.free_blocks, used + 1, .Release)
|
||||
|
||||
// If this memory was ever used before, it now needs to be zero'd.
|
||||
if idx < _local_region.hdr.last_used {
|
||||
mem.zero(user_ptr, int(used) * BLOCK_SIZE)
|
||||
} else {
|
||||
_local_region.hdr.last_used = idx + used
|
||||
}
|
||||
|
||||
return user_ptr
|
||||
}
|
||||
|
||||
heap_resize :: proc(old_memory: rawptr, new_size: int) -> rawptr #no_bounds_check {
|
||||
alloc := _get_allocation_header(old_memory)
|
||||
if alloc.requested & IS_DIRECT_MMAP > 0 {
|
||||
return _direct_mmap_resize(alloc, new_size)
|
||||
}
|
||||
|
||||
if new_size > DIRECT_MMAP_THRESHOLD {
|
||||
return _direct_mmap_from_region(alloc, new_size)
|
||||
}
|
||||
|
||||
return _region_resize(alloc, new_size)
|
||||
}
|
||||
|
||||
heap_free :: proc(memory: rawptr) {
|
||||
alloc := _get_allocation_header(memory)
|
||||
if sync.atomic_load(&alloc.requested) & IS_DIRECT_MMAP == IS_DIRECT_MMAP {
|
||||
_direct_mmap_free(alloc)
|
||||
return
|
||||
}
|
||||
|
||||
assert(alloc.free_idx == NOT_FREE)
|
||||
|
||||
_region_find_and_assign_local(alloc)
|
||||
_region_local_free(alloc)
|
||||
sync.atomic_store_explicit(&_local_region.hdr.local_addr, &_local_region, .Release)
|
||||
}
|
||||
|
||||
//
|
||||
// Regions
|
||||
//
|
||||
_new_region :: proc() -> ^Region #no_bounds_check {
|
||||
ptr, errno := linux.mmap(0, uint(SIZE_OF_REGION), MMAP_PROT, MMAP_FLAGS, -1, 0)
|
||||
if errno != .NONE {
|
||||
return nil
|
||||
}
|
||||
new_region := (^Region)(ptr)
|
||||
|
||||
new_region.hdr.local_addr = CURRENTLY_ACTIVE
|
||||
new_region.hdr.reset_addr = &_local_region
|
||||
|
||||
free_list_blocks := _round_up_to_nearest(FREE_LIST_DEFAULT_CAP, FREE_LIST_ENTRIES_PER_BLOCK)
|
||||
_region_assign_free_list(new_region, &new_region.memory[1], u16(free_list_blocks) * FREE_LIST_ENTRIES_PER_BLOCK)
|
||||
|
||||
// + 2 to account for free_list's allocation header
|
||||
first_user_block := len(new_region.hdr.free_list) / FREE_LIST_ENTRIES_PER_BLOCK + 2
|
||||
|
||||
// first allocation header (this is a free list)
|
||||
new_region.memory[0].next = u16(first_user_block)
|
||||
new_region.memory[0].free_idx = NOT_FREE
|
||||
new_region.memory[first_user_block].idx = u16(first_user_block)
|
||||
new_region.memory[first_user_block].next = BLOCKS_PER_REGION - 1
|
||||
|
||||
// add the first user block to the free list
|
||||
new_region.hdr.free_list[0] = u16(first_user_block)
|
||||
new_region.hdr.free_list_len = 1
|
||||
new_region.hdr.free_blocks = _get_block_count(new_region.memory[first_user_block]) + 1
|
||||
|
||||
for r := sync.atomic_compare_exchange_strong(&global_regions, nil, new_region);
|
||||
r != nil;
|
||||
r = sync.atomic_compare_exchange_strong(&r.hdr.next_region, nil, new_region) {}
|
||||
|
||||
return new_region
|
||||
}
|
||||
|
||||
_region_resize :: proc(alloc: ^Allocation_Header, new_size: int, alloc_is_free_list: bool = false) -> rawptr #no_bounds_check {
|
||||
assert(alloc.free_idx == NOT_FREE)
|
||||
|
||||
old_memory := mem.ptr_offset(alloc, 1)
|
||||
|
||||
old_block_count := _get_block_count(alloc^)
|
||||
new_block_count := u16(
|
||||
max(MINIMUM_BLOCK_COUNT, _round_up_to_nearest(new_size, BLOCK_SIZE) / BLOCK_SIZE),
|
||||
)
|
||||
if new_block_count < old_block_count {
|
||||
if new_block_count - old_block_count >= MINIMUM_BLOCK_COUNT {
|
||||
_region_find_and_assign_local(alloc)
|
||||
_region_segment(_local_region, alloc, new_block_count, alloc.free_idx)
|
||||
new_block_count = _get_block_count(alloc^)
|
||||
sync.atomic_store_explicit(&_local_region.hdr.local_addr, &_local_region, .Release)
|
||||
}
|
||||
// need to zero anything within the new block that lies beyond new_size
|
||||
extra_bytes := int(new_block_count * BLOCK_SIZE) - new_size
|
||||
extra_bytes_ptr := mem.ptr_offset((^u8)(alloc), new_size + BLOCK_SIZE)
|
||||
mem.zero(extra_bytes_ptr, extra_bytes)
|
||||
return old_memory
|
||||
}
|
||||
|
||||
if !alloc_is_free_list {
|
||||
_region_find_and_assign_local(alloc)
|
||||
}
|
||||
defer if !alloc_is_free_list {
|
||||
sync.atomic_store_explicit(&_local_region.hdr.local_addr, &_local_region, .Release)
|
||||
}
|
||||
|
||||
// First, let's see if we can grow in place.
|
||||
if alloc.next != BLOCKS_PER_REGION - 1 && _local_region.memory[alloc.next].free_idx != NOT_FREE {
|
||||
next_alloc := _local_region.memory[alloc.next]
|
||||
total_available := old_block_count + _get_block_count(next_alloc) + 1
|
||||
if total_available >= new_block_count {
|
||||
alloc.next = next_alloc.next
|
||||
_local_region.memory[alloc.next].prev = alloc.idx
|
||||
if total_available - new_block_count > BLOCK_SEGMENT_THRESHOLD {
|
||||
_region_segment(_local_region, alloc, new_block_count, next_alloc.free_idx)
|
||||
} else {
|
||||
_region_free_list_remove(_local_region, next_alloc.free_idx)
|
||||
}
|
||||
mem.zero(&_local_region.memory[next_alloc.idx], int(alloc.next - next_alloc.idx) * BLOCK_SIZE)
|
||||
_local_region.hdr.last_used = max(alloc.next, _local_region.hdr.last_used)
|
||||
_local_region.hdr.free_blocks -= (_get_block_count(alloc^) - old_block_count)
|
||||
if alloc_is_free_list {
|
||||
_region_assign_free_list(_local_region, old_memory, _get_block_count(alloc^))
|
||||
}
|
||||
return old_memory
|
||||
}
|
||||
}
|
||||
|
||||
// If we made it this far, we need to resize, copy, zero and free.
|
||||
region_iter := _local_region
|
||||
local_region_idx := _region_get_local_idx()
|
||||
back_idx := -1
|
||||
idx: u16
|
||||
infinite: for {
|
||||
for i := 0; i < len(region_iter.hdr.free_list); i += 1 {
|
||||
idx = region_iter.hdr.free_list[i]
|
||||
if _get_block_count(region_iter.memory[idx]) >= new_block_count {
|
||||
break infinite
|
||||
}
|
||||
}
|
||||
if region_iter != _local_region {
|
||||
sync.atomic_store_explicit(
|
||||
®ion_iter.hdr.local_addr,
|
||||
region_iter.hdr.reset_addr,
|
||||
.Release,
|
||||
)
|
||||
}
|
||||
region_iter, back_idx = _region_retrieve_with_space(new_block_count, local_region_idx, back_idx)
|
||||
}
|
||||
if region_iter != _local_region {
|
||||
sync.atomic_store_explicit(
|
||||
®ion_iter.hdr.local_addr,
|
||||
region_iter.hdr.reset_addr,
|
||||
.Release,
|
||||
)
|
||||
}
|
||||
|
||||
// copy from old memory
|
||||
new_memory, used_blocks := _region_get_block(region_iter, idx, new_block_count)
|
||||
mem.copy(new_memory, old_memory, int(old_block_count * BLOCK_SIZE))
|
||||
|
||||
// zero any new memory
|
||||
addon_section := mem.ptr_offset((^Allocation_Header)(new_memory), old_block_count)
|
||||
new_blocks := used_blocks - old_block_count
|
||||
mem.zero(addon_section, int(new_blocks) * BLOCK_SIZE)
|
||||
|
||||
region_iter.hdr.free_blocks -= (used_blocks + 1)
|
||||
|
||||
// Set free_list before freeing.
|
||||
if alloc_is_free_list {
|
||||
_region_assign_free_list(_local_region, new_memory, used_blocks)
|
||||
}
|
||||
|
||||
// free old memory
|
||||
_region_local_free(alloc)
|
||||
return new_memory
|
||||
}
|
||||
|
||||
_region_local_free :: proc(alloc: ^Allocation_Header) #no_bounds_check {
|
||||
alloc := alloc
|
||||
add_to_free_list := true
|
||||
|
||||
idx := sync.atomic_load(&alloc.idx)
|
||||
prev := sync.atomic_load(&alloc.prev)
|
||||
next := sync.atomic_load(&alloc.next)
|
||||
block_count := next - idx - 1
|
||||
free_blocks := sync.atomic_load(&_local_region.hdr.free_blocks) + block_count + 1
|
||||
sync.atomic_store_explicit(&_local_region.hdr.free_blocks, free_blocks, .Release)
|
||||
|
||||
// try to merge with prev
|
||||
if idx > 0 && sync.atomic_load(&_local_region.memory[prev].free_idx) != NOT_FREE {
|
||||
sync.atomic_store_explicit(&_local_region.memory[prev].next, next, .Release)
|
||||
_local_region.memory[next].prev = prev
|
||||
alloc = &_local_region.memory[prev]
|
||||
add_to_free_list = false
|
||||
}
|
||||
|
||||
// try to merge with next
|
||||
if next < BLOCKS_PER_REGION - 1 && sync.atomic_load(&_local_region.memory[next].free_idx) != NOT_FREE {
|
||||
old_next := next
|
||||
sync.atomic_store_explicit(&alloc.next, sync.atomic_load(&_local_region.memory[old_next].next), .Release)
|
||||
|
||||
sync.atomic_store_explicit(&_local_region.memory[next].prev, idx, .Release)
|
||||
|
||||
if add_to_free_list {
|
||||
sync.atomic_store_explicit(&_local_region.hdr.free_list[_local_region.memory[old_next].free_idx], idx, .Release)
|
||||
sync.atomic_store_explicit(&alloc.free_idx, _local_region.memory[old_next].free_idx, .Release)
|
||||
} else {
|
||||
// NOTE: We have already merged with prev, and now merged with next.
|
||||
// Now, we are actually going to remove from the free_list.
|
||||
_region_free_list_remove(_local_region, _local_region.memory[old_next].free_idx)
|
||||
}
|
||||
add_to_free_list = false
|
||||
}
|
||||
|
||||
// This is the only place where anything is appended to the free list.
|
||||
if add_to_free_list {
|
||||
fl := _local_region.hdr.free_list
|
||||
fl_len := sync.atomic_load(&_local_region.hdr.free_list_len)
|
||||
sync.atomic_store_explicit(&alloc.free_idx, fl_len, .Release)
|
||||
fl[alloc.free_idx] = idx
|
||||
sync.atomic_store_explicit(&_local_region.hdr.free_list_len, fl_len + 1, .Release)
|
||||
if int(fl_len + 1) == len(fl) {
|
||||
free_alloc := _get_allocation_header(mem.raw_data(_local_region.hdr.free_list))
|
||||
_region_resize(free_alloc, len(fl) * 2 * size_of(fl[0]), true)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_region_assign_free_list :: proc(region: ^Region, memory: rawptr, blocks: u16) {
|
||||
raw_free_list := transmute(mem.Raw_Slice)region.hdr.free_list
|
||||
raw_free_list.len = int(blocks) * FREE_LIST_ENTRIES_PER_BLOCK
|
||||
raw_free_list.data = memory
|
||||
region.hdr.free_list = transmute([]u16)(raw_free_list)
|
||||
}
|
||||
|
||||
_region_retrieve_with_space :: proc(blocks: u16, local_idx: int = -1, back_idx: int = -1) -> (^Region, int) {
|
||||
r: ^Region
|
||||
idx: int
|
||||
for r = sync.atomic_load(&global_regions); r != nil; r = r.hdr.next_region {
|
||||
if idx == local_idx || idx < back_idx || sync.atomic_load(&r.hdr.free_blocks) < blocks {
|
||||
idx += 1
|
||||
continue
|
||||
}
|
||||
idx += 1
|
||||
local_addr: ^^Region = sync.atomic_load(&r.hdr.local_addr)
|
||||
if local_addr != CURRENTLY_ACTIVE {
|
||||
res := sync.atomic_compare_exchange_strong_explicit(
|
||||
&r.hdr.local_addr,
|
||||
local_addr,
|
||||
CURRENTLY_ACTIVE,
|
||||
.Acquire,
|
||||
.Relaxed,
|
||||
)
|
||||
if res == local_addr {
|
||||
r.hdr.reset_addr = local_addr
|
||||
return r, idx
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return _new_region(), idx
|
||||
}
|
||||
|
||||
_region_retrieve_from_addr :: proc(addr: rawptr) -> ^Region {
|
||||
r: ^Region
|
||||
for r = global_regions; r != nil; r = r.hdr.next_region {
|
||||
if _region_contains_mem(r, addr) {
|
||||
return r
|
||||
}
|
||||
}
|
||||
unreachable()
|
||||
}
|
||||
|
||||
_region_get_block :: proc(region: ^Region, idx, blocks_needed: u16) -> (rawptr, u16) #no_bounds_check {
|
||||
alloc := ®ion.memory[idx]
|
||||
|
||||
assert(alloc.free_idx != NOT_FREE)
|
||||
assert(alloc.next > 0)
|
||||
|
||||
block_count := _get_block_count(alloc^)
|
||||
if block_count - blocks_needed > BLOCK_SEGMENT_THRESHOLD {
|
||||
_region_segment(region, alloc, blocks_needed, alloc.free_idx)
|
||||
} else {
|
||||
_region_free_list_remove(region, alloc.free_idx)
|
||||
}
|
||||
|
||||
alloc.free_idx = NOT_FREE
|
||||
return mem.ptr_offset(alloc, 1), _get_block_count(alloc^)
|
||||
}
|
||||
|
||||
_region_segment :: proc(region: ^Region, alloc: ^Allocation_Header, blocks, new_free_idx: u16) #no_bounds_check {
|
||||
old_next := alloc.next
|
||||
alloc.next = alloc.idx + blocks + 1
|
||||
region.memory[old_next].prev = alloc.next
|
||||
|
||||
// Initialize alloc.next allocation header here.
|
||||
region.memory[alloc.next].prev = alloc.idx
|
||||
region.memory[alloc.next].next = old_next
|
||||
region.memory[alloc.next].idx = alloc.next
|
||||
region.memory[alloc.next].free_idx = new_free_idx
|
||||
|
||||
// Replace our original spot in the free_list with new segment.
|
||||
region.hdr.free_list[new_free_idx] = alloc.next
|
||||
}
|
||||
|
||||
_region_get_local_idx :: proc() -> int {
|
||||
idx: int
|
||||
for r := sync.atomic_load(&global_regions); r != nil; r = r.hdr.next_region {
|
||||
if r == _local_region {
|
||||
return idx
|
||||
}
|
||||
idx += 1
|
||||
}
|
||||
|
||||
return -1
|
||||
}
|
||||
|
||||
_region_find_and_assign_local :: proc(alloc: ^Allocation_Header) {
|
||||
// Find the region that contains this memory
|
||||
if !_region_contains_mem(_local_region, alloc) {
|
||||
_local_region = _region_retrieve_from_addr(alloc)
|
||||
}
|
||||
|
||||
// At this point, _local_region is set correctly. Spin until acquire
|
||||
res := CURRENTLY_ACTIVE
|
||||
|
||||
for res == CURRENTLY_ACTIVE {
|
||||
res = sync.atomic_compare_exchange_strong_explicit(
|
||||
&_local_region.hdr.local_addr,
|
||||
&_local_region,
|
||||
CURRENTLY_ACTIVE,
|
||||
.Acquire,
|
||||
.Relaxed,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
_region_contains_mem :: proc(r: ^Region, memory: rawptr) -> bool #no_bounds_check {
|
||||
if r == nil {
|
||||
return false
|
||||
}
|
||||
mem_int := uintptr(memory)
|
||||
return mem_int >= uintptr(&r.memory[0]) && mem_int <= uintptr(&r.memory[BLOCKS_PER_REGION - 1])
|
||||
}
|
||||
|
||||
_region_free_list_remove :: proc(region: ^Region, free_idx: u16) #no_bounds_check {
|
||||
// pop, swap and update allocation hdr
|
||||
if n := region.hdr.free_list_len - 1; free_idx != n {
|
||||
region.hdr.free_list[free_idx] = sync.atomic_load(®ion.hdr.free_list[n])
|
||||
alloc_idx := region.hdr.free_list[free_idx]
|
||||
sync.atomic_store_explicit(®ion.memory[alloc_idx].free_idx, free_idx, .Release)
|
||||
}
|
||||
region.hdr.free_list_len -= 1
|
||||
}
|
||||
|
||||
//
|
||||
// Direct mmap
|
||||
//
|
||||
_direct_mmap_alloc :: proc(size: int) -> rawptr {
|
||||
mmap_size := _round_up_to_nearest(size + BLOCK_SIZE, PAGE_SIZE)
|
||||
new_allocation, errno := linux.mmap(0, uint(mmap_size), MMAP_PROT, MMAP_FLAGS, -1, 0)
|
||||
if errno != .NONE {
|
||||
return nil
|
||||
}
|
||||
|
||||
alloc := (^Allocation_Header)(uintptr(new_allocation))
|
||||
alloc.requested = u64(size) // NOTE: requested = requested size
|
||||
alloc.requested += IS_DIRECT_MMAP
|
||||
return rawptr(mem.ptr_offset(alloc, 1))
|
||||
}
|
||||
|
||||
_direct_mmap_resize :: proc(alloc: ^Allocation_Header, new_size: int) -> rawptr {
|
||||
old_requested := int(alloc.requested & REQUESTED_MASK)
|
||||
old_mmap_size := _round_up_to_nearest(old_requested + BLOCK_SIZE, PAGE_SIZE)
|
||||
new_mmap_size := _round_up_to_nearest(new_size + BLOCK_SIZE, PAGE_SIZE)
|
||||
if int(new_mmap_size) < MMAP_TO_REGION_SHRINK_THRESHOLD {
|
||||
return _direct_mmap_to_region(alloc, new_size)
|
||||
} else if old_requested == new_size {
|
||||
return mem.ptr_offset(alloc, 1)
|
||||
}
|
||||
|
||||
new_allocation, errno := linux.mremap(alloc, uint(old_mmap_size), uint(new_mmap_size), {.MAYMOVE})
|
||||
if errno != .NONE {
|
||||
return nil
|
||||
}
|
||||
|
||||
new_header := (^Allocation_Header)(uintptr(new_allocation))
|
||||
new_header.requested = u64(new_size)
|
||||
new_header.requested += IS_DIRECT_MMAP
|
||||
|
||||
if new_mmap_size > old_mmap_size {
|
||||
// new section may not be pointer aligned, so cast to ^u8
|
||||
new_section := mem.ptr_offset((^u8)(new_header), old_requested + BLOCK_SIZE)
|
||||
mem.zero(new_section, new_mmap_size - old_mmap_size)
|
||||
}
|
||||
return mem.ptr_offset(new_header, 1)
|
||||
|
||||
}
|
||||
|
||||
_direct_mmap_from_region :: proc(alloc: ^Allocation_Header, new_size: int) -> rawptr {
|
||||
new_memory := _direct_mmap_alloc(new_size)
|
||||
if new_memory != nil {
|
||||
old_memory := mem.ptr_offset(alloc, 1)
|
||||
mem.copy(new_memory, old_memory, int(_get_block_count(alloc^)) * BLOCK_SIZE)
|
||||
}
|
||||
_region_find_and_assign_local(alloc)
|
||||
_region_local_free(alloc)
|
||||
sync.atomic_store_explicit(&_local_region.hdr.local_addr, &_local_region, .Release)
|
||||
return new_memory
|
||||
}
|
||||
|
||||
_direct_mmap_to_region :: proc(alloc: ^Allocation_Header, new_size: int) -> rawptr {
|
||||
new_memory := heap_alloc(new_size)
|
||||
if new_memory != nil {
|
||||
mem.copy(new_memory, mem.ptr_offset(alloc, -1), new_size)
|
||||
_direct_mmap_free(alloc)
|
||||
}
|
||||
return new_memory
|
||||
}
|
||||
|
||||
_direct_mmap_free :: proc(alloc: ^Allocation_Header) {
|
||||
requested := int(alloc.requested & REQUESTED_MASK)
|
||||
mmap_size := _round_up_to_nearest(requested + BLOCK_SIZE, PAGE_SIZE)
|
||||
linux.munmap(alloc, uint(mmap_size))
|
||||
}
|
||||
|
||||
//
|
||||
// Util
|
||||
//
|
||||
|
||||
_get_block_count :: #force_inline proc(alloc: Allocation_Header) -> u16 {
|
||||
return alloc.next - alloc.idx - 1
|
||||
}
|
||||
|
||||
_get_allocation_header :: #force_inline proc(raw_mem: rawptr) -> ^Allocation_Header {
|
||||
return mem.ptr_offset((^Allocation_Header)(raw_mem), -1)
|
||||
}
|
||||
|
||||
_round_up_to_nearest :: #force_inline proc(size, round: int) -> int {
|
||||
return (size-1) + round - (size-1) % round
|
||||
}
|
||||
import "base:runtime"
|
||||
|
||||
_heap_allocator_proc :: runtime.heap_allocator_proc
|
||||
|
||||
6
core/os/os2/heap_wasi.odin
Normal file
@@ -0,0 +1,6 @@
|
||||
#+private
|
||||
package os2
|
||||
|
||||
import "base:runtime"
|
||||
|
||||
_heap_allocator_proc :: runtime.wasm_allocator_proc
|
||||
@@ -2,6 +2,8 @@ package os2
|
||||
|
||||
import "base:runtime"
|
||||
|
||||
import "core:path/filepath"
|
||||
|
||||
Path_Separator :: _Path_Separator // OS-Specific
|
||||
Path_Separator_String :: _Path_Separator_String // OS-Specific
|
||||
Path_List_Separator :: _Path_List_Separator // OS-Specific
|
||||
@@ -39,3 +41,13 @@ setwd :: set_working_directory
|
||||
set_working_directory :: proc(dir: string) -> (err: Error) {
|
||||
return _set_working_directory(dir)
|
||||
}
|
||||
|
||||
get_executable_path :: proc(allocator: runtime.Allocator) -> (path: string, err: Error) {
|
||||
return _get_executable_path(allocator)
|
||||
}
|
||||
|
||||
get_executable_directory :: proc(allocator: runtime.Allocator) -> (path: string, err: Error) {
|
||||
path = _get_executable_path(allocator) or_return
|
||||
path, _ = filepath.split(path)
|
||||
return
|
||||
}
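A hedged usage sketch of the new API (not part of the commit; the standalone `package main` wrapper and the temp-allocator choice are assumptions for illustration):

package main

import os2 "core:os/os2"

main :: proc() {
	// Resolve the directory that contains the running executable.
	dir, err := os2.get_executable_directory(context.temp_allocator)
	if err == nil {
		// e.g. use `dir` to locate assets shipped next to the binary.
		_ = dir
	}
}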
|
||||
|
||||
17
core/os/os2/path_darwin.odin
Normal file
@@ -0,0 +1,17 @@
|
||||
package os2
|
||||
|
||||
import "base:runtime"
|
||||
|
||||
import "core:sys/darwin"
|
||||
import "core:sys/posix"
|
||||
|
||||
_get_executable_path :: proc(allocator: runtime.Allocator) -> (path: string, err: Error) {
|
||||
buffer: [darwin.PIDPATHINFO_MAXSIZE]byte = ---
|
||||
ret := darwin.proc_pidpath(posix.getpid(), raw_data(buffer[:]), len(buffer))
|
||||
if ret > 0 {
|
||||
return clone_string(string(buffer[:ret]), allocator)
|
||||
}
|
||||
|
||||
err = _get_platform_error()
|
||||
return
|
||||
}
|
||||
29
core/os/os2/path_freebsd.odin
Normal file
@@ -0,0 +1,29 @@
|
||||
package os2
|
||||
|
||||
import "base:runtime"
|
||||
|
||||
import "core:sys/freebsd"
|
||||
import "core:sys/posix"
|
||||
|
||||
_get_executable_path :: proc(allocator: runtime.Allocator) -> (path: string, err: Error) {
|
||||
req := []freebsd.MIB_Identifier{.CTL_KERN, .KERN_PROC, .KERN_PROC_PATHNAME, freebsd.MIB_Identifier(-1)}
|
||||
|
||||
size: uint
|
||||
if ret := freebsd.sysctl(req, nil, &size, nil, 0); ret != .NONE {
|
||||
err = _get_platform_error(posix.Errno(ret))
|
||||
return
|
||||
}
|
||||
assert(size > 0)
|
||||
|
||||
buf := make([]byte, size, allocator) or_return
|
||||
defer if err != nil { delete(buf, allocator) }
|
||||
|
||||
assert(uint(len(buf)) == size)
|
||||
|
||||
if ret := freebsd.sysctl(req, raw_data(buf), &size, nil, 0); ret != .NONE {
|
||||
err = _get_platform_error(posix.Errno(ret))
|
||||
return
|
||||
}
|
||||
|
||||
return string(buf[:size]), nil
|
||||
}
|
||||
@@ -1,9 +1,10 @@
|
||||
#+private
|
||||
package os2
|
||||
|
||||
import "base:runtime"
|
||||
|
||||
import "core:strings"
|
||||
import "core:strconv"
|
||||
import "base:runtime"
|
||||
import "core:sys/linux"
|
||||
|
||||
_Path_Separator :: '/'
|
||||
@@ -77,8 +78,6 @@ _mkdir_all :: proc(path: string, perm: int) -> Error {
|
||||
}
|
||||
|
||||
_remove_all :: proc(path: string) -> Error {
|
||||
DT_DIR :: 4
|
||||
|
||||
remove_all_dir :: proc(dfd: linux.Fd) -> Error {
|
||||
n := 64
|
||||
buf := make([]u8, n)
|
||||
@@ -173,6 +172,25 @@ _set_working_directory :: proc(dir: string) -> Error {
|
||||
return _get_platform_error(linux.chdir(dir_cstr))
|
||||
}
|
||||
|
||||
_get_executable_path :: proc(allocator: runtime.Allocator) -> (path: string, err: Error) {
|
||||
TEMP_ALLOCATOR_GUARD()
|
||||
|
||||
buf := make([dynamic]byte, 1024, temp_allocator()) or_return
|
||||
for {
|
||||
n, errno := linux.readlink("/proc/self/exe", buf[:])
|
||||
if errno != .NONE {
|
||||
err = _get_platform_error(errno)
|
||||
return
|
||||
}
|
||||
|
||||
if n < len(buf) {
|
||||
return clone_string(string(buf[:n]), allocator)
|
||||
}
|
||||
|
||||
resize(&buf, len(buf)*2) or_return
|
||||
}
|
||||
}
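A short note on the loop above (a sketch, not part of the commit): readlink truncates silently and does not NUL-terminate, so a result that fills the whole buffer is ambiguous; only n < len(buf) proves the complete target was read, which is why the buffer is doubled and the call retried otherwise. An illustrative run with hypothetical sizes:

	// len(buf) = 1024, readlink returns 1024 -> ambiguous, grow to 2048 and retry
	// len(buf) = 2048, readlink returns 1970 -> 1970 < 2048, full path obtained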

_get_full_path :: proc(fd: linux.Fd, allocator: runtime.Allocator) -> (fullpath: string, err: Error) {
	PROC_FD_PATH :: "/proc/self/fd/"

24
core/os/os2/path_netbsd.odin
Normal file
@@ -0,0 +1,24 @@
package os2

import "base:runtime"

import "core:sys/posix"

_get_executable_path :: proc(allocator: runtime.Allocator) -> (path: string, err: Error) {
	TEMP_ALLOCATOR_GUARD()

	buf := make([dynamic]byte, 1024, temp_allocator()) or_return
	for {
		n := posix.readlink("/proc/curproc/exe", raw_data(buf), len(buf))
		if n < 0 {
			err = _get_platform_error()
			return
		}

		if n < len(buf) {
			return clone_string(string(buf[:n]), allocator)
		}

		resize(&buf, len(buf)*2) or_return
	}
}
57
core/os/os2/path_openbsd.odin
Normal file
@@ -0,0 +1,57 @@
package os2

import "base:runtime"

import "core:strings"
import "core:sys/posix"

_get_executable_path :: proc(allocator: runtime.Allocator) -> (path: string, err: Error) {
	// OpenBSD does not have an API for this, we do our best below.

	if len(runtime.args__) <= 0 {
		err = .Invalid_Path
		return
	}

	real :: proc(path: cstring, allocator: runtime.Allocator) -> (out: string, err: Error) {
		real := posix.realpath(path)
		if real == nil {
			err = _get_platform_error()
			return
		}
		defer posix.free(real)
		return clone_string(string(real), allocator)
	}

	arg := runtime.args__[0]
	sarg := string(arg)

	if len(sarg) == 0 {
		err = .Invalid_Path
		return
	}

	if sarg[0] == '.' || sarg[0] == '/' {
		return real(arg, allocator)
	}

	TEMP_ALLOCATOR_GUARD()

	buf := strings.builder_make(temp_allocator())

	paths := get_env("PATH", temp_allocator())
	for dir in strings.split_iterator(&paths, ":") {
		strings.builder_reset(&buf)
		strings.write_string(&buf, dir)
		strings.write_string(&buf, "/")
		strings.write_string(&buf, sarg)

		cpath := strings.to_cstring(&buf) or_return
		if posix.access(cpath, {.X_OK}) == .OK {
			return real(cpath, allocator)
		}
	}

	err = .Invalid_Path
	return
}
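A worked trace of the PATH fallback above, with hypothetical values (neither the binary name nor the PATH entries come from the diff):

	// Process started as plain `odin`, PATH = "/home/u/bin:/usr/local/bin":
	//   candidate "/home/u/bin/odin"    -> posix.access(.., {.X_OK}) fails, try next dir
	//   candidate "/usr/local/bin/odin" -> access succeeds, realpath() canonicalises it
	//                                      and the cloned string is returned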
@@ -81,7 +81,7 @@ _remove_all :: proc(path: string) -> Error {

		fullpath, _ := concatenate({path, "/", string(cname), "\x00"}, temp_allocator())
		if entry.d_type == .DIR {
			_remove_all(fullpath[:len(fullpath)-1])
			_remove_all(fullpath[:len(fullpath)-1]) or_return
		} else {
			if posix.unlink(cstring(raw_data(fullpath))) != .OK {
				return _get_platform_error()

117
core/os/os2/path_wasi.odin
Normal file
@@ -0,0 +1,117 @@
#+private
package os2

import "base:runtime"

import "core:path/filepath"
import "core:sync"
import "core:sys/wasm/wasi"

_Path_Separator :: '/'
_Path_Separator_String :: "/"
_Path_List_Separator :: ':'

_is_path_separator :: proc(c: byte) -> bool {
	return c == _Path_Separator
}

_mkdir :: proc(name: string, perm: int) -> Error {
	dir_fd, relative, ok := match_preopen(name)
	if !ok {
		return .Invalid_Path
	}

	return _get_platform_error(wasi.path_create_directory(dir_fd, relative))
}

_mkdir_all :: proc(path: string, perm: int) -> Error {
	if path == "" {
		return .Invalid_Path
	}

	TEMP_ALLOCATOR_GUARD()

	if exists(path) {
		return .Exist
	}

	clean_path := filepath.clean(path, temp_allocator())
	return internal_mkdir_all(clean_path)

	internal_mkdir_all :: proc(path: string) -> Error {
		dir, file := filepath.split(path)
		if file != path && dir != "/" {
			if len(dir) > 1 && dir[len(dir) - 1] == '/' {
				dir = dir[:len(dir) - 1]
			}
			internal_mkdir_all(dir) or_return
		}

		err := _mkdir(path, 0)
		if err == .Exist { err = nil }
		return err
	}
}

_remove_all :: proc(path: string) -> (err: Error) {
	// PERF: this works, but wastes a bunch of memory using the read_directory_iterator API
	// and using open instead of wasi fds directly.
	{
		dir := open(path) or_return
		defer close(dir)

		iter := read_directory_iterator_create(dir)
		defer read_directory_iterator_destroy(&iter)

		for fi in read_directory_iterator(&iter) {
			_ = read_directory_iterator_error(&iter) or_break

			if fi.type == .Directory {
				_remove_all(fi.fullpath) or_return
			} else {
				remove(fi.fullpath) or_return
			}
		}

		_ = read_directory_iterator_error(&iter) or_return
	}

	return remove(path)
}

g_wd: string
g_wd_mutex: sync.Mutex

_get_working_directory :: proc(allocator: runtime.Allocator) -> (dir: string, err: Error) {
	sync.guard(&g_wd_mutex)

	return clone_string(g_wd if g_wd != "" else "/", allocator)
}

_set_working_directory :: proc(dir: string) -> (err: Error) {
	sync.guard(&g_wd_mutex)

	if dir == g_wd {
		return
	}

	if g_wd != "" {
		delete(g_wd, file_allocator())
	}

	g_wd = clone_string(dir, file_allocator()) or_return
	return
}

_get_executable_path :: proc(allocator: runtime.Allocator) -> (path: string, err: Error) {
	if len(args) <= 0 {
		return clone_string("/", allocator)
	}

	arg := args[0]
	if len(arg) > 0 && (arg[0] == '.' || arg[0] == '/') {
		return clone_string(arg, allocator)
	}

	return concatenate({"/", arg}, allocator)
}
@@ -136,6 +136,26 @@ _set_working_directory :: proc(dir: string) -> (err: Error) {
	return
}

_get_executable_path :: proc(allocator: runtime.Allocator) -> (path: string, err: Error) {
	TEMP_ALLOCATOR_GUARD()

	buf := make([dynamic]u16, 512, temp_allocator()) or_return
	for {
		ret := win32.GetModuleFileNameW(nil, raw_data(buf), win32.DWORD(len(buf)))
		if ret == 0 {
			err = _get_platform_error()
			return
		}

		if ret == win32.DWORD(len(buf)) && win32.GetLastError() == win32.ERROR_INSUFFICIENT_BUFFER {
			resize(&buf, len(buf)*2) or_return
			continue
		}

		return win32_utf16_to_utf8(buf[:ret], allocator)
	}
}

can_use_long_paths: bool

@(init)

@@ -10,8 +10,8 @@ _pipe :: proc() -> (r, w: ^File, err: Error) {
		return nil, nil,_get_platform_error(errno)
	}

	r = _new_file(uintptr(fds[0])) or_return
	w = _new_file(uintptr(fds[1])) or_return
	r = _new_file(uintptr(fds[0]), "", file_allocator()) or_return
	w = _new_file(uintptr(fds[1]), "", file_allocator()) or_return

	return
}

@@ -21,7 +21,7 @@ _pipe :: proc() -> (r, w: ^File, err: Error) {
		return
	}

	r = __new_file(fds[0])
	r = __new_file(fds[0], file_allocator())
	ri := (^File_Impl)(r.impl)

	rname := strings.builder_make(file_allocator())
@@ -29,9 +29,9 @@ _pipe :: proc() -> (r, w: ^File, err: Error) {
	strings.write_string(&rname, "/dev/fd/")
	strings.write_int(&rname, int(fds[0]))
	ri.name = strings.to_string(rname)
	ri.cname = strings.to_cstring(&rname)
	ri.cname = strings.to_cstring(&rname) or_return

	w = __new_file(fds[1])
	w = __new_file(fds[1], file_allocator())
	wi := (^File_Impl)(w.impl)

	wname := strings.builder_make(file_allocator())
@@ -39,7 +39,7 @@ _pipe :: proc() -> (r, w: ^File, err: Error) {
	strings.write_string(&wname, "/dev/fd/")
	strings.write_int(&wname, int(fds[1]))
	wi.name = strings.to_string(wname)
	wi.cname = strings.to_cstring(&wname)
	wi.cname = strings.to_cstring(&wname) or_return

	return
}

13
core/os/os2/pipe_wasi.odin
Normal file
@@ -0,0 +1,13 @@
#+private
package os2

_pipe :: proc() -> (r, w: ^File, err: Error) {
	err = .Unsupported
	return
}

@(require_results)
_pipe_has_data :: proc(r: ^File) -> (ok: bool, err: Error) {
	err = .Unsupported
	return
}
@@ -290,12 +290,21 @@ process_open :: proc(pid: int, flags := Process_Open_Flags {}) -> (Process, Erro
	return _process_open(pid, flags)
}


/*
OS-specific process attributes.
*/
Process_Attributes :: struct {
	sys_attr: _Sys_Process_Attributes,
}

/*
The description of how a process should be created.
*/
Process_Desc :: struct {
	// OS-specific attributes.
	sys_attr: _Sys_Process_Attributes,
	sys_attr: Process_Attributes,

	// The working directory of the process. If the string has length 0, the
	// working directory is assumed to be the current working directory of the
	// current process.

@@ -111,7 +111,7 @@ _process_info_by_pid :: proc(pid: int, selection: Process_Info_Fields, allocator

	strings.write_string(&path_builder, "/proc/")
	strings.write_int(&path_builder, pid)
	proc_fd, errno := linux.open(strings.to_cstring(&path_builder), _OPENDIR_FLAGS)
	proc_fd, errno := linux.open(strings.to_cstring(&path_builder) or_return, _OPENDIR_FLAGS)
	if errno != .NONE {
		err = _get_platform_error(errno)
		return
@@ -169,7 +169,7 @@ _process_info_by_pid :: proc(pid: int, selection: Process_Info_Fields, allocator
		strings.write_int(&path_builder, pid)
		strings.write_string(&path_builder, "/cmdline")

		cmdline_bytes, cmdline_err := _read_entire_pseudo_file(strings.to_cstring(&path_builder), temp_allocator())
		cmdline_bytes, cmdline_err := _read_entire_pseudo_file(strings.to_cstring(&path_builder) or_return, temp_allocator())
		if cmdline_err != nil || len(cmdline_bytes) == 0 {
			err = cmdline_err
			break cmdline_if
@@ -190,7 +190,7 @@ _process_info_by_pid :: proc(pid: int, selection: Process_Info_Fields, allocator
		strings.write_int(&path_builder, pid)
		strings.write_string(&path_builder, "/cwd")

		cwd, cwd_err = _read_link_cstr(strings.to_cstring(&path_builder), temp_allocator()) // allowed to fail
		cwd, cwd_err = _read_link_cstr(strings.to_cstring(&path_builder) or_return, temp_allocator()) // allowed to fail
		if cwd_err == nil && .Working_Dir in selection {
			info.working_dir = strings.clone(cwd, allocator) or_return
			info.fields += {.Working_Dir}
@@ -258,7 +258,7 @@ _process_info_by_pid :: proc(pid: int, selection: Process_Info_Fields, allocator
		strings.write_int(&path_builder, pid)
		strings.write_string(&path_builder, "/stat")

		proc_stat_bytes, stat_err := _read_entire_pseudo_file(strings.to_cstring(&path_builder), temp_allocator())
		proc_stat_bytes, stat_err := _read_entire_pseudo_file(strings.to_cstring(&path_builder) or_return, temp_allocator())
		if stat_err != nil {
			err = stat_err
			break stat_if
@@ -330,7 +330,7 @@ _process_info_by_pid :: proc(pid: int, selection: Process_Info_Fields, allocator
		strings.write_int(&path_builder, pid)
		strings.write_string(&path_builder, "/environ")

		if env_bytes, env_err := _read_entire_pseudo_file(strings.to_cstring(&path_builder), temp_allocator()); env_err == nil {
		if env_bytes, env_err := _read_entire_pseudo_file(strings.to_cstring(&path_builder) or_return, temp_allocator()); env_err == nil {
			env := string(env_bytes)

			env_list := make([dynamic]string, allocator) or_return
@@ -384,14 +384,6 @@ _Sys_Process_Attributes :: struct {}

@(private="package")
_process_start :: proc(desc: Process_Desc) -> (process: Process, err: Error) {
	has_executable_permissions :: proc(fd: linux.Fd) -> bool {
		backing: [48]u8
		b := strings.builder_from_bytes(backing[:])
		strings.write_string(&b, "/proc/self/fd/")
		strings.write_int(&b, int(fd))
		return linux.access(strings.to_cstring(&b), linux.X_OK) == .NONE
	}

	TEMP_ALLOCATOR_GUARD()

	if len(desc.command) == 0 {
@@ -411,7 +403,7 @@ _process_start :: proc(desc: Process_Desc) -> (process: Process, err: Error) {
	}

	// search PATH if just a plain name is provided
	exe_fd: linux.Fd
	exe_path: cstring
	executable_name := desc.command[0]
	if strings.index_byte(executable_name, '/') < 0 {
		path_env := get_env("PATH", temp_allocator())
@@ -426,16 +418,11 @@ _process_start :: proc(desc: Process_Desc) -> (process: Process, err: Error) {
			strings.write_byte(&exe_builder, '/')
			strings.write_string(&exe_builder, executable_name)

			exe_path := strings.to_cstring(&exe_builder)
			if exe_fd, errno = linux.openat(dir_fd, exe_path, {.PATH, .CLOEXEC}); errno != .NONE {
				continue
			exe_path = strings.to_cstring(&exe_builder) or_return
			if linux.access(exe_path, linux.X_OK) == .NONE {
				found = true
				break
			}
			if !has_executable_permissions(exe_fd) {
				linux.close(exe_fd)
				continue
			}
			found = true
			break
		}
		if !found {
			// check in cwd to match windows behavior
@@ -443,29 +430,18 @@ _process_start :: proc(desc: Process_Desc) -> (process: Process, err: Error) {
			strings.write_string(&exe_builder, "./")
			strings.write_string(&exe_builder, executable_name)

			exe_path := strings.to_cstring(&exe_builder)
			if exe_fd, errno = linux.openat(dir_fd, exe_path, {.PATH, .CLOEXEC}); errno != .NONE {
			exe_path = strings.to_cstring(&exe_builder) or_return
			if linux.access(exe_path, linux.X_OK) != .NONE {
				return process, .Not_Exist
			}
			if !has_executable_permissions(exe_fd) {
				linux.close(exe_fd)
				return process, .Permission_Denied
			}
		}
	} else {
		exe_path := temp_cstring(executable_name) or_return
		if exe_fd, errno = linux.openat(dir_fd, exe_path, {.PATH, .CLOEXEC}); errno != .NONE {
			return process, _get_platform_error(errno)
		}
		if !has_executable_permissions(exe_fd) {
			linux.close(exe_fd)
			return process, .Permission_Denied
		exe_path = temp_cstring(executable_name) or_return
		if linux.access(exe_path, linux.X_OK) != .NONE {
			return process, .Not_Exist
		}
	}

	// At this point, we have an executable.
	defer linux.close(exe_fd)

	// args and environment need to be a list of cstrings
	// that are terminated by a nil pointer.
	cargs := make([]cstring, len(desc.command) + 1, temp_allocator()) or_return
@@ -492,7 +468,6 @@ _process_start :: proc(desc: Process_Desc) -> (process: Process, err: Error) {
	}
	defer linux.close(child_pipe_fds[READ])


	// TODO: This is the traditional textbook implementation with fork.
	// A more efficient implementation with vfork:
	//
@@ -572,8 +547,13 @@ _process_start :: proc(desc: Process_Desc) -> (process: Process, err: Error) {
		if _, errno = linux.dup2(stderr_fd, STDERR); errno != .NONE {
			write_errno_to_parent_and_abort(child_pipe_fds[WRITE], errno)
		}
		if dir_fd != linux.AT_FDCWD {
			if errno = linux.fchdir(dir_fd); errno != .NONE {
				write_errno_to_parent_and_abort(child_pipe_fds[WRITE], errno)
			}
		}

		errno = linux.execveat(exe_fd, "", &cargs[0], env, {.AT_EMPTY_PATH})
		errno = linux.execveat(dir_fd, exe_path, &cargs[0], env)
		assert(errno != nil)
		write_errno_to_parent_and_abort(child_pipe_fds[WRITE], errno)
	}
@@ -614,7 +594,7 @@ _process_state_update_times :: proc(state: ^Process_State) -> (err: Error) {
	strings.write_string(&path_builder, "/stat")

	stat_buf: []u8
	stat_buf, err = _read_entire_pseudo_file(strings.to_cstring(&path_builder), temp_allocator())
	stat_buf, err = _read_entire_pseudo_file(strings.to_cstring(&path_builder) or_return, temp_allocator())
	if err != nil {
		return
	}

@@ -71,7 +71,7 @@ _process_start :: proc(desc: Process_Desc) -> (process: Process, err: Error) {
		strings.write_byte(&exe_builder, '/')
		strings.write_string(&exe_builder, exe_name)

		if exe_fd := posix.open(strings.to_cstring(&exe_builder), {.CLOEXEC, .EXEC}); exe_fd == -1 {
		if exe_fd := posix.open(strings.to_cstring(&exe_builder) or_return, {.CLOEXEC, .EXEC}); exe_fd == -1 {
			continue
		} else {
			posix.close(exe_fd)
@@ -91,7 +91,7 @@ _process_start :: proc(desc: Process_Desc) -> (process: Process, err: Error) {

		// "hello/./world" is fine right?

		if exe_fd := posix.open(strings.to_cstring(&exe_builder), {.CLOEXEC, .EXEC}); exe_fd == -1 {
		if exe_fd := posix.open(strings.to_cstring(&exe_builder) or_return, {.CLOEXEC, .EXEC}); exe_fd == -1 {
			err = .Not_Exist
			return
		} else {
@@ -102,7 +102,7 @@ _process_start :: proc(desc: Process_Desc) -> (process: Process, err: Error) {
		strings.builder_reset(&exe_builder)
		strings.write_string(&exe_builder, exe_name)

		if exe_fd := posix.open(strings.to_cstring(&exe_builder), {.CLOEXEC, .EXEC}); exe_fd == -1 {
		if exe_fd := posix.open(strings.to_cstring(&exe_builder) or_return, {.CLOEXEC, .EXEC}); exe_fd == -1 {
			err = .Not_Exist
			return
		} else {
@@ -181,7 +181,7 @@ _process_start :: proc(desc: Process_Desc) -> (process: Process, err: Error) {
		if posix.chdir(cwd) != .OK { abort(pipe[WRITE]) }
	}

	res := posix.execve(strings.to_cstring(&exe_builder), raw_data(cmd), env)
	res := posix.execve(strings.to_cstring(&exe_builder) or_return, raw_data(cmd), env)
	assert(res == -1)
	abort(pipe[WRITE])


89
core/os/os2/process_wasi.odin
Normal file
@@ -0,0 +1,89 @@
#+private
package os2

import "base:runtime"

import "core:time"
import "core:sys/wasm/wasi"

_exit :: proc "contextless" (code: int) -> ! {
	wasi.proc_exit(wasi.exitcode_t(code))
}

_get_uid :: proc() -> int {
	return 0
}

_get_euid :: proc() -> int {
	return 0
}

_get_gid :: proc() -> int {
	return 0
}

_get_egid :: proc() -> int {
	return 0
}

_get_pid :: proc() -> int {
	return 0
}

_get_ppid :: proc() -> int {
	return 0
}

_process_info_by_handle :: proc(process: Process, selection: Process_Info_Fields, allocator: runtime.Allocator) -> (info: Process_Info, err: Error) {
	err = .Unsupported
	return
}

_current_process_info :: proc(selection: Process_Info_Fields, allocator: runtime.Allocator) -> (info: Process_Info, err: Error) {
	err = .Unsupported
	return
}

_Sys_Process_Attributes :: struct {}

_process_start :: proc(desc: Process_Desc) -> (process: Process, err: Error) {
	err = .Unsupported
	return
}

_process_wait :: proc(process: Process, timeout: time.Duration) -> (process_state: Process_State, err: Error) {
	err = .Unsupported
	return
}

_process_close :: proc(process: Process) -> Error {
	return .Unsupported
}

_process_kill :: proc(process: Process) -> (err: Error) {
	return .Unsupported
}

_process_info_by_pid :: proc(pid: int, selection: Process_Info_Fields, allocator: runtime.Allocator) -> (info: Process_Info, err: Error) {
	err = .Unsupported
	return
}

_process_list :: proc(allocator: runtime.Allocator) -> (list: []int, err: Error) {
	err = .Unsupported
	return
}

_process_open :: proc(pid: int, flags: Process_Open_Flags) -> (process: Process, err: Error) {
	process.pid = pid
	err = .Unsupported
	return
}

_process_handle_still_valid :: proc(p: Process) -> Error {
	return nil
}

_process_state_update_times :: proc(p: Process, state: ^Process_State) {
	return
}
@@ -427,7 +427,7 @@ _process_start :: proc(desc: Process_Desc) -> (process: Process, err: Error) {
	command_line_w := win32_utf8_to_wstring(command_line, temp_allocator()) or_return
	environment := desc.env
	if desc.env == nil {
		environment = environ(temp_allocator())
		environment = environ(temp_allocator()) or_return
	}
	environment_block := _build_environment_block(environment, temp_allocator())
	environment_block_w := win32_utf8_to_utf16(environment_block, temp_allocator()) or_return

101
core/os/os2/stat_wasi.odin
Normal file
@@ -0,0 +1,101 @@
#+private
package os2

import "base:runtime"

import "core:path/filepath"
import "core:sys/wasm/wasi"
import "core:time"

internal_stat :: proc(stat: wasi.filestat_t, fullpath: string) -> (fi: File_Info) {
	fi.fullpath = fullpath
	fi.name = filepath.base(fi.fullpath)

	fi.inode = u128(stat.ino)
	fi.size = i64(stat.size)

	switch stat.filetype {
	case .BLOCK_DEVICE: fi.type = .Block_Device
	case .CHARACTER_DEVICE: fi.type = .Character_Device
	case .DIRECTORY: fi.type = .Directory
	case .REGULAR_FILE: fi.type = .Regular
	case .SOCKET_DGRAM, .SOCKET_STREAM: fi.type = .Socket
	case .SYMBOLIC_LINK: fi.type = .Symlink
	case .UNKNOWN: fi.type = .Undetermined
	case: fi.type = .Undetermined
	}

	fi.creation_time = time.Time{_nsec=i64(stat.ctim)}
	fi.modification_time = time.Time{_nsec=i64(stat.mtim)}
	fi.access_time = time.Time{_nsec=i64(stat.atim)}

	return
}

_fstat :: proc(f: ^File, allocator: runtime.Allocator) -> (fi: File_Info, err: Error) {
	if f == nil || f.impl == nil {
		err = .Invalid_File
		return
	}

	impl := (^File_Impl)(f.impl)

	stat, _err := wasi.fd_filestat_get(__fd(f))
	if _err != nil {
		err = _get_platform_error(_err)
		return
	}

	fullpath := clone_string(impl.name, allocator) or_return
	return internal_stat(stat, fullpath), nil
}

_stat :: proc(name: string, allocator: runtime.Allocator) -> (fi: File_Info, err: Error) {
	if name == "" {
		err = .Invalid_Path
		return
	}

	dir_fd, relative, ok := match_preopen(name)
	if !ok {
		err = .Invalid_Path
		return
	}

	stat, _err := wasi.path_filestat_get(dir_fd, {.SYMLINK_FOLLOW}, relative)
	if _err != nil {
		err = _get_platform_error(_err)
		return
	}

	// NOTE: wasi doesn't really do full paths afact.
	fullpath := clone_string(name, allocator) or_return
	return internal_stat(stat, fullpath), nil
}

_lstat :: proc(name: string, allocator: runtime.Allocator) -> (fi: File_Info, err: Error) {
	if name == "" {
		err = .Invalid_Path
		return
	}

	dir_fd, relative, ok := match_preopen(name)
	if !ok {
		err = .Invalid_Path
		return
	}

	stat, _err := wasi.path_filestat_get(dir_fd, {}, relative)
	if _err != nil {
		err = _get_platform_error(_err)
		return
	}

	// NOTE: wasi doesn't really do full paths afact.
	fullpath := clone_string(name, allocator) or_return
	return internal_stat(stat, fullpath), nil
}

_same_file :: proc(fi1, fi2: File_Info) -> bool {
	return fi1.fullpath == fi2.fullpath
}
@@ -72,7 +72,11 @@ internal_stat :: proc(name: string, create_file_attributes: u32, allocator: runt
	ok := win32.GetFileAttributesExW(wname, win32.GetFileExInfoStandard, &fa)
	if ok && fa.dwFileAttributes & win32.FILE_ATTRIBUTE_REPARSE_POINT == 0 {
		// Not a symlink
		return _file_info_from_win32_file_attribute_data(&fa, name, allocator)
		fi = _file_info_from_win32_file_attribute_data(&fa, name, allocator) or_return
		if fi.type == .Undetermined {
			fi.type = _file_type_from_create_file(wname, create_file_attributes)
		}
		return
	}

	err := 0 if ok else win32.GetLastError()
@@ -86,7 +90,11 @@ internal_stat :: proc(name: string, create_file_attributes: u32, allocator: runt
		}
		win32.FindClose(sh)

		return _file_info_from_win32_find_data(&fd, name, allocator)
		fi = _file_info_from_win32_find_data(&fd, name, allocator) or_return
		if fi.type == .Undetermined {
			fi.type = _file_type_from_create_file(wname, create_file_attributes)
		}
		return
	}

	h := win32.CreateFileW(wname, 0, 0, nil, win32.OPEN_EXISTING, create_file_attributes, nil)
@@ -194,6 +202,15 @@ file_type :: proc(h: win32.HANDLE) -> File_Type {
	return .Undetermined
}

_file_type_from_create_file :: proc(wname: win32.wstring, create_file_attributes: u32) -> File_Type {
	h := win32.CreateFileW(wname, 0, 0, nil, win32.OPEN_EXISTING, create_file_attributes, nil)
	if h == win32.INVALID_HANDLE_VALUE {
		return .Undetermined
	}
	defer win32.CloseHandle(h)
	return file_type(h)
}

_file_type_mode_from_file_attributes :: proc(file_attributes: win32.DWORD, h: win32.HANDLE, ReparseTag: win32.DWORD) -> (type: File_Type, mode: int) {
	if file_attributes & win32.FILE_ATTRIBUTE_READONLY != 0 {
		mode |= 0o444
@@ -266,7 +283,7 @@ _file_info_from_get_file_information_by_handle :: proc(path: string, h: win32.HA
	fi.name = basename(path)
	fi.inode = u128(u64(d.nFileIndexHigh)<<32 + u64(d.nFileIndexLow))
	fi.size = i64(d.nFileSizeHigh)<<32 + i64(d.nFileSizeLow)
	type, mode := _file_type_mode_from_file_attributes(d.dwFileAttributes, nil, 0)
	type, mode := _file_type_mode_from_file_attributes(d.dwFileAttributes, h, 0)
	fi.type = type
	fi.mode |= mode
	fi.creation_time = time.unix(0, win32.FILETIME_as_unix_nanoseconds(d.ftCreationTime))

9
core/os/os2/temp_file_wasi.odin
Normal file
@@ -0,0 +1,9 @@
#+private
package os2

import "base:runtime"

_temp_dir :: proc(allocator: runtime.Allocator) -> (string, runtime.Allocator_Error) {
	// NOTE: requires user to add /tmp to their preopen dirs, no standard way exists.
	return clone_string("/tmp", allocator)
}
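For context (a hedged note, not part of the commit): a WASI module only sees directories the host preopens, which is also what the match_preopen calls earlier in this diff resolve against, so the "/tmp" returned here is only usable if the embedder maps it in.

	// Whether "/tmp" actually resolves depends on the host runtime's preopen
	// configuration (for example a flag along the lines of wasmtime's --dir, or the
	// equivalent preopen option of whichever embedder runs the module).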