Merge branch 'odin-lang:master' into more-import-cleanup

This commit is contained in:
jakubtomsu
2026-02-17 17:54:34 +01:00
committed by GitHub
329 changed files with 8263 additions and 4568 deletions

View File

@@ -139,6 +139,8 @@ jobs:
run: ./odin test tests/core/normal.odin -file -all-packages -vet -vet-tabs -strict-style -vet-style -warnings-as-errors -disallow-do -define:ODIN_TEST_FANCY=false -define:ODIN_TEST_FAIL_ON_BAD_MEMORY=true -sanitize:address
- name: Optimized Core library tests
run: ./odin test tests/core/speed.odin -o:speed -file -all-packages -vet -vet-tabs -strict-style -vet-style -warnings-as-errors -disallow-do -define:ODIN_TEST_FANCY=false -define:ODIN_TEST_FAIL_ON_BAD_MEMORY=true -sanitize:address
- name: Wycheproof tests
run: ./odin test tests/core/crypto/wycheproof -vet -vet-tabs -strict-style -vet-style -vet-cast -warnings-as-errors -disallow-do -o:speed -microarch:native
- name: Vendor library tests
run: ./odin test tests/vendor -all-packages -vet -vet-tabs -strict-style -vet-style -warnings-as-errors -disallow-do -define:ODIN_TEST_FANCY=false -define:ODIN_TEST_FAIL_ON_BAD_MEMORY=true -sanitize:address
- name: Internals tests
@@ -187,7 +189,7 @@ jobs:
- name: build Odin
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
./build.bat 1
- name: Odin version
run: ./odin version
@@ -196,70 +198,75 @@ jobs:
- name: Odin check
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
odin check examples/demo -vet
- name: Odin run
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
odin run examples/demo
- name: Odin run -debug
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
odin run examples/demo -debug -vet -vet-tabs -strict-style -vet-style -warnings-as-errors -disallow-do
- name: Odin check examples/all
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
odin check examples/all -vet -vet-tabs -strict-style -vet-style -warnings-as-errors -disallow-do
- name: Odin check examples/all/sdl3
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
odin check examples/all/sdl3 -vet -vet-tabs -strict-style -vet-style -warnings-as-errors -disallow-do -no-entry-point
- name: Core library tests
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
odin test tests/core/normal.odin -file -all-packages -vet -vet-tabs -strict-style -vet-style -warnings-as-errors -disallow-do -define:ODIN_TEST_FANCY=false -define:ODIN_TEST_FAIL_ON_BAD_MEMORY=true -sanitize:address
- name: Optimized core library tests
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
odin test tests/core/speed.odin -o:speed -file -all-packages -vet -vet-tabs -strict-style -vet-style -warnings-as-errors -disallow-do -define:ODIN_TEST_FANCY=false -define:ODIN_TEST_FAIL_ON_BAD_MEMORY=true -sanitize:address
- name: Wycheproof tests
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
odin test tests/core/crypto/wycheproof -vet -vet-tabs -strict-style -vet-style -vet-cast -warnings-as-errors -disallow-do -o:speed -microarch:native
- name: Vendor library tests
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
copy vendor\lua\5.4\windows\*.dll .
odin test tests/vendor -all-packages -vet -vet-tabs -strict-style -vet-style -warnings-as-errors -disallow-do -define:ODIN_TEST_FANCY=false -define:ODIN_TEST_FAIL_ON_BAD_MEMORY=true -sanitize:address
- name: Odin internals tests
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
odin test tests/internal -all-packages -vet -vet-tabs -strict-style -vet-style -warnings-as-errors -disallow-do -define:ODIN_TEST_FANCY=false -define:ODIN_TEST_FAIL_ON_BAD_MEMORY=true -sanitize:address
- name: Check issues
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
cd tests/issues
call run.bat
- name: Check benchmarks
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
odin check tests/benchmark -vet -strict-style -no-entry-point
- name: Odin documentation tests
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
cd tests\documentation
call build.bat
- name: Odin check examples/all for Windows 32bits
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat"
odin check examples/all -strict-style -target:windows_i386
build_linux_riscv64:

View File

@@ -12,6 +12,8 @@ jobs:
runs-on: windows-2022
steps:
- uses: actions/checkout@v4
with:
lfs: true
- name: build Odin
shell: cmd
run: |
@@ -47,6 +49,8 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
lfs: true
- name: (Linux) Download LLVM and Build Odin
run: |
docker run --rm -v "$PWD:/src" -w /src alpine sh -c '
@@ -87,6 +91,8 @@ jobs:
runs-on: ubuntu-24.04-arm
steps:
- uses: actions/checkout@v4
with:
lfs: true
- name: (Linux ARM) Download LLVM and Build Odin
run: |
docker run --rm -v "$PWD:/src" -w /src arm64v8/alpine sh -c '
@@ -127,6 +133,8 @@ jobs:
runs-on: macos-15-intel
steps:
- uses: actions/checkout@v4
with:
lfs: true
- name: Download LLVM and setup PATH
run: |
brew update
@@ -166,6 +174,8 @@ jobs:
runs-on: macos-latest # ARM machine
steps:
- uses: actions/checkout@v4
with:
lfs: true
- name: Download LLVM and setup PATH
run: |
brew update

3
.gitignore vendored
View File

@@ -279,6 +279,9 @@ demo.bin
libLLVM*.so*
*.a
# WASM
*.wasm
# shared collection
shared/

View File

@@ -176,6 +176,11 @@ buffer_write_ptr :: proc(b: ^Buffer, ptr: rawptr, size: int, loc := #caller_loca
return buffer_write(b, ([^]byte)(ptr)[:size], loc=loc)
}
buffer_write_slice :: proc(b: ^Buffer, slice: $S/[]$T, loc := #caller_location) -> (n: int, err: io.Error) {
size := len(slice)*size_of(T)
return buffer_write(b, ([^]byte)(raw_data(slice))[:size], loc=loc)
}
buffer_write_string :: proc(b: ^Buffer, s: string, loc := #caller_location) -> (n: int, err: io.Error) {
b.last_read = .Invalid
m, ok := _buffer_try_grow(b, len(s), loc=loc)
@@ -248,6 +253,12 @@ buffer_read_ptr :: proc(b: ^Buffer, ptr: rawptr, size: int) -> (n: int, err: io.
return buffer_read(b, ([^]byte)(ptr)[:size])
}
buffer_read_slice :: proc(b: ^Buffer, slice: $S/[]$T) -> (n: int, err: io.Error) {
size := len(slice)*size_of(T)
return buffer_read(b, ([^]byte)(raw_data(slice))[:size])
}
buffer_read_at :: proc(b: ^Buffer, p: []byte, offset: int) -> (n: int, err: io.Error) {
if len(p) == 0 {
return 0, nil

View File

@@ -67,8 +67,8 @@ truncate_to_rune :: proc(str: []byte, r: rune) -> []byte {
return str[:n]
}
// Compares two strings, returning a value representing which one comes first lexicographically.
// -1 for `a`; 1 for `b`, or 0 if they are equal.
// Compares two []byte, returning a value representing which one comes first lexicographically.
// Returns: -1 for `lhs`, 1 for `rhs`, or 0 if they are equal.
compare :: proc(lhs, rhs: []byte) -> int {
res := runtime.memory_compare(raw_data(lhs), raw_data(rhs), min(len(lhs), len(rhs)))
if res == 0 && len(lhs) != len(rhs) {
@@ -997,16 +997,18 @@ trim_left :: proc(s: []byte, cutset: []byte) -> []byte {
if s == nil || cutset == nil {
return s
}
state := cutset
return trim_left_proc_with_state(s, is_in_cutset, &state)
begin := 0; end := len(s)
for ; begin < end && index_byte(cutset, s[begin]) >= 0; begin += 1 {}
return s[begin:]
}
trim_right :: proc(s: []byte, cutset: []byte) -> []byte {
if s == nil || cutset == nil {
return s
}
state := cutset
return trim_right_proc_with_state(s, is_in_cutset, &state)
begin := 0; end := len(s)
for ; end > begin && index_byte(cutset, s[end - 1]) >= 0; end -= 1 {}
return s[:end]
}
trim :: proc(s: []byte, cutset: []byte) -> []byte {

View File

@@ -61,6 +61,27 @@ reader_read_at :: proc(r: ^Reader, p: []byte, off: i64) -> (n: int, err: io.Erro
}
return
}
reader_read_slice :: proc(r: ^Reader, slice: $T/[]$S) -> (n: int, err: io.Error) {
b := ([^]byte)(raw_data(slice))[:len(slice)*size_of(S)]
return reader_read(r, b)
}
reader_read_slice_at :: proc(r: ^Reader, slice: $T/[]$S, off: i64) -> (n: int, err: io.Error) {
b := ([^]byte)(raw_data(slice))[:len(slice)*size_of(S)]
return reader_read_at(r, b, off)
}
reader_read_ptr :: proc(r: ^Reader, data: rawptr, len: int) -> (n: int, err: io.Error) {
b := ([^]byte)(data)[:len]
return reader_read(r, b)
}
reader_read_ptr_at :: proc(r: ^Reader, data: rawptr, len: int, off: i64) -> (n: int, err: io.Error) {
b := ([^]byte)(data)[:len]
return reader_read_at(r, b, off)
}
reader_read_byte :: proc(r: ^Reader) -> (byte, io.Error) {
r.prev_rune = -1
if r.i >= i64(len(r.s)) {

View File

@@ -5,6 +5,7 @@ Example:
import "core:bytes"
import "core:os"
import "core:compress"
import "core:compress/gzip"
import "core:fmt"
// Small GZIP file with fextra, fname and fcomment present.
@@ -22,7 +23,8 @@ Example:
main :: proc() {
// Set up output buffer.
buf := bytes.Buffer{}
buf: bytes.Buffer
defer bytes.buffer_destroy(&buf)
stdout :: proc(s: string) {
os.write_string(os.stdout, s)
@@ -31,15 +33,13 @@ Example:
os.write_string(os.stderr, s)
}
args := os.args
if len(args) < 2 {
if len(os.args) < 2 {
stderr("No input file specified.\n")
err := load(data=TEST, buf=&buf, known_gzip_size=len(TEST))
err := gzip.load(data=TEST, buf=&buf, known_gzip_size=len(TEST))
if err == nil {
stdout("Displaying test vector: ")
stdout("Displaying test vector: \"")
stdout(bytes.buffer_to_string(&buf))
stdout("\n")
stdout("\"\n")
} else {
fmt.printf("gzip.load returned %v\n", err)
}
@@ -47,35 +47,31 @@ Example:
os.exit(0)
}
// The rest are all files.
args = args[1:]
err: Error
for file in os.args[1:] {
err: gzip.Error
for file in args {
if file == "-" {
// Read from stdin
s := os.stream_from_handle(os.stdin)
ctx := &compress.Context_Stream_Input{
input = s,
input = os.stdin.stream,
}
err = load(ctx, &buf)
err = gzip.load(ctx, &buf)
} else {
err = load(file, &buf)
err = gzip.load(file, &buf)
}
if err != nil {
if err != E_General.File_Not_Found {
stderr("File not found: ")
stderr(file)
stderr("\n")
os.exit(1)
}
switch err {
case nil:
stdout(bytes.buffer_to_string(&buf))
case gzip.E_General.File_Not_Found:
stderr("File not found: ")
stderr(file)
stderr("\n")
os.exit(1)
case:
stderr("GZIP returned an error.\n")
bytes.buffer_destroy(&buf)
os.exit(2)
}
stdout(bytes.buffer_to_string(&buf))
}
bytes.buffer_destroy(&buf)
}
*/
package compress_gzip

View File

@@ -107,14 +107,10 @@ load :: proc{load_from_bytes, load_from_file, load_from_context}
load_from_file :: proc(filename: string, buf: ^bytes.Buffer, expected_output_size := -1, allocator := context.allocator) -> (err: Error) {
context.allocator = allocator
data, ok := os.read_entire_file(filename)
defer delete(data)
file_data, file_err := os.read_entire_file(filename, allocator)
defer delete(file_data)
err = E_General.File_Not_Found
if ok {
err = load_from_bytes(data, buf, len(data), expected_output_size)
}
return
return load_from_bytes(file_data, buf, len(file_data), expected_output_size) if file_err == nil else E_General.File_Not_Found
}
load_from_bytes :: proc(data: []byte, buf: ^bytes.Buffer, known_gzip_size := -1, expected_output_size := -1, allocator := context.allocator) -> (err: Error) {

View File

@@ -2,7 +2,6 @@ package container_dynamic_bit_array
import "base:builtin"
import "base:intrinsics"
import "core:mem"
/*
Note that these constants are dependent on the backing being a u64.
@@ -329,7 +328,7 @@ Inputs:
*/
clear :: proc(ba: ^Bit_Array) {
if ba == nil { return }
mem.zero_slice(ba.bits[:])
intrinsics.mem_zero(raw_data(ba.bits), builtin.len(ba.bits) * NUM_BITS / 8)
}
/*
Gets the length of set and unset valid bits in the Bit_Array.

View File

@@ -49,7 +49,7 @@ dynamic_add :: proc(m: ^$D/Dynamic_Handle_Map($T, $Handle_Type), item: T, loc :=
}
_ = xar.append(&m.items, item, loc) or_return
i := len(m.items)-1
i := xar.len(m.items)-1
ptr := xar.get_ptr_unsafe(&m.items, i)
ptr^ = item

View File

@@ -1,12 +1,12 @@
package container_pool
import "base:intrinsics"
import "base:runtime"
import "base:sanitizer"
import "core:mem"
import "core:sync"
_ :: sanitizer
_ :: sync
DEFAULT_BLOCK_SIZE :: _DEFAULT_BLOCK_SIZE
@@ -33,7 +33,7 @@ Pool :: struct($T: typeid) {
}
@(require_results)
init :: proc(p: ^Pool($T), $link_field: string, block_size: uint = DEFAULT_BLOCK_SIZE) -> (err: mem.Allocator_Error)
init :: proc(p: ^Pool($T), $link_field: string, block_size: uint = DEFAULT_BLOCK_SIZE) -> (err: runtime.Allocator_Error)
where intrinsics.type_has_field(T, link_field),
intrinsics.type_field_type(T, link_field) == ^T {
p.link_off = offset_of_by_string(T, link_field)
@@ -58,7 +58,7 @@ destroy :: proc(p: ^Pool($T)) {
}
@(require_results)
get :: proc(p: ^Pool($T)) -> (elem: ^T, err: mem.Allocator_Error) #optional_allocator_error {
get :: proc(p: ^Pool($T)) -> (elem: ^T, err: runtime.Allocator_Error) #optional_allocator_error {
defer sync.atomic_add_explicit(&p.num_outstanding, 1, .Relaxed)
for {
@@ -78,7 +78,7 @@ get :: proc(p: ^Pool($T)) -> (elem: ^T, err: mem.Allocator_Error) #optional_allo
}
put :: proc(p: ^Pool($T), elem: ^T) {
mem.zero_item(elem)
intrinsics.mem_zero(elem, size_of(T))
_poison_elem(p, elem)
defer sync.atomic_sub_explicit(&p.num_outstanding, 1, .Relaxed)
@@ -113,28 +113,30 @@ _set_next :: proc(p: ^Pool($T), elem: ^T, next: ^T) {
(^^T)(uintptr(elem) + p.link_off)^ = next
}
@(disabled=.Address not_in ODIN_SANITIZER_FLAGS)
_poison_elem :: proc(p: ^Pool($T), elem: ^T) {
if p.link_off > 0 {
sanitizer.address_poison_rawptr(elem, int(p.link_off))
}
when .Address in ODIN_SANITIZER_FLAGS {
if p.link_off > 0 {
sanitizer.address_poison_rawptr(elem, int(p.link_off))
}
len := size_of(T) - p.link_off - size_of(rawptr)
if len > 0 {
ptr := rawptr(uintptr(elem) + p.link_off + size_of(rawptr))
sanitizer.address_poison_rawptr(ptr, int(len))
len := size_of(T) - p.link_off - size_of(rawptr)
if len > 0 {
ptr := rawptr(uintptr(elem) + p.link_off + size_of(rawptr))
sanitizer.address_poison_rawptr(ptr, int(len))
}
}
}
@(disabled=.Address not_in ODIN_SANITIZER_FLAGS)
_unpoison_elem :: proc(p: ^Pool($T), elem: ^T) {
if p.link_off > 0 {
sanitizer.address_unpoison_rawptr(elem, int(p.link_off))
}
when .Address in ODIN_SANITIZER_FLAGS {
if p.link_off > 0 {
sanitizer.address_unpoison_rawptr(elem, int(p.link_off))
}
len := size_of(T) - p.link_off - size_of(rawptr)
if len > 0 {
ptr := rawptr(uintptr(elem) + p.link_off + size_of(rawptr))
sanitizer.address_unpoison_rawptr(ptr, int(len))
len := size_of(T) - p.link_off - size_of(rawptr)
if len > 0 {
ptr := rawptr(uintptr(elem) + p.link_off + size_of(rawptr))
sanitizer.address_unpoison_rawptr(ptr, int(len))
}
}
}

View File

@@ -9,11 +9,9 @@ package container_pool
import "base:runtime"
import "core:mem"
_Pool_Arena :: runtime.Arena
_DEFAULT_BLOCK_SIZE :: mem.Megabyte
_DEFAULT_BLOCK_SIZE :: runtime.Megabyte
_pool_arena_init :: proc(arena: ^Pool_Arena, block_size: uint = DEFAULT_BLOCK_SIZE) -> (err: runtime.Allocator_Error) {
runtime.arena_init(arena, block_size, runtime.default_allocator()) or_return

View File

@@ -2,13 +2,11 @@
package container_pool
import "base:runtime"
import "core:mem"
import "core:mem/virtual"
_Pool_Arena :: virtual.Arena
_DEFAULT_BLOCK_SIZE :: mem.Gigabyte
_DEFAULT_BLOCK_SIZE :: runtime.Gigabyte
_pool_arena_init :: proc(arena: ^Pool_Arena, block_size: uint = DEFAULT_BLOCK_SIZE) -> (err: runtime.Allocator_Error) {
virtual.arena_init_growing(arena, block_size) or_return

View File

@@ -386,7 +386,7 @@ Attempts to add the given element at the beginning.
This operation assumes that the small-array is not empty.
Note: Performing this operation will cause pointers obtained
through get_ptr(_save) to reference incorrect elements.
through get_ptr(_safe) to reference incorrect elements.
**Inputs**
- `a`: A pointer to the small-array
@@ -466,7 +466,7 @@ Removes and returns the first element of the small-array.
This operation assumes that the small-array is not empty.
Note: Performing this operation will cause pointers obtained
through get_ptr(_save) to reference incorrect elements.
through get_ptr(_safe) to reference incorrect elements.
**Inputs**
- `a`: A pointer to the small-array
@@ -542,7 +542,7 @@ Attempts to remove and return the first element of the small array.
Unlike `pop_front`, it does not assume that the array is non-empty.
Note: Performing this operation will cause pointers obtained
through get_ptr(_save) to reference incorrect elements.
through get_ptr(_safe) to reference incorrect elements.
**Inputs**
- `a`: A pointer to the small-array
@@ -616,7 +616,7 @@ consume :: proc "odin" (a: ^$A/Small_Array($N, $T), count: int, loc := #caller_l
Removes the element at the specified index while retaining order.
Note: Performing this operation will cause pointers obtained
through get_ptr(_save) to reference incorrect elements.
through get_ptr(_safe) to reference incorrect elements.
**Inputs**
- `a`: A pointer to the small-array
@@ -754,7 +754,7 @@ push_back_elems :: proc "contextless" (a: ^$A/Small_Array($N, $T), items: ..T) -
Tries to insert an element at the specified position.
Note: Performing this operation will cause pointers obtained
through get_ptr(_save) to reference incorrect elements.
through get_ptr(_safe) to reference incorrect elements.
**Inputs**
- `a`: A pointer to the small-array

View File

@@ -0,0 +1,154 @@
package container_xar
@(require) import "base:runtime"
// Exponential array combined with an intrusive freelist threaded through
// freed slots. Each element must be at least pointer-sized
// (`size_of(T) >= size_of(^T)`) so a freed slot can store the link to the
// next free slot in-place.
Freelist_Array :: struct($T: typeid, $SHIFT: uint) where
0 < SHIFT,
SHIFT <= MAX_SHIFT,
size_of(T) >= size_of(^T) {
array: Array(T, SHIFT),
freelist: ^T, // head of the intrusive list of freed slots; nil when no slot is free
}
freelist_init :: proc(x: ^$X/Freelist_Array($T, $SHIFT), allocator := context.allocator) {
init(&x.array, allocator)
x.freelist = nil
}
freelist_destroy :: proc(x: ^$X/Freelist_Array($T, $SHIFT)) {
destroy(&x.array)
x.freelist = nil
}
freelist_clear :: proc(x: ^$X/Freelist_Array($T, $SHIFT)) {
clear(&x.array)
x.freelist = nil
}
// Inserts `value`, reusing the most recently freed slot when one is
// available; otherwise appends a new element to the backing array.
// Returns a stable pointer to the stored element, its logical index,
// and an allocator error if a new chunk allocation failed.
@(require_results)
freelist_push_with_index :: proc(x: ^$X/Freelist_Array($T, $SHIFT), value: T, loc := #caller_location) -> (ptr: ^T, index: int, err: runtime.Allocator_Error) {
if x.freelist != nil {
// Pop the head of the freelist: the freed slot stores the next link in-place.
slot := x.freelist
idx := freelist_index_of(x, slot)
x.freelist = (^^T)(slot)^
slot^ = value
return slot, idx, nil
}
// No free slot: append at the end of the backing exponential array.
idx := x.array.len
ptr = array_push_back_elem_and_get_ptr(&x.array, value, loc) or_return
return ptr, idx, nil
}
@(require_results)
freelist_push :: proc(x: ^$X/Freelist_Array($T, $SHIFT), value: T, loc := #caller_location) -> (ptr: ^T, err: runtime.Allocator_Error) {
ptr, _, err = freelist_push_with_index(x, value, loc)
return
}
// Removes the element at `index`: returns a copy of its value and pushes the
// slot onto the freelist for later reuse.
// NOTE(review): does not check whether the slot is already freed — popping an
// already-freed index would corrupt the freelist; confirm callers guarantee this.
freelist_pop :: proc(x: ^$X/Freelist_Array($T, $SHIFT), #any_int index: int, loc := #caller_location) -> T {
item := array_get_ptr(&x.array, index, loc)
result := item^
// Store the old freelist head inside the freed slot, then make it the new head.
(^^T)(item)^ = x.freelist
x.freelist = item
return result
}
// Marks the slot at `index` as free without reading its value, pushing it
// onto the freelist for reuse. Same caveat as `freelist_pop`: releasing an
// already-freed index corrupts the freelist.
freelist_release :: proc(x: ^$X/Freelist_Array($T, $SHIFT), #any_int index: int, loc := #caller_location) {
item := array_get_ptr(&x.array, index, loc)
(^^T)(item)^ = x.freelist
x.freelist = item
}
@(require_results)
// Maps an element pointer back to its logical index by scanning the backing
// array's chunks for the one whose address range contains `ptr`.
// Returns (-1, false) when `ptr` does not point into this array's storage.
freelist_linear_search :: proc(x: ^$X/Freelist_Array($T, $SHIFT), ptr: ^T) -> (index: int, found: bool) {
base := 0
for chunk, c in x.array.chunks {
if chunk == nil {
break
}
// Chunk capacities grow exponentially; chunk 0 and 1 share the smallest size.
chunk_cap := 1 << (SHIFT + uint(c if c > 0 else 1) - 1)
ptr_addr := uintptr(ptr)
chunk_start_addr := uintptr(chunk)
chunk_end_addr := chunk_start_addr + uintptr(chunk_cap * size_of(T))
if chunk_start_addr <= ptr_addr && ptr_addr < chunk_end_addr {
offset := int(ptr_addr - chunk_start_addr) / size_of(T)
return base + offset, true
}
base += chunk_cap
}
return -1, false
}
@(require_results)
freelist_get :: proc(x: ^$X/Freelist_Array($T, $SHIFT), #any_int index: int, loc := #caller_location) -> T {
return array_get(&x.array, index, loc)
}
@(require_results)
freelist_get_ptr :: proc(x: ^$X/Freelist_Array($T, $SHIFT), #any_int index: int, loc := #caller_location) -> ^T {
return array_get_ptr(&x.array, index, loc)
}
freelist_set :: proc(x: ^$X/Freelist_Array($T, $SHIFT), #any_int index: int, value: T, loc := #caller_location) {
array_set(&x.array, index, value, loc)
}
@(require_results)
freelist_len :: proc(x: $X/Freelist_Array($T, $SHIFT)) -> int {
return x.array.len
}
@(require_results)
freelist_cap :: proc(x: $X/Freelist_Array($T, $SHIFT)) -> int {
return array_cap(x.array)
}
// Reports whether the slot at `index` is currently on the freelist, by
// walking the intrusive free links. Linear in the number of freed slots.
@(require_results)
freelist_is_freed :: proc(x: ^$X/Freelist_Array($T, $SHIFT), #any_int index: int) -> bool {
ptr := array_get_ptr(&x.array, index)
current := x.freelist
for current != nil {
if current == ptr {
return true
}
// Each freed slot stores the pointer to the next freed slot in-place.
current = (^^T)(current)^
}
return false
}
Freelist_Iterator :: struct($T: typeid, $SHIFT: uint) {
freelist_array: ^Freelist_Array(T, SHIFT),
idx: int,
}
freelist_iterator :: proc(x: ^$X/Freelist_Array($T, $SHIFT)) -> Freelist_Iterator(T, SHIFT) {
return {freelist_array = x, idx = 0}
}
@(require_results)
freelist_iterate_by_val :: proc(it: ^Freelist_Iterator($T, $SHIFT)) -> (val: T, idx: int, ok: bool) {
for it.idx < it.freelist_array.array.len {
if !freelist_is_freed(it.freelist_array, it.idx) {
val = array_get(&it.freelist_array.array, it.idx)
idx = it.idx
it.idx += 1
return val, idx, true
}
it.idx += 1
}
return
}
@(require_results)
freelist_iterate_by_ptr :: proc(it: ^Freelist_Iterator($T, $SHIFT)) -> (val: ^T, idx: int, ok: bool) {
for it.idx < it.freelist_array.array.len {
if !freelist_is_freed(it.freelist_array, it.idx) {
val = array_get_ptr(&it.freelist_array.array, it.idx)
idx = it.idx
it.idx += 1
return val, idx, true
}
it.idx += 1
}
return
}

View File

@@ -26,7 +26,6 @@
*/
package container_xar
@(require) import "core:mem"
@(require) import "base:intrinsics"
@(require) import "base:runtime"
@@ -73,7 +72,7 @@ MAX_SHIFT :: PLATFORM_BITS>>1
Array :: struct($T: typeid, $SHIFT: uint) where 0 < SHIFT, SHIFT <= MAX_SHIFT {
chunks: [(1 << (_LOG2_PLATFORM_BITS - intrinsics.constant_log2(SHIFT))) + 1][^]T,
len: int,
allocator: mem.Allocator,
allocator: runtime.Allocator,
}
@@ -84,7 +83,7 @@ Initializes an exponential array with the given allocator.
- `x`: Pointer to the exponential array to initialize
- `allocator`: Allocator to use for chunk allocations (defaults to context.allocator)
*/
init :: proc(x: ^$X/Array($T, $SHIFT), allocator := context.allocator) {
array_init :: proc(x: ^$X/Array($T, $SHIFT), allocator := context.allocator) {
x^ = {allocator = allocator}
}
@@ -94,12 +93,12 @@ Frees all allocated chunks and resets the exponential array.
**Inputs**
- `x`: Pointer to the exponential array to destroy
*/
destroy :: proc(x: ^$X/Array($T, $SHIFT)) {
array_destroy :: proc(x: ^$X/Array($T, $SHIFT)) {
#reverse for c, i in x.chunks {
if c != nil {
n := 1 << (SHIFT + uint(i if i > 0 else 1) - 1)
size_in_bytes := n * size_of(T)
mem.free_with_size(c, size_in_bytes, x.allocator)
runtime.mem_free_with_size(c, size_in_bytes, x.allocator)
}
}
x^ = {}
@@ -109,19 +108,19 @@ destroy :: proc(x: ^$X/Array($T, $SHIFT)) {
Resets the array's length to zero without freeing memory.
Allocated chunks are retained for reuse.
*/
clear :: proc "contextless" (x: ^$X/Array($T, $SHIFT)) {
array_clear :: proc "contextless" (x: ^$X/Array($T, $SHIFT)) {
x.len = 0
}
// Returns the length of the exponential-array
@(require_results)
len :: proc "contextless" (x: $X/Array($T, $SHIFT)) -> int {
array_len :: proc "contextless" (x: $X/Array($T, $SHIFT)) -> int {
return x.len
}
// Returns the number of allocated elements
@(require_results)
cap :: proc "contextless" (x: $X/Array($T, $SHIFT)) -> int {
array_cap :: proc "contextless" (x: $X/Array($T, $SHIFT)) -> int {
#reverse for c, i in x.chunks {
if c != nil {
return 1 << (SHIFT + uint(i if i > 0 else 1))
@@ -161,7 +160,7 @@ Get a copy of the element at the specified index.
- a copy of the element
*/
@(require_results)
get :: proc(x: ^$X/Array($T, $SHIFT), #any_int index: int, loc := #caller_location) -> (val: T) #no_bounds_check {
array_get :: proc(x: ^$X/Array($T, $SHIFT), #any_int index: int, loc := #caller_location) -> (val: T) #no_bounds_check {
runtime.bounds_check_error_loc(loc, index, x.len)
chunk_idx, elem_idx, _ := _meta_get(SHIFT, uint(index))
return x.chunks[chunk_idx][elem_idx]
@@ -200,7 +199,7 @@ Example:
}
*/
@(require_results)
get_ptr :: proc(x: ^$X/Array($T, $SHIFT), #any_int index: int, loc := #caller_location) -> (val: ^T) #no_bounds_check {
array_get_ptr :: proc(x: ^$X/Array($T, $SHIFT), #any_int index: int, loc := #caller_location) -> (val: ^T) #no_bounds_check {
runtime.bounds_check_error_loc(loc, index, x.len)
chunk_idx, elem_idx, _ := _meta_get(SHIFT, uint(index))
return &x.chunks[chunk_idx][elem_idx]
@@ -208,7 +207,7 @@ get_ptr :: proc(x: ^$X/Array($T, $SHIFT), #any_int index: int, loc := #caller_lo
// No bounds checking
@(require_results)
get_ptr_unsafe :: proc "contextless" (x: ^$X/Array($T, $SHIFT), #any_int index: int) -> (val: ^T) #no_bounds_check {
array_get_ptr_unsafe :: proc "contextless" (x: ^$X/Array($T, $SHIFT), #any_int index: int) -> (val: ^T) #no_bounds_check {
chunk_idx, elem_idx, _ := _meta_get(SHIFT, uint(index))
return &x.chunks[chunk_idx][elem_idx]
}
@@ -221,14 +220,15 @@ Set the element at the specified index to the given value.
- `index`: Position of the element (0-indexed)
- `value`: The value to set
*/
set :: proc(x: ^$X/Array($T, $SHIFT), #any_int index: int, value: T, loc := #caller_location) #no_bounds_check {
array_set :: proc(x: ^$X/Array($T, $SHIFT), #any_int index: int, value: T, loc := #caller_location) #no_bounds_check {
runtime.bounds_check_error_loc(loc, index, x.len)
chunk_idx, elem_idx, _ := _meta_get(SHIFT, uint(index))
x.chunks[chunk_idx][elem_idx] = value
}
append :: proc{push_back_elem, push_back_elems}
push_back :: proc{push_back_elem, push_back_elems}
array_append :: proc{array_push_back_elem, array_push_back_elems}
array_push_back :: proc{array_push_back_elem, array_push_back_elems}
/*
Append an element to the end of the exponential array.
@@ -257,7 +257,7 @@ Example:
fmt.println(xar.get(&x, 1)) // world
}
*/
push_back_elem :: proc(x: ^$X/Array($T, $SHIFT), value: T, loc := #caller_location) -> (n: int, err: mem.Allocator_Error) {
array_push_back_elem :: proc(x: ^$X/Array($T, $SHIFT), value: T, loc := #caller_location) -> (n: int, err: runtime.Allocator_Error) {
if x.allocator.procedure == nil {
// to mimic `[dynamic]T` behaviour
x.allocator = context.allocator
@@ -284,14 +284,16 @@ Append multiple elements to the end of the exponential array.
- number of elements successfully added
- allocation error if chunk allocation failed (partial append possible)
*/
push_back_elems :: proc(x: ^$X/Array($T, $SHIFT), values: ..T, loc := #caller_location) -> (n: int, err: mem.Allocator_Error) {
array_push_back_elems :: proc(x: ^$X/Array($T, $SHIFT), values: ..T, loc := #caller_location) -> (n: int, err: runtime.Allocator_Error) {
for value in values {
n += push_back_elem(x, value, loc) or_return
n += array_push_back_elem(x, value, loc) or_return
}
return
}
append_and_get_ptr :: push_back_elem_and_get_ptr
array_append_and_get_ptr :: array_push_back_elem_and_get_ptr
append_and_get_ptr :: array_push_back_elem_and_get_ptr
/*
Append an element and return a stable pointer to it.
This is useful when you need to initialize a complex struct in-place or
@@ -318,7 +320,7 @@ Example:
}
*/
@(require_results)
push_back_elem_and_get_ptr :: proc(x: ^$X/Array($T, $SHIFT), value: T, loc := #caller_location) -> (ptr: ^T, err: mem.Allocator_Error) {
array_push_back_elem_and_get_ptr :: proc(x: ^$X/Array($T, $SHIFT), value: T, loc := #caller_location) -> (ptr: ^T, err: runtime.Allocator_Error) {
if x.allocator.procedure == nil {
// to mimic `[dynamic]T` behaviour
x.allocator = context.allocator
@@ -337,7 +339,7 @@ push_back_elem_and_get_ptr :: proc(x: ^$X/Array($T, $SHIFT), value: T, loc := #c
// `pop` will remove and return the end value of an exponential array `x` and reduces the length of the array by 1.
//
// Note: If the exponential array has no elements (`xar.len(x) == 0`), this procedure will panic.
pop :: proc(x: ^$X/Array($T, $SHIFT), loc := #caller_location) -> (val: T) {
array_pop :: proc(x: ^$X/Array($T, $SHIFT), loc := #caller_location) -> (val: T) {
assert(x.len > 0, loc=loc)
index := uint(x.len-1)
chunk_idx, elem_idx, _ := _meta_get(SHIFT, index)
@@ -348,7 +350,7 @@ pop :: proc(x: ^$X/Array($T, $SHIFT), loc := #caller_location) -> (val: T) {
// `pop_safe` tries to remove and return the end value of dynamic array `x` and reduces the length of the array by 1.
// If the operation is not possible, it will return false.
@(require_results)
pop_safe :: proc(x: ^$X/Array($T, $SHIFT)) -> (val: T, ok: bool) {
array_pop_safe :: proc(x: ^$X/Array($T, $SHIFT)) -> (val: T, ok: bool) {
if x.len == 0 {
return
}
@@ -390,16 +392,27 @@ pop_safe :: proc(x: ^$X/Array($T, $SHIFT)) -> (val: T, ok: bool) {
fmt.println(xar.get(&x, 1)) // 20
}
*/
unordered_remove :: proc(x: ^$X/Array($T, $SHIFT), #any_int index: int, loc := #caller_location) {
array_unordered_remove :: proc(x: ^$X/Array($T, $SHIFT), #any_int index: int, loc := #caller_location) {
runtime.bounds_check_error_loc(loc, index, x.len)
n := x.len-1
if index != n {
end := get(x, n)
set(x, index, end)
end := array_get(x, n)
array_set(x, index, end)
}
x.len -= 1
}
// Linearly scans the array for the first element equal to `elem`.
// Returns its index and true when found, otherwise (-1, false).
@(require_results)
array_linear_search :: proc(x: ^$X/Array($T, $SHIFT), elem: T) -> (index: int, found: bool) where intrinsics.type_is_comparable(T) {
	// The iterator must be addressable: `array_iterate_by_val` takes ^Array_Iterator.
	it := array_iterator(x)
	for val, i in array_iterate_by_val(&it) {
		if val == elem {
			return i, true
		}
	}
	return -1, false
}
/*
Iterator state for traversing a `Xar`.
@@ -408,7 +421,7 @@ Fields:
- `xar`: Pointer to the exponential array being iterated
- `idx`: Current iteration index
*/
Iterator :: struct($T: typeid, $SHIFT: uint) {
Array_Iterator :: struct($T: typeid, $SHIFT: uint) {
xar: ^Array(T, SHIFT),
idx: int,
}
@@ -447,7 +460,7 @@ Output:
20
30
*/
iterator :: proc(xar: ^$X/Array($T, $SHIFT)) -> Iterator(T, SHIFT) {
array_iterator :: proc(xar: ^$X/Array($T, $SHIFT)) -> Array_Iterator(T, SHIFT) {
return {xar = auto_cast xar, idx = 0}
}
@@ -461,11 +474,12 @@ Advance the iterator and returns the next element.
- current element
- `true` if an element was returned, `false` if iteration is complete
*/
// array_iterate_by_val advances the iterator and returns the next element
// by value.
//
// Returns:
// - `val`: the current element
// - `idx`: the index of the returned element
// - `ok`: true if an element was returned, false if iteration is complete
array_iterate_by_val :: proc(it: ^Array_Iterator($T, $SHIFT)) -> (val: T, idx: int, ok: bool) {
	if it.idx >= it.xar.len {
		return
	}
	val = array_get(it.xar, it.idx)
	idx = it.idx
	it.idx += 1
	// The procedure declares three results; all of them must be returned.
	return val, idx, true
}
@@ -481,11 +495,42 @@ Advance the iterator and returns a pointer to the next element.
- pointer to the current element
- `true` if an element was returned, `false` if iteration is complete
*/
// array_iterate_by_ptr advances the iterator and returns a pointer to the
// next element, allowing in-place modification.
//
// Returns:
// - `val`: pointer to the current element
// - `idx`: the index of the returned element
// - `ok`: true if an element was returned, false if iteration is complete
array_iterate_by_ptr :: proc(it: ^Array_Iterator($T, $SHIFT)) -> (val: ^T, idx: int, ok: bool) {
	if it.idx >= it.xar.len {
		return
	}
	val = array_get_ptr(it.xar, it.idx)
	idx = it.idx
	it.idx += 1
	// The procedure declares three results; all of them must be returned.
	return val, idx, true
}
// Procedure groups dispatching to the array and freelist implementations.
// Shared operations (work on either container):
init :: proc{array_init, freelist_init}
destroy :: proc{array_destroy, freelist_destroy}
clear :: proc{array_clear, freelist_clear}
len :: proc{array_len, freelist_len}
cap :: proc{array_cap, freelist_cap}
get :: proc{array_get, freelist_get}
get_ptr_unsafe :: proc{array_get_ptr_unsafe}
get_ptr :: proc{array_get_ptr, freelist_get_ptr}
set :: proc{array_set, freelist_set}
// Array-style stack operations:
append :: proc{array_push_back_elem, array_push_back_elems}
push_back :: proc{array_push_back_elem, array_push_back_elems}
push_back_elem :: proc{array_push_back_elem}
push_back_elems :: proc{array_push_back_elems}
push_back_elem_and_get_ptr :: proc{array_push_back_elem_and_get_ptr}
pop :: proc{array_pop, freelist_pop}
pop_safe :: proc{array_pop_safe}
unordered_remove :: proc{array_unordered_remove}
// Iteration and search:
iterator :: proc{array_iterator, freelist_iterator}
iterate_by_val :: proc{array_iterate_by_val, freelist_iterate_by_val}
iterate_by_ptr :: proc{array_iterate_by_ptr, freelist_iterate_by_ptr}
// Freelist-only operations:
push_with_index :: proc{freelist_push_with_index}
push :: proc{freelist_push}
release :: proc{freelist_release}
linear_search :: proc{array_linear_search, freelist_linear_search}
is_freed :: proc{freelist_is_freed}

View File

@@ -1,7 +1,6 @@
package aes_ct64
import "base:intrinsics"
import "core:mem"
import "core:crypto"
STRIDE :: 4
@@ -82,5 +81,5 @@ decrypt_blocks :: proc(ctx: ^Context, dst, src: [][]byte) {
// reset sanitizes the Context. The Context must be re-initialized to
// be used again.
// reset sanitizes the Context. The Context must be re-initialized to
// be used again.
reset :: proc(ctx: ^Context) {
	// size_of(ctx^) is the size of the Context itself; size_of(ctx) would
	// only be the size of the pointer and leave key material behind.
	crypto.zero_explicit(ctx, size_of(ctx^))
}

View File

@@ -22,8 +22,6 @@
package aes_ct64
import "base:intrinsics"
inv_sub_bytes :: proc "contextless" (q: ^[8]u64) {
// AES S-box is:
// S(x) = A(I(x)) ^ 0x63

View File

@@ -22,9 +22,9 @@
package aes_ct64
import "core:crypto"
import "core:crypto/_aes"
import "core:encoding/endian"
import "core:mem"
@(private, require_results)
sub_word :: proc "contextless" (x: u32) -> u32 {
@@ -35,7 +35,7 @@ sub_word :: proc "contextless" (x: u32) -> u32 {
orthogonalize(&q)
ret := u32(q[0])
mem.zero_explicit(&q[0], size_of(u64))
crypto.zero_explicit(&q[0], size_of(u64))
return ret
}
@@ -97,8 +97,8 @@ keysched :: proc "contextless" (comp_skey: []u64, key: []byte) -> int {
(q[7] & 0x8888888888888888)
}
mem.zero_explicit(&skey, size_of(skey))
mem.zero_explicit(&q, size_of(q))
crypto.zero_explicit(&skey, size_of(skey))
crypto.zero_explicit(&q, size_of(q))
return num_rounds
}

View File

@@ -25,7 +25,6 @@ package aes_hw_intel
import "base:intrinsics"
import "core:crypto/_aes"
import "core:mem"
import "core:simd/x86"
// Intel AES-NI based implementation. Inspiration taken from BearSSL.
@@ -174,5 +173,28 @@ keysched :: proc(ctx: ^Context, key: []byte) {
ctx._num_rounds = num_rounds
mem.zero_explicit(&sks, size_of(sks))
zero_explicit(&sks, size_of(sks))
}
/*
Set each byte of a memory range to zero.
This procedure copies the value `0` into the `len` bytes of a memory range,
starting at address `data`.
This procedure returns the pointer to `data`.
Unlike the `zero()` procedure, which can be optimized away or reordered by the
compiler under certain circumstances, `zero_explicit()` procedure can not be
optimized away or reordered with other memory access operations, and the
compiler assumes volatile semantics of the memory.
*/
// zero_explicit zeroes the `len` bytes starting at `data` and returns `data`.
// `len` is a byte count; callers typically pass `size_of(x)` for the object
// being sanitized.
zero_explicit :: proc "contextless" (data: rawptr, len: int) -> rawptr {
	// This routine tries to avoid the compiler optimizing away the call,
	// so that it is always executed. It is intended to provide
	// equivalent semantics to those provided by the C11 Annex K 3.7.4.1
	// memset_s call.
	intrinsics.mem_zero_volatile(data, len) // Use the volatile mem_zero
	intrinsics.atomic_thread_fence(.Seq_Cst) // Prevent reordering
	return data
}

View File

@@ -10,8 +10,8 @@ package _blake2
Implementation of the BLAKE2 hashing algorithm, as defined in <https://datatracker.ietf.org/doc/html/rfc7693> and <https://www.blake2.net/>
*/
import "base:intrinsics"
import "core:encoding/endian"
import "core:mem"
BLAKE2S_BLOCK_SIZE :: 64
BLAKE2S_SIZE :: 32
@@ -145,7 +145,7 @@ init :: proc "contextless" (ctx: ^$T, cfg: ^Blake2_Config) {
}
}
mem.zero(&ctx.x, size_of(ctx.x)) // Done with the scratch space, no barrier.
intrinsics.mem_zero(&ctx.x, size_of(ctx.x)) // Done with the scratch space, no barrier.
if cfg.tree != nil && cfg.tree.(Blake2_Tree).is_last_node {
ctx.is_last_node = true
@@ -222,7 +222,7 @@ reset :: proc "contextless" (ctx: ^$T) {
return
}
mem.zero_explicit(ctx, size_of(ctx^))
zero_explicit(ctx, size_of(ctx^))
}
@(private)
@@ -2877,3 +2877,27 @@ blake2b_blocks :: #force_inline proc "contextless" (ctx: ^Blake2b_Context, p: []
ctx.h[0], ctx.h[1], ctx.h[2], ctx.h[3], ctx.h[4], ctx.h[5], ctx.h[6], ctx.h[7] =
h0, h1, h2, h3, h4, h5, h6, h7
}
/*
Set each byte of a memory range to zero.
This procedure copies the value `0` into the `len` bytes of a memory range,
starting at address `data`.
This procedure returns the pointer to `data`.
Unlike the `zero()` procedure, which can be optimized away or reordered by the
compiler under certain circumstances, `zero_explicit()` procedure can not be
optimized away or reordered with other memory access operations, and the
compiler assumes volatile semantics of the memory.
*/
// zero_explicit zeroes the `len` bytes starting at `data` and returns `data`.
// The volatile zero must happen before the fence; do not reorder these lines.
@(private)
zero_explicit :: proc "contextless" (data: rawptr, len: int) -> rawptr {
	// This routine tries to avoid the compiler optimizing away the call,
	// so that it is always executed. It is intended to provide
	// equivalent semantics to those provided by the C11 Annex K 3.7.4.1
	// memset_s call.
	intrinsics.mem_zero_volatile(data, len) // Use the volatile mem_zero
	intrinsics.atomic_thread_fence(.Seq_Cst) // Prevent reordering
	return data
}

View File

@@ -1,8 +1,8 @@
package _chacha20
import "core:crypto"
import "core:encoding/endian"
import "core:math/bits"
import "core:mem"
// KEY_SIZE is the (X)ChaCha20 key size in bytes.
KEY_SIZE :: 32
@@ -88,8 +88,8 @@ seek :: proc(ctx: ^Context, block_nr: u64) {
// reset sanitizes the Context. The Context must be re-initialized to
// be used again.
reset :: proc(ctx: ^Context) {
mem.zero_explicit(&ctx._s, size_of(ctx._s))
mem.zero_explicit(&ctx._buffer, size_of(ctx._buffer))
crypto.zero_explicit(&ctx._s, size_of(ctx._s))
crypto.zero_explicit(&ctx._buffer, size_of(ctx._buffer))
ctx._is_initialized = false
}
@@ -116,4 +116,4 @@ check_counter_limit :: proc(ctx: ^Context, nr_blocks: int) {
}
ensure(ctr_ok, "crypto/chacha20: maximum (X)ChaCha20 keystream per IV reached")
}
}

View File

@@ -13,7 +13,8 @@ See:
import "core:crypto"
import field "core:crypto/_fiat/field_curve25519"
import "core:mem"
zero_explicit :: crypto.zero_explicit
// Group_Element is an edwards25519 group element, as extended homogenous
// coordinates, which represents the affine point `(x, y)` as `(X, Y, Z, T)`,
@@ -96,7 +97,7 @@ Group_Element :: struct {
}
ge_clear :: proc "contextless" (ge: ^Group_Element) {
mem.zero_explicit(ge, size_of(Group_Element))
zero_explicit(ge, size_of(Group_Element))
}
ge_set :: proc "contextless" (ge, a: ^Group_Element) {
@@ -159,7 +160,7 @@ ge_set_bytes :: proc "contextless" (ge: ^Group_Element, b: []byte) -> bool {
ge_cond_assign(ge, &tmp, is_canonical)
mem.zero_explicit(&buf, size_of(buf))
zero_explicit(&buf, size_of(buf))
return is_canonical == 1
}
@@ -231,8 +232,8 @@ ge_add :: proc "contextless" (ge, a, b: ^Group_Element) {
scratch: Add_Scratch = ---
ge_add_addend(ge, a, &b_, &scratch)
mem.zero_explicit(&b_, size_of(Addend_Group_Element))
mem.zero_explicit(&scratch, size_of(Add_Scratch))
zero_explicit(&b_, size_of(Addend_Group_Element))
zero_explicit(&scratch, size_of(Add_Scratch))
}
@(private)
@@ -352,7 +353,7 @@ ge_double :: proc "contextless" (ge, a: ^Group_Element, scratch: ^Double_Scratch
field.fe_carry_mul(&ge.z, F, G_)
if sanitize {
mem.zero_explicit(scratch, size_of(Double_Scratch))
zero_explicit(scratch, size_of(Double_Scratch))
}
}
@@ -420,4 +421,4 @@ ge_in_prime_order_subgroup_vartime :: proc "contextless" (ge: ^Group_Element) ->
tmp: Group_Element = ---
ge_scalarmult_raw(&tmp, ge, &SC_ELL, true)
return ge_equal(&tmp, &GE_IDENTITY) == 1
}
}

View File

@@ -1,7 +1,6 @@
package _edwards25519
import field "core:crypto/_fiat/field_scalar25519"
import "core:mem"
Scalar :: field.Montgomery_Domain_Field_Element
@@ -19,7 +18,7 @@ sc_set_u64 :: proc "contextless" (sc: ^Scalar, i: u64) {
tmp := field.Non_Montgomery_Domain_Field_Element{i, 0, 0, 0}
field.fe_to_montgomery(sc, &tmp)
mem.zero_explicit(&tmp, size_of(tmp))
zero_explicit(&tmp, size_of(tmp))
}
@(require_results)
@@ -36,7 +35,7 @@ sc_set_bytes_rfc8032 :: proc "contextless" (sc: ^Scalar, b: []byte) {
}
sc_clear :: proc "contextless" (sc: ^Scalar) {
mem.zero_explicit(sc, size_of(Scalar))
zero_explicit(sc, size_of(Scalar))
}
sc_set :: field.fe_set

View File

@@ -3,7 +3,6 @@ package _edwards25519
import "core:crypto"
import field "core:crypto/_fiat/field_scalar25519"
import subtle "core:crypto/_subtle"
import "core:mem"
ge_scalarmult :: proc "contextless" (ge, p: ^Group_Element, sc: ^Scalar) {
tmp: field.Non_Montgomery_Domain_Field_Element
@@ -11,7 +10,7 @@ ge_scalarmult :: proc "contextless" (ge, p: ^Group_Element, sc: ^Scalar) {
ge_scalarmult_raw(ge, p, &tmp)
mem.zero_explicit(&tmp, size_of(tmp))
zero_explicit(&tmp, size_of(tmp))
}
ge_scalarmult_vartime :: proc "contextless" (ge, p: ^Group_Element, sc: ^Scalar) {
@@ -134,9 +133,9 @@ ge_scalarmult_raw :: proc "contextless" (
if !unsafe_is_vartime {
ge_clear(&tmp)
mem.zero_explicit(&tmp_add, size_of(Add_Scratch))
mem.zero_explicit(&tmp_addend, size_of(Addend_Group_Element))
mem.zero_explicit(&tmp_dbl, size_of(Double_Scratch))
zero_explicit(&tmp_add, size_of(Add_Scratch))
zero_explicit(&tmp_addend, size_of(Addend_Group_Element))
zero_explicit(&tmp_dbl, size_of(Double_Scratch))
}
}

View File

@@ -4,7 +4,6 @@ import "core:crypto"
import field "core:crypto/_fiat/field_curve25519"
import scalar "core:crypto/_fiat/field_scalar25519"
import subtle "core:crypto/_subtle"
import "core:mem"
ge_scalarmult_basepoint :: proc "contextless" (ge: ^Group_Element, sc: ^Scalar) {
when crypto.COMPACT_IMPLS == true {
@@ -27,9 +26,9 @@ ge_scalarmult_basepoint :: proc "contextless" (ge: ^Group_Element, sc: ^Scalar)
mul_bp_tbl_add(ge, &Gen_Multiply_Table_edwards25519_hi[i], hi, &tmp_add, &tmp_addend, false)
}
mem.zero_explicit(&tmp_sc, size_of(tmp_sc))
mem.zero_explicit(&tmp_add, size_of(Add_Scratch))
mem.zero_explicit(&tmp_addend, size_of(Basepoint_Addend_Group_Element))
zero_explicit(&tmp_sc, size_of(tmp_sc))
zero_explicit(&tmp_add, size_of(Add_Scratch))
zero_explicit(&tmp_addend, size_of(Basepoint_Addend_Group_Element))
}
}

View File

@@ -1,7 +1,8 @@
package field_curve25519
import "core:crypto"
import "core:mem"
zero_explicit :: crypto.zero_explicit
fe_relax_cast :: #force_inline proc "contextless" (
arg1: ^Tight_Field_Element,
@@ -18,7 +19,7 @@ fe_tighten_cast :: #force_inline proc "contextless" (
fe_clear :: proc "contextless" (
arg1: $T,
) where T == ^Tight_Field_Element || T == ^Loose_Field_Element {
mem.zero_explicit(arg1, size_of(arg1^))
zero_explicit(arg1, size_of(arg1^))
}
fe_clear_vec :: proc "contextless" (
@@ -38,7 +39,7 @@ fe_from_bytes :: proc "contextless" (out1: ^Tight_Field_Element, arg1: ^[32]byte
_fe_from_bytes(out1, &tmp1)
mem.zero_explicit(&tmp1, size_of(tmp1))
zero_explicit(&tmp1, size_of(tmp1))
}
fe_is_negative :: proc "contextless" (arg1: ^Tight_Field_Element) -> int {
@@ -47,7 +48,7 @@ fe_is_negative :: proc "contextless" (arg1: ^Tight_Field_Element) -> int {
fe_to_bytes(&tmp1, arg1)
ret := tmp1[0] & 1
mem.zero_explicit(&tmp1, size_of(tmp1))
zero_explicit(&tmp1, size_of(tmp1))
return int(ret)
}
@@ -59,8 +60,8 @@ fe_equal :: proc "contextless" (arg1, arg2: ^Tight_Field_Element) -> int {
fe_to_bytes(&tmp2, arg2)
ret := crypto.compare_constant_time(tmp1[:], tmp2[:])
mem.zero_explicit(&tmp1, size_of(tmp1))
mem.zero_explicit(&tmp2, size_of(tmp2))
zero_explicit(&tmp1, size_of(tmp1))
zero_explicit(&tmp2, size_of(tmp2))
return ret
}
@@ -72,7 +73,7 @@ fe_equal_bytes :: proc "contextless" (arg1: ^Tight_Field_Element, arg2: ^[32]byt
ret := crypto.compare_constant_time(tmp1[:], arg2[:])
mem.zero_explicit(&tmp1, size_of(tmp1))
zero_explicit(&tmp1, size_of(tmp1))
return ret
}
@@ -175,7 +176,7 @@ fe_carry_sqrt_ratio_m1 :: proc "contextless" (
fe_carry_abs(out1, r)
fe_clear_vec([]^Tight_Field_Element{&w, &tmp1, &tmp2, &tmp3})
mem.zero_explicit(&b, size_of(b))
zero_explicit(&b, size_of(b))
return correct_sign_sqrt | flipped_sign_sqrt
}

View File

@@ -1,6 +1,6 @@
package field_curve448
import "core:mem"
import "core:crypto"
fe_relax_cast :: #force_inline proc "contextless" (
arg1: ^Tight_Field_Element,
@@ -17,7 +17,7 @@ fe_tighten_cast :: #force_inline proc "contextless" (
fe_clear :: proc "contextless" (
arg1: $T,
) where T == ^Tight_Field_Element || T == ^Loose_Field_Element {
mem.zero_explicit(arg1, size_of(arg1^))
crypto.zero_explicit(arg1, size_of(arg1^))
}
fe_clear_vec :: proc "contextless" (

View File

@@ -1,12 +1,12 @@
package field_p256r1
import "core:crypto"
import subtle "core:crypto/_subtle"
import "core:encoding/endian"
import "core:math/bits"
import "core:mem"
fe_clear :: proc "contextless" (arg1: ^Montgomery_Domain_Field_Element) {
mem.zero_explicit(arg1, size_of(Montgomery_Domain_Field_Element))
crypto.zero_explicit(arg1, size_of(Montgomery_Domain_Field_Element))
}
fe_clear_vec :: proc "contextless" (
@@ -31,7 +31,7 @@ fe_from_bytes :: proc "contextless" (
endian.unchecked_get_u64be(arg1[8:]),
endian.unchecked_get_u64be(arg1[0:]),
}
defer mem.zero_explicit(&tmp, size_of(tmp))
defer crypto.zero_explicit(&tmp, size_of(tmp))
// Check that tmp is in the the range [0, ELL).
if !unsafe_assume_canonical {
@@ -61,7 +61,7 @@ fe_to_bytes :: proc "contextless" (out1: []byte, arg1: ^Montgomery_Domain_Field_
endian.unchecked_put_u64be(out1[8:], tmp[2])
endian.unchecked_put_u64be(out1[0:], tmp[3])
mem.zero_explicit(&tmp, size_of(tmp))
crypto.zero_explicit(&tmp, size_of(tmp))
}
@(require_results)
@@ -81,7 +81,7 @@ fe_equal :: proc "contextless" (arg1, arg2: ^Montgomery_Domain_Field_Element) ->
@(require_results)
fe_is_odd :: proc "contextless" (arg1: ^Montgomery_Domain_Field_Element) -> int {
tmp: Non_Montgomery_Domain_Field_Element = ---
defer mem.zero_explicit(&tmp, size_of(tmp))
defer crypto.zero_explicit(&tmp, size_of(tmp))
fe_from_montgomery(&tmp, arg1)
return int(tmp[0] & 1)

View File

@@ -1,12 +1,12 @@
package field_p384r1
import "core:crypto"
import subtle "core:crypto/_subtle"
import "core:encoding/endian"
import "core:math/bits"
import "core:mem"
fe_clear :: proc "contextless" (arg1: ^Montgomery_Domain_Field_Element) {
mem.zero_explicit(arg1, size_of(Montgomery_Domain_Field_Element))
crypto.zero_explicit(arg1, size_of(Montgomery_Domain_Field_Element))
}
fe_clear_vec :: proc "contextless" (
@@ -33,7 +33,7 @@ fe_from_bytes :: proc "contextless" (
endian.unchecked_get_u64be(arg1[8:]),
endian.unchecked_get_u64be(arg1[0:]),
}
defer mem.zero_explicit(&tmp, size_of(tmp))
defer crypto.zero_explicit(&tmp, size_of(tmp))
// Check that tmp is in the the range [0, ELL).
if !unsafe_assume_canonical {
@@ -67,7 +67,7 @@ fe_to_bytes :: proc "contextless" (out1: []byte, arg1: ^Montgomery_Domain_Field_
endian.unchecked_put_u64be(out1[8:], tmp[4])
endian.unchecked_put_u64be(out1[0:], tmp[5])
mem.zero_explicit(&tmp, size_of(tmp))
crypto.zero_explicit(&tmp, size_of(tmp))
}
@(require_results)
@@ -87,7 +87,7 @@ fe_equal :: proc "contextless" (arg1, arg2: ^Montgomery_Domain_Field_Element) ->
@(require_results)
fe_is_odd :: proc "contextless" (arg1: ^Montgomery_Domain_Field_Element) -> int {
tmp: Non_Montgomery_Domain_Field_Element = ---
defer mem.zero_explicit(&tmp, size_of(tmp))
defer crypto.zero_explicit(&tmp, size_of(tmp))
fe_from_montgomery(&tmp, arg1)
return int(tmp[0] & 1)

View File

@@ -1,7 +1,7 @@
package field_poly1305
import "core:crypto"
import "core:encoding/endian"
import "core:mem"
fe_relax_cast :: #force_inline proc "contextless" (
arg1: ^Tight_Field_Element,
@@ -57,7 +57,7 @@ fe_from_u64s :: proc "contextless" (out1: ^Tight_Field_Element, lo, hi: u64) {
_fe_from_bytes(out1, &tmp)
// This routine is only used to deserialize `r` which is confidential.
mem.zero_explicit(&tmp, size_of(tmp))
crypto.zero_explicit(&tmp, size_of(tmp))
}
fe_zero :: proc "contextless" (out1: ^Tight_Field_Element) {

View File

@@ -1,9 +1,9 @@
package field_scalar25519
import "core:crypto"
import subtle "core:crypto/_subtle"
import "core:encoding/endian"
import "core:math/bits"
import "core:mem"
@(private, rodata)
_TWO_168 := Montgomery_Domain_Field_Element {
@@ -21,7 +21,7 @@ _TWO_336 := Montgomery_Domain_Field_Element {
}
fe_clear :: proc "contextless" (arg1: ^Montgomery_Domain_Field_Element) {
mem.zero_explicit(arg1, size_of(Montgomery_Domain_Field_Element))
crypto.zero_explicit(arg1, size_of(Montgomery_Domain_Field_Element))
}
fe_from_bytes :: proc "contextless" (
@@ -35,7 +35,7 @@ fe_from_bytes :: proc "contextless" (
endian.unchecked_get_u64le(arg1[16:]),
endian.unchecked_get_u64le(arg1[24:]),
}
defer mem.zero_explicit(&tmp, size_of(tmp))
defer crypto.zero_explicit(&tmp, size_of(tmp))
// Check that tmp is in the the range [0, ELL).
if !unsafe_assume_canonical {
@@ -67,7 +67,7 @@ fe_from_bytes_rfc8032 :: proc "contextless" (
fe_from_bytes_wide(out1, &tmp)
mem.zero_explicit(&tmp, size_of(tmp))
crypto.zero_explicit(&tmp, size_of(tmp))
}
fe_from_bytes_wide :: proc "contextless" (
@@ -101,7 +101,7 @@ _fe_from_bytes_short :: proc "contextless" (out1: ^Montgomery_Domain_Field_Eleme
copy(tmp[:], arg1)
_ = fe_from_bytes(out1, &tmp, true)
mem.zero_explicit(&tmp, size_of(tmp))
crypto.zero_explicit(&tmp, size_of(tmp))
}
fe_to_bytes :: proc "contextless" (out1: []byte, arg1: ^Montgomery_Domain_Field_Element) {
@@ -115,7 +115,7 @@ fe_to_bytes :: proc "contextless" (out1: []byte, arg1: ^Montgomery_Domain_Field_
endian.unchecked_put_u64le(out1[16:], tmp[2])
endian.unchecked_put_u64le(out1[24:], tmp[3])
mem.zero_explicit(&tmp, size_of(tmp))
crypto.zero_explicit(&tmp, size_of(tmp))
}
fe_equal :: proc "contextless" (arg1, arg2: ^Montgomery_Domain_Field_Element) -> int {

View File

@@ -1,9 +1,9 @@
package field_scalarp256r1
import "core:crypto"
import subtle "core:crypto/_subtle"
import "core:encoding/endian"
import "core:math/bits"
import "core:mem"
@(private, rodata)
TWO_192 := Montgomery_Domain_Field_Element{
@@ -23,7 +23,7 @@ TWO_384 := Montgomery_Domain_Field_Element{
// 0x431905529c0166ce652e96b7ccca0a99679b73e19ad16947f01cf013fc632551
fe_clear :: proc "contextless" (arg1: ^Montgomery_Domain_Field_Element) {
mem.zero_explicit(arg1, size_of(Montgomery_Domain_Field_Element))
crypto.zero_explicit(arg1, size_of(Montgomery_Domain_Field_Element))
}
fe_clear_vec :: proc "contextless" (
@@ -67,7 +67,7 @@ fe_from_bytes :: proc "contextless" (
// Zero extend to 512-bits.
src_512: [64]byte
copy(src_512[64-s_len:], arg1)
defer mem.zero_explicit(&src_512, size_of(src_512))
defer crypto.zero_explicit(&src_512, size_of(src_512))
fe_unchecked_set(out1, src_512[40:]) // a
b: Montgomery_Domain_Field_Element
@@ -102,7 +102,7 @@ fe_is_canonical :: proc "contextless" (arg1: []byte) -> bool {
@(private)
fe_unchecked_set :: proc "contextless" (out1: ^Montgomery_Domain_Field_Element, arg1: []byte) {
arg1_256: [32]byte
defer mem.zero_explicit(&arg1_256, size_of(arg1_256))
defer crypto.zero_explicit(&arg1_256, size_of(arg1_256))
copy(arg1_256[32-len(arg1):], arg1)
tmp := Non_Montgomery_Domain_Field_Element {
@@ -111,7 +111,7 @@ fe_unchecked_set :: proc "contextless" (out1: ^Montgomery_Domain_Field_Element,
endian.unchecked_get_u64be(arg1_256[8:]),
endian.unchecked_get_u64be(arg1_256[0:]),
}
defer mem.zero_explicit(&tmp, size_of(tmp))
defer crypto.zero_explicit(&tmp, size_of(tmp))
fe_to_montgomery(out1, &tmp)
}
@@ -128,7 +128,7 @@ fe_to_bytes :: proc "contextless" (out1: []byte, arg1: ^Montgomery_Domain_Field_
endian.unchecked_put_u64be(out1[8:], tmp[2])
endian.unchecked_put_u64be(out1[0:], tmp[3])
mem.zero_explicit(&tmp, size_of(tmp))
crypto.zero_explicit(&tmp, size_of(tmp))
}
fe_equal :: proc "contextless" (arg1, arg2: ^Montgomery_Domain_Field_Element) -> int {
@@ -144,7 +144,7 @@ fe_equal :: proc "contextless" (arg1, arg2: ^Montgomery_Domain_Field_Element) ->
fe_is_odd :: proc "contextless" (arg1: ^Montgomery_Domain_Field_Element) -> int {
tmp: Non_Montgomery_Domain_Field_Element = ---
defer mem.zero_explicit(&tmp, size_of(tmp))
defer crypto.zero_explicit(&tmp, size_of(tmp))
fe_from_montgomery(&tmp, arg1)
return int(tmp[0] & 1)

View File

@@ -1,9 +1,9 @@
package field_scalarp384r1
import "core:crypto"
import subtle "core:crypto/_subtle"
import "core:encoding/endian"
import "core:math/bits"
import "core:mem"
@(private, rodata)
TWO_256 := Montgomery_Domain_Field_Element{
@@ -16,7 +16,7 @@ TWO_256 := Montgomery_Domain_Field_Element{
}
fe_clear :: proc "contextless" (arg1: ^Montgomery_Domain_Field_Element) {
mem.zero_explicit(arg1, size_of(Montgomery_Domain_Field_Element))
crypto.zero_explicit(arg1, size_of(Montgomery_Domain_Field_Element))
}
fe_clear_vec :: proc "contextless" (
@@ -50,8 +50,8 @@ fe_from_bytes :: proc "contextless" (
tmp: Non_Montgomery_Domain_Field_Element = ---
fe_unchecked_set_saturated(&tmp, arg1)
reduced := tmp
defer mem.zero_explicit(&tmp, size_of(tmp))
defer mem.zero_explicit(&reduced, size_of(reduced))
defer crypto.zero_explicit(&tmp, size_of(tmp))
defer crypto.zero_explicit(&reduced, size_of(reduced))
borrow: u64
reduced[0], borrow = bits.sub_u64(tmp[0], ELL[0], borrow)
@@ -78,7 +78,7 @@ fe_from_bytes :: proc "contextless" (
// Zero extend to 512-bits.
src_512: [64]byte
copy(src_512[64-s_len:], arg1)
defer mem.zero_explicit(&src_512, size_of(src_512))
defer crypto.zero_explicit(&src_512, size_of(src_512))
fe_unchecked_set(out1, src_512[32:]) // a
b: Montgomery_Domain_Field_Element
@@ -117,12 +117,12 @@ fe_unchecked_set_saturated :: proc "contextless" (out1: ^Non_Montgomery_Domain_F
@(private)
fe_unchecked_set :: proc "contextless" (out1: ^Montgomery_Domain_Field_Element, arg1: []byte) {
arg1_384: [48]byte
defer mem.zero_explicit(&arg1_384, size_of(arg1_384))
defer crypto.zero_explicit(&arg1_384, size_of(arg1_384))
copy(arg1_384[48-len(arg1):], arg1)
tmp: Non_Montgomery_Domain_Field_Element = ---
fe_unchecked_set_saturated(&tmp, arg1_384[:])
defer mem.zero_explicit(&tmp, size_of(tmp))
defer crypto.zero_explicit(&tmp, size_of(tmp))
fe_to_montgomery(out1, &tmp)
}
@@ -141,7 +141,7 @@ fe_to_bytes :: proc "contextless" (out1: []byte, arg1: ^Montgomery_Domain_Field_
endian.unchecked_put_u64be(out1[8:], tmp[4])
endian.unchecked_put_u64be(out1[0:], tmp[5])
mem.zero_explicit(&tmp, size_of(tmp))
crypto.zero_explicit(&tmp, size_of(tmp))
}
fe_equal :: proc "contextless" (arg1, arg2: ^Montgomery_Domain_Field_Element) -> int {
@@ -157,7 +157,7 @@ fe_equal :: proc "contextless" (arg1, arg2: ^Montgomery_Domain_Field_Element) ->
fe_is_odd :: proc "contextless" (arg1: ^Montgomery_Domain_Field_Element) -> int {
tmp: Non_Montgomery_Domain_Field_Element = ---
defer mem.zero_explicit(&tmp, size_of(tmp))
defer crypto.zero_explicit(&tmp, size_of(tmp))
fe_from_montgomery(&tmp, arg1)
return int(tmp[0] & 1)

View File

@@ -16,7 +16,7 @@ package _sha3
*/
import "core:math/bits"
import "core:mem"
import "core:crypto"
ROUNDS :: 24
@@ -179,7 +179,7 @@ reset :: proc "contextless" (ctx: ^Context) {
return
}
mem.zero_explicit(ctx, size_of(ctx^))
crypto.zero_explicit(ctx, size_of(ctx^))
}
shake_xof :: proc "contextless" (ctx: ^Context) {

View File

@@ -1,6 +1,6 @@
package _weierstrass
@(require) import "core:mem"
@(require) import "core:crypto"
@(private)
SEC_PREFIX_IDENTITY :: 0x00
@@ -92,7 +92,7 @@ pt_sec_bytes :: proc "contextless" (b: []byte, p: ^$T, compressed: bool) -> bool
// 1 redundant rescale call.
y_is_odd := byte(y[FE_SZ-1] & 1)
b[0] = SEC_PREFIX_COMPRESSED_EVEN + y_is_odd
mem.zero_explicit(&y_, size_of(y_))
crypto.zero_explicit(&y_, size_of(y_))
}
return true

View File

@@ -2,7 +2,6 @@ package _weierstrass
import "core:crypto"
@(require) import subtle "core:crypto/_subtle"
@(require) import "core:mem"
pt_scalar_mul :: proc "contextless" (
p, a: ^$T,
@@ -23,7 +22,7 @@ pt_scalar_mul :: proc "contextless" (
pt_scalar_mul_bytes(p, a, b[:], unsafe_is_vartime)
if !unsafe_is_vartime {
mem.zero_explicit(&b, size_of(b))
crypto.zero_explicit(&b, size_of(b))
}
}
@@ -69,7 +68,7 @@ pt_scalar_mul_bytes :: proc "contextless" (
pt_set(p, &q)
if !unsafe_is_vartime {
mem.zero_explicit(&p_tbl, size_of(p_tbl))
crypto.zero_explicit(&p_tbl, size_of(p_tbl))
pt_clear_vec([]^T{&q, &tmp})
}
}
@@ -116,7 +115,7 @@ when crypto.COMPACT_IMPLS == true {
}
if !unsafe_is_vartime {
mem.zero_explicit(&b, size_of(b))
crypto.zero_explicit(&b, size_of(b))
pt_clear(&tmp)
}
}

View File

@@ -11,7 +11,6 @@ package aegis
import "core:bytes"
import "core:crypto"
import "core:crypto/aes"
import "core:mem"
// KEY_SIZE_128L is the AEGIS-128L key size in bytes.
KEY_SIZE_128L :: 16
@@ -197,8 +196,8 @@ open :: proc(ctx: ^Context, dst, iv, aad, ciphertext, tag: []byte) -> bool {
}
if crypto.compare_constant_time(tag, derived_tag) != 1 {
mem.zero_explicit(raw_data(derived_tag), len(derived_tag))
mem.zero_explicit(raw_data(dst), ct_len)
crypto.zero_explicit(raw_data(derived_tag), len(derived_tag))
crypto.zero_explicit(raw_data(dst), ct_len)
return false
}
@@ -208,7 +207,7 @@ open :: proc(ctx: ^Context, dst, iv, aad, ciphertext, tag: []byte) -> bool {
// reset sanitizes the Context. The Context must be
// re-initialized to be used again.
reset :: proc "contextless" (ctx: ^Context) {
mem.zero_explicit(&ctx._key, len(ctx._key))
crypto.zero_explicit(&ctx._key, len(ctx._key))
ctx._key_len = 0
ctx._is_initialized = false
}
}

View File

@@ -1,8 +1,8 @@
package aegis
import "core:crypto"
import aes "core:crypto/_aes/ct64"
import "core:encoding/endian"
import "core:mem"
// This uses the bitlsiced 64-bit general purpose register SWAR AES
// round function. The intermediate state is stored in interleaved
@@ -324,7 +324,7 @@ dec_sw_256 :: #force_inline proc "contextless" (st: ^State_SW, xi, ci: []byte) #
@(private = "file")
dec_partial_sw_128l :: proc "contextless" (st: ^State_SW, xn, cn: []byte) #no_bounds_check {
tmp: [_RATE_128L]byte
defer mem.zero_explicit(&tmp, size_of(tmp))
defer crypto.zero_explicit(&tmp, size_of(tmp))
z0_0, z0_1, z1_0, z1_1 := z_sw_128l(st)
copy(tmp[:], cn)
@@ -349,7 +349,7 @@ dec_partial_sw_128l :: proc "contextless" (st: ^State_SW, xn, cn: []byte) #no_bo
@(private = "file")
dec_partial_sw_256 :: proc "contextless" (st: ^State_SW, xn, cn: []byte) #no_bounds_check {
tmp: [_RATE_256]byte
defer mem.zero_explicit(&tmp, size_of(tmp))
defer crypto.zero_explicit(&tmp, size_of(tmp))
z_0, z_1 := z_sw_256(st)
copy(tmp[:], cn)
@@ -448,5 +448,5 @@ finalize_sw :: proc "contextless" (st: ^State_SW, tag: []byte, ad_len, msg_len:
@(private)
reset_state_sw :: proc "contextless" (st: ^State_SW) {
mem.zero_explicit(st, size_of(st^))
crypto.zero_explicit(st, size_of(st^))
}

View File

@@ -2,9 +2,9 @@
package aegis
import "base:intrinsics"
import "core:crypto"
import "core:crypto/aes"
import "core:encoding/endian"
import "core:mem"
import "core:simd/x86"
@(private)
@@ -261,7 +261,7 @@ dec_hw_256 :: #force_inline proc "contextless" (st: ^State_HW, xi, ci: []byte) #
@(private = "file", enable_target_feature = "sse2,aes")
dec_partial_hw_128l :: #force_inline proc "contextless" (st: ^State_HW, xn, cn: []byte) #no_bounds_check {
tmp: [_RATE_128L]byte
defer mem.zero_explicit(&tmp, size_of(tmp))
defer crypto.zero_explicit(&tmp, size_of(tmp))
z0, z1 := z_hw_128l(st)
copy(tmp[:], cn)
@@ -286,7 +286,7 @@ dec_partial_hw_128l :: #force_inline proc "contextless" (st: ^State_HW, xn, cn:
@(private = "file", enable_target_feature = "sse2,aes")
dec_partial_hw_256 :: #force_inline proc "contextless" (st: ^State_HW, xn, cn: []byte) #no_bounds_check {
tmp: [_RATE_256]byte
defer mem.zero_explicit(&tmp, size_of(tmp))
defer crypto.zero_explicit(&tmp, size_of(tmp))
z := z_hw_256(st)
copy(tmp[:], cn)
@@ -385,5 +385,5 @@ finalize_hw :: proc "contextless" (st: ^State_HW, tag: []byte, ad_len, msg_len:
@(private)
reset_state_hw :: proc "contextless" (st: ^State_HW) {
mem.zero_explicit(st, size_of(st^))
crypto.zero_explicit(st, size_of(st^))
}

View File

@@ -4,7 +4,6 @@ import "core:bytes"
import "core:crypto/_aes/ct64"
import "core:encoding/endian"
import "core:math/bits"
import "core:mem"
// CTR_IV_SIZE is the size of the CTR mode IV in bytes.
CTR_IV_SIZE :: 16
@@ -117,7 +116,7 @@ reset_ctr :: proc "contextless" (ctx: ^Context_CTR) {
ctx._off = 0
ctx._ctr_hi = 0
ctx._ctr_lo = 0
mem.zero_explicit(&ctx._buffer, size_of(ctx._buffer))
zero_explicit(&ctx._buffer, size_of(ctx._buffer))
ctx._is_initialized = false
}
@@ -172,7 +171,7 @@ ctr_blocks :: proc(ctx: ^Context_CTR, dst, src: []byte, nr_blocks: int) #no_boun
// Write back the counter.
ctx._ctr_hi, ctx._ctr_lo = ctr_hi, ctr_lo
mem.zero_explicit(&tmp, size_of(tmp))
zero_explicit(&tmp, size_of(tmp))
}
@(private)

View File

@@ -4,7 +4,6 @@ package aes
import "base:intrinsics"
import "core:crypto/_aes"
import "core:math/bits"
import "core:mem"
import "core:simd/x86"
@(private)
@@ -130,8 +129,8 @@ ctr_blocks_hw :: proc(ctx: ^Context_CTR, dst, src: []byte, nr_blocks: int) #no_b
// Write back the counter.
ctx._ctr_hi, ctx._ctr_lo = ctr_hi, ctr_lo
mem.zero_explicit(&blks, size_of(blks))
mem.zero_explicit(&sks, size_of(sks))
zero_explicit(&blks, size_of(blks))
zero_explicit(&sks, size_of(sks))
}
@(private, enable_target_feature = "sse2")

View File

@@ -5,7 +5,6 @@ import "core:crypto"
import "core:crypto/_aes"
import "core:crypto/_aes/ct64"
import "core:encoding/endian"
import "core:mem"
// GCM_IV_SIZE is the default size of the GCM IV in bytes.
GCM_IV_SIZE :: 12
@@ -59,9 +58,9 @@ seal_gcm :: proc(ctx: ^Context_GCM, dst, tag, iv, aad, plaintext: []byte) {
final_ghash_ct64(&s, &h, &j0_enc, len(aad), len(plaintext))
copy(tag, s[:])
mem.zero_explicit(&h, len(h))
mem.zero_explicit(&j0, len(j0))
mem.zero_explicit(&j0_enc, len(j0_enc))
zero_explicit(&h, len(h))
zero_explicit(&j0, len(j0))
zero_explicit(&j0_enc, len(j0_enc))
}
// open_gcm authenticates the aad and ciphertext, and decrypts the ciphertext,
@@ -94,13 +93,13 @@ open_gcm :: proc(ctx: ^Context_GCM, dst, iv, aad, ciphertext, tag: []byte) -> bo
ok := crypto.compare_constant_time(s[:], tag) == 1
if !ok {
mem.zero_explicit(raw_data(dst), len(dst))
zero_explicit(raw_data(dst), len(dst))
}
mem.zero_explicit(&h, len(h))
mem.zero_explicit(&j0, len(j0))
mem.zero_explicit(&j0_enc, len(j0_enc))
mem.zero_explicit(&s, len(s))
zero_explicit(&h, len(h))
zero_explicit(&j0, len(j0))
zero_explicit(&j0_enc, len(j0_enc))
zero_explicit(&s, len(s))
return ok
}
@@ -249,6 +248,6 @@ gctr_ct64 :: proc(
}
}
mem.zero_explicit(&tmp, size_of(tmp))
mem.zero_explicit(&tmp2, size_of(tmp2))
zero_explicit(&tmp, size_of(tmp))
zero_explicit(&tmp2, size_of(tmp2))
}

View File

@@ -6,7 +6,6 @@ import "core:crypto"
import "core:crypto/_aes"
import "core:crypto/_aes/hw_intel"
import "core:encoding/endian"
import "core:mem"
import "core:simd/x86"
@(private)
@@ -23,9 +22,9 @@ gcm_seal_hw :: proc(ctx: ^Context_Impl_Hardware, dst, tag, iv, aad, plaintext: [
final_ghash_hw(&s, &h, &j0_enc, len(aad), len(plaintext))
copy(tag, s[:])
mem.zero_explicit(&h, len(h))
mem.zero_explicit(&j0, len(j0))
mem.zero_explicit(&j0_enc, len(j0_enc))
zero_explicit(&h, len(h))
zero_explicit(&j0, len(j0))
zero_explicit(&j0_enc, len(j0_enc))
}
@(private)
@@ -42,13 +41,13 @@ gcm_open_hw :: proc(ctx: ^Context_Impl_Hardware, dst, iv, aad, ciphertext, tag:
ok := crypto.compare_constant_time(s[:], tag) == 1
if !ok {
mem.zero_explicit(raw_data(dst), len(dst))
zero_explicit(raw_data(dst), len(dst))
}
mem.zero_explicit(&h, len(h))
mem.zero_explicit(&j0, len(j0))
mem.zero_explicit(&j0_enc, len(j0_enc))
mem.zero_explicit(&s, len(s))
zero_explicit(&h, len(h))
zero_explicit(&j0, len(j0))
zero_explicit(&j0_enc, len(j0_enc))
zero_explicit(&s, len(s))
return ok
}
@@ -228,8 +227,8 @@ gctr_hw :: proc(
n -= l
}
mem.zero_explicit(&blks, size_of(blks))
mem.zero_explicit(&sks, size_of(sks))
zero_explicit(&blks, size_of(blks))
zero_explicit(&sks, size_of(sks))
}
// BUG: Sticking this in gctr_hw (like the other implementations) crashes

View File

@@ -1,9 +1,11 @@
package aes
import "core:crypto"
import "core:crypto/_aes/ct64"
import "core:mem"
import "core:reflect"
zero_explicit :: crypto.zero_explicit
@(private)
Context_Impl :: union {
ct64.Context,
@@ -41,5 +43,5 @@ init_impl :: proc(ctx: ^Context_Impl, key: []byte, impl: Implementation) {
@(private)
reset_impl :: proc "contextless" (ctx: ^Context_Impl) {
mem.zero_explicit(ctx, size_of(Context_Impl))
}
zero_explicit(ctx, size_of(Context_Impl))
}

View File

@@ -8,8 +8,8 @@ See:
package chacha20
import "core:bytes"
import "core:crypto"
import "core:crypto/_chacha20"
import "core:mem"
// KEY_SIZE is the (X)ChaCha20 key size in bytes.
KEY_SIZE :: _chacha20.KEY_SIZE
@@ -50,7 +50,7 @@ init :: proc(ctx: ^Context, key, iv: []byte, impl := DEFAULT_IMPLEMENTATION) {
// The sub-key is stored in the keystream buffer. While
// this will be overwritten in most circumstances, explicitly
// clear it out early.
mem.zero_explicit(&ctx._state._buffer, KEY_SIZE)
crypto.zero_explicit(&ctx._state._buffer, KEY_SIZE)
}
}

View File

@@ -13,7 +13,6 @@ import "core:crypto"
import "core:crypto/chacha20"
import "core:crypto/poly1305"
import "core:encoding/endian"
import "core:mem"
// KEY_SIZE is the chacha20poly1305 key size in bytes.
KEY_SIZE :: chacha20.KEY_SIZE
@@ -103,7 +102,7 @@ seal :: proc(ctx: ^Context, dst, tag, iv, aad, plaintext: []byte) {
chacha20.keystream_bytes(&stream_ctx, otk[:])
mac_ctx: poly1305.Context = ---
poly1305.init(&mac_ctx, otk[:])
mem.zero_explicit(&otk, size_of(otk))
crypto.zero_explicit(&otk, size_of(otk))
aad_len, ciphertext_len := len(aad), len(ciphertext)
@@ -164,7 +163,7 @@ open :: proc(ctx: ^Context, dst, iv, aad, ciphertext, tag: []byte) -> bool {
mac_ctx: poly1305.Context = ---
poly1305.init(&mac_ctx, otk[:])
defer mem.zero_explicit(&otk, size_of(otk))
defer crypto.zero_explicit(&otk, size_of(otk))
aad_len, ciphertext_len := len(aad), len(ciphertext)
@@ -188,7 +187,7 @@ open :: proc(ctx: ^Context, dst, iv, aad, ciphertext, tag: []byte) -> bool {
// Validate the tag in constant time.
if crypto.compare_constant_time(tag, derived_tag) != 1 {
// Zero out the plaintext, as a defense in depth measure.
mem.zero_explicit(raw_data(plaintext), ciphertext_len)
crypto.zero_explicit(raw_data(plaintext), ciphertext_len)
return false
}
@@ -202,7 +201,7 @@ open :: proc(ctx: ^Context, dst, iv, aad, ciphertext, tag: []byte) -> bool {
// reset sanitizes the Context. The Context must be
// re-initialized to be used again.
reset :: proc "contextless" (ctx: ^Context) {
mem.zero_explicit(&ctx._key, len(ctx._key))
crypto.zero_explicit(&ctx._key, len(ctx._key))
ctx._is_xchacha = false
ctx._is_initialized = false
}

View File

@@ -1,9 +1,9 @@
// A selection of cryptography algorithms and useful helper routines.
package crypto
import "base:intrinsics"
import "base:runtime"
import subtle "core:crypto/_subtle"
import "core:mem"
// Omit large precomputed tables, trading off performance for size.
COMPACT_IMPLS: bool : #config(ODIN_CRYPTO_COMPACT, false)
@@ -38,8 +38,8 @@ compare_constant_time :: proc "contextless" (a, b: []byte) -> int {
// contents of the memory being compared.
@(optimization_mode="none")
compare_byte_ptrs_constant_time :: proc "contextless" (a, b: ^byte, n: int) -> int {
x := mem.slice_ptr(a, n)
y := mem.slice_ptr(b, n)
x := ([^]byte)(a)[:n]
y := ([^]byte)(b)[:n]
v: byte
for i in 0..<n {
@@ -61,6 +61,41 @@ is_zero_constant_time :: proc "contextless" (b: []byte) -> int {
return subtle.byte_eq(0, v)
}
/*
Set each byte of a memory range to zero.
This procedure copies the value `0` into the `len` bytes of a memory range,
starting at address `data`.
This procedure returns the pointer to `data`.
Unlike the `zero()` procedure, which can be optimized away or reordered by the
compiler under certain circumstances, `zero_explicit()` procedure can not be
optimized away or reordered with other memory access operations, and the
compiler assumes volatile semantics of the memory.
*/
zero_explicit :: proc "contextless" (data: rawptr, len: int) -> rawptr {
// This routine tries to avoid the compiler optimizing away the call,
// so that it is always executed. It is intended to provide
// equivalent semantics to those provided by the C11 Annex K 3.7.4.1
// memset_s call.
intrinsics.mem_zero_volatile(data, len) // Use the volatile mem_zero
intrinsics.atomic_thread_fence(.Seq_Cst) // Prevent reordering
return data
}
/*
Set each byte of a memory range to a specific value.
This procedure copies value specified by the `value` parameter into each of the
`len` bytes of a memory range, located at address `data`.
This procedure returns the pointer to `data`.
*/
set :: proc "contextless" (data: rawptr, value: byte, len: int) -> rawptr {
return runtime.memset(data, i32(value), len)
}
// rand_bytes fills the dst buffer with cryptographic entropy taken from
// the system entropy source. This routine will block if the system entropy
// source is not ready yet. All system entropy source failures are treated
@@ -70,7 +105,7 @@ is_zero_constant_time :: proc "contextless" (b: []byte) -> int {
// `HAS_RAND_BYTES` boolean constant.
rand_bytes :: proc (dst: []byte) {
// zero-fill the buffer first
mem.zero_explicit(raw_data(dst), len(dst))
zero_explicit(raw_data(dst), len(dst))
runtime.rand_bytes(dst)
}

View File

@@ -8,8 +8,8 @@ package deoxysii
import "base:intrinsics"
import "core:bytes"
import "core:crypto"
import "core:crypto/aes"
import "core:mem"
import "core:simd"
// KEY_SIZE is the Deoxys-II-256 key size in bytes.
@@ -142,7 +142,7 @@ open :: proc(ctx: ^Context, dst, iv, aad, ciphertext, tag: []byte) -> bool {
ok = d_ref(ctx, dst, iv, aad, ciphertext, tag)
}
if !ok {
mem.zero_explicit(raw_data(dst), len(ciphertext))
crypto.zero_explicit(raw_data(dst), len(ciphertext))
}
return ok
@@ -151,7 +151,7 @@ open :: proc(ctx: ^Context, dst, iv, aad, ciphertext, tag: []byte) -> bool {
// reset sanitizes the Context. The Context must be
// re-initialized to be used again.
reset :: proc "contextless" (ctx: ^Context) {
mem.zero_explicit(&ctx._subkeys, len(ctx._subkeys))
crypto.zero_explicit(&ctx._subkeys, len(ctx._subkeys))
ctx._is_initialized = false
}

View File

@@ -4,7 +4,6 @@ import "base:intrinsics"
import "core:crypto"
import aes "core:crypto/_aes/ct64"
import "core:encoding/endian"
import "core:mem"
import "core:simd"
// This uses the bitlsiced 64-bit general purpose register SWAR AES
@@ -149,8 +148,8 @@ bc_absorb :: proc "contextless" (
intrinsics.unaligned_store((^simd.u8x16)(raw_data(dst)), dst_)
mem.zero_explicit(&tweaks, size_of(tweaks))
mem.zero_explicit(&tmp, size_of(tmp))
crypto.zero_explicit(&tweaks, size_of(tweaks))
crypto.zero_explicit(&tmp, size_of(tmp))
return stk_block_nr
}
@@ -214,8 +213,8 @@ bc_encrypt :: proc "contextless" (
nr_blocks -= n
}
mem.zero_explicit(&tweaks, size_of(tweaks))
mem.zero_explicit(&tmp, size_of(tmp))
crypto.zero_explicit(&tweaks, size_of(tweaks))
crypto.zero_explicit(&tmp, size_of(tmp))
return stk_block_nr
}
@@ -295,13 +294,13 @@ e_ref :: proc "contextless" (ctx: ^Context, dst, tag, iv, aad, plaintext: []byte
copy(dst[n*BLOCK_SIZE:], m_star[:])
mem.zero_explicit(&m_star, size_of(m_star))
crypto.zero_explicit(&m_star, size_of(m_star))
}
copy(tag, auth[:])
mem.zero_explicit(&st.q_stk, size_of(st.q_stk))
mem.zero_explicit(&st.q_b, size_of(st.q_b))
crypto.zero_explicit(&st.q_stk, size_of(st.q_stk))
crypto.zero_explicit(&st.q_b, size_of(st.q_b))
}
@(private, require_results)
@@ -336,7 +335,7 @@ d_ref :: proc "contextless" (ctx: ^Context, dst, iv, aad, ciphertext, tag: []byt
copy(dst[n*BLOCK_SIZE:], m_star[:])
mem.zero_explicit(&m_star, size_of(m_star))
crypto.zero_explicit(&m_star, size_of(m_star))
}
// Associated data
@@ -382,7 +381,7 @@ d_ref :: proc "contextless" (ctx: ^Context, dst, iv, aad, ciphertext, tag: []byt
_ = bc_absorb(&st, auth[:], m_star[:], PREFIX_MSG_FINAL, n)
mem.zero_explicit(&m_star, size_of(m_star))
crypto.zero_explicit(&m_star, size_of(m_star))
}
bc_final(&st, auth[:], iv)
@@ -391,9 +390,9 @@ d_ref :: proc "contextless" (ctx: ^Context, dst, iv, aad, ciphertext, tag: []byt
// else return false
ok := crypto.compare_constant_time(auth[:], tag) == 1
mem.zero_explicit(&auth, size_of(auth))
mem.zero_explicit(&st.q_stk, size_of(st.q_stk))
mem.zero_explicit(&st.q_b, size_of(st.q_b))
crypto.zero_explicit(&auth, size_of(auth))
crypto.zero_explicit(&st.q_stk, size_of(st.q_stk))
crypto.zero_explicit(&st.q_b, size_of(st.q_b))
return ok
}
}

View File

@@ -4,7 +4,6 @@ package deoxysii
import "base:intrinsics"
import "core:crypto"
import "core:crypto/aes"
import "core:mem"
import "core:simd"
import "core:simd/x86"
@@ -374,7 +373,7 @@ d_hw :: proc "contextless" (ctx: ^Context, dst, iv, aad, ciphertext, tag: []byte
copy(dst[n*BLOCK_SIZE:], m_star[:])
mem.zero_explicit(&m_star, size_of(m_star))
crypto.zero_explicit(&m_star, size_of(m_star))
}
// Associated data
@@ -428,7 +427,7 @@ d_hw :: proc "contextless" (ctx: ^Context, dst, iv, aad, ciphertext, tag: []byte
intrinsics.unaligned_store((^x86.__m128i)(raw_data(&tmp)), auth)
ok := crypto.compare_constant_time(tmp[:], tag) == 1
mem.zero_explicit(&tmp, size_of(tmp))
crypto.zero_explicit(&tmp, size_of(tmp))
return ok
}

View File

@@ -4,7 +4,6 @@ import "core:crypto"
import secec "core:crypto/_weierstrass"
import "core:crypto/x25519"
import "core:crypto/x448"
import "core:mem"
import "core:reflect"
// Note: For these primitives scalar size = point size
@@ -125,7 +124,7 @@ private_key_generate :: proc(priv_key: ^Private_Key, curve: Curve) -> bool {
// 384-bits reduced makes the modulo bias insignificant
b: [48]byte = ---
defer (mem.zero_explicit(&b, size_of(b)))
defer (crypto.zero_explicit(&b, size_of(b)))
for {
crypto.rand_bytes(b[:])
_ = secec.sc_set_bytes(sc, b[:])
@@ -137,7 +136,7 @@ private_key_generate :: proc(priv_key: ^Private_Key, curve: Curve) -> bool {
sc := &priv_key._impl.(secec.Scalar_p384r1)
b: [48]byte = ---
defer (mem.zero_explicit(&b, size_of(b)))
defer (crypto.zero_explicit(&b, size_of(b)))
for {
crypto.rand_bytes(b[:])
did_reduce := secec.sc_set_bytes(sc, b[:])
@@ -292,7 +291,7 @@ private_key_equal :: proc(p, q: ^Private_Key) -> bool {
// private_key_clear clears priv_key to the uninitialized state.
private_key_clear :: proc "contextless" (priv_key: ^Private_Key) {
mem.zero_explicit(priv_key, size_of(Private_Key))
crypto.zero_explicit(priv_key, size_of(Private_Key))
}
// public_key_set_bytes decodes a byte-encoded public key, and returns
@@ -412,7 +411,7 @@ public_key_equal :: proc(p, q: ^Public_Key) -> bool {
// public_key_clear clears pub_key to the uninitialized state.
public_key_clear :: proc "contextless" (pub_key: ^Public_Key) {
mem.zero_explicit(pub_key, size_of(Public_Key))
crypto.zero_explicit(pub_key, size_of(Public_Key))
}
// ecdh performs an Elliptic Curve Diffie-Hellman key exchange betwween

View File

@@ -11,7 +11,6 @@ package ed25519
import "core:crypto"
import grp "core:crypto/_edwards25519"
import "core:crypto/sha2"
import "core:mem"
// PRIVATE_KEY_SIZE is the byte-encoded private key size.
PRIVATE_KEY_SIZE :: 32
@@ -89,7 +88,7 @@ private_key_bytes :: proc(priv_key: ^Private_Key, dst: []byte) {
// private_key_clear clears priv_key to the uninitialized state.
private_key_clear :: proc "contextless" (priv_key: ^Private_Key) {
mem.zero_explicit(priv_key, size_of(Private_Key))
crypto.zero_explicit(priv_key, size_of(Private_Key))
}
// sign writes the signature by priv_key over msg to sig.

View File

@@ -8,8 +8,8 @@ package crypto_hash
zhibog, dotbmp: Initial implementation.
*/
import "core:crypto"
import "core:io"
import "core:mem"
// hash_bytes will hash the given input and return the computed digest
// in a newly allocated slice.
@@ -61,7 +61,7 @@ hash_stream :: proc(
ctx: Context
buf: [MAX_BLOCK_SIZE * 4]byte
defer mem.zero_explicit(&buf, size_of(buf))
defer crypto.zero_explicit(&buf, size_of(buf))
init(&ctx, algorithm)

View File

@@ -0,0 +1,10 @@
#+build js
package crypto_hash
hash :: proc {
hash_stream,
hash_bytes,
hash_string,
hash_bytes_to_buffer,
hash_string_to_buffer,
}

View File

@@ -1,26 +1,27 @@
#+build !freestanding
#+build !js
package crypto_hash
import "core:io"
import "core:os"
// hash_file will read the file provided by the given handle and return the
// `hash_file` will read the file provided by the given handle and return the
// computed digest in a newly allocated slice.
hash_file :: proc(
algorithm: Algorithm,
hd: os.Handle,
hash_file_by_handle :: proc(
algorithm: Algorithm,
handle: ^os.File,
load_at_once := false,
allocator := context.allocator,
allocator := context.allocator,
) -> (
[]byte,
io.Error,
) {
if !load_at_once {
return hash_stream(algorithm, os.stream_from_handle(hd), allocator)
return hash_stream(algorithm, os.to_stream(handle), allocator)
}
buf, ok := os.read_entire_file(hd, allocator)
if !ok {
buf, err := os.read_entire_file(handle, allocator)
if err != nil {
return nil, io.Error.Unknown
}
defer delete(buf, allocator)
@@ -28,11 +29,30 @@ hash_file :: proc(
return hash_bytes(algorithm, buf, allocator), io.Error.None
}
hash_file_by_name :: proc(
algorithm: Algorithm,
filename: string,
load_at_once := false,
allocator := context.allocator,
) -> (
[]byte,
io.Error,
) {
handle, err := os.open(filename)
defer os.close(handle)
if err != nil {
return {}, io.Error.Unknown
}
return hash_file_by_handle(algorithm, handle, load_at_once, allocator)
}
hash :: proc {
hash_stream,
hash_file,
hash_file_by_handle,
hash_bytes,
hash_string,
hash_bytes_to_buffer,
hash_string_to_buffer,
}
}

View File

@@ -5,9 +5,9 @@ See: [[ https://www.rfc-editor.org/rfc/rfc5869 ]]
*/
package hkdf
import "core:crypto"
import "core:crypto/hash"
import "core:crypto/hmac"
import "core:mem"
// extract_and_expand derives output keying material (OKM) via the
// HKDF-Extract and HKDF-Expand algorithms, with the specified has
@@ -18,7 +18,7 @@ extract_and_expand :: proc(algorithm: hash.Algorithm, salt, ikm, info, dst: []by
tmp: [hash.MAX_DIGEST_SIZE]byte
prk := tmp[:h_len]
defer mem.zero_explicit(raw_data(prk), h_len)
defer crypto.zero_explicit(raw_data(prk), h_len)
extract(algorithm, salt, ikm, prk)
expand(algorithm, prk, info, dst)
@@ -83,7 +83,7 @@ expand :: proc(algorithm: hash.Algorithm, prk, info, dst: []byte) {
if r > 0 {
tmp: [hash.MAX_DIGEST_SIZE]byte
blk := tmp[:h_len]
defer mem.zero_explicit(raw_data(blk), h_len)
defer crypto.zero_explicit(raw_data(blk), h_len)
_F(&base, prev, info, n + 1, blk)
copy(dst_blk, blk)

View File

@@ -8,7 +8,6 @@ package hmac
import "core:crypto"
import "core:crypto/hash"
import "core:mem"
// sum will compute the HMAC with the specified algorithm and key
// over msg, and write the computed tag to dst. It requires that
@@ -126,7 +125,7 @@ _init_hashes :: proc(ctx: ^Context, algorithm: hash.Algorithm, key: []byte) {
kLen := len(key)
B := hash.BLOCK_SIZES[algorithm]
K0 := K0_buf[:B]
defer mem.zero_explicit(raw_data(K0), B)
defer crypto.zero_explicit(raw_data(K0), B)
switch {
case kLen == B, kLen < B:
@@ -157,7 +156,7 @@ _init_hashes :: proc(ctx: ^Context, algorithm: hash.Algorithm, key: []byte) {
hash.init(&ctx._i_hash, algorithm)
kPad := kPad_buf[:B]
defer mem.zero_explicit(raw_data(kPad), B)
defer crypto.zero_explicit(raw_data(kPad), B)
for v, i in K0 {
kPad[i] = v ~ _I_PAD

View File

@@ -18,9 +18,9 @@ package md5
zhibog, dotbmp: Initial implementation.
*/
import "core:crypto"
import "core:encoding/endian"
import "core:math/bits"
import "core:mem"
// DIGEST_SIZE is the MD5 digest size in bytes.
DIGEST_SIZE :: 16
@@ -100,7 +100,7 @@ final :: proc(ctx: ^Context, hash: []byte, finalize_clone: bool = false) {
i += 1
}
transform(ctx, ctx.data[:])
mem.set(&ctx.data, 0, 56)
crypto.set(&ctx.data, 0, 56)
}
ctx.bitlen += u64(ctx.datalen * 8)
@@ -124,7 +124,7 @@ reset :: proc(ctx: ^$T) {
return
}
mem.zero_explicit(ctx, size_of(ctx^))
crypto.zero_explicit(ctx, size_of(ctx^))
}
/*

View File

@@ -19,9 +19,9 @@ package sha1
zhibog, dotbmp: Initial implementation.
*/
import "core:crypto"
import "core:encoding/endian"
import "core:math/bits"
import "core:mem"
// DIGEST_SIZE is the SHA1 digest size in bytes.
DIGEST_SIZE :: 20
@@ -107,7 +107,7 @@ final :: proc(ctx: ^Context, hash: []byte, finalize_clone: bool = false) {
i += 1
}
transform(ctx, ctx.data[:])
mem.set(&ctx.data, 0, 56)
crypto.set(&ctx.data, 0, 56)
}
ctx.bitlen += u64(ctx.datalen * 8)
@@ -131,7 +131,7 @@ reset :: proc(ctx: ^$T) {
return
}
mem.zero_explicit(ctx, size_of(ctx^))
crypto.zero_explicit(ctx, size_of(ctx^))
}
/*

View File

@@ -5,10 +5,10 @@ See: [[ https://www.rfc-editor.org/rfc/rfc2898 ]]
*/
package pbkdf2
import "core:crypto"
import "core:crypto/hash"
import "core:crypto/hmac"
import "core:encoding/endian"
import "core:mem"
// derive invokes PBKDF2-HMAC with the specified hash algorithm, password,
// salt, iteration count, and outputs the derived key to dst.
@@ -71,7 +71,7 @@ derive :: proc(
if r > 0 {
tmp: [hash.MAX_DIGEST_SIZE]byte
blk := tmp[:h_len]
defer mem.zero_explicit(raw_data(blk), h_len)
defer crypto.zero_explicit(raw_data(blk), h_len)
_F(&base, salt, iterations, u32(l + 1), blk)
copy(dst_blk, blk)
@@ -84,7 +84,7 @@ _F :: proc(base: ^hmac.Context, salt: []byte, c: u32, i: u32, dst_blk: []byte) {
tmp: [hash.MAX_DIGEST_SIZE]byte
u := tmp[:h_len]
defer mem.zero_explicit(raw_data(u), h_len)
defer crypto.zero_explicit(raw_data(u), h_len)
// F (P, S, c, i) = U_1 \xor U_2 \xor ... \xor U_c
//

View File

@@ -10,7 +10,6 @@ import "core:crypto"
import field "core:crypto/_fiat/field_poly1305"
import "core:encoding/endian"
import "core:math/bits"
import "core:mem"
// KEY_SIZE is the Poly1305 key size in bytes.
KEY_SIZE :: 32
@@ -155,10 +154,10 @@ final :: proc(ctx: ^Context, dst: []byte) {
// reset sanitizes the Context. The Context must be re-initialized to
// be used again.
reset :: proc(ctx: ^Context) {
mem.zero_explicit(&ctx._r, size_of(ctx._r))
mem.zero_explicit(&ctx._a, size_of(ctx._a))
mem.zero_explicit(&ctx._s, size_of(ctx._s))
mem.zero_explicit(&ctx._buffer, size_of(ctx._buffer))
crypto.zero_explicit(&ctx._r, size_of(ctx._r))
crypto.zero_explicit(&ctx._a, size_of(ctx._a))
crypto.zero_explicit(&ctx._s, size_of(ctx._s))
crypto.zero_explicit(&ctx._buffer, size_of(ctx._buffer))
ctx._is_initialized = false
}

View File

@@ -6,9 +6,9 @@ See:
*/
package ristretto255
import "core:crypto"
import grp "core:crypto/_edwards25519"
import field "core:crypto/_fiat/field_curve25519"
import "core:mem"
// ELEMENT_SIZE is the size of a byte-encoded ristretto255 group element.
ELEMENT_SIZE :: 32
@@ -71,7 +71,7 @@ Group_Element :: struct {
// ge_clear clears ge to the uninitialized state.
ge_clear :: proc "contextless" (ge: ^Group_Element) {
mem.zero_explicit(ge, size_of(Group_Element))
crypto.zero_explicit(ge, size_of(Group_Element))
}
// ge_set sets `ge = a`.

View File

@@ -15,9 +15,9 @@ package sha2
zhibog, dotbmp: Initial implementation.
*/
@(require) import "core:crypto"
@(require) import "core:encoding/endian"
import "core:math/bits"
@(require) import "core:mem"
// DIGEST_SIZE_224 is the SHA-224 digest size in bytes.
DIGEST_SIZE_224 :: 28
@@ -260,7 +260,7 @@ reset :: proc(ctx: ^$T) {
return
}
mem.zero_explicit(ctx, size_of(ctx^))
crypto.zero_explicit(ctx, size_of(ctx^))
}
/*

View File

@@ -14,9 +14,9 @@ package sm3
zhibog, dotbmp: Initial implementation.
*/
import "core:crypto"
import "core:encoding/endian"
import "core:math/bits"
import "core:mem"
// DIGEST_SIZE is the SM3 digest size in bytes.
DIGEST_SIZE :: 32
@@ -126,7 +126,7 @@ reset :: proc(ctx: ^Context) {
return
}
mem.zero_explicit(ctx, size_of(ctx^))
crypto.zero_explicit(ctx, size_of(ctx^))
}
/*

View File

@@ -9,7 +9,6 @@ package x25519
import "core:crypto"
import ed "core:crypto/_edwards25519"
import field "core:crypto/_fiat/field_curve25519"
import "core:mem"
// SCALAR_SIZE is the size of a X25519 scalar (private key) in bytes.
SCALAR_SIZE :: 32
@@ -119,7 +118,7 @@ scalarmult :: proc(dst, scalar, point: []byte) {
d := (^[32]byte)(raw_data(dst))
_scalarmult(d, &e, p)
mem.zero_explicit(&e, size_of(e))
crypto.zero_explicit(&e, size_of(e))
}
// scalarmult_basepoint "multiplies" the provided scalar with the X25519

View File

@@ -6,8 +6,8 @@ See:
*/
package x448
import "core:crypto"
import field "core:crypto/_fiat/field_curve448"
import "core:mem"
// SCALAR_SIZE is the size of a X448 scalar (private key) in bytes.
SCALAR_SIZE :: 56
@@ -143,8 +143,8 @@ scalarmult :: proc(dst, scalar, point: []byte) {
_scalarmult(&d, &e, &p)
copy_slice(dst, d[:])
mem.zero_explicit(&e, size_of(e))
mem.zero_explicit(&d, size_of(d))
crypto.zero_explicit(&e, size_of(e))
crypto.zero_explicit(&d, size_of(d))
}
// scalarmult_basepoint "multiplies" the provided scalar with the X448

View File

@@ -9,8 +9,8 @@ truncate it from the encoded output.
*/
package encoding_base64
import "base:runtime"
import "core:io"
import "core:mem"
import "core:strings"
ENC_TABLE := [64]byte {
@@ -110,7 +110,7 @@ DEC_URL_TABLE := [256]u8 {
}
encode :: proc(data: []byte, ENC_TBL := ENC_TABLE, allocator := context.allocator) -> (encoded: string, err: mem.Allocator_Error) #optional_allocator_error {
encode :: proc(data: []byte, ENC_TBL := ENC_TABLE, allocator := context.allocator) -> (encoded: string, err: runtime.Allocator_Error) #optional_allocator_error {
out_length := encoded_len(data)
if out_length == 0 {
return
@@ -161,7 +161,7 @@ encoded_len :: proc(data: []byte) -> int {
return ((4 * length / 3) + 3) &~ 3
}
decode :: proc(data: string, DEC_TBL := DEC_TABLE, allocator := context.allocator) -> (decoded: []byte, err: mem.Allocator_Error) #optional_allocator_error {
decode :: proc(data: string, DEC_TBL := DEC_TABLE, allocator := context.allocator) -> (decoded: []byte, err: runtime.Allocator_Error) #optional_allocator_error {
out_length := decoded_len(data)
out := strings.builder_make(0, out_length, allocator) or_return

View File

@@ -16,14 +16,15 @@ Example:
r.reuse_record_buffer = true // Without it you have to each of the fields within it
defer csv.reader_destroy(&r)
csv_data, ok := os.read_entire_file(filename)
if ok {
csv_data, csv_err := os.read_entire_file(filename, context.allocator)
defer delete(csv_data)
if csv_err == nil {
csv.reader_init_with_string(&r, string(csv_data))
} else {
fmt.printfln("Unable to open file: %v", filename)
fmt.eprintfln("Unable to open file: %v. Error: %v", filename, csv_err)
return
}
defer delete(csv_data)
for r, i, err in csv.iterator_next(&r) {
if err != nil { /* Do something with error */ }
@@ -39,16 +40,16 @@ Example:
r: csv.Reader
r.trim_leading_space = true
r.reuse_record = true // Without it you have to delete(record)
r.reuse_record_buffer = true // Without it you have to each of the fields within it
r.reuse_record_buffer = true // Without it you have to delete each of the fields within it
defer csv.reader_destroy(&r)
handle, err := os.open(filename)
defer os.close(handle)
if err != nil {
fmt.eprintfln("Error opening file: %v", filename)
fmt.eprintfln("Unable to open file: %v. Error: %v", filename, err)
return
}
defer os.close(handle)
csv.reader_init(&r, os.stream_from_handle(handle))
csv.reader_init(&r, handle.stream)
for r, i in csv.iterator_next(&r) {
for f, j in r {
@@ -64,21 +65,23 @@ Example:
r.trim_leading_space = true
defer csv.reader_destroy(&r)
csv_data, ok := os.read_entire_file(filename)
if ok {
csv.reader_init_with_string(&r, string(csv_data))
} else {
fmt.printfln("Unable to open file: %v", filename)
csv_data, csv_err := os.read_entire_file(filename, context.allocator)
defer delete(csv_data, context.allocator)
if err != nil {
fmt.eprintfln("Unable to open file: %v. Error: %v", filename, csv_err)
return
}
defer delete(csv_data)
csv.reader_init_with_string(&r, string(csv_data))
records, err := csv.read_all(&r)
if err != nil { /* Do something with CSV parse error */ }
defer {
for rec in records {
delete(rec)
for record in records {
for field in record {
delete(field)
}
delete(record)
}
delete(records)
}

View File

@@ -1,35 +1,115 @@
// Encoding and decoding of hex-encoded binary, e.g. `0x23` -> `#`.
package encoding_hex
import "base:runtime"
import "core:io"
import "core:strings"
encode :: proc(src: []byte, allocator := context.allocator, loc := #caller_location) -> []byte #no_bounds_check {
dst := make([]byte, len(src) * 2, allocator, loc)
for i, j := 0, 0; i < len(src); i += 1 {
/*
Encodes a byte slice into a lowercase hex sequence
*Allocates Using Provided Allocator*
Inputs:
- src: The `[]byte` to be hex-encoded
- allocator: (default: context.allocator)
- loc: The caller location for debugging purposes (default: #caller_location)
Returns:
- res: The hex-encoded result
- err: An optional allocator error if one occured, `.None` otherwise
*/
encode :: proc(src: []byte, allocator := context.allocator, loc := #caller_location) -> (res: []byte, err: runtime.Allocator_Error) #optional_allocator_error {
res, err = make([]byte, len(src) * 2, allocator, loc)
#no_bounds_check for i, j := 0, 0; i < len(src); i += 1 {
v := src[i]
dst[j] = HEXTABLE[v>>4]
dst[j+1] = HEXTABLE[v&0x0f]
res[j] = LOWER[v>>4]
res[j+1] = LOWER[v&0x0f]
j += 2
}
return dst
return
}
encode_into_writer :: proc(dst: io.Writer, src: []byte) -> io.Error {
/*
Encodes a byte slice as a lowercase hex sequence into an `io.Writer`
Inputs:
- dst: The `io.Writer` to encode into
- src: The `[]byte` to be hex-encoded
Returns:
- err: An `io.Error` if one occured, `.None` otherwise
*/
encode_into_writer :: proc(dst: io.Writer, src: []byte) -> (err: io.Error) {
for v in src {
io.write(dst, {HEXTABLE[v>>4], HEXTABLE[v&0x0f]}) or_return
io.write(dst, {LOWER[v>>4], LOWER[v&0x0f]}) or_return
}
return nil
return
}
decode :: proc(src: []byte, allocator := context.allocator, loc := #caller_location) -> (dst: []byte, ok: bool) #no_bounds_check {
/*
Encodes a byte slice into an uppercase hex sequence
*Allocates Using Provided Allocator*
Inputs:
- src: The `[]byte` to be hex-encoded
- allocator: (default: context.allocator)
- loc: The caller location for debugging purposes (default: #caller_location)
Returns:
- res: The hex-encoded result
- err: An optional allocator error if one occured, `.None` otherwise
*/
encode_upper :: proc(src: []byte, allocator := context.allocator, loc := #caller_location) -> (res: []byte, err: runtime.Allocator_Error) #optional_allocator_error {
res, err = make([]byte, len(src) * 2, allocator, loc)
#no_bounds_check for i, j := 0, 0; i < len(src); i += 1 {
v := src[i]
res[j] = UPPER[v>>4]
res[j+1] = UPPER[v&0x0f]
j += 2
}
return
}
/*
Encodes a byte slice as an uppercase hex sequence into an `io.Writer`
Inputs:
- dst: The `io.Writer` to encode into
- src: The `[]byte` to be hex-encoded
Returns:
- err: An `io.Error` if one occured, `.None` otherwise
*/
encode_upper_into_writer :: proc(dst: io.Writer, src: []byte) -> (err: io.Error) {
for v in src {
io.write(dst, {UPPER[v>>4], UPPER[v&0x0f]}) or_return
}
return
}
/*
Decodes a hex sequence into a byte slice
*Allocates Using Provided Allocator*
Inputs:
- dst: The hex sequence decoded into bytes
- src: The `[]byte` to be hex-decoded
- allocator: (default: context.allocator)
- loc: The caller location for debugging purposes (default: #caller_location)
Returns:
- ok: A bool, `true` if decoding succeeded, `false` otherwise
*/
decode :: proc(src: []byte, allocator := context.allocator, loc := #caller_location) -> (dst: []byte, ok: bool) {
if len(src) % 2 == 1 {
return
}
dst = make([]byte, len(src) / 2, allocator, loc)
for i, j := 0, 1; j < len(src); j += 2 {
#no_bounds_check for i, j := 0, 1; j < len(src); j += 2 {
p := src[j-1]
q := src[j]
@@ -43,8 +123,16 @@ decode :: proc(src: []byte, allocator := context.allocator, loc := #caller_locat
return dst, true
}
// Decodes the given sequence into one byte.
// Should be called with one byte worth of the source, eg: 0x23 -> '#'.
/*
Decodes the first byte in a hex sequence to a byte
Inputs:
- str: A hex-encoded `string`, e.g. `"0x23"`
Returns:
- res: The decoded byte, e.g. `'#'`
- ok: A bool, `true` if decoding succeeded, `false` otherwise
*/
decode_sequence :: proc(str: string) -> (res: byte, ok: bool) {
str := str
if strings.has_prefix(str, "0x") || strings.has_prefix(str, "0X") {
@@ -62,13 +150,21 @@ decode_sequence :: proc(str: string) -> (res: byte, ok: bool) {
}
@(private)
HEXTABLE := [16]byte {
LOWER := [16]byte {
'0', '1', '2', '3',
'4', '5', '6', '7',
'8', '9', 'a', 'b',
'c', 'd', 'e', 'f',
}
@(private)
UPPER := [16]byte {
'0', '1', '2', '3',
'4', '5', '6', '7',
'8', '9', 'A', 'B',
'C', 'D', 'E', 'F',
}
@(private)
hex_digit :: proc(char: byte) -> (u8, bool) {
switch char {

View File

@@ -1,6 +1,6 @@
package encoding_hxa
import "core:mem"
import "base:runtime"
LATEST_VERSION :: 3
VERSION_API :: "0.3"
@@ -16,7 +16,7 @@ Header :: struct #packed {
File :: struct {
using header: Header,
backing: []byte,
allocator: mem.Allocator,
allocator: runtime.Allocator,
nodes: []Node,
}

View File

@@ -0,0 +1,34 @@
#+build !freestanding
#+build !js
package encoding_hxa
import "core:os"
read_from_file :: proc(filename: string, print_error := false, allocator := context.allocator, loc := #caller_location) -> (file: File, err: Read_Error) {
context.allocator = allocator
data, data_err := os.read_entire_file(filename, allocator, loc)
if data_err != nil {
err = .Unable_To_Read_File
delete(data, allocator)
return
}
file, err = read(data, filename, print_error, allocator)
file.backing = data
return
}
write_to_file :: proc(filepath: string, file: File) -> (err: Write_Error) {
required := required_write_size(file)
buf, alloc_err := make([]byte, required)
if alloc_err == .Out_Of_Memory {
return .Failed_File_Write
}
defer delete(buf)
write_internal(&Writer{data = buf}, file)
if os.write_entire_file(filepath, buf) != nil {
err =.Failed_File_Write
}
return
}

View File

@@ -1,8 +1,6 @@
package encoding_hxa
import "core:fmt"
import "core:os"
import "core:mem"
Read_Error :: enum {
None,
@@ -11,20 +9,6 @@ Read_Error :: enum {
Unable_To_Read_File,
}
read_from_file :: proc(filename: string, print_error := false, allocator := context.allocator, loc := #caller_location) -> (file: File, err: Read_Error) {
context.allocator = allocator
data, ok := os.read_entire_file(filename, allocator, loc)
if !ok {
err = .Unable_To_Read_File
delete(data, allocator, loc)
return
}
file, err = read(data, filename, print_error, allocator, loc)
file.backing = data
return
}
read :: proc(data: []byte, filename := "<input>", print_error := false, allocator := context.allocator, loc := #caller_location) -> (file: File, err: Read_Error) {
Reader :: struct {
filename: string,
@@ -60,7 +44,7 @@ read :: proc(data: []byte, filename := "<input>", print_error := false, allocato
}
ptr := raw_data(r.data[r.offset:])
value = mem.slice_ptr((^T)(ptr), count)
value = ([^]T)(ptr)[:count]
r.offset += size_of(T)*count
return
}

View File

@@ -1,29 +1,11 @@
package encoding_hxa
import "core:os"
import "core:mem"
Write_Error :: enum {
None,
Buffer_Too_Small,
Failed_File_Write,
}
write_to_file :: proc(filepath: string, file: File) -> (err: Write_Error) {
required := required_write_size(file)
buf, alloc_err := make([]byte, required)
if alloc_err == .Out_Of_Memory {
return .Failed_File_Write
}
defer delete(buf)
write_internal(&Writer{data = buf}, file)
if !os.write_entire_file(filepath, buf) {
err =.Failed_File_Write
}
return
}
write :: proc(buf: []byte, file: File) -> (n: int, err: Write_Error) {
required := required_write_size(file)
if len(buf) < required {
@@ -66,7 +48,7 @@ write_internal :: proc(w: ^Writer, file: File) {
remaining := len(w.data) - w.offset
assert(size_of(T)*len(array) <= remaining)
ptr := raw_data(w.data[w.offset:])
dst := mem.slice_ptr((^T)(ptr), len(array))
dst := ([^]T)(ptr)[:len(array)]
copy(dst, array)
}
w.offset += size_of(T)*len(array)
@@ -76,7 +58,7 @@ write_internal :: proc(w: ^Writer, file: File) {
remaining := len(w.data) - w.offset
assert(size_of(byte)*len(str) <= remaining)
ptr := raw_data(w.data[w.offset:])
dst := mem.slice_ptr((^byte)(ptr), len(str))
dst := ([^]byte)(ptr)[:len(str)]
copy(dst, str)
}
w.offset += size_of(byte)*len(str)

View File

@@ -1,13 +1,12 @@
// Reader and writer for a variant of the `.ini` file format with `key = value` entries in `[sections]`.
package encoding_ini
import "base:runtime"
import "base:intrinsics"
import "core:strings"
import "core:strconv"
import "core:io"
import "core:os"
import "core:fmt"
import "base:runtime"
import "base:intrinsics"
import "core:strings"
import "core:strconv"
import "core:io"
import "core:fmt"
_ :: fmt
Options :: struct {
@@ -120,17 +119,6 @@ load_map_from_string :: proc(src: string, allocator: runtime.Allocator, options
return
}
load_map_from_path :: proc(path: string, allocator: runtime.Allocator, options := DEFAULT_OPTIONS) -> (m: Map, err: runtime.Allocator_Error, ok: bool) {
data := os.read_entire_file(path, allocator) or_return
defer delete(data, allocator)
m, err = load_map_from_string(string(data), allocator, options)
ok = err == nil
defer if !ok {
delete_map(m)
}
return
}
save_map_to_string :: proc(m: Map, allocator: runtime.Allocator) -> (data: string) {
b := strings.builder_make(allocator)
_, _ = write_map(strings.to_writer(&b), m)
@@ -191,4 +179,4 @@ write_map :: proc(w: io.Writer, m: Map) -> (n: int, err: io.Error) {
section_index += 1
}
return
}
}

View File

@@ -0,0 +1,20 @@
#+build !freestanding
#+build !js
package encoding_ini
import "base:runtime"
import "core:os"
// load_map_from_path reads the `.ini` file at `path` and parses it into a Map.
//
// `ok` is true only when the file was read and parsed successfully; `err`
// carries an allocator error from parsing, if any. Note that a failed file
// read returns with `ok = false` and `err = nil` — callers must check `ok`,
// not just `err`.
load_map_from_path :: proc(path: string, allocator: runtime.Allocator, options := DEFAULT_OPTIONS) -> (m: Map, err: runtime.Allocator_Error, ok: bool) {
	data, data_err := os.read_entire_file(path, allocator)
	// Registered before the error check on purpose: deleting a nil/empty
	// slice is safe, and this covers both the failure and success paths.
	defer delete(data, allocator)
	if data_err != nil {
		return
	}
	// `load_map_from_string` copies what it needs, so `data` can be freed
	// by the defer above — TODO confirm against its implementation.
	m, err = load_map_from_string(string(data), allocator, options)
	ok = err == nil
	// If parsing failed partway, tear down whatever was built so the caller
	// never sees a half-constructed map.
	defer if !ok {
		delete_map(m)
	}
	return
}

View File

@@ -0,0 +1,18 @@
#+build !freestanding
#+build !js
package encoding_xml
import "core:os"
// Load an XML file
// Load an XML file
//
// Reads the entire file into memory and hands the bytes to `parse_bytes`.
// The parser is allowed to mutate the buffer in place (.Input_May_Be_Modified),
// which avoids a copy during entity decoding.
//
// Returns `.File_Error` if the file cannot be read.
load_from_file :: proc(filename: string, options := DEFAULT_OPTIONS, error_handler := default_error_handler, allocator := context.allocator) -> (doc: ^Document, err: Error) {
	context.allocator = allocator
	options := options
	data, data_err := os.read_entire_file(filename, allocator)
	// NOTE(review): on failure `data` is presumably nil so nothing leaks here —
	// confirm `os.read_entire_file` never returns a partial allocation with an error.
	if data_err != nil { return {}, .File_Error }
	options.flags += { .Input_May_Be_Modified }
	return parse_bytes(data, options, filename, error_handler, allocator)
}

View File

@@ -9,13 +9,12 @@ package encoding_xml
- Jeroen van Rijn: Initial implementation.
*/
import "core:bytes"
import "core:encoding/entity"
import "base:intrinsics"
import "core:mem"
import "core:os"
import "core:strings"
import "base:runtime"
import "base:runtime"
import "core:bytes"
import "core:encoding/entity"
import "base:intrinsics"
import "core:mem"
import "core:strings"
likely :: intrinsics.expect
@@ -373,19 +372,6 @@ parse_string :: proc(data: string, options := DEFAULT_OPTIONS, path := "", error
parse :: proc { parse_string, parse_bytes }
// Load an XML file
load_from_file :: proc(filename: string, options := DEFAULT_OPTIONS, error_handler := default_error_handler, allocator := context.allocator) -> (doc: ^Document, err: Error) {
context.allocator = allocator
options := options
data, data_ok := os.read_entire_file(filename)
if !data_ok { return {}, .File_Error }
options.flags += { .Input_May_Be_Modified }
return parse_bytes(data, options, filename, error_handler, allocator)
}
destroy :: proc(doc: ^Document, allocator := context.allocator) {
context.allocator = allocator
if doc == nil { return }

View File

@@ -37,8 +37,8 @@ Unified_Parse_Error_Reason :: union #shared_nil {
Open_File_Error :: struct {
filename: string,
errno: os.Error,
mode: int,
perms: int,
flags: os.File_Flags,
perms: os.Permissions,
}
// Raised during parsing.

View File

@@ -76,8 +76,8 @@ Distinct_Int :: distinct int
main :: proc() {
Options :: struct {
file: os.Handle `args:"pos=0,required,file=r" usage:"Input file."`,
output: os.Handle `args:"pos=1,file=cw" usage:"Output file."`,
file: ^os.File `args:"pos=0,required,file=r" usage:"Input file."`,
output: ^os.File `args:"pos=1,file=cw" usage:"Output file."`,
hub: net.Host_Or_Endpoint `usage:"Internet address to contact for updates."`,
schedule: datetime.DateTime `usage:"Launch tasks at this time."`,
@@ -126,7 +126,7 @@ main :: proc() {
fmt.printfln("%#v", opt)
if opt.output != 0 {
if opt.output != nil {
os.write_string(opt.output, "Hellope!\n")
}
}

View File

@@ -1,18 +1,18 @@
#+private
package flags
import "base:intrinsics"
import "base:runtime"
import "core:fmt"
import "core:mem"
import "core:net"
import "core:os"
import "core:reflect"
import "core:strconv"
import "core:strings"
@require import "core:time"
@require import "core:time/datetime"
import "core:unicode/utf8"
import "base:intrinsics"
import "base:runtime"
import "core:fmt"
import "core:mem"
import "core:net"
@(require) import "core:os"
import "core:reflect"
import "core:strconv"
import "core:strings"
@(require) import "core:time"
@(require) import "core:time/datetime"
import "core:unicode/utf8"
@(optimization_mode="favor_size")
parse_and_set_pointer_by_base_type :: proc(ptr: rawptr, str: string, type_info: ^runtime.Type_Info) -> bool {
@@ -209,7 +209,7 @@ parse_and_set_pointer_by_base_type :: proc(ptr: rawptr, str: string, type_info:
parse_and_set_pointer_by_named_type :: proc(ptr: rawptr, str: string, data_type: typeid, arg_tag: string, out_error: ^Error) {
// Core types currently supported:
//
// - os.Handle
// - ^os.File
// - time.Time
// - datetime.DateTime
// - net.Host_Or_Endpoint
@@ -217,64 +217,61 @@ parse_and_set_pointer_by_named_type :: proc(ptr: rawptr, str: string, data_type:
GENERIC_RFC_3339_ERROR :: "Invalid RFC 3339 string. Try this format: `yyyy-mm-ddThh:mm:ssZ`, for example `2024-02-29T16:30:00Z`."
out_error^ = nil
if data_type == os.Handle {
if data_type == ^os.File {
// NOTE: `os` is hopefully available everywhere, even if it might panic on some calls.
wants_read := false
wants_write := false
mode: int
flags: os.File_Flags
if file, ok := get_struct_subtag(arg_tag, SUBTAG_FILE); ok {
for i in 0..<len(file) {
#no_bounds_check switch file[i] {
case 'r': wants_read = true
case 'w': wants_write = true
case 'c': mode |= os.O_CREATE
case 'a': mode |= os.O_APPEND
case 't': mode |= os.O_TRUNC
case 'r': flags |= {.Read}
case 'w': flags |= {.Write}
case 'c': flags |= {.Create}
case 'a': flags |= {.Append}
case 't': flags |= {.Trunc}
}
}
}
// Sane default.
// owner/group/other: r--r--r--
perms: int = 0o444
octal_perms: int = 0o444
if wants_read && wants_write {
mode |= os.O_RDWR
perms |= 0o200
} else if wants_write {
mode |= os.O_WRONLY
perms |= 0o200
if flags >= {.Read, .Write} {
octal_perms |= 0o200
} else if .Write in flags {
octal_perms |= 0o200
} else {
mode |= os.O_RDONLY
flags |= {.Read}
}
if permstr, ok := get_struct_subtag(arg_tag, SUBTAG_PERMS); ok {
if value, parse_ok := strconv.parse_u64_of_base(permstr, 8); parse_ok {
perms = int(value)
octal_perms = int(value)
}
}
handle, errno := os.open(str, mode, perms)
if errno != nil {
perms := os.perm(octal_perms)
f, error := os.open(str, flags, perms)
if error != nil {
// NOTE(Feoramund): os.Error is system-dependent, and there's
// currently no good way to translate them all into strings.
//
// The upcoming `os2` package will hopefully solve this.
// The upcoming `core:os` package will hopefully solve this.
//
// We can at least provide the number for now, so the user can look
// it up.
out_error^ = Open_File_Error {
str,
errno,
mode,
error,
flags,
perms,
}
return
}
(^os.Handle)(ptr)^ = handle
(^^os.File)(ptr)^ = f
return
}
@@ -475,6 +472,11 @@ parse_and_set_pointer_by_type :: proc(ptr: rawptr, str: string, type_info: ^runt
}
case:
if type_info.id == ^os.File {
parse_and_set_pointer_by_named_type(ptr, str, type_info.id, arg_tag, &error)
return
}
if !parse_and_set_pointer_by_base_type(ptr, str, type_info) {
return Parse_Error {
// The caller will add more details.

View File

@@ -138,20 +138,20 @@ validate_structure :: proc(model_type: $T, style: Parsing_Style, loc := #caller_
allowed_to_define_file_perms: bool = ---
#partial switch specific_type_info in field.type.variant {
case runtime.Type_Info_Map:
allowed_to_define_file_perms = specific_type_info.value.id == os.Handle
allowed_to_define_file_perms = specific_type_info.value.id == ^os.File
case runtime.Type_Info_Dynamic_Array:
allowed_to_define_file_perms = specific_type_info.elem.id == os.Handle
allowed_to_define_file_perms = specific_type_info.elem.id == ^os.File
case:
allowed_to_define_file_perms = field.type.id == os.Handle
allowed_to_define_file_perms = field.type.id == ^os.File
}
if _, has_file := get_struct_subtag(args_tag, SUBTAG_FILE); has_file {
fmt.assertf(allowed_to_define_file_perms, "%T.%s has `%s` defined, but it is not nor does it contain an `os.Handle` type.",
fmt.assertf(allowed_to_define_file_perms, "%T.%s has `%s` defined, but it is not nor does it contain an `^os.File` type.",
model_type, field.name, SUBTAG_FILE, loc = loc)
}
if _, has_perms := get_struct_subtag(args_tag, SUBTAG_PERMS); has_perms {
fmt.assertf(allowed_to_define_file_perms, "%T.%s has `%s` defined, but it is not nor does it contain an `os.Handle` type.",
fmt.assertf(allowed_to_define_file_perms, "%T.%s has `%s` defined, but it is not nor does it contain an `^os.File` type.",
model_type, field.name, SUBTAG_PERMS, loc = loc)
}

View File

@@ -1,9 +1,9 @@
package flags
import "core:fmt"
import "core:fmt"
@require import "core:os"
@require import "core:path/filepath"
import "core:strings"
import "core:strings"
/*
Parse any arguments into an annotated struct or exit if there was an error.
@@ -38,7 +38,7 @@ parse_or_exit :: proc(
error := parse(model, args, style, true, true, allocator, loc)
if error != nil {
stderr := os.stream_from_handle(os.stderr)
stderr := os.to_stream(os.stderr)
if len(args) == 0 {
// No arguments entered, and there was an error; show the usage,
@@ -65,19 +65,44 @@ Inputs:
*/
@(optimization_mode="favor_size")
print_errors :: proc(data_type: typeid, error: Error, program: string, style: Parsing_Style = .Odin) {
stderr := os.stream_from_handle(os.stderr)
stdout := os.stream_from_handle(os.stdout)
stderr := os.to_stream(os.stderr)
stdout := os.to_stream(os.stdout)
switch specific_error in error {
case Parse_Error:
fmt.wprintfln(stderr, "[%T.%v] %s", specific_error, specific_error.reason, specific_error.message)
case Open_File_Error:
fmt.wprintfln(stderr, "[%T#%i] Unable to open file with perms 0o%o in mode 0x%x: %s",
specific_error,
specific_error.errno,
specific_error.perms,
specific_error.mode,
specific_error.filename)
if os.exists(specific_error.filename) {
flags: string
if specific_error.flags == {.Read} {
flags = "read-only"
} else if specific_error.flags == {.Write} {
flags = "write-only"
} else if specific_error.flags == {.Read, .Write} {
flags = "read/write"
}
if flags != "" {
fmt.wprintfln(stderr, "[%T#%i] Unable to open %q with perms 0o%o as %s",
specific_error,
specific_error.errno,
specific_error.filename,
u16(transmute(u32)specific_error.perms),
flags)
} else {
fmt.wprintfln(stderr, "[%T#%i] Unable to open %q with perms 0o%o and flags %v",
specific_error,
specific_error.errno,
specific_error.filename,
u16(transmute(u32)specific_error.perms),
specific_error.flags)
}
} else {
fmt.wprintfln(stderr, "[%T#%i] Unable to open %q. File not found",
specific_error,
specific_error.errno,
specific_error.filename)
}
case Validation_Error:
fmt.wprintfln(stderr, "[%T] %s", specific_error, specific_error.message)
case Help_Request:

View File

@@ -1,10 +1,6 @@
#+build js
package fmt
import "core:bufio"
import "core:io"
import "core:os"
foreign import "odin_env"
@(private="file")
@@ -12,90 +8,77 @@ foreign odin_env {
write :: proc "contextless" (fd: u32, p: []byte) ---
}
@(private="file")
write_stream_proc :: proc(stream_data: rawptr, mode: io.Stream_Mode, p: []byte, offset: i64, whence: io.Seek_From) -> (n: i64, err: io.Error) {
if mode == .Write {
fd := u32(uintptr(stream_data))
write(fd, p)
return i64(len(p)), nil
}
return 0, .Unsupported
}
stdout :: u32(1)
stderr :: u32(2)
@(private="file")
stdout := io.Writer{
procedure = write_stream_proc,
data = rawptr(uintptr(1)),
}
@(private="file")
stderr := io.Writer{
procedure = write_stream_proc,
data = rawptr(uintptr(2)),
}
BUF_SIZE :: 1024
@(private="file")
fd_to_writer :: proc(fd: os.Handle, loc := #caller_location) -> io.Writer {
switch fd {
case 1: return stdout
case 2: return stderr
case: panic("`fmt.fprint` variant called with invalid file descriptor for JS, only 1 (stdout) and 2 (stderr) are supported", loc)
// TODO: Find a way to grow this if necessary
buf: [BUF_SIZE]byte
@(private="file")
get_fd :: proc(f: any, loc := #caller_location) -> (fd: u32) {
if _fd, _ok := f.(u32); _ok {
fd = _fd
}
if fd != 1 && fd != 2 {
panic("`fmt.fprint` variant called with invalid file descriptor for JS, only 1 (stdout) and 2 (stderr) are supported", loc)
}
return fd
}
// fprint formats using the default print settings and writes to fd
fprint :: proc(fd: os.Handle, args: ..any, sep := " ", flush := true, loc := #caller_location) -> int {
buf: [1024]byte
b: bufio.Writer
defer bufio.writer_flush(&b)
bufio.writer_init_with_buf(&b, fd_to_writer(fd, loc), buf[:])
w := bufio.writer_to_writer(&b)
return wprint(w, ..args, sep=sep, flush=flush)
// flush is ignored
fprint :: proc(f: any, args: ..any, sep := " ", flush := true, loc := #caller_location) -> (n: int) {
fd := get_fd(f)
s := bprint(buf[:], ..args, sep=sep)
n = len(s)
write(fd, transmute([]byte)s)
return n
}
// fprintln formats using the default print settings and writes to fd
fprintln :: proc(fd: os.Handle, args: ..any, sep := " ", flush := true, loc := #caller_location) -> int {
buf: [1024]byte
b: bufio.Writer
defer bufio.writer_flush(&b)
bufio.writer_init_with_buf(&b, fd_to_writer(fd, loc), buf[:])
w := bufio.writer_to_writer(&b)
return wprintln(w, ..args, sep=sep, flush=flush)
// fprintln formats using the default print settings and writes to fd, followed by a newline
// flush is ignored
fprintln :: proc(f: any, args: ..any, sep := " ", flush := true, loc := #caller_location) -> (n: int) {
fd := get_fd(f)
s := bprintln(buf[:], ..args, sep=sep)
n = len(s)
write(fd, transmute([]byte)s)
return n
}
// fprintf formats according to the specified format string and writes to fd
fprintf :: proc(fd: os.Handle, fmt: string, args: ..any, flush := true, newline := false, loc := #caller_location) -> int {
buf: [1024]byte
b: bufio.Writer
defer bufio.writer_flush(&b)
bufio.writer_init_with_buf(&b, fd_to_writer(fd, loc), buf[:])
w := bufio.writer_to_writer(&b)
return wprintf(w, fmt, ..args, flush=flush, newline=newline)
// flush is ignored
fprintf :: proc(f: any, fmt: string, args: ..any, flush := true, newline := false, loc := #caller_location) -> (n: int) {
fd := get_fd(f)
s := bprintf(buf[:], fmt, ..args, newline=newline)
n = len(s)
write(fd, transmute([]byte)s)
return n
}
// fprintfln formats according to the specified format string and writes to fd, followed by a newline.
fprintfln :: proc(fd: os.Handle, fmt: string, args: ..any, flush := true, loc := #caller_location) -> int {
return fprintf(fd, fmt, ..args, flush=flush, newline=true, loc=loc)
// flush is ignored
fprintfln :: proc(f: any, fmt: string, args: ..any, flush := true, loc := #caller_location) -> int {
return fprintf(f, fmt, ..args, flush=flush, newline=true, loc=loc)
}
// print formats using the default print settings and writes to stdout
print :: proc(args: ..any, sep := " ", flush := true) -> int { return wprint(w=stdout, args=args, sep=sep, flush=flush) }
print :: proc(args: ..any, sep := " ", flush := true) -> int { return fprint(stdout, ..args, sep=sep, flush=flush) }
// println formats using the default print settings and writes to stdout
println :: proc(args: ..any, sep := " ", flush := true) -> int { return wprintln(w=stdout, args=args, sep=sep, flush=flush) }
println :: proc(args: ..any, sep := " ", flush := true) -> int { return fprintln(stdout, ..args, sep=sep, flush=flush) }
// printf formats according to the specififed format string and writes to stdout
printf :: proc(fmt: string, args: ..any, flush := true) -> int { return wprintf(stdout, fmt, ..args, flush=flush) }
printf :: proc(fmt: string, args: ..any, flush := true) -> int { return fprintf(stdout, fmt, ..args, flush=flush) }
// printfln formats according to the specified format string and writes to stdout, followed by a newline.
printfln :: proc(fmt: string, args: ..any, flush := true) -> int { return wprintf(stdout, fmt, ..args, flush=flush, newline=true) }
printfln :: proc(fmt: string, args: ..any, flush := true) -> int { return fprintf(stdout, fmt, ..args, flush=flush, newline=true) }
// eprint formats using the default print settings and writes to stderr
eprint :: proc(args: ..any, sep := " ", flush := true) -> int { return wprint(w=stderr, args=args, sep=sep, flush=flush) }
eprint :: proc(args: ..any, sep := " ", flush := true) -> int { return fprint(stderr, ..args, sep=sep, flush=flush) }
// eprintln formats using the default print settings and writes to stderr
eprintln :: proc(args: ..any, sep := " ", flush := true) -> int { return wprintln(w=stderr, args=args, sep=sep, flush=flush) }
eprintln :: proc(args: ..any, sep := " ", flush := true) -> int { return fprintln(stderr, ..args, sep=sep, flush=flush) }
// eprintf formats according to the specififed format string and writes to stderr
eprintf :: proc(fmt: string, args: ..any, flush := true) -> int { return wprintf(stderr, fmt, ..args, flush=flush) }
eprintf :: proc(fmt: string, args: ..any, flush := true) -> int { return fprintf(stderr, fmt, ..args, flush=flush) }
// eprintfln formats according to the specified format string and writes to stderr, followed by a newline.
eprintfln :: proc(fmt: string, args: ..any, flush := true) -> int { return wprintf(stdout, fmt, ..args, flush=flush, newline=true) }
eprintfln :: proc(fmt: string, args: ..any, flush := true) -> int { return fprintf(stderr, fmt, ..args, flush=flush, newline=true) }

View File

@@ -8,59 +8,61 @@ import "core:os"
import "core:io"
import "core:bufio"
// NOTE(Jeroen): The other option is to deprecate `fprint*` and make it an alias for `wprint*`, using File.stream directly.
// fprint formats using the default print settings and writes to fd
fprint :: proc(fd: os.Handle, args: ..any, sep := " ", flush := true) -> int {
fprint :: proc(f: ^os.File, args: ..any, sep := " ", flush := true) -> int {
buf: [1024]byte
b: bufio.Writer
defer bufio.writer_flush(&b)
bufio.writer_init_with_buf(&b, os.stream_from_handle(fd), buf[:])
bufio.writer_init_with_buf(&b, os.to_stream(f), buf[:])
w := bufio.writer_to_writer(&b)
return wprint(w, ..args, sep=sep, flush=flush)
}
// fprintln formats using the default print settings and writes to fd
fprintln :: proc(fd: os.Handle, args: ..any, sep := " ", flush := true) -> int {
fprintln :: proc(f: ^os.File, args: ..any, sep := " ", flush := true) -> int {
buf: [1024]byte
b: bufio.Writer
defer bufio.writer_flush(&b)
bufio.writer_init_with_buf(&b, os.stream_from_handle(fd), buf[:])
bufio.writer_init_with_buf(&b, os.to_stream(f), buf[:])
w := bufio.writer_to_writer(&b)
return wprintln(w, ..args, sep=sep, flush=flush)
}
// fprintf formats according to the specified format string and writes to fd
fprintf :: proc(fd: os.Handle, fmt: string, args: ..any, flush := true, newline := false) -> int {
fprintf :: proc(f: ^os.File, fmt: string, args: ..any, flush := true, newline := false) -> int {
buf: [1024]byte
b: bufio.Writer
defer bufio.writer_flush(&b)
bufio.writer_init_with_buf(&b, os.stream_from_handle(fd), buf[:])
bufio.writer_init_with_buf(&b, os.to_stream(f), buf[:])
w := bufio.writer_to_writer(&b)
return wprintf(w, fmt, ..args, flush=flush, newline=newline)
}
// fprintfln formats according to the specified format string and writes to fd, followed by a newline.
fprintfln :: proc(fd: os.Handle, fmt: string, args: ..any, flush := true) -> int {
return fprintf(fd, fmt, ..args, flush=flush, newline=true)
fprintfln :: proc(f: ^os.File, fmt: string, args: ..any, flush := true) -> int {
return fprintf(f, fmt, ..args, flush=flush, newline=true)
}
fprint_type :: proc(fd: os.Handle, info: ^runtime.Type_Info, flush := true) -> (n: int, err: io.Error) {
fprint_type :: proc(f: ^os.File, info: ^runtime.Type_Info, flush := true) -> (n: int, err: io.Error) {
buf: [1024]byte
b: bufio.Writer
defer bufio.writer_flush(&b)
bufio.writer_init_with_buf(&b, os.stream_from_handle(fd), buf[:])
bufio.writer_init_with_buf(&b, os.to_stream(f), buf[:])
w := bufio.writer_to_writer(&b)
return wprint_type(w, info, flush=flush)
}
fprint_typeid :: proc(fd: os.Handle, id: typeid, flush := true) -> (n: int, err: io.Error) {
fprint_typeid :: proc(f: ^os.File, id: typeid, flush := true) -> (n: int, err: io.Error) {
buf: [1024]byte
b: bufio.Writer
defer bufio.writer_flush(&b)
bufio.writer_init_with_buf(&b, os.stream_from_handle(fd), buf[:])
bufio.writer_init_with_buf(&b, os.to_stream(f), buf[:])
w := bufio.writer_to_writer(&b)
return wprint_typeid(w, id, flush=flush)

View File

@@ -9,10 +9,10 @@ load :: proc{load_from_file, load_from_bytes, load_from_context}
load_from_file :: proc(filename: string, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
context.allocator = allocator
data, ok := os.read_entire_file(filename)
defer delete(data)
data, data_err := os.read_entire_file(filename, allocator)
defer delete(data, allocator)
if ok {
if data_err == nil {
return load_from_bytes(data, options)
} else {
return nil, .Unable_To_Read_File
@@ -28,7 +28,7 @@ save_to_file :: proc(output: string, img: ^Image, options := Options{}, allocato
defer bytes.buffer_destroy(out)
save_to_buffer(out, img, options) or_return
write_ok := os.write_entire_file(output, out.buf[:])
write_err := os.write_entire_file(output, out.buf[:])
return nil if write_ok else .Unable_To_Write_File
return nil if write_err == nil else .Unable_To_Write_File
}

View File

@@ -8,18 +8,16 @@ load :: proc{
load_from_file,
}
load_from_file :: proc(filename: string, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
data, ok := os.read_entire_file(filename, allocator)
data, data_err := os.read_entire_file(filename, allocator)
defer delete(data, allocator)
if ok {
if data_err == nil {
return load_from_bytes(data, options, allocator)
} else {
return nil, .Unable_To_Read_File
}
}
which :: proc{
which_bytes,
which_file,

View File

@@ -8,10 +8,10 @@ load :: proc{load_from_file, load_from_bytes, load_from_context}
load_from_file :: proc(filename: string, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
context.allocator = allocator
data, ok := os.read_entire_file(filename)
defer delete(data)
data, data_err := os.read_entire_file(filename, allocator)
defer delete(data, allocator)
if ok {
if data_err == nil {
return load_from_bytes(data, options)
} else {
return nil, .Unable_To_Read_File

View File

@@ -8,20 +8,18 @@ load :: proc {
load_from_bytes,
}
load_from_file :: proc(filename: string, allocator := context.allocator) -> (img: ^Image, err: Error) {
context.allocator = allocator
data, ok := os.read_entire_file(filename); defer delete(data)
if !ok {
data, data_err := os.read_entire_file(filename, allocator); defer delete(data)
if data_err == nil {
return load_from_bytes(data)
} else {
err = .Unable_To_Read_File
return
}
return load_from_bytes(data)
}
save :: proc {
save_to_file,
save_to_buffer,
@@ -33,7 +31,7 @@ save_to_file :: proc(filename: string, img: ^Image, custom_info: Info = {}, allo
data: []byte; defer delete(data)
data = save_to_buffer(img, custom_info) or_return
if ok := os.write_entire_file(filename, data); !ok {
if save_err := os.write_entire_file(filename, data); save_err != nil {
return .Unable_To_Write_File
}

View File

@@ -1,348 +0,0 @@
/*
Reader for `PNG` images.
The PNG specification is at [[ https://www.w3.org/TR/PNG/ ]].
Example:
package main
import "core:image"
// import "core:image/png"
import "core:bytes"
import "core:fmt"
// For PPM writer
import "core:mem"
import "core:os"
main :: proc() {
track := mem.Tracking_Allocator{}
mem.tracking_allocator_init(&track, context.allocator)
context.allocator = mem.tracking_allocator(&track)
demo()
if len(track.allocation_map) > 0 {
fmt.println("Leaks:")
for _, v in track.allocation_map {
fmt.printf("\t%v\n\n", v)
}
}
}
demo :: proc() {
file: string
options := image.Options{.return_metadata}
err: image.Error
img: ^image.Image
file = "../../../misc/logo-slim.png"
img, err = load(file, options)
defer destroy(img)
if err != nil {
fmt.printf("Trying to read PNG file %v returned %v\n", file, err)
} else {
fmt.printf("Image: %vx%vx%v, %v-bit.\n", img.width, img.height, img.channels, img.depth)
if v, ok := img.metadata.(^image.PNG_Info); ok {
// Handle ancillary chunks as you wish.
// We provide helper functions for a few types.
for c in v.chunks {
#partial switch c.header.type {
case .tIME:
if t, t_ok := core_time(c); t_ok {
fmt.printf("[tIME]: %v\n", t)
}
case .gAMA:
if gama, gama_ok := gamma(c); gama_ok {
fmt.printf("[gAMA]: %v\n", gama)
}
case .pHYs:
if phys, phys_ok := phys(c); phys_ok {
if phys.unit == .Meter {
xm := f32(img.width) / f32(phys.ppu_x)
ym := f32(img.height) / f32(phys.ppu_y)
dpi_x, dpi_y := phys_to_dpi(phys)
fmt.printf("[pHYs] Image resolution is %v x %v pixels per meter.\n", phys.ppu_x, phys.ppu_y)
fmt.printf("[pHYs] Image resolution is %v x %v DPI.\n", dpi_x, dpi_y)
fmt.printf("[pHYs] Image dimensions are %v x %v meters.\n", xm, ym)
} else {
fmt.printf("[pHYs] x: %v, y: %v pixels per unknown unit.\n", phys.ppu_x, phys.ppu_y)
}
}
case .iTXt, .zTXt, .tEXt:
res, ok_text := text(c)
if ok_text {
if c.header.type == .iTXt {
fmt.printf("[iTXt] %v (%v:%v): %v\n", res.keyword, res.language, res.keyword_localized, res.text)
} else {
fmt.printf("[tEXt/zTXt] %v: %v\n", res.keyword, res.text)
}
}
defer text_destroy(res)
case .bKGD:
fmt.printf("[bKGD] %v\n", img.background)
case .eXIf:
if res, ok_exif := exif(c); ok_exif {
/*
Other than checking the signature and byte order, we don't handle Exif data.
If you wish to interpret it, pass it to an Exif parser.
*/
fmt.printf("[eXIf] %v\n", res)
}
case .PLTE:
if plte, plte_ok := plte(c); plte_ok {
fmt.printf("[PLTE] %v\n", plte)
} else {
fmt.printf("[PLTE] Error\n")
}
case .hIST:
if res, ok_hist := hist(c); ok_hist {
fmt.printf("[hIST] %v\n", res)
}
case .cHRM:
if res, ok_chrm := chrm(c); ok_chrm {
fmt.printf("[cHRM] %v\n", res)
}
case .sPLT:
res, ok_splt := splt(c)
if ok_splt {
fmt.printf("[sPLT] %v\n", res)
}
splt_destroy(res)
case .sBIT:
if res, ok_sbit := sbit(c); ok_sbit {
fmt.printf("[sBIT] %v\n", res)
}
case .iCCP:
res, ok_iccp := iccp(c)
if ok_iccp {
fmt.printf("[iCCP] %v\n", res)
}
iccp_destroy(res)
case .sRGB:
if res, ok_srgb := srgb(c); ok_srgb {
fmt.printf("[sRGB] Rendering intent: %v\n", res)
}
case:
type := c.header.type
name := chunk_type_to_name(&type)
fmt.printf("[%v]: %v\n", name, c.data)
}
}
}
}
fmt.printf("Done parsing metadata.\n")
if err == nil && .do_not_decompress_image not_in options && .info not_in options {
if ok := write_image_as_ppm("out.ppm", img); ok {
fmt.println("Saved decoded image.")
} else {
fmt.println("Error saving out.ppm.")
fmt.println(img)
}
}
}
// Crappy PPM writer used during testing. Don't use in production.
write_image_as_ppm :: proc(filename: string, image: ^image.Image) -> (success: bool) {
_bg :: proc(bg: Maybe([3]u16), x, y: int, high := true) -> (res: [3]u16) {
if v, ok := bg.?; ok {
res = v
} else {
if high {
l := u16(30 * 256 + 30)
if (x & 4 == 0) ~ (y & 4 == 0) {
res = [3]u16{l, 0, l}
} else {
res = [3]u16{l >> 1, 0, l >> 1}
}
} else {
if (x & 4 == 0) ~ (y & 4 == 0) {
res = [3]u16{30, 30, 30}
} else {
res = [3]u16{15, 15, 15}
}
}
}
return
}
// profiler.timed_proc();
using image
using os
flags: int = O_WRONLY|O_CREATE|O_TRUNC
img := image
// PBM 16-bit images are big endian
when ODIN_ENDIAN == .Little {
if img.depth == 16 {
// The pixel components are in Big Endian. Let's byteswap back.
input := mem.slice_data_cast([]u16, img.pixels.buf[:])
output := mem.slice_data_cast([]u16be, img.pixels.buf[:])
#no_bounds_check for v, i in input {
output[i] = u16be(v)
}
}
}
pix := bytes.buffer_to_bytes(&img.pixels)
if len(pix) == 0 || len(pix) < image.width * image.height * int(image.channels) {
return false
}
mode: int = 0
when ODIN_OS == .Linux || ODIN_OS == .Darwin {
// NOTE(justasd): 644 (owner read, write; group read; others read)
mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH
}
fd, err := open(filename, flags, mode)
if err != nil {
return false
}
defer close(fd)
write_string(fd,
fmt.tprintf("P6\n%v %v\n%v\n", width, height, uint(1 << uint(depth) - 1)),
)
if channels == 3 {
// We don't handle transparency here...
write_ptr(fd, raw_data(pix), len(pix))
} else {
bpp := depth == 16 ? 2 : 1
bytes_needed := width * height * 3 * bpp
op := bytes.Buffer{}
bytes.buffer_init_allocator(&op, bytes_needed, bytes_needed)
defer bytes.buffer_destroy(&op)
if channels == 1 {
if depth == 16 {
assert(len(pix) == width * height * 2)
p16 := mem.slice_data_cast([]u16, pix)
o16 := mem.slice_data_cast([]u16, op.buf[:])
#no_bounds_check for len(p16) != 0 {
r := u16(p16[0])
o16[0] = r
o16[1] = r
o16[2] = r
p16 = p16[1:]
o16 = o16[3:]
}
} else {
o := 0
for i := 0; i < len(pix); i += 1 {
r := pix[i]
op.buf[o ] = r
op.buf[o+1] = r
op.buf[o+2] = r
o += 3
}
}
write_ptr(fd, raw_data(op.buf), len(op.buf))
} else if channels == 2 {
if depth == 16 {
p16 := mem.slice_data_cast([]u16, pix)
o16 := mem.slice_data_cast([]u16, op.buf[:])
bgcol := img.background
#no_bounds_check for len(p16) != 0 {
r := f64(u16(p16[0]))
bg: f64
if bgcol != nil {
v := bgcol.([3]u16)[0]
bg = f64(v)
}
a := f64(u16(p16[1])) / 65535.0
l := (a * r) + (1 - a) * bg
o16[0] = u16(l)
o16[1] = u16(l)
o16[2] = u16(l)
p16 = p16[2:]
o16 = o16[3:]
}
} else {
o := 0
for i := 0; i < len(pix); i += 2 {
r := pix[i]; a := pix[i+1]; a1 := f32(a) / 255.0
c := u8(f32(r) * a1)
op.buf[o ] = c
op.buf[o+1] = c
op.buf[o+2] = c
o += 3
}
}
write_ptr(fd, raw_data(op.buf), len(op.buf))
} else if channels == 4 {
if depth == 16 {
p16 := mem.slice_data_cast([]u16be, pix)
o16 := mem.slice_data_cast([]u16be, op.buf[:])
#no_bounds_check for len(p16) != 0 {
bg := _bg(img.background, 0, 0)
r := f32(p16[0])
g := f32(p16[1])
b := f32(p16[2])
a := f32(p16[3]) / 65535.0
lr := (a * r) + (1 - a) * f32(bg[0])
lg := (a * g) + (1 - a) * f32(bg[1])
lb := (a * b) + (1 - a) * f32(bg[2])
o16[0] = u16be(lr)
o16[1] = u16be(lg)
o16[2] = u16be(lb)
p16 = p16[4:]
o16 = o16[3:]
}
} else {
o := 0
for i := 0; i < len(pix); i += 4 {
x := (i / 4) % width
y := i / width / 4
_b := _bg(img.background, x, y, false)
bgcol := [3]u8{u8(_b[0]), u8(_b[1]), u8(_b[2])}
r := f32(pix[i])
g := f32(pix[i+1])
b := f32(pix[i+2])
a := f32(pix[i+3]) / 255.0
lr := u8(f32(r) * a + (1 - a) * f32(bgcol[0]))
lg := u8(f32(g) * a + (1 - a) * f32(bgcol[1]))
lb := u8(f32(b) * a + (1 - a) * f32(bgcol[2]))
op.buf[o ] = lr
op.buf[o+1] = lg
op.buf[o+2] = lb
o += 3
}
}
write_ptr(fd, raw_data(op.buf), len(op.buf))
} else {
return false
}
}
return true
}
*/
package png

136
core/image/png/example.odin Normal file
View File

@@ -0,0 +1,136 @@
#+build ignore
package png_example
import "core:image"
import "core:image/png"
import "core:image/tga"
import "core:fmt"
import "core:mem"
demo :: proc() {
options := image.Options{.return_metadata}
err: image.Error
img: ^image.Image
PNG_FILE :: ODIN_ROOT + "misc/logo-slim.png"
img, err = png.load(PNG_FILE, options)
defer png.destroy(img)
if err != nil {
fmt.eprintfln("Trying to read PNG file %v returned %v.", PNG_FILE, err)
} else {
fmt.printfln("Image: %vx%vx%v, %v-bit.", img.width, img.height, img.channels, img.depth)
if v, ok := img.metadata.(^image.PNG_Info); ok {
// Handle ancillary chunks as you wish.
// We provide helper functions for a few types.
for c in v.chunks {
#partial switch c.header.type {
case .tIME:
if t, t_ok := png.core_time(c); t_ok {
fmt.printfln("[tIME]: %v", t)
}
case .gAMA:
if gama, gama_ok := png.gamma(c); gama_ok {
fmt.printfln("[gAMA]: %v", gama)
}
case .pHYs:
if phys, phys_ok := png.phys(c); phys_ok {
if phys.unit == .Meter {
xm := f32(img.width) / f32(phys.ppu_x)
ym := f32(img.height) / f32(phys.ppu_y)
dpi_x, dpi_y := png.phys_to_dpi(phys)
fmt.printfln("[pHYs] Image resolution is %v x %v pixels per meter.", phys.ppu_x, phys.ppu_y)
fmt.printfln("[pHYs] Image resolution is %v x %v DPI.", dpi_x, dpi_y)
fmt.printfln("[pHYs] Image dimensions are %v x %v meters.", xm, ym)
} else {
fmt.printfln("[pHYs] x: %v, y: %v pixels per unknown unit.", phys.ppu_x, phys.ppu_y)
}
}
case .iTXt, .zTXt, .tEXt:
res, ok_text := png.text(c)
if ok_text {
if c.header.type == .iTXt {
fmt.printfln("[iTXt] %v (%v:%v): %v", res.keyword, res.language, res.keyword_localized, res.text)
} else {
fmt.printfln("[tEXt/zTXt] %v: %v", res.keyword, res.text)
}
}
defer png.text_destroy(res)
case .bKGD:
fmt.printfln("[bKGD] %v", img.background)
case .eXIf:
if res, ok_exif := png.exif(c); ok_exif {
/*
Other than checking the signature and byte order, we don't handle Exif data.
If you wish to interpret it, pass it to an Exif parser.
*/
fmt.printfln("[eXIf] %v", res)
}
case .PLTE:
if plte, plte_ok := png.plte(c); plte_ok {
fmt.printfln("[PLTE] %v", plte)
} else {
fmt.printfln("[PLTE] Error")
}
case .hIST:
if res, ok_hist := png.hist(c); ok_hist {
fmt.printfln("[hIST] %v", res)
}
case .cHRM:
if res, ok_chrm := png.chrm(c); ok_chrm {
fmt.printfln("[cHRM] %v", res)
}
case .sPLT:
res, ok_splt := png.splt(c)
if ok_splt {
fmt.printfln("[sPLT] %v", res)
}
png.splt_destroy(res)
case .sBIT:
if res, ok_sbit := png.sbit(c); ok_sbit {
fmt.printfln("[sBIT] %v", res)
}
case .iCCP:
res, ok_iccp := png.iccp(c)
if ok_iccp {
fmt.printfln("[iCCP] %v", res)
}
png.iccp_destroy(res)
case .sRGB:
if res, ok_srgb := png.srgb(c); ok_srgb {
fmt.printfln("[sRGB] Rendering intent: %v", res)
}
case:
type := c.header.type
name := png.chunk_type_to_name(&type)
fmt.printfln("[%v]: %v", name, c.data)
}
}
}
}
fmt.printfln("Done parsing metadata.")
if err == nil && .do_not_decompress_image not_in options && .info not_in options {
if err = tga.save("out.tga", img); err == nil {
fmt.println("Saved decoded image.")
} else {
fmt.eprintfln("Error %v saving out.ppm.", err)
}
}
}
main :: proc() {
track: mem.Tracking_Allocator
mem.tracking_allocator_init(&track, context.allocator)
defer mem.tracking_allocator_destroy(&track)
context.allocator = mem.tracking_allocator(&track)
demo()
for _, leak in track.allocation_map {
fmt.printf("%v leaked %m", leak.location, leak.size)
}
}

View File

@@ -1,4 +1,6 @@
#+feature using-stmt
// Reader for `PNG` images.
// The PNG specification is at [[ https://www.w3.org/TR/PNG/ ]].
package png
/*

View File

@@ -8,12 +8,12 @@ load :: proc{load_from_file, load_from_bytes, load_from_context}
load_from_file :: proc(filename: string, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
context.allocator = allocator
data, ok := os.read_entire_file(filename)
defer delete(data)
data, data_err := os.read_entire_file(filename, allocator)
defer delete(data, allocator)
if ok {
if data_err == nil {
return load_from_bytes(data, options)
} else {
return nil, .Unable_To_Read_File
}
}
}

View File

@@ -4,8 +4,22 @@ package qoi
import "core:os"
import "core:bytes"
save :: proc{save_to_buffer, save_to_file}
load :: proc{load_from_file, load_from_bytes, load_from_context}
load_from_file :: proc(filename: string, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
context.allocator = allocator
data, data_err := os.read_entire_file(filename, allocator)
defer delete(data, allocator)
if data_err == nil {
return load_from_bytes(data, options)
} else {
return nil, .Unable_To_Read_File
}
}
save :: proc{save_to_buffer, save_to_file}
save_to_file :: proc(output: string, img: ^Image, options := Options{}, allocator := context.allocator) -> (err: Error) {
context.allocator = allocator
@@ -14,24 +28,7 @@ save_to_file :: proc(output: string, img: ^Image, options := Options{}, allocato
defer bytes.buffer_destroy(out)
save_to_buffer(out, img, options) or_return
write_ok := os.write_entire_file(output, out.buf[:])
write_err := os.write_entire_file(output, out.buf[:])
return nil if write_ok else .Unable_To_Write_File
}
load :: proc{load_from_file, load_from_bytes, load_from_context}
load_from_file :: proc(filename: string, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
context.allocator = allocator
data, ok := os.read_entire_file(filename)
defer delete(data)
if ok {
return load_from_bytes(data, options)
} else {
return nil, .Unable_To_Read_File
}
return nil if write_err == nil else .Unable_To_Write_File
}

View File

@@ -4,6 +4,21 @@ package tga
import "core:os"
import "core:bytes"
load :: proc{load_from_file, load_from_bytes, load_from_context}
load_from_file :: proc(filename: string, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
context.allocator = allocator
data, data_err := os.read_entire_file(filename, allocator)
defer delete(data)
if data_err == nil {
return load_from_bytes(data, options)
} else {
return nil, .Unable_To_Read_File
}
}
save :: proc{save_to_buffer, save_to_file}
save_to_file :: proc(output: string, img: ^Image, options := Options{}, allocator := context.allocator) -> (err: Error) {
@@ -13,22 +28,7 @@ save_to_file :: proc(output: string, img: ^Image, options := Options{}, allocato
defer bytes.buffer_destroy(out)
save_to_buffer(out, img, options) or_return
write_ok := os.write_entire_file(output, out.buf[:])
write_err := os.write_entire_file(output, out.buf[:])
return nil if write_ok else .Unable_To_Write_File
}
load :: proc{load_from_file, load_from_bytes, load_from_context}
load_from_file :: proc(filename: string, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
context.allocator = allocator
data, ok := os.read_entire_file(filename)
defer delete(data)
if ok {
return load_from_bytes(data, options)
} else {
return nil, .Unable_To_Read_File
}
return nil if write_err == nil else .Unable_To_Write_File
}

View File

@@ -8,6 +8,11 @@ read_ptr :: proc(r: Reader, p: rawptr, byte_size: int, n_read: ^int = nil) -> (n
return read(r, ([^]byte)(p)[:byte_size], n_read)
}
read_slice :: proc(r: Reader, slice: $S/[]$T, n_read: ^int = nil) -> (n: int, err: Error) {
size := len(slice)*size_of(T)
return read_ptr(w, raw_data(slice), size, n_read)
}
write_ptr :: proc(w: Writer, p: rawptr, byte_size: int, n_written: ^int = nil) -> (n: int, err: Error) {
return write(w, ([^]byte)(p)[:byte_size], n_written)
}
@@ -20,6 +25,12 @@ write_ptr_at :: proc(w: Writer_At, p: rawptr, byte_size: int, offset: i64, n_wri
return write_at(w, ([^]byte)(p)[:byte_size], offset, n_written)
}
write_slice :: proc(w: Writer, slice: $S/[]$T, n_written: ^int = nil) -> (n: int, err: Error) {
size := len(slice)*size_of(T)
return write_ptr(w, raw_data(slice), size, n_written)
}
write_u64 :: proc(w: Writer, i: u64, base: int = 10, n_written: ^int = nil) -> (n: int, err: Error) {
buf: [64]byte
s := strconv.write_bits(buf[:], i, base, false, 64, strconv.digits, nil)

View File

@@ -0,0 +1,119 @@
#+build js
package log
import "core:fmt"
import "core:strings"
import "core:time"
Level_Headers := [?]string{
0..<10 = "[DEBUG] --- ",
10..<20 = "[INFO ] --- ",
20..<30 = "[WARN ] --- ",
30..<40 = "[ERROR] --- ",
40..<50 = "[FATAL] --- ",
}
Default_Console_Logger_Opts :: Options{
.Level,
.Short_File_Path,
.Line,
.Procedure,
} | Full_Timestamp_Opts
Console_Logger_Data :: struct {
ident: string,
}
create_console_logger :: proc(lowest := Level.Debug, opt := Default_Console_Logger_Opts, ident := "", allocator := context.allocator) -> Logger {
data := new(Console_Logger_Data, allocator)
data.ident = strings.clone(ident)
return Logger{console_logger_proc, data, lowest, opt}
}
destroy_console_logger :: proc(log: Logger, allocator := context.allocator) {
data := cast(^Console_Logger_Data)log.data
delete(data.ident)
free(log.data, allocator)
}
console_logger_proc :: proc(logger_data: rawptr, level: Level, text: string, options: Options, location := #caller_location) {
options := options
data := cast(^Console_Logger_Data)logger_data
backing: [1024]byte //NOTE(Hoej): 1024 might be too much for a header backing, unless somebody has really long paths.
buf := strings.builder_from_bytes(backing[:])
if .Level in options {
fmt.sbprint(&buf, Level_Headers[level])
}
when time.IS_SUPPORTED {
do_time_header(options, &buf, time.now())
}
do_location_header(options, &buf, location)
if data.ident != "" {
fmt.sbprintf(&buf, "[%s] ", data.ident)
}
h := fmt.stderr if level >= .Error else fmt.stdout
//TODO(Hoej): When we have better atomics and such, make this thread-safe
fmt.fprintf(h, "%s%s\n", strings.to_string(buf), text)
}
do_time_header :: proc(opts: Options, buf: ^strings.Builder, t: time.Time) {
when time.IS_SUPPORTED {
if Full_Timestamp_Opts & opts != nil {
fmt.sbprint(buf, "[")
y, m, d := time.date(t)
h, min, s := time.clock(t)
if .Date in opts {
fmt.sbprintf(buf, "%d-%02d-%02d", y, m, d)
if .Time in opts {
fmt.sbprint(buf, " ")
}
}
if .Time in opts { fmt.sbprintf(buf, "%02d:%02d:%02d", h, min, s) }
fmt.sbprint(buf, "] ")
}
}
}
do_location_header :: proc(opts: Options, buf: ^strings.Builder, location := #caller_location) {
if Location_Header_Opts & opts == nil {
return
}
fmt.sbprint(buf, "[")
file := location.file_path
if .Short_File_Path in opts {
last := 0
for r, i in location.file_path {
if r == '/' {
last = i+1
}
}
file = location.file_path[last:]
}
if Location_File_Opts & opts != nil {
fmt.sbprint(buf, file)
}
if .Line in opts {
if Location_File_Opts & opts != nil {
fmt.sbprint(buf, ":")
}
fmt.sbprint(buf, location.line)
}
if .Procedure in opts {
if (Location_File_Opts | {.Line}) & opts != nil {
fmt.sbprint(buf, ":")
}
fmt.sbprintf(buf, "%s()", location.procedure)
}
fmt.sbprint(buf, "] ")
}

View File

@@ -1,5 +1,6 @@
#+build !freestanding
#+build !orca
#+build !js
package log
import "base:runtime"
@@ -35,7 +36,7 @@ Default_File_Logger_Opts :: Options{
File_Console_Logger_Data :: struct {
file_handle: os.Handle,
file_handle: ^os.File,
ident: string,
}
@@ -66,16 +67,16 @@ init_standard_stream_status :: proc "contextless" () {
}
}
create_file_logger :: proc(h: os.Handle, lowest := Level.Debug, opt := Default_File_Logger_Opts, ident := "", allocator := context.allocator) -> Logger {
create_file_logger :: proc(f: ^os.File, lowest := Level.Debug, opt := Default_File_Logger_Opts, ident := "", allocator := context.allocator) -> Logger {
data := new(File_Console_Logger_Data, allocator)
data.file_handle = h
data.file_handle = f
data.ident = ident
return Logger{file_logger_proc, data, lowest, opt}
}
destroy_file_logger :: proc(log: Logger, allocator := context.allocator) {
data := cast(^File_Console_Logger_Data)log.data
if data.file_handle != os.INVALID_HANDLE {
if data.file_handle != nil {
os.close(data.file_handle)
}
free(data, allocator)
@@ -83,7 +84,7 @@ destroy_file_logger :: proc(log: Logger, allocator := context.allocator) {
create_console_logger :: proc(lowest := Level.Debug, opt := Default_Console_Logger_Opts, ident := "", allocator := context.allocator) -> Logger {
data := new(File_Console_Logger_Data, allocator)
data.file_handle = os.INVALID_HANDLE
data.file_handle = nil
data.ident = ident
return Logger{console_logger_proc, data, lowest, opt}
}
@@ -93,7 +94,7 @@ destroy_console_logger :: proc(log: Logger, allocator := context.allocator) {
}
@(private)
_file_console_logger_proc :: proc(h: os.Handle, ident: string, level: Level, text: string, options: Options, location: runtime.Source_Code_Location) {
_file_console_logger_proc :: proc(h: ^os.File, ident: string, level: Level, text: string, options: Options, location: runtime.Source_Code_Location) {
backing: [1024]byte //NOTE(Hoej): 1024 might be too much for a header backing, unless somebody has really long paths.
buf := strings.builder_from_bytes(backing[:])
@@ -106,9 +107,7 @@ _file_console_logger_proc :: proc(h: os.Handle, ident: string, level: Level, tex
do_location_header(options, &buf, location)
if .Thread_Id in options {
// NOTE(Oskar): not using context.thread_id here since that could be
// incorrect when replacing context for a thread.
fmt.sbprintf(&buf, "[{}] ", os.current_thread_id())
fmt.sbprintf(&buf, "[{}] ", os.get_current_thread_id())
}
if ident != "" {
@@ -126,7 +125,7 @@ file_logger_proc :: proc(logger_data: rawptr, level: Level, text: string, option
console_logger_proc :: proc(logger_data: rawptr, level: Level, text: string, options: Options, location := #caller_location) {
options := options
data := cast(^File_Console_Logger_Data)logger_data
h: os.Handle = ---
h: ^os.File = nil
if level < Level.Error {
h = os.stdout
options -= global_subtract_stdout_options
@@ -216,4 +215,4 @@ do_location_header :: proc(opts: Options, buf: ^strings.Builder, location := #ca
}
fmt.sbprint(buf, "] ")
}
}

View File

@@ -14,21 +14,10 @@ import "base:runtime"
This allows to benchmark and/or setting optimized values for a certain CPU without recompiling.
*/
/*
There is a bug with DLL globals. They don't get set.
To allow tests to run we add `-define:MATH_BIG_EXE=false` to hardcode the cutoffs for now.
*/
when #config(MATH_BIG_EXE, true) {
MUL_KARATSUBA_CUTOFF := _DEFAULT_MUL_KARATSUBA_CUTOFF
SQR_KARATSUBA_CUTOFF := _DEFAULT_SQR_KARATSUBA_CUTOFF
MUL_TOOM_CUTOFF := _DEFAULT_MUL_TOOM_CUTOFF
SQR_TOOM_CUTOFF := _DEFAULT_SQR_TOOM_CUTOFF
} else {
MUL_KARATSUBA_CUTOFF := _DEFAULT_MUL_KARATSUBA_CUTOFF
SQR_KARATSUBA_CUTOFF := _DEFAULT_SQR_KARATSUBA_CUTOFF
MUL_TOOM_CUTOFF := _DEFAULT_MUL_TOOM_CUTOFF
SQR_TOOM_CUTOFF := _DEFAULT_SQR_TOOM_CUTOFF
}
MUL_KARATSUBA_CUTOFF := _DEFAULT_MUL_KARATSUBA_CUTOFF
SQR_KARATSUBA_CUTOFF := _DEFAULT_SQR_KARATSUBA_CUTOFF
MUL_TOOM_CUTOFF := _DEFAULT_MUL_TOOM_CUTOFF
SQR_TOOM_CUTOFF := _DEFAULT_SQR_TOOM_CUTOFF
/*
These defaults were tuned on an AMD A8-6600K (64-bit) using libTomMath's `make tune`.
@@ -38,9 +27,6 @@ when #config(MATH_BIG_EXE, true) {
It would also be cool if we collected some data across various processor families.
This would let uss set reasonable defaults at runtime as this library initializes
itself by using `cpuid` or the ARM equivalent.
IMPORTANT: The 32_BIT path has largely gone untested. It needs to be tested and
debugged where necessary.
*/
_DEFAULT_MUL_KARATSUBA_CUTOFF :: #config(MATH_BIG_MUL_KARATSUBA_CUTOFF, 80)
@@ -54,7 +40,7 @@ MAX_ITERATIONS_ROOT_N := 500
/*
Largest `N` for which we'll compute `N!`
*/
FACTORIAL_MAX_N := 1_000_000
FACTORIAL_MAX_N := 1_000_000
/*
Cutoff to switch to int_factorial_binary_split, and its max recursion level.
@@ -86,22 +72,10 @@ MAX_ITERATIONS_RANDOM_PRIME := 1_000_000
*/
@thread_local RANDOM_PRIME_ITERATIONS_USED: int
/*
We don't allow these to be switched at runtime for two reasons:
1) 32-bit and 64-bit versions of procedures use different types for their storage,
so we'd have to double the number of procedures, and they couldn't interact.
2) Optimizations thanks to precomputed masks wouldn't work.
*/
MATH_BIG_FORCE_64_BIT :: #config(MATH_BIG_FORCE_64_BIT, false)
MATH_BIG_FORCE_32_BIT :: #config(MATH_BIG_FORCE_32_BIT, false)
when (MATH_BIG_FORCE_32_BIT && MATH_BIG_FORCE_64_BIT) { #panic("Cannot force 32-bit and 64-bit big backend simultaneously.") }
/*
Trade a smaller memory footprint for more processing overhead?
*/
_LOW_MEMORY :: #config(MATH_BIG_SMALL_MEMORY, false)
_LOW_MEMORY :: #config(MATH_BIG_SMALL_MEMORY, false)
when _LOW_MEMORY {
_DEFAULT_DIGIT_COUNT :: 8
_TAB_SIZE :: 32
@@ -217,28 +191,19 @@ _MIN_DIGIT_COUNT :: max(3, ((size_of(u128) + _DIGIT_BITS) - 1) / _DIGIT_BITS)
_MAX_BIT_COUNT :: (max(int) - 2)
_MAX_DIGIT_COUNT :: _MAX_BIT_COUNT / _DIGIT_BITS
when MATH_BIG_FORCE_64_BIT || (!MATH_BIG_FORCE_32_BIT && size_of(rawptr) == 8) {
/*
We can use u128 as an intermediary.
*/
DIGIT :: distinct u64
_WORD :: distinct u128
// Base 10 extraction constants
ITOA_DIVISOR :: DIGIT(1_000_000_000_000_000_000)
ITOA_COUNT :: 18
} else {
DIGIT :: distinct u32
_WORD :: distinct u64
// Base 10 extraction constants
ITOA_DIVISOR :: DIGIT(100_000_000)
ITOA_COUNT :: 8
}
// We use u128 as an intermediary.
DIGIT :: distinct u64
_WORD :: distinct u128
// Base 10 extraction constants
ITOA_DIVISOR :: DIGIT(1_000_000_000_000_000_000)
ITOA_COUNT :: 18
#assert(size_of(_WORD) == 2 * size_of(DIGIT))
_DIGIT_TYPE_BITS :: 8 * size_of(DIGIT)
_WORD_TYPE_BITS :: 8 * size_of(_WORD)
_DIGIT_NAILS :: 4
_DIGIT_NAILS :: 1
_DIGIT_BITS :: _DIGIT_TYPE_BITS - _DIGIT_NAILS
_WORD_BITS :: 2 * _DIGIT_BITS

View File

@@ -7,7 +7,6 @@ package math_big
import "base:intrinsics"
import "base:runtime"
import rnd "core:math/rand"
/*
TODO: Int.flags and Constants like ONE, NAN, etc, are not yet properly handled everywhere.
@@ -362,17 +361,7 @@ platform_count_lsb :: #force_inline proc(a: $T) -> (count: int)
count_lsb :: proc { int_count_lsb, platform_count_lsb, }
int_random_digit :: proc() -> (res: DIGIT) {
when _DIGIT_BITS == 60 { // DIGIT = u64
return DIGIT(rnd.uint64()) & _MASK
} else when _DIGIT_BITS == 28 { // DIGIT = u32
return DIGIT(rnd.uint32()) & _MASK
} else {
panic("Unsupported DIGIT size.")
}
return 0 // We shouldn't get here.
}
int_random_digit :: internal_int_random_digit
int_random :: proc(dest: ^Int, bits: int, allocator := context.allocator) -> (err: Error) {
/*

Some files were not shown because too many files have changed in this diff Show More