Merge branch 'odin-lang:master' into fix-omitempty-comma

This commit is contained in:
korvahkh
2024-06-13 01:27:44 +00:00
committed by GitHub
275 changed files with 14876 additions and 8223 deletions

View File

@@ -10,7 +10,7 @@ jobs:
steps:
- uses: actions/checkout@v4
- name: Build, Check, and Test
timeout-minutes: 25
timeout-minutes: 15
uses: vmactions/netbsd-vm@v1
with:
release: "10.0"
@@ -24,157 +24,135 @@ jobs:
/usr/sbin/pkg_add https://github.com/andreas-jonsson/llvm17-netbsd-bin/releases/download/pkgsrc-current/llvm-17.0.6.tgz
/usr/sbin/pkg_add https://github.com/andreas-jonsson/llvm17-netbsd-bin/releases/download/pkgsrc-current/clang-17.0.6.tgz
ln -s /usr/pkg/bin/python3.11 /usr/bin/python3
ln -s /usr/pkg/bin/bash /bin/bash
run: |
git config --global --add safe.directory $(pwd)
gmake release
./odin version
./odin report
gmake -C vendor/stb/src
gmake -C vendor/cgltf/src
gmake -C vendor/miniaudio/src
./odin check examples/all -vet -strict-style -target:netbsd_amd64
(cd tests/core; gmake all_bsd)
(cd tests/internal; gmake all_bsd)
./odin check examples/all -vet -strict-style -target:netbsd_arm64
./odin test tests/core/normal.odin -file -all-packages -define:ODIN_TEST_FANCY=false
./odin test tests/core/speed.odin -file -all-packages -o:speed -define:ODIN_TEST_FANCY=false
./odin test tests/vendor -all-packages -define:ODIN_TEST_FANCY=false
./odin test tests/benchmark -all-packages -define:ODIN_TEST_FANCY=false
(cd tests/issues; ./run.sh)
build_linux:
name: Ubuntu Build, Check, and Test
build_freebsd:
name: FreeBSD Build, Check, and Test
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- name: Download LLVM
- uses: actions/checkout@v4
- name: Build, Check, and Test
timeout-minutes: 15
uses: vmactions/freebsd-vm@v1
with:
usesh: true
copyback: false
prepare: |
pkg install -y gmake git bash python3 libxml2 llvm17
run: |
# `set -e` is needed for test failures to register. https://github.com/vmactions/freebsd-vm/issues/72
set -e -x
git config --global --add safe.directory $(pwd)
gmake release
./odin version
./odin report
gmake -C vendor/stb/src
gmake -C vendor/cgltf/src
gmake -C vendor/miniaudio/src
./odin check examples/all -vet -strict-style -target:freebsd_amd64
./odin test tests/core/normal.odin -file -all-packages -define:ODIN_TEST_FANCY=false
./odin test tests/core/speed.odin -file -all-packages -o:speed -define:ODIN_TEST_FANCY=false
./odin test tests/vendor -all-packages -define:ODIN_TEST_FANCY=false
./odin test tests/benchmark -all-packages -define:ODIN_TEST_FANCY=false
(cd tests/issues; ./run.sh)
ci:
strategy:
fail-fast: false
matrix:
# MacOS 13 runs on Intel, 14 runs on ARM
os: [ubuntu-latest, macos-13, macos-14]
runs-on: ${{ matrix.os }}
name: ${{ matrix.os == 'macos-14' && 'MacOS ARM' || (matrix.os == 'macos-13' && 'MacOS Intel' || 'Ubuntu') }} Build, Check, and Test
timeout-minutes: 15
steps:
- uses: actions/checkout@v4
- name: Download LLVM (Linux)
if: matrix.os == 'ubuntu-latest'
run: |
wget https://apt.llvm.org/llvm.sh
chmod +x llvm.sh
sudo ./llvm.sh 17
echo "/usr/lib/llvm-17/bin" >> $GITHUB_PATH
- name: build odin
run: ./build_odin.sh release
- name: Odin version
run: ./odin version
timeout-minutes: 1
- name: Odin report
run: ./odin report
timeout-minutes: 1
- name: Odin check
run: ./odin check examples/demo -vet
timeout-minutes: 10
- name: Odin run
run: ./odin run examples/demo
timeout-minutes: 10
- name: Odin run -debug
run: ./odin run examples/demo -debug
timeout-minutes: 10
- name: Odin check examples/all
run: ./odin check examples/all -strict-style
timeout-minutes: 10
- name: Core library tests
run: |
cd tests/core
make
timeout-minutes: 10
- name: Vendor library tests
run: |
cd tests/vendor
make
timeout-minutes: 10
- name: Odin internals tests
run: |
cd tests/internal
make
timeout-minutes: 10
- name: Odin check examples/all for Linux i386
run: ./odin check examples/all -vet -strict-style -target:linux_i386
timeout-minutes: 10
- name: Odin check examples/all for Linux arm64
run: ./odin check examples/all -vet -strict-style -target:linux_arm64
timeout-minutes: 10
- name: Odin check examples/all for FreeBSD amd64
run: ./odin check examples/all -vet -strict-style -target:freebsd_amd64
timeout-minutes: 10
- name: Odin check examples/all for OpenBSD amd64
run: ./odin check examples/all -vet -strict-style -target:openbsd_amd64
timeout-minutes: 10
build_macOS:
name: MacOS Build, Check, and Test
runs-on: macos-13
steps:
- uses: actions/checkout@v1
- name: Download LLVM, and setup PATH
- name: Download LLVM (MacOS Intel)
if: matrix.os == 'macos-13'
run: |
brew install llvm@17
echo "/usr/local/opt/llvm@17/bin" >> $GITHUB_PATH
- name: build odin
run: ./build_odin.sh release
- name: Odin version
run: ./odin version
timeout-minutes: 1
- name: Odin report
run: ./odin report
timeout-minutes: 1
- name: Odin check
run: ./odin check examples/demo -vet
timeout-minutes: 10
- name: Odin run
run: ./odin run examples/demo
timeout-minutes: 10
- name: Odin run -debug
run: ./odin run examples/demo -debug
timeout-minutes: 10
- name: Odin check examples/all
run: ./odin check examples/all -strict-style
timeout-minutes: 10
- name: Core library tests
run: |
cd tests/core
make
timeout-minutes: 10
- name: Odin internals tests
run: |
cd tests/internal
make
timeout-minutes: 10
build_macOS_arm:
name: MacOS ARM Build, Check, and Test
runs-on: macos-14 # This is an arm/m1 runner.
steps:
- uses: actions/checkout@v1
- name: Download LLVM and setup PATH
- name: Download LLVM (MacOS ARM)
if: matrix.os == 'macos-14'
run: |
brew install llvm@17
echo "/opt/homebrew/opt/llvm@17/bin" >> $GITHUB_PATH
- name: build odin
- name: Build Odin
run: ./build_odin.sh release
- name: Odin version
run: ./odin version
timeout-minutes: 1
- name: Odin report
run: ./odin report
timeout-minutes: 1
- name: Compile needed Vendor
run: |
make -C vendor/stb/src
make -C vendor/cgltf/src
make -C vendor/miniaudio/src
- name: Odin check
run: ./odin check examples/demo -vet
timeout-minutes: 10
- name: Odin run
run: ./odin run examples/demo
timeout-minutes: 10
- name: Odin run -debug
run: ./odin run examples/demo -debug
timeout-minutes: 10
- name: Odin check examples/all
run: ./odin check examples/all -strict-style
timeout-minutes: 10
- name: Core library tests
- name: Normal Core library tests
run: ./odin test tests/core/normal.odin -file -all-packages -define:ODIN_TEST_FANCY=false
- name: Optimized Core library tests
run: ./odin test tests/core/speed.odin -o:speed -file -all-packages -define:ODIN_TEST_FANCY=false
- name: Vendor library tests
run: ./odin test tests/vendor -all-packages -define:ODIN_TEST_FANCY=false
- name: Internals tests
run: ./odin test tests/internal -all-packages -define:ODIN_TEST_FANCY=false
- name: Core library benchmarks
run: ./odin test tests/benchmark -all-packages -define:ODIN_TEST_FANCY=false
- name: GitHub Issue tests
run: |
cd tests/core
make
timeout-minutes: 10
- name: Odin internals tests
run: |
cd tests/internal
make
timeout-minutes: 10
cd tests/issues
./run.sh
- name: Odin check examples/all for Linux i386
run: ./odin check examples/all -vet -strict-style -target:linux_i386
if: matrix.os == 'ubuntu-latest'
- name: Odin check examples/all for Linux arm64
run: ./odin check examples/all -vet -strict-style -target:linux_arm64
if: matrix.os == 'ubuntu-latest'
- name: Odin check examples/all for FreeBSD amd64
run: ./odin check examples/all -vet -strict-style -target:freebsd_amd64
if: matrix.os == 'ubuntu-latest'
- name: Odin check examples/all for OpenBSD amd64
run: ./odin check examples/all -vet -strict-style -target:openbsd_amd64
if: matrix.os == 'ubuntu-latest'
build_windows:
name: Windows Build, Check, and Test
runs-on: windows-2022
timeout-minutes: 15
steps:
- uses: actions/checkout@v1
- uses: actions/checkout@v4
- name: build Odin
shell: cmd
run: |
@@ -182,72 +160,67 @@ jobs:
./build.bat 1
- name: Odin version
run: ./odin version
timeout-minutes: 1
- name: Odin report
run: ./odin report
timeout-minutes: 1
- name: Odin check
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
odin check examples/demo -vet
timeout-minutes: 10
- name: Odin run
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
odin run examples/demo
timeout-minutes: 10
- name: Odin run -debug
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
odin run examples/demo -debug
timeout-minutes: 10
- name: Odin check examples/all
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
odin check examples/all -strict-style
timeout-minutes: 10
- name: Core library tests
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
cd tests\core
call build.bat
timeout-minutes: 10
odin test tests/core/normal.odin -file -all-packages -define:ODIN_TEST_FANCY=false
- name: Optimized core library tests
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
odin test tests/core/speed.odin -o:speed -file -all-packages -define:ODIN_TEST_FANCY=false
- name: Core library benchmarks
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
odin test tests/benchmark -all-packages -define:ODIN_TEST_FANCY=false
- name: Vendor library tests
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
cd tests\vendor
call build.bat
timeout-minutes: 10
odin test tests/vendor -all-packages -define:ODIN_TEST_FANCY=false
- name: Odin internals tests
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
cd tests\internal
call build.bat
timeout-minutes: 10
odin test tests/internal -all-packages -define:ODIN_TEST_FANCY=false
- name: Odin documentation tests
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
cd tests\documentation
call build.bat
timeout-minutes: 10
- name: core:math/big tests
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
cd tests\core\math\big
call build.bat
timeout-minutes: 10
- name: Odin check examples/all for Windows 32bits
shell: cmd
run: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvars64.bat
odin check examples/all -strict-style -target:windows_i386
timeout-minutes: 10

View File

@@ -11,7 +11,7 @@ jobs:
if: github.repository == 'odin-lang/Odin'
runs-on: windows-2022
steps:
- uses: actions/checkout@v1
- uses: actions/checkout@v4
- name: build Odin
shell: cmd
run: |
@@ -45,7 +45,7 @@ jobs:
if: github.repository == 'odin-lang/Odin'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- uses: actions/checkout@v4
- name: (Linux) Download LLVM
run: |
wget https://apt.llvm.org/llvm.sh
@@ -79,7 +79,7 @@ jobs:
if: github.repository == 'odin-lang/Odin'
runs-on: macos-13
steps:
- uses: actions/checkout@v1
- uses: actions/checkout@v4
- name: Download LLVM and setup PATH
run: |
brew install llvm@17 dylibbundler
@@ -113,7 +113,7 @@ jobs:
if: github.repository == 'odin-lang/Odin'
runs-on: macos-14 # ARM machine
steps:
- uses: actions/checkout@v1
- uses: actions/checkout@v4
- name: Download LLVM and setup PATH
run: |
brew install llvm@17 dylibbundler
@@ -146,16 +146,16 @@ jobs:
runs-on: [ubuntu-latest]
needs: [build_windows, build_macos, build_macos_arm, build_ubuntu]
steps:
- uses: actions/checkout@v1
- uses: actions/checkout@v4
- uses: actions/setup-python@v2
with:
python-version: '3.8.x'
- name: Install B2 CLI
- name: Install B2 SDK
shell: bash
run: |
python -m pip install --upgrade pip
pip install --upgrade b2
pip install --upgrade b2sdk
- name: Display Python version
run: python -c "import sys; print(sys.version)"
@@ -188,24 +188,9 @@ jobs:
BUCKET: ${{ secrets.B2_BUCKET }}
DAYS_TO_KEEP: ${{ secrets.B2_DAYS_TO_KEEP }}
run: |
echo Authorizing B2 account
b2 authorize-account "$APPID" "$APPKEY"
echo Uploading artifcates to B2
chmod +x ./ci/upload_create_nightly.sh
./ci/upload_create_nightly.sh "$BUCKET" windows-amd64 windows_artifacts/
./ci/upload_create_nightly.sh "$BUCKET" ubuntu-amd64 ubuntu_artifacts/dist.zip
./ci/upload_create_nightly.sh "$BUCKET" macos-amd64 macos_artifacts/dist.zip
./ci/upload_create_nightly.sh "$BUCKET" macos-arm64 macos_arm_artifacts/dist.zip
echo Deleting old artifacts in B2
python3 ci/delete_old_binaries.py "$BUCKET" "$DAYS_TO_KEEP"
echo Creating nightly.json
python3 ci/create_nightly_json.py "$BUCKET" > nightly.json
echo Uploading nightly.json
b2 upload-file "$BUCKET" nightly.json nightly.json
echo Clear B2 account info
b2 clear-account
python3 ci/nightly.py artifact windows-amd64 windows_artifacts/
python3 ci/nightly.py artifact ubuntu-amd64 ubuntu_artifacts/dist.zip
python3 ci/nightly.py artifact macos-amd64 macos_artifacts/dist.zip
python3 ci/nightly.py artifact macos-arm64 macos_arm_artifacts/dist.zip
python3 ci/nightly.py prune
python3 ci/nightly.py json

View File

@@ -126,3 +126,5 @@ clamp :: proc(value, minimum, maximum: T) -> T ---
soa_zip :: proc(slices: ...) -> #soa[]Struct ---
soa_unzip :: proc(value: $S/#soa[]$E) -> (slices: ...) ---
unreachable :: proc() -> ! ---

View File

@@ -295,6 +295,10 @@ simd_rotate_right :: proc(a: #simd[N]T, $offset: int) -> #simd[N]T ---
// if all listed features are supported.
has_target_feature :: proc($test: $T) -> bool where type_is_string(T) || type_is_proc(T) ---
// Returns the value of the procedure where `x` must be a call expression
procedure_of :: proc(x: $T) -> T where type_is_proc(T) ---
// WASM targets only
wasm_memory_grow :: proc(index, delta: uintptr) -> int ---
wasm_memory_size :: proc(index: uintptr) -> int ---

View File

@@ -470,6 +470,15 @@ Raw_Soa_Pointer :: struct {
index: int,
}
Raw_Complex32 :: struct {real, imag: f16}
Raw_Complex64 :: struct {real, imag: f32}
Raw_Complex128 :: struct {real, imag: f64}
Raw_Quaternion64 :: struct {imag, jmag, kmag: f16, real: f16}
Raw_Quaternion128 :: struct {imag, jmag, kmag: f32, real: f32}
Raw_Quaternion256 :: struct {imag, jmag, kmag: f64, real: f64}
Raw_Quaternion64_Vector_Scalar :: struct {vector: [3]f16, scalar: f16}
Raw_Quaternion128_Vector_Scalar :: struct {vector: [3]f32, scalar: f32}
Raw_Quaternion256_Vector_Scalar :: struct {vector: [3]f64, scalar: f64}
/*
@@ -481,7 +490,9 @@ Raw_Soa_Pointer :: struct {
Linux,
Essence,
FreeBSD,
Haiku,
OpenBSD,
NetBSD,
WASI,
JS,
Freestanding,
@@ -508,6 +519,7 @@ Odin_Arch_Type :: type_of(ODIN_ARCH)
Odin_Build_Mode_Type :: enum int {
Executable,
Dynamic,
Static,
Object,
Assembly,
LLVM_IR,
@@ -548,6 +560,19 @@ Odin_Platform_Subtarget_Type :: type_of(ODIN_PLATFORM_SUBTARGET)
*/
Odin_Sanitizer_Flags :: type_of(ODIN_SANITIZER_FLAGS)
/*
// Defined internally by the compiler
Odin_Optimization_Mode :: enum int {
None = -1,
Minimal = 0,
Size = 1,
Speed = 2,
Aggressive = 3,
}
ODIN_OPTIMIZATION_MODE // is a constant
*/
Odin_Optimization_Mode :: type_of(ODIN_OPTIMIZATION_MODE)
/////////////////////////////
// Init Startup Procedures //
@@ -689,7 +714,7 @@ default_assertion_failure_proc :: proc(prefix, message: string, loc: Source_Code
when ODIN_OS == .Freestanding {
// Do nothing
} else {
when !ODIN_DISABLE_ASSERT {
when ODIN_OS != .Orca && !ODIN_DISABLE_ASSERT {
print_caller_location(loc)
print_string(" ")
}
@@ -698,7 +723,18 @@ default_assertion_failure_proc :: proc(prefix, message: string, loc: Source_Code
print_string(": ")
print_string(message)
}
print_byte('\n')
when ODIN_OS == .Orca {
assert_fail(
cstring(raw_data(loc.file_path)),
cstring(raw_data(loc.procedure)),
loc.line,
"",
cstring(raw_data(orca_stderr_buffer[:orca_stderr_buffer_idx])),
)
} else {
print_byte('\n')
}
}
trap()
}

View File

@@ -383,7 +383,7 @@ clear_map :: proc "contextless" (m: ^$T/map[$K]$V) {
//
// Note: Prefer the procedure group `reserve`
@builtin
reserve_map :: proc(m: ^$T/map[$K]$V, capacity: int, loc := #caller_location) -> Allocator_Error {
reserve_map :: proc(m: ^$T/map[$K]$V, #any_int capacity: int, loc := #caller_location) -> Allocator_Error {
return __dynamic_map_reserve((^Raw_Map)(m), map_info(T), uint(capacity), loc) if m != nil else nil
}
@@ -721,12 +721,12 @@ _reserve_dynamic_array :: #force_inline proc(array: ^$T/[dynamic]$E, capacity: i
}
@builtin
reserve_dynamic_array :: proc(array: ^$T/[dynamic]$E, capacity: int, loc := #caller_location) -> Allocator_Error {
reserve_dynamic_array :: proc(array: ^$T/[dynamic]$E, #any_int capacity: int, loc := #caller_location) -> Allocator_Error {
return _reserve_dynamic_array(array, capacity, true, loc)
}
@builtin
non_zero_reserve_dynamic_array :: proc(array: ^$T/[dynamic]$E, capacity: int, loc := #caller_location) -> Allocator_Error {
non_zero_reserve_dynamic_array :: proc(array: ^$T/[dynamic]$E, #any_int capacity: int, loc := #caller_location) -> Allocator_Error {
return _reserve_dynamic_array(array, capacity, false, loc)
}
@@ -773,12 +773,12 @@ _resize_dynamic_array :: #force_inline proc(array: ^$T/[dynamic]$E, length: int,
}
@builtin
resize_dynamic_array :: proc(array: ^$T/[dynamic]$E, length: int, loc := #caller_location) -> Allocator_Error {
resize_dynamic_array :: proc(array: ^$T/[dynamic]$E, #any_int length: int, loc := #caller_location) -> Allocator_Error {
return _resize_dynamic_array(array, length, true, loc=loc)
}
@builtin
non_zero_resize_dynamic_array :: proc(array: ^$T/[dynamic]$E, length: int, loc := #caller_location) -> Allocator_Error {
non_zero_resize_dynamic_array :: proc(array: ^$T/[dynamic]$E, #any_int length: int, loc := #caller_location) -> Allocator_Error {
return _resize_dynamic_array(array, length, false, loc=loc)
}

View File

@@ -6,7 +6,7 @@ when ODIN_DEFAULT_TO_NIL_ALLOCATOR {
} else when ODIN_DEFAULT_TO_PANIC_ALLOCATOR {
default_allocator_proc :: panic_allocator_proc
default_allocator :: panic_allocator
} else when ODIN_ARCH == .wasm32 || ODIN_ARCH == .wasm64p32 {
} else when ODIN_OS != .Orca && (ODIN_ARCH == .wasm32 || ODIN_ARCH == .wasm64p32) {
default_allocator :: default_wasm_allocator
default_allocator_proc :: wasm_allocator_proc
} else {

View File

@@ -157,7 +157,7 @@ __dynamic_map_get // dynamic map calls
__dynamic_map_set // dynamic map calls
## Dynamic literals ([dymamic]T and map[K]V) (can be disabled with -no-dynamic-literals)
## Dynamic literals ([dynamic]T and map[K]V) (can be disabled with -no-dynamic-literals)
__dynamic_array_reserve
__dynamic_array_append

View File

@@ -6,15 +6,29 @@ package runtime
import "base:intrinsics"
when !ODIN_TEST && !ODIN_NO_ENTRY_POINT {
@(link_name="_start", linkage="strong", require, export)
_start :: proc "c" () {
context = default_context()
#force_no_inline _startup_runtime()
intrinsics.__entry_point()
when ODIN_OS == .Orca {
@(linkage="strong", require, export)
oc_on_init :: proc "c" () {
context = default_context()
#force_no_inline _startup_runtime()
intrinsics.__entry_point()
}
@(linkage="strong", require, export)
oc_on_terminate :: proc "c" () {
context = default_context()
#force_no_inline _cleanup_runtime()
}
} else {
@(link_name="_start", linkage="strong", require, export)
_start :: proc "c" () {
context = default_context()
#force_no_inline _startup_runtime()
intrinsics.__entry_point()
}
@(link_name="_end", linkage="strong", require, export)
_end :: proc "c" () {
context = default_context()
#force_no_inline _cleanup_runtime()
}
}
@(link_name="_end", linkage="strong", require, export)
_end :: proc "c" () {
context = default_context()
#force_no_inline _cleanup_runtime()
}
}
}

View File

@@ -4,6 +4,8 @@ package runtime
bounds_trap :: proc "contextless" () -> ! {
when ODIN_OS == .Windows {
windows_trap_array_bounds()
} else when ODIN_OS == .Orca {
abort_ext("", "", 0, "bounds trap")
} else {
trap()
}
@@ -13,6 +15,8 @@ bounds_trap :: proc "contextless" () -> ! {
type_assertion_trap :: proc "contextless" () -> ! {
when ODIN_OS == .Windows {
windows_trap_type_assertion()
} else when ODIN_OS == .Orca {
abort_ext("", "", 0, "type assertion trap")
} else {
trap()
}

View File

@@ -0,0 +1,29 @@
//+build orca
//+private
package runtime
foreign {
@(link_name="malloc") _orca_malloc :: proc "c" (size: int) -> rawptr ---
@(link_name="calloc") _orca_calloc :: proc "c" (num, size: int) -> rawptr ---
@(link_name="free") _orca_free :: proc "c" (ptr: rawptr) ---
@(link_name="realloc") _orca_realloc :: proc "c" (ptr: rawptr, size: int) -> rawptr ---
}
_heap_alloc :: proc(size: int, zero_memory := true) -> rawptr {
if size <= 0 {
return nil
}
if zero_memory {
return _orca_calloc(1, size)
} else {
return _orca_malloc(size)
}
}
_heap_resize :: proc(ptr: rawptr, new_size: int) -> rawptr {
return _orca_realloc(ptr, new_size)
}
_heap_free :: proc(ptr: rawptr) {
_orca_free(ptr)
}

View File

@@ -12,4 +12,4 @@ _heap_resize :: proc(ptr: rawptr, new_size: int) -> rawptr {
_heap_free :: proc(ptr: rawptr) {
unimplemented("base:runtime 'heap_free' procedure is not supported on this platform")
}
}

View File

@@ -483,7 +483,7 @@ quaternion256_ne :: #force_inline proc "contextless" (a, b: quaternion256) -> bo
string_decode_rune :: #force_inline proc "contextless" (s: string) -> (rune, int) {
// NOTE(bill): Duplicated here to remove dependency on package unicode/utf8
@static accept_sizes := [256]u8{
@(static, rodata) accept_sizes := [256]u8{
0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, // 0x00-0x0f
0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, // 0x10-0x1f
0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, // 0x20-0x2f
@@ -504,7 +504,7 @@ string_decode_rune :: #force_inline proc "contextless" (s: string) -> (rune, int
}
Accept_Range :: struct {lo, hi: u8}
@static accept_ranges := [5]Accept_Range{
@(static, rodata) accept_ranges := [5]Accept_Range{
{0x80, 0xbf},
{0xa0, 0xbf},
{0x80, 0x9f},

View File

@@ -0,0 +1,43 @@
//+build orca
//+private
package runtime
import "base:intrinsics"
// Constants allowing to specify the level of logging verbosity.
log_level :: enum u32 {
// Only errors are logged.
ERROR = 0,
// Only warnings and errors are logged.
WARNING = 1,
// All messages are logged.
INFO = 2,
COUNT = 3,
}
@(default_calling_convention="c", link_prefix="oc_")
foreign {
abort_ext :: proc(file: cstring, function: cstring, line: i32, fmt: cstring, #c_vararg args: ..any) -> ! ---
assert_fail :: proc(file: cstring, function: cstring, line: i32, src: cstring, fmt: cstring, #c_vararg args: ..any) -> ! ---
log_ext :: proc(level: log_level, function: cstring, file: cstring, line: i32, fmt: cstring, #c_vararg args: ..any) ---
}
// NOTE: This is all pretty gross, don't look.
// WASM is single threaded so this should be fine.
orca_stderr_buffer: [4096]byte
orca_stderr_buffer_idx: int
_stderr_write :: proc "contextless" (data: []byte) -> (int, _OS_Errno) {
for b in data {
orca_stderr_buffer[orca_stderr_buffer_idx] = b
orca_stderr_buffer_idx += 1
if b == '\n' || orca_stderr_buffer_idx == len(orca_stderr_buffer)-1 {
log_ext(.ERROR, "", "", 0, cstring(raw_data(orca_stderr_buffer[:orca_stderr_buffer_idx])))
orca_stderr_buffer_idx = 0
}
}
return len(data), 0
}

View File

@@ -25,13 +25,19 @@ when ODIN_NO_CRT && ODIN_OS == .Windows {
RtlMoveMemory(dst, src, len)
return dst
}
} else when ODIN_NO_CRT || (ODIN_ARCH == .wasm32 || ODIN_ARCH == .wasm64p32) {
} else when ODIN_NO_CRT || (ODIN_OS != .Orca && (ODIN_ARCH == .wasm32 || ODIN_ARCH == .wasm64p32)) {
// NOTE: on wasm, calls to these procs are generated (by LLVM) with type `i32` instead of `int`.
//
// NOTE: `#any_int` is also needed, because calls that we generate (and package code)
// will be using `int` and need to be converted.
int_t :: i32 when ODIN_ARCH == .wasm64p32 else int
@(link_name="memset", linkage="strong", require)
memset :: proc "c" (ptr: rawptr, val: i32, len: int) -> rawptr {
memset :: proc "c" (ptr: rawptr, val: i32, #any_int len: int_t) -> rawptr {
if ptr != nil && len != 0 {
b := byte(val)
p := ([^]byte)(ptr)
for i := 0; i < len; i += 1 {
for i := int_t(0); i < len; i += 1 {
p[i] = b
}
}
@@ -39,10 +45,10 @@ when ODIN_NO_CRT && ODIN_OS == .Windows {
}
@(link_name="bzero", linkage="strong", require)
bzero :: proc "c" (ptr: rawptr, len: int) -> rawptr {
bzero :: proc "c" (ptr: rawptr, #any_int len: int_t) -> rawptr {
if ptr != nil && len != 0 {
p := ([^]byte)(ptr)
for i := 0; i < len; i += 1 {
for i := int_t(0); i < len; i += 1 {
p[i] = 0
}
}
@@ -50,7 +56,7 @@ when ODIN_NO_CRT && ODIN_OS == .Windows {
}
@(link_name="memmove", linkage="strong", require)
memmove :: proc "c" (dst, src: rawptr, len: int) -> rawptr {
memmove :: proc "c" (dst, src: rawptr, #any_int len: int_t) -> rawptr {
d, s := ([^]byte)(dst), ([^]byte)(src)
if d == s || len == 0 {
return dst
@@ -63,7 +69,7 @@ when ODIN_NO_CRT && ODIN_OS == .Windows {
}
if s > d && uintptr(s)-uintptr(d) < uintptr(len) {
for i := 0; i < len; i += 1 {
for i := int_t(0); i < len; i += 1 {
d[i] = s[i]
}
return dst
@@ -71,10 +77,10 @@ when ODIN_NO_CRT && ODIN_OS == .Windows {
return memcpy(dst, src, len)
}
@(link_name="memcpy", linkage="strong", require)
memcpy :: proc "c" (dst, src: rawptr, len: int) -> rawptr {
memcpy :: proc "c" (dst, src: rawptr, #any_int len: int_t) -> rawptr {
d, s := ([^]byte)(dst), ([^]byte)(src)
if d != s {
for i := 0; i < len; i += 1 {
for i := int_t(0); i < len; i += 1 {
d[i] = s[i]
}
}
@@ -92,4 +98,4 @@ when ODIN_NO_CRT && ODIN_OS == .Windows {
}
return ptr
}
}
}

View File

@@ -7,20 +7,20 @@ import "base:intrinsics"
Port of emmalloc, modified for use in Odin.
Invariants:
- Per-allocation header overhead is 8 bytes, smallest allocated payload
amount is 8 bytes, and a multiple of 4 bytes.
- Acquired memory blocks are subdivided into disjoint regions that lie
next to each other.
- A region is either in used or free.
Used regions may be adjacent, and a used and unused region
may be adjacent, but not two unused ones - they would be
merged.
- Memory allocation takes constant time, unless the alloc needs to wasm_memory_grow()
or memory is very close to being exhausted.
- Free and used regions are managed inside "root regions", which are slabs
of memory acquired via wasm_memory_grow().
- Memory retrieved using wasm_memory_grow() can not be given back to the OS.
Therefore, frees are internal to the allocator.
- Per-allocation header overhead is 8 bytes, smallest allocated payload
amount is 8 bytes, and a multiple of 4 bytes.
- Acquired memory blocks are subdivided into disjoint regions that lie
next to each other.
- A region is either in used or free.
Used regions may be adjacent, and a used and unused region
may be adjacent, but not two unused ones - they would be
merged.
- Memory allocation takes constant time, unless the alloc needs to wasm_memory_grow()
or memory is very close to being exhausted.
- Free and used regions are managed inside "root regions", which are slabs
of memory acquired via wasm_memory_grow().
- Memory retrieved using wasm_memory_grow() can not be given back to the OS.
Therefore, frees are internal to the allocator.
Copyright (c) 2010-2014 Emscripten authors, see AUTHORS file.

View File

@@ -71,8 +71,8 @@ Darwin)
fi
darwin_sysroot=
if [ $(which xcode-select) ]; then
darwin_sysroot="--sysroot $(xcode-select -p)/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk"
if [ $(which xcrun) ]; then
darwin_sysroot="--sysroot $(xcrun --sdk macosx --show-sdk-path)"
elif [[ -e "/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk" ]]; then
darwin_sysroot="--sysroot /Library/Developer/CommandLineTools/SDKs/MacOSX.sdk"
else

View File

@@ -1,51 +0,0 @@
import subprocess
import sys
import json
import datetime
import urllib.parse
import sys
def main():
files_by_date = {}
bucket = sys.argv[1]
files_lines = execute_cli(f"b2 ls --long {bucket} nightly").split("\n")
for x in files_lines:
parts = x.split(" ", 1)
if parts[0]:
json_str = execute_cli(f"b2 get-file-info {parts[0]}")
data = json.loads(json_str)
name = remove_prefix(data['fileName'], "nightly/")
url = f"https://f001.backblazeb2.com/file/{bucket}/nightly/{urllib.parse.quote_plus(name)}"
sha1 = data['contentSha1']
size = int(data['size'])
ts = int(data['fileInfo']['src_last_modified_millis'])
date = datetime.datetime.fromtimestamp(ts/1000).strftime('%Y-%m-%d')
if date not in files_by_date.keys():
files_by_date[date] = []
files_by_date[date].append({
'name': name,
'url': url,
'sha1': sha1,
'sizeInBytes': size,
})
now = datetime.datetime.utcnow().isoformat()
print(json.dumps({
'last_updated' : now,
'files': files_by_date
}, sort_keys=True, indent=4))
def remove_prefix(text, prefix):
return text[text.startswith(prefix) and len(prefix):]
def execute_cli(command):
sb = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
return sb.stdout.read().decode("utf-8");
if __name__ == '__main__':
sys.exit(main())

View File

@@ -1,34 +0,0 @@
import subprocess
import sys
import json
import datetime
import urllib.parse
import sys
def main():
files_by_date = {}
bucket = sys.argv[1]
days_to_keep = int(sys.argv[2])
print(f"Looking for binaries to delete older than {days_to_keep} days")
files_lines = execute_cli(f"b2 ls --long --versions {bucket} nightly").split("\n")
for x in files_lines:
parts = [y for y in x.split(' ') if y]
if parts and parts[0]:
date = datetime.datetime.strptime(parts[2], '%Y-%m-%d').replace(hour=0, minute=0, second=0, microsecond=0)
now = datetime.datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
delta = now - date
if delta.days > days_to_keep:
print(f'Deleting {parts[5]}')
execute_cli(f'b2 delete-file-version {parts[0]}')
def execute_cli(command):
sb = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
return sb.stdout.read().decode("utf-8");
if __name__ == '__main__':
sys.exit(main())

140
ci/nightly.py Normal file
View File

@@ -0,0 +1,140 @@
import os
import sys
from zipfile import ZipFile, ZIP_DEFLATED
from b2sdk.v2 import InMemoryAccountInfo, B2Api
from datetime import datetime
import json
UPLOAD_FOLDER = "nightly/"
info = InMemoryAccountInfo()
b2_api = B2Api(info)
application_key_id = os.environ['APPID']
application_key = os.environ['APPKEY']
bucket_name = os.environ['BUCKET']
days_to_keep = os.environ['DAYS_TO_KEEP']
def auth() -> bool:
try:
realm = b2_api.account_info.get_realm()
return True # Already authenticated
except:
pass # Not yet authenticated
err = b2_api.authorize_account("production", application_key_id, application_key)
return err == None
def get_bucket():
if not auth(): sys.exit(1)
return b2_api.get_bucket_by_name(bucket_name)
def remove_prefix(text: str, prefix: str) -> str:
return text[text.startswith(prefix) and len(prefix):]
def create_and_upload_artifact_zip(platform: str, artifact: str) -> int:
now = datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
destination_zip_name = "odin-{}-nightly+{}.zip".format(platform, now.strftime("%Y-%m-%d"))
source_zip_name = artifact
if not artifact.endswith(".zip"):
print(f"Creating archive {destination_zip_name} from {artifact} and uploading to {bucket_name}")
source_zip_name = destination_zip_name
with ZipFile(source_zip_name, mode='w', compression=ZIP_DEFLATED, compresslevel=9) as z:
for root, directory, filenames in os.walk(artifact):
for file in filenames:
file_path = os.path.join(root, file)
zip_path = os.path.join("dist", os.path.relpath(file_path, artifact))
z.write(file_path, zip_path)
if not os.path.exists(source_zip_name):
print(f"Error: Newly created ZIP archive {source_zip_name} not found.")
return 1
print("Uploading {} to {}".format(source_zip_name, UPLOAD_FOLDER + destination_zip_name))
bucket = get_bucket()
res = bucket.upload_local_file(
source_zip_name, # Local file to upload
"nightly/" + destination_zip_name, # B2 destination path
)
return 0
def prune_artifacts():
    """Delete bucket artifacts uploaded more than `days_to_keep` days ago."""
    print(f"Looking for binaries to delete older than {days_to_keep} days")
    max_age_days = int(days_to_keep)
    bucket = get_bucket()
    for file_version, _ in bucket.ls(UPLOAD_FOLDER, latest_only=False):
        # B2 upload timestamps are in milliseconds; truncate both sides to
        # midnight so the comparison is in whole days.
        uploaded = datetime.fromtimestamp(file_version.upload_timestamp / 1_000.0)
        uploaded = uploaded.replace(hour=0, minute=0, second=0, microsecond=0)
        today = datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
        if (today - uploaded).days > max_age_days:
            print("Deleting {}".format(file_version.file_name))
            file_version.delete()
    return 0
def update_nightly_json():
    """Rebuild nightly.json from the bucket's latest artifacts and upload it.

    Groups the newest version of each artifact by upload date and writes a
    sorted, indented JSON index to the bucket root. Returns 0.
    """
    print(f"Updating nightly.json with files {days_to_keep} days or newer")
    files_by_date = {}
    bucket = get_bucket()
    for file, _ in bucket.ls(UPLOAD_FOLDER, latest_only=True):
        # Timestamp is in milliseconds; truncate to midnight for the date key.
        date = datetime.fromtimestamp(file.upload_timestamp / 1_000.0).replace(hour=0, minute=0, second=0, microsecond=0).strftime('%Y-%m-%d')
        # setdefault replaces the original `if date not in files_by_date.keys()` dance.
        files_by_date.setdefault(date, []).append({
            'name': remove_prefix(file.file_name, UPLOAD_FOLDER),
            'url': bucket.get_download_url(file.file_name),
            'sha1': file.content_sha1,
            'sizeInBytes': file.size,
        })

    nightly = json.dumps({
        'last_updated': datetime.utcnow().isoformat(),
        'files': files_by_date
    }, sort_keys=True, indent=4, ensure_ascii=False).encode('utf-8')

    bucket.upload_bytes(
        nightly,         # JSON bytes
        "nightly.json",  # B2 destination path
    )
    return 0
if __name__ == "__main__":
    # CLI dispatch: artifact | prune | json.
    if len(sys.argv) == 1:
        print("Usage: {} <verb> [arguments]".format(sys.argv[0]))
        print("\tartifact <platform prefix> <artifact path>\n\t\tCreates and uploads a platform artifact zip.")
        print("\tprune\n\t\tDeletes old artifacts from bucket")
        print("\tjson\n\t\tUpdate and upload nightly.json")
        sys.exit(1)

    command = sys.argv[1].lower()
    if command == "artifact":
        if len(sys.argv) != 4:
            print("Usage: {} artifact <platform prefix> <artifact path>".format(sys.argv[0]))
            print("Error: Expected artifact command to be given platform prefix and artifact path.\n")
            sys.exit(1)
        sys.exit(create_and_upload_artifact_zip(sys.argv[2], sys.argv[3]))
    elif command == "prune":
        sys.exit(prune_artifacts())
    elif command == "json":
        sys.exit(update_nightly_json())
    else:
        # Previously an unknown verb fell through silently with exit code 0,
        # which would make a misspelled CI step look successful.
        print("Error: Unknown command {!r}".format(command))
        sys.exit(1)

View File

@@ -1,25 +0,0 @@
#!/bin/bash
# Create (if needed) and upload a nightly artifact zip to a Backblaze B2 bucket.
# Usage: <script> <bucket> <platform> <artifact>
set -e
bucket=$1
platform=$2
artifact=$3
# Destination filename embeds the platform and today's date.
now=$(date +'%Y-%m-%d')
filename="odin-$platform-nightly+$now.zip"
echo "Creating archive $filename from $artifact and uploading to $bucket"
# If this is already zipped up (done before artifact upload to keep permissions intact), just move it.
if [ "${artifact: -4}" == ".zip" ]
then
echo "Artifact already a zip"
mkdir -p "output"
mv "$artifact" "output/$filename"
else
echo "Artifact needs to be zipped"
# 7z flags: a = add to archive, -bd = no progress indicator, -r = recurse.
7z a -bd "output/$filename" -r "$artifact"
fi
b2 upload-file --noProgress "$bucket" "output/$filename" "nightly/$filename"

View File

@@ -29,12 +29,12 @@ MIN_READ_BUFFER_SIZE :: 16
@(private)
DEFAULT_MAX_CONSECUTIVE_EMPTY_READS :: 128
reader_init :: proc(b: ^Reader, rd: io.Reader, size: int = DEFAULT_BUF_SIZE, allocator := context.allocator) {
reader_init :: proc(b: ^Reader, rd: io.Reader, size: int = DEFAULT_BUF_SIZE, allocator := context.allocator, loc := #caller_location) {
size := size
size = max(size, MIN_READ_BUFFER_SIZE)
reader_reset(b, rd)
b.buf_allocator = allocator
b.buf = make([]byte, size, allocator)
b.buf = make([]byte, size, allocator, loc)
}
reader_init_with_buf :: proc(b: ^Reader, rd: io.Reader, buf: []byte) {

View File

@@ -27,19 +27,19 @@ Read_Op :: enum i8 {
}
buffer_init :: proc(b: ^Buffer, buf: []byte) {
resize(&b.buf, len(buf))
buffer_init :: proc(b: ^Buffer, buf: []byte, loc := #caller_location) {
resize(&b.buf, len(buf), loc=loc)
copy(b.buf[:], buf)
}
buffer_init_string :: proc(b: ^Buffer, s: string) {
resize(&b.buf, len(s))
buffer_init_string :: proc(b: ^Buffer, s: string, loc := #caller_location) {
resize(&b.buf, len(s), loc=loc)
copy(b.buf[:], s)
}
buffer_init_allocator :: proc(b: ^Buffer, len, cap: int, allocator := context.allocator) {
buffer_init_allocator :: proc(b: ^Buffer, len, cap: int, allocator := context.allocator, loc := #caller_location) {
if b.buf == nil {
b.buf = make([dynamic]byte, len, cap, allocator)
b.buf = make([dynamic]byte, len, cap, allocator, loc)
return
}
@@ -96,28 +96,28 @@ buffer_truncate :: proc(b: ^Buffer, n: int) {
}
@(private)
_buffer_try_grow :: proc(b: ^Buffer, n: int) -> (int, bool) {
_buffer_try_grow :: proc(b: ^Buffer, n: int, loc := #caller_location) -> (int, bool) {
if l := len(b.buf); n <= cap(b.buf)-l {
resize(&b.buf, l+n)
resize(&b.buf, l+n, loc=loc)
return l, true
}
return 0, false
}
@(private)
_buffer_grow :: proc(b: ^Buffer, n: int) -> int {
_buffer_grow :: proc(b: ^Buffer, n: int, loc := #caller_location) -> int {
m := buffer_length(b)
if m == 0 && b.off != 0 {
buffer_reset(b)
}
if i, ok := _buffer_try_grow(b, n); ok {
if i, ok := _buffer_try_grow(b, n, loc=loc); ok {
return i
}
if b.buf == nil && n <= SMALL_BUFFER_SIZE {
// Fixes #2756 by preserving allocator if already set on Buffer via init_buffer_allocator
reserve(&b.buf, SMALL_BUFFER_SIZE)
resize(&b.buf, n)
reserve(&b.buf, SMALL_BUFFER_SIZE, loc=loc)
resize(&b.buf, n, loc=loc)
return 0
}
@@ -127,31 +127,31 @@ _buffer_grow :: proc(b: ^Buffer, n: int) -> int {
} else if c > max(int) - c - n {
panic("bytes.Buffer: too large")
} else {
resize(&b.buf, 2*c + n)
resize(&b.buf, 2*c + n, loc=loc)
copy(b.buf[:], b.buf[b.off:])
}
b.off = 0
resize(&b.buf, m+n)
resize(&b.buf, m+n, loc=loc)
return m
}
buffer_grow :: proc(b: ^Buffer, n: int) {
buffer_grow :: proc(b: ^Buffer, n: int, loc := #caller_location) {
if n < 0 {
panic("bytes.buffer_grow: negative count")
}
m := _buffer_grow(b, n)
resize(&b.buf, m)
m := _buffer_grow(b, n, loc=loc)
resize(&b.buf, m, loc=loc)
}
buffer_write_at :: proc(b: ^Buffer, p: []byte, offset: int) -> (n: int, err: io.Error) {
buffer_write_at :: proc(b: ^Buffer, p: []byte, offset: int, loc := #caller_location) -> (n: int, err: io.Error) {
b.last_read = .Invalid
if offset < 0 {
err = .Invalid_Offset
return
}
_, ok := _buffer_try_grow(b, offset+len(p))
_, ok := _buffer_try_grow(b, offset+len(p), loc=loc)
if !ok {
_ = _buffer_grow(b, offset+len(p))
_ = _buffer_grow(b, offset+len(p), loc=loc)
}
if len(b.buf) <= offset {
return 0, .Short_Write
@@ -160,47 +160,47 @@ buffer_write_at :: proc(b: ^Buffer, p: []byte, offset: int) -> (n: int, err: io.
}
buffer_write :: proc(b: ^Buffer, p: []byte) -> (n: int, err: io.Error) {
buffer_write :: proc(b: ^Buffer, p: []byte, loc := #caller_location) -> (n: int, err: io.Error) {
b.last_read = .Invalid
m, ok := _buffer_try_grow(b, len(p))
m, ok := _buffer_try_grow(b, len(p), loc=loc)
if !ok {
m = _buffer_grow(b, len(p))
m = _buffer_grow(b, len(p), loc=loc)
}
return copy(b.buf[m:], p), nil
}
buffer_write_ptr :: proc(b: ^Buffer, ptr: rawptr, size: int) -> (n: int, err: io.Error) {
return buffer_write(b, ([^]byte)(ptr)[:size])
buffer_write_ptr :: proc(b: ^Buffer, ptr: rawptr, size: int, loc := #caller_location) -> (n: int, err: io.Error) {
return buffer_write(b, ([^]byte)(ptr)[:size], loc=loc)
}
buffer_write_string :: proc(b: ^Buffer, s: string) -> (n: int, err: io.Error) {
buffer_write_string :: proc(b: ^Buffer, s: string, loc := #caller_location) -> (n: int, err: io.Error) {
b.last_read = .Invalid
m, ok := _buffer_try_grow(b, len(s))
m, ok := _buffer_try_grow(b, len(s), loc=loc)
if !ok {
m = _buffer_grow(b, len(s))
m = _buffer_grow(b, len(s), loc=loc)
}
return copy(b.buf[m:], s), nil
}
buffer_write_byte :: proc(b: ^Buffer, c: byte) -> io.Error {
buffer_write_byte :: proc(b: ^Buffer, c: byte, loc := #caller_location) -> io.Error {
b.last_read = .Invalid
m, ok := _buffer_try_grow(b, 1)
m, ok := _buffer_try_grow(b, 1, loc=loc)
if !ok {
m = _buffer_grow(b, 1)
m = _buffer_grow(b, 1, loc=loc)
}
b.buf[m] = c
return nil
}
buffer_write_rune :: proc(b: ^Buffer, r: rune) -> (n: int, err: io.Error) {
buffer_write_rune :: proc(b: ^Buffer, r: rune, loc := #caller_location) -> (n: int, err: io.Error) {
if r < utf8.RUNE_SELF {
buffer_write_byte(b, byte(r))
buffer_write_byte(b, byte(r), loc=loc)
return 1, nil
}
b.last_read = .Invalid
m, ok := _buffer_try_grow(b, utf8.UTF_MAX)
m, ok := _buffer_try_grow(b, utf8.UTF_MAX, loc=loc)
if !ok {
m = _buffer_grow(b, utf8.UTF_MAX)
m = _buffer_grow(b, utf8.UTF_MAX, loc=loc)
}
res: [4]byte
res, n = utf8.encode_rune(r)

View File

@@ -34,7 +34,7 @@ when ODIN_OS == .Windows {
SIGTERM :: 15
}
when ODIN_OS == .Linux || ODIN_OS == .FreeBSD {
when ODIN_OS == .Linux || ODIN_OS == .FreeBSD || ODIN_OS == .Haiku || ODIN_OS == .OpenBSD || ODIN_OS == .NetBSD {
SIG_ERR :: rawptr(~uintptr(0))
SIG_DFL :: rawptr(uintptr(0))
SIG_IGN :: rawptr(uintptr(1))

View File

@@ -102,10 +102,12 @@ when ODIN_OS == .OpenBSD || ODIN_OS == .NetBSD {
SEEK_END :: 2
foreign libc {
stderr: ^FILE
stdin: ^FILE
stdout: ^FILE
__sF: [3]FILE
}
stdin: ^FILE = &__sF[0]
stdout: ^FILE = &__sF[1]
stderr: ^FILE = &__sF[2]
}
when ODIN_OS == .FreeBSD {
@@ -127,9 +129,9 @@ when ODIN_OS == .FreeBSD {
SEEK_END :: 2
foreign libc {
stderr: ^FILE
stdin: ^FILE
stdout: ^FILE
@(link_name="__stderrp") stderr: ^FILE
@(link_name="__stdinp") stdin: ^FILE
@(link_name="__stdoutp") stdout: ^FILE
}
}

28
core/crypto/_aes/aes.odin Normal file
View File

@@ -0,0 +1,28 @@
// package _aes provides shared constants for AES block cipher and GHASH
// implementations.
package _aes

// KEY_SIZE_128 is the AES-128 key size in bytes.
KEY_SIZE_128 :: 16
// KEY_SIZE_192 is the AES-192 key size in bytes.
KEY_SIZE_192 :: 24
// KEY_SIZE_256 is the AES-256 key size in bytes.
KEY_SIZE_256 :: 32
// BLOCK_SIZE is the AES block size in bytes.
BLOCK_SIZE :: 16

// ROUNDS_128 is the number of rounds for AES-128.
ROUNDS_128 :: 10
// ROUNDS_192 is the number of rounds for AES-192.
ROUNDS_192 :: 12
// ROUNDS_256 is the number of rounds for AES-256.
ROUNDS_256 :: 14

// GHASH_KEY_SIZE is the GHASH key size in bytes.
GHASH_KEY_SIZE :: 16
// GHASH_BLOCK_SIZE is the GHASH block size in bytes.
GHASH_BLOCK_SIZE :: 16
// GHASH_TAG_SIZE is the GHASH tag size in bytes.
GHASH_TAG_SIZE :: 16

// RCON is the AES keyschedule round constants.
RCON := [10]byte{0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1B, 0x36}

View File

@@ -0,0 +1,96 @@
package aes_ct64
import "base:intrinsics"
import "core:mem"
// STRIDE is the number of blocks processed per bitsliced invocation.
STRIDE :: 4

// Context is a keyed AES (ECB) instance.
Context :: struct {
	_sk_exp: [120]u64, // expanded (bitsliced) round-key schedule
	_num_rounds: int, // 10, 12, or 14, set by `init` from the key size
	_is_initialized: bool,
}
// init initializes a context for AES with the provided key.
//
// The key must be 16, 24, or 32 bytes (AES-128/192/256); any other length
// panics inside the key schedule.
init :: proc(ctx: ^Context, key: []byte) {
	skey: [30]u64 = ---
	ctx._num_rounds = keysched(skey[:], key)
	skey_expand(ctx._sk_exp[:], skey[:], ctx._num_rounds)
	ctx._is_initialized = true
}
// encrypt_block sets `dst` to `AES-ECB-Encrypt(src)`.
//
// NOTE(review): `dst` and `src` are presumably exactly one 16-byte block
// each; bounds appear to be enforced by load_blockx1/store_blockx1 — confirm there.
encrypt_block :: proc(ctx: ^Context, dst, src: []byte) {
	assert(ctx._is_initialized)
	q: [8]u64
	load_blockx1(&q, src)
	_encrypt(&q, ctx._sk_exp[:], ctx._num_rounds)
	store_blockx1(dst, &q)
}
// decrypt_block sets `dst` to `AES-ECB-Decrypt(src)`.
// (The comment previously misnamed this procedure "encrypt_block".)
decrypt_block :: proc(ctx: ^Context, dst, src: []byte) {
	assert(ctx._is_initialized)
	q: [8]u64
	load_blockx1(&q, src)
	_decrypt(&q, ctx._sk_exp[:], ctx._num_rounds)
	store_blockx1(dst, &q)
}
// encrypt_blocks sets `dst` to `AES-ECB-Encrypt(src[0], .. src[n])`.
encrypt_blocks :: proc(ctx: ^Context, dst, src: [][]byte) {
	assert(ctx._is_initialized)

	q: [8]u64 = ---
	rem_src, rem_dst := src, dst
	// Consume full groups of STRIDE (4) blocks while more than one group
	// remains; a tail of 1..4 blocks is deliberately left for the final pass.
	for len(rem_src) > STRIDE {
		load_blocks(&q, rem_src[:STRIDE])
		_encrypt(&q, ctx._sk_exp[:], ctx._num_rounds)
		store_blocks(rem_dst[:STRIDE], &q)
		rem_src, rem_dst = rem_src[STRIDE:], rem_dst[STRIDE:]
	}
	if len(rem_src) > 0 {
		load_blocks(&q, rem_src)
		_encrypt(&q, ctx._sk_exp[:], ctx._num_rounds)
		store_blocks(rem_dst, &q)
	}
}
// decrypt_blocks sets dst to `AES-ECB-Decrypt(src[0], .. src[n])`.
decrypt_blocks :: proc(ctx: ^Context, dst, src: [][]byte) {
	assert(ctx._is_initialized)

	q: [8]u64 = ---
	rem_src, rem_dst := src, dst
	// Consume full groups of STRIDE (4) blocks while more than one group
	// remains; a tail of 1..4 blocks is deliberately left for the final pass.
	for len(rem_src) > STRIDE {
		load_blocks(&q, rem_src[:STRIDE])
		_decrypt(&q, ctx._sk_exp[:], ctx._num_rounds)
		store_blocks(rem_dst[:STRIDE], &q)
		rem_src, rem_dst = rem_src[STRIDE:], rem_dst[STRIDE:]
	}
	if len(rem_src) > 0 {
		load_blocks(&q, rem_src)
		_decrypt(&q, ctx._sk_exp[:], ctx._num_rounds)
		store_blocks(rem_dst, &q)
	}
}
// reset sanitizes the Context. The Context must be re-initialized to
// be used again.
reset :: proc(ctx: ^Context) {
	// BUG FIX: `size_of(ctx)` is the size of the *pointer* (8 bytes on
	// 64-bit targets), so only the first few bytes of the key schedule
	// were being wiped. Zero the entire pointed-to Context instead.
	mem.zero_explicit(ctx, size_of(Context))
}

View File

@@ -0,0 +1,265 @@
// Copyright (c) 2016 Thomas Pornin <pornin@bolet.org>
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS “AS IS” AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
// GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package aes_ct64
import "base:intrinsics"
// Bitsliced AES for 64-bit general purpose (integer) registers. Each
// invocation will process up to 4 blocks at a time. This implementation
// is derived from the BearSSL ct64 code, and distributed under a 1-clause
// BSD license with permission from the original author.
//
// WARNING: "hic sunt dracones"
//
// This package also deliberately exposes enough internals to be able to
// function as a replacement for `AESENC` and `AESDEC` from AES-NI, to
// allow the implementation of non-AES primitives that use the AES round
// function such as AEGIS and Deoxys-II. This should ONLY be done when
// implementing something other than AES itself.
// sub_bytes applies the AES S-box to every byte of the bitsliced state `q`.
sub_bytes :: proc "contextless" (q: ^[8]u64) {
	// This S-box implementation is a straightforward translation of
	// the circuit described by Boyar and Peralta in "A new
	// combinational logic minimization technique with applications
	// to cryptology" (https://eprint.iacr.org/2009/191.pdf).
	//
	// Note that variables x* (input) and s* (output) are numbered
	// in "reverse" order (x0 is the high bit, x7 is the low bit).
	x0 := q[7]
	x1 := q[6]
	x2 := q[5]
	x3 := q[4]
	x4 := q[3]
	x5 := q[2]
	x6 := q[1]
	x7 := q[0]

	// Top linear transformation.
	y14 := x3 ~ x5
	y13 := x0 ~ x6
	y9 := x0 ~ x3
	y8 := x0 ~ x5
	t0 := x1 ~ x2
	y1 := t0 ~ x7
	y4 := y1 ~ x3
	y12 := y13 ~ y14
	y2 := y1 ~ x0
	y5 := y1 ~ x6
	y3 := y5 ~ y8
	t1 := x4 ~ y12
	y15 := t1 ~ x5
	y20 := t1 ~ x1
	y6 := y15 ~ x7
	y10 := y15 ~ t0
	y11 := y20 ~ y9
	y7 := x7 ~ y11
	y17 := y10 ~ y11
	y19 := y10 ~ y8
	y16 := t0 ~ y11
	y21 := y13 ~ y16
	y18 := x0 ~ y16

	// Non-linear section.
	t2 := y12 & y15
	t3 := y3 & y6
	t4 := t3 ~ t2
	t5 := y4 & x7
	t6 := t5 ~ t2
	t7 := y13 & y16
	t8 := y5 & y1
	t9 := t8 ~ t7
	t10 := y2 & y7
	t11 := t10 ~ t7
	t12 := y9 & y11
	t13 := y14 & y17
	t14 := t13 ~ t12
	t15 := y8 & y10
	t16 := t15 ~ t12
	t17 := t4 ~ t14
	t18 := t6 ~ t16
	t19 := t9 ~ t14
	t20 := t11 ~ t16
	t21 := t17 ~ y20
	t22 := t18 ~ y19
	t23 := t19 ~ y21
	t24 := t20 ~ y18
	t25 := t21 ~ t22
	t26 := t21 & t23
	t27 := t24 ~ t26
	t28 := t25 & t27
	t29 := t28 ~ t22
	t30 := t23 ~ t24
	t31 := t22 ~ t26
	t32 := t31 & t30
	t33 := t32 ~ t24
	t34 := t23 ~ t33
	t35 := t27 ~ t33
	t36 := t24 & t35
	t37 := t36 ~ t34
	t38 := t27 ~ t36
	t39 := t29 & t38
	t40 := t25 ~ t39
	t41 := t40 ~ t37
	t42 := t29 ~ t33
	t43 := t29 ~ t40
	t44 := t33 ~ t37
	t45 := t42 ~ t41
	z0 := t44 & y15
	z1 := t37 & y6
	z2 := t33 & x7
	z3 := t43 & y16
	z4 := t40 & y1
	z5 := t29 & y7
	z6 := t42 & y11
	z7 := t45 & y17
	z8 := t41 & y10
	z9 := t44 & y12
	z10 := t37 & y3
	z11 := t33 & y4
	z12 := t43 & y13
	z13 := t40 & y5
	z14 := t29 & y2
	z15 := t42 & y9
	z16 := t45 & y14
	z17 := t41 & y8

	// Bottom linear transformation.
	t46 := z15 ~ z16
	t47 := z10 ~ z11
	t48 := z5 ~ z13
	t49 := z9 ~ z10
	t50 := z2 ~ z12
	t51 := z2 ~ z5
	t52 := z7 ~ z8
	t53 := z0 ~ z3
	t54 := z6 ~ z7
	t55 := z16 ~ z17
	t56 := z12 ~ t48
	t57 := t50 ~ t53
	t58 := z4 ~ t46
	t59 := z3 ~ t54
	t60 := t46 ~ t57
	t61 := z14 ~ t57
	t62 := t52 ~ t58
	t63 := t49 ~ t58
	t64 := z4 ~ t59
	t65 := t61 ~ t62
	t66 := z1 ~ t63
	s0 := t59 ~ t63
	s6 := t56 ~ ~t62
	s7 := t48 ~ ~t60
	t67 := t64 ~ t65
	s3 := t53 ~ t66
	s4 := t51 ~ t66
	s5 := t47 ~ t65
	s1 := t64 ~ ~s3
	s2 := t55 ~ ~t67

	q[7] = s0
	q[6] = s1
	q[5] = s2
	q[4] = s3
	q[3] = s4
	q[2] = s5
	q[1] = s6
	q[0] = s7
}
// orthogonalize converts the 8-word state `q` between the byte-oriented
// and bitsliced representations. The transform is used as its own
// inverse (see sub_word, which calls it before and after sub_bytes).
orthogonalize :: proc "contextless" (q: ^[8]u64) {
	// Swap bits at stride 1, then bit-pairs at stride 2, then nibbles
	// at stride 4 (a 3-step bit-matrix transpose).
	CL2 :: 0x5555555555555555
	CH2 :: 0xAAAAAAAAAAAAAAAA
	q[0], q[1] = (q[0] & CL2) | ((q[1] & CL2) << 1), ((q[0] & CH2) >> 1) | (q[1] & CH2)
	q[2], q[3] = (q[2] & CL2) | ((q[3] & CL2) << 1), ((q[2] & CH2) >> 1) | (q[3] & CH2)
	q[4], q[5] = (q[4] & CL2) | ((q[5] & CL2) << 1), ((q[4] & CH2) >> 1) | (q[5] & CH2)
	q[6], q[7] = (q[6] & CL2) | ((q[7] & CL2) << 1), ((q[6] & CH2) >> 1) | (q[7] & CH2)

	CL4 :: 0x3333333333333333
	CH4 :: 0xCCCCCCCCCCCCCCCC
	q[0], q[2] = (q[0] & CL4) | ((q[2] & CL4) << 2), ((q[0] & CH4) >> 2) | (q[2] & CH4)
	q[1], q[3] = (q[1] & CL4) | ((q[3] & CL4) << 2), ((q[1] & CH4) >> 2) | (q[3] & CH4)
	q[4], q[6] = (q[4] & CL4) | ((q[6] & CL4) << 2), ((q[4] & CH4) >> 2) | (q[6] & CH4)
	q[5], q[7] = (q[5] & CL4) | ((q[7] & CL4) << 2), ((q[5] & CH4) >> 2) | (q[7] & CH4)

	CL8 :: 0x0F0F0F0F0F0F0F0F
	CH8 :: 0xF0F0F0F0F0F0F0F0
	q[0], q[4] = (q[0] & CL8) | ((q[4] & CL8) << 4), ((q[0] & CH8) >> 4) | (q[4] & CH8)
	q[1], q[5] = (q[1] & CL8) | ((q[5] & CL8) << 4), ((q[1] & CH8) >> 4) | (q[5] & CH8)
	q[2], q[6] = (q[2] & CL8) | ((q[6] & CL8) << 4), ((q[2] & CH8) >> 4) | (q[6] & CH8)
	q[3], q[7] = (q[3] & CL8) | ((q[7] & CL8) << 4), ((q[3] & CH8) >> 4) | (q[7] & CH8)
}
// interleave_in packs four 32-bit words (one 16-byte block) into the two
// interleaved 64-bit lanes `q0`/`q1` expected by orthogonalize.
// Traps if `w` holds fewer than 4 words.
@(require_results)
interleave_in :: proc "contextless" (w: []u32) -> (q0, q1: u64) #no_bounds_check {
	if len(w) < 4 {
		intrinsics.trap()
	}
	x0, x1, x2, x3 := u64(w[0]), u64(w[1]), u64(w[2]), u64(w[3])
	// Spread each word's bytes out to every other byte position.
	x0 |= (x0 << 16)
	x1 |= (x1 << 16)
	x2 |= (x2 << 16)
	x3 |= (x3 << 16)
	x0 &= 0x0000FFFF0000FFFF
	x1 &= 0x0000FFFF0000FFFF
	x2 &= 0x0000FFFF0000FFFF
	x3 &= 0x0000FFFF0000FFFF
	x0 |= (x0 << 8)
	x1 |= (x1 << 8)
	x2 |= (x2 << 8)
	x3 |= (x3 << 8)
	x0 &= 0x00FF00FF00FF00FF
	x1 &= 0x00FF00FF00FF00FF
	x2 &= 0x00FF00FF00FF00FF
	x3 &= 0x00FF00FF00FF00FF
	q0 = x0 | (x2 << 8)
	q1 = x1 | (x3 << 8)
	return
}
// interleave_out unpacks the interleaved 64-bit lanes `q0`/`q1` back into
// four 32-bit words; it is the inverse of interleave_in.
@(require_results)
interleave_out :: proc "contextless" (q0, q1: u64) -> (w0, w1, w2, w3: u32) {
	x0 := q0 & 0x00FF00FF00FF00FF
	x1 := q1 & 0x00FF00FF00FF00FF
	x2 := (q0 >> 8) & 0x00FF00FF00FF00FF
	x3 := (q1 >> 8) & 0x00FF00FF00FF00FF
	// Collapse the spread bytes back into contiguous 32-bit words.
	x0 |= (x0 >> 8)
	x1 |= (x1 >> 8)
	x2 |= (x2 >> 8)
	x3 |= (x3 >> 8)
	x0 &= 0x0000FFFF0000FFFF
	x1 &= 0x0000FFFF0000FFFF
	x2 &= 0x0000FFFF0000FFFF
	x3 &= 0x0000FFFF0000FFFF
	w0 = u32(x0) | u32(x0 >> 16)
	w1 = u32(x1) | u32(x1 >> 16)
	w2 = u32(x2) | u32(x2 >> 16)
	w3 = u32(x3) | u32(x3 >> 16)
	return
}
// rotr32 rotates `x` by 32 bits, i.e. swaps its two 32-bit halves.
@(private)
rotr32 :: #force_inline proc "contextless" (x: u64) -> u64 {
	return (x << 32) | (x >> 32)
}

View File

@@ -0,0 +1,135 @@
// Copyright (c) 2016 Thomas Pornin <pornin@bolet.org>
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS “AS IS” AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
// GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package aes_ct64
import "base:intrinsics"
// inv_sub_bytes applies the inverse AES S-box to every byte of the
// bitsliced state `q`.
inv_sub_bytes :: proc "contextless" (q: ^[8]u64) {
	// AES S-box is:
	//   S(x) = A(I(x)) ^ 0x63
	// where I() is inversion in GF(256), and A() is a linear
	// transform (0 is formally defined to be its own inverse).
	// Since inversion is an involution, the inverse S-box can be
	// computed from the S-box as:
	//   iS(x) = B(S(B(x ^ 0x63)) ^ 0x63)
	// where B() is the inverse of A(). Indeed, for any y in GF(256):
	//   iS(S(y)) = B(A(I(B(A(I(y)) ^ 0x63 ^ 0x63))) ^ 0x63 ^ 0x63) = y
	//
	// Note: we reuse the implementation of the forward S-box,
	// instead of duplicating it here, so that total code size is
	// lower. By merging the B() transforms into the S-box circuit
	// we could make faster CBC decryption, but CBC decryption is
	// already quite faster than CBC encryption because we can
	// process four blocks in parallel.

	// Apply B(x ^ 0x63) (the NOTs implement the 0x63 XOR bitwise).
	q0 := ~q[0]
	q1 := ~q[1]
	q2 := q[2]
	q3 := q[3]
	q4 := q[4]
	q5 := ~q[5]
	q6 := ~q[6]
	q7 := q[7]
	q[7] = q1 ~ q4 ~ q6
	q[6] = q0 ~ q3 ~ q5
	q[5] = q7 ~ q2 ~ q4
	q[4] = q6 ~ q1 ~ q3
	q[3] = q5 ~ q0 ~ q2
	q[2] = q4 ~ q7 ~ q1
	q[1] = q3 ~ q6 ~ q0
	q[0] = q2 ~ q5 ~ q7

	sub_bytes(q)

	// Apply B(x ^ 0x63) again to undo the forward linear transform.
	q0 = ~q[0]
	q1 = ~q[1]
	q2 = q[2]
	q3 = q[3]
	q4 = q[4]
	q5 = ~q[5]
	q6 = ~q[6]
	q7 = q[7]
	q[7] = q1 ~ q4 ~ q6
	q[6] = q0 ~ q3 ~ q5
	q[5] = q7 ~ q2 ~ q4
	q[4] = q6 ~ q1 ~ q3
	q[3] = q5 ~ q0 ~ q2
	q[2] = q4 ~ q7 ~ q1
	q[1] = q3 ~ q6 ~ q0
	q[0] = q2 ~ q5 ~ q7
}
// inv_shift_rows applies the inverse of the AES ShiftRows step to the
// bitsliced state `q` (rotating each row's nibble groups back into place).
inv_shift_rows :: proc "contextless" (q: ^[8]u64) {
	for x, i in q {
		q[i] =
			(x & 0x000000000000FFFF) |
			((x & 0x000000000FFF0000) << 4) |
			((x & 0x00000000F0000000) >> 12) |
			((x & 0x000000FF00000000) << 8) |
			((x & 0x0000FF0000000000) >> 8) |
			((x & 0x000F000000000000) << 12) |
			((x & 0xFFF0000000000000) >> 4)
	}
}
// inv_mix_columns applies the inverse of the AES MixColumns step to the
// bitsliced state `q`.
inv_mix_columns :: proc "contextless" (q: ^[8]u64) {
	q0 := q[0]
	q1 := q[1]
	q2 := q[2]
	q3 := q[3]
	q4 := q[4]
	q5 := q[5]
	q6 := q[6]
	q7 := q[7]
	// r* are the q* values rotated by 16 bits (column rotation).
	r0 := (q0 >> 16) | (q0 << 48)
	r1 := (q1 >> 16) | (q1 << 48)
	r2 := (q2 >> 16) | (q2 << 48)
	r3 := (q3 >> 16) | (q3 << 48)
	r4 := (q4 >> 16) | (q4 << 48)
	r5 := (q5 >> 16) | (q5 << 48)
	r6 := (q6 >> 16) | (q6 << 48)
	r7 := (q7 >> 16) | (q7 << 48)

	q[0] = q5 ~ q6 ~ q7 ~ r0 ~ r5 ~ r7 ~ rotr32(q0 ~ q5 ~ q6 ~ r0 ~ r5)
	q[1] = q0 ~ q5 ~ r0 ~ r1 ~ r5 ~ r6 ~ r7 ~ rotr32(q1 ~ q5 ~ q7 ~ r1 ~ r5 ~ r6)
	q[2] = q0 ~ q1 ~ q6 ~ r1 ~ r2 ~ r6 ~ r7 ~ rotr32(q0 ~ q2 ~ q6 ~ r2 ~ r6 ~ r7)
	q[3] = q0 ~ q1 ~ q2 ~ q5 ~ q6 ~ r0 ~ r2 ~ r3 ~ r5 ~ rotr32(q0 ~ q1 ~ q3 ~ q5 ~ q6 ~ q7 ~ r0 ~ r3 ~ r5 ~ r7)
	q[4] = q1 ~ q2 ~ q3 ~ q5 ~ r1 ~ r3 ~ r4 ~ r5 ~ r6 ~ r7 ~ rotr32(q1 ~ q2 ~ q4 ~ q5 ~ q7 ~ r1 ~ r4 ~ r5 ~ r6)
	q[5] = q2 ~ q3 ~ q4 ~ q6 ~ r2 ~ r4 ~ r5 ~ r6 ~ r7 ~ rotr32(q2 ~ q3 ~ q5 ~ q6 ~ r2 ~ r5 ~ r6 ~ r7)
	q[6] = q3 ~ q4 ~ q5 ~ q7 ~ r3 ~ r5 ~ r6 ~ r7 ~ rotr32(q3 ~ q4 ~ q6 ~ q7 ~ r3 ~ r6 ~ r7)
	q[7] = q4 ~ q5 ~ q6 ~ r4 ~ r6 ~ r7 ~ rotr32(q4 ~ q5 ~ q7 ~ r4 ~ r7)
}
// _decrypt runs the full AES decryption round sequence over the bitsliced
// state `q` (up to 4 blocks), using the expanded key schedule `skey`.
@(private)
_decrypt :: proc "contextless" (q: ^[8]u64, skey: []u64, num_rounds: int) {
	// Rounds are applied in reverse: last round key first, first key last.
	add_round_key(q, skey[num_rounds << 3:])
	for u := num_rounds - 1; u > 0; u -= 1 {
		inv_shift_rows(q)
		inv_sub_bytes(q)
		add_round_key(q, skey[u << 3:])
		inv_mix_columns(q)
	}
	inv_shift_rows(q)
	inv_sub_bytes(q)
	add_round_key(q, skey)
}

View File

@@ -0,0 +1,95 @@
// Copyright (c) 2016 Thomas Pornin <pornin@bolet.org>
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS “AS IS” AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
// GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package aes_ct64
import "base:intrinsics"
// add_round_key XORs the bitsliced round key `sk` into the state `q`.
// Traps if `sk` holds fewer than 8 words.
add_round_key :: proc "contextless" (q: ^[8]u64, sk: []u64) #no_bounds_check {
	if len(sk) < 8 {
		intrinsics.trap()
	}
	q[0] ~= sk[0]
	q[1] ~= sk[1]
	q[2] ~= sk[2]
	q[3] ~= sk[3]
	q[4] ~= sk[4]
	q[5] ~= sk[5]
	q[6] ~= sk[6]
	q[7] ~= sk[7]
}
// shift_rows applies the AES ShiftRows step to the bitsliced state `q`.
shift_rows :: proc "contextless" (q: ^[8]u64) {
	for x, i in q {
		q[i] =
			(x & 0x000000000000FFFF) |
			((x & 0x00000000FFF00000) >> 4) |
			((x & 0x00000000000F0000) << 12) |
			((x & 0x0000FF0000000000) >> 8) |
			((x & 0x000000FF00000000) << 8) |
			((x & 0xF000000000000000) >> 12) |
			((x & 0x0FFF000000000000) << 4)
	}
}
// mix_columns applies the AES MixColumns step to the bitsliced state `q`.
mix_columns :: proc "contextless" (q: ^[8]u64) {
	q0 := q[0]
	q1 := q[1]
	q2 := q[2]
	q3 := q[3]
	q4 := q[4]
	q5 := q[5]
	q6 := q[6]
	q7 := q[7]
	// r* are the q* values rotated by 16 bits (column rotation).
	r0 := (q0 >> 16) | (q0 << 48)
	r1 := (q1 >> 16) | (q1 << 48)
	r2 := (q2 >> 16) | (q2 << 48)
	r3 := (q3 >> 16) | (q3 << 48)
	r4 := (q4 >> 16) | (q4 << 48)
	r5 := (q5 >> 16) | (q5 << 48)
	r6 := (q6 >> 16) | (q6 << 48)
	r7 := (q7 >> 16) | (q7 << 48)

	q[0] = q7 ~ r7 ~ r0 ~ rotr32(q0 ~ r0)
	q[1] = q0 ~ r0 ~ q7 ~ r7 ~ r1 ~ rotr32(q1 ~ r1)
	q[2] = q1 ~ r1 ~ r2 ~ rotr32(q2 ~ r2)
	q[3] = q2 ~ r2 ~ q7 ~ r7 ~ r3 ~ rotr32(q3 ~ r3)
	q[4] = q3 ~ r3 ~ q7 ~ r7 ~ r4 ~ rotr32(q4 ~ r4)
	q[5] = q4 ~ r4 ~ r5 ~ rotr32(q5 ~ r5)
	q[6] = q5 ~ r5 ~ r6 ~ rotr32(q6 ~ r6)
	q[7] = q6 ~ r6 ~ r7 ~ rotr32(q7 ~ r7)
}
// _encrypt runs the full AES encryption round sequence over the bitsliced
// state `q` (up to 4 blocks), using the expanded key schedule `skey`.
@(private)
_encrypt :: proc "contextless" (q: ^[8]u64, skey: []u64, num_rounds: int) {
	add_round_key(q, skey)
	for u in 1 ..< num_rounds {
		sub_bytes(q)
		shift_rows(q)
		mix_columns(q)
		add_round_key(q, skey[u << 3:])
	}
	// Final round omits MixColumns, per the AES specification.
	sub_bytes(q)
	shift_rows(q)
	add_round_key(q, skey[num_rounds << 3:])
}

View File

@@ -0,0 +1,179 @@
// Copyright (c) 2016 Thomas Pornin <pornin@bolet.org>
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS “AS IS” AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
// GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package aes_ct64
import "base:intrinsics"
import "core:crypto/_aes"
import "core:encoding/endian"
import "core:mem"
// sub_word applies the AES S-box to each byte of a 32-bit word via the
// bitsliced S-box circuit (used by the key schedule).
@(private, require_results)
sub_word :: proc "contextless" (x: u32) -> u32 {
	q := [8]u64{u64(x), 0, 0, 0, 0, 0, 0, 0}
	orthogonalize(&q)
	sub_bytes(&q)
	orthogonalize(&q)
	ret := u32(q[0])
	// Scrub the only word that held key-derived data.
	mem.zero_explicit(&q[0], size_of(u64))
	return ret
}
// keysched expands `key` into the compressed bitsliced key schedule
// `comp_skey` and returns the number of AES rounds. Panics unless the key
// is 16, 24, or 32 bytes. Scratch buffers are wiped before returning.
@(private, require_results)
keysched :: proc(comp_skey: []u64, key: []byte) -> int {
	num_rounds, key_len := 0, len(key)
	switch key_len {
	case _aes.KEY_SIZE_128:
		num_rounds = _aes.ROUNDS_128
	case _aes.KEY_SIZE_192:
		num_rounds = _aes.ROUNDS_192
	case _aes.KEY_SIZE_256:
		num_rounds = _aes.ROUNDS_256
	case:
		panic("crypto/aes: invalid AES key size")
	}

	// Standard AES key expansion over 32-bit words.
	skey: [60]u32 = ---
	nk, nkf := key_len >> 2, (num_rounds + 1) << 2
	for i in 0 ..< nk {
		skey[i] = endian.unchecked_get_u32le(key[i << 2:])
	}
	tmp := skey[(key_len >> 2) - 1]
	for i, j, k := nk, 0, 0; i < nkf; i += 1 {
		if j == 0 {
			tmp = (tmp << 24) | (tmp >> 8)
			tmp = sub_word(tmp) ~ u32(_aes.RCON[k])
		} else if nk > 6 && j == 4 {
			tmp = sub_word(tmp)
		}
		tmp ~= skey[i - nk]
		skey[i] = tmp
		if j += 1; j == nk {
			j = 0
			k += 1
		}
	}

	// Bitslice the expanded words, two compressed u64s per 4 words.
	q: [8]u64 = ---
	for i, j := 0, 0; i < nkf; i, j = i + 4, j + 2 {
		q[0], q[4] = interleave_in(skey[i:])
		q[1] = q[0]
		q[2] = q[0]
		q[3] = q[0]
		q[5] = q[4]
		q[6] = q[4]
		q[7] = q[4]
		orthogonalize(&q)
		comp_skey[j + 0] =
			(q[0] & 0x1111111111111111) |
			(q[1] & 0x2222222222222222) |
			(q[2] & 0x4444444444444444) |
			(q[3] & 0x8888888888888888)
		comp_skey[j + 1] =
			(q[4] & 0x1111111111111111) |
			(q[5] & 0x2222222222222222) |
			(q[6] & 0x4444444444444444) |
			(q[7] & 0x8888888888888888)
	}
	mem.zero_explicit(&skey, size_of(skey))
	mem.zero_explicit(&q, size_of(q))
	return num_rounds
}
// skey_expand expands the compressed key schedule `comp_skey` into the
// per-round bitsliced key schedule `skey` (4 output words per input word).
@(private)
skey_expand :: proc "contextless" (skey, comp_skey: []u64, num_rounds: int) {
	n := (num_rounds + 1) << 1
	for u, v := 0, 0; u < n; u, v = u + 1, v + 4 {
		x0 := comp_skey[u]
		x1, x2, x3 := x0, x0, x0
		x0 &= 0x1111111111111111
		x1 &= 0x2222222222222222
		x2 &= 0x4444444444444444
		x3 &= 0x8888888888888888
		x1 >>= 1
		x2 >>= 2
		x3 >>= 3
		// (x << 4) - x replicates each kept bit across its 4-bit group.
		skey[v + 0] = (x0 << 4) - x0
		skey[v + 1] = (x1 << 4) - x1
		skey[v + 2] = (x2 << 4) - x2
		skey[v + 3] = (x3 << 4) - x3
	}
}
// orthogonalize_roundkey writes the bitsliced expanded form of a single
// 16-byte round key into `qq[0..7]`, for callers building AES-round-based
// primitives directly. Traps unless len(qq) >= 8 and len(key) == 16.
orthogonalize_roundkey :: proc "contextless" (qq: []u64, key: []byte) {
	if len(qq) < 8 || len(key) != 16 {
		intrinsics.trap()
	}

	skey: [4]u32 = ---
	skey[0] = endian.unchecked_get_u32le(key[0:])
	skey[1] = endian.unchecked_get_u32le(key[4:])
	skey[2] = endian.unchecked_get_u32le(key[8:])
	skey[3] = endian.unchecked_get_u32le(key[12:])

	// Bitslice the 4 words (same layout as keysched).
	q: [8]u64 = ---
	q[0], q[4] = interleave_in(skey[:])
	q[1] = q[0]
	q[2] = q[0]
	q[3] = q[0]
	q[5] = q[4]
	q[6] = q[4]
	q[7] = q[4]
	orthogonalize(&q)
	comp_skey: [2]u64 = ---
	comp_skey[0] =
		(q[0] & 0x1111111111111111) |
		(q[1] & 0x2222222222222222) |
		(q[2] & 0x4444444444444444) |
		(q[3] & 0x8888888888888888)
	comp_skey[1] =
		(q[4] & 0x1111111111111111) |
		(q[5] & 0x2222222222222222) |
		(q[6] & 0x4444444444444444) |
		(q[7] & 0x8888888888888888)

	// Expand the compressed form (same replication trick as skey_expand).
	for x, u in comp_skey {
		x0 := x
		x1, x2, x3 := x0, x0, x0
		x1 &= 0x2222222222222222
		x0 &= 0x1111111111111111
		x2 &= 0x4444444444444444
		x3 &= 0x8888888888888888
		x1 >>= 1
		x2 >>= 2
		x3 >>= 3
		qq[u * 4 + 0] = (x0 << 4) - x0
		qq[u * 4 + 1] = (x1 << 4) - x1
		qq[u * 4 + 2] = (x2 << 4) - x2
		qq[u * 4 + 3] = (x3 << 4) - x3
	}
	mem.zero_explicit(&skey, size_of(skey))
	mem.zero_explicit(&q, size_of(q))
	mem.zero_explicit(&comp_skey, size_of(comp_skey))
}

View File

@@ -0,0 +1,136 @@
// Copyright (c) 2016 Thomas Pornin <pornin@bolet.org>
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS “AS IS” AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
// GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package aes_ct64
import "base:intrinsics"
import "core:crypto/_aes"
import "core:encoding/endian"
@(private = "file")
bmul64 :: proc "contextless" (x, y: u64) -> u64 {
x0 := x & 0x1111111111111111
x1 := x & 0x2222222222222222
x2 := x & 0x4444444444444444
x3 := x & 0x8888888888888888
y0 := y & 0x1111111111111111
y1 := y & 0x2222222222222222
y2 := y & 0x4444444444444444
y3 := y & 0x8888888888888888
z0 := (x0 * y0) ~ (x1 * y3) ~ (x2 * y2) ~ (x3 * y1)
z1 := (x0 * y1) ~ (x1 * y0) ~ (x2 * y3) ~ (x3 * y2)
z2 := (x0 * y2) ~ (x1 * y1) ~ (x2 * y0) ~ (x3 * y3)
z3 := (x0 * y3) ~ (x1 * y2) ~ (x2 * y1) ~ (x3 * y0)
z0 &= 0x1111111111111111
z1 &= 0x2222222222222222
z2 &= 0x4444444444444444
z3 &= 0x8888888888888888
return z0 | z1 | z2 | z3
}
// rev64 returns x with the order of its 64 bits reversed.
//
// Bits are exchanged pairwise, then in groups of 2, 4, 8, and 16, and the
// two 32-bit halves are swapped last (a standard butterfly reversal).
@(private = "file")
rev64 :: proc "contextless" (x: u64) -> u64 {
	v := x
	v = ((v >> 1) & 0x5555555555555555) | ((v & 0x5555555555555555) << 1)
	v = ((v >> 2) & 0x3333333333333333) | ((v & 0x3333333333333333) << 2)
	v = ((v >> 4) & 0x0F0F0F0F0F0F0F0F) | ((v & 0x0F0F0F0F0F0F0F0F) << 4)
	v = ((v >> 8) & 0x00FF00FF00FF00FF) | ((v & 0x00FF00FF00FF00FF) << 8)
	v = ((v >> 16) & 0x0000FFFF0000FFFF) | ((v & 0x0000FFFF0000FFFF) << 16)
	return (v >> 32) | (v << 32)
}
// ghash calculates the GHASH of data, with the key `key`, and input `dst`
// and `data`, and stores the resulting digest in `dst`.
//
// Note: `dst` is both an input and an output, to support easy implementation
// of GCM.
//
// This is a constant-time implementation, using carry-less multiplication
// built from integer multiplies (see bmul64).
ghash :: proc "contextless" (dst, key, data: []byte) {
	if len(dst) != _aes.GHASH_BLOCK_SIZE || len(key) != _aes.GHASH_BLOCK_SIZE {
		intrinsics.trap()
	}
	buf := data
	l := len(buf)
	// Running digest (y) and hash key (h), as big-endian 64-bit halves.
	y1 := endian.unchecked_get_u64be(dst[0:])
	y0 := endian.unchecked_get_u64be(dst[8:])
	h1 := endian.unchecked_get_u64be(key[0:])
	h0 := endian.unchecked_get_u64be(key[8:])
	// Bit-reversed copies of the key halves; bmul64 only yields the low
	// 64 bits of a product, so the high bits are obtained by multiplying
	// the bit-reversed operands and reversing the result.
	h0r := rev64(h0)
	h1r := rev64(h1)
	// Karatsuba precomputation: (h0 ^ h1) terms.
	h2 := h0 ~ h1
	h2r := h0r ~ h1r
	src: []byte
	for l > 0 {
		if l >= _aes.GHASH_BLOCK_SIZE {
			src = buf
			buf = buf[_aes.GHASH_BLOCK_SIZE:]
			l -= _aes.GHASH_BLOCK_SIZE
		} else {
			// Final partial block: zero-pad up to a full block.
			tmp: [_aes.GHASH_BLOCK_SIZE]byte
			copy(tmp[:], buf)
			src = tmp[:]
			l = 0
		}
		// Fold the block into the digest.
		y1 ~= endian.unchecked_get_u64be(src)
		y0 ~= endian.unchecked_get_u64be(src[8:])
		y0r := rev64(y0)
		y1r := rev64(y1)
		y2 := y0 ~ y1
		y2r := y0r ~ y1r
		// Karatsuba: three 64x64 carry-less multiplies (plus the three
		// bit-reversed multiplies for the high halves).
		z0 := bmul64(y0, h0)
		z1 := bmul64(y1, h1)
		z2 := bmul64(y2, h2)
		z0h := bmul64(y0r, h0r)
		z1h := bmul64(y1r, h1r)
		z2h := bmul64(y2r, h2r)
		z2 ~= z0 ~ z1
		z2h ~= z0h ~ z1h
		// Un-reverse the "high" products (>> 1 accounts for the missing
		// bit 127 of a 127-bit reversed product).
		z0h = rev64(z0h) >> 1
		z1h = rev64(z1h) >> 1
		z2h = rev64(z2h) >> 1
		// Assemble the 255-bit product in v3:v2:v1:v0.
		v0 := z0
		v1 := z0h ~ z2
		v2 := z1 ~ z2h
		v3 := z1h
		// Shift left by one to get the full 256-bit product.
		v3 = (v3 << 1) | (v2 >> 63)
		v2 = (v2 << 1) | (v1 >> 63)
		v1 = (v1 << 1) | (v0 >> 63)
		v0 = (v0 << 1)
		// Reduce modulo the GHASH polynomial x^128 + x^7 + x^2 + x + 1.
		v2 ~= v0 ~ (v0 >> 1) ~ (v0 >> 2) ~ (v0 >> 7)
		v1 ~= (v0 << 63) ~ (v0 << 62) ~ (v0 << 57)
		v3 ~= v1 ~ (v1 >> 1) ~ (v1 >> 2) ~ (v1 >> 7)
		v2 ~= (v1 << 63) ~ (v1 << 62) ~ (v1 << 57)
		y0 = v2
		y1 = v3
	}
	// Serialize the digest back into dst.
	endian.unchecked_put_u64be(dst[0:], y1)
	endian.unchecked_put_u64be(dst[8:], y0)
}

View File

@@ -0,0 +1,75 @@
package aes_ct64
import "base:intrinsics"
import "core:crypto/_aes"
import "core:encoding/endian"
// load_blockx1 loads a single 16-byte block from src into the bitsliced
// representation q. Traps if src is not exactly one AES block.
load_blockx1 :: proc "contextless" (q: ^[8]u64, src: []byte) {
	if len(src) != _aes.BLOCK_SIZE {
		intrinsics.trap()
	}
	words := [4]u32{
		endian.unchecked_get_u32le(src[0:]),
		endian.unchecked_get_u32le(src[4:]),
		endian.unchecked_get_u32le(src[8:]),
		endian.unchecked_get_u32le(src[12:]),
	}
	q[0], q[4] = interleave_in(words[:])
	orthogonalize(q)
}
// store_blockx1 serializes one block out of the bitsliced representation q
// into the 16-byte buffer dst. Traps if dst is not exactly one AES block.
store_blockx1 :: proc "contextless" (dst: []byte, q: ^[8]u64) {
	if len(dst) != _aes.BLOCK_SIZE {
		intrinsics.trap()
	}
	orthogonalize(q)
	words: [4]u32 = ---
	words[0], words[1], words[2], words[3] = interleave_out(q[0], q[4])
	for w, i in words {
		endian.unchecked_put_u32le(dst[i * 4:], w)
	}
}
// load_blocks loads up to STRIDE 16-byte blocks from src into the bitsliced
// representation q. Traps if src is empty, has more than STRIDE entries,
// or if any entry is not exactly one AES block.
load_blocks :: proc "contextless" (q: ^[8]u64, src: [][]byte) {
	if n := len(src); n > STRIDE || n == 0 {
		intrinsics.trap()
	}
	w: [4]u32 = ---
	for s, i in src {
		if len(s) != _aes.BLOCK_SIZE {
			intrinsics.trap()
		}
		w[0] = endian.unchecked_get_u32le(s[0:])
		w[1] = endian.unchecked_get_u32le(s[4:])
		w[2] = endian.unchecked_get_u32le(s[8:])
		w[3] = endian.unchecked_get_u32le(s[12:])
		q[i], q[i + 4] = interleave_in(w[:])
	}
	orthogonalize(q)
}
// store_blocks serializes up to STRIDE blocks out of the bitsliced
// representation q into the buffers of dst. A nil entry in dst stops
// the store early, allowing fewer blocks than STRIDE to be written.
// Traps if dst is empty, has more than STRIDE entries, or if any non-nil
// entry is not exactly one AES block.
store_blocks :: proc "contextless" (dst: [][]byte, q: ^[8]u64) {
	if n := len(dst); n > STRIDE || n == 0 {
		intrinsics.trap()
	}
	orthogonalize(q)
	for d, i in dst {
		// Allow storing [0,4] blocks.
		if d == nil {
			break
		}
		if len(d) != _aes.BLOCK_SIZE {
			intrinsics.trap()
		}
		w0, w1, w2, w3 := interleave_out(q[i], q[i + 4])
		endian.unchecked_put_u32le(d[0:], w0)
		endian.unchecked_put_u32le(d[4:], w1)
		endian.unchecked_put_u32le(d[8:], w2)
		endian.unchecked_put_u32le(d[12:], w3)
	}
}

22
core/crypto/aes/aes.odin Normal file
View File

@@ -0,0 +1,22 @@
/*
package aes implements the AES block cipher and some common modes.
See:
- https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.197-upd1.pdf
- https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38a.pdf
- https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38d.pdf
*/
package aes

import "core:crypto/_aes"

// KEY_SIZE_128 is the AES-128 key size in bytes.
KEY_SIZE_128 :: _aes.KEY_SIZE_128
// KEY_SIZE_192 is the AES-192 key size in bytes.
KEY_SIZE_192 :: _aes.KEY_SIZE_192
// KEY_SIZE_256 is the AES-256 key size in bytes.
KEY_SIZE_256 :: _aes.KEY_SIZE_256
// BLOCK_SIZE is the AES block size in bytes.
BLOCK_SIZE :: _aes.BLOCK_SIZE

View File

@@ -0,0 +1,199 @@
package aes
import "core:crypto/_aes/ct64"
import "core:encoding/endian"
import "core:math/bits"
import "core:mem"
// CTR_IV_SIZE is the size of the CTR mode IV in bytes.
CTR_IV_SIZE :: 16
// Context_CTR is a keyed AES-CTR instance.
Context_CTR :: struct {
	// Selected AES implementation (portable or hardware).
	_impl: Context_Impl,
	// Buffered keystream for partial-block processing.
	_buffer: [BLOCK_SIZE]byte,
	// Offset into _buffer of the first unconsumed keystream byte;
	// BLOCK_SIZE means the buffer is exhausted.
	_off: int,
	// 128-bit big-endian block counter, as two 64-bit halves.
	_ctr_hi: u64,
	_ctr_lo: u64,
	_is_initialized: bool,
}
// init_ctr initializes a Context_CTR with the provided key and IV.
//
// Panics if the IV is not exactly CTR_IV_SIZE bytes.
init_ctr :: proc(ctx: ^Context_CTR, key, iv: []byte, impl := Implementation.Hardware) {
	if len(iv) != CTR_IV_SIZE {
		panic("crypto/aes: invalid CTR IV size")
	}
	init_impl(&ctx._impl, key, impl)
	// The IV is the initial value of the 128-bit big-endian counter.
	ctx._ctr_hi = endian.unchecked_get_u64be(iv[:8])
	ctx._ctr_lo = endian.unchecked_get_u64be(iv[8:])
	// Mark the keystream buffer as exhausted.
	ctx._off = BLOCK_SIZE
	ctx._is_initialized = true
}
// xor_bytes_ctr XORs each byte in src with bytes taken from the AES-CTR
// keystream, and writes the resulting output to dst. dst and src MUST
// alias exactly or not at all.
//
// If dst is shorter than src, only len(dst) bytes are processed.
xor_bytes_ctr :: proc(ctx: ^Context_CTR, dst, src: []byte) {
	assert(ctx._is_initialized)
	// TODO: Enforcing that dst and src alias exactly or not at all
	// is a good idea, though odd aliasing should be extremely uncommon.
	src, dst := src, dst
	if dst_len := len(dst); dst_len < len(src) {
		src = src[:dst_len]
	}
	for remaining := len(src); remaining > 0; {
		// Process multiple blocks at once
		if ctx._off == BLOCK_SIZE {
			if nr_blocks := remaining / BLOCK_SIZE; nr_blocks > 0 {
				direct_bytes := nr_blocks * BLOCK_SIZE
				ctr_blocks(ctx, dst, src, nr_blocks)
				remaining -= direct_bytes
				if remaining == 0 {
					return
				}
				dst = dst[direct_bytes:]
				src = src[direct_bytes:]
			}
			// If there is a partial block, generate and buffer 1 block
			// worth of keystream.
			ctr_blocks(ctx, ctx._buffer[:], nil, 1)
			ctx._off = 0
		}
		// Process partial blocks from the buffered keystream.
		to_xor := min(BLOCK_SIZE - ctx._off, remaining)
		buffered_keystream := ctx._buffer[ctx._off:]
		for i := 0; i < to_xor; i = i + 1 {
			dst[i] = buffered_keystream[i] ~ src[i]
		}
		ctx._off += to_xor
		dst = dst[to_xor:]
		src = src[to_xor:]
		remaining -= to_xor
	}
}
// keystream_bytes_ctr fills dst with the raw AES-CTR keystream output.
//
// Structured like xor_bytes_ctr: whole blocks are generated directly into
// dst, and any trailing partial block is served from the buffered keystream.
keystream_bytes_ctr :: proc(ctx: ^Context_CTR, dst: []byte) {
	assert(ctx._is_initialized)
	dst := dst
	for remaining := len(dst); remaining > 0; {
		// Process multiple blocks at once
		if ctx._off == BLOCK_SIZE {
			if nr_blocks := remaining / BLOCK_SIZE; nr_blocks > 0 {
				direct_bytes := nr_blocks * BLOCK_SIZE
				ctr_blocks(ctx, dst, nil, nr_blocks)
				remaining -= direct_bytes
				if remaining == 0 {
					return
				}
				dst = dst[direct_bytes:]
			}
			// If there is a partial block, generate and buffer 1 block
			// worth of keystream.
			ctr_blocks(ctx, ctx._buffer[:], nil, 1)
			ctx._off = 0
		}
		// Process partial blocks from the buffered keystream.
		to_copy := min(BLOCK_SIZE - ctx._off, remaining)
		buffered_keystream := ctx._buffer[ctx._off:]
		copy(dst[:to_copy], buffered_keystream[:to_copy])
		ctx._off += to_copy
		dst = dst[to_copy:]
		remaining -= to_copy
	}
}
// reset_ctr sanitizes the Context_CTR. A sanitized Context_CTR must be
// re-initialized before it can be used again.
reset_ctr :: proc "contextless" (ctx: ^Context_CTR) {
	// Scrub the buffered keystream and the key schedule first.
	mem.zero_explicit(&ctx._buffer, size_of(ctx._buffer))
	reset_impl(&ctx._impl)
	ctx._ctr_hi, ctx._ctr_lo = 0, 0
	ctx._off = 0
	ctx._is_initialized = false
}
// ctr_blocks generates nr_blocks blocks of keystream, XORs them with src
// (or writes raw keystream if src is nil), stores the result in dst, and
// advances the context's 128-bit counter.
@(private)
ctr_blocks :: proc(ctx: ^Context_CTR, dst, src: []byte, nr_blocks: int) {
	// Use the optimized hardware implementation if available.
	if _, is_hw := ctx._impl.(Context_Impl_Hardware); is_hw {
		ctr_blocks_hw(ctx, dst, src, nr_blocks)
		return
	}
	// Portable implementation.
	//
	// ct64_inc_ctr serializes the counter into dst, then increments it as
	// a 128-bit big-endian integer (with carry from lo into hi).
	ct64_inc_ctr := #force_inline proc "contextless" (dst: []byte, hi, lo: u64) -> (u64, u64) {
		endian.unchecked_put_u64be(dst[0:], hi)
		endian.unchecked_put_u64be(dst[8:], lo)
		hi, lo := hi, lo
		carry: u64
		lo, carry = bits.add_u64(lo, 1, 0)
		hi, _ = bits.add_u64(hi, 0, carry)
		return hi, lo
	}
	impl := &ctx._impl.(ct64.Context)
	src, dst := src, dst
	nr_blocks := nr_blocks
	ctr_hi, ctr_lo := ctx._ctr_hi, ctx._ctr_lo
	// Scratch counter blocks, processed up to ct64.STRIDE at a time.
	tmp: [ct64.STRIDE][BLOCK_SIZE]byte = ---
	ctrs: [ct64.STRIDE][]byte = ---
	for i in 0 ..< ct64.STRIDE {
		ctrs[i] = tmp[i][:]
	}
	for nr_blocks > 0 {
		n := min(ct64.STRIDE, nr_blocks)
		blocks := ctrs[:n]
		// Fill the scratch blocks with successive counter values.
		for i in 0 ..< n {
			ctr_hi, ctr_lo = ct64_inc_ctr(blocks[i], ctr_hi, ctr_lo)
		}
		// Encrypt the counters in place to produce the keystream.
		ct64.encrypt_blocks(impl, blocks, blocks)
		xor_blocks(dst, src, blocks)
		if src != nil {
			src = src[n * BLOCK_SIZE:]
		}
		dst = dst[n * BLOCK_SIZE:]
		nr_blocks -= n
	}
	// Write back the counter.
	ctx._ctr_hi, ctx._ctr_lo = ctr_hi, ctr_lo
	mem.zero_explicit(&tmp, size_of(tmp))
}
// xor_blocks XORs src into the keystream blocks (when src is non-nil), and
// copies the results to dst. With a nil src the raw keystream is copied out.
//
// Note: The blocks are mutated in place before being copied to dst.
@(private)
xor_blocks :: #force_inline proc "contextless" (dst, src: []byte, blocks: [][]byte) {
	// Note: This would be faster if `core:simd` was used, however if
	// performance of this implementation matters to where that
	// optimization would be worth it, use chacha20poly1305, or a
	// CPU that isn't e-waste.
	if src != nil {
		#no_bounds_check {
			for i in 0 ..< len(blocks) {
				off := i * BLOCK_SIZE
				for j in 0 ..< BLOCK_SIZE {
					blocks[i][j] ~= src[off + j]
				}
			}
		}
	}
	for i in 0 ..< len(blocks) {
		copy(dst[i * BLOCK_SIZE:], blocks[i])
	}
}

View File

@@ -0,0 +1,57 @@
package aes
import "core:crypto/_aes/ct64"
// Context_ECB is a keyed AES-ECB instance.
//
// WARNING: Using ECB mode is strongly discouraged unless it is being
// used to implement higher level constructs.
Context_ECB :: struct {
	// Selected AES implementation (portable or hardware).
	_impl: Context_Impl,
	_is_initialized: bool,
}
// init_ecb initializes a Context_ECB with the provided key.
//
// The impl argument is a hint; a hardware implementation is only used
// when actually supported (see init_impl).
init_ecb :: proc(ctx: ^Context_ECB, key: []byte, impl := Implementation.Hardware) {
	init_impl(&ctx._impl, key, impl)
	ctx._is_initialized = true
}
// encrypt_ecb encrypts the BLOCK_SIZE buffer src, and writes the result to dst.
//
// Panics if dst or src is not exactly one AES block.
encrypt_ecb :: proc(ctx: ^Context_ECB, dst, src: []byte) {
	assert(ctx._is_initialized)
	if len(dst) != BLOCK_SIZE || len(src) != BLOCK_SIZE {
		panic("crypto/aes: invalid buffer size(s)")
	}
	// Dispatch on the concrete implementation held by the union.
	switch &impl in ctx._impl {
	case ct64.Context:
		ct64.encrypt_block(&impl, dst, src)
	case Context_Impl_Hardware:
		encrypt_block_hw(&impl, dst, src)
	}
}
// decrypt_ecb decrypts the BLOCK_SIZE buffer src, and writes the result to dst.
//
// Panics if dst or src is not exactly one AES block.
decrypt_ecb :: proc(ctx: ^Context_ECB, dst, src: []byte) {
	assert(ctx._is_initialized)
	if len(dst) != BLOCK_SIZE || len(src) != BLOCK_SIZE {
		panic("crypto/aes: invalid buffer size(s)")
	}
	// Dispatch on the concrete implementation held by the union.
	switch &impl in ctx._impl {
	case ct64.Context:
		ct64.decrypt_block(&impl, dst, src)
	case Context_Impl_Hardware:
		decrypt_block_hw(&impl, dst, src)
	}
}
// reset_ecb sanitizes the Context_ECB. The Context_ECB must be
// re-initialized to be used again.
reset_ecb :: proc "contextless" (ctx: ^Context_ECB) {
	// Scrubs the expanded key material (see reset_impl).
	reset_impl(&ctx._impl)
	ctx._is_initialized = false
}

View File

@@ -0,0 +1,253 @@
package aes
import "core:crypto"
import "core:crypto/_aes"
import "core:crypto/_aes/ct64"
import "core:encoding/endian"
import "core:mem"
// GCM_NONCE_SIZE is the size of the GCM nonce in bytes.
GCM_NONCE_SIZE :: 12
// GCM_TAG_SIZE is the size of a GCM tag in bytes.
GCM_TAG_SIZE :: _aes.GHASH_TAG_SIZE
// GCM_A_MAX is the maximum additional-data length in bytes.
@(private)
GCM_A_MAX :: max(u64) / 8 // 2^64 - 1 bits -> bytes
// GCM_P_MAX is the maximum plaintext/ciphertext length in bytes.
@(private)
GCM_P_MAX :: 0xfffffffe0 // 2^39 - 256 bits -> bytes
// Context_GCM is a keyed AES-GCM instance.
Context_GCM :: struct {
	// Selected AES implementation (portable or hardware).
	_impl: Context_Impl,
	_is_initialized: bool,
}
// init_gcm initializes a Context_GCM with the provided key.
//
// The impl argument is a hint; a hardware implementation is only used
// when actually supported (see init_impl).
init_gcm :: proc(ctx: ^Context_GCM, key: []byte, impl := Implementation.Hardware) {
	init_impl(&ctx._impl, key, impl)
	ctx._is_initialized = true
}
// seal_gcm encrypts the plaintext and authenticates the aad and ciphertext,
// with the provided Context_GCM and nonce, stores the output in dst and tag.
//
// dst and plaintext MUST alias exactly or not at all.
//
// Panics on invalid tag/nonce/dst sizes, or oversized aad/plaintext.
seal_gcm :: proc(ctx: ^Context_GCM, dst, tag, nonce, aad, plaintext: []byte) {
	assert(ctx._is_initialized)
	gcm_validate_common_slice_sizes(tag, nonce, aad, plaintext)
	if len(dst) != len(plaintext) {
		panic("crypto/aes: invalid destination ciphertext size")
	}
	// Defer to the hardware implementation when one is in use.
	if impl, is_hw := ctx._impl.(Context_Impl_Hardware); is_hw {
		gcm_seal_hw(&impl, dst, tag, nonce, aad, plaintext)
		return
	}
	// Portable path: h is the GHASH key, j0 the pre-encrypted first
	// counter block, s the running GHASH state/tag.
	h: [_aes.GHASH_KEY_SIZE]byte
	j0: [_aes.GHASH_BLOCK_SIZE]byte
	s: [_aes.GHASH_TAG_SIZE]byte
	init_ghash_ct64(ctx, &h, &j0, nonce)
	// Note: Our GHASH implementation handles appending padding.
	ct64.ghash(s[:], h[:], aad)
	gctr_ct64(ctx, dst, &s, plaintext, &h, nonce, true)
	final_ghash_ct64(&s, &h, &j0, len(aad), len(plaintext))
	copy(tag, s[:])
	// Scrub the derived key material.
	mem.zero_explicit(&h, len(h))
	mem.zero_explicit(&j0, len(j0))
}
// open_gcm authenticates the aad and ciphertext, and decrypts the ciphertext,
// with the provided Context_GCM, nonce, and tag, and stores the output in dst,
// returning true iff the authentication was successful. If authentication
// fails, the destination buffer will be zeroed.
//
// dst and ciphertext MUST alias exactly or not at all.
open_gcm :: proc(ctx: ^Context_GCM, dst, nonce, aad, ciphertext, tag: []byte) -> bool {
	assert(ctx._is_initialized)
	gcm_validate_common_slice_sizes(tag, nonce, aad, ciphertext)
	if len(dst) != len(ciphertext) {
		panic("crypto/aes: invalid destination plaintext size")
	}
	// Defer to the hardware implementation when one is in use.
	if impl, is_hw := ctx._impl.(Context_Impl_Hardware); is_hw {
		return gcm_open_hw(&impl, dst, nonce, aad, ciphertext, tag)
	}
	// Portable path: h is the GHASH key, j0 the pre-encrypted first
	// counter block, s the running GHASH state/tag.
	h: [_aes.GHASH_KEY_SIZE]byte
	j0: [_aes.GHASH_BLOCK_SIZE]byte
	s: [_aes.GHASH_TAG_SIZE]byte
	init_ghash_ct64(ctx, &h, &j0, nonce)
	ct64.ghash(s[:], h[:], aad)
	gctr_ct64(ctx, dst, &s, ciphertext, &h, nonce, false)
	final_ghash_ct64(&s, &h, &j0, len(aad), len(ciphertext))
	// Constant-time tag comparison; on mismatch, scrub the plaintext.
	ok := crypto.compare_constant_time(s[:], tag) == 1
	if !ok {
		mem.zero_explicit(raw_data(dst), len(dst))
	}
	mem.zero_explicit(&h, len(h))
	mem.zero_explicit(&j0, len(j0))
	mem.zero_explicit(&s, len(s))
	return ok
}
// reset_gcm sanitizes the Context_GCM. The Context_GCM must be
// re-initialized to be used again.
reset_gcm :: proc "contextless" (ctx: ^Context_GCM) {
	// Scrubs the expanded key material (see reset_impl).
	reset_impl(&ctx._impl)
	ctx._is_initialized = false
}
// gcm_validate_common_slice_sizes panics if the tag, nonce, aad, or
// plaintext/ciphertext sizes are unsupported.
@(private)
gcm_validate_common_slice_sizes :: proc(tag, nonce, aad, text: []byte) {
	if len(tag) != GCM_TAG_SIZE {
		panic("crypto/aes: invalid GCM tag size")
	}
	// The specification supports nonces in the range [1, 2^64) bits
	// however per NIST SP 800-38D 5.2.1.1:
	//
	// > For IVs, it is recommended that implementations restrict support
	// > to the length of 96 bits, to promote interoperability, efficiency,
	// > and simplicity of design.
	if len(nonce) != GCM_NONCE_SIZE {
		panic("crypto/aes: invalid GCM nonce size")
	}
	if aad_len := u64(len(aad)); aad_len > GCM_A_MAX {
		panic("crypto/aes: oversized GCM aad")
	}
	if text_len := u64(len(text)); text_len > GCM_P_MAX {
		panic("crypto/aes: oversized GCM src data")
	}
}
// init_ghash_ct64 derives the GHASH key (h) and the encrypted initial
// counter block (j0) from the AES key and nonce.
@(private = "file")
init_ghash_ct64 :: proc(
	ctx: ^Context_GCM,
	h: ^[_aes.GHASH_KEY_SIZE]byte,
	j0: ^[_aes.GHASH_BLOCK_SIZE]byte,
	nonce: []byte,
) {
	impl := &ctx._impl.(ct64.Context)
	// 1. Let H = CIPH(k, 0^128)
	ct64.encrypt_block(impl, h[:], h[:])
	// ECB encrypt j0, so that we can just XOR with the tag. In theory
	// this could be processed along with the final GCTR block, to
	// potentially save a call to AES-ECB, but... just use AES-NI.
	//
	// j0 = nonce || 0^31 || 1 (96-bit nonces only).
	copy(j0[:], nonce)
	j0[_aes.GHASH_BLOCK_SIZE - 1] = 1
	ct64.encrypt_block(impl, j0[:], j0[:])
}
// final_ghash_ct64 folds the aad/text bit lengths into the GHASH state s,
// then XORs in the encrypted initial counter block j0 to produce the tag.
@(private = "file")
final_ghash_ct64 :: proc(
	s: ^[_aes.GHASH_BLOCK_SIZE]byte,
	h: ^[_aes.GHASH_KEY_SIZE]byte,
	j0: ^[_aes.GHASH_BLOCK_SIZE]byte,
	a_len: int,
	t_len: int,
) {
	// Final GHASH block: len(A) || len(C), both in bits, big-endian.
	blk: [_aes.GHASH_BLOCK_SIZE]byte
	endian.unchecked_put_u64be(blk[0:], u64(a_len) * 8)
	endian.unchecked_put_u64be(blk[8:], u64(t_len) * 8)
	ct64.ghash(s[:], h[:], blk[:])
	// Tag = GHASH output XOR CIPH(k, j0).
	for i in 0 ..< len(s) {
		s[i] ~= j0[i]
	}
}
// gctr_ct64 performs the GCTR encryption/decryption pass, stitched together
// with the GHASH update so that the ciphertext is only traversed once.
// When is_seal is true, src is plaintext and the produced ciphertext (dst)
// is hashed; otherwise src is ciphertext and is hashed before decryption.
@(private = "file")
gctr_ct64 :: proc(
	ctx: ^Context_GCM,
	dst: []byte,
	s: ^[_aes.GHASH_BLOCK_SIZE]byte,
	src: []byte,
	h: ^[_aes.GHASH_KEY_SIZE]byte,
	nonce: []byte,
	is_seal: bool,
) {
	// ct64_inc_ctr32 writes the 32-bit counter into the last 4 bytes of
	// dst (big-endian) and returns the incremented counter.
	ct64_inc_ctr32 := #force_inline proc "contextless" (dst: []byte, ctr: u32) -> u32 {
		endian.unchecked_put_u32be(dst[12:], ctr)
		return ctr + 1
	}
	// 2. Define a block J_0 as follows:
	// if len(IV) = 96, then let J0 = IV || 0^31 || 1
	//
	// Note: We only support 96 bit IVs.
	tmp, tmp2: [ct64.STRIDE][BLOCK_SIZE]byte = ---, ---
	ctrs, blks: [ct64.STRIDE][]byte = ---, ---
	// The first counter value (1) is consumed by j0, so GCTR starts at 2.
	ctr: u32 = 2
	for i in 0 ..< ct64.STRIDE {
		// Setup scratch space for the keystream.
		blks[i] = tmp2[i][:]
		// Pre-copy the IV to all the counter blocks.
		ctrs[i] = tmp[i][:]
		copy(ctrs[i], nonce)
	}
	// We stitch the GCTR and GHASH operations together, so that only
	// one pass over the ciphertext is required.
	impl := &ctx._impl.(ct64.Context)
	src, dst := src, dst
	nr_blocks := len(src) / BLOCK_SIZE
	for nr_blocks > 0 {
		n := min(ct64.STRIDE, nr_blocks)
		l := n * BLOCK_SIZE
		// Decrypting: hash the ciphertext before it is overwritten.
		if !is_seal {
			ct64.ghash(s[:], h[:], src[:l])
		}
		// The keystream is written to a separate buffer, as we will
		// reuse the first 96-bits of each counter.
		for i in 0 ..< n {
			ctr = ct64_inc_ctr32(ctrs[i], ctr)
		}
		ct64.encrypt_blocks(impl, blks[:n], ctrs[:n])
		xor_blocks(dst, src, blks[:n])
		// Encrypting: hash the freshly produced ciphertext.
		if is_seal {
			ct64.ghash(s[:], h[:], dst[:l])
		}
		src = src[l:]
		dst = dst[l:]
		nr_blocks -= n
	}
	// Handle any trailing partial block.
	if l := len(src); l > 0 {
		if !is_seal {
			ct64.ghash(s[:], h[:], src[:l])
		}
		ct64_inc_ctr32(ctrs[0], ctr)
		ct64.encrypt_block(impl, ctrs[0], ctrs[0])
		for i in 0 ..< l {
			dst[i] = src[i] ~ ctrs[0][i]
		}
		if is_seal {
			ct64.ghash(s[:], h[:], dst[:l])
		}
	}
	// Scrub the counter/keystream scratch space.
	mem.zero_explicit(&tmp, size_of(tmp))
	mem.zero_explicit(&tmp2, size_of(tmp2))
}

View File

@@ -0,0 +1,41 @@
package aes
import "core:crypto/_aes/ct64"
import "core:mem"
import "core:reflect"
// Context_Impl holds the state of whichever AES implementation is in use.
@(private)
Context_Impl :: union {
	ct64.Context,
	Context_Impl_Hardware,
}
// Implementation is an AES implementation. Most callers will not need
// to use this as the package will automatically select the most performant
// implementation available (See `is_hardware_accelerated()`).
Implementation :: enum {
	Portable,
	Hardware,
}
// init_impl initializes ctx with the requested implementation, silently
// falling back to the portable one when hardware support is unavailable.
@(private)
init_impl :: proc(ctx: ^Context_Impl, key: []byte, impl: Implementation) {
	impl := impl
	if !is_hardware_accelerated() {
		impl = .Portable
	}
	switch impl {
	case .Portable:
		// Select the union variant first, then key it in place.
		reflect.set_union_variant_typeid(ctx^, typeid_of(ct64.Context))
		ct64.init(&ctx.(ct64.Context), key)
	case .Hardware:
		reflect.set_union_variant_typeid(ctx^, typeid_of(Context_Impl_Hardware))
		init_impl_hw(&ctx.(Context_Impl_Hardware), key)
	}
}
// reset_impl scrubs the implementation state (including the expanded key
// schedule) with an explicit zeroization that will not be optimized away.
@(private)
reset_impl :: proc "contextless" (ctx: ^Context_Impl) {
	mem.zero_explicit(ctx, size_of(Context_Impl))
}

View File

@@ -0,0 +1,43 @@
package aes
@(private = "file")
ERR_HW_NOT_SUPPORTED :: "crypto/aes: hardware implementation unsupported"
// is_hardware_accelerated returns true iff hardware accelerated AES
// is supported.
//
// This is the generic fallback for targets without a hardware AES
// implementation, so it always returns false.
is_hardware_accelerated :: proc "contextless" () -> bool {
	return false
}
// Context_Impl_Hardware and the *_hw procs below are stubs for targets
// without hardware AES support. They should be unreachable: callers are
// expected to consult is_hardware_accelerated() (see init_impl), so each
// stub simply panics.
@(private)
Context_Impl_Hardware :: struct {}
@(private)
init_impl_hw :: proc(ctx: ^Context_Impl_Hardware, key: []byte) {
	panic(ERR_HW_NOT_SUPPORTED)
}
@(private)
encrypt_block_hw :: proc(ctx: ^Context_Impl_Hardware, dst, src: []byte) {
	panic(ERR_HW_NOT_SUPPORTED)
}
@(private)
decrypt_block_hw :: proc(ctx: ^Context_Impl_Hardware, dst, src: []byte) {
	panic(ERR_HW_NOT_SUPPORTED)
}
@(private)
ctr_blocks_hw :: proc(ctx: ^Context_CTR, dst, src: []byte, nr_blocks: int) {
	panic(ERR_HW_NOT_SUPPORTED)
}
@(private)
gcm_seal_hw :: proc(ctx: ^Context_Impl_Hardware, dst, tag, nonce, aad, plaintext: []byte) {
	panic(ERR_HW_NOT_SUPPORTED)
}
@(private)
gcm_open_hw :: proc(ctx: ^Context_Impl_Hardware, dst, nonce, aad, ciphertext, tag: []byte) -> bool {
	panic(ERR_HW_NOT_SUPPORTED)
}

View File

@@ -11,7 +11,7 @@ HAS_RAND_BYTES :: true
_rand_bytes :: proc(dst: []byte) {
err := Sec.RandomCopyBytes(count=len(dst), bytes=raw_data(dst))
if err != .Success {
msg := CF.StringCopyToOdinString(Sec.CopyErrorMessageString(err))
panic(fmt.tprintf("crypto/rand_bytes: SecRandomCopyBytes returned non-zero result: %v %s", err, msg))
msg := CF.StringCopyToOdinString(Sec.CopyErrorMessageString(err))
fmt.panicf("crypto/rand_bytes: SecRandomCopyBytes returned non-zero result: %v %s", err, msg)
}
}

View File

@@ -32,7 +32,7 @@ _rand_bytes :: proc (dst: []byte) {
// All other failures are things that should NEVER happen
// unless the kernel interface changes (ie: the Linux
// developers break userland).
panic(fmt.tprintf("crypto: getrandom failed: %v", errno))
fmt.panicf("crypto: getrandom failed: %v", errno)
}
l -= n_read
dst = dst[n_read:]

View File

@@ -11,16 +11,16 @@ _rand_bytes :: proc(dst: []byte) {
ret := (os.Errno)(win32.BCryptGenRandom(nil, raw_data(dst), u32(len(dst)), win32.BCRYPT_USE_SYSTEM_PREFERRED_RNG))
if ret != os.ERROR_NONE {
switch ret {
case os.ERROR_INVALID_HANDLE:
// The handle to the first parameter is invalid.
// This should not happen here, since we explicitly pass nil to it
panic("crypto: BCryptGenRandom Invalid handle for hAlgorithm")
case os.ERROR_INVALID_PARAMETER:
// One of the parameters was invalid
panic("crypto: BCryptGenRandom Invalid parameter")
case:
// Unknown error
panic(fmt.tprintf("crypto: BCryptGenRandom failed: %d\n", ret))
case os.ERROR_INVALID_HANDLE:
// The handle to the first parameter is invalid.
// This should not happen here, since we explicitly pass nil to it
panic("crypto: BCryptGenRandom Invalid handle for hAlgorithm")
case os.ERROR_INVALID_PARAMETER:
// One of the parameters was invalid
panic("crypto: BCryptGenRandom Invalid parameter")
case:
// Unknown error
fmt.panicf("crypto: BCryptGenRandom failed: %d\n", ret)
}
}
}

View File

@@ -0,0 +1,137 @@
package ansi
// C0 control characters
BEL :: "\a" // Bell
BS :: "\b" // Backspace
ESC :: "\e" // Escape
// Fe Escape sequences
CSI :: ESC + "[" // Control Sequence Introducer
OSC :: ESC + "]" // Operating System Command
ST :: ESC + "\\" // String Terminator
// CSI sequences
CUU :: "A" // Cursor Up
CUD :: "B" // Cursor Down
CUF :: "C" // Cursor Forward
CUB :: "D" // Cursor Back
CNL :: "E" // Cursor Next Line
CPL :: "F" // Cursor Previous Line
CHA :: "G" // Cursor Horizontal Absolute
CUP :: "H" // Cursor Position
ED :: "J" // Erase in Display
EL :: "K" // Erase in Line
SU :: "S" // Scroll Up
SD :: "T" // Scroll Down
HVP :: "f" // Horizontal Vertical Position
SGR :: "m" // Select Graphic Rendition
AUX_ON :: "5i" // AUX Port On
AUX_OFF :: "4i" // AUX Port Off
DSR :: "6n" // Device Status Report
// CSI: private sequences
SCP :: "s" // Save Current Cursor Position
RCP :: "u" // Restore Saved Cursor Position
DECAWM_ON :: "?7h" // Auto Wrap Mode (Enabled)
DECAWM_OFF :: "?7l" // Auto Wrap Mode (Disabled)
DECTCEM_SHOW :: "?25h" // Text Cursor Enable Mode (Visible)
DECTCEM_HIDE :: "?25l" // Text Cursor Enable Mode (Invisible)
// SGR sequences (joined with ";" and terminated by the SGR final byte)
RESET :: "0"
BOLD :: "1"
FAINT :: "2"
ITALIC :: "3" // Not widely supported.
UNDERLINE :: "4"
BLINK_SLOW :: "5"
BLINK_RAPID :: "6" // Not widely supported.
INVERT :: "7" // Also known as reverse video.
HIDE :: "8" // Not widely supported.
STRIKE :: "9"
FONT_PRIMARY :: "10"
FONT_ALT1 :: "11"
FONT_ALT2 :: "12"
FONT_ALT3 :: "13"
FONT_ALT4 :: "14"
FONT_ALT5 :: "15"
FONT_ALT6 :: "16"
FONT_ALT7 :: "17"
FONT_ALT8 :: "18"
FONT_ALT9 :: "19"
FONT_FRAKTUR :: "20" // Rarely supported.
UNDERLINE_DOUBLE :: "21" // May be interpreted as "disable bold."
NO_BOLD_FAINT :: "22"
NO_ITALIC_BLACKLETTER :: "23"
NO_UNDERLINE :: "24"
NO_BLINK :: "25"
PROPORTIONAL_SPACING :: "26"
NO_REVERSE :: "27"
NO_HIDE :: "28"
NO_STRIKE :: "29"
FG_BLACK :: "30"
FG_RED :: "31"
FG_GREEN :: "32"
FG_YELLOW :: "33"
FG_BLUE :: "34"
FG_MAGENTA :: "35"
FG_CYAN :: "36"
FG_WHITE :: "37"
FG_COLOR :: "38"
FG_COLOR_8_BIT :: "38;5" // Followed by ";n" where n is in 0..=255
FG_COLOR_24_BIT :: "38;2" // Followed by ";r;g;b" where r,g,b are in 0..=255
FG_DEFAULT :: "39"
BG_BLACK :: "40"
BG_RED :: "41"
BG_GREEN :: "42"
BG_YELLOW :: "43"
BG_BLUE :: "44"
BG_MAGENTA :: "45"
BG_CYAN :: "46"
BG_WHITE :: "47"
BG_COLOR :: "48"
BG_COLOR_8_BIT :: "48;5" // Followed by ";n" where n is in 0..=255
BG_COLOR_24_BIT :: "48;2" // Followed by ";r;g;b" where r,g,b are in 0..=255
BG_DEFAULT :: "49"
NO_PROPORTIONAL_SPACING :: "50"
FRAMED :: "51"
ENCIRCLED :: "52"
OVERLINED :: "53"
NO_FRAME_ENCIRCLE :: "54"
NO_OVERLINE :: "55"
// SGR: non-standard bright colors
FG_BRIGHT_BLACK :: "90" // Also known as grey.
FG_BRIGHT_RED :: "91"
FG_BRIGHT_GREEN :: "92"
FG_BRIGHT_YELLOW :: "93"
FG_BRIGHT_BLUE :: "94"
FG_BRIGHT_MAGENTA :: "95"
FG_BRIGHT_CYAN :: "96"
FG_BRIGHT_WHITE :: "97"
BG_BRIGHT_BLACK :: "100" // Also known as grey.
BG_BRIGHT_RED :: "101"
BG_BRIGHT_GREEN :: "102"
BG_BRIGHT_YELLOW :: "103"
BG_BRIGHT_BLUE :: "104"
BG_BRIGHT_MAGENTA :: "105"
BG_BRIGHT_CYAN :: "106"
BG_BRIGHT_WHITE :: "107"
// Fp Escape sequences
DECSC :: ESC + "7" // DEC Save Cursor
DECRC :: ESC + "8" // DEC Restore Cursor
// OSC sequences
WINDOW_TITLE :: "2" // Followed by ";<text>" ST.
HYPERLINK :: "8" // Followed by ";[params];<URI>" ST. Closed by OSC HYPERLINK ";;" ST.
CLIPBOARD :: "52" // Followed by ";c;<Base64-encoded string>" ST.

View File

@@ -0,0 +1,20 @@
/*
package ansi implements constant references to many widely-supported ANSI
escape codes, primarily used in terminal emulators for enhanced graphics, such
as colors, text styling, and animated displays.
For example, you can print out a line of cyan text like this:
fmt.println(ansi.CSI + ansi.FG_CYAN + ansi.SGR + "Hellope!" + ansi.CSI + ansi.RESET + ansi.SGR)
Multiple SGR (Select Graphic Rendition) codes can be joined by semicolons:
fmt.println(ansi.CSI + ansi.BOLD + ";" + ansi.FG_BLUE + ansi.SGR + "Hellope!" + ansi.CSI + ansi.RESET + ansi.SGR)
If your terminal supports 24-bit true color mode, you can also do this:
fmt.println(ansi.CSI + ansi.FG_COLOR_24_BIT + ";0;255;255" + ansi.SGR + "Hellope!" + ansi.CSI + ansi.RESET + ansi.SGR)
For more information, see:
1. https://en.wikipedia.org/wiki/ANSI_escape_code
2. https://www.vt100.net/docs/vt102-ug/chapter5.html
3. https://invisible-island.net/xterm/ctlseqs/ctlseqs.html
*/
package ansi

View File

@@ -320,8 +320,8 @@ to_diagnostic_format :: proc {
// Turns the given CBOR value into a human-readable string.
// See docs on the proc group `diagnose` for more info.
to_diagnostic_format_string :: proc(val: Value, padding := 0, allocator := context.allocator) -> (string, mem.Allocator_Error) #optional_allocator_error {
b := strings.builder_make(allocator)
to_diagnostic_format_string :: proc(val: Value, padding := 0, allocator := context.allocator, loc := #caller_location) -> (string, mem.Allocator_Error) #optional_allocator_error {
b := strings.builder_make(allocator, loc)
w := strings.to_stream(&b)
err := to_diagnostic_format_writer(w, val, padding)
if err == .EOF {

View File

@@ -95,24 +95,25 @@ decode :: decode_from
// Decodes the given string as CBOR.
// See docs on the proc group `decode` for more information.
decode_from_string :: proc(s: string, flags: Decoder_Flags = {}, allocator := context.allocator) -> (v: Value, err: Decode_Error) {
decode_from_string :: proc(s: string, flags: Decoder_Flags = {}, allocator := context.allocator, loc := #caller_location) -> (v: Value, err: Decode_Error) {
r: strings.Reader
strings.reader_init(&r, s)
return decode_from_reader(strings.reader_to_stream(&r), flags, allocator)
return decode_from_reader(strings.reader_to_stream(&r), flags, allocator, loc)
}
// Reads a CBOR value from the given reader.
// See docs on the proc group `decode` for more information.
decode_from_reader :: proc(r: io.Reader, flags: Decoder_Flags = {}, allocator := context.allocator) -> (v: Value, err: Decode_Error) {
decode_from_reader :: proc(r: io.Reader, flags: Decoder_Flags = {}, allocator := context.allocator, loc := #caller_location) -> (v: Value, err: Decode_Error) {
return decode_from_decoder(
Decoder{ DEFAULT_MAX_PRE_ALLOC, flags, r },
allocator=allocator,
loc = loc,
)
}
// Reads a CBOR value from the given decoder.
// See docs on the proc group `decode` for more information.
decode_from_decoder :: proc(d: Decoder, allocator := context.allocator) -> (v: Value, err: Decode_Error) {
decode_from_decoder :: proc(d: Decoder, allocator := context.allocator, loc := #caller_location) -> (v: Value, err: Decode_Error) {
context.allocator = allocator
d := d
@@ -121,13 +122,13 @@ decode_from_decoder :: proc(d: Decoder, allocator := context.allocator) -> (v: V
d.max_pre_alloc = DEFAULT_MAX_PRE_ALLOC
}
v, err = _decode_from_decoder(d)
v, err = _decode_from_decoder(d, {}, allocator, loc)
// Normal EOF does not exist here, we try to read the exact amount that is said to be provided.
if err == .EOF { err = .Unexpected_EOF }
return
}
_decode_from_decoder :: proc(d: Decoder, hdr: Header = Header(0)) -> (v: Value, err: Decode_Error) {
_decode_from_decoder :: proc(d: Decoder, hdr: Header = Header(0), allocator := context.allocator, loc := #caller_location) -> (v: Value, err: Decode_Error) {
hdr := hdr
r := d.reader
if hdr == Header(0) { hdr = _decode_header(r) or_return }
@@ -161,11 +162,11 @@ _decode_from_decoder :: proc(d: Decoder, hdr: Header = Header(0)) -> (v: Value,
switch maj {
case .Unsigned: return _decode_tiny_u8(add)
case .Negative: return Negative_U8(_decode_tiny_u8(add) or_return), nil
case .Bytes: return _decode_bytes_ptr(d, add)
case .Text: return _decode_text_ptr(d, add)
case .Array: return _decode_array_ptr(d, add)
case .Map: return _decode_map_ptr(d, add)
case .Tag: return _decode_tag_ptr(d, add)
case .Bytes: return _decode_bytes_ptr(d, add, .Bytes, allocator, loc)
case .Text: return _decode_text_ptr(d, add, allocator, loc)
case .Array: return _decode_array_ptr(d, add, allocator, loc)
case .Map: return _decode_map_ptr(d, add, allocator, loc)
case .Tag: return _decode_tag_ptr(d, add, allocator, loc)
case .Other: return _decode_tiny_simple(add)
case: return nil, .Bad_Major
}
@@ -203,27 +204,27 @@ encode :: encode_into
// Encodes the CBOR value into binary CBOR allocated on the given allocator.
// See the docs on the proc group `encode_into` for more info.
encode_into_bytes :: proc(v: Value, flags := ENCODE_SMALL, allocator := context.allocator, temp_allocator := context.temp_allocator) -> (data: []byte, err: Encode_Error) {
b := strings.builder_make(allocator) or_return
encode_into_bytes :: proc(v: Value, flags := ENCODE_SMALL, allocator := context.allocator, temp_allocator := context.temp_allocator, loc := #caller_location) -> (data: []byte, err: Encode_Error) {
b := strings.builder_make(allocator, loc) or_return
encode_into_builder(&b, v, flags, temp_allocator) or_return
return b.buf[:], nil
}
// Encodes the CBOR value into binary CBOR written to the given builder.
// See the docs on the proc group `encode_into` for more info.
encode_into_builder :: proc(b: ^strings.Builder, v: Value, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator) -> Encode_Error {
return encode_into_writer(strings.to_stream(b), v, flags, temp_allocator)
encode_into_builder :: proc(b: ^strings.Builder, v: Value, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator, loc := #caller_location) -> Encode_Error {
return encode_into_writer(strings.to_stream(b), v, flags, temp_allocator, loc=loc)
}
// Encodes the CBOR value into binary CBOR written to the given writer.
// See the docs on the proc group `encode_into` for more info.
encode_into_writer :: proc(w: io.Writer, v: Value, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator) -> Encode_Error {
return encode_into_encoder(Encoder{flags, w, temp_allocator}, v)
encode_into_writer :: proc(w: io.Writer, v: Value, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator, loc := #caller_location) -> Encode_Error {
return encode_into_encoder(Encoder{flags, w, temp_allocator}, v, loc=loc)
}
// Encodes the CBOR value into binary CBOR written to the given encoder.
// See the docs on the proc group `encode_into` for more info.
encode_into_encoder :: proc(e: Encoder, v: Value) -> Encode_Error {
encode_into_encoder :: proc(e: Encoder, v: Value, loc := #caller_location) -> Encode_Error {
e := e
if e.temp_allocator.procedure == nil {
@@ -366,21 +367,21 @@ _encode_u64_exact :: proc(w: io.Writer, v: u64, major: Major = .Unsigned) -> (er
return
}
_decode_bytes_ptr :: proc(d: Decoder, add: Add, type: Major = .Bytes) -> (v: ^Bytes, err: Decode_Error) {
v = new(Bytes) or_return
defer if err != nil { free(v) }
_decode_bytes_ptr :: proc(d: Decoder, add: Add, type: Major = .Bytes, allocator := context.allocator, loc := #caller_location) -> (v: ^Bytes, err: Decode_Error) {
v = new(Bytes, allocator, loc) or_return
defer if err != nil { free(v, allocator, loc) }
v^ = _decode_bytes(d, add, type) or_return
v^ = _decode_bytes(d, add, type, allocator, loc) or_return
return
}
_decode_bytes :: proc(d: Decoder, add: Add, type: Major = .Bytes, allocator := context.allocator) -> (v: Bytes, err: Decode_Error) {
_decode_bytes :: proc(d: Decoder, add: Add, type: Major = .Bytes, allocator := context.allocator, loc := #caller_location) -> (v: Bytes, err: Decode_Error) {
context.allocator = allocator
add := add
n, scap := _decode_len_str(d, add) or_return
buf := strings.builder_make(0, scap) or_return
buf := strings.builder_make(0, scap, allocator, loc) or_return
defer if err != nil { strings.builder_destroy(&buf) }
buf_stream := strings.to_stream(&buf)
@@ -426,40 +427,40 @@ _encode_bytes :: proc(e: Encoder, val: Bytes, major: Major = .Bytes) -> (err: En
return
}
_decode_text_ptr :: proc(d: Decoder, add: Add) -> (v: ^Text, err: Decode_Error) {
v = new(Text) or_return
_decode_text_ptr :: proc(d: Decoder, add: Add, allocator := context.allocator, loc := #caller_location) -> (v: ^Text, err: Decode_Error) {
v = new(Text, allocator, loc) or_return
defer if err != nil { free(v) }
v^ = _decode_text(d, add) or_return
v^ = _decode_text(d, add, allocator, loc) or_return
return
}
_decode_text :: proc(d: Decoder, add: Add, allocator := context.allocator) -> (v: Text, err: Decode_Error) {
return (Text)(_decode_bytes(d, add, .Text, allocator) or_return), nil
_decode_text :: proc(d: Decoder, add: Add, allocator := context.allocator, loc := #caller_location) -> (v: Text, err: Decode_Error) {
return (Text)(_decode_bytes(d, add, .Text, allocator, loc) or_return), nil
}
_encode_text :: proc(e: Encoder, val: Text) -> Encode_Error {
return _encode_bytes(e, transmute([]byte)val, .Text)
}
_decode_array_ptr :: proc(d: Decoder, add: Add) -> (v: ^Array, err: Decode_Error) {
v = new(Array) or_return
_decode_array_ptr :: proc(d: Decoder, add: Add, allocator := context.allocator, loc := #caller_location) -> (v: ^Array, err: Decode_Error) {
v = new(Array, allocator, loc) or_return
defer if err != nil { free(v) }
v^ = _decode_array(d, add) or_return
v^ = _decode_array(d, add, allocator, loc) or_return
return
}
_decode_array :: proc(d: Decoder, add: Add) -> (v: Array, err: Decode_Error) {
_decode_array :: proc(d: Decoder, add: Add, allocator := context.allocator, loc := #caller_location) -> (v: Array, err: Decode_Error) {
n, scap := _decode_len_container(d, add) or_return
array := make([dynamic]Value, 0, scap) or_return
array := make([dynamic]Value, 0, scap, allocator, loc) or_return
defer if err != nil {
for entry in array { destroy(entry) }
delete(array)
for entry in array { destroy(entry, allocator) }
delete(array, loc)
}
for i := 0; n == -1 || i < n; i += 1 {
val, verr := _decode_from_decoder(d)
val, verr := _decode_from_decoder(d, {}, allocator, loc)
if n == -1 && verr == .Break {
break
} else if verr != nil {
@@ -485,39 +486,39 @@ _encode_array :: proc(e: Encoder, arr: Array) -> Encode_Error {
return nil
}
_decode_map_ptr :: proc(d: Decoder, add: Add) -> (v: ^Map, err: Decode_Error) {
v = new(Map) or_return
_decode_map_ptr :: proc(d: Decoder, add: Add, allocator := context.allocator, loc := #caller_location) -> (v: ^Map, err: Decode_Error) {
v = new(Map, allocator, loc) or_return
defer if err != nil { free(v) }
v^ = _decode_map(d, add) or_return
v^ = _decode_map(d, add, allocator, loc) or_return
return
}
_decode_map :: proc(d: Decoder, add: Add) -> (v: Map, err: Decode_Error) {
_decode_map :: proc(d: Decoder, add: Add, allocator := context.allocator, loc := #caller_location) -> (v: Map, err: Decode_Error) {
n, scap := _decode_len_container(d, add) or_return
items := make([dynamic]Map_Entry, 0, scap) or_return
items := make([dynamic]Map_Entry, 0, scap, allocator, loc) or_return
defer if err != nil {
for entry in items {
destroy(entry.key)
destroy(entry.value)
}
delete(items)
delete(items, loc)
}
for i := 0; n == -1 || i < n; i += 1 {
key, kerr := _decode_from_decoder(d)
key, kerr := _decode_from_decoder(d, {}, allocator, loc)
if n == -1 && kerr == .Break {
break
} else if kerr != nil {
return nil, kerr
}
value := _decode_from_decoder(d) or_return
value := _decode_from_decoder(d, {}, allocator, loc) or_return
append(&items, Map_Entry{
key = key,
value = value,
}) or_return
}, loc) or_return
}
if .Shrink_Excess in d.flags { shrink(&items) }
@@ -578,20 +579,20 @@ _encode_map :: proc(e: Encoder, m: Map) -> (err: Encode_Error) {
return nil
}
_decode_tag_ptr :: proc(d: Decoder, add: Add) -> (v: Value, err: Decode_Error) {
tag := _decode_tag(d, add) or_return
_decode_tag_ptr :: proc(d: Decoder, add: Add, allocator := context.allocator, loc := #caller_location) -> (v: Value, err: Decode_Error) {
tag := _decode_tag(d, add, allocator, loc) or_return
if t, ok := tag.?; ok {
defer if err != nil { destroy(t.value) }
tp := new(Tag) or_return
tp := new(Tag, allocator, loc) or_return
tp^ = t
return tp, nil
}
// no error, no tag, this was the self described CBOR tag, skip it.
return _decode_from_decoder(d)
return _decode_from_decoder(d, {}, allocator, loc)
}
_decode_tag :: proc(d: Decoder, add: Add) -> (v: Maybe(Tag), err: Decode_Error) {
_decode_tag :: proc(d: Decoder, add: Add, allocator := context.allocator, loc := #caller_location) -> (v: Maybe(Tag), err: Decode_Error) {
num := _decode_uint_as_u64(d.reader, add) or_return
// CBOR can be wrapped in a tag that decoders can use to see/check if the binary data is CBOR.
@@ -602,7 +603,7 @@ _decode_tag :: proc(d: Decoder, add: Add) -> (v: Maybe(Tag), err: Decode_Error)
t := Tag{
number = num,
value = _decode_from_decoder(d) or_return,
value = _decode_from_decoder(d, {}, allocator, loc) or_return,
}
if nested, ok := t.value.(^Tag); ok {
@@ -883,4 +884,4 @@ _encode_deterministic_f64 :: proc(w: io.Writer, v: f64) -> io.Error {
}
return _encode_f64_exact(w, v)
}
}

View File

@@ -45,8 +45,8 @@ marshal :: marshal_into
// Marshals the given value into a CBOR byte stream (allocated using the given allocator).
// See docs on the `marshal_into` proc group for more info.
marshal_into_bytes :: proc(v: any, flags := ENCODE_SMALL, allocator := context.allocator, temp_allocator := context.temp_allocator) -> (bytes: []byte, err: Marshal_Error) {
b, alloc_err := strings.builder_make(allocator)
marshal_into_bytes :: proc(v: any, flags := ENCODE_SMALL, allocator := context.allocator, temp_allocator := context.temp_allocator, loc := #caller_location) -> (bytes: []byte, err: Marshal_Error) {
b, alloc_err := strings.builder_make(allocator, loc=loc)
// The builder as a stream also returns .EOF if it ran out of memory so this is consistent.
if alloc_err != nil {
return nil, .EOF
@@ -54,7 +54,7 @@ marshal_into_bytes :: proc(v: any, flags := ENCODE_SMALL, allocator := context.a
defer if err != nil { strings.builder_destroy(&b) }
if err = marshal_into_builder(&b, v, flags, temp_allocator); err != nil {
if err = marshal_into_builder(&b, v, flags, temp_allocator, loc=loc); err != nil {
return
}
@@ -63,20 +63,20 @@ marshal_into_bytes :: proc(v: any, flags := ENCODE_SMALL, allocator := context.a
// Marshals the given value into a CBOR byte stream written to the given builder.
// See docs on the `marshal_into` proc group for more info.
marshal_into_builder :: proc(b: ^strings.Builder, v: any, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator) -> Marshal_Error {
return marshal_into_writer(strings.to_writer(b), v, flags, temp_allocator)
marshal_into_builder :: proc(b: ^strings.Builder, v: any, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator, loc := #caller_location) -> Marshal_Error {
return marshal_into_writer(strings.to_writer(b), v, flags, temp_allocator, loc=loc)
}
// Marshals the given value into a CBOR byte stream written to the given writer.
// See docs on the `marshal_into` proc group for more info.
marshal_into_writer :: proc(w: io.Writer, v: any, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator) -> Marshal_Error {
marshal_into_writer :: proc(w: io.Writer, v: any, flags := ENCODE_SMALL, temp_allocator := context.temp_allocator, loc := #caller_location) -> Marshal_Error {
encoder := Encoder{flags, w, temp_allocator}
return marshal_into_encoder(encoder, v)
return marshal_into_encoder(encoder, v, loc=loc)
}
// Marshals the given value into a CBOR byte stream written to the given encoder.
// See docs on the `marshal_into` proc group for more info.
marshal_into_encoder :: proc(e: Encoder, v: any) -> (err: Marshal_Error) {
marshal_into_encoder :: proc(e: Encoder, v: any, loc := #caller_location) -> (err: Marshal_Error) {
e := e
if e.temp_allocator.procedure == nil {

View File

@@ -31,8 +31,8 @@ unmarshal :: proc {
unmarshal_from_string,
}
unmarshal_from_reader :: proc(r: io.Reader, ptr: ^$T, flags := Decoder_Flags{}, allocator := context.allocator, temp_allocator := context.temp_allocator) -> (err: Unmarshal_Error) {
err = unmarshal_from_decoder(Decoder{ DEFAULT_MAX_PRE_ALLOC, flags, r }, ptr, allocator, temp_allocator)
unmarshal_from_reader :: proc(r: io.Reader, ptr: ^$T, flags := Decoder_Flags{}, allocator := context.allocator, temp_allocator := context.temp_allocator, loc := #caller_location) -> (err: Unmarshal_Error) {
err = unmarshal_from_decoder(Decoder{ DEFAULT_MAX_PRE_ALLOC, flags, r }, ptr, allocator, temp_allocator, loc)
// Normal EOF does not exist here, we try to read the exact amount that is said to be provided.
if err == .EOF { err = .Unexpected_EOF }
@@ -40,21 +40,21 @@ unmarshal_from_reader :: proc(r: io.Reader, ptr: ^$T, flags := Decoder_Flags{},
}
// Unmarshals from a string, see docs on the proc group `Unmarshal` for more info.
unmarshal_from_string :: proc(s: string, ptr: ^$T, flags := Decoder_Flags{}, allocator := context.allocator, temp_allocator := context.temp_allocator) -> (err: Unmarshal_Error) {
unmarshal_from_string :: proc(s: string, ptr: ^$T, flags := Decoder_Flags{}, allocator := context.allocator, temp_allocator := context.temp_allocator, loc := #caller_location) -> (err: Unmarshal_Error) {
sr: strings.Reader
r := strings.to_reader(&sr, s)
err = unmarshal_from_reader(r, ptr, flags, allocator, temp_allocator)
err = unmarshal_from_reader(r, ptr, flags, allocator, temp_allocator, loc)
// Normal EOF does not exist here, we try to read the exact amount that is said to be provided.
if err == .EOF { err = .Unexpected_EOF }
return
}
unmarshal_from_decoder :: proc(d: Decoder, ptr: ^$T, allocator := context.allocator, temp_allocator := context.temp_allocator) -> (err: Unmarshal_Error) {
unmarshal_from_decoder :: proc(d: Decoder, ptr: ^$T, allocator := context.allocator, temp_allocator := context.temp_allocator, loc := #caller_location) -> (err: Unmarshal_Error) {
d := d
err = _unmarshal_any_ptr(d, ptr, nil, allocator, temp_allocator)
err = _unmarshal_any_ptr(d, ptr, nil, allocator, temp_allocator, loc)
// Normal EOF does not exist here, we try to read the exact amount that is said to be provided.
if err == .EOF { err = .Unexpected_EOF }
@@ -62,7 +62,7 @@ unmarshal_from_decoder :: proc(d: Decoder, ptr: ^$T, allocator := context.alloca
}
_unmarshal_any_ptr :: proc(d: Decoder, v: any, hdr: Maybe(Header) = nil, allocator := context.allocator, temp_allocator := context.temp_allocator) -> Unmarshal_Error {
_unmarshal_any_ptr :: proc(d: Decoder, v: any, hdr: Maybe(Header) = nil, allocator := context.allocator, temp_allocator := context.temp_allocator, loc := #caller_location) -> Unmarshal_Error {
context.allocator = allocator
context.temp_allocator = temp_allocator
v := v
@@ -78,10 +78,10 @@ _unmarshal_any_ptr :: proc(d: Decoder, v: any, hdr: Maybe(Header) = nil, allocat
}
data := any{(^rawptr)(v.data)^, ti.variant.(reflect.Type_Info_Pointer).elem.id}
return _unmarshal_value(d, data, hdr.? or_else (_decode_header(d.reader) or_return))
return _unmarshal_value(d, data, hdr.? or_else (_decode_header(d.reader) or_return), allocator, temp_allocator, loc)
}
_unmarshal_value :: proc(d: Decoder, v: any, hdr: Header) -> (err: Unmarshal_Error) {
_unmarshal_value :: proc(d: Decoder, v: any, hdr: Header, allocator := context.allocator, temp_allocator := context.temp_allocator, loc := #caller_location) -> (err: Unmarshal_Error) {
v := v
ti := reflect.type_info_base(type_info_of(v.id))
r := d.reader
@@ -104,7 +104,7 @@ _unmarshal_value :: proc(d: Decoder, v: any, hdr: Header) -> (err: Unmarshal_Err
// Allow generic unmarshal by doing it into a `Value`.
switch &dst in v {
case Value:
dst = err_conv(_decode_from_decoder(d, hdr)) or_return
dst = err_conv(_decode_from_decoder(d, hdr, allocator, loc)) or_return
return
}
@@ -308,7 +308,7 @@ _unmarshal_value :: proc(d: Decoder, v: any, hdr: Header) -> (err: Unmarshal_Err
if impl, ok := _tag_implementations_nr[nr]; ok {
return impl->unmarshal(d, nr, v)
} else if nr == TAG_OBJECT_TYPE {
return _unmarshal_union(d, v, ti, hdr)
return _unmarshal_union(d, v, ti, hdr, loc=loc)
} else {
// Discard the tag info and unmarshal as its value.
return _unmarshal_value(d, v, _decode_header(r) or_return)
@@ -316,19 +316,19 @@ _unmarshal_value :: proc(d: Decoder, v: any, hdr: Header) -> (err: Unmarshal_Err
return _unsupported(v, hdr, add)
case .Bytes: return _unmarshal_bytes(d, v, ti, hdr, add)
case .Text: return _unmarshal_string(d, v, ti, hdr, add)
case .Array: return _unmarshal_array(d, v, ti, hdr, add)
case .Map: return _unmarshal_map(d, v, ti, hdr, add)
case .Bytes: return _unmarshal_bytes(d, v, ti, hdr, add, allocator=allocator, loc=loc)
case .Text: return _unmarshal_string(d, v, ti, hdr, add, allocator=allocator, loc=loc)
case .Array: return _unmarshal_array(d, v, ti, hdr, add, allocator=allocator, loc=loc)
case .Map: return _unmarshal_map(d, v, ti, hdr, add, allocator=allocator, loc=loc)
case: return .Bad_Major
}
}
_unmarshal_bytes :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add) -> (err: Unmarshal_Error) {
_unmarshal_bytes :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add, allocator := context.allocator, loc := #caller_location) -> (err: Unmarshal_Error) {
#partial switch t in ti.variant {
case reflect.Type_Info_String:
bytes := err_conv(_decode_bytes(d, add)) or_return
bytes := err_conv(_decode_bytes(d, add, allocator=allocator, loc=loc)) or_return
if t.is_cstring {
raw := (^cstring)(v.data)
@@ -347,7 +347,7 @@ _unmarshal_bytes :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
if elem_base.id != byte { return _unsupported(v, hdr) }
bytes := err_conv(_decode_bytes(d, add)) or_return
bytes := err_conv(_decode_bytes(d, add, allocator=allocator, loc=loc)) or_return
raw := (^mem.Raw_Slice)(v.data)
raw^ = transmute(mem.Raw_Slice)bytes
return
@@ -357,12 +357,12 @@ _unmarshal_bytes :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
if elem_base.id != byte { return _unsupported(v, hdr) }
bytes := err_conv(_decode_bytes(d, add)) or_return
bytes := err_conv(_decode_bytes(d, add, allocator=allocator, loc=loc)) or_return
raw := (^mem.Raw_Dynamic_Array)(v.data)
raw.data = raw_data(bytes)
raw.len = len(bytes)
raw.cap = len(bytes)
raw.allocator = context.allocator
raw.allocator = allocator
return
case reflect.Type_Info_Array:
@@ -385,10 +385,10 @@ _unmarshal_bytes :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
return _unsupported(v, hdr)
}
_unmarshal_string :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add) -> (err: Unmarshal_Error) {
_unmarshal_string :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add, allocator := context.allocator, temp_allocator := context.temp_allocator, loc := #caller_location) -> (err: Unmarshal_Error) {
#partial switch t in ti.variant {
case reflect.Type_Info_String:
text := err_conv(_decode_text(d, add)) or_return
text := err_conv(_decode_text(d, add, allocator, loc)) or_return
if t.is_cstring {
raw := (^cstring)(v.data)
@@ -403,8 +403,8 @@ _unmarshal_string :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Heade
// Enum by its variant name.
case reflect.Type_Info_Enum:
text := err_conv(_decode_text(d, add, allocator=context.temp_allocator)) or_return
defer delete(text, context.temp_allocator)
text := err_conv(_decode_text(d, add, allocator=temp_allocator, loc=loc)) or_return
defer delete(text, temp_allocator, loc)
for name, i in t.names {
if name == text {
@@ -414,8 +414,8 @@ _unmarshal_string :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Heade
}
case reflect.Type_Info_Rune:
text := err_conv(_decode_text(d, add, allocator=context.temp_allocator)) or_return
defer delete(text, context.temp_allocator)
text := err_conv(_decode_text(d, add, allocator=temp_allocator, loc=loc)) or_return
defer delete(text, temp_allocator, loc)
r := (^rune)(v.data)
dr, n := utf8.decode_rune(text)
@@ -430,13 +430,15 @@ _unmarshal_string :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Heade
return _unsupported(v, hdr)
}
_unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add) -> (err: Unmarshal_Error) {
_unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add, allocator := context.allocator, loc := #caller_location) -> (err: Unmarshal_Error) {
assign_array :: proc(
d: Decoder,
da: ^mem.Raw_Dynamic_Array,
elemt: ^reflect.Type_Info,
length: int,
growable := true,
allocator := context.allocator,
loc := #caller_location,
) -> (out_of_space: bool, err: Unmarshal_Error) {
for idx: uintptr = 0; length == -1 || idx < uintptr(length); idx += 1 {
elem_ptr := rawptr(uintptr(da.data) + idx*uintptr(elemt.size))
@@ -450,13 +452,13 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
if !growable { return true, .Out_Of_Memory }
cap := 2 * da.cap
ok := runtime.__dynamic_array_reserve(da, elemt.size, elemt.align, cap)
ok := runtime.__dynamic_array_reserve(da, elemt.size, elemt.align, cap, loc)
// NOTE: Might be lying here, but it is at least an allocator error.
if !ok { return false, .Out_Of_Memory }
}
err = _unmarshal_value(d, elem, hdr)
err = _unmarshal_value(d, elem, hdr, allocator=allocator, loc=loc)
if length == -1 && err == .Break { break }
if err != nil { return }
@@ -469,10 +471,10 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
// Allow generically storing the values array.
switch &dst in v {
case ^Array:
dst = err_conv(_decode_array_ptr(d, add)) or_return
dst = err_conv(_decode_array_ptr(d, add, allocator=allocator, loc=loc)) or_return
return
case Array:
dst = err_conv(_decode_array(d, add)) or_return
dst = err_conv(_decode_array(d, add, allocator=allocator, loc=loc)) or_return
return
}
@@ -480,8 +482,8 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
case reflect.Type_Info_Slice:
length, scap := err_conv(_decode_len_container(d, add)) or_return
data := mem.alloc_bytes_non_zeroed(t.elem.size * scap, t.elem.align) or_return
defer if err != nil { mem.free_bytes(data) }
data := mem.alloc_bytes_non_zeroed(t.elem.size * scap, t.elem.align, allocator=allocator, loc=loc) or_return
defer if err != nil { mem.free_bytes(data, allocator=allocator, loc=loc) }
da := mem.Raw_Dynamic_Array{raw_data(data), 0, length, context.allocator }
@@ -489,7 +491,7 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
if .Shrink_Excess in d.flags {
// Ignoring an error here, but this is not critical to succeed.
_ = runtime.__dynamic_array_shrink(&da, t.elem.size, t.elem.align, da.len)
_ = runtime.__dynamic_array_shrink(&da, t.elem.size, t.elem.align, da.len, loc=loc)
}
raw := (^mem.Raw_Slice)(v.data)
@@ -500,8 +502,8 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
case reflect.Type_Info_Dynamic_Array:
length, scap := err_conv(_decode_len_container(d, add)) or_return
data := mem.alloc_bytes_non_zeroed(t.elem.size * scap, t.elem.align) or_return
defer if err != nil { mem.free_bytes(data) }
data := mem.alloc_bytes_non_zeroed(t.elem.size * scap, t.elem.align, loc=loc) or_return
defer if err != nil { mem.free_bytes(data, allocator=allocator, loc=loc) }
raw := (^mem.Raw_Dynamic_Array)(v.data)
raw.data = raw_data(data)
@@ -513,7 +515,7 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
if .Shrink_Excess in d.flags {
// Ignoring an error here, but this is not critical to succeed.
_ = runtime.__dynamic_array_shrink(raw, t.elem.size, t.elem.align, raw.len)
_ = runtime.__dynamic_array_shrink(raw, t.elem.size, t.elem.align, raw.len, loc=loc)
}
return
@@ -525,7 +527,7 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
return _unsupported(v, hdr)
}
da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, length, context.allocator }
da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, length, allocator }
out_of_space := assign_array(d, &da, t.elem, length, growable=false) or_return
if out_of_space { return _unsupported(v, hdr) }
@@ -539,7 +541,7 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
return _unsupported(v, hdr)
}
da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, length, context.allocator }
da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, length, allocator }
out_of_space := assign_array(d, &da, t.elem, length, growable=false) or_return
if out_of_space { return _unsupported(v, hdr) }
@@ -553,7 +555,7 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
return _unsupported(v, hdr)
}
da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, 2, context.allocator }
da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, 2, allocator }
info: ^runtime.Type_Info
switch ti.id {
@@ -575,7 +577,7 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
return _unsupported(v, hdr)
}
da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, 4, context.allocator }
da := mem.Raw_Dynamic_Array{rawptr(v.data), 0, 4, allocator }
info: ^runtime.Type_Info
switch ti.id {
@@ -593,17 +595,17 @@ _unmarshal_array :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
}
}
_unmarshal_map :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add) -> (err: Unmarshal_Error) {
_unmarshal_map :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, add: Add, allocator := context.allocator, loc := #caller_location) -> (err: Unmarshal_Error) {
r := d.reader
decode_key :: proc(d: Decoder, v: any, allocator := context.allocator) -> (k: string, err: Unmarshal_Error) {
decode_key :: proc(d: Decoder, v: any, allocator := context.allocator, loc := #caller_location) -> (k: string, err: Unmarshal_Error) {
entry_hdr := _decode_header(d.reader) or_return
entry_maj, entry_add := _header_split(entry_hdr)
#partial switch entry_maj {
case .Text:
k = err_conv(_decode_text(d, entry_add, allocator)) or_return
k = err_conv(_decode_text(d, entry_add, allocator=allocator, loc=loc)) or_return
return
case .Bytes:
bytes := err_conv(_decode_bytes(d, entry_add, allocator=allocator)) or_return
bytes := err_conv(_decode_bytes(d, entry_add, allocator=allocator, loc=loc)) or_return
k = string(bytes)
return
case:
@@ -615,10 +617,10 @@ _unmarshal_map :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header,
// Allow generically storing the map array.
switch &dst in v {
case ^Map:
dst = err_conv(_decode_map_ptr(d, add)) or_return
dst = err_conv(_decode_map_ptr(d, add, allocator=allocator, loc=loc)) or_return
return
case Map:
dst = err_conv(_decode_map(d, add)) or_return
dst = err_conv(_decode_map(d, add, allocator=allocator, loc=loc)) or_return
return
}
@@ -754,7 +756,7 @@ _unmarshal_map :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header,
// Unmarshal into a union, based on the `TAG_OBJECT_TYPE` tag of the spec, it denotes a tag which
// contains an array of exactly two elements, the first is a textual representation of the following
// CBOR value's type.
_unmarshal_union :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header) -> (err: Unmarshal_Error) {
_unmarshal_union :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header, loc := #caller_location) -> (err: Unmarshal_Error) {
r := d.reader
#partial switch t in ti.variant {
case reflect.Type_Info_Union:
@@ -792,7 +794,7 @@ _unmarshal_union :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
case reflect.Type_Info_Named:
if vti.name == target_name {
reflect.set_union_variant_raw_tag(v, tag)
return _unmarshal_value(d, any{v.data, variant.id}, _decode_header(r) or_return)
return _unmarshal_value(d, any{v.data, variant.id}, _decode_header(r) or_return, loc=loc)
}
case:
@@ -804,7 +806,7 @@ _unmarshal_union :: proc(d: Decoder, v: any, ti: ^reflect.Type_Info, hdr: Header
if variant_name == target_name {
reflect.set_union_variant_raw_tag(v, tag)
return _unmarshal_value(d, any{v.data, variant.id}, _decode_header(r) or_return)
return _unmarshal_value(d, any{v.data, variant.id}, _decode_header(r) or_return, loc=loc)
}
}
}

View File

@@ -56,38 +56,27 @@ CDATA_END :: "]]>"
COMMENT_START :: "<!--"
COMMENT_END :: "-->"
/*
Default: CDATA and comments are passed through unchanged.
*/
// Default: CDATA and comments are passed through unchanged.
XML_Decode_Option :: enum u8 {
/*
Do not decode & entities. It decodes by default.
If given, overrides `Decode_CDATA`.
*/
// Do not decode & entities. It decodes by default. If given, overrides `Decode_CDATA`.
No_Entity_Decode,
/*
CDATA is unboxed.
*/
// CDATA is unboxed.
Unbox_CDATA,
/*
Unboxed CDATA is decoded as well.
Ignored if `.Unbox_CDATA` is not given.
*/
// Unboxed CDATA is decoded as well. Ignored if `.Unbox_CDATA` is not given.
Decode_CDATA,
/*
Comments are stripped.
*/
// Comments are stripped.
Comment_Strip,
// Normalize whitespace
Normalize_Whitespace,
}
XML_Decode_Options :: bit_set[XML_Decode_Option; u8]
/*
Decode a string that may include SGML/XML/HTML entities.
The caller has to free the result.
*/
// Decode a string that may include SGML/XML/HTML entities.
// The caller has to free the result.
decode_xml :: proc(input: string, options := XML_Decode_Options{}, allocator := context.allocator) -> (decoded: string, err: Error) {
context.allocator = allocator
@@ -100,14 +89,14 @@ decode_xml :: proc(input: string, options := XML_Decode_Options{}, allocator :=
t := Tokenizer{src=input}
in_data := false
prev: rune = ' '
loop: for {
advance(&t) or_return
if t.r < 0 { break loop }
/*
Below here we're never inside a CDATA tag.
At most we'll see the start of one, but that doesn't affect the logic.
*/
// Below here we're never inside a CDATA tag. At most we'll see the start of one,
// but that doesn't affect the logic.
switch t.r {
case '<':
/*
@@ -126,9 +115,7 @@ decode_xml :: proc(input: string, options := XML_Decode_Options{}, allocator :=
in_data = _handle_xml_special(&t, &builder, options) or_return
case ']':
/*
If we're unboxing _and_ decoding CDATA, we'll have to check for the end tag.
*/
// If we're unboxing _and_ decoding CDATA, we'll have to check for the end tag.
if in_data {
if t.read_offset + len(CDATA_END) < len(t.src) {
if string(t.src[t.offset:][:len(CDATA_END)]) == CDATA_END {
@@ -143,22 +130,16 @@ decode_xml :: proc(input: string, options := XML_Decode_Options{}, allocator :=
case:
if in_data && .Decode_CDATA not_in options {
/*
Unboxed, but undecoded.
*/
// Unboxed, but undecoded.
write_rune(&builder, t.r)
continue
}
if t.r == '&' {
if entity, entity_err := _extract_xml_entity(&t); entity_err != .None {
/*
We read to the end of the string without closing the entity.
Pass through as-is.
*/
// We read to the end of the string without closing the entity. Pass through as-is.
write_string(&builder, entity)
} else {
if .No_Entity_Decode not_in options {
if decoded, ok := xml_decode_entity(entity); ok {
write_rune(&builder, decoded)
@@ -166,19 +147,41 @@ decode_xml :: proc(input: string, options := XML_Decode_Options{}, allocator :=
}
}
/*
Literal passthrough because the decode failed or we want entities not decoded.
*/
// Literal passthrough because the decode failed or we want entities not decoded.
write_string(&builder, "&")
write_string(&builder, entity)
write_string(&builder, ";")
}
} else {
write_rune(&builder, t.r)
// Handle AV Normalization: https://www.w3.org/TR/2006/REC-xml11-20060816/#AVNormalize
if .Normalize_Whitespace in options {
switch t.r {
case ' ', '\r', '\n', '\t':
if prev != ' ' {
write_rune(&builder, ' ')
prev = ' '
}
case:
write_rune(&builder, t.r)
prev = t.r
}
} else {
// https://www.w3.org/TR/2006/REC-xml11-20060816/#sec-line-ends
switch t.r {
case '\n', 0x85, 0x2028:
write_rune(&builder, '\n')
case '\r': // Do nothing until next character
case:
if prev == '\r' { // Turn a single carriage return into a \n
write_rune(&builder, '\n')
}
write_rune(&builder, t.r)
}
prev = t.r
}
}
}
}
return strings.clone(strings.to_string(builder), allocator), err
}
@@ -253,24 +256,18 @@ xml_decode_entity :: proc(entity: string) -> (decoded: rune, ok: bool) {
return rune(val), true
case:
/*
Named entity.
*/
// Named entity.
return named_xml_entity_to_rune(entity)
}
}
/*
Private XML helper to extract `&<stuff>;` entity.
*/
// Private XML helper to extract `&<stuff>;` entity.
@(private="file")
_extract_xml_entity :: proc(t: ^Tokenizer) -> (entity: string, err: Error) {
assert(t != nil && t.r == '&')
/*
All of these would be in the ASCII range.
Even if one is not, it doesn't matter. All characters we need to compare to extract are.
*/
// All of these would be in the ASCII range.
// Even if one is not, it doesn't matter. All characters we need to compare to extract are.
length := len(t.src)
found := false
@@ -292,9 +289,7 @@ _extract_xml_entity :: proc(t: ^Tokenizer) -> (entity: string, err: Error) {
return string(t.src[t.offset : t.read_offset]), .Invalid_Entity_Encoding
}
/*
Private XML helper for CDATA and comments.
*/
// Private XML helper for CDATA and comments.
@(private="file")
_handle_xml_special :: proc(t: ^Tokenizer, builder: ^strings.Builder, options: XML_Decode_Options) -> (in_data: bool, err: Error) {
assert(t != nil && t.r == '<')
@@ -304,20 +299,14 @@ _handle_xml_special :: proc(t: ^Tokenizer, builder: ^strings.Builder, options: X
t.read_offset += len(CDATA_START) - 1
if .Unbox_CDATA in options && .Decode_CDATA in options {
/*
We're unboxing _and_ decoding CDATA
*/
// We're unboxing _and_ decoding CDATA
return true, .None
}
/*
CDATA is passed through.
*/
// CDATA is passed through.
offset := t.offset
/*
Scan until end of CDATA.
*/
// Scan until end of CDATA.
for {
advance(t) or_return
if t.r < 0 { return true, .CDATA_Not_Terminated }
@@ -341,14 +330,10 @@ _handle_xml_special :: proc(t: ^Tokenizer, builder: ^strings.Builder, options: X
} else if string(t.src[t.offset:][:len(COMMENT_START)]) == COMMENT_START {
t.read_offset += len(COMMENT_START)
/*
Comment is passed through by default.
*/
// Comment is passed through by default.
offset := t.offset
/*
Scan until end of Comment.
*/
// Scan until end of Comment.
for {
advance(t) or_return
if t.r < 0 { return true, .Comment_Not_Terminated }

View File

@@ -2,8 +2,8 @@ package encoding_hex
import "core:strings"
encode :: proc(src: []byte, allocator := context.allocator) -> []byte #no_bounds_check {
dst := make([]byte, len(src) * 2, allocator)
encode :: proc(src: []byte, allocator := context.allocator, loc := #caller_location) -> []byte #no_bounds_check {
dst := make([]byte, len(src) * 2, allocator, loc)
for i, j := 0, 0; i < len(src); i += 1 {
v := src[i]
dst[j] = HEXTABLE[v>>4]
@@ -15,12 +15,12 @@ encode :: proc(src: []byte, allocator := context.allocator) -> []byte #no_bounds
}
decode :: proc(src: []byte, allocator := context.allocator) -> (dst: []byte, ok: bool) #no_bounds_check {
decode :: proc(src: []byte, allocator := context.allocator, loc := #caller_location) -> (dst: []byte, ok: bool) #no_bounds_check {
if len(src) % 2 == 1 {
return
}
dst = make([]byte, len(src) / 2, allocator)
dst = make([]byte, len(src) / 2, allocator, loc)
for i, j := 0, 1; j < len(src); j += 2 {
p := src[j-1]
q := src[j]
@@ -69,5 +69,4 @@ hex_digit :: proc(char: byte) -> (u8, bool) {
case 'A' ..= 'F': return char - 'A' + 10, true
case: return 0, false
}
}
}

View File

@@ -160,34 +160,35 @@ CONVENTION_SOFT_TRANSFORM :: "transform"
/* destroy procedures */
meta_destroy :: proc(meta: Meta, allocator := context.allocator) {
meta_destroy :: proc(meta: Meta, allocator := context.allocator, loc := #caller_location) {
if nested, ok := meta.value.([]Meta); ok {
for m in nested {
meta_destroy(m)
meta_destroy(m, loc=loc)
}
delete(nested, allocator)
delete(nested, allocator, loc=loc)
}
}
nodes_destroy :: proc(nodes: []Node, allocator := context.allocator) {
nodes_destroy :: proc(nodes: []Node, allocator := context.allocator, loc := #caller_location) {
for node in nodes {
for meta in node.meta_data {
meta_destroy(meta)
meta_destroy(meta, loc=loc)
}
delete(node.meta_data, allocator)
delete(node.meta_data, allocator, loc=loc)
switch n in node.content {
case Node_Geometry:
delete(n.corner_stack, allocator)
delete(n.edge_stack, allocator)
delete(n.face_stack, allocator)
delete(n.corner_stack, allocator, loc=loc)
delete(n.vertex_stack, allocator, loc=loc)
delete(n.edge_stack, allocator, loc=loc)
delete(n.face_stack, allocator, loc=loc)
case Node_Image:
delete(n.image_stack, allocator)
delete(n.image_stack, allocator, loc=loc)
}
}
delete(nodes, allocator)
delete(nodes, allocator, loc=loc)
}
file_destroy :: proc(file: File) {
nodes_destroy(file.nodes, file.allocator)
delete(file.backing, file.allocator)
}
file_destroy :: proc(file: File, loc := #caller_location) {
nodes_destroy(file.nodes, file.allocator, loc=loc)
delete(file.backing, file.allocator, loc=loc)
}

View File

@@ -11,24 +11,21 @@ Read_Error :: enum {
Unable_To_Read_File,
}
read_from_file :: proc(filename: string, print_error := false, allocator := context.allocator) -> (file: File, err: Read_Error) {
read_from_file :: proc(filename: string, print_error := false, allocator := context.allocator, loc := #caller_location) -> (file: File, err: Read_Error) {
context.allocator = allocator
data, ok := os.read_entire_file(filename)
data, ok := os.read_entire_file(filename, allocator, loc)
if !ok {
err = .Unable_To_Read_File
delete(data, allocator, loc)
return
}
defer if !ok {
delete(data)
} else {
file.backing = data
}
file, err = read(data, filename, print_error, allocator)
file, err = read(data, filename, print_error, allocator, loc)
file.backing = data
return
}
read :: proc(data: []byte, filename := "<input>", print_error := false, allocator := context.allocator) -> (file: File, err: Read_Error) {
read :: proc(data: []byte, filename := "<input>", print_error := false, allocator := context.allocator, loc := #caller_location) -> (file: File, err: Read_Error) {
Reader :: struct {
filename: string,
data: []byte,
@@ -79,8 +76,8 @@ read :: proc(data: []byte, filename := "<input>", print_error := false, allocato
return string(data[:len]), nil
}
read_meta :: proc(r: ^Reader, capacity: u32le) -> (meta_data: []Meta, err: Read_Error) {
meta_data = make([]Meta, int(capacity))
read_meta :: proc(r: ^Reader, capacity: u32le, allocator := context.allocator, loc := #caller_location) -> (meta_data: []Meta, err: Read_Error) {
meta_data = make([]Meta, int(capacity), allocator=allocator)
count := 0
defer meta_data = meta_data[:count]
for &m in meta_data {
@@ -111,10 +108,10 @@ read :: proc(data: []byte, filename := "<input>", print_error := false, allocato
return
}
read_layer_stack :: proc(r: ^Reader, capacity: u32le) -> (layers: Layer_Stack, err: Read_Error) {
read_layer_stack :: proc(r: ^Reader, capacity: u32le, allocator := context.allocator, loc := #caller_location) -> (layers: Layer_Stack, err: Read_Error) {
stack_count := read_value(r, u32le) or_return
layer_count := 0
layers = make(Layer_Stack, stack_count)
layers = make(Layer_Stack, stack_count, allocator=allocator, loc=loc)
defer layers = layers[:layer_count]
for &layer in layers {
layer.name = read_name(r) or_return
@@ -170,7 +167,8 @@ read :: proc(data: []byte, filename := "<input>", print_error := false, allocato
node_count := 0
file.header = header^
file.nodes = make([]Node, header.internal_node_count)
file.nodes = make([]Node, header.internal_node_count, allocator=allocator, loc=loc)
file.allocator = allocator
defer if err != nil {
nodes_destroy(file.nodes)
file.nodes = nil
@@ -198,15 +196,15 @@ read :: proc(data: []byte, filename := "<input>", print_error := false, allocato
case .Geometry:
g: Node_Geometry
g.vertex_count = read_value(r, u32le) or_return
g.vertex_stack = read_layer_stack(r, g.vertex_count) or_return
g.edge_corner_count = read_value(r, u32le) or_return
g.corner_stack = read_layer_stack(r, g.edge_corner_count) or_return
g.vertex_count = read_value(r, u32le) or_return
g.vertex_stack = read_layer_stack(r, g.vertex_count, loc=loc) or_return
g.edge_corner_count = read_value(r, u32le) or_return
g.corner_stack = read_layer_stack(r, g.edge_corner_count, loc=loc) or_return
if header.version > 2 {
g.edge_stack = read_layer_stack(r, g.edge_corner_count) or_return
g.edge_stack = read_layer_stack(r, g.edge_corner_count, loc=loc) or_return
}
g.face_count = read_value(r, u32le) or_return
g.face_stack = read_layer_stack(r, g.face_count) or_return
g.face_count = read_value(r, u32le) or_return
g.face_stack = read_layer_stack(r, g.face_count, loc=loc) or_return
node.content = g
@@ -233,4 +231,4 @@ read :: proc(data: []byte, filename := "<input>", print_error := false, allocato
}
return
}
}

189
core/encoding/ini/ini.odin Normal file
View File

@@ -0,0 +1,189 @@
package encoding_ini
import "base:runtime"
import "base:intrinsics"
import "core:strings"
import "core:strconv"
import "core:io"
import "core:os"
import "core:fmt"
_ :: fmt
// Options controlling how INI text is parsed.
Options :: struct {
	// Prefix that marks a comment line; set to "" to disable comment skipping.
	comment:        string,
	// When true, keys are lowercased as they are inserted into a `Map`.
	key_lower_case: bool,
}

// Default parse options: `;` comments, keys kept in their original case.
DEFAULT_OPTIONS :: Options {
	comment        = ";",
	key_lower_case = false,
}
// Iterator state for `iterate`; create one with `iterator_from_string`.
Iterator :: struct {
	// Name of the section the most recently returned pair belongs to ("" before any `[section]`).
	section: string,
	// Remaining unparsed input; consumed line by line.
	_src:    string,
	options: Options,
}
// Creates an `Iterator` that walks the key=value pairs of `src`.
iterator_from_string :: proc(src: string, options := DEFAULT_OPTIONS) -> Iterator {
	it: Iterator
	it.section = ""
	it.options = options
	it._src    = src
	return it
}
// Returns the next raw `key` and `value` pair. `ok` is false when no more
// key=value pairs can be found.
// The key and value may be quoted, which may require the use of `strconv.unquote_string`.
iterate :: proc(it: ^Iterator) -> (key, value: string, ok: bool) {
	for line_ in strings.split_lines_iterator(&it._src) {
		line := strings.trim_space(line_)
		if len(line) == 0 {
			continue
		}

		// `[section]` header: remember the name and move to the next line.
		if line[0] == '[' {
			end_idx := strings.index_byte(line, ']')
			if end_idx < 0 {
				// Unterminated header; take the rest of the line as the name.
				end_idx = len(line)
			}
			it.section = line[1:end_idx]
			continue
		}

		if it.options.comment != "" && strings.has_prefix(line, it.options.comment) {
			continue
		}

		// Prefer splitting on " =" so that keys which themselves contain '='
		// (e.g. `ctrl+= = zoom_in`) are handled correctly.
		equal := strings.index(line, " =")
		quote := strings.index_byte(line, '"')
		if equal < 0 || quote > 0 && quote < equal {
			// Fall back to the first bare '='. Also taken when a quote
			// appears before the " =" match, i.e. inside a quoted key.
			equal = strings.index_byte(line, '=')
			if equal < 0 {
				continue
			}
		} else {
			equal += 1
		}

		key = strings.trim_space(line[:equal])
		value = strings.trim_space(line[equal+1:])
		ok = true
		return
	}
	// Input exhausted; reset the section for any further calls.
	it.section = ""
	return
}
Map :: distinct map[string]map[string]string
// Parses `src` as INI text and builds a `Map` of section -> key -> value.
// All section names, keys and values are unquoted/cloned with `allocator`,
// so the result is owned by the caller and must be freed with `delete_map`.
load_map_from_string :: proc(src: string, allocator: runtime.Allocator, options := DEFAULT_OPTIONS) -> (m: Map, err: runtime.Allocator_Error) {
	// Unquotes a quoted token, or clones it verbatim when it is not quoted.
	// Either way the returned string is owned by `context.allocator`.
	unquote :: proc(val: string) -> (string, runtime.Allocator_Error) {
		v, allocated, ok := strconv.unquote_string(val)
		if !ok {
			return strings.clone(val)
		}
		if allocated {
			return v, nil
		}
		return strings.clone(v)
	}

	context.allocator = allocator

	it := iterator_from_string(src, options)
	for key, value in iterate(&it) {
		section := it.section
		if section not_in m {
			section = strings.clone(section) or_return
			m[section] = {}
		}

		// store key-value pair
		pairs := &m[section]
		new_key := unquote(key) or_return
		if options.key_lower_case {
			// Lowercase the *unquoted* key; using the raw `key` here would
			// reintroduce any surrounding quotes that `unquote` removed.
			old_key := new_key
			new_key = strings.to_lower(new_key) or_return
			delete(old_key) or_return
		}
		pairs[new_key] = unquote(value) or_return
	}
	return
}
// Reads the file at `path` and parses it into a `Map`.
//
// Returns:
// - m:   the parsed map (valid only when `ok`)
// - err: any allocator error raised while building the map
// - ok:  true when both the file read and the parse succeeded
load_map_from_path :: proc(path: string, allocator: runtime.Allocator, options := DEFAULT_OPTIONS) -> (m: Map, err: runtime.Allocator_Error, ok: bool) {
	data := os.read_entire_file(path, allocator) or_return
	defer delete(data, allocator)

	m, err = load_map_from_string(string(data), allocator, options)
	// `ok` signals success. (Previously `err != nil`, which was inverted:
	// it reported failure as success and freed the map on the success path.)
	ok = err == nil
	defer if !ok {
		delete_map(m)
	}
	return
}
// Serializes `m` to INI text, allocated with `allocator`.
// I/O errors from the underlying writer are ignored; the builder cannot fail
// except through its allocator.
save_map_to_string :: proc(m: Map, allocator: runtime.Allocator) -> (data: string) {
	sb := strings.builder_make(allocator)
	w := strings.to_writer(&sb)
	_, _ = write_map(w, m)
	return strings.to_string(sb)
}
// Frees every key, value, section name and nested pair-map of `m`, then the
// map itself. The map's own allocator is used so this is safe to call from a
// context whose `context.allocator` differs from the one `m` was built with.
delete_map :: proc(m: Map) {
	allocator := m.allocator
	for section, pairs in m {
		for key, value in pairs {
			delete(key,   allocator)
			delete(value, allocator)
		}
		// Section names were cloned with the same allocator as the rest of
		// the map; free them with it explicitly rather than relying on
		// whatever `context.allocator` happens to be at call time.
		delete(section, allocator)
		// Free the inner map's backing storage too; previously it leaked.
		delete(pairs)
	}
	delete(m)
}
// Writes a `[name]` section header, terminated by a newline, to `w`.
// `n_written`, when given, is incremented by the number of bytes written.
write_section :: proc(w: io.Writer, name: string, n_written: ^int = nil) -> (n: int, err: io.Error) {
	defer if n_written != nil { n_written^ += n }
	io.write_byte  (w, '[',  &n) or_return
	io.write_string(w, name, &n) or_return
	io.write_byte  (w, ']',  &n) or_return
	// Terminate the header line; without this the first pair was emitted on
	// the same line as the header and the output could not be parsed back.
	io.write_byte  (w, '\n', &n) or_return
	return
}
// Writes `key = value` followed by a newline to `w`.
// String values with a leading or trailing space are quoted so they survive
// a round-trip through `iterate` (which trims whitespace); non-string values
// are formatted with `fmt.wprint`.
// `n_written`, when given, is incremented by the number of bytes written.
write_pair :: proc(w: io.Writer, key: string, value: $T, n_written: ^int = nil) -> (n: int, err: io.Error) {
	defer if n_written != nil { n_written^ += n }
	io.write_string(w, key,   &n) or_return
	io.write_string(w, " = ", &n) or_return
	when intrinsics.type_is_string(T) {
		val := string(value)
		if len(val) > 0 && (val[0] == ' ' || val[len(val)-1] == ' ') {
			io.write_quoted_string(w, val, n_written=&n) or_return
		} else {
			io.write_string(w, val, &n) or_return
		}
	} else {
		// NOTE(review): write errors from `fmt.wprint` are not propagated here.
		n += fmt.wprint(w, value)
	}
	io.write_byte(w, '\n', &n) or_return
	return
}
// Writes all sections and pairs of `m` to `w` in INI form.
// Returns the number of bytes written and the first I/O error, if any.
write_map :: proc(w: io.Writer, m: Map) -> (n: int, err: io.Error) {
	section_index := 0
	for section, pairs in m {
		if section_index == 0 && section == "" {
			// The unnamed "" section is emitted without a header.
			// NOTE(review): map iteration order is not defined, so the ""
			// section is only skipped when it happens to come first;
			// otherwise an empty `[]` header is written — verify intent.
			// ignore section
		} else {
			write_section(w, section, &n) or_return
		}
		for key, value in pairs {
			write_pair(w, key, value, &n) or_return
		}
		section_index += 1
	}
	return
}

View File

@@ -62,8 +62,8 @@ Marshal_Options :: struct {
mjson_skipped_first_braces_end: bool,
}
marshal :: proc(v: any, opt: Marshal_Options = {}, allocator := context.allocator) -> (data: []byte, err: Marshal_Error) {
b := strings.builder_make(allocator)
marshal :: proc(v: any, opt: Marshal_Options = {}, allocator := context.allocator, loc := #caller_location) -> (data: []byte, err: Marshal_Error) {
b := strings.builder_make(allocator, loc)
defer if err != nil {
strings.builder_destroy(&b)
}

View File

@@ -28,27 +28,27 @@ make_parser_from_string :: proc(data: string, spec := DEFAULT_SPECIFICATION, par
}
parse :: proc(data: []byte, spec := DEFAULT_SPECIFICATION, parse_integers := false, allocator := context.allocator) -> (Value, Error) {
return parse_string(string(data), spec, parse_integers, allocator)
parse :: proc(data: []byte, spec := DEFAULT_SPECIFICATION, parse_integers := false, allocator := context.allocator, loc := #caller_location) -> (Value, Error) {
return parse_string(string(data), spec, parse_integers, allocator, loc)
}
parse_string :: proc(data: string, spec := DEFAULT_SPECIFICATION, parse_integers := false, allocator := context.allocator) -> (Value, Error) {
parse_string :: proc(data: string, spec := DEFAULT_SPECIFICATION, parse_integers := false, allocator := context.allocator, loc := #caller_location) -> (Value, Error) {
context.allocator = allocator
p := make_parser_from_string(data, spec, parse_integers, allocator)
switch p.spec {
case .JSON:
return parse_object(&p)
return parse_object(&p, loc)
case .JSON5:
return parse_value(&p)
return parse_value(&p, loc)
case .SJSON:
#partial switch p.curr_token.kind {
case .Ident, .String:
return parse_object_body(&p, .EOF)
return parse_object_body(&p, .EOF, loc)
}
return parse_value(&p)
return parse_value(&p, loc)
}
return parse_object(&p)
return parse_object(&p, loc)
}
token_end_pos :: proc(tok: Token) -> Pos {
@@ -106,7 +106,7 @@ parse_comma :: proc(p: ^Parser) -> (do_break: bool) {
return false
}
parse_value :: proc(p: ^Parser) -> (value: Value, err: Error) {
parse_value :: proc(p: ^Parser, loc := #caller_location) -> (value: Value, err: Error) {
err = .None
token := p.curr_token
#partial switch token.kind {
@@ -142,13 +142,13 @@ parse_value :: proc(p: ^Parser) -> (value: Value, err: Error) {
case .String:
advance_token(p)
return unquote_string(token, p.spec, p.allocator)
return unquote_string(token, p.spec, p.allocator, loc)
case .Open_Brace:
return parse_object(p)
return parse_object(p, loc)
case .Open_Bracket:
return parse_array(p)
return parse_array(p, loc)
case:
if p.spec != .JSON {
@@ -176,7 +176,7 @@ parse_value :: proc(p: ^Parser) -> (value: Value, err: Error) {
return
}
parse_array :: proc(p: ^Parser) -> (value: Value, err: Error) {
parse_array :: proc(p: ^Parser, loc := #caller_location) -> (value: Value, err: Error) {
err = .None
expect_token(p, .Open_Bracket) or_return
@@ -184,14 +184,14 @@ parse_array :: proc(p: ^Parser) -> (value: Value, err: Error) {
array.allocator = p.allocator
defer if err != nil {
for elem in array {
destroy_value(elem)
destroy_value(elem, loc=loc)
}
delete(array)
delete(array, loc)
}
for p.curr_token.kind != .Close_Bracket {
elem := parse_value(p) or_return
append(&array, elem)
elem := parse_value(p, loc) or_return
append(&array, elem, loc)
if parse_comma(p) {
break
@@ -228,38 +228,39 @@ clone_string :: proc(s: string, allocator: mem.Allocator, loc := #caller_locatio
return
}
parse_object_key :: proc(p: ^Parser, key_allocator: mem.Allocator) -> (key: string, err: Error) {
parse_object_key :: proc(p: ^Parser, key_allocator: mem.Allocator, loc := #caller_location) -> (key: string, err: Error) {
tok := p.curr_token
if p.spec != .JSON {
if allow_token(p, .Ident) {
return clone_string(tok.text, key_allocator)
return clone_string(tok.text, key_allocator, loc)
}
}
if tok_err := expect_token(p, .String); tok_err != nil {
err = .Expected_String_For_Object_Key
return
}
return unquote_string(tok, p.spec, key_allocator)
return unquote_string(tok, p.spec, key_allocator, loc)
}
parse_object_body :: proc(p: ^Parser, end_token: Token_Kind) -> (obj: Object, err: Error) {
obj.allocator = p.allocator
parse_object_body :: proc(p: ^Parser, end_token: Token_Kind, loc := #caller_location) -> (obj: Object, err: Error) {
obj = make(Object, allocator=p.allocator, loc=loc)
defer if err != nil {
for key, elem in obj {
delete(key, p.allocator)
destroy_value(elem)
delete(key, p.allocator, loc)
destroy_value(elem, loc=loc)
}
delete(obj)
delete(obj, loc)
}
for p.curr_token.kind != end_token {
key := parse_object_key(p, p.allocator) or_return
key := parse_object_key(p, p.allocator, loc) or_return
parse_colon(p) or_return
elem := parse_value(p) or_return
elem := parse_value(p, loc) or_return
if key in obj {
err = .Duplicate_Object_Key
delete(key, p.allocator)
delete(key, p.allocator, loc)
return
}
@@ -267,7 +268,7 @@ parse_object_body :: proc(p: ^Parser, end_token: Token_Kind) -> (obj: Object, er
// inserting empty key/values into the object and for those we do not
// want to allocate anything
if key != "" {
reserve_error := reserve(&obj, len(obj) + 1)
reserve_error := reserve(&obj, len(obj) + 1, loc)
if reserve_error == mem.Allocator_Error.Out_Of_Memory {
return nil, .Out_Of_Memory
}
@@ -281,9 +282,9 @@ parse_object_body :: proc(p: ^Parser, end_token: Token_Kind) -> (obj: Object, er
return obj, .None
}
parse_object :: proc(p: ^Parser) -> (value: Value, err: Error) {
parse_object :: proc(p: ^Parser, loc := #caller_location) -> (value: Value, err: Error) {
expect_token(p, .Open_Brace) or_return
obj := parse_object_body(p, .Close_Brace) or_return
obj := parse_object_body(p, .Close_Brace, loc) or_return
expect_token(p, .Close_Brace) or_return
return obj, .None
}
@@ -480,4 +481,4 @@ unquote_string :: proc(token: Token, spec: Specification, allocator := context.a
}
return string(b[:w]), nil
}
}

View File

@@ -89,22 +89,22 @@ Error :: enum {
destroy_value :: proc(value: Value, allocator := context.allocator) {
destroy_value :: proc(value: Value, allocator := context.allocator, loc := #caller_location) {
context.allocator = allocator
#partial switch v in value {
case Object:
for key, elem in v {
delete(key)
destroy_value(elem)
delete(key, loc=loc)
destroy_value(elem, loc=loc)
}
delete(v)
delete(v, loc=loc)
case Array:
for elem in v {
destroy_value(elem)
destroy_value(elem, loc=loc)
}
delete(v)
delete(v, loc=loc)
case String:
delete(v)
delete(v, loc=loc)
}
}

View File

@@ -218,9 +218,7 @@ scan_identifier :: proc(t: ^Tokenizer) -> string {
for is_valid_identifier_rune(t.ch) {
advance_rune(t)
if t.ch == ':' {
/*
A namespaced attr can have at most two parts, `namespace:ident`.
*/
// A namespaced attr can have at most two parts, `namespace:ident`.
if namespaced {
break
}
@@ -268,14 +266,10 @@ scan_comment :: proc(t: ^Tokenizer) -> (comment: string, err: Error) {
return string(t.src[offset : t.offset - 1]), .None
}
/*
Skip CDATA
*/
// Skip CDATA
skip_cdata :: proc(t: ^Tokenizer) -> (err: Error) {
if t.read_offset + len(CDATA_START) >= len(t.src) {
/*
Can't be the start of a CDATA tag.
*/
// Can't be the start of a CDATA tag.
return .None
}
@@ -290,9 +284,7 @@ skip_cdata :: proc(t: ^Tokenizer) -> (err: Error) {
return .Premature_EOF
}
/*
Scan until the end of a CDATA tag.
*/
// Scan until the end of a CDATA tag.
if t.read_offset + len(CDATA_END) < len(t.src) {
if string(t.src[t.offset:][:len(CDATA_END)]) == CDATA_END {
t.read_offset += len(CDATA_END)
@@ -319,14 +311,10 @@ scan_string :: proc(t: ^Tokenizer, offset: int, close: rune = '<', consume_close
case '<':
if peek_byte(t) == '!' {
if peek_byte(t, 1) == '[' {
/*
Might be the start of a CDATA tag.
*/
// Might be the start of a CDATA tag.
skip_cdata(t) or_return
} else if peek_byte(t, 1) == '-' && peek_byte(t, 2) == '-' {
/*
Comment start. Eat comment.
*/
// Comment start. Eat comment.
t.read_offset += 3
_ = scan_comment(t) or_return
}
@@ -342,17 +330,13 @@ scan_string :: proc(t: ^Tokenizer, offset: int, close: rune = '<', consume_close
}
if t.ch == close {
/*
If it's not a CDATA or comment, it's the end of this body.
*/
// If it's not a CDATA or comment, it's the end of this body.
break loop
}
advance_rune(t)
}
/*
Strip trailing whitespace.
*/
// Strip trailing whitespace.
lit := string(t.src[offset : t.offset])
end := len(lit)
@@ -369,11 +353,6 @@ scan_string :: proc(t: ^Tokenizer, offset: int, close: rune = '<', consume_close
if consume_close {
advance_rune(t)
}
/*
TODO: Handle decoding escape characters and unboxing CDATA.
*/
return lit, err
}
@@ -384,7 +363,7 @@ peek :: proc(t: ^Tokenizer) -> (token: Token) {
return token
}
scan :: proc(t: ^Tokenizer) -> Token {
scan :: proc(t: ^Tokenizer, multiline_string := false) -> Token {
skip_whitespace(t)
offset := t.offset
@@ -418,7 +397,7 @@ scan :: proc(t: ^Tokenizer) -> Token {
case '"', '\'':
kind = .Invalid
lit, err = scan_string(t, t.offset, ch, true, false)
lit, err = scan_string(t, t.offset, ch, true, multiline_string)
if err == .None {
kind = .String
}
@@ -435,4 +414,4 @@ scan :: proc(t: ^Tokenizer) -> Token {
lit = string(t.src[offset : t.offset])
}
return Token{kind, lit, pos}
}
}

View File

@@ -203,9 +203,7 @@ parse_bytes :: proc(data: []u8, options := DEFAULT_OPTIONS, path := "", error_ha
doc.elements = make([dynamic]Element, 1024, 1024, allocator)
// strings.intern_init(&doc.intern, allocator, allocator)
err = .Unexpected_Token
err = .Unexpected_Token
element, parent: Element_ID
open: Token
@@ -259,8 +257,8 @@ parse_bytes :: proc(data: []u8, options := DEFAULT_OPTIONS, path := "", error_ha
case .Slash:
// Empty tag. Close it.
expect(t, .Gt) or_return
parent = doc.elements[element].parent
element = parent
parent = doc.elements[element].parent
element = parent
case:
error(t, t.offset, "Expected close tag, got: %#v\n", end_token)
@@ -276,8 +274,8 @@ parse_bytes :: proc(data: []u8, options := DEFAULT_OPTIONS, path := "", error_ha
error(t, t.offset, "Mismatched Closing Tag. Expected %v, got %v\n", doc.elements[element].ident, ident.text)
return doc, .Mismatched_Closing_Tag
}
parent = doc.elements[element].parent
element = parent
parent = doc.elements[element].parent
element = parent
} else if open.kind == .Exclaim {
// <!
@@ -463,8 +461,8 @@ validate_options :: proc(options: Options) -> (validated: Options, err: Error) {
return validated, .None
}
expect :: proc(t: ^Tokenizer, kind: Token_Kind) -> (tok: Token, err: Error) {
tok = scan(t)
expect :: proc(t: ^Tokenizer, kind: Token_Kind, multiline_string := false) -> (tok: Token, err: Error) {
tok = scan(t, multiline_string=multiline_string)
if tok.kind == kind { return tok, .None }
error(t, t.offset, "Expected \"%v\", got \"%v\".", kind, tok.kind)
@@ -480,7 +478,13 @@ parse_attribute :: proc(doc: ^Document) -> (attr: Attribute, offset: int, err: E
offset = t.offset - len(key.text)
_ = expect(t, .Eq) or_return
value := expect(t, .String) or_return
value := expect(t, .String, multiline_string=true) or_return
normalized, normalize_err := entity.decode_xml(value.text, {.Normalize_Whitespace}, doc.allocator)
if normalize_err == .None {
append(&doc.strings_to_free, normalized)
value.text = normalized
}
attr.key = key.text
attr.val = value.text

View File

@@ -2,6 +2,7 @@ package fmt
import "base:intrinsics"
import "base:runtime"
import "core:math"
import "core:math/bits"
import "core:mem"
import "core:io"
@@ -1494,7 +1495,7 @@ fmt_pointer :: proc(fi: ^Info, p: rawptr, verb: rune) {
u := u64(uintptr(p))
switch verb {
case 'p', 'v', 'w':
if !fi.hash && verb == 'v' {
if !fi.hash {
io.write_string(fi.writer, "0x", &fi.n)
}
_fmt_int(fi, u, 16, false, 8*size_of(rawptr), __DIGITS_UPPER)
@@ -2968,6 +2969,21 @@ fmt_value :: proc(fi: ^Info, v: any, verb: rune) {
fmt_bit_field(fi, v, verb, info, "")
}
}
// This proc helps keep some of the code around whether or not to print an
// intermediate plus sign in complexes and quaternions more readable.
@(private)
_cq_should_print_intermediate_plus :: proc "contextless" (fi: ^Info, f: f64) -> bool {
if !fi.plus && f >= 0 {
#partial switch math.classify(f) {
case .Neg_Zero, .Inf:
// These two classes print their own signs.
return false
case:
return true
}
}
return false
}
// Formats a complex number based on the given formatting verb
//
// Inputs:
@@ -2981,7 +2997,7 @@ fmt_complex :: proc(fi: ^Info, c: complex128, bits: int, verb: rune) {
case 'f', 'F', 'v', 'h', 'H', 'w':
r, i := real(c), imag(c)
fmt_float(fi, r, bits/2, verb)
if !fi.plus && i >= 0 {
if _cq_should_print_intermediate_plus(fi, i) {
io.write_rune(fi.writer, '+', &fi.n)
}
fmt_float(fi, i, bits/2, verb)
@@ -3007,19 +3023,19 @@ fmt_quaternion :: proc(fi: ^Info, q: quaternion256, bits: int, verb: rune) {
fmt_float(fi, r, bits/4, verb)
if !fi.plus && i >= 0 {
if _cq_should_print_intermediate_plus(fi, i) {
io.write_rune(fi.writer, '+', &fi.n)
}
fmt_float(fi, i, bits/4, verb)
io.write_rune(fi.writer, 'i', &fi.n)
if !fi.plus && j >= 0 {
if _cq_should_print_intermediate_plus(fi, j) {
io.write_rune(fi.writer, '+', &fi.n)
}
fmt_float(fi, j, bits/4, verb)
io.write_rune(fi.writer, 'j', &fi.n)
if !fi.plus && k >= 0 {
if _cq_should_print_intermediate_plus(fi, k) {
io.write_rune(fi.writer, '+', &fi.n)
}
fmt_float(fi, k, bits/4, verb)

View File

@@ -1,5 +1,6 @@
//+build !freestanding
//+build !js
//+build !orca
package fmt
import "base:runtime"

746
core/image/bmp/bmp.odin Normal file
View File

@@ -0,0 +1,746 @@
// package bmp implements a Microsoft BMP image reader
package core_image_bmp
import "core:image"
import "core:bytes"
import "core:compress"
import "core:mem"
import "base:intrinsics"
import "base:runtime"
// Convenient aliases for the shared `core:image` types.
Error      :: image.Error
Image      :: image.Image
Options    :: image.Options
RGB_Pixel  :: image.RGB_Pixel
RGBA_Pixel :: image.RGBA_Pixel

// Size in bytes of the fixed BMP file header preceding the info header.
FILE_HEADER_SIZE :: 14
// File header plus the 4-byte info-header-size field that follows it.
INFO_STUB_SIZE   :: FILE_HEADER_SIZE + size_of(image.BMP_Version)
// Serializes `img` into `output` as an uncompressed V3 BMP.
//
// Only 8-bit-per-channel images with 3 (RGB) or 4 (RGBA) channels are
// accepted; anything else returns `.Invalid_Input_Image`.
// NOTE(review): `options` is currently unused by this writer.
save_to_buffer :: proc(output: ^bytes.Buffer, img: ^Image, options := Options{}, allocator := context.allocator) -> (err: Error) {
	context.allocator = allocator

	if img == nil {
		return .Invalid_Input_Image
	}
	if output == nil {
		return .Invalid_Output
	}

	pixels := img.width * img.height
	if pixels == 0 || pixels > image.MAX_DIMENSIONS {
		return .Invalid_Input_Image
	}

	// While the BMP spec (and our loader) support more fanciful image types,
	// `bmp.save` supports only 3 and 4 channel images with a bit depth of 8.
	if img.depth != 8 || img.channels < 3 || img.channels > 4 {
		return .Invalid_Input_Image
	}
	if img.channels * pixels != len(img.pixels.buf) {
		return .Invalid_Input_Image
	}

	// Calculate and allocate size.
	header_size       := u32le(image.BMP_Version.V3)
	total_header_size := header_size + 14 // file header = 14
	// Each pixel row is padded to a multiple of 4 bytes.
	pixel_count_bytes := u32le(align4(img.width * img.channels) * img.height)

	header := image.BMP_Header{
		// File header
		magic        = .Bitmap,
		size         = total_header_size + pixel_count_bytes,
		_res1        = 0,
		_res2        = 0,
		pixel_offset = total_header_size,

		// V3
		info_size        = .V3,
		width            = i32le(img.width),
		height           = i32le(img.height),
		planes           = 1,
		bpp              = u16le(8 * img.channels),
		compression      = .RGB,
		image_size       = pixel_count_bytes,
		pels_per_meter   = {2835, 2835}, // 72 DPI
		colors_used      = 0,
		colors_important = 0,
	}

	written := 0
	if resize(&output.buf, int(header.size)) != nil {
		return .Unable_To_Allocate_Or_Resize
	}

	// Only the file-header + V3 prefix of the (larger) header struct is
	// copied out; the V4/V5 tail fields are not written.
	header_bytes := transmute([size_of(image.BMP_Header)]u8)header
	written += int(total_header_size)
	copy(output.buf[:], header_bytes[:written])

	switch img.channels {
	case 3:
		row_bytes  := img.width * img.channels
		row_padded := align4(row_bytes)

		// BMP stores rows bottom-up and channels in BGR order.
		pixels := mem.slice_data_cast([]RGB_Pixel, img.pixels.buf[:])
		for y in 0..<img.height {
			row_offset := row_padded * (img.height - y - 1) + written
			for x in 0..<img.width {
				pix_offset := 3 * x
				output.buf[row_offset + pix_offset + 0] = pixels[0].b
				output.buf[row_offset + pix_offset + 1] = pixels[0].g
				output.buf[row_offset + pix_offset + 2] = pixels[0].r
				pixels = pixels[1:]
			}
		}
	case 4:
		// 4-channel rows are inherently 4-byte aligned; no padding needed.
		row_bytes := img.width * img.channels

		// BMP stores rows bottom-up and channels in BGRA order.
		pixels := mem.slice_data_cast([]RGBA_Pixel, img.pixels.buf[:])
		for y in 0..<img.height {
			row_offset := row_bytes * (img.height - y - 1) + written
			for x in 0..<img.width {
				pix_offset := 4 * x
				output.buf[row_offset + pix_offset + 0] = pixels[0].b
				output.buf[row_offset + pix_offset + 1] = pixels[0].g
				output.buf[row_offset + pix_offset + 2] = pixels[0].r
				output.buf[row_offset + pix_offset + 3] = pixels[0].a
				pixels = pixels[1:]
			}
		}
	}
	return
}
// Decodes a BMP image from an in-memory byte slice by wrapping it in a
// memory-input context and delegating to `load_from_context`.
load_from_bytes :: proc(data: []byte, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
	ctx := compress.Context_Memory_Input{
		input_data = data,
	}
	return load_from_context(&ctx, options, allocator)
}
// Decodes a BMP image from a generic compress context (memory or stream).
//
// Handles Windows headers from truncated 16-byte variants up to V5, plus the
// OS/2 v1 header, with RGB, bit-field and RLE4/RLE8 compression. CMYK and
// PNG/JPEG-embedded BMPs return `.Unsupported_Compression`.
@(optimization_mode="speed")
load_from_context :: proc(ctx: ^$C, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
	context.allocator = allocator
	options := options

	// For compress.read_slice(), until that's rewritten to not use temp allocator
	runtime.DEFAULT_TEMP_ALLOCATOR_TEMP_GUARD()

	// `.info` is shorthand for metadata-only decoding.
	if .info in options {
		options |= {.return_metadata, .do_not_decompress_image}
		options -= {.info}
	}

	// `.return_metadata` subsumes `.return_header`.
	if .return_header in options && .return_metadata in options {
		options -= {.return_header}
	}

	info_buf: [size_of(image.BMP_Header)]u8

	// Read file header (14) + info size (4)
	stub_data := compress.read_slice(ctx, INFO_STUB_SIZE) or_return
	copy(info_buf[:], stub_data[:])
	stub_info := transmute(image.BMP_Header)info_buf

	if stub_info.magic != .Bitmap {
		// Distinguish known (but unsupported) OS/2 magics from garbage input.
		for v in image.BMP_Magic {
			if stub_info.magic == v {
				return img, .Unsupported_OS2_File
			}
		}
		return img, .Invalid_Signature
	}

	info: image.BMP_Header

	switch stub_info.info_size {
	case .OS2_v1:
		// Read the remainder of the header
		os2_data := compress.read_data(ctx, image.OS2_Header) or_return
		info = transmute(image.BMP_Header)info_buf
		// OS/2 v1 dimension fields are narrower; widen into the common header.
		info.width  = i32le(os2_data.width)
		info.height = i32le(os2_data.height)
		info.planes = os2_data.planes
		info.bpp    = os2_data.bpp

		switch info.bpp {
		case 1, 4, 8, 24:
		case:
			return img, .Unsupported_BPP
		}

	case .ABBR_16 ..= .V5:
		// Sizes include V3, V4, V5 and OS2v2 outright, but can also handle truncated headers.
		// Sometimes called BITMAPV2INFOHEADER or BITMAPV3INFOHEADER.
		// Let's just try to process it.
		to_read := int(stub_info.info_size) - size_of(image.BMP_Version)
		info_data := compress.read_slice(ctx, to_read) or_return
		copy(info_buf[INFO_STUB_SIZE:], info_data[:])

		// Update info struct with the rest of the data we read
		info = transmute(image.BMP_Header)info_buf

	case:
		return img, .Unsupported_BMP_Version
	}

	/* TODO(Jeroen): Add a "strict" option to catch these non-issues that violate spec?
		if info.planes != 1 {
			return img, .Invalid_Planes_Value
		}
	*/

	if img == nil {
		img = new(Image)
	}

	img.which = .BMP
	img.metadata = new_clone(image.BMP_Info{
		info = info,
	})

	// A negative height means top-down row order; decode as-is and flip below.
	img.width    = abs(int(info.width))
	img.height   = abs(int(info.height))
	img.channels = 3
	img.depth    = 8

	if img.width == 0 || img.height == 0 {
		return img, .Invalid_Image_Dimensions
	}

	total_pixels := abs(img.width * img.height)
	if total_pixels > image.MAX_DIMENSIONS {
		return img, .Image_Dimensions_Too_Large
	}

	// TODO(Jeroen): Handle RGBA.

	switch info.compression {
	case .Bit_Fields, .Alpha_Bit_Fields:
		switch info.bpp {
		case 16, 32:
			make_output(img, allocator) or_return
			decode_rgb(ctx, img, info, allocator) or_return
		case:
			if is_os2(info.info_size) {
				return img, .Unsupported_Compression
			}
			return img, .Unsupported_BPP
		}

	case .RGB:
		make_output(img, allocator) or_return
		decode_rgb(ctx, img, info, allocator) or_return

	case .RLE4, .RLE8:
		make_output(img, allocator) or_return
		decode_rle(ctx, img, info, allocator) or_return

	case .CMYK, .CMYK_RLE4, .CMYK_RLE8: fallthrough
	case .PNG, .JPEG: fallthrough
	case: return img, .Unsupported_Compression
	}

	// Flipped vertically
	if info.height < 0 {
		pixels := mem.slice_data_cast([]RGB_Pixel, img.pixels.buf[:])
		for y in 0..<img.height / 2 {
			for x in 0..<img.width {
				top := y * img.width + x
				bot := (img.height - y - 1) * img.width + x
				pixels[top], pixels[bot] = pixels[bot], pixels[top]
			}
		}
	}
	return
}
// Reports whether `version` denotes one of the two OS/2 BMP header variants.
is_os2 :: proc(version: image.BMP_Version) -> (res: bool) {
	#partial switch version {
	case .OS2_v1: return true
	case .OS2_v2: return true
	}
	return false
}
// Allocates the pixel buffer for `img` (width * height * channels bytes).
// Returns `.Unable_To_Allocate_Or_Resize` when the allocation falls short.
make_output :: proc(img: ^Image, allocator := context.allocator) -> (err: Error) {
	assert(img != nil)

	bytes_needed := img.width * img.height * img.channels
	img.pixels.buf = make([dynamic]u8, bytes_needed, allocator)
	if len(img.pixels.buf) == bytes_needed {
		return nil
	}
	return .Unable_To_Allocate_Or_Resize
}
// Stores `pix` at image coordinate (x, y), flipping y because BMP rows are
// kept bottom-up in the pixel buffer. Out-of-range coordinates mean the
// stream is corrupt.
write :: proc(img: ^Image, x, y: int, pix: RGB_Pixel) -> (err: Error) {
	if x >= img.width || y >= img.height {
		return .Corrupt
	}
	assert(img.width >= 1 && img.height >= 1)

	dst := mem.slice_data_cast([]RGB_Pixel, img.pixels.buf[:])
	dst[(img.height - y - 1) * img.width + x] = pix
	return
}
// Per-channel (R, G, B, A) bit masks with their derived shift amounts and
// bit widths, used to unpack 16/32-bpp bit-field pixels.
Bitmask :: struct {
	mask:  [4]u32le `fmt:"b"`,
	shift: [4]u32le,
	bits:  [4]u32le,
}
// Produces the channel bit masks/shifts/widths for `info`: implied by the
// bpp for `.RGB`, or taken from the header / read from the stream for
// bit-field images.
//
// `read` is the number of bit-field bytes consumed from the stream so the
// caller can adjust its pixel-data offset accordingly.
read_or_make_bit_masks :: proc(ctx: ^$C, info: image.BMP_Header) -> (res: Bitmask, read: int, err: Error) {
	ctz :: intrinsics.count_trailing_zeros
	c1s :: intrinsics.count_ones

	#partial switch info.compression {
	case .RGB:
		// No explicit bit fields: use the standard layouts for 16/32 bpp.
		switch info.bpp {
		case 16:
			// 5:5:5, no alpha.
			return {
				mask = {31 << 10, 31 << 5, 31, 0},
				shift = { 10, 5, 0, 0},
				bits = { 5, 5, 5, 0},
			}, int(4 * info.colors_used), nil
		case 32:
			// 8:8:8:8.
			return {
				mask = {255 << 16, 255 << 8, 255, 255 << 24},
				shift = { 16, 8, 0, 24},
				bits = { 8, 8, 8, 8},
			}, int(4 * info.colors_used), nil
		case: return {}, 0, .Unsupported_BPP
		}

	case .Bit_Fields, .Alpha_Bit_Fields:
		bf := info.masks
		alpha_mask := false
		bit_count: u32le

		#partial switch info.info_size {
		case .ABBR_52 ..= .V5:
			// All possible BMP header sizes 52+ bytes long, includes V4 + V5
			// Bit fields were read as part of the header

			// V3 header is 40 bytes. We need 56 at a minimum for RGBA bit fields in the next section.
			if info.info_size >= .ABBR_56 {
				alpha_mask = true
			}

		case .V3:
			// Version 3 doesn't have a bit field embedded, but can still have a 3 or 4 color bit field.
			// Because it wasn't read as part of the header, we need to read it now.
			if info.compression == .Alpha_Bit_Fields {
				bf = compress.read_data(ctx, [4]u32le) or_return
				alpha_mask = true
				read = 16
			} else {
				bf.xyz = compress.read_data(ctx, [3]u32le) or_return
				read = 12
			}

		case:
			// Bit fields are unhandled for this BMP version
			return {}, 0, .Bitfield_Version_Unhandled
		}

		// Derive each channel's shift (trailing zeros) and width (popcount)
		// directly from its mask.
		if alpha_mask {
			res = {
				mask = {bf.r, bf.g, bf.b, bf.a},
				shift = {ctz(bf.r), ctz(bf.g), ctz(bf.b), ctz(bf.a)},
				bits = {c1s(bf.r), c1s(bf.g), c1s(bf.b), c1s(bf.a)},
			}
			bit_count = res.bits.r + res.bits.g + res.bits.b + res.bits.a
		} else {
			res = {
				mask = {bf.r, bf.g, bf.b, 0},
				shift = {ctz(bf.r), ctz(bf.g), ctz(bf.b), 0},
				bits = {c1s(bf.r), c1s(bf.g), c1s(bf.b), 0},
			}
			bit_count = res.bits.r + res.bits.g + res.bits.b
		}

		// Sanity checks: combined channel widths can't exceed the pixel size,
		// and the individual channel masks must not overlap.
		if bit_count > u32le(info.bpp) {
			err = .Bitfield_Sum_Exceeds_BPP
		}

		overlapped := res.mask.r | res.mask.g | res.mask.b | res.mask.a
		if c1s(overlapped) < bit_count {
			err = .Bitfield_Overlapped
		}
		return res, read, err

	case:
		return {}, 0, .Unsupported_Compression
	}
	// Unreachable; all cases above return.
	return
}
// Extracts one channel from `val` using its bit-field description and
// rescales it to the full 0..255 range.
scale :: proc(val: $T, mask, shift, bits: u32le) -> (res: u8) {
	// A zero-width channel would divide by zero below; treat it as absent.
	if bits == 0 {
		return 0
	}
	channel     := (u32le(val) & mask) >> shift
	channel_max := u32le(1 << bits) - 1
	return u8(channel * 255 / channel_max)
}
// Decodes uncompressed (.RGB) or bit-field BMP pixel data at 1, 2, 4, 8, 16,
// 24 or 32 bits per pixel into `img` as 8-bit RGB.
//
// `pixel_offset` starts as the gap between the end of the parsed header and the
// pixel data; palette and bit-field reads subtract what they consume and the
// remainder is skipped before row decoding begins.
//
// BMP scanlines are zero-padded to a multiple of 4 bytes, so every row stride
// below is rounded up with `align4` (previously the 1/2/4-bpp paths read
// unpadded rows, drifting out of sync on widths whose packed row length is not
// a multiple of 4). `skip_space` failures are now propagated as well.
decode_rgb :: proc(ctx: ^$C, img: ^Image, info: image.BMP_Header, allocator := context.allocator) -> (err: Error) {
	pixel_offset := int(info.pixel_offset)
	pixel_offset -= int(info.info_size) + FILE_HEADER_SIZE

	palette: [256]RGBA_Pixel

	// Palette size is info.colors_used if populated. If not it's min(1 << bpp, offset to the pixels / channel count)
	colors_used := min(256, 1 << info.bpp if info.colors_used == 0 else info.colors_used)
	max_colors := pixel_offset / 3 if info.info_size == .OS2_v1 else pixel_offset / 4
	colors_used = min(colors_used, u32le(max_colors))

	switch info.bpp {
	case 1:
		if info.info_size == .OS2_v1 {
			// 2 x RGB palette instead of variable RGBA palette
			for i in 0..<colors_used {
				palette[i].rgb = image.read_data(ctx, RGB_Pixel) or_return
			}
			pixel_offset -= int(3 * colors_used)
		} else {
			for i in 0..<colors_used {
				palette[i] = image.read_data(ctx, RGBA_Pixel) or_return
			}
			pixel_offset -= int(4 * colors_used)
		}
		skip_space(ctx, pixel_offset) or_return

		// 8 pixels per byte, MSB first; rows padded to a DWORD boundary.
		stride := align4((img.width + 7) / 8)
		for y in 0..<img.height {
			data := compress.read_slice(ctx, stride) or_return
			for x in 0..<img.width {
				shift := u8(7 - (x & 0x07))
				p := (data[x / 8] >> shift) & 0x01
				write(img, x, y, palette[p].bgr) or_return
			}
		}
	case 2: // Non-standard on modern Windows, but was allowed on WinCE
		for i in 0..<colors_used {
			palette[i] = image.read_data(ctx, RGBA_Pixel) or_return
		}
		pixel_offset -= int(4 * colors_used)
		skip_space(ctx, pixel_offset) or_return

		// 4 pixels per byte, most significant pair first; rows padded to a DWORD boundary.
		stride := align4((img.width + 3) / 4)
		for y in 0..<img.height {
			data := compress.read_slice(ctx, stride) or_return
			for x in 0..<img.width {
				// `<<` binds tighter than `-`, so this is 6, 4, 2, 0.
				shift := 6 - (x & 0x03) << 1
				p := (data[x / 4] >> u8(shift)) & 0x03
				write(img, x, y, palette[p].bgr) or_return
			}
		}
	case 4:
		if info.info_size == .OS2_v1 {
			// 16 x RGB palette instead of variable RGBA palette
			for i in 0..<colors_used {
				palette[i].rgb = image.read_data(ctx, RGB_Pixel) or_return
			}
			pixel_offset -= int(3 * colors_used)
		} else {
			for i in 0..<colors_used {
				palette[i] = image.read_data(ctx, RGBA_Pixel) or_return
			}
			pixel_offset -= int(4 * colors_used)
		}
		skip_space(ctx, pixel_offset) or_return

		// 2 pixels per byte, high nibble first; rows padded to a DWORD boundary.
		stride := align4((img.width + 1) / 2)
		for y in 0..<img.height {
			data := compress.read_slice(ctx, stride) or_return
			for x in 0..<img.width {
				p := data[x / 2] >> 4 if x & 1 == 0 else data[x / 2]
				write(img, x, y, palette[p & 0x0f].bgr) or_return
			}
		}
	case 8:
		if info.info_size == .OS2_v1 {
			// 256 x RGB palette instead of variable RGBA palette
			for i in 0..<colors_used {
				palette[i].rgb = image.read_data(ctx, RGB_Pixel) or_return
			}
			pixel_offset -= int(3 * colors_used)
		} else {
			for i in 0..<colors_used {
				palette[i] = image.read_data(ctx, RGBA_Pixel) or_return
			}
			pixel_offset -= int(4 * colors_used)
		}
		skip_space(ctx, pixel_offset) or_return

		stride := align4(img.width)
		for y in 0..<img.height {
			data := compress.read_slice(ctx, stride) or_return
			for x in 0..<img.width {
				write(img, x, y, palette[data[x]].bgr) or_return
			}
		}
	case 16:
		bm, read := read_or_make_bit_masks(ctx, info) or_return
		// Skip optional palette and other data
		pixel_offset -= read
		skip_space(ctx, pixel_offset) or_return

		stride := align4(img.width * 2)
		for y in 0..<img.height {
			data := compress.read_slice(ctx, stride) or_return
			pixels := mem.slice_data_cast([]u16le, data)
			for x in 0..<img.width {
				v := pixels[x]
				r := scale(v, bm.mask.r, bm.shift.r, bm.bits.r)
				g := scale(v, bm.mask.g, bm.shift.g, bm.bits.g)
				b := scale(v, bm.mask.b, bm.shift.b, bm.bits.b)
				write(img, x, y, RGB_Pixel{r, g, b}) or_return
			}
		}
	case 24:
		// Eat useless palette and other padding
		skip_space(ctx, pixel_offset) or_return

		stride := align4(img.width * 3)
		for y in 0..<img.height {
			data := compress.read_slice(ctx, stride) or_return
			pixels := mem.slice_data_cast([]RGB_Pixel, data)
			for x in 0..<img.width {
				write(img, x, y, pixels[x].bgr) or_return
			}
		}
	case 32:
		bm, read := read_or_make_bit_masks(ctx, info) or_return
		// Skip optional palette and other data
		pixel_offset -= read
		skip_space(ctx, pixel_offset) or_return

		// 4-byte pixels are inherently DWORD-aligned; no extra padding.
		for y in 0..<img.height {
			data := compress.read_slice(ctx, img.width * size_of(RGBA_Pixel)) or_return
			pixels := mem.slice_data_cast([]u32le, data)
			for x in 0..<img.width {
				v := pixels[x]
				r := scale(v, bm.mask.r, bm.shift.r, bm.bits.r)
				g := scale(v, bm.mask.g, bm.shift.g, bm.bits.g)
				b := scale(v, bm.mask.b, bm.shift.b, bm.bits.b)
				write(img, x, y, RGB_Pixel{r, g, b}) or_return
			}
		}
	case:
		return .Unsupported_BPP
	}
	return nil
}
// Decodes RLE4 / RLE8 compressed BMPs (info.bpp == 4 or 8) into `img` as 8-bit RGB.
//
// The whole compressed payload is buffered up front with 4 bytes of zeroed
// slack, so the multi-byte opcodes below can read their operands after the
// 2-byte length check without a separate bounds check. Opcodes:
//   count > 0          : repeat the following palette data `count` pixels
//   0, 0               : end of line
//   0, 1               : end of bitmap
//   0, 2, dx, dy       : move the cursor by (dx, dy)
//   0, n (n >= 3), ... : n literal pixels, payload padded to a 16-bit boundary
//
// NOTE(review): `allocator` is not used directly; `make`/`resize` use the
// caller's context allocator. Also, `write` errors are discarded during RLE
// playback, which clamps out-of-bounds runs instead of failing — confirm
// that this best-effort behavior is intended.
decode_rle :: proc(ctx: ^$C, img: ^Image, info: image.BMP_Header, allocator := context.allocator) -> (err: Error) {
	// Remaining gap between the parsed header and the pixel data.
	pixel_offset := int(info.pixel_offset)
	pixel_offset -= int(info.info_size) + FILE_HEADER_SIZE

	bytes_needed := size_of(RGB_Pixel) * img.height * img.width
	if resize(&img.pixels.buf, bytes_needed) != nil {
		return .Unable_To_Allocate_Or_Resize
	}
	out := mem.slice_data_cast([]RGB_Pixel, img.pixels.buf[:])
	assert(len(out) == img.height * img.width)

	palette: [256]RGBA_Pixel

	switch info.bpp {
	case 4:
		// 16-color palette; colors_used == 0 means "full palette".
		colors_used := info.colors_used if info.colors_used > 0 else 16
		colors_used = min(colors_used, 16)
		for i in 0..<colors_used {
			palette[i] = image.read_data(ctx, RGBA_Pixel) or_return
			pixel_offset -= size_of(RGBA_Pixel)
		}
		skip_space(ctx, pixel_offset)

		// Make sure the stream actually contains the advertised payload.
		pixel_size := info.size - info.pixel_offset
		remaining := compress.input_size(ctx) or_return
		if remaining < i64(pixel_size) {
			return .Corrupt
		}

		// +4 slack bytes so opcode operand reads can't run off the end.
		data := make([]u8, int(pixel_size) + 4)
		defer delete(data)
		for i in 0..<pixel_size {
			data[i] = image.read_u8(ctx) or_return
		}

		y, x := 0, 0
		index := 0
		for {
			if len(data[index:]) < 2 {
				return .Corrupt
			}
			if data[index] > 0 {
				// Run: repeat the two nibbles of the next byte, alternating.
				for count in 0..<data[index] {
					if count & 1 == 1 {
						write(img, x, y, palette[(data[index + 1] >> 0) & 0x0f].bgr)
					} else {
						write(img, x, y, palette[(data[index + 1] >> 4) & 0x0f].bgr)
					}
					x += 1
				}
				index += 2
			} else {
				switch data[index + 1] {
				case 0: // EOL
					x = 0; y += 1
					index += 2
				case 1: // EOB
					return
				case 2: // MOVE
					x += int(data[index + 2])
					y += int(data[index + 3])
					index += 4
				case: // Literals
					run_length := int(data[index + 1])
					// Nibble count rounded up to 4, halved to bytes (so the
					// payload ends on a 16-bit boundary), plus the 2 opcode bytes.
					aligned := (align4(run_length) >> 1) + 2
					if index + aligned >= len(data) {
						return .Corrupt
					}
					for count in 0..<run_length {
						val := data[index + 2 + count / 2]
						if count & 1 == 1 {
							val &= 0xf
						} else {
							val = val >> 4
						}
						write(img, x, y, palette[val].bgr)
						x += 1
					}
					index += aligned
				}
			}
		}
	case 8:
		// 256-color palette; colors_used == 0 means "full palette".
		colors_used := info.colors_used if info.colors_used > 0 else 256
		colors_used = min(colors_used, 256)
		for i in 0..<colors_used {
			palette[i] = image.read_data(ctx, RGBA_Pixel) or_return
			pixel_offset -= size_of(RGBA_Pixel)
		}
		skip_space(ctx, pixel_offset)

		// Make sure the stream actually contains the advertised payload.
		pixel_size := info.size - info.pixel_offset
		remaining := compress.input_size(ctx) or_return
		if remaining < i64(pixel_size) {
			return .Corrupt
		}

		// +4 slack bytes so opcode operand reads can't run off the end.
		data := make([]u8, int(pixel_size) + 4)
		defer delete(data)
		for i in 0..<pixel_size {
			data[i] = image.read_u8(ctx) or_return
		}

		y, x := 0, 0
		index := 0
		for {
			if len(data[index:]) < 2 {
				return .Corrupt
			}
			if data[index] > 0 {
				// Run: repeat a single palette index.
				for _ in 0..<data[index] {
					write(img, x, y, palette[data[index + 1]].bgr)
					x += 1
				}
				index += 2
			} else {
				switch data[index + 1] {
				case 0: // EOL
					x = 0; y += 1
					index += 2
				case 1: // EOB
					return
				case 2: // MOVE
					x += int(data[index + 2])
					y += int(data[index + 3])
					index += 4
				case: // Literals
					run_length := int(data[index + 1])
					// Payload padded to a 16-bit boundary, plus the 2 opcode bytes.
					aligned := align2(run_length) + 2
					if index + aligned >= len(data) {
						return .Corrupt
					}
					for count in 0..<run_length {
						write(img, x, y, palette[data[index + 2 + count]].bgr)
						x += 1
					}
					index += aligned
				}
			}
		}
	case:
		return .Unsupported_BPP
	}
	return nil
}
// Rounds `width` up to the next multiple of 2 (RLE8 literal runs are
// padded to 16-bit boundaries).
align2 :: proc(width: int) -> (stride: int) {
	return width + (width & 1)
}
// Rounds `width` up to the next multiple of 4 (BMP scanlines are
// padded to DWORD boundaries).
align4 :: proc(width: int) -> (stride: int) {
	remainder := width & 3
	if remainder == 0 {
		return width
	}
	return width + 4 - remainder
}
// Consumes and discards `bytes_to_skip` bytes from the stream.
// A negative count means the header's offsets were inconsistent.
skip_space :: proc(ctx: ^$C, bytes_to_skip: int) -> (err: Error) {
	if bytes_to_skip < 0 {
		return .Corrupt
	}
	for i := 0; i < bytes_to_skip; i += 1 {
		image.read_u8(ctx) or_return
	}
	return
}
// Cleanup of image-specific data.
// Safe to call with nil (e.g. when load returned an error).
destroy :: proc(img: ^Image) {
	if img == nil {
		return
	}

	bytes.buffer_destroy(&img.pixels)

	// Only BMP metadata is ever attached by this package.
	#partial switch m in img.metadata {
	case ^image.BMP_Info:
		free(m)
	}
	free(img)
}
// Runs at program init so the generic `image` loader can dispatch
// BMP files to this package's `load_from_bytes` / `destroy`.
@(init, private)
_register :: proc() {
	image.register(.BMP, load_from_bytes, destroy)
}

View File

@@ -0,0 +1,4 @@
//+build js
package core_image_bmp
// JS targets have no file system, so only the in-memory variants are exposed.
load :: proc{load_from_bytes, load_from_context}

View File

@@ -0,0 +1,34 @@
//+build !js
package core_image_bmp
import "core:os"
import "core:bytes"
// `load` accepts a filename, a byte slice, or an existing stream context.
load :: proc{load_from_file, load_from_bytes, load_from_context}
// Reads the whole file into memory and decodes it as a BMP.
// The temporary file buffer is released after decoding finishes.
// Returns .Unable_To_Read_File if the file can't be read.
load_from_file :: proc(filename: string, options := Options{}, allocator := context.allocator) -> (img: ^Image, err: Error) {
	context.allocator = allocator

	data, ok := os.read_entire_file(filename)
	defer delete(data)

	if !ok {
		return nil, .Unable_To_Read_File
	}
	return load_from_bytes(data, options)
}
save :: proc{save_to_buffer, save_to_file}
// Encodes `img` to an in-memory buffer and writes it out to `output`.
// Returns .Unable_To_Write_File if the file can't be written.
save_to_file :: proc(output: string, img: ^Image, options := Options{}, allocator := context.allocator) -> (err: Error) {
	context.allocator = allocator

	buffer := &bytes.Buffer{}
	defer bytes.buffer_destroy(buffer)

	save_to_buffer(buffer, img, options) or_return
	if !os.write_entire_file(output, buffer.buf[:]) {
		return .Unable_To_Write_File
	}
	return nil
}

View File

@@ -12,6 +12,7 @@ package image
import "core:bytes"
import "core:mem"
import "core:io"
import "core:compress"
import "base:runtime"
@@ -62,6 +63,7 @@ Image_Metadata :: union #shared_nil {
^PNG_Info,
^QOI_Info,
^TGA_Info,
^BMP_Info,
}
@@ -159,11 +161,13 @@ Error :: union #shared_nil {
Netpbm_Error,
PNG_Error,
QOI_Error,
BMP_Error,
compress.Error,
compress.General_Error,
compress.Deflate_Error,
compress.ZLIB_Error,
io.Error,
runtime.Allocator_Error,
}
@@ -196,6 +200,128 @@ General_Image_Error :: enum {
Unable_To_Allocate_Or_Resize,
}
/*
BMP-specific
*/
BMP_Error :: enum {
None = 0,
Invalid_File_Size,
Unsupported_BMP_Version,
Unsupported_OS2_File,
Unsupported_Compression,
Unsupported_BPP,
Invalid_Stride,
Invalid_Color_Count,
Implausible_File_Size,
Bitfield_Version_Unhandled, // We don't (yet) handle bit fields for this BMP version.
Bitfield_Sum_Exceeds_BPP, // Total mask bit count > bpp
Bitfield_Overlapped, // Channel masks overlap
}
// img.metadata is wrapped in a struct in case we need to add to it later
// without putting it in BMP_Header
BMP_Info :: struct {
info: BMP_Header,
}
BMP_Magic :: enum u16le {
Bitmap = 0x4d42, // 'BM'
OS2_Bitmap_Array = 0x4142, // 'BA'
OS2_Icon = 0x4349, // 'IC',
OS2_Color_Icon = 0x4943, // 'CI'
OS2_Pointer = 0x5450, // 'PT'
OS2_Color_Pointer = 0x5043, // 'CP'
}
// See: http://justsolve.archiveteam.org/wiki/BMP#Well-known_versions
BMP_Version :: enum u32le {
OS2_v1 = 12, // BITMAPCOREHEADER (Windows V2 / OS/2 version 1.0)
OS2_v2 = 64, // BITMAPCOREHEADER2 (OS/2 version 2.x)
V3 = 40, // BITMAPINFOHEADER
V4 = 108, // BITMAPV4HEADER
V5 = 124, // BITMAPV5HEADER
ABBR_16 = 16, // Abbreviated
ABBR_24 = 24, // ..
ABBR_48 = 48, // ..
ABBR_52 = 52, // ..
ABBR_56 = 56, // ..
}
BMP_Header :: struct #packed {
// File header
magic: BMP_Magic,
size: u32le,
_res1: u16le, // Reserved; must be zero
_res2: u16le, // Reserved; must be zero
pixel_offset: u32le, // Offset in bytes, from the beginning of BMP_Header to the pixel data
// V3
info_size: BMP_Version,
width: i32le,
height: i32le,
planes: u16le,
bpp: u16le,
compression: BMP_Compression,
image_size: u32le,
pels_per_meter: [2]u32le,
colors_used: u32le,
colors_important: u32le, // OS2_v2 is equal up to here
// V4
masks: [4]u32le `fmt:"32b"`,
colorspace: BMP_Logical_Color_Space,
endpoints: BMP_CIEXYZTRIPLE,
gamma: [3]BMP_GAMMA16_16,
// V5
intent: BMP_Gamut_Mapping_Intent,
profile_data: u32le,
profile_size: u32le,
reserved: u32le,
}
#assert(size_of(BMP_Header) == 138)
OS2_Header :: struct #packed {
// BITMAPCOREHEADER minus info_size field
width: i16le,
height: i16le,
planes: u16le,
bpp: u16le,
}
#assert(size_of(OS2_Header) == 8)
BMP_Compression :: enum u32le {
RGB = 0x0000,
RLE8 = 0x0001,
RLE4 = 0x0002,
Bit_Fields = 0x0003, // If Windows
Huffman1D = 0x0003, // If OS2v2
JPEG = 0x0004, // If Windows
RLE24 = 0x0004, // If OS2v2
PNG = 0x0005,
Alpha_Bit_Fields = 0x0006,
CMYK = 0x000B,
CMYK_RLE8 = 0x000C,
CMYK_RLE4 = 0x000D,
}
BMP_Logical_Color_Space :: enum u32le {
CALIBRATED_RGB = 0x00000000,
sRGB = 0x73524742, // 'sRGB'
WINDOWS_COLOR_SPACE = 0x57696E20, // 'Win '
}
BMP_FXPT2DOT30 :: u32le
BMP_CIEXYZ :: [3]BMP_FXPT2DOT30
BMP_CIEXYZTRIPLE :: [3]BMP_CIEXYZ
BMP_GAMMA16_16 :: [2]u16le
BMP_Gamut_Mapping_Intent :: enum u32le {
INVALID = 0x00000000, // If not V5, this field will just be zero-initialized and not valid.
ABS_COLORIMETRIC = 0x00000008,
BUSINESS = 0x00000001,
GRAPHICS = 0x00000002,
IMAGES = 0x00000004,
}
/*
Netpbm-specific definitions
*/
@@ -1133,6 +1259,40 @@ apply_palette_rgba :: proc(img: ^Image, palette: [256]RGBA_Pixel, allocator := c
}
apply_palette :: proc{apply_palette_rgb, apply_palette_rgba}
// Fast single-channel blend of `fg` over `bg` using `alpha`.
// NOTE(review): the final `c & (MAX - 1)` reduces the weighted sum modulo MAX
// rather than dividing by it. That is exact at the extremes (alpha == 0 yields
// bg, alpha == MAX - 1 yields fg, assuming both inputs are < MAX) but does not
// interpolate for intermediate alphas — confirm whether a shift-based scale
// (`c >> 8` / `c >> 16`) was intended.
blend_single_channel :: #force_inline proc(fg, alpha, bg: $T) -> (res: T) where T == u8 || T == u16 {
	MAX :: 256 when T == u8 else 65536
	c := u32(fg) * (MAX - u32(alpha)) + u32(bg) * (1 + u32(alpha))
	return T(c & (MAX - 1))
}
// Fast per-channel blend of `fg` over `bg` for 1- to 4-channel pixels.
// The 4th channel of a 4-wide result is forced to MAX - 1 (fully opaque).
// NOTE(review): as in blend_single_channel, `& (MAX - 1)` wraps the weighted
// sum modulo MAX instead of scaling it down; only exact at the alpha extremes —
// confirm whether a shift-based scale was intended.
blend_pixel :: #force_inline proc(fg: [$N]$T, alpha: T, bg: [N]T) -> (res: [N]T) where (T == u8 || T == u16), N >= 1 && N <= 4 {
	MAX :: 256 when T == u8 else 65536
	when N == 1 {
		r := u32(fg.r) * (MAX - u32(alpha)) + u32(bg.r) * (1 + u32(alpha))
		return {T(r & (MAX - 1))}
	}
	when N == 2 {
		r := u32(fg.r) * (MAX - u32(alpha)) + u32(bg.r) * (1 + u32(alpha))
		g := u32(fg.g) * (MAX - u32(alpha)) + u32(bg.g) * (1 + u32(alpha))
		return {T(r & (MAX - 1)), T(g & (MAX - 1))}
	}
	when N == 3 || N == 4 {
		r := u32(fg.r) * (MAX - u32(alpha)) + u32(bg.r) * (1 + u32(alpha))
		g := u32(fg.g) * (MAX - u32(alpha)) + u32(bg.g) * (1 + u32(alpha))
		b := u32(fg.b) * (MAX - u32(alpha)) + u32(bg.b) * (1 + u32(alpha))
		when N == 3 {
			return {T(r & (MAX - 1)), T(g & (MAX - 1)), T(b & (MAX - 1))}
		} else {
			// Alpha of the blended result is always fully opaque.
			return {T(r & (MAX - 1)), T(g & (MAX - 1)), T(b & (MAX - 1)), MAX - 1}
		}
	}
	unreachable()
}
blend :: proc{blend_single_channel, blend_pixel}
// Replicates grayscale values into RGB(A) 8- or 16-bit images as appropriate.
// Returns early with `false` if already an RGB(A) image.
@@ -1245,4 +1405,4 @@ write_bytes :: proc(buf: ^bytes.Buffer, data: []u8) -> (err: compress.General_Er
return .Resize_Failed
}
return nil
}
}

View File

@@ -597,7 +597,7 @@ load_from_context :: proc(ctx: ^$C, options := Options{}, allocator := context.a
dsc := depth_scale_table
scale := dsc[info.header.bit_depth]
if scale != 1 {
key := mem.slice_data_cast([]u16be, c.data)[0] * u16be(scale)
key := (^u16be)(raw_data(c.data))^ * u16be(scale)
c.data = []u8{0, u8(key & 255)}
}
}
@@ -735,59 +735,48 @@ load_from_context :: proc(ctx: ^$C, options := Options{}, allocator := context.a
return {}, .Unable_To_Allocate_Or_Resize
}
i := 0; j := 0
// If we don't have transparency or drop it without applying it, we can do this:
if (!seen_trns || (seen_trns && .alpha_drop_if_present in options && .alpha_premultiply not_in options)) && .alpha_add_if_missing not_in options {
for h := 0; h < int(img.height); h += 1 {
for w := 0; w < int(img.width); w += 1 {
c := _plte.entries[temp.buf[i]]
t.buf[j ] = c.r
t.buf[j+1] = c.g
t.buf[j+2] = c.b
i += 1; j += 3
}
output := mem.slice_data_cast([]image.RGB_Pixel, t.buf[:])
for pal_idx, idx in temp.buf {
output[idx] = _plte.entries[pal_idx]
}
} else if add_alpha || .alpha_drop_if_present in options {
bg := [3]f32{0, 0, 0}
bg := PLTE_Entry{0, 0, 0}
if premultiply && seen_bkgd {
c16 := img.background.([3]u16)
bg = [3]f32{f32(c16.r), f32(c16.g), f32(c16.b)}
bg = {u8(c16.r), u8(c16.g), u8(c16.b)}
}
no_alpha := (.alpha_drop_if_present in options || premultiply) && .alpha_add_if_missing not_in options
blend_background := seen_bkgd && .blend_background in options
for h := 0; h < int(img.height); h += 1 {
for w := 0; w < int(img.width); w += 1 {
index := temp.buf[i]
c := _plte.entries[index]
a := int(index) < len(trns.data) ? trns.data[index] : 255
alpha := f32(a) / 255.0
if no_alpha {
output := mem.slice_data_cast([]image.RGB_Pixel, t.buf[:])
for orig, idx in temp.buf {
c := _plte.entries[orig]
a := int(orig) < len(trns.data) ? trns.data[orig] : 255
if blend_background {
c.r = u8((1.0 - alpha) * bg[0] + f32(c.r) * alpha)
c.g = u8((1.0 - alpha) * bg[1] + f32(c.g) * alpha)
c.b = u8((1.0 - alpha) * bg[2] + f32(c.b) * alpha)
output[idx] = image.blend(c, a, bg)
} else if premultiply {
output[idx] = image.blend(PLTE_Entry{}, a, c)
}
}
} else {
output := mem.slice_data_cast([]image.RGBA_Pixel, t.buf[:])
for orig, idx in temp.buf {
c := _plte.entries[orig]
a := int(orig) < len(trns.data) ? trns.data[orig] : 255
if blend_background {
c = image.blend(c, a, bg)
a = 255
} else if premultiply {
c.r = u8(f32(c.r) * alpha)
c.g = u8(f32(c.g) * alpha)
c.b = u8(f32(c.b) * alpha)
c = image.blend(PLTE_Entry{}, a, c)
}
t.buf[j ] = c.r
t.buf[j+1] = c.g
t.buf[j+2] = c.b
i += 1
if no_alpha {
j += 3
} else {
t.buf[j+3] = u8(a)
j += 4
}
output[idx] = {c.r, c.g, c.b, u8(a)}
}
}
} else {
@@ -1015,8 +1004,8 @@ load_from_context :: proc(ctx: ^$C, options := Options{}, allocator := context.a
return {}, .Unable_To_Allocate_Or_Resize
}
p := mem.slice_data_cast([]u8, temp.buf[:])
o := mem.slice_data_cast([]u8, t.buf[:])
p := temp.buf[:]
o := t.buf[:]
switch raw_image_channels {
case 1:
@@ -1627,7 +1616,6 @@ defilter :: proc(img: ^Image, filter_bytes: ^bytes.Buffer, header: ^image.PNG_IH
return nil
}
@(init, private)
_register :: proc() {
image.register(.PNG, load_from_bytes, destroy)

View File

@@ -1,6 +1,7 @@
//+build !freestanding
package log
import "core:encoding/ansi"
import "core:fmt"
import "core:strings"
import "core:os"
@@ -70,18 +71,10 @@ file_console_logger_proc :: proc(logger_data: rawptr, level: Level, text: string
backing: [1024]byte //NOTE(Hoej): 1024 might be too much for a header backing, unless somebody has really long paths.
buf := strings.builder_from_bytes(backing[:])
do_level_header(options, level, &buf)
do_level_header(options, &buf, level)
when time.IS_SUPPORTED {
if Full_Timestamp_Opts & options != nil {
fmt.sbprint(&buf, "[")
t := time.now()
y, m, d := time.date(t)
h, min, s := time.clock(t)
if .Date in options { fmt.sbprintf(&buf, "%d-%02d-%02d ", y, m, d) }
if .Time in options { fmt.sbprintf(&buf, "%02d:%02d:%02d", h, min, s) }
fmt.sbprint(&buf, "] ")
}
do_time_header(options, &buf, time.now())
}
do_location_header(options, &buf, location)
@@ -99,12 +92,12 @@ file_console_logger_proc :: proc(logger_data: rawptr, level: Level, text: string
fmt.fprintf(h, "%s%s\n", strings.to_string(buf), text)
}
do_level_header :: proc(opts: Options, level: Level, str: ^strings.Builder) {
do_level_header :: proc(opts: Options, str: ^strings.Builder, level: Level) {
RESET :: "\x1b[0m"
RED :: "\x1b[31m"
YELLOW :: "\x1b[33m"
DARK_GREY :: "\x1b[90m"
RESET :: ansi.CSI + ansi.RESET + ansi.SGR
RED :: ansi.CSI + ansi.FG_RED + ansi.SGR
YELLOW :: ansi.CSI + ansi.FG_YELLOW + ansi.SGR
DARK_GREY :: ansi.CSI + ansi.FG_BRIGHT_BLACK + ansi.SGR
col := RESET
switch level {
@@ -125,6 +118,24 @@ do_level_header :: proc(opts: Options, level: Level, str: ^strings.Builder) {
}
}
// Appends a "[YYYY-MM-DD HH:MM:SS] " style timestamp to `buf`, emitting only
// the parts (.Date and/or .Time) enabled in `opts`, with a separating space
// when both are present.
// No-op when the target doesn't support time or no timestamp option is set.
do_time_header :: proc(opts: Options, buf: ^strings.Builder, t: time.Time) {
	when time.IS_SUPPORTED {
		if Full_Timestamp_Opts & opts != nil {
			fmt.sbprint(buf, "[")
			y, m, d := time.date(t)
			h, min, s := time.clock(t)
			if .Date in opts {
				fmt.sbprintf(buf, "%d-%02d-%02d", y, m, d)
				if .Time in opts {
					fmt.sbprint(buf, " ")
				}
			}
			if .Time in opts { fmt.sbprintf(buf, "%02d:%02d:%02d", h, min, s) }
			fmt.sbprint(buf, "] ")
		}
	}
}
do_location_header :: proc(opts: Options, buf: ^strings.Builder, location := #caller_location) {
if Location_Header_Opts & opts == nil {
return

View File

@@ -12,11 +12,10 @@ create_multi_logger :: proc(logs: ..Logger) -> Logger {
return Logger{multi_logger_proc, data, Level.Debug, nil}
}
destroy_multi_logger :: proc(log : ^Logger) {
destroy_multi_logger :: proc(log: Logger) {
data := (^Multi_Logger_Data)(log.data)
delete(data.loggers)
free(log.data)
log^ = nil_logger()
free(data)
}
multi_logger_proc :: proc(logger_data: rawptr, level: Level, text: string,

View File

@@ -0,0 +1,60 @@
package math_big
/*
With `n` items, calculate how many ways that `r` of them can be ordered.
*/
permutations_with_repetition :: int_pow_int
/*
	With `n` items, calculate how many ways that `r` of them can be ordered without any repeats.

	Computes n! / (n - r)! into `dest`.
	NOTE(review): assumes 0 <= r <= n; for r > n the `n - r` argument below goes
	negative and relies on `factorial` rejecting it — confirm.
*/
permutations_without_repetition :: proc(dest: ^Int, n, r: int) -> (error: Error) {
	// r == n degenerates to plain n!.
	if n == r {
		return factorial(dest, n)
	}
	tmp := &Int{}
	defer internal_destroy(tmp)

	// n!
	// --------
	// (n - r)!
	factorial(dest, n) or_return
	factorial(tmp, n - r) or_return
	div(dest, dest, tmp) or_return
	return
}
/*
	With `n` items, calculate how many ways that `r` of them can be chosen.
	Also known as the multiset coefficient or (n multichoose k).

	Uses the identity ((n multichoose r)) == C(n + r - 1, r).
*/
combinations_with_repetition :: proc(dest: ^Int, n, r: int) -> (error: Error) {
	// (n + r - 1)!
	// ------------
	// r! (n - 1)!
	return combinations_without_repetition(dest, n + r - 1, r)
}
/*
	With `n` items, calculate how many ways that `r` of them can be chosen without any repeats.
	Also known as the binomial coefficient or (n choose k).

	Computes n! / (r! * (n - r)!) into `dest`.
*/
combinations_without_repetition :: proc(dest: ^Int, n, r: int) -> (error: Error) {
	r_fact, n_minus_r_fact := &Int{}, &Int{}
	defer internal_destroy(r_fact, n_minus_r_fact)

	// Numerator: n!
	factorial(dest, n) or_return

	// Denominator: r! * (n - r)!, combined so only one division is needed.
	factorial(r_fact, r) or_return
	factorial(n_minus_r_fact, n - r) or_return
	mul(r_fact, r_fact, n_minus_r_fact) or_return

	div(dest, dest, r_fact) or_return
	return
}

View File

@@ -1188,9 +1188,6 @@ internal_random_prime :: proc(a: ^Int, size_in_bits: int, trials: int, flags :=
flags := flags
trials := trials
t := &Int{}
defer internal_destroy(t)
/*
Sanity check the input.
*/

View File

@@ -315,6 +315,7 @@ int_atoi :: proc(res: ^Int, input: string, radix := i8(10), allocator := context
atoi :: proc { int_atoi, }
string_to_int :: int_atoi
/*
We size for `string` by default.

View File

@@ -350,7 +350,7 @@ _reduce_pi_f64 :: proc "contextless" (x: f64) -> f64 #no_bounds_check {
// that is, 1/PI = SUM bdpi[i]*2^(-64*i).
// 19 64-bit digits give 1216 bits of precision
// to handle the largest possible f64 exponent.
@static bdpi := [?]u64{
@(static, rodata) bdpi := [?]u64{
0x0000000000000000,
0x517cc1b727220a94,
0xfe13abe8fa9a6ee0,

View File

@@ -3,6 +3,7 @@ package linalg
import "core:math"
import "base:builtin"
import "base:intrinsics"
import "base:runtime"
// Generic
@@ -223,33 +224,27 @@ quaternion_mul_quaternion :: proc "contextless" (q1, q2: $Q) -> Q where IS_QUATE
@(require_results)
quaternion64_mul_vector3 :: proc "contextless" (q: $Q/quaternion64, v: $V/[3]$F/f16) -> V {
Raw_Quaternion :: struct {xyz: [3]f16, r: f16}
q := transmute(Raw_Quaternion)q
q := transmute(runtime.Raw_Quaternion64_Vector_Scalar)q
v := v
t := cross(2*q.xyz, v)
return V(v + q.r*t + cross(q.xyz, t))
t := cross(2*q.vector, v)
return V(v + q.scalar*t + cross(q.vector, t))
}
@(require_results)
quaternion128_mul_vector3 :: proc "contextless" (q: $Q/quaternion128, v: $V/[3]$F/f32) -> V {
Raw_Quaternion :: struct {xyz: [3]f32, r: f32}
q := transmute(Raw_Quaternion)q
q := transmute(runtime.Raw_Quaternion128_Vector_Scalar)q
v := v
t := cross(2*q.xyz, v)
return V(v + q.r*t + cross(q.xyz, t))
t := cross(2*q.vector, v)
return V(v + q.scalar*t + cross(q.vector, t))
}
@(require_results)
quaternion256_mul_vector3 :: proc "contextless" (q: $Q/quaternion256, v: $V/[3]$F/f64) -> V {
Raw_Quaternion :: struct {xyz: [3]f64, r: f64}
q := transmute(Raw_Quaternion)q
q := transmute(runtime.Raw_Quaternion256_Vector_Scalar)q
v := v
t := cross(2*q.xyz, v)
return V(v + q.r*t + cross(q.xyz, t))
t := cross(2*q.vector, v)
return V(v + q.scalar*t + cross(q.vector, t))
}
quaternion_mul_vector3 :: proc{quaternion64_mul_vector3, quaternion128_mul_vector3, quaternion256_mul_vector3}

View File

@@ -527,7 +527,7 @@ angle_from_quaternion :: proc{
@(require_results)
axis_from_quaternion_f16 :: proc "contextless" (q: Quaternionf16) -> Vector3f16 {
t1 := 1 - q.w*q.w
if t1 < 0 {
if t1 <= 0 {
return {0, 0, 1}
}
t2 := 1.0 / math.sqrt(t1)
@@ -536,7 +536,7 @@ axis_from_quaternion_f16 :: proc "contextless" (q: Quaternionf16) -> Vector3f16
@(require_results)
axis_from_quaternion_f32 :: proc "contextless" (q: Quaternionf32) -> Vector3f32 {
t1 := 1 - q.w*q.w
if t1 < 0 {
if t1 <= 0 {
return {0, 0, 1}
}
t2 := 1.0 / math.sqrt(t1)
@@ -545,7 +545,7 @@ axis_from_quaternion_f32 :: proc "contextless" (q: Quaternionf32) -> Vector3f32
@(require_results)
axis_from_quaternion_f64 :: proc "contextless" (q: Quaternionf64) -> Vector3f64 {
t1 := 1 - q.w*q.w
if t1 < 0 {
if t1 <= 0 {
return {0, 0, 1}
}
t2 := 1.0 / math.sqrt(t1)

View File

@@ -159,7 +159,7 @@ roll_from_quaternion_f16 :: proc "contextless" (q: Quaternionf16) -> f16 {
@(require_results)
pitch_from_quaternion_f16 :: proc "contextless" (q: Quaternionf16) -> f16 {
y := 2 * (q.y*q.z + q.w*q.w)
y := 2 * (q.y*q.z + q.w*q.x)
x := q.w*q.w - q.x*q.x - q.y*q.y + q.z*q.z
if abs(x) <= F16_EPSILON && abs(y) <= F16_EPSILON {

View File

@@ -159,7 +159,7 @@ roll_from_quaternion_f32 :: proc "contextless" (q: Quaternionf32) -> f32 {
@(require_results)
pitch_from_quaternion_f32 :: proc "contextless" (q: Quaternionf32) -> f32 {
y := 2 * (q.y*q.z + q.w*q.w)
y := 2 * (q.y*q.z + q.w*q.x)
x := q.w*q.w - q.x*q.x - q.y*q.y + q.z*q.z
if abs(x) <= F32_EPSILON && abs(y) <= F32_EPSILON {

View File

@@ -159,7 +159,7 @@ roll_from_quaternion_f64 :: proc "contextless" (q: Quaternionf64) -> f64 {
@(require_results)
pitch_from_quaternion_f64 :: proc "contextless" (q: Quaternionf64) -> f64 {
y := 2 * (q.y*q.z + q.w*q.w)
y := 2 * (q.y*q.z + q.w*q.x)
x := q.w*q.w - q.x*q.x - q.y*q.y + q.z*q.z
if abs(x) <= F64_EPSILON && abs(y) <= F64_EPSILON {

View File

@@ -130,10 +130,10 @@ pow10 :: proc{
@(require_results)
pow10_f16 :: proc "contextless" (n: f16) -> f16 {
@static pow10_pos_tab := [?]f16{
@(static, rodata) pow10_pos_tab := [?]f16{
1e00, 1e01, 1e02, 1e03, 1e04,
}
@static pow10_neg_tab := [?]f16{
@(static, rodata) pow10_neg_tab := [?]f16{
1e-00, 1e-01, 1e-02, 1e-03, 1e-04, 1e-05, 1e-06, 1e-07,
}
@@ -151,13 +151,13 @@ pow10_f16 :: proc "contextless" (n: f16) -> f16 {
@(require_results)
pow10_f32 :: proc "contextless" (n: f32) -> f32 {
@static pow10_pos_tab := [?]f32{
@(static, rodata) pow10_pos_tab := [?]f32{
1e00, 1e01, 1e02, 1e03, 1e04, 1e05, 1e06, 1e07, 1e08, 1e09,
1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
1e20, 1e21, 1e22, 1e23, 1e24, 1e25, 1e26, 1e27, 1e28, 1e29,
1e30, 1e31, 1e32, 1e33, 1e34, 1e35, 1e36, 1e37, 1e38,
}
@static pow10_neg_tab := [?]f32{
@(static, rodata) pow10_neg_tab := [?]f32{
1e-00, 1e-01, 1e-02, 1e-03, 1e-04, 1e-05, 1e-06, 1e-07, 1e-08, 1e-09,
1e-10, 1e-11, 1e-12, 1e-13, 1e-14, 1e-15, 1e-16, 1e-17, 1e-18, 1e-19,
1e-20, 1e-21, 1e-22, 1e-23, 1e-24, 1e-25, 1e-26, 1e-27, 1e-28, 1e-29,
@@ -179,16 +179,16 @@ pow10_f32 :: proc "contextless" (n: f32) -> f32 {
@(require_results)
pow10_f64 :: proc "contextless" (n: f64) -> f64 {
@static pow10_tab := [?]f64{
@(static, rodata) pow10_tab := [?]f64{
1e00, 1e01, 1e02, 1e03, 1e04, 1e05, 1e06, 1e07, 1e08, 1e09,
1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
1e20, 1e21, 1e22, 1e23, 1e24, 1e25, 1e26, 1e27, 1e28, 1e29,
1e30, 1e31,
}
@static pow10_pos_tab32 := [?]f64{
@(static, rodata) pow10_pos_tab32 := [?]f64{
1e00, 1e32, 1e64, 1e96, 1e128, 1e160, 1e192, 1e224, 1e256, 1e288,
}
@static pow10_neg_tab32 := [?]f64{
@(static, rodata) pow10_neg_tab32 := [?]f64{
1e-00, 1e-32, 1e-64, 1e-96, 1e-128, 1e-160, 1e-192, 1e-224, 1e-256, 1e-288, 1e-320,
}
@@ -1274,7 +1274,7 @@ binomial :: proc "contextless" (n, k: int) -> int {
@(require_results)
factorial :: proc "contextless" (n: int) -> int {
when size_of(int) == size_of(i64) {
@static table := [21]int{
@(static, rodata) table := [21]int{
1,
1,
2,
@@ -1298,7 +1298,7 @@ factorial :: proc "contextless" (n: int) -> int {
2_432_902_008_176_640_000,
}
} else {
@static table := [13]int{
@(static, rodata) table := [13]int{
1,
1,
2,

View File

@@ -67,7 +67,7 @@ package math
// masks any imprecision in the polynomial.
@(private="file", require_results)
stirling :: proc "contextless" (x: f64) -> (f64, f64) {
@(static) gamS := [?]f64{
@(static, rodata) gamS := [?]f64{
+7.87311395793093628397e-04,
-2.29549961613378126380e-04,
-2.68132617805781232825e-03,
@@ -103,7 +103,7 @@ gamma_f64 :: proc "contextless" (x: f64) -> f64 {
return false
}
@(static) gamP := [?]f64{
@(static, rodata) gamP := [?]f64{
1.60119522476751861407e-04,
1.19135147006586384913e-03,
1.04213797561761569935e-02,
@@ -112,7 +112,7 @@ gamma_f64 :: proc "contextless" (x: f64) -> f64 {
4.94214826801497100753e-01,
9.99999999999999996796e-01,
}
@(static) gamQ := [?]f64{
@(static, rodata) gamQ := [?]f64{
-2.31581873324120129819e-05,
+5.39605580493303397842e-04,
-4.45641913851797240494e-03,

View File

@@ -123,7 +123,7 @@ lgamma_f64 :: proc "contextless" (x: f64) -> (lgamma: f64, sign: int) {
return -x
}
@static lgamA := [?]f64{
@(static, rodata) lgamA := [?]f64{
0h3FB3C467E37DB0C8,
0h3FD4A34CC4A60FAD,
0h3FB13E001A5562A7,
@@ -137,7 +137,7 @@ lgamma_f64 :: proc "contextless" (x: f64) -> (lgamma: f64, sign: int) {
0h3EFA7074428CFA52,
0h3F07858E90A45837,
}
@static lgamR := [?]f64{
@(static, rodata) lgamR := [?]f64{
1.0,
0h3FF645A762C4AB74,
0h3FE71A1893D3DCDC,
@@ -146,7 +146,7 @@ lgamma_f64 :: proc "contextless" (x: f64) -> (lgamma: f64, sign: int) {
0h3F497DDACA41A95B,
0h3EDEBAF7A5B38140,
}
@static lgamS := [?]f64{
@(static, rodata) lgamS := [?]f64{
0hBFB3C467E37DB0C8,
0h3FCB848B36E20878,
0h3FD4D98F4F139F59,
@@ -155,7 +155,7 @@ lgamma_f64 :: proc "contextless" (x: f64) -> (lgamma: f64, sign: int) {
0h3F5E26B67368F239,
0h3F00BFECDD17E945,
}
@static lgamT := [?]f64{
@(static, rodata) lgamT := [?]f64{
0h3FDEF72BC8EE38A2,
0hBFC2E4278DC6C509,
0h3FB08B4294D5419B,
@@ -172,7 +172,7 @@ lgamma_f64 :: proc "contextless" (x: f64) -> (lgamma: f64, sign: int) {
0hBF347F24ECC38C38,
0h3F35FD3EE8C2D3F4,
}
@static lgamU := [?]f64{
@(static, rodata) lgamU := [?]f64{
0hBFB3C467E37DB0C8,
0h3FE4401E8B005DFF,
0h3FF7475CD119BD6F,
@@ -180,7 +180,7 @@ lgamma_f64 :: proc "contextless" (x: f64) -> (lgamma: f64, sign: int) {
0h3FCD4EAEF6010924,
0h3F8B678BBF2BAB09,
}
@static lgamV := [?]f64{
@(static, rodata) lgamV := [?]f64{
1.0,
0h4003A5D7C2BD619C,
0h40010725A42B18F5,
@@ -188,7 +188,7 @@ lgamma_f64 :: proc "contextless" (x: f64) -> (lgamma: f64, sign: int) {
0h3FBAAE55D6537C88,
0h3F6A5ABB57D0CF61,
}
@static lgamW := [?]f64{
@(static, rodata) lgamW := [?]f64{
0h3FDACFE390C97D69,
0h3FB555555555553B,
0hBF66C16C16B02E5C,

View File

@@ -234,7 +234,7 @@ _trig_reduce_f64 :: proc "contextless" (x: f64) -> (j: u64, z: f64) #no_bounds_c
// that is, 4/pi = Sum bd_pi4[i]*2^(-64*i)
// 19 64-bit digits and the leading one bit give 1217 bits
// of precision to handle the largest possible f64 exponent.
@static bd_pi4 := [?]u64{
@(static, rodata) bd_pi4 := [?]u64{
0x0000000000000001,
0x45f306dc9c882a53,
0xf84eafa3ea69bb81,

View File

@@ -19,7 +19,7 @@ import "core:math"
exp_float64 :: proc(r: ^Rand = nil) -> f64 {
re :: 7.69711747013104972
@(static)
@(static, rodata)
ke := [256]u32{
0xe290a139, 0x0, 0x9beadebc, 0xc377ac71, 0xd4ddb990,
0xde893fb8, 0xe4a8e87c, 0xe8dff16a, 0xebf2deab, 0xee49a6e8,
@@ -74,7 +74,7 @@ exp_float64 :: proc(r: ^Rand = nil) -> f64 {
0xf7b577d2, 0xf69c650c, 0xf51530f0, 0xf2cb0e3c, 0xeeefb15d,
0xe6da6ecf,
}
@(static)
@(static, rodata)
we := [256]f32{
2.0249555e-09, 1.486674e-11, 2.4409617e-11, 3.1968806e-11,
3.844677e-11, 4.4228204e-11, 4.9516443e-11, 5.443359e-11,
@@ -141,7 +141,7 @@ exp_float64 :: proc(r: ^Rand = nil) -> f64 {
1.2393786e-09, 1.276585e-09, 1.3193139e-09, 1.3695435e-09,
1.4305498e-09, 1.508365e-09, 1.6160854e-09, 1.7921248e-09,
}
@(static)
@(static, rodata)
fe := [256]f32{
1, 0.9381437, 0.90046996, 0.87170434, 0.8477855, 0.8269933,
0.8084217, 0.7915276, 0.77595687, 0.7614634, 0.7478686,

View File

@@ -21,7 +21,7 @@ import "core:math"
norm_float64 :: proc(r: ^Rand = nil) -> f64 {
rn :: 3.442619855899
@(static)
@(static, rodata)
kn := [128]u32{
0x76ad2212, 0x00000000, 0x600f1b53, 0x6ce447a6, 0x725b46a2,
0x7560051d, 0x774921eb, 0x789a25bd, 0x799045c3, 0x7a4bce5d,
@@ -50,7 +50,7 @@ norm_float64 :: proc(r: ^Rand = nil) -> f64 {
0x7da61a1e, 0x7d72a0fb, 0x7d30e097, 0x7cd9b4ab, 0x7c600f1a,
0x7ba90bdc, 0x7a722176, 0x77d664e5,
}
@(static)
@(static, rodata)
wn := [128]f32{
1.7290405e-09, 1.2680929e-10, 1.6897518e-10, 1.9862688e-10,
2.2232431e-10, 2.4244937e-10, 2.601613e-10, 2.7611988e-10,
@@ -85,7 +85,7 @@ norm_float64 :: proc(r: ^Rand = nil) -> f64 {
1.2601323e-09, 1.2857697e-09, 1.3146202e-09, 1.347784e-09,
1.3870636e-09, 1.4357403e-09, 1.5008659e-09, 1.6030948e-09,
}
@(static)
@(static, rodata)
fn := [128]f32{
1.00000000, 0.9635997, 0.9362827, 0.9130436, 0.89228165,
0.87324303, 0.8555006, 0.8387836, 0.8229072, 0.8077383,

View File

@@ -11,12 +11,15 @@ Raw_Dynamic_Array :: runtime.Raw_Dynamic_Array
Raw_Map :: runtime.Raw_Map
Raw_Soa_Pointer :: runtime.Raw_Soa_Pointer
Raw_Complex64 :: struct {real, imag: f32}
Raw_Complex128 :: struct {real, imag: f64}
Raw_Quaternion128 :: struct {imag, jmag, kmag: f32, real: f32}
Raw_Quaternion256 :: struct {imag, jmag, kmag: f64, real: f64}
Raw_Quaternion128_Vector_Scalar :: struct {vector: [3]f32, scalar: f32}
Raw_Quaternion256_Vector_Scalar :: struct {vector: [3]f64, scalar: f64}
Raw_Complex32 :: runtime.Raw_Complex32
Raw_Complex64 :: runtime.Raw_Complex64
Raw_Complex128 :: runtime.Raw_Complex128
Raw_Quaternion64 :: runtime.Raw_Quaternion64
Raw_Quaternion128 :: runtime.Raw_Quaternion128
Raw_Quaternion256 :: runtime.Raw_Quaternion256
Raw_Quaternion64_Vector_Scalar :: runtime.Raw_Quaternion64_Vector_Scalar
Raw_Quaternion128_Vector_Scalar :: runtime.Raw_Quaternion128_Vector_Scalar
Raw_Quaternion256_Vector_Scalar :: runtime.Raw_Quaternion256_Vector_Scalar
make_any :: proc "contextless" (data: rawptr, id: typeid) -> any {
return transmute(any)Raw_Any{data, id}

View File

@@ -0,0 +1,341 @@
package mem
// The Rollback Stack Allocator was designed for the test runner to be fast,
// able to grow, and respect the Tracking Allocator's requirement for
// individual frees. It is not overly concerned with fragmentation, however.
//
// It has support for expansion when configured with a block allocator and
// limited support for out-of-order frees.
//
// Allocation has constant-time best and usual case performance.
// At worst, it is linear according to the number of memory blocks.
//
// Allocation follows a first-fit strategy when there are multiple memory
// blocks.
//
// Freeing has constant-time best and usual case performance.
// At worst, it is linear according to the number of memory blocks and number
// of freed items preceding the last item in a block.
//
// Resizing has constant-time performance, if it's the last item in a block, or
// the new size is smaller. Naturally, this becomes linear-time if there are
// multiple blocks to search for the pointer's owning block. Otherwise, the
// allocator defaults to a combined alloc & free operation internally.
//
// Out-of-order freeing is accomplished by collapsing a run of freed items
// from the last allocation backwards.
//
// Each allocation has an overhead of 8 bytes and any extra bytes to satisfy
// the requested alignment.
import "base:runtime"
ROLLBACK_STACK_DEFAULT_BLOCK_SIZE :: 4 * Megabyte
// This limitation is due to the size of `prev_ptr`, but it is only for the
// head block; any allocation in excess of the allocator's `block_size` is
// valid, so long as the block allocator can handle it.
//
// This is because allocations over the block size are not split up if the item
// within is freed; they are immediately returned to the block allocator.
ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE :: 2 * Gigabyte
// Per-allocation header, packed into the 8 bytes immediately preceding each
// user pointer. It records the block state prior to the allocation so that
// `rb_free` / `rb_rollback_block` can restore it.
Rollback_Stack_Header :: bit_field u64 {
	prev_offset: uintptr | 32, // `block.offset` at the time this item was allocated.
	is_free:     bool    | 1,  // Set by `rb_free`; consumed by `rb_rollback_block`.
	prev_ptr:    uintptr | 31, // Buffer offset of the previous allocation's user pointer.
}
// One memory block in the chain. The struct and its `buffer` are a single
// contiguous allocation (see `rb_make_block` / `rollback_stack_init_buffered`).
Rollback_Stack_Block :: struct {
	next_block: ^Rollback_Stack_Block, // Next block in the chain, nil for the last one.
	last_alloc: rawptr,                // User pointer of the most recent allocation in this block.
	offset:     uintptr,               // Number of bytes of `buffer` currently in use.
	buffer:     []byte,                // Usable memory directly following the struct itself.
}
// Allocator state: the head of the block chain plus the optional backing
// allocator used to grow it.
Rollback_Stack :: struct {
	head:            ^Rollback_Stack_Block,
	block_size:      int,       // Standard size for new blocks; larger allocations get singleton blocks.
	block_allocator: Allocator, // When its procedure is nil, the stack cannot grow.
}
// Reports whether `ptr` lies within the currently-used portion of `block`'s
// buffer. The lower bound is exclusive: a valid user pointer is always
// preceded by its `Rollback_Stack_Header`, so it can never equal `start`.
@(private="file", require_results)
rb_ptr_in_bounds :: proc(block: ^Rollback_Stack_Block, ptr: rawptr) -> bool {
	start := raw_data(block.buffer)
	end := start[block.offset:]
	return start < ptr && ptr <= end
}
// Locates the block that owns `ptr` by walking the block chain.
//
// Returns the owning `block`, its `parent` in the chain (nil when `ptr` is in
// the head block), and the allocation's `header`, which is stored immediately
// before the user pointer. Fails with `.Invalid_Pointer` when no block
// contains `ptr`.
@(private="file", require_results)
rb_find_ptr :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> (
	parent: ^Rollback_Stack_Block,
	block: ^Rollback_Stack_Block,
	header: ^Rollback_Stack_Header,
	err: Allocator_Error,
) {
	for block = stack.head; block != nil; block = block.next_block {
		if rb_ptr_in_bounds(block, ptr) {
			// The header always sits directly before the user pointer.
			header = cast(^Rollback_Stack_Header)(cast(uintptr)ptr - size_of(Rollback_Stack_Header))
			return
		}
		parent = block
	}
	return nil, nil, nil, .Invalid_Pointer
}
// Finds the block whose most recent allocation is `ptr`.
//
// Only the last allocation of each block qualifies; on success the matching
// block and the header stored just before `ptr` are returned.
@(private="file", require_results)
rb_find_last_alloc :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> (
	block: ^Rollback_Stack_Block,
	header: ^Rollback_Stack_Header,
	ok: bool,
) {
	for candidate := stack.head; candidate != nil; candidate = candidate.next_block {
		if candidate.last_alloc != ptr {
			continue
		}
		header_address := cast(uintptr)ptr - size_of(Rollback_Stack_Header)
		return candidate, cast(^Rollback_Stack_Header)header_address, true
	}
	return
}
// Collapses a run of already-freed items at the end of `block`.
//
// Starting from `header` (the header of the block's last allocation), walks
// backwards while the tail item is marked free, restoring `offset` and
// `last_alloc` from each freed item's saved state.
@(private="file")
rb_rollback_block :: proc(block: ^Rollback_Stack_Block, header: ^Rollback_Stack_Header) {
	header := header
	for block.offset > 0 && header.is_free {
		block.offset = header.prev_offset
		block.last_alloc = raw_data(block.buffer)[header.prev_ptr:]
		// Step to the header of what is now the last allocation.
		header = cast(^Rollback_Stack_Header)(raw_data(block.buffer)[header.prev_ptr - size_of(Rollback_Stack_Header):])
	}
}
// Frees the allocation at `ptr`.
//
// Marks the item free; if it is its block's most recent allocation, the
// block's offset is rolled back over it and any preceding run of freed items.
// A non-head block that becomes empty is returned to the block allocator.
// Returns `.Invalid_Pointer` for unknown or already-freed pointers.
@(private="file", require_results)
rb_free :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> Allocator_Error {
	parent, block, header := rb_find_ptr(stack, ptr) or_return
	if header.is_free {
		// Double free.
		return .Invalid_Pointer
	}
	header.is_free = true
	if block.last_alloc == ptr {
		block.offset = header.prev_offset
		rb_rollback_block(block, header)
	}
	// `parent != nil` excludes the head block, which is never released.
	if parent != nil && block.offset == 0 {
		parent.next_block = block.next_block
		runtime.mem_free_with_size(block, size_of(Rollback_Stack_Block) + len(block.buffer), stack.block_allocator)
	}
	return nil
}
// Releases every expansion block back to the block allocator, then resets the
// head block in place so the stack can be reused.
@(private="file")
rb_free_all :: proc(stack: ^Rollback_Stack) {
	block := stack.head.next_block
	for block != nil {
		next := block.next_block
		runtime.mem_free_with_size(block, size_of(Rollback_Stack_Block) + len(block.buffer), stack.block_allocator)
		block = next
	}
	stack.head.next_block = nil
	stack.head.last_alloc = nil
	stack.head.offset = 0
}
// Resizes the allocation at `ptr` to `size` bytes.
//
// Fast path: when `ptr` is the most recent allocation of its block, the item
// is grown or shrunk in place. Otherwise this falls back to a combined
// alloc + copy + free.
@(private="file", require_results)
rb_resize :: proc(stack: ^Rollback_Stack, ptr: rawptr, old_size, size, alignment: int) -> (result: []byte, err: Allocator_Error) {
	if ptr != nil {
		if block, _, ok := rb_find_last_alloc(stack, ptr); ok {
			// `block.offset` should never underflow because it is contingent
			// on `old_size` in the first place, assuming sane arguments.
			assert(block.offset >= cast(uintptr)old_size, "Rollback Stack Allocator received invalid `old_size`.")
			if block.offset + cast(uintptr)size - cast(uintptr)old_size < cast(uintptr)len(block.buffer) {
				// Prevent singleton allocations from fragmenting by forbidding
				// them to shrink, removing the possibility of overflow bugs.
				if len(block.buffer) <= stack.block_size {
					block.offset += cast(uintptr)size - cast(uintptr)old_size
				}
				#no_bounds_check return (cast([^]byte)ptr)[:size], nil
			}
		}
	}
	result = rb_alloc(stack, size, alignment) or_return
	if ptr != nil {
		// Bug fix: the original unconditionally copied from `ptr` and freed
		// it, so resizing from a nil pointer (a plain allocation issued
		// through the Resize mode) returned `.Invalid_Pointer` from
		// `rb_free` despite the allocation having succeeded.
		runtime.mem_copy_non_overlapping(raw_data(result), ptr, old_size)
		err = rb_free(stack, ptr)
	}
	return
}
// Allocates `size` bytes aligned to `alignment`.
//
// Uses a first-fit search over the block chain. When no block has room and a
// block allocator is configured, a new block is appended; a request larger
// than `stack.block_size` gets a dedicated singleton block that is sealed
// against further allocations.
@(private="file", require_results)
rb_alloc :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byte, err: Allocator_Error) {
	parent: ^Rollback_Stack_Block
	for block := stack.head; /**/; block = block.next_block {
		when !ODIN_DISABLE_ASSERT {
			allocated_new_block: bool
		}
		if block == nil {
			// Ran off the end of the chain: try to grow.
			if stack.block_allocator.procedure == nil {
				return nil, .Out_Of_Memory
			}
			// Worst-case space needed to satisfy the alignment request.
			minimum_size_required := size_of(Rollback_Stack_Header) + size + alignment - 1
			new_block_size := max(minimum_size_required, stack.block_size)
			block = rb_make_block(new_block_size, stack.block_allocator) or_return
			parent.next_block = block
			when !ODIN_DISABLE_ASSERT {
				allocated_new_block = true
			}
		}
		start := raw_data(block.buffer)[block.offset:]
		padding := cast(uintptr)calc_padding_with_header(cast(uintptr)start, cast(uintptr)alignment, size_of(Rollback_Stack_Header))
		if block.offset + padding + cast(uintptr)size > cast(uintptr)len(block.buffer) {
			// Not enough room in this block; continue with the next one.
			when !ODIN_DISABLE_ASSERT {
				if allocated_new_block {
					panic("Rollback Stack Allocator allocated a new block but did not use it.")
				}
			}
			parent = block
			continue
		}
		// The header lives inside the padding, just before the user pointer.
		header := cast(^Rollback_Stack_Header)(start[padding - size_of(Rollback_Stack_Header):])
		ptr := start[padding:]
		header^ = {
			prev_offset = block.offset,
			prev_ptr = uintptr(0) if block.last_alloc == nil else cast(uintptr)block.last_alloc - cast(uintptr)raw_data(block.buffer),
			is_free = false,
		}
		block.last_alloc = ptr
		block.offset += padding + cast(uintptr)size
		if len(block.buffer) > stack.block_size {
			// This block exceeds the allocator's standard block size and is considered a singleton.
			// Prevent any further allocations on it.
			block.offset = cast(uintptr)len(block.buffer)
		}
		#no_bounds_check return ptr[:size], nil
	}
	return nil, .Out_Of_Memory
}
// Allocates a `Rollback_Stack_Block` together with a `size`-byte buffer as
// one contiguous allocation from `allocator`; the buffer starts directly
// after the struct.
@(private="file", require_results)
rb_make_block :: proc(size: int, allocator: Allocator) -> (block: ^Rollback_Stack_Block, err: Allocator_Error) {
	buffer := runtime.mem_alloc(size_of(Rollback_Stack_Block) + size, align_of(Rollback_Stack_Block), allocator) or_return
	block = cast(^Rollback_Stack_Block)raw_data(buffer)
	#no_bounds_check block.buffer = buffer[size_of(Rollback_Stack_Block):]
	return
}
// Initializes `stack` over a user-provided `buffer`.
//
// The head block's bookkeeping struct is placed at the start of the buffer;
// the remainder becomes the allocation area. No block allocator is set, so
// the stack cannot grow beyond `buffer`.
rollback_stack_init_buffered :: proc(stack: ^Rollback_Stack, buffer: []byte, location := #caller_location) {
	MIN_SIZE :: size_of(Rollback_Stack_Block) + size_of(Rollback_Stack_Header) + size_of(rawptr)
	assert(len(buffer) >= MIN_SIZE, "User-provided buffer to Rollback Stack Allocator is too small.", location)
	block := cast(^Rollback_Stack_Block)raw_data(buffer)
	block^ = {}
	#no_bounds_check block.buffer = buffer[size_of(Rollback_Stack_Block):]
	stack^ = {}
	stack.head = block
	stack.block_size = len(block.buffer)
}
// Initializes `stack` with a head block of `block_size` bytes taken from
// `block_allocator`; further blocks are allocated from it on demand.
//
// `block_size` must not exceed `ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE` (a
// limitation of the 32-bit `prev_offset` field in the header).
rollback_stack_init_dynamic :: proc(
	stack: ^Rollback_Stack,
	block_size : int = ROLLBACK_STACK_DEFAULT_BLOCK_SIZE,
	block_allocator := context.allocator,
	location := #caller_location,
) -> Allocator_Error {
	assert(block_size >= size_of(Rollback_Stack_Header) + size_of(rawptr), "Rollback Stack Allocator block size is too small.", location)
	when size_of(int) > 4 {
		// It's impossible to specify an argument in excess when your integer
		// size is insufficient; check only on platforms with big enough ints.
		assert(block_size <= ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE, "Rollback Stack Allocators cannot support head blocks larger than 2 gigabytes.", location)
	}
	block := rb_make_block(block_size, block_allocator) or_return
	stack^ = {}
	stack.head = block
	stack.block_size = block_size
	stack.block_allocator = block_allocator
	return nil
}
// Initialize a Rollback Stack from either a fixed buffer or a block allocator.
rollback_stack_init :: proc {
	rollback_stack_init_buffered,
	rollback_stack_init_dynamic,
}
// Tears down `stack`, returning all blocks (including the head) to the block
// allocator. A buffered stack (no block allocator) only has its state zeroed;
// the user-provided buffer is left to its owner.
rollback_stack_destroy :: proc(stack: ^Rollback_Stack) {
	if stack.block_allocator.procedure != nil {
		rb_free_all(stack)
		free(stack.head, stack.block_allocator)
	}
	stack^ = {}
}
// Returns an `Allocator` backed by `stack`; `stack` must outlive any use of
// the returned allocator.
@(require_results)
rollback_stack_allocator :: proc(stack: ^Rollback_Stack) -> Allocator {
	return Allocator {
		data = stack,
		procedure = rollback_stack_allocator_proc,
	}
}
// Allocator procedure for the Rollback Stack; dispatches each allocator mode
// onto the private `rb_*` implementation procedures. `Query_Info` is not
// implemented.
@(require_results)
rollback_stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode,
	size, alignment: int,
	old_memory: rawptr, old_size: int, location := #caller_location,
) -> (result: []byte, err: Allocator_Error) {
	stack := cast(^Rollback_Stack)allocator_data
	switch mode {
	case .Alloc, .Alloc_Non_Zeroed:
		assert(size >= 0, "Size must be positive or zero.", location)
		assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", location)
		result = rb_alloc(stack, size, alignment) or_return
		// Only `.Alloc` promises zeroed memory.
		if mode == .Alloc {
			zero_slice(result)
		}
	case .Free:
		err = rb_free(stack, old_memory)
	case .Free_All:
		rb_free_all(stack)
	case .Resize, .Resize_Non_Zeroed:
		assert(size >= 0, "Size must be positive or zero.", location)
		assert(old_size >= 0, "Old size must be positive or zero.", location)
		assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", location)
		result = rb_resize(stack, old_memory, old_size, size, alignment) or_return
		// On a zeroing resize, only the newly-grown tail needs clearing.
		#no_bounds_check if mode == .Resize && size > old_size {
			zero_slice(result[old_size:])
		}
	case .Query_Features:
		set := (^Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed}
		}
		return nil, nil
	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}
	return
}

36
core/mem/tlsf/LICENSE Normal file
View File

@@ -0,0 +1,36 @@
Original BSD-3 license:
Two Level Segregated Fit memory allocator, version 3.1.
Written by Matthew Conte
http://tlsf.baisoku.org
Based on the original documentation by Miguel Masmano:
http://www.gii.upv.es/tlsf/main/docs
This implementation was written to the specification
of the document, therefore no GPL restrictions apply.
Copyright (c) 2006-2016, Matthew Conte
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL MATTHEW CONTE BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

156
core/mem/tlsf/tlsf.odin Normal file
View File

@@ -0,0 +1,156 @@
/*
Copyright 2024 Jeroen van Rijn <nom@duclavier.com>.
Made available under Odin's BSD-3 license.
List of contributors:
Matt Conte: Original C implementation, see LICENSE file in this package
Jeroen van Rijn: Source port
*/
// package mem_tlsf implements a Two Level Segregated Fit memory allocator.
package mem_tlsf
import "base:runtime"
// Error conditions reported by the TLSF initialization procedures.
Error :: enum byte {
	None                      = 0,
	Invalid_Backing_Allocator = 1,
	Invalid_Alignment         = 2, // Backing buffer not aligned to `ALIGN_SIZE`.
	Backing_Buffer_Too_Small  = 3, // Usable pool smaller than `BLOCK_SIZE_MIN`.
	Backing_Buffer_Too_Large  = 4, // Usable pool larger than `BLOCK_SIZE_MAX`.
	Backing_Allocator_Error   = 5,
}
// TLSF control structure: the two-level segregated free lists plus the
// bitmaps that make free-list queries O(1).
Allocator :: struct {
	// Empty lists point at this block to indicate they are free.
	block_null: Block_Header,
	// Bitmaps for free lists.
	fl_bitmap: u32                                 `fmt:"-"`,
	sl_bitmap: [FL_INDEX_COUNT]u32                 `fmt:"-"`,
	// Head of free lists.
	blocks: [FL_INDEX_COUNT][SL_INDEX_COUNT]^Block_Header `fmt:"-"`,
	// Keep track of pools so we can deallocate them.
	// If `pool.allocator` is blank, we don't do anything.
	// We also use this linked list of pools to report
	// statistics like how much memory is still available,
	// fragmentation, etc.
	pool: Pool,
}
#assert(size_of(Allocator) % ALIGN_SIZE == 0)
// Returns a `runtime.Allocator` backed by the TLSF control structure `t`;
// `t` must outlive any use of the returned allocator.
@(require_results)
allocator :: proc(t: ^Allocator) -> runtime.Allocator {
	return runtime.Allocator{
		procedure = allocator_proc,
		data = t,
	}
}
// Initializes `control` over a caller-owned buffer.
//
// The buffer must be aligned to `ALIGN_SIZE`, and the usable pool (after
// `POOL_OVERHEAD`) must fall within [`BLOCK_SIZE_MIN`, `BLOCK_SIZE_MAX`].
// The caller retains ownership of `buf`.
@(require_results)
init_from_buffer :: proc(control: ^Allocator, buf: []byte) -> Error {
	assert(control != nil)
	if uintptr(raw_data(buf)) % ALIGN_SIZE != 0 {
		return .Invalid_Alignment
	}
	pool_bytes := align_down(len(buf) - POOL_OVERHEAD, ALIGN_SIZE)
	if pool_bytes < BLOCK_SIZE_MIN {
		return .Backing_Buffer_Too_Small
	} else if pool_bytes > BLOCK_SIZE_MAX {
		return .Backing_Buffer_Too_Large
	}
	clear(control)
	return pool_add(control, buf[:])
}
// Initializes `control` with a first pool of roughly `initial_pool_size`
// bytes allocated from `backing`; the pool is recorded in `control.pool` so
// `destroy` can return it.
//
// NOTE(review): `new_pool_size` is not used in this procedure — presumably
// reserved for growth configuration; confirm against the rest of the package.
// NOTE(review): `control.pool` is overwritten wholesale after
// `init_from_buffer` has run — confirm this does not clobber state set up by
// `pool_add`.
@(require_results)
init_from_allocator :: proc(control: ^Allocator, backing: runtime.Allocator, initial_pool_size: int, new_pool_size := 0) -> Error {
	assert(control != nil)
	pool_bytes := align_up(uint(initial_pool_size) + POOL_OVERHEAD, ALIGN_SIZE)
	if pool_bytes < BLOCK_SIZE_MIN {
		return .Backing_Buffer_Too_Small
	} else if pool_bytes > BLOCK_SIZE_MAX {
		return .Backing_Buffer_Too_Large
	}
	buf, backing_err := runtime.make_aligned([]byte, pool_bytes, ALIGN_SIZE, backing)
	if backing_err != nil {
		return .Backing_Allocator_Error
	}
	err := init_from_buffer(control, buf)
	control.pool = Pool{
		data = buf,
		allocator = backing,
	}
	return err
}
// Initialize a TLSF allocator from either a fixed buffer or a backing allocator.
init :: proc{init_from_buffer, init_from_allocator}
// Frees every expansion pool that was allocated through a backing allocator.
//
// NOTE(review): the loop starts at `control.pool.next`, so the head pool's
// `data` (which embeds `control` when created by `init_from_allocator`) is
// not freed here — confirm whether the caller owns that allocation.
destroy :: proc(control: ^Allocator) {
	if control == nil { return }
	// No need to call `pool_remove` or anything, as they're embedded in the backing memory.
	// We do however need to free the `Pool` tracking entities and the backing memory itself.
	// As `Allocator` is embedded in the first backing slice, the `control` pointer will be
	// invalid after this call.
	for p := control.pool.next; p != nil; {
		next := p.next
		// Free the allocation on the backing allocator
		runtime.delete(p.data, p.allocator)
		free(p, p.allocator)
		p = next
	}
}
// Allocator procedure conforming to `runtime.Allocator_Proc`; dispatches each
// allocator mode onto the TLSF implementation. `Query_Info` is not
// implemented.
allocator_proc :: proc(allocator_data: rawptr, mode: runtime.Allocator_Mode,
	size, alignment: int,
	old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, runtime.Allocator_Error) {
	control := (^Allocator)(allocator_data)
	if control == nil {
		return nil, .Invalid_Argument
	}
	switch mode {
	case .Alloc:
		return alloc_bytes(control, uint(size), uint(alignment))
	case .Alloc_Non_Zeroed:
		return alloc_bytes_non_zeroed(control, uint(size), uint(alignment))
	case .Free:
		free_with_size(control, old_memory, uint(old_size))
		return nil, nil
	case .Free_All:
		// Resets the free lists; does not return pool memory to any backing allocator.
		clear(control)
		return nil, nil
	case .Resize:
		return resize(control, old_memory, uint(old_size), uint(size), uint(alignment))
	case .Resize_Non_Zeroed:
		return resize_non_zeroed(control, old_memory, uint(old_size), uint(size), uint(alignment))
	case .Query_Features:
		set := (^runtime.Allocator_Mode_Set)(old_memory)
		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed, .Query_Features}
		}
		return nil, nil
	case .Query_Info:
		return nil, .Mode_Not_Implemented
	}
	return nil, nil
}

View File

@@ -0,0 +1,738 @@
/*
Copyright 2024 Jeroen van Rijn <nom@duclavier.com>.
Made available under Odin's BSD-3 license.
List of contributors:
Matt Conte: Original C implementation, see LICENSE file in this package
Jeroen van Rijn: Source port
*/
package mem_tlsf
import "base:intrinsics"
import "base:runtime"
// import "core:fmt"
// log2 of number of linear subdivisions of block sizes.
// Larger values require more memory in the control structure.
// Values of 4 or 5 are typical.
TLSF_SL_INDEX_COUNT_LOG2 :: #config(TLSF_SL_INDEX_COUNT_LOG2, 5)
// All allocation sizes and addresses are aligned to 4/8 bytes
ALIGN_SIZE_LOG2 :: 3 when size_of(uintptr) == 8 else 2
// We can increase this to support larger allocation sizes,
// at the expense of more overhead in the TLSF structure
FL_INDEX_MAX :: 32 when size_of(uintptr) == 8 else 30
#assert(FL_INDEX_MAX < 36)
ALIGN_SIZE :: 1 << ALIGN_SIZE_LOG2
SL_INDEX_COUNT :: 1 << TLSF_SL_INDEX_COUNT_LOG2
FL_INDEX_SHIFT :: TLSF_SL_INDEX_COUNT_LOG2 + ALIGN_SIZE_LOG2
FL_INDEX_COUNT :: FL_INDEX_MAX - FL_INDEX_SHIFT + 1
SMALL_BLOCK_SIZE :: 1 << FL_INDEX_SHIFT
/*
We support allocations of sizes up to (1 << `FL_INDEX_MAX`) bits.
However, because we linearly subdivide the second-level lists, and
our minimum size granularity is 4 bytes, it doesn't make sense to
create first-level lists for sizes smaller than `SL_INDEX_COUNT` * 4,
or (1 << (`TLSF_SL_INDEX_COUNT_LOG2` + 2)) bytes, as there we will be
trying to split size ranges into more slots than we have available.
Instead, we calculate the minimum threshold size, and place all
blocks below that size into the 0th first-level list.
*/
// SL_INDEX_COUNT must be <= number of bits in sl_bitmap's storage tree
#assert(size_of(uint) * 8 >= SL_INDEX_COUNT)
// Ensure we've properly tuned our sizes.
#assert(ALIGN_SIZE == SMALL_BLOCK_SIZE / SL_INDEX_COUNT)
#assert(size_of(Allocator) % ALIGN_SIZE == 0)
// One backing-memory region managed by the allocator. Pools form a linked
// list so they can be freed (see `destroy`) and inspected for statistics.
Pool :: struct {
	data:      []u8 `fmt:"-"`, // The pool's backing memory.
	allocator: runtime.Allocator, // Allocator that owns `data`; blank when caller-owned.
	next:      ^Pool,
}
/*
Block header structure.
There are several implementation subtleties involved:
- The `prev_phys_block` field is only valid if the previous block is free.
- The `prev_phys_block` field is actually stored at the end of the
previous block. It appears at the beginning of this structure only to
simplify the implementation.
- The `next_free` / `prev_free` fields are only valid if the block is free.
*/
Block_Header :: struct {
	// Stored at the tail of the *previous* block; only valid when that block is free.
	prev_phys_block: ^Block_Header,
	size: uint, // The size of this block, excluding the block header
	// Next and previous free blocks. Only valid while this block is free.
	next_free: ^Block_Header,
	prev_free: ^Block_Header,
}
#assert(offset_of(Block_Header, prev_phys_block) == 0)
/*
Since block sizes are always at least a multiple of 4, the two least
significant bits of the size field are used to store the block status:
- bit 0: whether block is busy or free
- bit 1: whether previous block is busy or free
*/
BLOCK_HEADER_FREE :: uint(1 << 0)
BLOCK_HEADER_PREV_FREE :: uint(1 << 1)
/*
The size of the block header exposed to used blocks is the `size` field.
The `prev_phys_block` field is stored *inside* the previous free block.
*/
BLOCK_HEADER_OVERHEAD :: uint(size_of(uint))
POOL_OVERHEAD :: 2 * BLOCK_HEADER_OVERHEAD
// User data starts directly after the size field in a used block.
BLOCK_START_OFFSET :: offset_of(Block_Header, size) + size_of(Block_Header{}.size)
/*
A free block must be large enough to store its header minus the size of
the `prev_phys_block` field, and no larger than the number of addressable
bits for `FL_INDEX`.
*/
BLOCK_SIZE_MIN :: uint(size_of(Block_Header) - size_of(^Block_Header))
BLOCK_SIZE_MAX :: uint(1) << FL_INDEX_MAX
/*
TLSF achieves O(1) cost for `alloc` and `free` operations by limiting
the search for a free block to a free list of guaranteed size
adequate to fulfill the request, combined with efficient free list
queries using bitmasks and architecture-specific bit-manipulation
routines.
NOTE: TLSF spec relies on ffs/fls returning value 0..31.
*/
// Find-first-set: index of the least significant set bit, or -1 when `word`
// is zero.
@(require_results)
ffs :: proc "contextless" (word: u32) -> (bit: i32) {
	if word == 0 {
		return -1
	}
	return i32(intrinsics.count_trailing_zeros(word))
}
// Find-last-set: index of the most significant set bit, or -1 when `word`
// is zero.
@(require_results)
fls :: proc "contextless" (word: u32) -> (bit: i32) {
	HIGHEST :: i32(size_of(u32)*8 - 1)
	return HIGHEST - i32(intrinsics.count_leading_zeros(word))
}
// `fls` for the native `uint`; -1 when `size` is zero.
@(require_results)
fls_uint :: proc "contextless" (size: uint) -> (bit: i32) {
	HIGHEST :: i32(size_of(uint)*8 - 1)
	return HIGHEST - i32(intrinsics.count_leading_zeros(size))
}
// Size of the block with the two status bits masked off.
@(require_results)
block_size :: proc "contextless" (block: ^Block_Header) -> (size: uint) {
	return block.size &~ (BLOCK_HEADER_FREE | BLOCK_HEADER_PREV_FREE)
}
// Sets the block size while preserving the two status bits.
block_set_size :: proc "contextless" (block: ^Block_Header, size: uint) {
	old_size := block.size
	block.size = size | (old_size & (BLOCK_HEADER_FREE | BLOCK_HEADER_PREV_FREE))
}
// A block reporting size zero is treated as the last block.
@(require_results)
block_is_last :: proc "contextless" (block: ^Block_Header) -> (is_last: bool) {
	return block_size(block) == 0
}
// Test / set / clear the "this block is free" status bit.
@(require_results)
block_is_free :: proc "contextless" (block: ^Block_Header) -> (is_free: bool) {
	return (block.size & BLOCK_HEADER_FREE) == BLOCK_HEADER_FREE
}
block_set_free :: proc "contextless" (block: ^Block_Header) {
	block.size |= BLOCK_HEADER_FREE
}
block_set_used :: proc "contextless" (block: ^Block_Header) {
	block.size &~= BLOCK_HEADER_FREE
}
// Test / set / clear the "previous physical block is free" status bit.
@(require_results)
block_is_prev_free :: proc "contextless" (block: ^Block_Header) -> (is_prev_free: bool) {
	return (block.size & BLOCK_HEADER_PREV_FREE) == BLOCK_HEADER_PREV_FREE
}
block_set_prev_free :: proc "contextless" (block: ^Block_Header) {
	block.size |= BLOCK_HEADER_PREV_FREE
}
block_set_prev_used :: proc "contextless" (block: ^Block_Header) {
	block.size &~= BLOCK_HEADER_PREV_FREE
}
// Header for the block that owns user pointer `ptr`.
@(require_results)
block_from_ptr :: proc(ptr: rawptr) -> (block_ptr: ^Block_Header) {
	return (^Block_Header)(uintptr(ptr) - BLOCK_START_OFFSET)
}
// User pointer for the block at `block`.
@(require_results)
block_to_ptr :: proc(block: ^Block_Header) -> (ptr: rawptr) {
	return rawptr(uintptr(block) + BLOCK_START_OFFSET)
}
// Return location of next block after block of given size.
@(require_results)
offset_to_block :: proc(ptr: rawptr, size: uint) -> (block: ^Block_Header) {
	return (^Block_Header)(uintptr(ptr) + uintptr(size))
}
// As `offset_to_block`, but stepping backwards in memory.
@(require_results)
offset_to_block_backwards :: proc(ptr: rawptr, size: uint) -> (block: ^Block_Header) {
	return (^Block_Header)(uintptr(ptr) - uintptr(size))
}
// Return location of previous block.
@(require_results)
block_prev :: proc(block: ^Block_Header) -> (prev: ^Block_Header) {
	// `prev_phys_block` is only valid while the previous block is free.
	assert(block_is_prev_free(block), "previous block must be free")
	return block.prev_phys_block
}
// Return location of next existing block.
@(require_results)
block_next :: proc(block: ^Block_Header) -> (next: ^Block_Header) {
	return offset_to_block(block_to_ptr(block), block_size(block) - BLOCK_HEADER_OVERHEAD)
}
// Link a new block with its physical neighbor, return the neighbor.
@(require_results)
block_link_next :: proc(block: ^Block_Header) -> (next: ^Block_Header) {
	next = block_next(block)
	next.prev_phys_block = block
	return
}
// Marks `block` free and flags its physical successor accordingly.
block_mark_as_free :: proc(block: ^Block_Header) {
	// Link the block to the next block, first.
	next := block_link_next(block)
	block_set_prev_free(next)
	block_set_free(block)
}
// Marks `block` used and clears the successor's prev-free flag.
block_mark_as_used :: proc(block: ^Block_Header) {
	next := block_next(block)
	block_set_prev_used(next)
	block_set_used(block)
}
// Rounds `x` up to the nearest multiple of `align` (which must be a power of two).
@(require_results)
align_up :: proc(x, align: uint) -> (aligned: uint) {
	assert(0 == (align & (align - 1)), "must align to a power of two")
	mask := align - 1
	return (x + mask) &~ mask
}
// Rounds `x` down to the nearest multiple of `align` (which must be a power of two).
@(require_results)
align_down :: proc(x, align: uint) -> (aligned: uint) {
	assert(0 == (align & (align - 1)), "must align to a power of two")
	mask := align - 1
	return x &~ mask
}
// Rounds `ptr` up to the nearest `align`-byte boundary (power of two).
@(require_results)
align_ptr :: proc(ptr: rawptr, align: uint) -> (aligned: rawptr) {
	assert(0 == (align & (align - 1)), "must align to a power of two")
	mask := uintptr(align) - 1
	return rawptr((uintptr(ptr) + mask) &~ mask)
}
// Adjust an allocation size to be aligned to word size, and no smaller than internal minimum.
// Returns 0 when `size` is 0 or the aligned size would reach `BLOCK_SIZE_MAX`.
// NOTE(review): despite the comment above, nothing here clamps the result up
// to `BLOCK_SIZE_MIN` (the reference C implementation does) — confirm callers
// guarantee the minimum.
@(require_results)
adjust_request_size :: proc(size, align: uint) -> (adjusted: uint) {
	if size == 0 {
		return 0
	}
	// aligned size must not exceed `BLOCK_SIZE_MAX`, or we'll go out of bounds on `sl_bitmap`.
	if aligned := align_up(size, align); aligned < BLOCK_SIZE_MAX {
		adjusted = min(aligned, BLOCK_SIZE_MAX)
	}
	return
}
// Adjust an allocation size to be aligned to word size, and no smaller than internal minimum.
// As `adjust_request_size`, but reports `.Out_Of_Memory` instead of silently
// returning 0 when the aligned size reaches `BLOCK_SIZE_MAX`.
@(require_results)
adjust_request_size_with_err :: proc(size, align: uint) -> (adjusted: uint, err: runtime.Allocator_Error) {
	if size == 0 {
		return 0, nil
	}
	// aligned size must not exceed `BLOCK_SIZE_MAX`, or we'll go out of bounds on `sl_bitmap`.
	if aligned := align_up(size, align); aligned < BLOCK_SIZE_MAX {
		adjusted = min(aligned, BLOCK_SIZE_MAX)
	} else {
		err = .Out_Of_Memory
	}
	return
}
// TLSF utility functions. In most cases these are direct translations of
// the documentation in the research paper.
// Maps a block size to its (first-level, second-level) free-list indices.
// Small sizes all map to first-level list 0, linearly subdivided; larger
// sizes use the MSB position for `fl` and the next
// `TLSF_SL_INDEX_COUNT_LOG2` bits for `sl`.
@(optimization_mode="speed", require_results)
mapping_insert :: proc(size: uint) -> (fl, sl: i32) {
	if size < SMALL_BLOCK_SIZE {
		// Store small blocks in first list.
		sl = i32(size) / (SMALL_BLOCK_SIZE / SL_INDEX_COUNT)
	} else {
		fl = fls_uint(size)
		// The XOR strips the implicit leading one bit from the second-level index.
		sl = i32(size >> (uint(fl) - TLSF_SL_INDEX_COUNT_LOG2)) ~ (1 << TLSF_SL_INDEX_COUNT_LOG2)
		fl -= (FL_INDEX_SHIFT - 1)
	}
	return
}
// Rounds `size` up to the next second-level list boundary so that a search
// starting at the mapped indices only finds blocks that are large enough.
@(optimization_mode="speed", require_results)
mapping_round :: #force_inline proc(size: uint) -> (rounded: uint) {
	if size < SMALL_BLOCK_SIZE {
		return size
	}
	round := uint(1 << (uint(fls_uint(size) - TLSF_SL_INDEX_COUNT_LOG2))) - 1
	return size + round
}
// This version rounds up to the next block size (for allocations)
// before mapping, guaranteeing the mapped list holds only fitting blocks.
@(optimization_mode="speed", require_results)
mapping_search :: proc(size: uint) -> (fl, sl: i32) {
	return mapping_insert(mapping_round(size))
}
// Finds the head of the first non-empty free list at or above indices
// `fli^`/`sli^`, using the bitmaps to skip empty lists in O(1).
// Updates `fli^`/`sli^` to the indices actually chosen so the caller can
// remove the block from that list. Returns nil when memory is exhausted.
@(require_results)
search_suitable_block :: proc(control: ^Allocator, fli, sli: ^i32) -> (block: ^Block_Header) {
	// First, search for a block in the list associated with the given fl/sl index.
	fl := fli^; sl := sli^
	sl_map := control.sl_bitmap[fli^] & (~u32(0) << uint(sl))
	if sl_map == 0 {
		// No block exists. Search in the next largest first-level list.
		fl_map := control.fl_bitmap & (~u32(0) << uint(fl + 1))
		if fl_map == 0 {
			// No free blocks available, memory has been exhausted.
			return {}
		}
		fl = ffs(fl_map)
		fli^ = fl
		sl_map = control.sl_bitmap[fl]
	}
	assert(sl_map != 0, "internal error - second level bitmap is null")
	sl = ffs(sl_map)
	sli^ = sl
	// Return the first block in the free list.
	return control.blocks[fl][sl]
}
// Remove a free block from the free list.
// Unlinks `block` from the doubly-linked list at [fl][sl], and clears the
// second- and first-level bitmap bits when the list becomes empty.
remove_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl: i32) {
	prev := block.prev_free
	next := block.next_free
	assert(prev != nil, "prev_free can not be nil")
	assert(next != nil, "next_free can not be nil")
	next.prev_free = prev
	prev.next_free = next
	// If this block is the head of the free list, set new head.
	if control.blocks[fl][sl] == block {
		control.blocks[fl][sl] = next
		// If the new head is nil, clear the bitmap
		if next == &control.block_null {
			control.sl_bitmap[fl] &~= (u32(1) << uint(sl))
			// If the second bitmap is now empty, clear the fl bitmap
			if control.sl_bitmap[fl] == 0 {
				control.fl_bitmap &~= (u32(1) << uint(fl))
			}
		}
	}
}
// Insert a free block into the free block list.
// Pushes `block` onto the head of the list at [fl][sl] and sets the matching
// bitmap bits.
insert_free_block :: proc(control: ^Allocator, block: ^Block_Header, fl: i32, sl: i32) {
	current := control.blocks[fl][sl]
	assert(current != nil, "free lists cannot have a nil entry")
	assert(block != nil, "cannot insert a nil entry into the free list")
	block.next_free = current
	block.prev_free = &control.block_null
	current.prev_free = block
	assert(block_to_ptr(block) == align_ptr(block_to_ptr(block), ALIGN_SIZE), "block not properly aligned")
	// Insert the new block at the head of the list, and mark the first- and second-level bitmaps appropriately.
	control.blocks[fl][sl] = block
	control.fl_bitmap |= (u32(1) << uint(fl))
	control.sl_bitmap[fl] |= (u32(1) << uint(sl))
}
// Remove a given block from the free list, computing its indices from its size.
block_remove :: proc(control: ^Allocator, block: ^Block_Header) {
	fl, sl := mapping_insert(block_size(block))
	remove_free_block(control, block, fl, sl)
}
// Insert a given block into the free list, computing its indices from its size.
block_insert :: proc(control: ^Allocator, block: ^Block_Header) {
	fl, sl := mapping_insert(block_size(block))
	insert_free_block(control, block, fl, sl)
}
// A block can be split when carving off `size` bytes still leaves
// enough room for a complete block header.
@(require_results)
block_can_split :: proc(block: ^Block_Header, size: uint) -> (can_split: bool) {
	minimum_needed := size + size_of(Block_Header)
	can_split = block_size(block) >= minimum_needed
	return
}
// Split a block into two, the second of which is free.
// The caller keeps the first `size` bytes; the remainder (minus one
// header of overhead) becomes the new free block that is returned.
@(require_results)
block_split :: proc(block: ^Block_Header, size: uint) -> (remaining: ^Block_Header) {
	// Calculate the amount of space left in the remaining block.
	// The new header is placed `size - BLOCK_HEADER_OVERHEAD` bytes past the
	// payload start, so its own payload begins right after the first block's
	// `size` bytes.
	remaining = offset_to_block(block_to_ptr(block), size - BLOCK_HEADER_OVERHEAD)
	remain_size := block_size(block) - (size + BLOCK_HEADER_OVERHEAD)
	assert(block_to_ptr(remaining) == align_ptr(block_to_ptr(remaining), ALIGN_SIZE),
		"remaining block not aligned properly")
	assert(block_size(block) == remain_size + size + BLOCK_HEADER_OVERHEAD)
	block_set_size(remaining, remain_size)
	assert(block_size(remaining) >= BLOCK_SIZE_MIN, "block split with invalid size")
	block_set_size(block, size)
	block_mark_as_free(remaining)
	return remaining
}
// Absorb a free block's storage into an adjacent previous free block.
// `block`'s header ceases to exist; its header bytes become usable
// payload of `prev` (hence the extra BLOCK_HEADER_OVERHEAD).
@(require_results)
block_absorb :: proc(prev: ^Block_Header, block: ^Block_Header) -> (absorbed: ^Block_Header) {
	assert(!block_is_last(prev), "previous block can't be last")
	// Note: Leaves flags untouched.
	prev.size += block_size(block) + BLOCK_HEADER_OVERHEAD
	// Re-establish the following block's back-pointer to the enlarged `prev`.
	_ = block_link_next(prev)
	return prev
}
// Merge a just-freed block with the physically-preceding block when
// that neighbour is free; otherwise return the block unchanged.
@(require_results)
block_merge_prev :: proc(control: ^Allocator, block: ^Block_Header) -> (merged: ^Block_Header) {
	if !block_is_prev_free(block) {
		return block
	}
	neighbour := block_prev(block)
	assert(neighbour != nil, "prev physical block can't be nil")
	assert(block_is_free(neighbour), "prev block is not free though marked as such")
	// The neighbour leaves the free lists before absorbing this block.
	block_remove(control, neighbour)
	return block_absorb(neighbour, block)
}
// Merge a just-freed block with the physically-following block when
// that neighbour is free; otherwise return the block unchanged.
@(require_results)
block_merge_next :: proc(control: ^Allocator, block: ^Block_Header) -> (merged: ^Block_Header) {
	merged = block
	neighbour := block_next(block)
	assert(neighbour != nil, "next physical block can't be nil")
	if !block_is_free(neighbour) {
		return
	}
	assert(!block_is_last(block), "previous block can't be last")
	// The neighbour leaves the free lists before being absorbed.
	block_remove(control, neighbour)
	merged = block_absorb(block, neighbour)
	return
}
// Trim a free block down to `size` bytes; the surplus tail, when large
// enough to stand alone, is returned to the free lists.
block_trim_free :: proc(control: ^Allocator, block: ^Block_Header, size: uint) {
	assert(block_is_free(block), "block must be free")
	if !block_can_split(block, size) {
		return
	}
	tail := block_split(block, size)
	_ = block_link_next(block)
	block_set_prev_free(tail)
	block_insert(control, tail)
}
// Trim a used block down to `size` bytes; the surplus tail is coalesced
// with the next block if that one is free, then returned to the pool.
block_trim_used :: proc(control: ^Allocator, block: ^Block_Header, size: uint) {
	assert(!block_is_free(block), "Block must be used")
	if !block_can_split(block, size) {
		return
	}
	tail := block_split(block, size)
	block_set_prev_used(tail)
	// If the next block is free, we must coalesce.
	tail = block_merge_next(control, tail)
	block_insert(control, tail)
}
// Trim leading block space, return to pool.
// Splits off the leading `size - BLOCK_HEADER_OVERHEAD` bytes of a free
// block as its own free block and returns the remainder, which is the
// part the caller keeps.
@(require_results)
block_trim_free_leading :: proc(control: ^Allocator, block: ^Block_Header, size: uint) -> (remaining: ^Block_Header) {
	remaining = block
	if block_can_split(block, size) {
		// We want the 2nd block.
		remaining = block_split(block, size - BLOCK_HEADER_OVERHEAD)
		block_set_prev_free(remaining)
		_ = block_link_next(block)
		// The leading portion goes back onto the free lists.
		block_insert(control, block)
	}
	return remaining
}
// Locate a free block large enough to hold `size` bytes and remove it
// from the free lists. Returns nil when `size` is 0 or no suitable
// block exists.
@(require_results)
block_locate_free :: proc(control: ^Allocator, size: uint) -> (block: ^Block_Header) {
	fl, sl: i32
	if size != 0 {
		fl, sl = mapping_search(size)
		/*
		`mapping_search` can futz with the size, so for excessively large sizes it can sometimes wind up
		with indices that are off the end of the block array. So, we protect against that here,
		since this is the only call site of `mapping_search`. Note that we don't need to check `sl`,
		as it comes from a modulo operation that guarantees it's always in range.
		*/
		if fl < FL_INDEX_COUNT {
			block = search_suitable_block(control, &fl, &sl)
		}
	}
	if block != nil {
		assert(block_size(block) >= size)
		// `fl`/`sl` are passed by pointer above, so they now index the
		// list the block was actually found in.
		remove_free_block(control, block, fl, sl)
	}
	return block
}
// Turn a located free block into a used block of exactly `size` bytes
// and hand back its payload as a byte slice. A nil block yields an
// empty result with no error.
@(require_results)
block_prepare_used :: proc(control: ^Allocator, block: ^Block_Header, size: uint) -> (res: []byte, err: runtime.Allocator_Error) {
	if block == nil {
		return
	}
	assert(size != 0, "Size must be non-zero")
	block_trim_free(control, block, size)
	block_mark_as_used(block)
	payload := ([^]byte)(block_to_ptr(block))
	res = payload[:size]
	return
}
// Reset the control structure: zero both bitmaps and point every
// free-list head at the null sentinel block.
clear :: proc(control: ^Allocator) {
	control.block_null.next_free = &control.block_null
	control.block_null.prev_free = &control.block_null
	control.fl_bitmap = 0
	for fl in 0..<FL_INDEX_COUNT {
		control.sl_bitmap[fl] = 0
		for sl in 0..<SL_INDEX_COUNT {
			control.blocks[fl][sl] = &control.block_null
		}
	}
}
// Add the backing memory `pool` to the allocator as one large free
// block, terminated by a zero-size used sentinel block.
//
// Returns .Backing_Buffer_Too_Small or .Backing_Buffer_Too_Large when
// the usable size (after overhead and alignment) falls outside
// [BLOCK_SIZE_MIN, BLOCK_SIZE_MAX].
@(require_results)
pool_add :: proc(control: ^Allocator, pool: []u8) -> (err: Error) {
	assert(uintptr(raw_data(pool)) % ALIGN_SIZE == 0, "Added memory must be aligned")
	pool_overhead := POOL_OVERHEAD
	pool_bytes := align_down(len(pool) - pool_overhead, ALIGN_SIZE)
	if pool_bytes < BLOCK_SIZE_MIN {
		return .Backing_Buffer_Too_Small
	} else if pool_bytes > BLOCK_SIZE_MAX {
		return .Backing_Buffer_Too_Large
	}
	// Create the main free block. Offset the start of the block slightly,
	// so that the `prev_phys_block` field falls outside of the pool -
	// it will never be used.
	block := offset_to_block_backwards(raw_data(pool), BLOCK_HEADER_OVERHEAD)
	block_set_size(block, pool_bytes)
	block_set_free(block)
	block_set_prev_used(block)
	block_insert(control, block)
	// Split the block to create a zero-size sentinel block
	next := block_link_next(block)
	block_set_size(next, 0)
	block_set_used(next)
	block_set_prev_free(next)
	return
}
// Detach a previously added pool: its single free block is unlinked
// from the free lists so the backing memory can be released.
pool_remove :: proc(control: ^Allocator, pool: []u8) {
	block := offset_to_block_backwards(raw_data(pool), BLOCK_HEADER_OVERHEAD)
	assert(block_is_free(block), "Block should be free")
	sentinel := block_next(block)
	assert(!block_is_free(sentinel), "Next block should not be free")
	assert(block_size(sentinel) == 0, "Next block size should be zero")
	first_level, second_level := mapping_insert(block_size(block))
	remove_free_block(control, block, first_level, second_level)
}
// Allocate `size` bytes aligned to `align`, without zeroing the memory.
@(require_results)
alloc_bytes_non_zeroed :: proc(control: ^Allocator, size: uint, align: uint) -> (res: []byte, err: runtime.Allocator_Error) {
	assert(control != nil)
	adjust := adjust_request_size(size, ALIGN_SIZE)
	// Minimum gap in front of the aligned pointer so that the gap can be
	// split off as a standalone free block.
	GAP_MINIMUM :: size_of(Block_Header)
	// Over-allocate for alignments beyond ALIGN_SIZE so there is room to
	// shift the payload up to an aligned address.
	size_with_gap := adjust_request_size(adjust + align + GAP_MINIMUM, align)
	aligned_size := size_with_gap if adjust != 0 && align > ALIGN_SIZE else adjust
	// NOTE(review): a zero adjusted size for a non-zero request presumably
	// signals overflow inside `adjust_request_size` — confirm.
	if aligned_size == 0 && size > 0 {
		return nil, .Out_Of_Memory
	}
	block := block_locate_free(control, aligned_size)
	if block == nil {
		return nil, .Out_Of_Memory
	}
	ptr := block_to_ptr(block)
	aligned := align_ptr(ptr, align)
	// Distance between the block's natural payload and the aligned address.
	gap := uint(int(uintptr(aligned)) - int(uintptr(ptr)))
	if gap != 0 && gap < GAP_MINIMUM {
		// The gap is too small to split off; push the aligned address far
		// enough forward that the gap can hold a block header.
		gap_remain := GAP_MINIMUM - gap
		offset := uintptr(max(gap_remain, align))
		next_aligned := rawptr(uintptr(aligned) + offset)
		aligned = align_ptr(next_aligned, align)
		gap = uint(int(uintptr(aligned)) - int(uintptr(ptr)))
	}
	if gap != 0 {
		assert(gap >= GAP_MINIMUM, "gap size too small")
		// Return the leading gap to the free lists; keep the aligned tail.
		block = block_trim_free_leading(control, block, gap)
	}
	return block_prepare_used(control, block, adjust)
}
// Allocate `size` bytes aligned to `align` and return the memory zeroed.
//
// Bug fix: the zeroing was previously guarded by `err != nil`, i.e. the
// memory was zeroed only when the allocation *failed* (on a nil slice),
// and successful allocations were returned uninitialized. Zeroing must
// happen on success.
@(require_results)
alloc_bytes :: proc(control: ^Allocator, size: uint, align: uint) -> (res: []byte, err: runtime.Allocator_Error) {
	res, err = alloc_bytes_non_zeroed(control, size, align)
	if err == nil {
		intrinsics.mem_zero(raw_data(res), len(res))
	}
	return
}
// Return the allocation at `ptr` to the allocator, coalescing it with
// any free physical neighbours. Freeing nil is a no-op.
// `size` is currently ignored.
free_with_size :: proc(control: ^Allocator, ptr: rawptr, size: uint) {
	assert(control != nil)
	if ptr == nil {
		return
	}
	freed := block_from_ptr(ptr)
	assert(!block_is_free(freed), "block already marked as free") // double free
	block_mark_as_free(freed)
	merged := block_merge_prev(control, freed)
	merged = block_merge_next(control, merged)
	block_insert(control, merged)
}
// Resize the allocation at `ptr` to `new_size` bytes, zeroing any newly
// exposed bytes.
// - ptr == nil                  → behaves like alloc_bytes
// - new_size == 0 (ptr != nil)  → behaves like free_with_size
// - otherwise grows in place when the next physical block is free and
//   large enough; else allocates new memory, copies, and frees the old.
@(require_results)
resize :: proc(control: ^Allocator, ptr: rawptr, old_size, new_size: uint, alignment: uint) -> (res: []byte, err: runtime.Allocator_Error) {
	assert(control != nil)
	if ptr != nil && new_size == 0 {
		free_with_size(control, ptr, old_size)
		return
	} else if ptr == nil {
		return alloc_bytes(control, new_size, alignment)
	}
	block := block_from_ptr(ptr)
	next := block_next(block)
	curr_size := block_size(block)
	// Space available if we absorb the (free) next physical block.
	combined := curr_size + block_size(next) + BLOCK_HEADER_OVERHEAD
	adjust := adjust_request_size(new_size, max(ALIGN_SIZE, alignment))
	assert(!block_is_free(block), "block already marked as free") // double free
	min_size := min(curr_size, new_size, old_size)
	// Cannot grow in place: relocate via alloc + copy + free.
	if adjust > curr_size && (!block_is_free(next) || adjust > combined) {
		res = alloc_bytes(control, new_size, alignment) or_return
		if res != nil {
			copy(res, ([^]byte)(ptr)[:min_size])
			free_with_size(control, ptr, curr_size)
		}
		return
	}
	// Growing in place: absorb the free next block.
	if adjust > curr_size {
		_ = block_merge_next(control, block)
		block_mark_as_used(block)
	}
	// Return any surplus tail space to the pool.
	block_trim_used(control, block, adjust)
	res = ([^]byte)(ptr)[:new_size]
	// Zero the bytes beyond what the old allocation provided.
	if min_size < new_size {
		to_zero := ([^]byte)(ptr)[min_size:new_size]
		runtime.mem_zero(raw_data(to_zero), len(to_zero))
	}
	return
}
// Resize the allocation at `ptr` to `new_size` bytes, leaving any newly
// exposed bytes uninitialized.
// - ptr == nil                  → behaves like alloc_bytes_non_zeroed
// - new_size == 0 (ptr != nil)  → behaves like free_with_size
// - otherwise grows in place when the next physical block is free and
//   large enough; else allocates new memory, copies, and frees the old.
@(require_results)
resize_non_zeroed :: proc(control: ^Allocator, ptr: rawptr, old_size, new_size: uint, alignment: uint) -> (res: []byte, err: runtime.Allocator_Error) {
	assert(control != nil)
	if ptr != nil && new_size == 0 {
		free_with_size(control, ptr, old_size)
		return
	} else if ptr == nil {
		return alloc_bytes_non_zeroed(control, new_size, alignment)
	}
	block := block_from_ptr(ptr)
	next := block_next(block)
	curr_size := block_size(block)
	// Space available if we absorb the (free) next physical block.
	combined := curr_size + block_size(next) + BLOCK_HEADER_OVERHEAD
	adjust := adjust_request_size(new_size, max(ALIGN_SIZE, alignment))
	assert(!block_is_free(block), "block already marked as free") // double free
	min_size := min(curr_size, new_size, old_size)
	// Cannot grow in place: relocate via alloc + copy + free.
	if adjust > curr_size && (!block_is_free(next) || adjust > combined) {
		res = alloc_bytes_non_zeroed(control, new_size, alignment) or_return
		if res != nil {
			copy(res, ([^]byte)(ptr)[:min_size])
			free_with_size(control, ptr, old_size)
		}
		return
	}
	// Growing in place: absorb the free next block.
	if adjust > curr_size {
		_ = block_merge_next(control, block)
		block_mark_as_used(block)
	}
	// Return any surplus tail space to the pool.
	block_trim_used(control, block, adjust)
	res = ([^]byte)(ptr)[:new_size]
	return
}

View File

@@ -47,6 +47,7 @@ tracking_allocator_destroy :: proc(t: ^Tracking_Allocator) {
}
// Clear only the current allocation data while keeping the totals intact.
tracking_allocator_clear :: proc(t: ^Tracking_Allocator) {
sync.mutex_lock(&t.mutex)
clear(&t.allocation_map)
@@ -55,6 +56,19 @@ tracking_allocator_clear :: proc(t: ^Tracking_Allocator) {
sync.mutex_unlock(&t.mutex)
}
// Reset all of a Tracking Allocator's allocation data back to zero.
tracking_allocator_reset :: proc(t: ^Tracking_Allocator) {
sync.mutex_lock(&t.mutex)
clear(&t.allocation_map)
clear(&t.bad_free_array)
t.total_memory_allocated = 0
t.total_allocation_count = 0
t.total_memory_freed = 0
t.total_free_count = 0
t.peak_memory_allocated = 0
t.current_memory_allocated = 0
sync.mutex_unlock(&t.mutex)
}
@(require_results)
tracking_allocator :: proc(data: ^Tracking_Allocator) -> Allocator {

View File

@@ -538,7 +538,7 @@ Foreign_Import_Decl :: struct {
import_tok: tokenizer.Token,
name: ^Ident,
collection_name: string,
fullpaths: []string,
fullpaths: []^Expr,
comment: ^Comment_Group,
}
@@ -753,7 +753,7 @@ Array_Type :: struct {
using node: Expr,
open: tokenizer.Pos,
tag: ^Expr,
len: ^Expr, // Ellipsis node for [?]T arrray types, nil for slice types
len: ^Expr, // Ellipsis node for [?]T array types, nil for slice types
close: tokenizer.Pos,
elem: ^Expr,
}

View File

@@ -278,7 +278,9 @@ clone_node :: proc(node: ^Node) -> ^Node {
r.foreign_library = clone(r.foreign_library)
r.body = clone(r.body)
case ^Foreign_Import_Decl:
r.attributes = clone_dynamic_array(r.attributes)
r.name = auto_cast clone(r.name)
r.fullpaths = clone_array(r.fullpaths)
case ^Proc_Group:
r.args = clone(r.args)
case ^Attribute:

View File

@@ -320,6 +320,7 @@ walk :: proc(v: ^Visitor, node: ^Node) {
if n.comment != nil {
walk(v, n.comment)
}
walk_expr_list(v, n.fullpaths)
case ^Proc_Group:
walk_expr_list(v, n.args)

View File

@@ -1190,12 +1190,12 @@ parse_foreign_decl :: proc(p: ^Parser) -> ^ast.Decl {
error(p, name.pos, "illegal foreign import name: '_'")
}
fullpaths: [dynamic]string
fullpaths: [dynamic]^ast.Expr
if allow_token(p, .Open_Brace) {
for p.curr_tok.kind != .Close_Brace &&
p.curr_tok.kind != .EOF {
path := expect_token(p, .String)
append(&fullpaths, path.text)
path := parse_expr(p, false)
append(&fullpaths, path)
allow_token(p, .Comma) or_break
}
@@ -1203,7 +1203,9 @@ parse_foreign_decl :: proc(p: ^Parser) -> ^ast.Decl {
} else {
path := expect_token(p, .String)
reserve(&fullpaths, 1)
append(&fullpaths, path.text)
bl := ast.new(ast.Basic_Lit, path.pos, end_pos(path))
bl.tok = path
append(&fullpaths, bl)
}
if len(fullpaths) == 0 {
@@ -1453,7 +1455,7 @@ parse_stmt :: proc(p: ^Parser) -> ^ast.Stmt {
case "unroll":
return parse_unrolled_for_loop(p, tag)
case "reverse":
stmt := parse_for_stmt(p)
stmt := parse_stmt(p)
if range, is_range := stmt.derived.(^ast.Range_Stmt); is_range {
if range.reverse {
@@ -3513,6 +3515,25 @@ parse_simple_stmt :: proc(p: ^Parser, flags: Stmt_Allow_Flags) -> ^ast.Stmt {
case op.kind == .Colon:
expect_token_after(p, .Colon, "identifier list")
if .Label in flags && len(lhs) == 1 {
is_partial := false
is_reverse := false
partial_token: tokenizer.Token
if p.curr_tok.kind == .Hash {
name := peek_token(p)
if name.kind == .Ident && name.text == "partial" &&
peek_token(p, 1).kind == .Switch {
partial_token = expect_token(p, .Hash)
expect_token(p, .Ident)
is_partial = true
} else if name.kind == .Ident && name.text == "reverse" &&
peek_token(p, 1).kind == .For {
partial_token = expect_token(p, .Hash)
expect_token(p, .Ident)
is_reverse = true
}
}
#partial switch p.curr_tok.kind {
case .Open_Brace, .If, .For, .Switch:
label := lhs[0]
@@ -3527,6 +3548,22 @@ parse_simple_stmt :: proc(p: ^Parser, flags: Stmt_Allow_Flags) -> ^ast.Stmt {
case ^ast.Type_Switch_Stmt: n.label = label
case ^ast.Range_Stmt: n.label = label
}
if is_partial {
#partial switch n in stmt.derived_stmt {
case ^ast.Switch_Stmt: n.partial = true
case ^ast.Type_Switch_Stmt: n.partial = true
case:
error(p, partial_token.pos, "incorrect use of directive, use '%s: #partial switch'", partial_token.text)
}
}
if is_reverse {
#partial switch n in stmt.derived_stmt {
case ^ast.Range_Stmt: n.reverse = true
case:
error(p, partial_token.pos, "incorrect use of directive, use '%s: #reverse for'", partial_token.text)
}
}
}
return stmt

View File

@@ -87,8 +87,12 @@ read_dir :: proc(fd: Handle, n: int, allocator := context.allocator) -> (fi: []F
find_data := &win32.WIN32_FIND_DATAW{}
find_handle := win32.FindFirstFileW(raw_data(wpath_search), find_data)
if find_handle == win32.INVALID_HANDLE_VALUE {
err = Errno(win32.GetLastError())
return dfi[:], err
}
defer win32.FindClose(find_handle)
for n != 0 && find_handle != nil {
for n != 0 {
fi: File_Info
fi = find_data_to_file_info(path, find_data)
if fi.name != "" {

View File

@@ -111,7 +111,7 @@ next_random :: proc(r: ^[2]u64) -> u64 {
@(require_results)
random_string :: proc(buf: []byte) -> string {
@static digits := "0123456789"
@(static, rodata) digits := "0123456789"
u := next_random(&random_string_seed)

View File

@@ -442,7 +442,7 @@ F_GETPATH :: 50 // return the full path of the fd
foreign libc {
@(link_name="__error") __error :: proc() -> ^c.int ---
@(link_name="open") _unix_open :: proc(path: cstring, flags: i32, mode: u16) -> Handle ---
@(link_name="open") _unix_open :: proc(path: cstring, flags: i32, #c_vararg args: ..any) -> Handle ---
@(link_name="close") _unix_close :: proc(handle: Handle) -> c.int ---
@(link_name="read") _unix_read :: proc(handle: Handle, buffer: rawptr, count: c.size_t) -> int ---
@(link_name="write") _unix_write :: proc(handle: Handle, buffer: rawptr, count: c.size_t) -> int ---

View File

@@ -112,15 +112,15 @@ EOWNERDEAD: Errno : 96
O_RDONLY :: 0x00000
O_WRONLY :: 0x00001
O_RDWR :: 0x00002
O_CREATE :: 0x00040
O_EXCL :: 0x00080
O_NOCTTY :: 0x00100
O_TRUNC :: 0x00200
O_NONBLOCK :: 0x00800
O_APPEND :: 0x00400
O_SYNC :: 0x01000
O_ASYNC :: 0x02000
O_CLOEXEC :: 0x80000
O_NONBLOCK :: 0x00004
O_APPEND :: 0x00008
O_ASYNC :: 0x00040
O_SYNC :: 0x00080
O_CREATE :: 0x00200
O_TRUNC :: 0x00400
O_EXCL :: 0x00800
O_NOCTTY :: 0x08000
O_CLOEXEC :: 0100000
SEEK_DATA :: 3
@@ -140,6 +140,8 @@ RTLD_NOLOAD :: 0x02000
MAX_PATH :: 1024
KINFO_FILE_SIZE :: 1392
args := _alloc_command_line_arguments()
Unix_File_Time :: struct {
@@ -191,6 +193,21 @@ OS_Stat :: struct {
lspare: [10]u64,
}
KInfo_File :: struct {
structsize: c.int,
type: c.int,
fd: c.int,
ref_count: c.int,
flags: c.int,
pad0: c.int,
offset: i64,
// NOTE(Feoramund): This field represents a complicated union that I am
// avoiding implementing for now. I only need the path data below.
_union: [336]byte,
path: [MAX_PATH]c.char,
}
// since FreeBSD v12
Dirent :: struct {
@@ -254,6 +271,8 @@ X_OK :: 1 // Test for execute permission
W_OK :: 2 // Test for write permission
R_OK :: 4 // Test for read permission
F_KINFO :: 22
foreign libc {
@(link_name="__error") __errno_location :: proc() -> ^c.int ---
@@ -274,6 +293,7 @@ foreign libc {
@(link_name="unlink") _unix_unlink :: proc(path: cstring) -> c.int ---
@(link_name="rmdir") _unix_rmdir :: proc(path: cstring) -> c.int ---
@(link_name="mkdir") _unix_mkdir :: proc(path: cstring, mode: mode_t) -> c.int ---
@(link_name="fcntl") _unix_fcntl :: proc(fd: Handle, cmd: c.int, arg: uintptr) -> c.int ---
@(link_name="fdopendir") _unix_fdopendir :: proc(fd: Handle) -> Dir ---
@(link_name="closedir") _unix_closedir :: proc(dirp: Dir) -> c.int ---
@@ -365,7 +385,7 @@ seek :: proc(fd: Handle, offset: i64, whence: int) -> (i64, Errno) {
}
file_size :: proc(fd: Handle) -> (i64, Errno) {
s, err := fstat(fd)
s, err := _fstat(fd)
if err != ERROR_NONE {
return -1, err
}
@@ -591,9 +611,26 @@ _readlink :: proc(path: string) -> (string, Errno) {
return "", Errno{}
}
// XXX FreeBSD
absolute_path_from_handle :: proc(fd: Handle) -> (string, Errno) {
return "", Errno(ENOSYS)
// NOTE(Feoramund): The situation isn't ideal, but this was the best way I
// could find to implement this. There are a couple outstanding bug reports
// regarding the desire to retrieve an absolute path from a handle, but to
// my knowledge, there hasn't been any work done on it.
//
// https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=198570
//
// This may be unreliable, according to a comment from 2023.
kinfo: KInfo_File
kinfo.structsize = KINFO_FILE_SIZE
res := _unix_fcntl(fd, F_KINFO, cast(uintptr)&kinfo)
if res == -1 {
return "", Errno(get_last_error())
}
path := strings.clone_from_cstring_bounded(cast(cstring)&kinfo.path[0], len(kinfo.path))
return path, ERROR_NONE
}
absolute_path_from_relative :: proc(rel: string) -> (path: string, err: Errno) {

View File

@@ -5,7 +5,6 @@ foreign import libc "system:c"
import "base:runtime"
import "core:strings"
import "core:sys/unix"
import "core:c"
Handle :: distinct i32
@@ -328,6 +327,11 @@ foreign dl {
@(link_name="dlerror") _unix_dlerror :: proc() -> cstring ---
}
@(private)
foreign libc {
_lwp_self :: proc() -> i32 ---
}
// NOTE(phix): Perhaps share the following functions with FreeBSD if they turn out to be the same in the end.
is_path_separator :: proc(r: rune) -> bool {
@@ -721,7 +725,7 @@ exit :: proc "contextless" (code: int) -> ! {
}
current_thread_id :: proc "contextless" () -> int {
return cast(int) unix.pthread_self()
return int(_lwp_self())
}
dlopen :: proc(filename: string, flags: int) -> rawptr {

View File

@@ -56,7 +56,7 @@ foreign libc {
@(link_name="free") _unix_free :: proc(ptr: rawptr) ---
}
when ODIN_OS == .Darwin {
when ODIN_OS == .Darwin || ODIN_OS == .FreeBSD {
@(private)
foreign libc {
@(link_name="__error") __error :: proc() -> ^i32 ---

49
core/simd/x86/aes.odin Normal file
View File

@@ -0,0 +1,49 @@
//+build i386, amd64
package simd_x86
@(require_results, enable_target_feature = "aes")
_mm_aesdec :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
return aesdec(a, b)
}
@(require_results, enable_target_feature = "aes")
_mm_aesdeclast :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
return aesdeclast(a, b)
}
@(require_results, enable_target_feature = "aes")
_mm_aesenc :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
return aesenc(a, b)
}
@(require_results, enable_target_feature = "aes")
_mm_aesenclast :: #force_inline proc "c" (a, b: __m128i) -> __m128i {
return aesenclast(a, b)
}
@(require_results, enable_target_feature = "aes")
_mm_aesimc :: #force_inline proc "c" (a: __m128i) -> __m128i {
return aesimc(a)
}
@(require_results, enable_target_feature = "aes")
_mm_aeskeygenassist :: #force_inline proc "c" (a: __m128i, $IMM8: u8) -> __m128i {
return aeskeygenassist(a, u8(IMM8))
}
@(private, default_calling_convention = "none")
foreign _ {
@(link_name = "llvm.x86.aesni.aesdec")
aesdec :: proc(a, b: __m128i) -> __m128i ---
@(link_name = "llvm.x86.aesni.aesdeclast")
aesdeclast :: proc(a, b: __m128i) -> __m128i ---
@(link_name = "llvm.x86.aesni.aesenc")
aesenc :: proc(a, b: __m128i) -> __m128i ---
@(link_name = "llvm.x86.aesni.aesenclast")
aesenclast :: proc(a, b: __m128i) -> __m128i ---
@(link_name = "llvm.x86.aesni.aesimc")
aesimc :: proc(a: __m128i) -> __m128i ---
@(link_name = "llvm.x86.aesni.aeskeygenassist")
aeskeygenassist :: proc(a: __m128i, imm8: u8) -> __m128i ---
}

105
core/slice/permute.odin Normal file
View File

@@ -0,0 +1,105 @@
package slice
import "base:runtime"
// An in-place permutation iterator.
Permutation_Iterator :: struct($T: typeid) {
index: int,
slice: []T,
counters: []int,
}
/*
Make an iterator to permute a slice in-place.
*Allocates Using Provided Allocator*
This procedure allocates some state to assist in permutation and does not make
a copy of the underlying slice. If you want to permute a slice without altering
the underlying data, use `clone` to create a copy, then permute that instead.
Inputs:
- slice: The slice to permute.
- allocator: (default is context.allocator)
Returns:
- iter: The iterator, to be passed to `permute`.
- error: An `Allocator_Error`, if allocation failed.
*/
make_permutation_iterator :: proc(
slice: []$T,
allocator := context.allocator,
) -> (
iter: Permutation_Iterator(T),
error: runtime.Allocator_Error,
) #optional_allocator_error {
iter.slice = slice
iter.counters = make([]int, len(iter.slice), allocator) or_return
return
}
/*
Free the state allocated by `make_permutation_iterator`.
Inputs:
- iter: The iterator created by `make_permutation_iterator`.
- allocator: The allocator used to create the iterator. (default is context.allocator)
*/
destroy_permutation_iterator :: proc(
iter: Permutation_Iterator($T),
allocator := context.allocator,
) {
delete(iter.counters, allocator = allocator)
}
/*
Permute a slice in-place.
Note that the first iteration will always be the original, unpermuted slice.
Inputs:
- iter: The iterator created by `make_permutation_iterator`.
Returns:
- ok: True if the permutation succeeded, false if the iteration is complete.
*/
permute :: proc(iter: ^Permutation_Iterator($T)) -> (ok: bool) {
// This is an iterative, resumable implementation of Heap's algorithm.
//
// The original algorithm was described by B. R. Heap as "Permutations by
// interchanges" in The Computer Journal, 1963.
//
// This implementation is based on the nonrecursive version described by
// Robert Sedgewick in "Permutation Generation Methods" which was published
// in ACM Computing Surveys in 1977.
i := iter.index
if i == 0 {
iter.index = 1
return true
}
n := len(iter.counters)
#no_bounds_check for i < n {
if iter.counters[i] < i {
if i & 1 == 0 {
iter.slice[0], iter.slice[i] = iter.slice[i], iter.slice[0]
} else {
iter.slice[iter.counters[i]], iter.slice[i] = iter.slice[i], iter.slice[iter.counters[i]]
}
iter.counters[i] += 1
i = 1
break
} else {
iter.counters[i] = 0
i += 1
}
}
if i == n {
return false
}
iter.index = i
return true
}

Some files were not shown because too many files have changed in this diff Show More