diff --git a/core/mem/alloc.odin b/core/mem/alloc.odin index e51d971e1..558e810e3 100644 --- a/core/mem/alloc.odin +++ b/core/mem/alloc.odin @@ -63,30 +63,58 @@ DEFAULT_PAGE_SIZE :: 4 * 1024 @(require_results) -alloc :: proc(size: int, alignment: int = DEFAULT_ALIGNMENT, allocator := context.allocator, loc := #caller_location) -> (rawptr, Allocator_Error) { +alloc :: proc( + size: int, + alignment: int = DEFAULT_ALIGNMENT, + allocator := context.allocator, + loc := #caller_location, +) -> (rawptr, Allocator_Error) { data, err := runtime.mem_alloc(size, alignment, allocator, loc) return raw_data(data), err } @(require_results) -alloc_bytes :: proc(size: int, alignment: int = DEFAULT_ALIGNMENT, allocator := context.allocator, loc := #caller_location) -> ([]byte, Allocator_Error) { +alloc_bytes :: proc( + size: int, + alignment: int = DEFAULT_ALIGNMENT, + allocator := context.allocator, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { return runtime.mem_alloc(size, alignment, allocator, loc) } @(require_results) -alloc_bytes_non_zeroed :: proc(size: int, alignment: int = DEFAULT_ALIGNMENT, allocator := context.allocator, loc := #caller_location) -> ([]byte, Allocator_Error) { +alloc_bytes_non_zeroed :: proc( + size: int, + alignment: int = DEFAULT_ALIGNMENT, + allocator := context.allocator, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { return runtime.mem_alloc_non_zeroed(size, alignment, allocator, loc) } -free :: proc(ptr: rawptr, allocator := context.allocator, loc := #caller_location) -> Allocator_Error { +free :: proc( + ptr: rawptr, + allocator := context.allocator, + loc := #caller_location, +) -> Allocator_Error { return runtime.mem_free(ptr, allocator, loc) } -free_with_size :: proc(ptr: rawptr, byte_count: int, allocator := context.allocator, loc := #caller_location) -> Allocator_Error { +free_with_size :: proc( + ptr: rawptr, + byte_count: int, + allocator := context.allocator, + loc := #caller_location, +) -> Allocator_Error { return runtime.mem_free_with_size(ptr, byte_count, allocator, loc) } -free_bytes :: proc(bytes: []byte, allocator := context.allocator, loc := #caller_location) -> Allocator_Error { +free_bytes :: proc( + bytes: []byte, + allocator := context.allocator, + loc := #caller_location, +) -> Allocator_Error { return runtime.mem_free_bytes(bytes, allocator, loc) } @@ -95,13 +123,26 @@ free_all :: proc(allocator := context.allocator, loc := #caller_location) -> All } @(require_results) -resize :: proc(ptr: rawptr, old_size, new_size: int, alignment: int = DEFAULT_ALIGNMENT, allocator := context.allocator, loc := #caller_location) -> (rawptr, Allocator_Error) { +resize :: proc( + ptr: rawptr, + old_size: int, + new_size: int, + alignment: int = DEFAULT_ALIGNMENT, + allocator := context.allocator, + loc := #caller_location, +) -> (rawptr, Allocator_Error) { data, err := runtime.mem_resize(ptr, old_size, new_size, alignment, allocator, loc) return raw_data(data), err } @(require_results) -resize_bytes :: proc(old_data: []byte, new_size: int, alignment: int = DEFAULT_ALIGNMENT, allocator := context.allocator, loc := #caller_location) -> ([]byte, Allocator_Error) { +resize_bytes :: proc( + old_data: []byte, + new_size: int, + alignment: int = DEFAULT_ALIGNMENT, + allocator := context.allocator, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { return runtime.mem_resize(raw_data(old_data), len(old_data), new_size, alignment, allocator, loc) } @@ -115,7 +156,11 @@ query_features :: proc(allocator: Allocator, loc := #caller_location) -> 
(set: A } @(require_results) -query_info :: proc(pointer: rawptr, allocator: Allocator, loc := #caller_location) -> (props: Allocator_Query_Info) { +query_info :: proc( + pointer: rawptr, + allocator: Allocator, + loc := #caller_location, +) -> (props: Allocator_Query_Info) { props.pointer = pointer if allocator.procedure != nil { allocator.procedure(allocator.data, .Query_Info, 0, 0, &props, 0, loc) @@ -123,25 +168,44 @@ query_info :: proc(pointer: rawptr, allocator: Allocator, loc := #caller_locatio return } - - -delete_string :: proc(str: string, allocator := context.allocator, loc := #caller_location) -> Allocator_Error { +delete_string :: proc( + str: string, + allocator := context.allocator, + loc := #caller_location, +) -> Allocator_Error { return runtime.delete_string(str, allocator, loc) } -delete_cstring :: proc(str: cstring, allocator := context.allocator, loc := #caller_location) -> Allocator_Error { + +delete_cstring :: proc( + str: cstring, + allocator := context.allocator, + loc := #caller_location, +) -> Allocator_Error { return runtime.delete_cstring(str, allocator, loc) } -delete_dynamic_array :: proc(array: $T/[dynamic]$E, loc := #caller_location) -> Allocator_Error { + +delete_dynamic_array :: proc( + array: $T/[dynamic]$E, + loc := #caller_location, +) -> Allocator_Error { return runtime.delete_dynamic_array(array, loc) } -delete_slice :: proc(array: $T/[]$E, allocator := context.allocator, loc := #caller_location) -> Allocator_Error { + +delete_slice :: proc( + array: $T/[]$E, + allocator := context.allocator, + loc := #caller_location, +) -> Allocator_Error { return runtime.delete_slice(array, allocator, loc) } -delete_map :: proc(m: $T/map[$K]$V, loc := #caller_location) -> Allocator_Error { + +delete_map :: proc( + m: $T/map[$K]$V, + loc := #caller_location, +) -> Allocator_Error { return runtime.delete_map(m, loc) } - delete :: proc{ delete_string, delete_cstring, @@ -150,46 +214,102 @@ delete :: proc{ delete_map, } - @(require_results) -new :: proc($T: typeid, allocator := context.allocator, loc := #caller_location) -> (^T, Allocator_Error) { +new :: proc( + $T: typeid, + allocator := context.allocator, + loc := #caller_location, +) -> (^T, Allocator_Error) { return new_aligned(T, align_of(T), allocator, loc) } + @(require_results) -new_aligned :: proc($T: typeid, alignment: int, allocator := context.allocator, loc := #caller_location) -> (t: ^T, err: Allocator_Error) { +new_aligned :: proc( + $T: typeid, + alignment: int, + allocator := context.allocator, + loc := #caller_location, +) -> (t: ^T, err: Allocator_Error) { return runtime.new_aligned(T, alignment, allocator, loc) } + @(require_results) -new_clone :: proc(data: $T, allocator := context.allocator, loc := #caller_location) -> (t: ^T, err: Allocator_Error) { +new_clone :: proc( + data: $T, + allocator := context.allocator, + loc := #caller_location, +) -> (t: ^T, err: Allocator_Error) { return runtime.new_clone(data, allocator, loc) } @(require_results) -make_aligned :: proc($T: typeid/[]$E, #any_int len: int, alignment: int, allocator := context.allocator, loc := #caller_location) -> (slice: T, err: Allocator_Error) { +make_aligned :: proc( + $T: typeid/[]$E, + #any_int len: int, + alignment: int, + allocator := context.allocator, + loc := #caller_location, +) -> (slice: T, err: Allocator_Error) { return runtime.make_aligned(T, len, alignment, allocator, loc) } + @(require_results) -make_slice :: proc($T: typeid/[]$E, #any_int len: int, allocator := context.allocator, loc := #caller_location) -> (T, 
Allocator_Error) {
+make_slice :: proc(
+	$T: typeid/[]$E,
+	#any_int len: int,
+	allocator := context.allocator,
+	loc := #caller_location,
+) -> (T, Allocator_Error) {
 	return runtime.make_slice(T, len, allocator, loc)
 }
+
 @(require_results)
-make_dynamic_array :: proc($T: typeid/[dynamic]$E, allocator := context.allocator, loc := #caller_location) -> (T, Allocator_Error) {
+make_dynamic_array :: proc(
+	$T: typeid/[dynamic]$E,
+	allocator := context.allocator,
+	loc := #caller_location,
+) -> (T, Allocator_Error) {
 	return runtime.make_dynamic_array(T, allocator, loc)
 }
+
 @(require_results)
-make_dynamic_array_len :: proc($T: typeid/[dynamic]$E, #any_int len: int, allocator := context.allocator, loc := #caller_location) -> (T, Allocator_Error) {
+make_dynamic_array_len :: proc(
+	$T: typeid/[dynamic]$E,
+	#any_int len: int,
+	allocator := context.allocator,
+	loc := #caller_location,
+) -> (T, Allocator_Error) {
 	return runtime.make_dynamic_array_len_cap(T, len, len, allocator, loc)
 }
+
 @(require_results)
-make_dynamic_array_len_cap :: proc($T: typeid/[dynamic]$E, #any_int len: int, #any_int cap: int, allocator := context.allocator, loc := #caller_location) -> (array: T, err: Allocator_Error) {
+make_dynamic_array_len_cap :: proc(
+	$T: typeid/[dynamic]$E,
+	#any_int len: int,
+	#any_int cap: int,
+	allocator := context.allocator,
+	loc := #caller_location,
+) -> (array: T, err: Allocator_Error) {
 	return runtime.make_dynamic_array_len_cap(T, len, cap, allocator, loc)
 }
+
 @(require_results)
-make_map :: proc($T: typeid/map[$K]$E, #any_int cap: int = 1<<runtime.MAP_MIN_LOG2_CAPACITY, allocator := context.allocator, loc := #caller_location) -> (m: T, err: Allocator_Error) {
+make_map :: proc(
+	$T: typeid/map[$K]$E,
+	#any_int cap: int = 1<<runtime.MAP_MIN_LOG2_CAPACITY,
+	allocator := context.allocator,
+	loc := #caller_location,
+) -> (m: T, err: Allocator_Error) {
 	return runtime.make_map(T, cap, allocator, loc)
 }
+
 @(require_results)
-make_multi_pointer :: proc($T: typeid/[^]$E, #any_int len: int, allocator := context.allocator, loc := #caller_location) -> (mp: T, err: Allocator_Error) {
+make_multi_pointer :: proc(
+	$T: typeid/[^]$E,
+	#any_int len: int,
+	allocator := context.allocator,
+	loc := #caller_location,
+) -> (mp: T, err: Allocator_Error) {
 	return runtime.make_multi_pointer(T, len, allocator, loc)
 }
@@ -202,26 +322,58 @@ make :: proc{
 	make_multi_pointer,
 }
 
-
 @(require_results)
-default_resize_align :: proc(old_memory: rawptr, old_size, new_size, alignment: int, allocator := context.allocator, loc := #caller_location) -> (res: rawptr, err: Allocator_Error) {
+default_resize_align :: proc(
+	old_memory: rawptr,
+	old_size: int,
+	new_size: int,
+	alignment: int,
+	allocator := context.allocator,
+	loc := #caller_location,
+) -> (res: rawptr, err: Allocator_Error) {
 	data: []byte
-	data, err = default_resize_bytes_align(([^]byte)(old_memory)[:old_size], new_size, alignment, allocator, loc)
+	data, err = default_resize_bytes_align(
+		([^]byte)(old_memory)[:old_size],
+		new_size,
+		alignment,
+		allocator,
+		loc,
+	)
 	res = raw_data(data)
 	return
 }
 
 @(require_results)
-default_resize_bytes_align_non_zeroed :: proc(old_data: []byte, new_size, alignment: int, allocator := context.allocator, loc := #caller_location) -> ([]byte, Allocator_Error) {
+default_resize_bytes_align_non_zeroed :: proc(
+	old_data: []byte,
+	new_size: int,
+	alignment: int,
+	allocator := context.allocator,
+	loc := #caller_location,
+) -> ([]byte, Allocator_Error) {
 	return _default_resize_bytes_align(old_data, new_size, alignment, false, allocator, loc)
 }
+
 @(require_results)
-default_resize_bytes_align :: proc(old_data: []byte, new_size, alignment: int, allocator := context.allocator, loc :=
#caller_location) -> ([]byte, Allocator_Error) { +default_resize_bytes_align :: proc( + old_data: []byte, + new_size: int, + alignment: int, + allocator := context.allocator, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { return _default_resize_bytes_align(old_data, new_size, alignment, true, allocator, loc) } @(require_results) -_default_resize_bytes_align :: #force_inline proc(old_data: []byte, new_size, alignment: int, should_zero: bool, allocator := context.allocator, loc := #caller_location) -> ([]byte, Allocator_Error) { +_default_resize_bytes_align :: #force_inline proc( + old_data: []byte, + new_size: int, + alignment: int, + should_zero: bool, + allocator := context.allocator, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { old_memory := raw_data(old_data) old_size := len(old_data) if old_memory == nil { diff --git a/core/mem/allocators.odin b/core/mem/allocators.odin index a5b93ad05..7bc1a6d77 100644 --- a/core/mem/allocators.odin +++ b/core/mem/allocators.odin @@ -3,9 +3,14 @@ package mem import "base:intrinsics" import "base:runtime" -nil_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode, - size, alignment: int, - old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) { +nil_allocator_proc :: proc( + allocator_data: rawptr, + mode: Allocator_Mode, + size, alignment: int, + old_memory: rawptr, + old_size: int, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { return nil, nil } @@ -16,8 +21,6 @@ nil_allocator :: proc() -> Allocator { } } -// Custom allocators - Arena :: struct { data: []byte, offset: int, @@ -30,7 +33,6 @@ Arena_Temp_Memory :: struct { prev_offset: int, } - arena_init :: proc(a: ^Arena, data: []byte) { a.data = data a.offset = 0 @@ -54,9 +56,15 @@ arena_allocator :: proc(arena: ^Arena) -> Allocator { } } -arena_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode, - size, alignment: int, - old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) { +arena_allocator_proc :: proc( + allocator_data: rawptr, + mode: Allocator_Mode, + size: int, + alignment: int, + old_memory: rawptr, + old_size: int, + location := #caller_location, +) -> ([]byte, Allocator_Error) { arena := cast(^Arena)allocator_data switch mode { @@ -120,8 +128,6 @@ end_arena_temp_memory :: proc(tmp: Arena_Temp_Memory) { tmp.arena.temp_count -= 1 } - - Scratch_Allocator :: struct { data: []byte, curr_offset: int, @@ -151,9 +157,14 @@ scratch_allocator_destroy :: proc(s: ^Scratch_Allocator) { s^ = {} } -scratch_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode, - size, alignment: int, - old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) { +scratch_allocator_proc :: proc( + allocator_data: rawptr, + mode: Allocator_Mode, + size, alignment: int, + old_memory: rawptr, + old_size: int, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { s := (^Scratch_Allocator)(allocator_data) @@ -299,10 +310,6 @@ scratch_allocator :: proc(allocator: ^Scratch_Allocator) -> Allocator { } } - - - - Stack_Allocation_Header :: struct { prev_offset: int, padding: int, @@ -339,34 +346,44 @@ stack_allocator :: proc(stack: ^Stack) -> Allocator { } } - -stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode, - size, alignment: int, - old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) { +stack_allocator_proc :: proc( + allocator_data: rawptr, + mode: Allocator_Mode, 
+ size: int, + alignment: int, + old_memory: rawptr, + old_size: int, + location := #caller_location, +) -> ([]byte, Allocator_Error) { s := cast(^Stack)allocator_data if s.data == nil { return nil, .Invalid_Argument } - raw_alloc :: proc(s: ^Stack, size, alignment: int, zero_memory: bool) -> ([]byte, Allocator_Error) { + raw_alloc :: proc( + s: ^Stack, + size: int, + alignment: int, + zero_memory: bool, + ) -> ([]byte, Allocator_Error) { curr_addr := uintptr(raw_data(s.data)) + uintptr(s.curr_offset) - padding := calc_padding_with_header(curr_addr, uintptr(alignment), size_of(Stack_Allocation_Header)) + padding := calc_padding_with_header( + curr_addr, + uintptr(alignment), + size_of(Stack_Allocation_Header), + ) if s.curr_offset + padding + size > len(s.data) { return nil, .Out_Of_Memory } s.prev_offset = s.curr_offset s.curr_offset += padding - next_addr := curr_addr + uintptr(padding) header := (^Stack_Allocation_Header)(next_addr - size_of(Stack_Allocation_Header)) header.padding = padding header.prev_offset = s.prev_offset - s.curr_offset += size - s.peak_used = max(s.peak_used, s.curr_offset) - if zero_memory { zero(rawptr(next_addr), size) } @@ -467,12 +484,6 @@ stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode, return nil, nil } - - - - - - Small_Stack_Allocation_Header :: struct { padding: u8, } @@ -505,9 +516,14 @@ small_stack_allocator :: proc(stack: ^Small_Stack) -> Allocator { } } -small_stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode, - size, alignment: int, - old_memory: rawptr, old_size: int, location := #caller_location) -> ([]byte, Allocator_Error) { +small_stack_allocator_proc :: proc( + allocator_data: rawptr, + mode: Allocator_Mode, + size, alignment: int, + old_memory: rawptr, + old_size: int, + location := #caller_location, +) -> ([]byte, Allocator_Error) { s := cast(^Small_Stack)allocator_data if s.data == nil { @@ -612,10 +628,6 @@ small_stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode, return nil, nil } - - - - Dynamic_Pool :: struct { block_size: int, out_band_size: int, @@ -632,15 +644,18 @@ Dynamic_Pool :: struct { block_allocator: Allocator, } - DYNAMIC_POOL_BLOCK_SIZE_DEFAULT :: 65536 DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT :: 6554 - - -dynamic_pool_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode, - size, alignment: int, - old_memory: rawptr, old_size: int, loc := #caller_location) -> ([]byte, Allocator_Error) { +dynamic_pool_allocator_proc :: proc( + allocator_data: rawptr, + mode: Allocator_Mode, + size: int, + alignment: int, + old_memory: rawptr, + old_size: int, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { pool := (^Dynamic_Pool)(allocator_data) switch mode { @@ -689,19 +704,21 @@ dynamic_pool_allocator :: proc(pool: ^Dynamic_Pool) -> Allocator { } } -dynamic_pool_init :: proc(pool: ^Dynamic_Pool, - block_allocator := context.allocator, - array_allocator := context.allocator, - block_size := DYNAMIC_POOL_BLOCK_SIZE_DEFAULT, - out_band_size := DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT, - alignment := 8) { - pool.block_size = block_size - pool.out_band_size = out_band_size - pool.alignment = alignment +dynamic_pool_init :: proc( + pool: ^Dynamic_Pool, + block_allocator := context.allocator, + array_allocator := context.allocator, + block_size := DYNAMIC_POOL_BLOCK_SIZE_DEFAULT, + out_band_size := DYNAMIC_POOL_OUT_OF_BAND_SIZE_DEFAULT, + alignment := 8, +) { + pool.block_size = block_size + pool.out_band_size = out_band_size + pool.alignment = alignment 
pool.block_allocator = block_allocator pool.out_band_allocations.allocator = array_allocator - pool. unused_blocks.allocator = array_allocator - pool. used_blocks.allocator = array_allocator + pool.unused_blocks.allocator = array_allocator + pool.used_blocks.allocator = array_allocator } dynamic_pool_destroy :: proc(pool: ^Dynamic_Pool) { @@ -709,11 +726,9 @@ dynamic_pool_destroy :: proc(pool: ^Dynamic_Pool) { delete(pool.unused_blocks) delete(pool.used_blocks) delete(pool.out_band_allocations) - zero(pool, size_of(pool^)) } - @(require_results) dynamic_pool_alloc :: proc(pool: ^Dynamic_Pool, bytes: int) -> (rawptr, Allocator_Error) { data, err := dynamic_pool_alloc_bytes(pool, bytes) @@ -736,9 +751,14 @@ dynamic_pool_alloc_bytes :: proc(p: ^Dynamic_Pool, bytes: int) -> ([]byte, Alloc new_block = pop(&p.unused_blocks) } else { data: []byte - data, err = p.block_allocator.procedure(p.block_allocator.data, Allocator_Mode.Alloc, - p.block_size, p.alignment, - nil, 0) + data, err = p.block_allocator.procedure( + p.block_allocator.data, + Allocator_Mode.Alloc, + p.block_size, + p.alignment, + nil, + 0, + ) new_block = raw_data(data) } @@ -808,10 +828,14 @@ dynamic_pool_free_all :: proc(p: ^Dynamic_Pool) { clear(&p.unused_blocks) } - -panic_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode, - size, alignment: int, - old_memory: rawptr, old_size: int,loc := #caller_location) -> ([]byte, Allocator_Error) { +panic_allocator_proc :: proc( + allocator_data: rawptr, + mode: Allocator_Mode, + size, alignment: int, + old_memory: rawptr, + old_size: int, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { switch mode { case .Alloc: @@ -859,11 +883,6 @@ panic_allocator :: proc() -> Allocator { } } - - - - - Buddy_Block :: struct #align(align_of(uint)) { size: uint, is_free: bool, @@ -929,7 +948,6 @@ buddy_block_coalescence :: proc(head, tail: ^Buddy_Block) { } } - @(require_results) buddy_block_find_best :: proc(head, tail: ^Buddy_Block, size: uint) -> ^Buddy_Block { assert(size != 0) @@ -998,7 +1016,6 @@ buddy_block_find_best :: proc(head, tail: ^Buddy_Block, size: uint) -> ^Buddy_Bl return nil } - Buddy_Allocator :: struct { head: ^Buddy_Block, tail: ^Buddy_Block, @@ -1089,9 +1106,13 @@ buddy_allocator_free :: proc(b: ^Buddy_Allocator, ptr: rawptr) -> Allocator_Erro return nil } -buddy_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode, - size, alignment: int, - old_memory: rawptr, old_size: int,loc := #caller_location) -> ([]byte, Allocator_Error) { +buddy_allocator_proc :: proc( + allocator_data: rawptr, mode: Allocator_Mode, + size, alignment: int, + old_memory: rawptr, + old_size: int, + loc := #caller_location, +) -> ([]byte, Allocator_Error) { b := (^Buddy_Allocator)(allocator_data) diff --git a/core/mem/mem.odin b/core/mem/mem.odin index d423cc1eb..9e47c9602 100644 --- a/core/mem/mem.odin +++ b/core/mem/mem.odin @@ -14,10 +14,12 @@ Exabyte :: runtime.Exabyte set :: proc "contextless" (data: rawptr, value: byte, len: int) -> rawptr { return runtime.memset(data, i32(value), len) } + zero :: proc "contextless" (data: rawptr, len: int) -> rawptr { intrinsics.mem_zero(data, len) return data } + zero_explicit :: proc "contextless" (data: rawptr, len: int) -> rawptr { // This routine tries to avoid the compiler optimizing away the call, // so that it is always executed. 
It is intended to provided @@ -27,20 +29,22 @@ zero_explicit :: proc "contextless" (data: rawptr, len: int) -> rawptr { intrinsics.atomic_thread_fence(.Seq_Cst) // Prevent reordering return data } + zero_item :: proc "contextless" (item: $P/^$T) -> P { intrinsics.mem_zero(item, size_of(T)) return item } + zero_slice :: proc "contextless" (data: $T/[]$E) -> T { zero(raw_data(data), size_of(E)*len(data)) return data } - copy :: proc "contextless" (dst, src: rawptr, len: int) -> rawptr { intrinsics.mem_copy(dst, src, len) return dst } + copy_non_overlapping :: proc "contextless" (dst, src: rawptr, len: int) -> rawptr { intrinsics.mem_copy_non_overlapping(dst, src, len) return dst @@ -120,6 +124,7 @@ compare_ptrs :: proc "contextless" (a, b: rawptr, n: int) -> int { } ptr_offset :: intrinsics.ptr_offset + ptr_sub :: intrinsics.ptr_sub @(require_results) @@ -211,6 +216,7 @@ align_forward_uintptr :: proc(ptr, align: uintptr) -> uintptr { align_forward_int :: proc(ptr, align: int) -> int { return int(align_forward_uintptr(uintptr(ptr), uintptr(align))) } + @(require_results) align_forward_uint :: proc(ptr, align: uint) -> uint { return uint(align_forward_uintptr(uintptr(ptr), uintptr(align))) @@ -230,6 +236,7 @@ align_backward_uintptr :: proc(ptr, align: uintptr) -> uintptr { align_backward_int :: proc(ptr, align: int) -> int { return int(align_backward_uintptr(uintptr(ptr), uintptr(align))) } + @(require_results) align_backward_uint :: proc(ptr, align: uint) -> uint { return uint(align_backward_uintptr(uintptr(ptr), uintptr(align))) @@ -247,7 +254,6 @@ reinterpret_copy :: proc "contextless" ($T: typeid, ptr: rawptr) -> (value: T) { return } - Fixed_Byte_Buffer :: distinct [dynamic]byte @(require_results) @@ -264,8 +270,6 @@ make_fixed_byte_buffer :: proc "contextless" (backing: []byte) -> Fixed_Byte_Buf return transmute(Fixed_Byte_Buffer)d } - - @(require_results) align_formula :: proc "contextless" (size, align: int) -> int { result := size + align-1 @@ -276,12 +280,10 @@ align_formula :: proc "contextless" (size, align: int) -> int { calc_padding_with_header :: proc "contextless" (ptr: uintptr, align: uintptr, header_size: int) -> int { p, a := ptr, align modulo := p & (a-1) - padding := uintptr(0) if modulo != 0 { padding = a - modulo } - needed_space := uintptr(header_size) if padding < needed_space { needed_space -= padding @@ -296,8 +298,6 @@ calc_padding_with_header :: proc "contextless" (ptr: uintptr, align: uintptr, he return int(padding) } - - @(require_results, deprecated="prefer 'slice.clone'") clone_slice :: proc(slice: $T/[]$E, allocator := context.allocator, loc := #caller_location) -> (new_slice: T) { new_slice, _ = make(T, len(slice), allocator, loc) diff --git a/core/mem/mutex_allocator.odin b/core/mem/mutex_allocator.odin index 591703eab..1cccc7dac 100644 --- a/core/mem/mutex_allocator.odin +++ b/core/mem/mutex_allocator.odin @@ -13,7 +13,6 @@ mutex_allocator_init :: proc(m: ^Mutex_Allocator, backing_allocator: Allocator) m.mutex = {} } - @(require_results) mutex_allocator :: proc(m: ^Mutex_Allocator) -> Allocator { return Allocator{ @@ -22,11 +21,16 @@ mutex_allocator :: proc(m: ^Mutex_Allocator) -> Allocator { } } -mutex_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode, - size, alignment: int, - old_memory: rawptr, old_size: int, loc := #caller_location) -> (result: []byte, err: Allocator_Error) { +mutex_allocator_proc :: proc( + allocator_data: rawptr, + mode: Allocator_Mode, + size: int, + alignment: int, + old_memory: rawptr, + old_size: int, + loc := 
#caller_location, +) -> (result: []byte, err: Allocator_Error) { m := (^Mutex_Allocator)(allocator_data) - sync.mutex_guard(&m.mutex) return m.backing.procedure(m.backing.data, mode, size, alignment, old_memory, old_size, loc) } diff --git a/core/mem/raw.odin b/core/mem/raw.odin index f56206957..7fda3229d 100644 --- a/core/mem/raw.odin +++ b/core/mem/raw.odin @@ -3,22 +3,36 @@ package mem import "base:builtin" import "base:runtime" -Raw_Any :: runtime.Raw_Any -Raw_String :: runtime.Raw_String -Raw_Cstring :: runtime.Raw_Cstring -Raw_Slice :: runtime.Raw_Slice -Raw_Dynamic_Array :: runtime.Raw_Dynamic_Array -Raw_Map :: runtime.Raw_Map -Raw_Soa_Pointer :: runtime.Raw_Soa_Pointer +Raw_Any :: runtime.Raw_Any + +Raw_String :: runtime.Raw_String + +Raw_Cstring :: runtime.Raw_Cstring + +Raw_Slice :: runtime.Raw_Slice + +Raw_Dynamic_Array :: runtime.Raw_Dynamic_Array + +Raw_Map :: runtime.Raw_Map + +Raw_Soa_Pointer :: runtime.Raw_Soa_Pointer + +Raw_Complex32 :: runtime.Raw_Complex32 + +Raw_Complex64 :: runtime.Raw_Complex64 + +Raw_Complex128 :: runtime.Raw_Complex128 + +Raw_Quaternion64 :: runtime.Raw_Quaternion64 -Raw_Complex32 :: runtime.Raw_Complex32 -Raw_Complex64 :: runtime.Raw_Complex64 -Raw_Complex128 :: runtime.Raw_Complex128 -Raw_Quaternion64 :: runtime.Raw_Quaternion64 Raw_Quaternion128 :: runtime.Raw_Quaternion128 + Raw_Quaternion256 :: runtime.Raw_Quaternion256 -Raw_Quaternion64_Vector_Scalar :: runtime.Raw_Quaternion64_Vector_Scalar + +Raw_Quaternion64_Vector_Scalar :: runtime.Raw_Quaternion64_Vector_Scalar + Raw_Quaternion128_Vector_Scalar :: runtime.Raw_Quaternion128_Vector_Scalar + Raw_Quaternion256_Vector_Scalar :: runtime.Raw_Quaternion256_Vector_Scalar make_any :: proc "contextless" (data: rawptr, id: typeid) -> any { diff --git a/core/mem/rollback_stack_allocator.odin b/core/mem/rollback_stack_allocator.odin index f5e428d87..761435552 100644 --- a/core/mem/rollback_stack_allocator.odin +++ b/core/mem/rollback_stack_allocator.odin @@ -1,45 +1,47 @@ package mem -// The Rollback Stack Allocator was designed for the test runner to be fast, -// able to grow, and respect the Tracking Allocator's requirement for -// individual frees. It is not overly concerned with fragmentation, however. -// -// It has support for expansion when configured with a block allocator and -// limited support for out-of-order frees. -// -// Allocation has constant-time best and usual case performance. -// At worst, it is linear according to the number of memory blocks. -// -// Allocation follows a first-fit strategy when there are multiple memory -// blocks. -// -// Freeing has constant-time best and usual case performance. -// At worst, it is linear according to the number of memory blocks and number -// of freed items preceding the last item in a block. -// -// Resizing has constant-time performance, if it's the last item in a block, or -// the new size is smaller. Naturally, this becomes linear-time if there are -// multiple blocks to search for the pointer's owning block. Otherwise, the -// allocator defaults to a combined alloc & free operation internally. -// -// Out-of-order freeing is accomplished by collapsing a run of freed items -// from the last allocation backwards. -// -// Each allocation has an overhead of 8 bytes and any extra bytes to satisfy -// the requested alignment. +/* +The Rollback Stack Allocator was designed for the test runner to be fast, +able to grow, and respect the Tracking Allocator's requirement for +individual frees. It is not overly concerned with fragmentation, however. 
+It has support for expansion when configured with a block allocator and +limited support for out-of-order frees. + +Allocation has constant-time best and usual case performance. +At worst, it is linear according to the number of memory blocks. + +Allocation follows a first-fit strategy when there are multiple memory +blocks. + +Freeing has constant-time best and usual case performance. +At worst, it is linear according to the number of memory blocks and number +of freed items preceding the last item in a block. + +Resizing has constant-time performance, if it's the last item in a block, or +the new size is smaller. Naturally, this becomes linear-time if there are +multiple blocks to search for the pointer's owning block. Otherwise, the +allocator defaults to a combined alloc & free operation internally. + +Out-of-order freeing is accomplished by collapsing a run of freed items +from the last allocation backwards. + +Each allocation has an overhead of 8 bytes and any extra bytes to satisfy +the requested alignment. +*/ import "base:runtime" ROLLBACK_STACK_DEFAULT_BLOCK_SIZE :: 4 * Megabyte -// This limitation is due to the size of `prev_ptr`, but it is only for the -// head block; any allocation in excess of the allocator's `block_size` is -// valid, so long as the block allocator can handle it. -// -// This is because allocations over the block size are not split up if the item -// within is freed; they are immediately returned to the block allocator. -ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE :: 2 * Gigabyte +/* +This limitation is due to the size of `prev_ptr`, but it is only for the +head block; any allocation in excess of the allocator's `block_size` is +valid, so long as the block allocator can handle it. +This is because allocations over the block size are not split up if the item +within is freed; they are immediately returned to the block allocator. 
+*/ +ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE :: 2 * Gigabyte Rollback_Stack_Header :: bit_field u64 { prev_offset: uintptr | 32, @@ -60,7 +62,6 @@ Rollback_Stack :: struct { block_allocator: Allocator, } - @(private="file", require_results) rb_ptr_in_bounds :: proc(block: ^Rollback_Stack_Block, ptr: rawptr) -> bool { start := raw_data(block.buffer) @@ -294,9 +295,13 @@ rollback_stack_allocator :: proc(stack: ^Rollback_Stack) -> Allocator { } @(require_results) -rollback_stack_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode, - size, alignment: int, - old_memory: rawptr, old_size: int, location := #caller_location, +rollback_stack_allocator_proc :: proc( + allocator_data: rawptr, + mode: Allocator_Mode, + size, alignment: int, + old_memory: rawptr, + old_size: int, + location := #caller_location, ) -> (result: []byte, err: Allocator_Error) { stack := cast(^Rollback_Stack)allocator_data diff --git a/core/mem/tracking_allocator.odin b/core/mem/tracking_allocator.odin index 1b57e5fb4..356180be1 100644 --- a/core/mem/tracking_allocator.odin +++ b/core/mem/tracking_allocator.odin @@ -12,22 +12,23 @@ Tracking_Allocator_Entry :: struct { err: Allocator_Error, location: runtime.Source_Code_Location, } + Tracking_Allocator_Bad_Free_Entry :: struct { memory: rawptr, location: runtime.Source_Code_Location, } -Tracking_Allocator :: struct { - backing: Allocator, - allocation_map: map[rawptr]Tracking_Allocator_Entry, - bad_free_array: [dynamic]Tracking_Allocator_Bad_Free_Entry, - mutex: sync.Mutex, - clear_on_free_all: bool, - total_memory_allocated: i64, - total_allocation_count: i64, - total_memory_freed: i64, - total_free_count: i64, - peak_memory_allocated: i64, +Tracking_Allocator :: struct { + backing: Allocator, + allocation_map: map[rawptr]Tracking_Allocator_Entry, + bad_free_array: [dynamic]Tracking_Allocator_Bad_Free_Entry, + mutex: sync.Mutex, + clear_on_free_all: bool, + total_memory_allocated: i64, + total_allocation_count: i64, + total_memory_freed: i64, + total_free_count: i64, + peak_memory_allocated: i64, current_memory_allocated: i64, } @@ -35,7 +36,6 @@ tracking_allocator_init :: proc(t: ^Tracking_Allocator, backing_allocator: Alloc t.backing = backing_allocator t.allocation_map.allocator = internals_allocator t.bad_free_array.allocator = internals_allocator - if .Free_All in query_features(t.backing) { t.clear_on_free_all = true } @@ -46,7 +46,6 @@ tracking_allocator_destroy :: proc(t: ^Tracking_Allocator) { delete(t.bad_free_array) } - // Clear only the current allocation data while keeping the totals intact. tracking_allocator_clear :: proc(t: ^Tracking_Allocator) { sync.mutex_lock(&t.mutex) @@ -78,9 +77,14 @@ tracking_allocator :: proc(data: ^Tracking_Allocator) -> Allocator { } } -tracking_allocator_proc :: proc(allocator_data: rawptr, mode: Allocator_Mode, - size, alignment: int, - old_memory: rawptr, old_size: int, loc := #caller_location) -> (result: []byte, err: Allocator_Error) { +tracking_allocator_proc :: proc( + allocator_data: rawptr, + mode: Allocator_Mode, + size, alignment: int, + old_memory: rawptr, + old_size: int, + loc := #caller_location, +) -> (result: []byte, err: Allocator_Error) { track_alloc :: proc(data: ^Tracking_Allocator, entry: ^Tracking_Allocator_Entry) { data.total_memory_allocated += i64(entry.size) data.total_allocation_count += 1
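The hunks above only re-wrap declarations and tidy whitespace; no behaviour changes. For context while reviewing, here is a minimal sketch of how the error-returning wrappers in core/mem/alloc.odin are used (illustrative only, not part of this patch; the sizes and proc name are made up):

package mem_alloc_example

import "core:mem"

grow_buffer :: proc() -> mem.Allocator_Error {
	// alloc_bytes, resize_bytes and free_bytes all report failure through
	// Allocator_Error, which composes with or_return.
	buf := mem.alloc_bytes(1024, mem.DEFAULT_ALIGNMENT) or_return
	buf = mem.resize_bytes(buf, 4096) or_return
	return mem.free_bytes(buf)
}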
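Similarly, a short sketch of the Arena touched in core/mem/allocators.odin, assuming a caller-provided backing buffer (the 64 KiB size and names are arbitrary):

package mem_arena_example

import "core:fmt"
import "core:mem"

arena_example :: proc() {
	backing := make([]byte, 64 * mem.Kilobyte)
	defer delete(backing)

	arena: mem.Arena
	mem.arena_init(&arena, backing)
	arena_alloc := mem.arena_allocator(&arena)

	// Everything allocated through arena_alloc lives inside `backing`
	// and is released in a single free_all call.
	numbers := make([]int, 128, arena_alloc)
	fmt.println(len(numbers), "ints in the arena, offset =", arena.offset)
	free_all(arena_alloc)
}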
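Finally, the usual leak-check wiring for the Tracking_Allocator whose struct layout is re-aligned in the last hunk; this is a sketch of the existing core:mem API, not new behaviour introduced by this diff:

package mem_tracking_example

import "core:fmt"
import "core:mem"

main :: proc() {
	track: mem.Tracking_Allocator
	mem.tracking_allocator_init(&track, context.allocator)
	defer mem.tracking_allocator_destroy(&track)
	context.allocator = mem.tracking_allocator(&track)

	leaked := new(int) // deliberately never freed
	_ = leaked

	// allocation_map holds every allocation that was never freed;
	// bad_free_array records frees of pointers the allocator never handed out.
	for _, entry in track.allocation_map {
		fmt.printf("%v leaked %v bytes\n", entry.location, entry.size)
	}
	for bad_free in track.bad_free_array {
		fmt.printf("%v freed a bad pointer %p\n", bad_free.location, bad_free.memory)
	}
}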