This commit is contained in:
gingerBill
2025-04-15 11:26:48 +01:00
4 changed files with 681 additions and 454 deletions

View File

@@ -10,6 +10,7 @@
// package mem_tlsf implements a Two Level Segregated Fit memory allocator.
package mem_tlsf
import "base:intrinsics"
import "base:runtime"
Error :: enum byte {
@@ -21,7 +22,6 @@ Error :: enum byte {
Backing_Allocator_Error = 5,
}
Allocator :: struct {
// Empty lists point at this block to indicate they are free.
block_null: Block_Header,
@@ -39,12 +39,13 @@ Allocator :: struct {
// statistics like how much memory is still available,
// fragmentation, etc.
pool: Pool,
// If we're expected to grow when we run out of memory,
// how much should we ask the backing allocator for?
new_pool_size: uint,
}
#assert(size_of(Allocator) % ALIGN_SIZE == 0)
@(require_results)
allocator :: proc(t: ^Allocator) -> runtime.Allocator {
return runtime.Allocator{
@@ -53,6 +54,21 @@ allocator :: proc(t: ^Allocator) -> runtime.Allocator {
}
}
// Tries to estimate a pool size sufficient for `count` allocations, each of `size` and with `alignment`.
estimate_pool_from_size_alignment :: proc(count: int, size: int, alignment: int) -> (pool_size: int) {
	// Worst case, each allocation needs the requested size, full alignment padding,
	// and the block header, rounded up to the allocator's alignment granularity.
	bytes_per_allocation := align_up(uint(size + alignment) + BLOCK_HEADER_OVERHEAD, ALIGN_SIZE)
	// The pool itself carries a fixed one-time overhead on top of the allocations.
	pool_size = int(bytes_per_allocation) * count + int(INITIAL_POOL_OVERHEAD)
	return
}
// Tries to estimate a pool size sufficient for `count` allocations of `type`.
estimate_pool_from_typeid :: proc(count: int, type: typeid) -> (pool_size: int) {
	// Look up runtime type info so we can forward the type's size and alignment
	// to the size/alignment-based estimator.
	info := type_info_of(type)
	pool_size = estimate_pool_size(count, info.size, info.align)
	return
}
// Overload set: dispatches to the size/alignment or typeid variant based on the arguments.
estimate_pool_size :: proc{estimate_pool_from_size_alignment, estimate_pool_from_typeid}
@(require_results)
init_from_buffer :: proc(control: ^Allocator, buf: []byte) -> Error {
assert(control != nil)
@@ -60,21 +76,25 @@ init_from_buffer :: proc(control: ^Allocator, buf: []byte) -> Error {
return .Invalid_Alignment
}
pool_bytes := align_down(len(buf) - POOL_OVERHEAD, ALIGN_SIZE)
pool_bytes := align_down(len(buf) - INITIAL_POOL_OVERHEAD, ALIGN_SIZE)
if pool_bytes < BLOCK_SIZE_MIN {
return .Backing_Buffer_Too_Small
} else if pool_bytes > BLOCK_SIZE_MAX {
return .Backing_Buffer_Too_Large
}
clear(control)
return pool_add(control, buf[:])
control.pool = Pool{
data = buf,
allocator = {},
}
return free_all(control)
}
@(require_results)
init_from_allocator :: proc(control: ^Allocator, backing: runtime.Allocator, initial_pool_size: int, new_pool_size := 0) -> Error {
assert(control != nil)
pool_bytes := align_up(uint(initial_pool_size) + POOL_OVERHEAD, ALIGN_SIZE)
pool_bytes := uint(estimate_pool_size(1, initial_pool_size, ALIGN_SIZE))
if pool_bytes < BLOCK_SIZE_MIN {
return .Backing_Buffer_Too_Small
} else if pool_bytes > BLOCK_SIZE_MAX {
@@ -85,12 +105,15 @@ init_from_allocator :: proc(control: ^Allocator, backing: runtime.Allocator, ini
if backing_err != nil {
return .Backing_Allocator_Error
}
err := init_from_buffer(control, buf)
control.pool = Pool{
data = buf,
allocator = backing,
}
return err
control.new_pool_size = uint(new_pool_size)
return free_all(control)
}
init :: proc{init_from_buffer, init_from_allocator}
@@ -103,8 +126,6 @@ destroy :: proc(control: ^Allocator) {
// No need to call `pool_remove` or anything, as they're embedded in the backing memory.
// We do however need to free the `Pool` tracking entities and the backing memory itself.
// As `Allocator` is embedded in the first backing slice, the `control` pointer will be
// invalid after this call.
for p := control.pool.next; p != nil; {
next := p.next
@@ -136,9 +157,8 @@ allocator_proc :: proc(allocator_data: rawptr, mode: runtime.Allocator_Mode,
return nil, nil
case .Free_All:
// NOTE: this doesn't work right at the moment, Jeroen has it on his to-do list :)
// clear(control)
return nil, .Mode_Not_Implemented
free_all(control)
return nil, nil
case .Resize:
return resize(control, old_memory, uint(old_size), uint(size), uint(alignment))
@@ -159,3 +179,23 @@ allocator_proc :: proc(allocator_data: rawptr, mode: runtime.Allocator_Mode,
return nil, nil
}
// Exported solely to facilitate testing
@(require_results)
ffs :: proc "contextless" (word: u32) -> (bit: i32) {
	// Find-first-set: index of the lowest set bit, or -1 when no bit is set.
	if word == 0 {
		return -1
	}
	return i32(intrinsics.count_trailing_zeros(word))
}
// Exported solely to facilitate testing
@(require_results)
fls :: proc "contextless" (word: u32) -> (bit: i32) {
	// Find-last-set: index of the highest set bit. For zero this yields -1,
	// since count_leading_zeros(0) == 32 and HIGHEST_BIT - 32 == -1.
	HIGHEST_BIT :: (size_of(u32) * 8) - 1
	return i32(HIGHEST_BIT - intrinsics.count_leading_zeros(word))
}
// Exported solely to facilitate testing
@(require_results)
fls_uint :: proc "contextless" (size: uint) -> (bit: i32) {
	// Same as `fls`, but over the platform-sized `uint` (32 or 64 bits wide).
	HIGHEST_BIT :: (size_of(uint) * 8) - 1
	return i32(HIGHEST_BIT - intrinsics.count_leading_zeros(size))
}

File diff suppressed because it is too large Load Diff

View File

@@ -159,7 +159,7 @@ _tcp_recv_error :: proc(errno: linux.Errno) -> TCP_Recv_Error {
return .Invalid_Argument
case .ENOTCONN:
return .Not_Connected
case .ECONNREFUSED:
case .ECONNREFUSED, .ECONNRESET:
return .Connection_Closed
case .ETIMEDOUT:
return .Timeout
@@ -179,7 +179,7 @@ _udp_recv_error :: proc(errno: linux.Errno) -> UDP_Recv_Error {
#partial switch errno {
case .EBADF, .ENOTSOCK, .EFAULT:
return .Invalid_Argument
case .ECONNREFUSED, .ENOTCONN:
case .ECONNREFUSED, .ENOTCONN, .ECONNRESET:
return .Connection_Refused
case .ETIMEDOUT:
return .Timeout

View File

@@ -1,8 +1,10 @@
package test_core_mem
import "core:mem"
import "core:mem/tlsf"
import "core:mem/virtual"
import "core:testing"
import "core:slice"
@test
test_tlsf_bitscan :: proc(t: ^testing.T) {
@@ -54,3 +56,140 @@ test_align_bumping_block_limit :: proc(t: ^testing.T) {
testing.expect_value(t, err, nil)
testing.expect(t, len(data) == 896)
}
// Checks two TLSF invariants: no two live allocations overlap, and every fresh
// allocation is zero-initialized.
@(test)
tlsf_test_overlap_and_zero :: proc(t: ^testing.T) {
// Keep the default allocator for bookkeeping containers so they don't live
// inside the TLSF pool under test.
default_allocator := context.allocator
alloc: tlsf.Allocator
defer tlsf.destroy(&alloc)
NUM_ALLOCATIONS :: 1_000
// Backing budget: one pointer-sized header per allocation on top of the payload
// (assumes this covers TLSF's per-block overhead — TODO confirm against tlsf).
BACKING_SIZE :: NUM_ALLOCATIONS * (1_000 + size_of(uintptr))
if err := tlsf.init_from_allocator(&alloc, default_allocator, BACKING_SIZE); err != .None {
testing.fail_now(t, "TLSF init error")
}
// Route all subsequent `make` calls through the TLSF allocator.
context.allocator = tlsf.allocator(&alloc)
allocations := make([dynamic][]byte, 0, NUM_ALLOCATIONS, default_allocator)
defer delete(allocations)
err: mem.Allocator_Error
s: []byte
// Allocate slices of every size 1..NUM_ALLOCATIONS, stopping at the first failure.
for size := 1; err == .None && size <= NUM_ALLOCATIONS; size += 1 {
s, err = make([]byte, size)
append(&allocations, s)
}
// Sort by base address so overlap only needs to be checked between neighbors.
slice.sort_by(allocations[:], proc(a, b: []byte) -> bool {
return uintptr(raw_data(a)) < uintptr(raw_data((b)))
})
for i in 0..<len(allocations) - 1 {
fail_if_allocations_overlap(t, allocations[i], allocations[i + 1])
fail_if_not_zeroed(t, allocations[i])
}
}
// Checks that the TLSF allocator grows by acquiring new pools from its backing
// allocator: the initial pool fits roughly one allocation, yet all
// NUM_ALLOCATIONS must succeed.
@(test)
tlsf_test_grow_pools :: proc(t: ^testing.T) {
// Bookkeeping containers use the default allocator, not the pool under test.
default_allocator := context.allocator
alloc: tlsf.Allocator
defer tlsf.destroy(&alloc)
NUM_ALLOCATIONS :: 10
ALLOC_SIZE :: mem.Megabyte
// Size both the initial pool and each growth increment for a single allocation,
// forcing a new pool per allocation after the first.
BACKING_SIZE_INIT := tlsf.estimate_pool_size(1, ALLOC_SIZE, 64)
BACKING_SIZE_GROW := tlsf.estimate_pool_size(1, ALLOC_SIZE, 64)
allocations := make([dynamic][]byte, 0, NUM_ALLOCATIONS, default_allocator)
defer delete(allocations)
if err := tlsf.init_from_allocator(&alloc, default_allocator, BACKING_SIZE_INIT, BACKING_SIZE_GROW); err != .None {
testing.fail_now(t, "TLSF init error")
}
// Route all subsequent `make` calls through the TLSF allocator.
context.allocator = tlsf.allocator(&alloc)
for len(allocations) < NUM_ALLOCATIONS {
s := make([]byte, ALLOC_SIZE) or_break
testing.expect_value(t, len(s), ALLOC_SIZE)
append(&allocations, s)
}
// All allocations must have succeeded; a failure here means a pool didn't grow.
testing.expect_value(t, len(allocations), NUM_ALLOCATIONS)
// Sort by base address so overlap only needs to be checked between neighbors.
slice.sort_by(allocations[:], proc(a, b: []byte) -> bool {
return uintptr(raw_data(a)) < uintptr(raw_data((b)))
})
for i in 0..<len(allocations) - 1 {
fail_if_allocations_overlap(t, allocations[i], allocations[i + 1])
fail_if_not_zeroed(t, allocations[i])
}
}
// Checks that `free_all` resets the allocator to its post-init state: allocating
// to exhaustion, freeing all, then allocating again must reproduce the exact
// same sequence of addresses and sizes.
@(test)
tlsf_test_free_all :: proc(t: ^testing.T) {
// Bookkeeping containers use the default allocator, not the pool under test.
default_allocator := context.allocator
alloc: tlsf.Allocator
defer tlsf.destroy(&alloc)
NUM_ALLOCATIONS :: 10
ALLOCATION_SIZE :: mem.Megabyte
// Backing budget: one pointer-sized header per allocation on top of the payload
// (assumes this covers TLSF's per-block overhead — TODO confirm against tlsf).
BACKING_SIZE :: NUM_ALLOCATIONS * (ALLOCATION_SIZE + size_of(uintptr))
if init_err := tlsf.init_from_allocator(&alloc, default_allocator, BACKING_SIZE); init_err != .None {
testing.fail_now(t, "TLSF init error")
}
// Route all subsequent `make` calls through the TLSF allocator.
context.allocator = tlsf.allocator(&alloc)
allocations: [2][dynamic][]byte
allocations[0] = make([dynamic][]byte, 0, NUM_ALLOCATIONS, default_allocator) // After `init`
allocations[1] = make([dynamic][]byte, 0, NUM_ALLOCATIONS, default_allocator) // After `free_all`
defer {
delete(allocations[0])
delete(allocations[1])
}
// First pass: allocate until the pool is exhausted.
for {
s := make([]byte, ALLOCATION_SIZE) or_break
append(&allocations[0], s)
}
testing.expect(t, len(allocations[0]) >= 10)
free_all(tlsf.allocator(&alloc))
// Second pass: after free_all, exhaustion should allow the same allocations again.
for {
s := make([]byte, ALLOCATION_SIZE) or_break
append(&allocations[1], s)
}
testing.expect(t, len(allocations[1]) >= 10)
// Both passes must yield identical address/size sequences, proving the reset
// restored the allocator's free lists exactly.
for i in 0..<len(allocations[0]) {
s0, s1 := allocations[0][i], allocations[1][i]
assert(raw_data(s0) == raw_data((s1)))
assert(len(s0) == len((s1)))
}
}
// Fails the test immediately if any byte of `a` is non-zero.
fail_if_not_zeroed :: proc(t: ^testing.T, a: []byte) {
	for i in 0..<len(a) {
		if a[i] != 0 {
			testing.fail_now(t, "Allocation wasn't zeroed")
		}
	}
}
// Fails the test immediately if the two allocations share any bytes.
// Each allocation is treated as the half-open interval [start, start+len).
fail_if_allocations_overlap :: proc(t: ^testing.T, a, b: []byte) {
	a_start := uintptr(raw_data(a))
	a_end   := a_start + uintptr(len(a))
	b_start := uintptr(raw_data(b))
	b_end   := b_start + uintptr(len(b))
	// Two half-open intervals overlap iff each starts before the other ends.
	// The previous check (`a_end >= b_end && b_end >= a_start`) only caught
	// containment-style cases and missed partial overlaps where `b` extends
	// past the end of `a` (e.g. a=[0,10), b=[5,20)). It also (incorrectly)
	// flagged merely adjacent allocations via `>=`; `<` excludes touching ends.
	if a_start < b_end && b_start < a_end {
		testing.fail_now(t, "Allocations overlapped")
	}
}