Mirror of https://github.com/odin-lang/Odin.git (synced 2026-02-17 08:34:08 +00:00)
Change the implementation of Arena to use virtual memory, and remove the old gbArena code
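For orientation, here is a minimal usage sketch of the allocator interface this commit moves to. It is illustrative only and not part of the commit; the calls (virtual_memory_init, permanent_allocator, temporary_allocator, gb_alloc_array, arena_free_all) are the ones introduced or kept by the diff below, and the surrounding setup is assumed.

// Illustrative sketch, not part of the commit.
// The new Arena hands out zeroed memory carved from virtual-memory blocks; it
// only supports alloc, resize, and free_all -- individual frees are no-ops.
virtual_memory_init();                    // set up the block mutexes and page size

gbAllocator perm = permanent_allocator(); // arena_allocator(&permanent_arena)
u8 *buf = gb_alloc_array(perm, u8, 1024); // served by arena_alloc, already zeroed

gbAllocator temp = temporary_allocator(); // arena_allocator(&temporary_arena)
// ... short-lived work using temp ...
arena_free_all(&temporary_arena);         // release every temporary block at once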
@@ -500,7 +500,6 @@ String odin_root_dir(void) {
String internal_odin_root_dir(void) {
String path = global_module_path;
isize len, i;
gbTempArenaMemory tmp;
wchar_t *text;

if (global_module_path_set) {
@@ -525,10 +524,7 @@ String internal_odin_root_dir(void) {
mutex_lock(&string_buffer_mutex);
defer (mutex_unlock(&string_buffer_mutex));

tmp = gb_temp_arena_memory_begin(&string_buffer_arena);
defer (gb_temp_arena_memory_end(tmp));

text = gb_alloc_array(string_buffer_allocator, wchar_t, len+1);
text = gb_alloc_array(permanent_allocator(), wchar_t, len+1);

GetModuleFileNameW(nullptr, text, cast(int)len);
path = string16_to_string(heap_allocator(), make_string16(text, len));
@@ -559,7 +555,6 @@ String path_to_fullpath(gbAllocator a, String s);
String internal_odin_root_dir(void) {
String path = global_module_path;
isize len, i;
gbTempArenaMemory tmp;
u8 *text;

if (global_module_path_set) {
@@ -583,10 +578,7 @@ String internal_odin_root_dir(void) {
mutex_lock(&string_buffer_mutex);
defer (mutex_unlock(&string_buffer_mutex));

tmp = gb_temp_arena_memory_begin(&string_buffer_arena);
defer (gb_temp_arena_memory_end(tmp));

text = gb_alloc_array(string_buffer_allocator, u8, len + 1);
text = gb_alloc_array(permanent_allocator(), u8, len + 1);
gb_memmove(text, &path_buf[0], len);

path = path_to_fullpath(heap_allocator(), make_string(text, len));
@@ -663,7 +655,7 @@ String internal_odin_root_dir(void) {
tmp = gb_temp_arena_memory_begin(&string_buffer_arena);
defer (gb_temp_arena_memory_end(tmp));

text = gb_alloc_array(string_buffer_allocator, u8, len + 1);
text = gb_alloc_array(permanent_allocator(), u8, len + 1);

gb_memmove(text, &path_buf[0], len);

@@ -691,13 +683,11 @@ String path_to_fullpath(gbAllocator a, String s) {
mutex_lock(&fullpath_mutex);
defer (mutex_unlock(&fullpath_mutex));

gbTempArenaMemory tmp = gb_temp_arena_memory_begin(&string_buffer_arena);
defer (gb_temp_arena_memory_end(tmp));
String16 string16 = string_to_string16(string_buffer_allocator, s);
String16 string16 = string_to_string16(temporary_allocator(), s);

DWORD len = GetFullPathNameW(&string16[0], 0, nullptr, nullptr);
if (len != 0) {
wchar_t *text = gb_alloc_array(string_buffer_allocator, wchar_t, len+1);
wchar_t *text = gb_alloc_array(permanent_allocator(), wchar_t, len+1);
GetFullPathNameW(&string16[0], len, text, nullptr);
text[len] = 0;
result = string16_to_string(a, make_string16(text, len));
391 src/common.cpp
@@ -31,179 +31,17 @@

gbAllocator heap_allocator(void);

#define for_array(index_, array_) for (isize index_ = 0; index_ < (array_).count; index_++)

#include "threading.cpp"

gb_inline void zero_size(void *ptr, isize len) {
memset(ptr, 0, len);
}

#define zero_item(ptr) zero_size((ptr), gb_size_of(ptr))

i32 next_pow2(i32 n);
i64 next_pow2(i64 n);
isize next_pow2_isize(isize n);
void debugf(char const *fmt, ...);

template <typename U, typename V>
gb_inline U bit_cast(V &v) { return reinterpret_cast<U &>(v); }

template <typename U, typename V>
gb_inline U const &bit_cast(V const &v) { return reinterpret_cast<U const &>(v); }

gb_inline i64 align_formula(i64 size, i64 align) {
if (align > 0) {
i64 result = size + align-1;
return result - result%align;
}
return size;
}
gb_inline isize align_formula_isize(isize size, isize align) {
if (align > 0) {
isize result = size + align-1;
return result - result%align;
}
return size;
}
gb_inline void *align_formula_ptr(void *ptr, isize align) {
if (align > 0) {
uintptr result = (cast(uintptr)ptr) + align-1;
return (void *)(result - result%align);
}
return ptr;
}

GB_ALLOCATOR_PROC(heap_allocator_proc);

gbAllocator heap_allocator(void) {
gbAllocator a;
a.proc = heap_allocator_proc;
a.data = nullptr;
return a;
}

GB_ALLOCATOR_PROC(heap_allocator_proc) {
void *ptr = nullptr;
gb_unused(allocator_data);
gb_unused(old_size);

// TODO(bill): Throughly test!
switch (type) {
#if defined(GB_COMPILER_MSVC)
case gbAllocation_Alloc: {
isize aligned_size = align_formula_isize(size, alignment);
// TODO(bill): Make sure this is aligned correctly
ptr = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, aligned_size);
} break;
case gbAllocation_Free:
HeapFree(GetProcessHeap(), 0, old_memory);
break;
case gbAllocation_Resize: {
isize aligned_size = align_formula_isize(size, alignment);
ptr = HeapReAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, old_memory, aligned_size);
} break;
#elif defined(GB_SYSTEM_LINUX)
// TODO(bill): *nix version that's decent
case gbAllocation_Alloc: {
ptr = aligned_alloc(alignment, (size + alignment - 1) & ~(alignment - 1));
gb_zero_size(ptr, size);
} break;

case gbAllocation_Free: {
free(old_memory);
} break;

case gbAllocation_Resize:
if (size == 0) {
free(old_memory);
break;
}
if (!old_memory) {
ptr = aligned_alloc(alignment, (size + alignment - 1) & ~(alignment - 1));
gb_zero_size(ptr, size);
break;
}
if (size <= old_size) {
ptr = old_memory;
break;
}

ptr = aligned_alloc(alignment, (size + alignment - 1) & ~(alignment - 1));
gb_memmove(ptr, old_memory, old_size);
gb_zero_size(cast(u8 *)ptr + old_size, gb_max(size-old_size, 0));
break;
#else
// TODO(bill): *nix version that's decent
case gbAllocation_Alloc:
posix_memalign(&ptr, alignment, size);
gb_zero_size(ptr, size);
break;

case gbAllocation_Free:
free(old_memory);
break;

case gbAllocation_Resize:
if (size == 0) {
free(old_memory);
break;
}
if (!old_memory) {
posix_memalign(&ptr, alignment, size);
gb_zero_size(ptr, size);
break;
}
if (size <= old_size) {
ptr = old_memory;
break;
}

posix_memalign(&ptr, alignment, size);
gb_memmove(ptr, old_memory, old_size);
gb_zero_size(cast(u8 *)ptr + old_size, gb_max(size-old_size, 0));
break;
#endif

case gbAllocation_FreeAll:
break;
}

return ptr;
}

template <typename T>
void resize_array_raw(T **array, gbAllocator const &a, isize old_count, isize new_count) {
GB_ASSERT(new_count >= 0);
if (new_count == 0) {
gb_free(a, *array);
*array = nullptr;
return;
}
if (new_count < old_count) {
return;
}
isize old_size = old_count * gb_size_of(T);
isize new_size = new_count * gb_size_of(T);
isize alignment = gb_align_of(T);
auto new_data = cast(T *)gb_resize_align(a, *array, old_size, new_size, alignment);
GB_ASSERT(new_data != nullptr);
*array = new_data;
}

#include "unicode.cpp"
#include "array.cpp"
#include "string.cpp"
#include "queue.cpp"
#include "common_memory.cpp"
#include "string.cpp"

#define for_array(index_, array_) for (isize index_ = 0; index_ < (array_).count; index_++)

#include "range_cache.cpp"

@@ -401,222 +239,6 @@ void mul_overflow_u64(u64 x, u64 y, u64 *lo, u64 *hi) {
gb_global String global_module_path = {0};
gb_global bool global_module_path_set = false;

// Arena from Per Vognsen
#define ALIGN_DOWN(n, a) ((n) & ~((a) - 1))
#define ALIGN_UP(n, a) ALIGN_DOWN((n) + (a) - 1, (a))
#define ALIGN_DOWN_PTR(p, a) (cast(void *)ALIGN_DOWN(cast(uintptr)(p), (a)))
#define ALIGN_UP_PTR(p, a) (cast(void *)ALIGN_UP(cast(uintptr)(p), (a)))

typedef struct Arena {
u8 * ptr;
u8 * end;
Array<gbVirtualMemory> blocks;
BlockingMutex mutex;
std::atomic<isize> block_size;
std::atomic<isize> total_used;
} Arena;

#define ARENA_MIN_ALIGNMENT 16
#define ARENA_DEFAULT_BLOCK_SIZE (8*1024*1024)

gb_global Arena permanent_arena = {};

void arena_init(Arena *arena, gbAllocator block_allocator, isize block_size=ARENA_DEFAULT_BLOCK_SIZE) {
mutex_init(&arena->mutex);
arena->block_size = block_size;
array_init(&arena->blocks, block_allocator, 0, 2);
}

void arena_internal_grow(Arena *arena, isize min_size) {
isize size = gb_max(arena->block_size.load(), min_size);
size = ALIGN_UP(size, ARENA_MIN_ALIGNMENT);

gbVirtualMemory vmem = gb_vm_alloc(nullptr, size);
GB_ASSERT(vmem.data != nullptr);
GB_ASSERT(vmem.size >= size);
arena->ptr = cast(u8 *)vmem.data;
GB_ASSERT(arena->ptr == ALIGN_DOWN_PTR(arena->ptr, ARENA_MIN_ALIGNMENT));
arena->end = arena->ptr + vmem.size;
array_add(&arena->blocks, vmem);
}

void *arena_alloc(Arena *arena, isize size, isize alignment) {
mutex_lock(&arena->mutex);

if (size > (arena->end - arena->ptr)) {
arena_internal_grow(arena, size);
GB_ASSERT(size <= (arena->end - arena->ptr));
}
arena->total_used += size;

isize align = gb_max(alignment, ARENA_MIN_ALIGNMENT);
void *ptr = arena->ptr;
arena->ptr = cast(u8 *)ALIGN_UP_PTR(arena->ptr + size, align);
GB_ASSERT(arena->ptr <= arena->end);
GB_ASSERT(ptr == ALIGN_DOWN_PTR(ptr, align));

mutex_unlock(&arena->mutex);
return ptr;
}

void arena_free_all(Arena *arena) {
mutex_lock(&arena->mutex);

for_array(i, arena->blocks) {
gb_vm_free(arena->blocks[i]);
}
array_clear(&arena->blocks);
arena->ptr = nullptr;
arena->end = nullptr;

mutex_unlock(&arena->mutex);
}

GB_ALLOCATOR_PROC(arena_allocator_proc);

gbAllocator arena_allocator(Arena *arena) {
gbAllocator a;
a.proc = arena_allocator_proc;
a.data = arena;
return a;
}

GB_ALLOCATOR_PROC(arena_allocator_proc) {
void *ptr = nullptr;
Arena *arena = cast(Arena *)allocator_data;
GB_ASSERT_NOT_NULL(arena);

switch (type) {
case gbAllocation_Alloc:
ptr = arena_alloc(arena, size, alignment);
break;
case gbAllocation_Free:
// GB_PANIC("gbAllocation_Free not supported");
break;
case gbAllocation_Resize:
if (size == 0) {
ptr = nullptr;
} else if (size <= old_size) {
ptr = old_memory;
} else {
ptr = arena_alloc(arena, size, alignment);
gb_memmove(ptr, old_memory, old_size);
}
break;
case gbAllocation_FreeAll:
arena_free_all(arena);
break;
}

return ptr;
}

gbAllocator permanent_allocator() {
return arena_allocator(&permanent_arena);
// return heap_allocator();
}

struct Temp_Allocator {
u8 *data;
isize len;
isize curr_offset;
gbAllocator backup_allocator;
Array<void *> leaked_allocations;
BlockingMutex mutex;
};

gb_global Temp_Allocator temporary_allocator_data = {};

void temp_allocator_init(Temp_Allocator *s, isize size) {
s->backup_allocator = heap_allocator();
s->data = cast(u8 *)gb_alloc_align(s->backup_allocator, size, 16);
s->len = size;
s->curr_offset = 0;
s->leaked_allocations.allocator = s->backup_allocator;
mutex_init(&s->mutex);
}

void *temp_allocator_alloc(Temp_Allocator *s, isize size, isize alignment) {
size = align_formula_isize(size, alignment);
if (s->curr_offset+size <= s->len) {
u8 *start = s->data;
u8 *ptr = start + s->curr_offset;
ptr = cast(u8 *)align_formula_ptr(ptr, alignment);
// assume memory is zero

isize offset = ptr - start;
s->curr_offset = offset + size;
return ptr;
} else if (size <= s->len) {
u8 *start = s->data;
u8 *ptr = cast(u8 *)align_formula_ptr(start, alignment);
// assume memory is zero

isize offset = ptr - start;
s->curr_offset = offset + size;
return ptr;
}

void *ptr = gb_alloc_align(s->backup_allocator, size, alignment);
array_add(&s->leaked_allocations, ptr);
return ptr;
}

void temp_allocator_free_all(Temp_Allocator *s) {
s->curr_offset = 0;
for_array(i, s->leaked_allocations) {
gb_free(s->backup_allocator, s->leaked_allocations[i]);
}
array_clear(&s->leaked_allocations);
gb_zero_size(s->data, s->len);
}

GB_ALLOCATOR_PROC(temp_allocator_proc) {
void *ptr = nullptr;
Temp_Allocator *s = cast(Temp_Allocator *)allocator_data;
GB_ASSERT_NOT_NULL(s);

mutex_lock(&s->mutex);
defer (mutex_unlock(&s->mutex));

switch (type) {
case gbAllocation_Alloc:
return temp_allocator_alloc(s, size, alignment);
case gbAllocation_Free:
break;
case gbAllocation_Resize:
if (size == 0) {
ptr = nullptr;
} else if (size <= old_size) {
ptr = old_memory;
} else {
ptr = temp_allocator_alloc(s, size, alignment);
gb_memmove(ptr, old_memory, old_size);
}
break;
case gbAllocation_FreeAll:
temp_allocator_free_all(s);
break;
}

return ptr;
}

gbAllocator temporary_allocator() {
return permanent_allocator();
// return {temp_allocator_proc, &temporary_allocator_data};
}

#include "string_map.cpp"
#include "map.cpp"
@@ -633,7 +255,7 @@ struct StringIntern {
};

Map<StringIntern *> string_intern_map = {}; // Key: u64
Arena string_intern_arena = {};
gb_global Arena string_intern_arena = {};

char const *string_intern(char const *text, isize len) {
u64 hash = gb_fnv64a(text, len);
@@ -662,7 +284,6 @@ char const *string_intern(String const &string) {

void init_string_interner(void) {
map_init(&string_intern_map, heap_allocator());
arena_init(&string_intern_arena, heap_allocator());
}
402 src/common_memory.cpp (new file)
@@ -0,0 +1,402 @@

gb_inline void zero_size(void *ptr, isize len) {
memset(ptr, 0, len);
}

#define zero_item(ptr) zero_size((ptr), gb_size_of(ptr))

i32 next_pow2(i32 n);
i64 next_pow2(i64 n);
isize next_pow2_isize(isize n);
void debugf(char const *fmt, ...);

template <typename U, typename V>
gb_inline U bit_cast(V &v) { return reinterpret_cast<U &>(v); }

template <typename U, typename V>
gb_inline U const &bit_cast(V const &v) { return reinterpret_cast<U const &>(v); }

gb_inline i64 align_formula(i64 size, i64 align) {
if (align > 0) {
i64 result = size + align-1;
return result - result%align;
}
return size;
}
gb_inline isize align_formula_isize(isize size, isize align) {
if (align > 0) {
isize result = size + align-1;
return result - result%align;
}
return size;
}
gb_inline void *align_formula_ptr(void *ptr, isize align) {
if (align > 0) {
uintptr result = (cast(uintptr)ptr) + align-1;
return (void *)(result - result%align);
}
return ptr;
}
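// [Editor's note, not part of the commit] Worked example of the align-up
// formula above: align_formula_isize(13, 8) computes result = 13 + 7 = 20 and
// returns 20 - 20%8 = 16, the smallest multiple of 8 that is >= 13. A
// non-positive align returns the size unchanged.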

gb_global BlockingMutex global_memory_block_mutex;
gb_global BlockingMutex global_memory_allocator_mutex;

void platform_virtual_memory_init(void);

void virtual_memory_init(void) {
mutex_init(&global_memory_block_mutex);
mutex_init(&global_memory_allocator_mutex);
platform_virtual_memory_init();
}

struct MemoryBlock {
u8 * base;
isize size;
isize used;
MemoryBlock *prev;
};

struct Arena {
MemoryBlock * curr_block;
isize minimum_block_size;
isize temporary_memory_count;
};

enum { DEFAULT_MINIMUM_BLOCK_SIZE = 8ll*1024ll*1024ll };

gb_global isize DEFAULT_PAGE_SIZE = 4096;

MemoryBlock *virtual_memory_alloc(isize size);
void virtual_memory_dealloc(MemoryBlock *block);
void arena_free_all(Arena *arena);

isize arena_align_forward_offset(Arena *arena, isize alignment) {
isize alignment_offset = 0;
isize ptr = cast(isize)(arena->curr_block->base + arena->curr_block->used);
isize mask = alignment-1;
if (ptr & mask) {
alignment_offset = alignment - (ptr & mask);
}
return alignment_offset;
}
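// [Editor's note, not part of the commit] arena_align_forward_offset returns
// the padding needed so the next allocation starts aligned: if base + used
// ends at an address ending in 0x...05 and alignment is 16, then ptr & mask = 5
// and the offset is 16 - 5 = 11, which moves the cursor forward to the next
// 16-byte boundary.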

void *arena_alloc(Arena *arena, isize min_size, isize alignment) {
GB_ASSERT(gb_is_power_of_two(alignment));

isize size = 0;

// TODO(bill): make it so that this can be done lock free (if possible)
mutex_lock(&global_memory_allocator_mutex);

if (arena->curr_block != nullptr) {
size = min_size + arena_align_forward_offset(arena, alignment);
}

if (arena->curr_block == nullptr || (arena->curr_block->used + size) > arena->curr_block->size) {
size = align_formula_isize(min_size, alignment);
arena->minimum_block_size = gb_max(DEFAULT_MINIMUM_BLOCK_SIZE, arena->minimum_block_size);

isize block_size = gb_max(size, arena->minimum_block_size);

MemoryBlock *new_block = virtual_memory_alloc(block_size);
new_block->prev = arena->curr_block;
arena->curr_block = new_block;
}

MemoryBlock *curr_block = arena->curr_block;
GB_ASSERT((curr_block->used + size) <= curr_block->size);

u8 *ptr = curr_block->base + curr_block->used;
ptr += arena_align_forward_offset(arena, alignment);

curr_block->used += size;
GB_ASSERT(curr_block->used <= curr_block->size);

mutex_unlock(&global_memory_allocator_mutex);

// NOTE(bill): memory will be zeroed by default due to virtual memory
return ptr;
}

void arena_free_all(Arena *arena) {
while (arena->curr_block != nullptr) {
MemoryBlock *free_block = arena->curr_block;
arena->curr_block = free_block->prev;
virtual_memory_dealloc(free_block);
}
}

#if defined(GB_SYSTEM_WINDOWS)
struct WindowsMemoryBlock {
MemoryBlock block; // IMPORTANT NOTE: must be at the start
WindowsMemoryBlock *prev, *next;
};

gb_global WindowsMemoryBlock global_windows_memory_block_sentinel;

void platform_virtual_memory_init(void) {
global_windows_memory_block_sentinel.prev = &global_windows_memory_block_sentinel;
global_windows_memory_block_sentinel.next = &global_windows_memory_block_sentinel;

SYSTEM_INFO sys_info = {};
GetSystemInfo(&sys_info);
DEFAULT_PAGE_SIZE = gb_max(DEFAULT_PAGE_SIZE, cast(isize)sys_info.dwPageSize);
GB_ASSERT(gb_is_power_of_two(DEFAULT_PAGE_SIZE));
}

MemoryBlock *virtual_memory_alloc(isize size) {
isize const page_size = DEFAULT_PAGE_SIZE;

isize total_size = size + gb_size_of(WindowsMemoryBlock);
isize base_offset = gb_size_of(WindowsMemoryBlock);
isize protect_offset = 0;

bool do_protection = false;
{ // overflow protection
isize rounded_size = align_formula_isize(size, page_size);
total_size = rounded_size + 2*page_size;
base_offset = page_size + rounded_size - size;
protect_offset = page_size + rounded_size;
do_protection = true;
}
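// [Editor's note, not part of the commit] Worked example of the offsets above
// with the default 4096-byte page and a request of size = 100:
//   rounded_size   = align_formula_isize(100, 4096) = 4096
//   total_size     = 4096 + 2*4096 = 12288
//   base_offset    = 4096 + 4096 - 100 = 8092
//   protect_offset = 4096 + 4096 = 8192
// The 100 user bytes therefore end exactly where the PAGE_NOACCESS guard page
// begins, so writing one byte past the block faults immediately.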

WindowsMemoryBlock *wmblock = (WindowsMemoryBlock *)VirtualAlloc(0, total_size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
GB_ASSERT_MSG(wmblock != nullptr, "Out of Virtual Memory, oh no...");

wmblock->block.base = cast(u8 *)wmblock + base_offset;
// Should be zeroed
GB_ASSERT(wmblock->block.used == 0);
GB_ASSERT(wmblock->block.prev == nullptr);

if (do_protection) {
DWORD old_protect = 0;
BOOL is_protected = VirtualProtect(cast(u8 *)wmblock + protect_offset, page_size, PAGE_NOACCESS, &old_protect);
GB_ASSERT(is_protected);
}

wmblock->block.size = size;

WindowsMemoryBlock *sentinel = &global_windows_memory_block_sentinel;
mutex_lock(&global_memory_block_mutex);
wmblock->next = sentinel;
wmblock->prev = sentinel->prev;
wmblock->prev->next = wmblock;
wmblock->next->prev = wmblock;
mutex_unlock(&global_memory_block_mutex);

return &wmblock->block;
}

void virtual_memory_dealloc(MemoryBlock *block_to_free) {
WindowsMemoryBlock *block = cast(WindowsMemoryBlock *)block_to_free;
if (block != nullptr) {
mutex_lock(&global_memory_block_mutex);
block->prev->next = block->next;
block->next->prev = block->prev;
mutex_unlock(&global_memory_block_mutex);

GB_ASSERT(VirtualFree(block, 0, MEM_RELEASE));
}
}
#else

#error Implement 'virtual_memory_alloc' and 'virtual_memory_dealloc' on this platform

void platform_virtual_memory_init(void) {

}

MemoryBlock *virtual_memory_alloc(isize size, MemoryBlockFlags flags) {
return nullptr;
}

void virtual_memory_dealloc(MemoryBlock *block) {

}
#endif

GB_ALLOCATOR_PROC(arena_allocator_proc);

gbAllocator arena_allocator(Arena *arena) {
gbAllocator a;
a.proc = arena_allocator_proc;
a.data = arena;
return a;
}

GB_ALLOCATOR_PROC(arena_allocator_proc) {
void *ptr = nullptr;
Arena *arena = cast(Arena *)allocator_data;
GB_ASSERT_NOT_NULL(arena);

switch (type) {
case gbAllocation_Alloc:
ptr = arena_alloc(arena, size, alignment);
break;
case gbAllocation_Free:
break;
case gbAllocation_Resize:
if (size == 0) {
ptr = nullptr;
} else if (size <= old_size) {
ptr = old_memory;
} else {
ptr = arena_alloc(arena, size, alignment);
gb_memmove(ptr, old_memory, old_size);
}
break;
case gbAllocation_FreeAll:
arena_free_all(arena);
break;
}

return ptr;
}

gb_global Arena permanent_arena = {};
gbAllocator permanent_allocator() {
return arena_allocator(&permanent_arena);
}

gb_global Arena temporary_arena = {};
gbAllocator temporary_allocator() {
return arena_allocator(&temporary_arena);
}
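// [Editor's note, not part of the commit] With this file the temporary
// allocator is simply a second growing arena rather than the old fixed-size
// Temp_Allocator with a backup heap; callers reclaim it wholesale, as main.cpp
// now does between compilation phases with arena_free_all(&temporary_arena).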

GB_ALLOCATOR_PROC(heap_allocator_proc);

gbAllocator heap_allocator(void) {
gbAllocator a;
a.proc = heap_allocator_proc;
a.data = nullptr;
return a;
}

GB_ALLOCATOR_PROC(heap_allocator_proc) {
void *ptr = nullptr;
gb_unused(allocator_data);
gb_unused(old_size);

// TODO(bill): Throughly test!
switch (type) {
#if defined(GB_COMPILER_MSVC)
case gbAllocation_Alloc: {
isize aligned_size = align_formula_isize(size, alignment);
// TODO(bill): Make sure this is aligned correctly
ptr = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, aligned_size);
} break;
case gbAllocation_Free:
HeapFree(GetProcessHeap(), 0, old_memory);
break;
case gbAllocation_Resize: {
isize aligned_size = align_formula_isize(size, alignment);
ptr = HeapReAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, old_memory, aligned_size);
} break;
#elif defined(GB_SYSTEM_LINUX)
// TODO(bill): *nix version that's decent
case gbAllocation_Alloc: {
ptr = aligned_alloc(alignment, (size + alignment - 1) & ~(alignment - 1));
gb_zero_size(ptr, size);
} break;

case gbAllocation_Free: {
free(old_memory);
} break;

case gbAllocation_Resize:
if (size == 0) {
free(old_memory);
break;
}
if (!old_memory) {
ptr = aligned_alloc(alignment, (size + alignment - 1) & ~(alignment - 1));
gb_zero_size(ptr, size);
break;
}
if (size <= old_size) {
ptr = old_memory;
break;
}

ptr = aligned_alloc(alignment, (size + alignment - 1) & ~(alignment - 1));
gb_memmove(ptr, old_memory, old_size);
gb_zero_size(cast(u8 *)ptr + old_size, gb_max(size-old_size, 0));
break;
#else
// TODO(bill): *nix version that's decent
case gbAllocation_Alloc:
posix_memalign(&ptr, alignment, size);
gb_zero_size(ptr, size);
break;

case gbAllocation_Free:
free(old_memory);
break;

case gbAllocation_Resize:
if (size == 0) {
free(old_memory);
break;
}
if (!old_memory) {
posix_memalign(&ptr, alignment, size);
gb_zero_size(ptr, size);
break;
}
if (size <= old_size) {
ptr = old_memory;
break;
}

posix_memalign(&ptr, alignment, size);
gb_memmove(ptr, old_memory, old_size);
gb_zero_size(cast(u8 *)ptr + old_size, gb_max(size-old_size, 0));
break;
#endif

case gbAllocation_FreeAll:
break;
}

return ptr;
}

template <typename T>
void resize_array_raw(T **array, gbAllocator const &a, isize old_count, isize new_count) {
GB_ASSERT(new_count >= 0);
if (new_count == 0) {
gb_free(a, *array);
*array = nullptr;
return;
}
if (new_count < old_count) {
return;
}
isize old_size = old_count * gb_size_of(T);
isize new_size = new_count * gb_size_of(T);
isize alignment = gb_align_of(T);
auto new_data = cast(T *)gb_resize_align(a, *array, old_size, new_size, alignment);
GB_ASSERT(new_data != nullptr);
*array = new_data;
}
626 src/gb/gb.h
@@ -893,79 +893,6 @@ GB_DEF GB_ALLOCATOR_PROC(gb_heap_allocator_proc);

//
// Arena Allocator
//
typedef struct gbArena {
gbAllocator backing;
void * physical_start;
isize total_size;
isize total_allocated;
isize temp_count;
} gbArena;

GB_DEF void gb_arena_init_from_memory (gbArena *arena, void *start, isize size);
GB_DEF void gb_arena_init_from_allocator(gbArena *arena, gbAllocator backing, isize size);
GB_DEF void gb_arena_init_sub (gbArena *arena, gbArena *parent_arena, isize size);
GB_DEF void gb_arena_free (gbArena *arena);

GB_DEF isize gb_arena_alignment_of (gbArena *arena, isize alignment);
GB_DEF isize gb_arena_size_remaining(gbArena *arena, isize alignment);
GB_DEF void gb_arena_check (gbArena *arena);

// Allocation Types: alloc, free_all, resize
GB_DEF gbAllocator gb_arena_allocator(gbArena *arena);
GB_DEF GB_ALLOCATOR_PROC(gb_arena_allocator_proc);

typedef struct gbTempArenaMemory {
gbArena *arena;
isize original_count;
} gbTempArenaMemory;

GB_DEF gbTempArenaMemory gb_temp_arena_memory_begin(gbArena *arena);
GB_DEF void gb_temp_arena_memory_end (gbTempArenaMemory tmp_mem);

//
// Pool Allocator
//

typedef struct gbPool {
gbAllocator backing;
void * physical_start;
void * free_list;
isize block_size;
isize block_align;
isize total_size;
} gbPool;

GB_DEF void gb_pool_init (gbPool *pool, gbAllocator backing, isize num_blocks, isize block_size);
GB_DEF void gb_pool_init_align(gbPool *pool, gbAllocator backing, isize num_blocks, isize block_size, isize block_align);
GB_DEF void gb_pool_free (gbPool *pool);

// Allocation Types: alloc, free
GB_DEF gbAllocator gb_pool_allocator(gbPool *pool);
GB_DEF GB_ALLOCATOR_PROC(gb_pool_allocator_proc);

// NOTE(bill): Used for allocators to keep track of sizes
typedef struct gbAllocationHeader {
isize size;
} gbAllocationHeader;

GB_DEF gbAllocationHeader *gb_allocation_header (void *data);
GB_DEF void gb_allocation_header_fill(gbAllocationHeader *header, void *data, isize size);

// TODO(bill): Find better way of doing this without #if #elif etc.
#if defined(GB_ARCH_32_BIT)
#define GB_ISIZE_HIGH_BIT 0x80000000
@@ -975,64 +902,6 @@ GB_DEF void gb_allocation_header_fill(gbAllocationHeader *header,
#error
#endif

//
// Free List Allocator
//

// IMPORTANT TODO(bill): Thoroughly test the free list allocator!
// NOTE(bill): This is a very shitty free list as it just picks the first free block not the best size
// as I am just being lazy. Also, I will probably remove it later; it's only here because why not?!
//
// NOTE(bill): I may also complete remove this if I completely implement a fixed heap allocator

typedef struct gbFreeListBlock gbFreeListBlock;
struct gbFreeListBlock {
gbFreeListBlock *next;
isize size;
};

typedef struct gbFreeList {
void * physical_start;
isize total_size;

gbFreeListBlock *curr_block;

isize total_allocated;
isize allocation_count;
} gbFreeList;

GB_DEF void gb_free_list_init (gbFreeList *fl, void *start, isize size);
GB_DEF void gb_free_list_init_from_allocator(gbFreeList *fl, gbAllocator backing, isize size);

// Allocation Types: alloc, free, free_all, resize
GB_DEF gbAllocator gb_free_list_allocator(gbFreeList *fl);
GB_DEF GB_ALLOCATOR_PROC(gb_free_list_allocator_proc);

//
// Scratch Memory Allocator - Ring Buffer Based Arena
//

typedef struct gbScratchMemory {
void *physical_start;
isize total_size;
void *alloc_point;
void *free_point;
} gbScratchMemory;

GB_DEF void gb_scratch_memory_init (gbScratchMemory *s, void *start, isize size);
GB_DEF b32 gb_scratch_memory_is_in_use(gbScratchMemory *s, void *ptr);

// Allocation Types: alloc, free, free_all, resize
GB_DEF gbAllocator gb_scratch_allocator(gbScratchMemory *s);
GB_DEF GB_ALLOCATOR_PROC(gb_scratch_allocator_proc);

// TODO(bill): Stack allocator
// TODO(bill): Fixed heap allocator
// TODO(bill): General heap allocator. Maybe a TCMalloc like clone?

////////////////////////////////////////////////////////////////
//
@@ -3948,501 +3817,6 @@ isize gb_virtual_memory_page_size(isize *alignment_out) {
//

//
// Arena Allocator
//

gb_inline void gb_arena_init_from_memory(gbArena *arena, void *start, isize size) {
arena->backing.proc = NULL;
arena->backing.data = NULL;
arena->physical_start = start;
arena->total_size = size;
arena->total_allocated = 0;
arena->temp_count = 0;
}

gb_inline void gb_arena_init_from_allocator(gbArena *arena, gbAllocator backing, isize size) {
arena->backing = backing;
arena->physical_start = gb_alloc(backing, size); // NOTE(bill): Uses default alignment
arena->total_size = size;
arena->total_allocated = 0;
arena->temp_count = 0;
}

gb_inline void gb_arena_init_sub(gbArena *arena, gbArena *parent_arena, isize size) { gb_arena_init_from_allocator(arena, gb_arena_allocator(parent_arena), size); }

gb_inline void gb_arena_free(gbArena *arena) {
if (arena->backing.proc) {
gb_free(arena->backing, arena->physical_start);
arena->physical_start = NULL;
}
}

gb_inline isize gb_arena_alignment_of(gbArena *arena, isize alignment) {
isize alignment_offset, result_pointer, mask;
GB_ASSERT(gb_is_power_of_two(alignment));

alignment_offset = 0;
result_pointer = cast(isize)arena->physical_start + arena->total_allocated;
mask = alignment - 1;
if (result_pointer & mask)
alignment_offset = alignment - (result_pointer & mask);

return alignment_offset;
}

gb_inline isize gb_arena_size_remaining(gbArena *arena, isize alignment) {
isize result = arena->total_size - (arena->total_allocated + gb_arena_alignment_of(arena, alignment));
return result;
}

gb_inline void gb_arena_check(gbArena *arena) { GB_ASSERT(arena->temp_count == 0); }

gb_inline gbAllocator gb_arena_allocator(gbArena *arena) {
gbAllocator allocator;
allocator.proc = gb_arena_allocator_proc;
allocator.data = arena;
return allocator;
}

GB_ALLOCATOR_PROC(gb_arena_allocator_proc) {
gbArena *arena = cast(gbArena *)allocator_data;
void *ptr = NULL;

gb_unused(old_size);

switch (type) {
case gbAllocation_Alloc: {
void *end = gb_pointer_add(arena->physical_start, arena->total_allocated);
isize total_size = size + alignment;

// NOTE(bill): Out of memory
if (arena->total_allocated + total_size > cast(isize)arena->total_size) {
gb_printf_err("Arena out of memory\n");
return NULL;
}

ptr = gb_align_forward(end, alignment);
arena->total_allocated += total_size;
if (flags & gbAllocatorFlag_ClearToZero)
gb_zero_size(ptr, size);
} break;

case gbAllocation_Free:
// NOTE(bill): Free all at once
// Use Temp_Arena_Memory if you want to free a block
break;

case gbAllocation_FreeAll:
arena->total_allocated = 0;
break;

case gbAllocation_Resize: {
// TODO(bill): Check if ptr is on top of stack and just extend
gbAllocator a = gb_arena_allocator(arena);
ptr = gb_default_resize_align(a, old_memory, old_size, size, alignment);
} break;
}
return ptr;
}

gb_inline gbTempArenaMemory gb_temp_arena_memory_begin(gbArena *arena) {
gbTempArenaMemory tmp;
tmp.arena = arena;
tmp.original_count = arena->total_allocated;
arena->temp_count++;
return tmp;
}

gb_inline void gb_temp_arena_memory_end(gbTempArenaMemory tmp) {
GB_ASSERT_MSG(tmp.arena->total_allocated >= tmp.original_count,
"%td >= %td", tmp.arena->total_allocated, tmp.original_count);
GB_ASSERT(tmp.arena->temp_count > 0);
tmp.arena->total_allocated = tmp.original_count;
tmp.arena->temp_count--;
}

//
// Pool Allocator
//

gb_inline void gb_pool_init(gbPool *pool, gbAllocator backing, isize num_blocks, isize block_size) {
gb_pool_init_align(pool, backing, num_blocks, block_size, GB_DEFAULT_MEMORY_ALIGNMENT);
}

void gb_pool_init_align(gbPool *pool, gbAllocator backing, isize num_blocks, isize block_size, isize block_align) {
isize actual_block_size, pool_size, block_index;
void *data, *curr;
uintptr *end;

gb_zero_item(pool);

pool->backing = backing;
pool->block_size = block_size;
pool->block_align = block_align;

actual_block_size = block_size + block_align;
pool_size = num_blocks * actual_block_size;

data = gb_alloc_align(backing, pool_size, block_align);

// NOTE(bill): Init intrusive freelist
curr = data;
for (block_index = 0; block_index < num_blocks-1; block_index++) {
uintptr *next = cast(uintptr *)curr;
*next = cast(uintptr)curr + actual_block_size;
curr = gb_pointer_add(curr, actual_block_size);
}

end = cast(uintptr *)curr;
*end = cast(uintptr)NULL;

pool->physical_start = data;
pool->free_list = data;
}

gb_inline void gb_pool_free(gbPool *pool) {
if (pool->backing.proc) {
gb_free(pool->backing, pool->physical_start);
}
}

gb_inline gbAllocator gb_pool_allocator(gbPool *pool) {
gbAllocator allocator;
allocator.proc = gb_pool_allocator_proc;
allocator.data = pool;
return allocator;
}
GB_ALLOCATOR_PROC(gb_pool_allocator_proc) {
gbPool *pool = cast(gbPool *)allocator_data;
void *ptr = NULL;

gb_unused(old_size);

switch (type) {
case gbAllocation_Alloc: {
uintptr next_free;
GB_ASSERT(size == pool->block_size);
GB_ASSERT(alignment == pool->block_align);
GB_ASSERT(pool->free_list != NULL);

next_free = *cast(uintptr *)pool->free_list;
ptr = pool->free_list;
pool->free_list = cast(void *)next_free;
pool->total_size += pool->block_size;
if (flags & gbAllocatorFlag_ClearToZero)
gb_zero_size(ptr, size);
} break;

case gbAllocation_Free: {
uintptr *next;
if (old_memory == NULL) return NULL;

next = cast(uintptr *)old_memory;
*next = cast(uintptr)pool->free_list;
pool->free_list = old_memory;
pool->total_size -= pool->block_size;
} break;

case gbAllocation_FreeAll:
// TODO(bill):
break;

case gbAllocation_Resize:
// NOTE(bill): Cannot resize
GB_PANIC("You cannot resize something allocated by with a pool.");
break;
}

return ptr;
}

gb_inline gbAllocationHeader *gb_allocation_header(void *data) {
isize *p = cast(isize *)data;
while (p[-1] == cast(isize)(-1)) {
p--;
}
return cast(gbAllocationHeader *)p - 1;
}

gb_inline void gb_allocation_header_fill(gbAllocationHeader *header, void *data, isize size) {
isize *ptr;
header->size = size;
ptr = cast(isize *)(header + 1);
while (cast(void *)ptr < data) {
*ptr++ = cast(isize)(-1);
}
}

//
// Free List Allocator
//

gb_inline void gb_free_list_init(gbFreeList *fl, void *start, isize size) {
GB_ASSERT(size > gb_size_of(gbFreeListBlock));

fl->physical_start = start;
fl->total_size = size;
fl->curr_block = cast(gbFreeListBlock *)start;
fl->curr_block->size = size;
fl->curr_block->next = NULL;
}

gb_inline void gb_free_list_init_from_allocator(gbFreeList *fl, gbAllocator backing, isize size) {
void *start = gb_alloc(backing, size);
gb_free_list_init(fl, start, size);
}

gb_inline gbAllocator gb_free_list_allocator(gbFreeList *fl) {
gbAllocator a;
a.proc = gb_free_list_allocator_proc;
a.data = fl;
return a;
}

GB_ALLOCATOR_PROC(gb_free_list_allocator_proc) {
gbFreeList *fl = cast(gbFreeList *)allocator_data;
void *ptr = NULL;

GB_ASSERT_NOT_NULL(fl);

switch (type) {
case gbAllocation_Alloc: {
gbFreeListBlock *prev_block = NULL;
gbFreeListBlock *curr_block = fl->curr_block;

while (curr_block) {
isize total_size;
gbAllocationHeader *header;

total_size = size + alignment + gb_size_of(gbAllocationHeader);

if (curr_block->size < total_size) {
prev_block = curr_block;
curr_block = curr_block->next;
continue;
}

if (curr_block->size - total_size <= gb_size_of(gbAllocationHeader)) {
total_size = curr_block->size;

if (prev_block)
prev_block->next = curr_block->next;
else
fl->curr_block = curr_block->next;
} else {
// NOTE(bill): Create a new block for the remaining memory
gbFreeListBlock *next_block;
next_block = cast(gbFreeListBlock *)gb_pointer_add(curr_block, total_size);

GB_ASSERT(cast(void *)next_block < gb_pointer_add(fl->physical_start, fl->total_size));

next_block->size = curr_block->size - total_size;
next_block->next = curr_block->next;

if (prev_block)
prev_block->next = next_block;
else
fl->curr_block = next_block;
}

// TODO(bill): Set Header Info
header = cast(gbAllocationHeader *)curr_block;
ptr = gb_align_forward(header+1, alignment);
gb_allocation_header_fill(header, ptr, size);

fl->total_allocated += total_size;
fl->allocation_count++;

if (flags & gbAllocatorFlag_ClearToZero)
gb_zero_size(ptr, size);
return ptr;
}
// NOTE(bill): if ptr == NULL, ran out of free list memory! FUCK!
return NULL;
} break;

case gbAllocation_Free: {
gbAllocationHeader *header = gb_allocation_header(old_memory);
isize block_size = header->size;
uintptr block_start, block_end;
gbFreeListBlock *prev_block = NULL;
gbFreeListBlock *curr_block = fl->curr_block;

block_start = cast(uintptr)header;
block_end = cast(uintptr)block_start + block_size;

while (curr_block) {
if (cast(uintptr)curr_block >= block_end)
break;
prev_block = curr_block;
curr_block = curr_block->next;
}

if (prev_block == NULL) {
prev_block = cast(gbFreeListBlock *)block_start;
prev_block->size = block_size;
prev_block->next = fl->curr_block;

fl->curr_block = prev_block;
} else if ((cast(uintptr)prev_block + prev_block->size) == block_start) {
prev_block->size += block_size;
} else {
gbFreeListBlock *tmp = cast(gbFreeListBlock *)block_start;
tmp->size = block_size;
tmp->next = prev_block->next;
prev_block->next = tmp;

prev_block = tmp;
}

if (curr_block && (cast(uintptr)curr_block == block_end)) {
prev_block->size += curr_block->size;
prev_block->next = curr_block->next;
}

fl->allocation_count--;
fl->total_allocated -= block_size;
} break;

case gbAllocation_FreeAll:
gb_free_list_init(fl, fl->physical_start, fl->total_size);
break;

case gbAllocation_Resize:
ptr = gb_default_resize_align(gb_free_list_allocator(fl), old_memory, old_size, size, alignment);
break;
}

return ptr;
}

void gb_scratch_memory_init(gbScratchMemory *s, void *start, isize size) {
s->physical_start = start;
s->total_size = size;
s->alloc_point = start;
s->free_point = start;
}

b32 gb_scratch_memory_is_in_use(gbScratchMemory *s, void *ptr) {
if (s->free_point == s->alloc_point) return false;
if (s->alloc_point > s->free_point)
return ptr >= s->free_point && ptr < s->alloc_point;
return ptr >= s->free_point || ptr < s->alloc_point;
}

gbAllocator gb_scratch_allocator(gbScratchMemory *s) {
gbAllocator a;
a.proc = gb_scratch_allocator_proc;
a.data = s;
return a;
}

GB_ALLOCATOR_PROC(gb_scratch_allocator_proc) {
gbScratchMemory *s = cast(gbScratchMemory *)allocator_data;
void *ptr = NULL;
GB_ASSERT_NOT_NULL(s);

switch (type) {
case gbAllocation_Alloc: {
void *pt = s->alloc_point;
gbAllocationHeader *header = cast(gbAllocationHeader *)pt;
void *data = gb_align_forward(header+1, alignment);
void *end = gb_pointer_add(s->physical_start, s->total_size);

GB_ASSERT(alignment % 4 == 0);
size = ((size + 3)/4)*4;
pt = gb_pointer_add(pt, size);

// NOTE(bill): Wrap around
if (pt > end) {
header->size = gb_pointer_diff(header, end) | GB_ISIZE_HIGH_BIT;
pt = s->physical_start;
header = cast(gbAllocationHeader *)pt;
data = gb_align_forward(header+1, alignment);
pt = gb_pointer_add(pt, size);
}

if (!gb_scratch_memory_is_in_use(s, pt)) {
gb_allocation_header_fill(header, pt, gb_pointer_diff(header, pt));
s->alloc_point = cast(u8 *)pt;
ptr = data;
}

if (flags & gbAllocatorFlag_ClearToZero)
gb_zero_size(ptr, size);
} break;

case gbAllocation_Free: {
if (old_memory) {
void *end = gb_pointer_add(s->physical_start, s->total_size);
if (old_memory < s->physical_start || old_memory >= end) {
GB_ASSERT(false);
} else {
// NOTE(bill): Mark as free
gbAllocationHeader *h = gb_allocation_header(old_memory);
GB_ASSERT((h->size & GB_ISIZE_HIGH_BIT) == 0);
h->size = h->size | GB_ISIZE_HIGH_BIT;

while (s->free_point != s->alloc_point) {
gbAllocationHeader *header = cast(gbAllocationHeader *)s->free_point;
if ((header->size & GB_ISIZE_HIGH_BIT) == 0)
break;

s->free_point = gb_pointer_add(s->free_point, h->size & (~GB_ISIZE_HIGH_BIT));
if (s->free_point == end)
s->free_point = s->physical_start;
}
}
}
} break;

case gbAllocation_FreeAll:
s->alloc_point = s->physical_start;
s->free_point = s->physical_start;
break;

case gbAllocation_Resize:
ptr = gb_default_resize_align(gb_scratch_allocator(s), old_memory, old_size, size, alignment);
break;
}

return ptr;
}

////////////////////////////////////////////////////////////////
//
// Sorting
18 src/main.cpp
@@ -60,7 +60,6 @@ i32 system_exec_command_line_app(char const *name, char const *fmt, ...) {
isize const cmd_cap = 4096;
char cmd_line[cmd_cap] = {};
va_list va;
gbTempArenaMemory tmp;
String16 cmd;
i32 exit_code = 0;

@@ -80,10 +79,7 @@ i32 system_exec_command_line_app(char const *name, char const *fmt, ...) {
gb_printf_err("%.*s\n\n", cast(int)(cmd_len-1), cmd_line);
}

tmp = gb_temp_arena_memory_begin(&string_buffer_arena);
defer (gb_temp_arena_memory_end(tmp));

cmd = string_to_string16(string_buffer_allocator, make_string(cast(u8 *)cmd_line, cmd_len-1));
cmd = string_to_string16(permanent_allocator(), make_string(cast(u8 *)cmd_line, cmd_len-1));
if (CreateProcessW(nullptr, cmd.text,
nullptr, nullptr, true, 0, nullptr, nullptr,
&start_info, &pi)) {
@@ -2005,10 +2001,8 @@ int main(int arg_count, char const **arg_ptr) {
defer (timings_destroy(&global_timings));

TIME_SECTION("initialization");

arena_init(&permanent_arena, heap_allocator());
temp_allocator_init(&temporary_allocator_data, 16*1024*1024);
arena_init(&global_ast_arena, heap_allocator());

virtual_memory_init();
mutex_init(&fullpath_mutex);

init_string_buffer_memory();
@@ -2187,7 +2181,7 @@ int main(int arg_count, char const **arg_ptr) {
return 1;
}

temp_allocator_free_all(&temporary_allocator_data);
arena_free_all(&temporary_arena);

TIME_SECTION("type check");

@@ -2200,7 +2194,7 @@ int main(int arg_count, char const **arg_ptr) {
return 1;
}

temp_allocator_free_all(&temporary_allocator_data);
arena_free_all(&temporary_arena);

if (build_context.generate_docs) {
if (global_error_collector.count != 0) {
@@ -2237,7 +2231,7 @@ int main(int arg_count, char const **arg_ptr) {
}
lb_generate_code(gen);

temp_allocator_free_all(&temporary_allocator_data);
arena_free_all(&temporary_arena);

switch (build_context.build_mode) {
case BuildMode_Executable:

@@ -4784,9 +4784,8 @@ ParseFileError init_ast_file(AstFile *f, String fullpath, TokenPos *err_pos) {
isize const page_size = 4*1024;
isize block_size = 2*f->tokens.count*gb_size_of(Ast);
block_size = ((block_size + page_size-1)/page_size) * page_size;
block_size = gb_clamp(block_size, page_size, ARENA_DEFAULT_BLOCK_SIZE);

arena_init(&f->arena, heap_allocator(), block_size);
block_size = gb_clamp(block_size, page_size, DEFAULT_MINIMUM_BLOCK_SIZE);
f->arena.minimum_block_size = block_size;

array_init(&f->comments, heap_allocator(), 0, 0);

@@ -95,7 +95,7 @@ struct AstFile {
AstPackage * pkg;
Scope * scope;

Arena arena;
Arena arena;

Ast * pkg_decl;
String fullpath;
@@ -741,11 +741,10 @@ gb_inline bool is_ast_when_stmt(Ast *node) {
return node->kind == Ast_WhenStmt;
}

gb_global Arena global_ast_arena = {};
gb_global gb_thread_local Arena global_ast_arena = {};

gbAllocator ast_allocator(AstFile *f) {
Arena *arena = f ? &f->arena : &global_ast_arena;
// Arena *arena = &global_ast_arena;
return arena_allocator(arena);
}

@@ -252,10 +252,7 @@ void print_query_data_as_json(QueryValue *value, bool format = true, isize inden
char const hex_table[] = "0123456789ABCDEF";
isize buf_len = name.len + extra + 2 + 1;

gbTempArenaMemory tmp = gb_temp_arena_memory_begin(&string_buffer_arena);
defer (gb_temp_arena_memory_end(tmp));

u8 *buf = gb_alloc_array(string_buffer_allocator, u8, buf_len);
u8 *buf = gb_alloc_array(temporary_allocator(), u8, buf_len);

isize j = 0;

@@ -1,11 +1,6 @@
gb_global gbArena string_buffer_arena = {};
gb_global gbAllocator string_buffer_allocator = {};
gb_global BlockingMutex string_buffer_mutex = {};

void init_string_buffer_memory(void) {
// NOTE(bill): This should be enough memory for file systems
gb_arena_init_from_allocator(&string_buffer_arena, heap_allocator(), gb_megabytes(1));
string_buffer_allocator = gb_arena_allocator(&string_buffer_arena);
mutex_init(&string_buffer_mutex);
}