Use global arena for AstNode allocations

This commit is contained in:
gingerBill
2018-06-09 19:53:06 +01:00
parent 49ea9ed722
commit 268491b224
11 changed files with 129 additions and 120 deletions

View File

@@ -352,10 +352,6 @@ void check_const_decl(CheckerContext *ctx, Entity *e, AstNode *type_expr, AstNod
}
}
if (init != nullptr) {
check_expr_or_type(ctx, &operand, init, e->type);
}
check_init_constant(ctx, e, &operand);
if (operand.mode == Addressing_Invalid ||

View File

@@ -3632,7 +3632,7 @@ break;
isize variable_count = type->Struct.fields.count;
array_init(&tuple->Tuple.variables, a, variable_count);
// TODO(bill): Should I copy each of the entities or is this good enough?
gb_memcopy_array(tuple->Tuple.variables.data, type->Struct.fields.data, variable_count);
gb_memmove_array(tuple->Tuple.variables.data, type->Struct.fields.data, variable_count);
operand->type = tuple;
operand->mode = Addressing_Value;

View File

@@ -208,9 +208,7 @@ void add_polymorphic_struct_entity(CheckerContext *ctx, AstNode *node, Type *nam
token.kind = Token_String;
token.string = named_type->Named.name;
AstNode *node = gb_alloc_item(a, AstNode);
node->kind = AstNode_Ident;
node->Ident.token = token;
AstNode *node = ast_ident(nullptr, token);
e = alloc_entity_type_name(s, token, named_type);
e->state = EntityState_Resolved;
@@ -1634,8 +1632,7 @@ void init_map_entry_type(Type *type) {
value: Value;
}
*/
AstNode *dummy_node = gb_alloc_item(a, AstNode);
dummy_node->kind = AstNode_Invalid;
AstNode *dummy_node = alloc_ast_node(nullptr, AstNode_Invalid);
Scope *s = create_scope(universal_scope, a);
auto fields = array_make<Entity *>(a, 0, 3);
@@ -1670,8 +1667,7 @@ void init_map_internal_types(Type *type) {
}
*/
gbAllocator a = heap_allocator();
AstNode *dummy_node = gb_alloc_item(a, AstNode);
dummy_node->kind = AstNode_Invalid;
AstNode *dummy_node = alloc_ast_node(nullptr, AstNode_Invalid);
Scope *s = create_scope(universal_scope, a);
Type *hashes_type = alloc_type_dynamic_array(t_int);

View File

@@ -241,7 +241,7 @@ Scope *create_scope_from_file(CheckerContext *c, AstFile *f) {
Scope *s = create_scope(f->pkg->scope, c->allocator);
array_reserve(&s->delayed_imports, f->imports.count);
array_reserve(&s->delayed_directives, f->assert_decl_count);
array_reserve(&s->delayed_directives, f->directive_count);
s->is_file = true;
s->file = f;
@@ -2652,7 +2652,7 @@ void check_add_foreign_import_decl(CheckerContext *ctx, AstNode *decl) {
if (fl->collection_name != "system") {
char *c_str = gb_alloc_array(heap_allocator(), char, fullpath.len+1);
defer (gb_free(heap_allocator(), c_str));
gb_memcopy(c_str, fullpath.text, fullpath.len);
gb_memmove(c_str, fullpath.text, fullpath.len);
c_str[fullpath.len] = '\0';
gbFile f = {};

View File

@@ -211,7 +211,7 @@ String u64_to_string(u64 v, char *out_buf, isize out_buf_len) {
buf[--i] = gb__num_to_char_table[v%b];
isize len = gb_min(gb_size_of(buf)-i, out_buf_len);
gb_memcopy(out_buf, &buf[i], len);
gb_memmove(out_buf, &buf[i], len);
return make_string(cast(u8 *)out_buf, len);
}
String i64_to_string(i64 a, char *out_buf, isize out_buf_len) {
@@ -236,7 +236,7 @@ String i64_to_string(i64 a, char *out_buf, isize out_buf_len) {
}
isize len = gb_min(gb_size_of(buf)-i, out_buf_len);
gb_memcopy(out_buf, &buf[i], len);
gb_memmove(out_buf, &buf[i], len);
return make_string(cast(u8 *)out_buf, len);
}
@@ -287,103 +287,115 @@ gb_global u64 const unsigned_integer_maxs[] = {
gb_global String global_module_path = {0};
gb_global bool global_module_path_set = false;
#if 0
struct Pool {
gbAllocator backing;
u8 *ptr;
u8 *end;
// Arena from Per Vognsen
#define ALIGN_DOWN(n, a) ((n) & ~((a) - 1))
#define ALIGN_UP(n, a) ALIGN_DOWN((n) + (a) - 1, (a))
#define ALIGN_DOWN_PTR(p, a) (cast(void *)ALIGN_DOWN(cast(uintptr)(p), (a)))
#define ALIGN_UP_PTR(p, a) (cast(void *)ALIGN_UP(cast(uintptr)(p), (a)))
typedef struct Arena {
u8 * ptr;
u8 * end;
Array<u8 *> blocks;
isize block_size;
isize alignment;
};
gbAllocator backing;
isize block_size;
gbMutex mutex;
#define POOL_BLOCK_SIZE (8*1024*1024)
#define POOL_ALIGNMENT 16
isize total_used;
isize possible_used;
} Arena;
#define ALIGN_DOWN(n, a) ((n) & ~((a) - 1))
#define ALIGN_UP(n, a) ALIGN_DOWN((n) + (a) - 1, (a))
#define ALIGN_DOWN_PTR(p, a) ((void *)ALIGN_DOWN((uintptr)(p), (a)))
#define ALIGN_UP_PTR(p, a) ((void *)ALIGN_UP((uintptr)(p), (a)))
#define ARENA_MIN_ALIGNMENT 16
#define ARENA_DEFAULT_BLOCK_SIZE (8*1024*1024)
void pool_init(Pool *pool, gbAllocator backing, isize block_size=POOL_BLOCK_SIZE, isize alignment=POOL_ALIGNMENT) {
pool->ptr = nullptr;
pool->end = nullptr;
pool->backing = backing;
pool->block_size = block_size;
pool->alignment = alignment;
array_init(&pool->blocks, backing);
// Initialize an arena backed by 'backing'. Records the block size used for
// future growth, sets up the (initially empty) block list, and creates the
// mutex guarding allocation. 'ptr'/'end' are left null: the first
// arena_alloc() call will trigger arena_grow() to allocate the first block.
void arena_init(Arena *arena, gbAllocator backing, isize block_size=ARENA_DEFAULT_BLOCK_SIZE) {
arena->backing = backing;
arena->block_size = block_size;
array_init(&arena->blocks, backing);
gb_mutex_init(&arena->mutex);
}
void pool_free_all(Pool *pool) {
for_array(i, pool->blocks) {
gb_free(pool->backing, pool->blocks[i]);
}
array_clear(&pool->blocks);
// Allocate a fresh block of at least 'min_size' bytes (never smaller than
// arena->block_size) from the backing allocator and make it the current
// bump region [ptr, end). The block is zeroed and recorded in
// arena->blocks so arena_free_all() can release it later.
// NOTE(review): arena_alloc() calls this while already holding
// arena->mutex, so correctness depends on gbMutex being recursive —
// TODO confirm against gb.h's mutex implementation.
void arena_grow(Arena *arena, isize min_size) {
gb_mutex_lock(&arena->mutex);
defer (gb_mutex_unlock(&arena->mutex));
// Round the block size up so 'end - ptr' stays a multiple of the
// minimum alignment.
isize size = gb_max(arena->block_size, min_size);
size = ALIGN_UP(size, ARENA_MIN_ALIGNMENT);
void *new_ptr = gb_alloc(arena->backing, size);
arena->ptr = cast(u8 *)new_ptr;
gb_zero_size(arena->ptr, size);
// Backing allocator is assumed to return ARENA_MIN_ALIGNMENT-aligned memory.
GB_ASSERT(arena->ptr == ALIGN_DOWN_PTR(arena->ptr, ARENA_MIN_ALIGNMENT));
arena->end = arena->ptr + size;
array_add(&arena->blocks, arena->ptr);
}
void pool_destroy(Pool *pool) {
pool_free_all(pool);
array_free(&pool->blocks);
// Bump-allocate 'size' bytes from the arena with at least
// ARENA_MIN_ALIGNMENT (or 'alignment', whichever is larger). Grows the
// arena with a new block when the current one cannot fit the request.
// The returned memory is zeroed. Individual frees are not supported;
// memory is reclaimed only via arena_free_all().
// NOTE(review): the capacity check compares raw 'size' but the bump below
// also adds alignment padding, so the aligned ptr can in principle land
// past 'end' — the GB_ASSERT(arena->ptr <= arena->end) below is what
// would catch that. TODO confirm this cannot fire for align > ARENA_MIN_ALIGNMENT.
void *arena_alloc(Arena *arena, isize size, isize alignment) {
gb_mutex_lock(&arena->mutex);
defer (gb_mutex_unlock(&arena->mutex));
// Bookkeeping: bytes requested (excluding alignment padding).
arena->total_used += size;
if (size > (arena->end - arena->ptr)) {
// NOTE(review): arena_grow re-locks arena->mutex — see note there.
arena_grow(arena, size);
GB_ASSERT(size <= (arena->end - arena->ptr));
}
isize align = gb_max(alignment, ARENA_MIN_ALIGNMENT);
void *ptr = arena->ptr;
// Advance past this allocation and round up so the NEXT allocation
// starts aligned; 'ptr' itself is aligned because every prior bump
// (and every fresh block) is aligned to at least ARENA_MIN_ALIGNMENT.
arena->ptr = cast(u8 *)ALIGN_UP_PTR(arena->ptr + size, align);
GB_ASSERT(arena->ptr <= arena->end);
GB_ASSERT(ptr == ALIGN_DOWN_PTR(ptr, align));
gb_zero_size(ptr, size);
return ptr;
}
void pool_grow(Pool *pool, isize min_size) {
isize size = ALIGN_UP(gb_max(min_size, pool->block_size), pool->alignment);
pool->ptr = cast(u8 *)gb_alloc(pool->backing, size);
GB_ASSERT(pool->ptr == ALIGN_DOWN_PTR(pool->ptr, pool->alignment));
pool->end = pool->ptr + size;
array_add(&pool->blocks, pool->ptr);
// Release every block back to the backing allocator and reset the arena
// to its empty state. All pointers previously returned by arena_alloc()
// become dangling after this call. The block array itself is kept
// (cleared, not freed) so the arena can be reused.
void arena_free_all(Arena *arena) {
gb_mutex_lock(&arena->mutex);
defer (gb_mutex_unlock(&arena->mutex));
for_array(i, arena->blocks) {
gb_free(arena->backing, arena->blocks[i]);
}
array_clear(&arena->blocks);
arena->ptr = nullptr;
arena->end = nullptr;
}
// (Old Pool implementation, superseded by Arena above and disabled via #if 0.)
// Bump-allocate 'size' bytes from the pool, growing it with a new block
// when the current region cannot fit the request. Unlike arena_alloc(),
// this version takes no lock and does not zero the returned memory.
void *pool_alloc(Pool *pool, isize size, isize align) {
if (size > (pool->end - pool->ptr)) {
pool_grow(pool, size);
GB_ASSERT(size <= (pool->end - pool->ptr));
}
align = gb_max(align, pool->alignment);
void *ptr = pool->ptr;
// Advance and round up so the next allocation starts aligned.
pool->ptr = cast(u8 *)ALIGN_UP_PTR(pool->ptr + size, align);
GB_ASSERT(pool->ptr <= pool->end);
GB_ASSERT(ptr == ALIGN_DOWN_PTR(ptr, align));
return ptr;
}
GB_ALLOCATOR_PROC(pool_allocator_proc) {
void *ptr = nullptr;
Pool *pool = cast(Pool *)allocator_data;
switch (type) {
case gbAllocation_Alloc:
ptr = pool_alloc(pool, size, alignment);
break;
case gbAllocation_FreeAll:
pool_free_all(pool);
break;
case gbAllocation_Free:
case gbAllocation_Resize:
GB_PANIC("A pool allocator does not support free or resize");
break;
}
GB_ALLOCATOR_PROC(arena_allocator_proc);
return ptr;
}
gbAllocator pool_allocator(Pool *pool) {
gbAllocator arena_allocator(Arena *arena) {
gbAllocator a;
a.proc = pool_allocator_proc;
a.data = pool;
a.proc = arena_allocator_proc;
a.data = arena;
return a;
}
gb_global Pool global_pool = {};
GB_ALLOCATOR_PROC(arena_allocator_proc) {
void *ptr = nullptr;
Arena *arena = cast(Arena *)allocator_data;
GB_ASSERT_NOT_NULL(arena);
gbAllocator perm_allocator(void) {
return pool_allocator(&global_pool);
switch (type) {
case gbAllocation_Alloc:
ptr = arena_alloc(arena, size, alignment);
break;
case gbAllocation_Free:
GB_PANIC("gbAllocation_Free not supported");
break;
case gbAllocation_Resize:
GB_PANIC("gbAllocation_Resize: not supported");
break;
case gbAllocation_FreeAll:
arena_free_all(arena);
break;
}
return ptr;
}
#endif

View File

@@ -323,7 +323,7 @@ String u128_to_string(u128 v, char *out_buf, isize out_buf_len) {
buf[--i] = gb__num_to_char_table[u128_to_i64(u128_mod(v, b))];
isize len = gb_min(gb_size_of(buf)-i, out_buf_len);
gb_memcopy(out_buf, &buf[i], len);
gb_memmove(out_buf, &buf[i], len);
return make_string(cast(u8 *)out_buf, len);
}
String i128_to_string(i128 a, char *out_buf, isize out_buf_len) {
@@ -348,7 +348,7 @@ String i128_to_string(i128 a, char *out_buf, isize out_buf_len) {
}
isize len = gb_min(gb_size_of(buf)-i, out_buf_len);
gb_memcopy(out_buf, &buf[i], len);
gb_memmove(out_buf, &buf[i], len);
return make_string(cast(u8 *)out_buf, len);
}

View File

@@ -8550,7 +8550,7 @@ void ir_gen_tree(irGen *s) {
}
proc_type->Proc.abi_compat_result_type = proc_results->Tuple.variables[0]->type;
AstNode *body = gb_alloc_item(a, AstNode);
AstNode *body = alloc_ast_node(nullptr, AstNode_Invalid);
Entity *e = alloc_entity_procedure(nullptr, make_token_ident(name), proc_type, 0);
irValue *p = ir_value_procedure(a, m, e, proc_type, nullptr, body, name);
@@ -8628,7 +8628,7 @@ void ir_gen_tree(irGen *s) {
}
proc_type->Proc.abi_compat_result_type = proc_results->Tuple.variables[0]->type;
AstNode *body = gb_alloc_item(a, AstNode);
AstNode *body = alloc_ast_node(nullptr, AstNode_Invalid);
Entity *e = alloc_entity_procedure(nullptr, make_token_ident(name), proc_type, 0);
irValue *p = ir_value_procedure(a, m, e, proc_type, nullptr, body, name);
@@ -8692,7 +8692,7 @@ void ir_gen_tree(irGen *s) {
proc_params, 4,
proc_results, 1, false, ProcCC_Std);
AstNode *body = gb_alloc_item(a, AstNode);
AstNode *body = alloc_ast_node(nullptr, AstNode_Invalid);
Entity *e = alloc_entity_procedure(a, nullptr, make_token_ident(name), proc_type, 0);
irValue *p = ir_value_procedure(a, m, e, proc_type, nullptr, body, name);
@@ -8718,7 +8718,7 @@ void ir_gen_tree(irGen *s) {
nullptr, 0,
nullptr, 0, false,
ProcCC_Contextless);
AstNode *body = gb_alloc_item(a, AstNode);
AstNode *body = alloc_ast_node(nullptr, AstNode_Invalid);
Entity *e = alloc_entity_procedure(nullptr, make_token_ident(name), proc_type, 0);
irValue *p = ir_value_procedure(a, m, e, proc_type, nullptr, body, name);

View File

@@ -633,9 +633,9 @@ void remove_temp_files(String output_base) {
defer (array_free(&data));
isize n = output_base.len;
gb_memcopy(data.data, output_base.text, n);
gb_memmove(data.data, output_base.text, n);
#define EXT_REMOVE(s) do { \
gb_memcopy(data.data+n, s, gb_size_of(s)); \
gb_memmove(data.data+n, s, gb_size_of(s)); \
gb_file_remove(cast(char *)data.data); \
} while (0)
EXT_REMOVE(".ll");
@@ -718,6 +718,7 @@ int main(int arg_count, char **arg_ptr) {
init_string_buffer_memory();
init_global_error_collector();
arena_init(&global_ast_arena, heap_allocator());
array_init(&library_collections, heap_allocator());
// NOTE(bill): 'core' cannot be (re)defined by the user

View File

@@ -113,7 +113,7 @@ AstNode *clone_ast_node(gbAllocator a, AstNode *node) {
if (node == nullptr) {
return nullptr;
}
AstNode *n = gb_alloc_item(a, AstNode);
AstNode *n = alloc_ast_node(node->file, node->kind);
gb_memmove(n, node, gb_size_of(AstNode));
switch (n->kind) {
@@ -422,12 +422,13 @@ bool ast_node_expect(AstNode *node, AstNodeKind kind) {
// NOTE(bill): And this below is why is I/we need a new language! Discriminated unions are a pain in C/C++
AstNode *alloc_ast_node(AstFile *f, AstNodeKind kind) {
gbArena *arena = &f->arena;
if (gb_arena_size_remaining(arena, GB_DEFAULT_MEMORY_ALIGNMENT) <= gb_size_of(AstNode)) {
// NOTE(bill): If a syntax error is so bad, just quit!
gb_exit(1);
}
AstNode *node = gb_alloc_item(gb_arena_allocator(arena), AstNode);
Arena *arena = &global_ast_arena;
gbAllocator a = arena_allocator(arena);
AstNode *node = gb_alloc_item(a, AstNode);
gb_mutex_lock(&arena->mutex);
defer (gb_mutex_unlock(&arena->mutex));
arena->possible_used += ALIGN_UP(24 + ast_node_sizes[kind], 8);
node->kind = kind;
node->file = f;
return node;
@@ -3865,10 +3866,6 @@ ParseFileError init_ast_file(AstFile *f, String fullpath, TokenPos *err_pos) {
f->prev_token = f->tokens[f->curr_token_index];
f->curr_token = f->tokens[f->curr_token_index];
// NOTE(bill): Is this big enough or too small?
isize arena_size = gb_size_of(AstNode);
arena_size *= 2*f->tokens.count;
gb_arena_init_from_allocator(&f->arena, heap_allocator(), arena_size);
array_init(&f->comments, heap_allocator());
array_init(&f->imports, heap_allocator());
@@ -3879,7 +3876,6 @@ ParseFileError init_ast_file(AstFile *f, String fullpath, TokenPos *err_pos) {
void destroy_ast_file(AstFile *f) {
GB_ASSERT(f != nullptr);
gb_arena_free(&f->arena);
array_free(&f->tokens);
array_free(&f->comments);
array_free(&f->imports);
@@ -4197,10 +4193,8 @@ void parse_setup_file_decls(Parser *p, AstFile *f, String base_dir, Array<AstNod
if (node->kind == AstNode_ExprStmt) {
AstNode *expr = node->ExprStmt.expr;
if (expr->kind == AstNode_CallExpr &&
expr->CallExpr.proc->kind == AstNode_BasicDirective &&
expr->CallExpr.proc->BasicDirective.name == "assert") {
f->assert_decl_count += 1;
expr->CallExpr.proc->kind == AstNode_BasicDirective) {
f->directive_count += 1;
continue;
}
}
@@ -4224,7 +4218,6 @@ void parse_setup_file_decls(Parser *p, AstFile *f, String base_dir, Array<AstNod
ast_node(fl, ForeignImportDecl, node);
String file_str = fl->filepath.string;
fl->base_dir = base_dir;
fl->fullpath = file_str;
if (fl->collection_name != "system") {

View File

@@ -54,7 +54,6 @@ struct AstFile {
AstNode * pkg_decl;
String fullpath;
gbArena arena;
Tokenizer tokenizer;
Array<Token> tokens;
isize curr_token_index;
@@ -74,7 +73,7 @@ struct AstFile {
Array<AstNode *> decls;
Array<AstNode *> imports; // 'import' 'using import'
isize assert_decl_count;
isize directive_count;
AstNode * curr_proc;
@@ -388,7 +387,6 @@ AST_NODE_KIND(_DeclBegin, "", struct {}) \
Token token; \
Token filepath; \
Token library_name; \
String base_dir; \
String collection_name; \
String fullpath; \
CommentGroup docs; \
@@ -505,10 +503,19 @@ String const ast_node_strings[] = {
#undef AST_NODE_KIND
};
#define AST_NODE_KIND(_kind_name_, name, ...) typedef __VA_ARGS__ GB_JOIN2(AstNode, _kind_name_);
AST_NODE_KINDS
#undef AST_NODE_KIND
isize const ast_node_sizes[] = {
0,
#define AST_NODE_KIND(_kind_name_, name, ...) gb_size_of(GB_JOIN2(AstNode, _kind_name_)),
AST_NODE_KINDS
#undef AST_NODE_KIND
};
struct AstNode {
AstNodeKind kind;
u32 stmt_state_flags;
@@ -523,7 +530,9 @@ struct AstNode {
};
#define ast_node(n_, Kind_, node_) GB_JOIN2(AstNode, Kind_) *n_ = &(node_)->Kind_; GB_ASSERT((node_)->kind == GB_JOIN2(AstNode_, Kind_))
#define ast_node(n_, Kind_, node_) GB_JOIN2(AstNode, Kind_) *n_ = &(node_)->Kind_; GB_ASSERT_MSG((node_)->kind == GB_JOIN2(AstNode_, Kind_), \
"expected '%.*s' got '%.*s'", \
LIT(ast_node_strings[GB_JOIN2(AstNode_, Kind_)]), LIT(ast_node_strings[(node_)->kind]))
#define case_ast_node(n_, Kind_, node_) case GB_JOIN2(AstNode_, Kind_): { ast_node(n_, Kind_, node_);
#ifndef case_end
#define case_end } break;
@@ -549,4 +558,6 @@ gb_inline bool is_ast_node_when_stmt(AstNode *node) {
return node->kind == AstNode_WhenStmt;
}
gb_global Arena global_ast_arena = {};
AstNode *alloc_ast_node(AstFile *f, AstNodeKind kind);

View File

@@ -96,7 +96,7 @@ String substring(String const &s, isize lo, isize hi) {
char *alloc_cstring(gbAllocator a, String s) {
char *c_str = gb_alloc_array(a, char, s.len+1);
gb_memcopy(c_str, s.text, s.len);
gb_memmove(c_str, s.text, s.len);
c_str[s.len] = '\0';
return c_str;
}