diff --git a/src/check_decl.cpp b/src/check_decl.cpp index 1aafa6e1c..bfe703853 100644 --- a/src/check_decl.cpp +++ b/src/check_decl.cpp @@ -121,8 +121,8 @@ void check_init_variables(CheckerContext *ctx, Entity **lhs, isize lhs_count, Ar // NOTE(bill): If there is a bad syntax error, rhs > lhs which would mean there would need to be // an extra allocation - auto operands = array_make(ctx->allocator, 0, 2*lhs_count); - defer (array_free(&operands)); + SCOPED_TEMPORARY_BLOCK(); + auto operands = array_make(temporary_allocator(), 0, 2*lhs_count); check_unpack_arguments(ctx, lhs, lhs_count, &operands, inits, true, false); isize rhs_count = operands.count; @@ -317,7 +317,6 @@ void check_type_decl(CheckerContext *ctx, Entity *e, Ast *init_expr, Type *def) break; default: error(e->token, "Only struct types can have custom atom operations"); - gb_free(heap_allocator(), ac.atom_op_table); break; } } @@ -638,7 +637,7 @@ String handle_link_name(CheckerContext *ctx, Token token, String link_name, Stri error(token, "'link_name' and 'link_prefix' cannot be used together"); } else { isize len = link_prefix.len + token.string.len; - u8 *name = gb_alloc_array(ctx->allocator, u8, len+1); + u8 *name = gb_alloc_array(permanent_allocator(), u8, len+1); gb_memmove(name, &link_prefix[0], link_prefix.len); gb_memmove(name+link_prefix.len, &token.string[0], token.string.len); name[len] = 0; @@ -975,7 +974,7 @@ void check_proc_group_decl(CheckerContext *ctx, Entity *pg_entity, DeclInfo *d) ast_node(pg, ProcGroup, d->init_expr); - pge->entities = array_make(ctx->allocator, 0, pg->args.count); + pge->entities = array_make(permanent_allocator(), 0, pg->args.count); // NOTE(bill): This must be set here to prevent cycles in checking if someone // places the entity within itself diff --git a/src/check_expr.cpp b/src/check_expr.cpp index cf4304053..755ceb634 100644 --- a/src/check_expr.cpp +++ b/src/check_expr.cpp @@ -267,7 +267,7 @@ bool find_or_generate_polymorphic_procedure(CheckerContext *c, Entity *base_enti CheckerContext nctx = *c; - Scope *scope = create_scope(base_entity->scope, a); + Scope *scope = create_scope(base_entity->scope); scope->flags |= ScopeFlag_Proc; nctx.scope = scope; nctx.allow_polymorphic_types = true; @@ -366,7 +366,7 @@ bool find_or_generate_polymorphic_procedure(CheckerContext *c, Entity *base_enti u64 tags = base_entity->Procedure.tags; Ast *ident = clone_ast(base_entity->identifier); Token token = ident->Ident.token; - DeclInfo *d = make_decl_info(nctx.allocator, scope, old_decl->parent); + DeclInfo *d = make_decl_info(scope, old_decl->parent); d->gen_proc_type = final_proc_type; d->type_expr = pl->type; d->proc_lit = proc_lit; @@ -1832,12 +1832,9 @@ void check_comparison(CheckerContext *c, Operand *x, Operand *y, TokenKind op) { } + SCOPED_TEMPORARY_BLOCK(); gbString err_str = nullptr; - defer (if (err_str != nullptr) { - gb_string_free(err_str); - }); - if (check_is_assignable_to(c, x, y->type) || check_is_assignable_to(c, y, x->type)) { Type *err_type = x->type; @@ -1867,8 +1864,8 @@ void check_comparison(CheckerContext *c, Operand *x, Operand *y, TokenKind op) { } gbString type_string = type_to_string(err_type); defer (gb_string_free(type_string)); - err_str = gb_string_make(c->allocator, - gb_bprintf("operator '%.*s' not defined for type '%s'", LIT(token_strings[op]), type_string)); + err_str = gb_string_make(temporary_allocator(), + gb_bprintf("operator '%.*s' not defined for type '%s'", LIT(token_strings[op]), type_string)); } } else { gbString xt, yt; @@ -1882,8 +1879,7 @@ void 
check_comparison(CheckerContext *c, Operand *x, Operand *y, TokenKind op) { } else { yt = type_to_string(y->type); } - err_str = gb_string_make(c->allocator, - gb_bprintf("mismatched types '%s' and '%s'", xt, yt)); + err_str = gb_string_make(temporary_allocator(), gb_bprintf("mismatched types '%s' and '%s'", xt, yt)); gb_string_free(yt); gb_string_free(xt); } @@ -2978,9 +2974,10 @@ void convert_to_typed(CheckerContext *c, Operand *operand, Type *target_type) { case Type_Union: if (!is_operand_nil(*operand) && !is_operand_undef(*operand)) { + SCOPED_TEMPORARY_BLOCK(); + isize count = t->Union.variants.count; - ValidIndexAndScore *valids = gb_alloc_array(c->allocator, ValidIndexAndScore, count); - defer (gb_free(c->allocator, valids)); + ValidIndexAndScore *valids = gb_alloc_array(temporary_allocator(), ValidIndexAndScore, count); isize valid_count = 0; isize first_success_index = -1; for_array(i, t->Union.variants) { @@ -4739,7 +4736,7 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32 gb_string_free(type_str); return false; } - gbAllocator a = c->allocator; + gbAllocator a = permanent_allocator(); Type *tuple = alloc_type_tuple(); @@ -5356,7 +5353,7 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32 soa_struct->Struct.soa_elem = elem; soa_struct->Struct.soa_count = count; - scope = create_scope(c->scope, c->allocator); + scope = create_scope(c->scope); soa_struct->Struct.scope = scope; String params_xyzw[4] = { @@ -5389,7 +5386,7 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32 soa_struct->Struct.soa_elem = elem; soa_struct->Struct.soa_count = count; - scope = create_scope(old_struct->Struct.scope->parent, c->allocator); + scope = create_scope(old_struct->Struct.scope->parent); soa_struct->Struct.scope = scope; for_array(i, old_struct->Struct.fields) { @@ -6539,11 +6536,11 @@ CALL_ARGUMENT_CHECKER(check_named_call_arguments) { bool show_error = show_error_mode == CallArgumentMode_ShowErrors; CallArgumentError err = CallArgumentError_None; + SCOPED_TEMPORARY_BLOCK(); + isize param_count = pt->param_count; - bool *visited = gb_alloc_array(c->allocator, bool, param_count); - defer (gb_free(c->allocator, visited)); - auto ordered_operands = array_make(c->allocator, param_count); - defer (array_free(&ordered_operands)); + bool *visited = gb_alloc_array(temporary_allocator(), bool, param_count); + auto ordered_operands = array_make(temporary_allocator(), param_count); defer ({ for_array(i, ordered_operands) { Operand const &o = ordered_operands[i]; @@ -7385,13 +7382,15 @@ CallArgumentError check_polymorphic_record_type(CheckerContext *c, Operand *oper Array ordered_operands = operands; if (!named_fields) { - ordered_operands = array_make(c->allocator, param_count); + ordered_operands = array_make(permanent_allocator(), param_count); array_copy(&ordered_operands, operands, 0); } else { - bool *visited = gb_alloc_array(c->allocator, bool, param_count); + SCOPED_TEMPORARY_BLOCK(); + + bool *visited = gb_alloc_array(temporary_allocator(), bool, param_count); // LEAK(bill) - ordered_operands = array_make(c->allocator, param_count); + ordered_operands = array_make(permanent_allocator(), param_count); for_array(i, ce->args) { Ast *arg = ce->args[i]; @@ -7549,8 +7548,6 @@ CallArgumentError check_polymorphic_record_type(CheckerContext *c, Operand *oper } { - gbAllocator a = c->allocator; - bool failure = false; Entity *found_entity = find_polymorphic_record_entity(c, original_type, param_count, 
ordered_operands, &failure); if (found_entity) { @@ -8213,7 +8210,7 @@ ExprKind check_expr_base_internal(CheckerContext *c, Operand *o, Ast *node, Type Type *type = alloc_type(Type_Proc); check_open_scope(&ctx, pl->type); { - decl = make_decl_info(ctx.allocator, ctx.scope, ctx.decl); + decl = make_decl_info(ctx.scope, ctx.decl); decl->proc_lit = node; ctx.decl = decl; defer (ctx.decl = ctx.decl->parent); @@ -8510,7 +8507,9 @@ ExprKind check_expr_base_internal(CheckerContext *c, Operand *o, Ast *node, Type } if (cl->elems[0]->kind == Ast_FieldValue) { - bool *fields_visited = gb_alloc_array(c->allocator, bool, field_count); + SCOPED_TEMPORARY_BLOCK(); + + bool *fields_visited = gb_alloc_array(temporary_allocator(), bool, field_count); for_array(i, cl->elems) { Ast *elem = cl->elems[i]; @@ -10092,7 +10091,7 @@ ExprKind check_expr_base_internal(CheckerContext *c, Operand *o, Ast *node, Type error(x.expr, "Expected a constant string for the inline asm constraints parameter"); } - Scope *scope = create_scope(c->scope, heap_allocator()); + Scope *scope = create_scope(c->scope); scope->flags |= ScopeFlag_Proc; Type *params = alloc_type_tuple(); diff --git a/src/check_stmt.cpp b/src/check_stmt.cpp index e6902f6a3..d722ea8ee 100644 --- a/src/check_stmt.cpp +++ b/src/check_stmt.cpp @@ -640,9 +640,10 @@ void add_constant_switch_case(CheckerContext *ctx, Map *seen, Oper HashKey key = hash_exact_value(operand.value); TypeAndToken *found = map_get(seen, key); if (found != nullptr) { + SCOPED_TEMPORARY_BLOCK(); + isize count = multi_map_count(seen, key); - TypeAndToken *taps = gb_alloc_array(ctx->allocator, TypeAndToken, count); - defer (gb_free(ctx->allocator, taps)); + TypeAndToken *taps = gb_alloc_array(temporary_allocator(), TypeAndToken, count); multi_map_get_all(seen, key, taps); for (isize i = 0; i < count; i++) { @@ -859,7 +860,7 @@ void check_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags) { token.pos = ast_token(ss->body).pos; token.string = str_lit("true"); - x.expr = gb_alloc_item(ctx->allocator, Ast); + x.expr = gb_alloc_item(permanent_allocator(), Ast); x.expr->kind = Ast_Ident; x.expr->Ident.token = token; } @@ -1025,8 +1026,8 @@ void check_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags) { GB_ASSERT(is_type_enum(et)); auto fields = et->Enum.fields; - auto unhandled = array_make(ctx->allocator, 0, fields.count); - defer (array_free(&unhandled)); + SCOPED_TEMPORARY_BLOCK(); + auto unhandled = array_make(temporary_allocator(), 0, fields.count); for_array(i, fields) { Entity *f = fields[i]; @@ -1265,8 +1266,8 @@ void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags) { GB_ASSERT(is_type_union(ut)); auto variants = ut->Union.variants; - auto unhandled = array_make(ctx->allocator, 0, variants.count); - defer (array_free(&unhandled)); + SCOPED_TEMPORARY_BLOCK(); + auto unhandled = array_make(temporary_allocator(), 0, variants.count); for_array(i, variants) { Type *t = variants[i]; @@ -1433,12 +1434,12 @@ void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) { return; } + SCOPED_TEMPORARY_BLOCK(); + // NOTE(bill): If there is a bad syntax error, rhs > lhs which would mean there would need to be // an extra allocation - auto lhs_operands = array_make(ctx->allocator, lhs_count); - auto rhs_operands = array_make(ctx->allocator, 0, 2*lhs_count); - defer (array_free(&lhs_operands)); - defer (array_free(&rhs_operands)); + auto lhs_operands = array_make(temporary_allocator(), lhs_count); + auto rhs_operands = array_make(temporary_allocator(), 0, 
2*lhs_count); for_array(i, as->lhs) { if (is_blank_ident(as->lhs[i])) { @@ -1462,8 +1463,7 @@ void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) { } } - auto lhs_to_ignore = array_make(ctx->allocator, lhs_count); - defer (array_free(&lhs_to_ignore)); + auto lhs_to_ignore = array_make(temporary_allocator(), lhs_count); isize max = gb_min(lhs_count, rhs_count); // NOTE(bill, 2020-05-02): This is an utter hack to get these custom atom operations working @@ -1878,7 +1878,7 @@ void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) { DeclInfo *d = decl_info_of_entity(e); GB_ASSERT(d == nullptr); add_entity(ctx->checker, ctx->scope, e->identifier, e); - d = make_decl_info(ctx->allocator, ctx->scope, ctx->decl); + d = make_decl_info(ctx->scope, ctx->decl); add_entity_and_decl_info(ctx, e->identifier, e, d); } @@ -2036,7 +2036,7 @@ void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) { case_ast_node(vd, ValueDecl, node); if (vd->is_mutable) { - Entity **entities = gb_alloc_array(ctx->allocator, Entity *, vd->names.count); + Entity **entities = gb_alloc_array(permanent_allocator(), Entity *, vd->names.count); isize entity_count = 0; isize new_name_count = 0; diff --git a/src/check_type.cpp b/src/check_type.cpp index 6c9e82aa1..af0c70119 100644 --- a/src/check_type.cpp +++ b/src/check_type.cpp @@ -400,7 +400,7 @@ void check_struct_type(CheckerContext *ctx, Type *struct_type, Ast *node, Array< } } - auto entities = array_make(ctx->allocator, 0, variable_count); + auto entities = array_make(permanent_allocator(), 0, variable_count); for_array(i, params) { Ast *param = params[i]; @@ -596,7 +596,7 @@ void check_union_type(CheckerContext *ctx, Type *union_type, Ast *node, Array(ctx->allocator, 0, variant_count); + auto variants = array_make(permanent_allocator(), 0, variant_count); union_type->Union.scope = ctx->scope; @@ -618,7 +618,7 @@ void check_union_type(CheckerContext *ctx, Type *union_type, Ast *node, Array(ctx->allocator, 0, variable_count); + auto entities = array_make(permanent_allocator(), 0, variable_count); for_array(i, params) { Ast *param = params[i]; @@ -869,7 +869,7 @@ void check_enum_type(CheckerContext *ctx, Type *enum_type, Type *named_type, Ast enum_type->Enum.base_type = base_type; enum_type->Enum.scope = ctx->scope; - auto fields = array_make(ctx->allocator, 0, et->fields.count); + auto fields = array_make(permanent_allocator(), 0, et->fields.count); Type *constant_type = enum_type; if (named_type != nullptr) { @@ -986,9 +986,9 @@ void check_bit_field_type(CheckerContext *ctx, Type *bit_field_type, Ast *node) ast_node(bft, BitFieldType, node); GB_ASSERT(is_type_bit_field(bit_field_type)); - auto fields = array_make(ctx->allocator, 0, bft->fields.count); - auto sizes = array_make (ctx->allocator, 0, bft->fields.count); - auto offsets = array_make (ctx->allocator, 0, bft->fields.count); + auto fields = array_make(permanent_allocator(), 0, bft->fields.count); + auto sizes = array_make (permanent_allocator(), 0, bft->fields.count); + auto offsets = array_make (permanent_allocator(), 0, bft->fields.count); scope_reserve(ctx->scope, bft->fields.count); @@ -1549,7 +1549,7 @@ Type *check_get_params(CheckerContext *ctx, Scope *scope, Ast *_params, bool *is bool is_variadic = false; isize variadic_index = -1; bool is_c_vararg = false; - auto variables = array_make(ctx->allocator, 0, variable_count); + auto variables = array_make(permanent_allocator(), 0, variable_count); for_array(i, params) { Ast *param = params[i]; if (param->kind != Ast_Field) 
{ @@ -1891,7 +1891,7 @@ Type *check_get_results(CheckerContext *ctx, Scope *scope, Ast *_results) { } } - auto variables = array_make(ctx->allocator, 0, variable_count); + auto variables = array_make(permanent_allocator(), 0, variable_count); for_array(i, results) { ast_node(field, Field, results[i]); Ast *default_value = unparen_expr(field->default_value); @@ -2781,7 +2781,6 @@ void init_map_entry_type(Type *type) { // NOTE(bill): The preload types may have not been set yet GB_ASSERT(t_map_key != nullptr); - gbAllocator a = heap_allocator(); Type *entry_type = alloc_type_struct(); /* @@ -2793,9 +2792,9 @@ void init_map_entry_type(Type *type) { } */ Ast *dummy_node = alloc_ast_node(nullptr, Ast_Invalid); - Scope *s = create_scope(builtin_pkg->scope, a); + Scope *s = create_scope(builtin_pkg->scope); - auto fields = array_make(a, 0, 3); + auto fields = array_make(permanent_allocator(), 0, 3); array_add(&fields, alloc_entity_field(s, make_token_ident(str_lit("key")), t_map_key, false, 0, EntityState_Resolved)); array_add(&fields, alloc_entity_field(s, make_token_ident(str_lit("next")), t_int, false, 1, EntityState_Resolved)); array_add(&fields, alloc_entity_field(s, make_token_ident(str_lit("value")), type->Map.value, false, 2, EntityState_Resolved)); @@ -2803,7 +2802,6 @@ void init_map_entry_type(Type *type) { entry_type->Struct.fields = fields; - // type_set_offsets(a, entry_type); type->Map.entry_type = entry_type; } @@ -2826,15 +2824,14 @@ void init_map_internal_types(Type *type) { entries: [dynamic]EntryType; } */ - gbAllocator a = heap_allocator(); Ast *dummy_node = alloc_ast_node(nullptr, Ast_Invalid); - Scope *s = create_scope(builtin_pkg->scope, a); + Scope *s = create_scope(builtin_pkg->scope); Type *hashes_type = alloc_type_slice(t_int); Type *entries_type = alloc_type_dynamic_array(type->Map.entry_type); - auto fields = array_make(a, 0, 2); + auto fields = array_make(permanent_allocator(), 0, 2); array_add(&fields, alloc_entity_field(s, make_token_ident(str_lit("hashes")), hashes_type, false, 0, EntityState_Resolved)); array_add(&fields, alloc_entity_field(s, make_token_ident(str_lit("entries")), entries_type, false, 1, EntityState_Resolved)); @@ -2902,7 +2899,7 @@ Type *make_soa_struct_fixed(CheckerContext *ctx, Ast *array_typ_expr, Ast *elem_ soa_struct->Struct.soa_elem = elem; soa_struct->Struct.soa_count = count; - scope = create_scope(ctx->scope, ctx->allocator); + scope = create_scope(ctx->scope); soa_struct->Struct.scope = scope; String params_xyzw[4] = { @@ -2935,7 +2932,7 @@ Type *make_soa_struct_fixed(CheckerContext *ctx, Ast *array_typ_expr, Ast *elem_ soa_struct->Struct.soa_elem = elem; soa_struct->Struct.soa_count = count; - scope = create_scope(old_struct->Struct.scope->parent, ctx->allocator); + scope = create_scope(old_struct->Struct.scope->parent); soa_struct->Struct.scope = scope; for_array(i, old_struct->Struct.fields) { @@ -2996,7 +2993,7 @@ Type *make_soa_struct_slice(CheckerContext *ctx, Ast *array_typ_expr, Ast *elem_ soa_struct->Struct.soa_count = 0; soa_struct->Struct.is_polymorphic = true; - scope = create_scope(ctx->scope, ctx->allocator); + scope = create_scope(ctx->scope); soa_struct->Struct.scope = scope; } else if (is_type_array(elem)) { Type *old_array = base_type(elem); @@ -3010,7 +3007,7 @@ Type *make_soa_struct_slice(CheckerContext *ctx, Ast *array_typ_expr, Ast *elem_ soa_struct->Struct.soa_elem = elem; soa_struct->Struct.soa_count = 0; - scope = create_scope(ctx->scope, ctx->allocator); + scope = create_scope(ctx->scope); 
soa_struct->Struct.scope = scope; String params_xyzw[4] = { @@ -3046,7 +3043,7 @@ Type *make_soa_struct_slice(CheckerContext *ctx, Ast *array_typ_expr, Ast *elem_ soa_struct->Struct.soa_elem = elem; soa_struct->Struct.soa_count = 0; - scope = create_scope(old_struct->Struct.scope->parent, ctx->allocator); + scope = create_scope(old_struct->Struct.scope->parent); soa_struct->Struct.scope = scope; for_array(i, old_struct->Struct.fields) { @@ -3113,7 +3110,7 @@ Type *make_soa_struct_dynamic_array(CheckerContext *ctx, Ast *array_typ_expr, As soa_struct->Struct.soa_count = 0; soa_struct->Struct.is_polymorphic = true; - scope = create_scope(ctx->scope, ctx->allocator); + scope = create_scope(ctx->scope); soa_struct->Struct.scope = scope; } else if (is_type_array(elem)) { Type *old_array = base_type(elem); @@ -3127,7 +3124,7 @@ Type *make_soa_struct_dynamic_array(CheckerContext *ctx, Ast *array_typ_expr, As soa_struct->Struct.soa_elem = elem; soa_struct->Struct.soa_count = 0; - scope = create_scope(ctx->scope, ctx->allocator); + scope = create_scope(ctx->scope); soa_struct->Struct.scope = scope; String params_xyzw[4] = { @@ -3162,7 +3159,7 @@ Type *make_soa_struct_dynamic_array(CheckerContext *ctx, Ast *array_typ_expr, As soa_struct->Struct.soa_elem = elem; soa_struct->Struct.soa_count = 0; - scope = create_scope(old_struct->Struct.scope->parent, ctx->allocator); + scope = create_scope(old_struct->Struct.scope->parent); soa_struct->Struct.scope = scope; for_array(i, old_struct->Struct.fields) { diff --git a/src/checker.cpp b/src/checker.cpp index a07a3ffbe..76d8cceb3 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -187,8 +187,8 @@ void init_decl_info(DeclInfo *d, Scope *scope, DeclInfo *parent) { array_init (&d->labels, heap_allocator()); } -DeclInfo *make_decl_info(gbAllocator a, Scope *scope, DeclInfo *parent) { - DeclInfo *d = gb_alloc_item(a, DeclInfo); +DeclInfo *make_decl_info(Scope *scope, DeclInfo *parent) { + DeclInfo *d = gb_alloc_item(permanent_allocator(), DeclInfo); init_decl_info(d, scope, parent); return d; } @@ -219,8 +219,8 @@ bool decl_info_has_init(DeclInfo *d) { -Scope *create_scope(Scope *parent, gbAllocator allocator, isize init_elements_capacity=DEFAULT_SCOPE_CAPACITY) { - Scope *s = gb_alloc_item(allocator, Scope); +Scope *create_scope(Scope *parent, isize init_elements_capacity=DEFAULT_SCOPE_CAPACITY) { + Scope *s = gb_alloc_item(permanent_allocator(), Scope); s->parent = parent; string_map_init(&s->elements, heap_allocator(), init_elements_capacity); ptr_set_init(&s->imported, heap_allocator(), 0); @@ -244,7 +244,7 @@ Scope *create_scope_from_file(CheckerContext *c, AstFile *f) { GB_ASSERT(f->pkg != nullptr); GB_ASSERT(f->pkg->scope != nullptr); - Scope *s = create_scope(f->pkg->scope, c->allocator); + Scope *s = create_scope(f->pkg->scope); array_reserve(&s->delayed_imports, f->imports.count); array_reserve(&s->delayed_directives, f->directive_count); @@ -264,7 +264,7 @@ Scope *create_scope_from_package(CheckerContext *c, AstPackage *pkg) { decl_count += pkg->files[i]->decls.count; } isize init_elements_capacity = 2*decl_count; - Scope *s = create_scope(builtin_pkg->scope, c->allocator, init_elements_capacity); + Scope *s = create_scope(builtin_pkg->scope, init_elements_capacity); s->flags |= ScopeFlag_Pkg; s->pkg = pkg; @@ -324,7 +324,7 @@ void check_open_scope(CheckerContext *c, Ast *node) { GB_ASSERT(node->kind == Ast_Invalid || is_ast_stmt(node) || is_ast_type(node)); - Scope *scope = create_scope(c->scope, c->allocator); + Scope *scope = 
create_scope(c->scope); add_scope(c, node, scope); switch (node->kind) { case Ast_ProcType: @@ -699,7 +699,7 @@ void init_universal(void) { builtin_pkg->name = str_lit("builtin"); builtin_pkg->kind = Package_Normal; - builtin_pkg->scope = create_scope(nullptr, a); + builtin_pkg->scope = create_scope(nullptr); builtin_pkg->scope->flags |= ScopeFlag_Pkg | ScopeFlag_Global; builtin_pkg->scope->pkg = builtin_pkg; @@ -707,7 +707,7 @@ void init_universal(void) { intrinsics_pkg->name = str_lit("intrinsics"); intrinsics_pkg->kind = Package_Normal; - intrinsics_pkg->scope = create_scope(nullptr, a); + intrinsics_pkg->scope = create_scope(nullptr); intrinsics_pkg->scope->flags |= ScopeFlag_Pkg | ScopeFlag_Global; intrinsics_pkg->scope->pkg = intrinsics_pkg; @@ -715,7 +715,7 @@ void init_universal(void) { config_pkg->name = str_lit("config"); config_pkg->kind = Package_Normal; - config_pkg->scope = create_scope(nullptr, a); + config_pkg->scope = create_scope(nullptr); config_pkg->scope->flags |= ScopeFlag_Pkg | ScopeFlag_Global; config_pkg->scope->pkg = config_pkg; @@ -872,7 +872,6 @@ CheckerContext make_checker_context(Checker *c) { CheckerContext ctx = c->init_ctx; ctx.checker = c; ctx.info = &c->info; - ctx.allocator = c->allocator; ctx.scope = builtin_pkg->scope; ctx.pkg = builtin_pkg; @@ -906,8 +905,6 @@ bool init_checker(Checker *c, Parser *parser) { isize total_token_count = c->parser->total_token_count; isize arena_size = 2 * item_size * total_token_count; - c->allocator = heap_allocator(); - c->init_ctx = make_checker_context(c); return true; } @@ -2597,7 +2594,7 @@ DECL_ATTRIBUTE_PROC(type_decl_attribute) { if (valid && build_context.use_llvm_api) { if (ac->atom_op_table == nullptr) { - ac->atom_op_table = gb_alloc_item(heap_allocator(), TypeAtomOpTable); + ac->atom_op_table = gb_alloc_item(permanent_allocator(), TypeAtomOpTable); } ac->atom_op_table->op[TypeAtomOp_index_get] = e; } @@ -2656,7 +2653,7 @@ DECL_ATTRIBUTE_PROC(type_decl_attribute) { if (valid && build_context.use_llvm_api) { if (ac->atom_op_table == nullptr) { - ac->atom_op_table = gb_alloc_item(heap_allocator(), TypeAtomOpTable); + ac->atom_op_table = gb_alloc_item(permanent_allocator(), TypeAtomOpTable); } ac->atom_op_table->op[TypeAtomOp_index_set] = e; } @@ -2738,7 +2735,7 @@ DECL_ATTRIBUTE_PROC(type_decl_attribute) { if (valid && build_context.use_llvm_api) { if (ac->atom_op_table == nullptr) { - ac->atom_op_table = gb_alloc_item(heap_allocator(), TypeAtomOpTable); + ac->atom_op_table = gb_alloc_item(permanent_allocator(), TypeAtomOpTable); } ac->atom_op_table->op[TypeAtomOp_slice] = e; } @@ -3090,7 +3087,7 @@ void check_collect_value_decl(CheckerContext *c, Ast *decl) { } Ast *init_expr = value; - DeclInfo *d = make_decl_info(heap_allocator(), c->scope, c->decl); + DeclInfo *d = make_decl_info(c->scope, c->decl); d->entity = e; d->type_expr = vd->type; d->init_expr = init_expr; @@ -3118,7 +3115,7 @@ void check_collect_value_decl(CheckerContext *c, Ast *decl) { Token token = name->Ident.token; Ast *fl = c->foreign_context.curr_library; - DeclInfo *d = make_decl_info(c->allocator, c->scope, c->decl); + DeclInfo *d = make_decl_info(c->scope, c->decl); Entity *e = nullptr; d->attributes = vd->attributes; @@ -4317,7 +4314,7 @@ void check_parsed_files(Checker *c) { for_array(i, c->parser->packages) { AstPackage *p = c->parser->packages[i]; Scope *scope = create_scope_from_package(&c->init_ctx, p); - p->decl_info = make_decl_info(c->allocator, scope, c->init_ctx.decl); + p->decl_info = make_decl_info(scope, c->init_ctx.decl); 
string_map_set(&c->info.packages, p->fullpath, p); if (scope->flags&ScopeFlag_Init) { diff --git a/src/checker.hpp b/src/checker.hpp index 88e9451ee..ed4809748 100644 --- a/src/checker.hpp +++ b/src/checker.hpp @@ -301,7 +301,6 @@ struct CheckerContext { ProcCallingConvention curr_proc_calling_convention; bool in_proc_sig; ForeignContext foreign_context; - gbAllocator allocator; CheckerTypePath *type_path; isize type_level; // TODO(bill): Actually handle correctly @@ -331,7 +330,6 @@ struct Checker { Array procs_with_deferred_to_check; CheckerContext *curr_ctx; - gbAllocator allocator; CheckerContext init_ctx; }; diff --git a/src/common.cpp b/src/common.cpp index 567655c04..05ebdd4c5 100644 --- a/src/common.cpp +++ b/src/common.cpp @@ -451,7 +451,6 @@ void arena_free_all(Arena *arena) { - GB_ALLOCATOR_PROC(arena_allocator_proc); gbAllocator arena_allocator(Arena *arena) { @@ -492,6 +491,38 @@ GB_ALLOCATOR_PROC(arena_allocator_proc) { return ptr; } +struct SCOPED_TEMP_ARENA_MEMORY { + Arena *arena; + u8 * ptr; + u8 * end; + u8 * prev; + isize total_used; + isize block_count; + + SCOPED_TEMP_ARENA_MEMORY(Arena *the_arena) { + GB_ASSERT(!the_arena->use_mutex); + arena = the_arena; + ptr = arena->ptr; + end = arena->end; + prev = arena->prev; + total_used = arena->total_used; + block_count = arena->blocks.count; + } + ~SCOPED_TEMP_ARENA_MEMORY() { + if (arena->blocks.count != block_count) { + for (isize i = block_count; i < arena->blocks.count; i++) { + gb_free(arena->backing, arena->blocks[i]); + } + arena->blocks.count = block_count; + } + arena->ptr = ptr; + arena->end = end; + arena->prev = prev; + arena->total_used = total_used; + } +}; + + gb_global Arena permanent_arena = {}; @@ -504,6 +535,8 @@ gbAllocator temporary_allocator() { return arena_allocator(&temporary_arena); } +#define SCOPED_TEMPORARY_BLOCK() auto GB_DEFER_3(_SCOPED_TEMPORARY_BLOCK_) = SCOPED_TEMP_ARENA_MEMORY(&temporary_arena) + diff --git a/src/gb/gb.h b/src/gb/gb.h index 848f27628..f13693000 100644 --- a/src/gb/gb.h +++ b/src/gb/gb.h @@ -5156,7 +5156,7 @@ b32 gb_affinity_set(gbAffinity *a, isize core, isize thread_index) { index = core * a->threads_per_core + thread_index; thread = pthread_self(); - + cpuset_t mn; CPU_ZERO(&mn); diff --git a/src/ir.cpp b/src/ir.cpp index ee177edd6..7b6301e30 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -2132,7 +2132,7 @@ irDebugInfo *ir_add_debug_info_field(irModule *module, irDebugInfo *scope, Entit if (e->token.string.len == 0) { // If no name available for field, use its field index as its name. 
isize max_len = 8; - u8 *str = cast(u8 *)gb_alloc_array(heap_allocator(), u8, max_len); + u8 *str = cast(u8 *)gb_alloc_array(permanent_allocator(), u8, max_len); isize len = gb_snprintf(cast(char *)str, 8, "%d", index); di->DerivedType.name = make_string(str, len-1); } @@ -3293,7 +3293,7 @@ irValue *ir_emit_call(irProcedure *p, irValue *value, Array const &ar GB_ASSERT_MSG(param_count == args.count, "%.*s %td == %td", LIT(p->entity->token.string), param_count, args.count); } - auto processed_args = array_make(heap_allocator(), 0, args.count); + auto processed_args = array_make(permanent_allocator(), 0, args.count); for (isize i = 0; i < param_count; i++) { Entity *e = pt->Proc.params->Tuple.variables[i]; @@ -3416,7 +3416,7 @@ irValue *ir_emit_call(irProcedure *p, irValue *value, Array const &ar case DeferredProcedure_in_out: { auto out_args = ir_value_to_array(p, result); - array_init(&result_as_args, heap_allocator(), in_args.count + out_args.count); + array_init(&result_as_args, permanent_allocator(), in_args.count + out_args.count); array_copy(&result_as_args, in_args, 0); array_copy(&result_as_args, out_args, in_args.count); } @@ -4537,7 +4537,7 @@ irValue *ir_emit_arith(irProcedure *proc, TokenKind op, irValue *left, irValue * Type *ft = base_complex_elem_type(t_left); if (op == Token_Quo) { - auto args = array_make(heap_allocator(), 2); + auto args = array_make(permanent_allocator(), 2); args[0] = left; args[1] = right; @@ -4615,7 +4615,7 @@ irValue *ir_emit_arith(irProcedure *proc, TokenKind op, irValue *left, irValue * return ir_emit_load(proc, res); } else if (op == Token_Mul) { - auto args = array_make(heap_allocator(), 2); + auto args = array_make(permanent_allocator(), 2); args[0] = left; args[1] = right; @@ -4625,7 +4625,7 @@ irValue *ir_emit_arith(irProcedure *proc, TokenKind op, irValue *left, irValue * default: GB_PANIC("Unknown float type"); break; } } else if (op == Token_Quo) { - auto args = array_make(heap_allocator(), 2); + auto args = array_make(permanent_allocator(), 2); args[0] = left; args[1] = right; @@ -4828,7 +4828,7 @@ irValue *ir_emit_comp_against_nil(irProcedure *proc, TokenKind op_kind, irValue irValue *invalid_typeid = ir_value_constant(t_typeid, exact_value_i64(0)); return ir_emit_comp(proc, op_kind, x, invalid_typeid); } else if (is_type_bit_field(t)) { - auto args = array_make(heap_allocator(), 2); + auto args = array_make(permanent_allocator(), 2); irValue *lhs = ir_address_from_load_or_generate_local(proc, x); args[0] = ir_emit_conv(proc, lhs, t_rawptr); args[1] = ir_const_int(type_size_of(t)); @@ -4848,7 +4848,7 @@ irValue *ir_emit_comp_against_nil(irProcedure *proc, TokenKind op_kind, irValue return ir_emit_comp(proc, op_kind, cap, v_zero); } } else if (is_type_struct(t) && type_has_nil(t)) { - auto args = array_make(heap_allocator(), 2); + auto args = array_make(permanent_allocator(), 2); irValue *lhs = ir_address_from_load_or_generate_local(proc, x); args[0] = ir_emit_conv(proc, lhs, t_rawptr); args[1] = ir_const_int(type_size_of(t)); @@ -4966,7 +4966,7 @@ irValue *ir_emit_comp(irProcedure *proc, TokenKind op_kind, irValue *left, irVal } else { if (is_type_simple_compare(tl) && (op_kind == Token_CmpEq || op_kind == Token_NotEq)) { // TODO(bill): Test to see if this is actually faster!!!! 
- auto args = array_make(heap_allocator(), 3); + auto args = array_make(permanent_allocator(), 3); args[0] = ir_emit_conv(proc, lhs, t_rawptr); args[1] = ir_emit_conv(proc, rhs, t_rawptr); args[2] = ir_const_int(type_size_of(tl)); @@ -7355,7 +7355,7 @@ irValue *ir_build_builtin_proc(irProcedure *proc, Ast *expr, TypeAndValue tv, Bu // "Intrinsics" case BuiltinProc_alloca: { - auto args = array_make(heap_allocator(), 2); + auto args = array_make(permanent_allocator(), 2); args[0] = ir_emit_conv(proc, ir_build_expr(proc, ce->args[0]), t_i32); args[1] = ir_build_expr(proc, ce->args[1]); return ir_emit(proc, ir_instr_inline_code(proc, id, args, t_u8_ptr)); @@ -9024,8 +9024,7 @@ irAddr ir_build_addr(irProcedure *proc, Ast *expr) { if (cl->elems.count > 0) { ir_emit_store(proc, v, ir_add_module_constant(proc->module, type, exact_value_compound(expr))); - auto temp_data = array_make(heap_allocator(), 0, cl->elems.count); - defer (array_free(&temp_data)); + auto temp_data = array_make(temporary_allocator(), 0, cl->elems.count); // NOTE(bill): Separate value, gep, store into their own chunks for_array(i, cl->elems) { @@ -9123,8 +9122,7 @@ irAddr ir_build_addr(irProcedure *proc, Ast *expr) { if (cl->elems.count > 0) { ir_emit_store(proc, v, ir_add_module_constant(proc->module, type, exact_value_compound(expr))); - auto temp_data = array_make(heap_allocator(), 0, cl->elems.count); - defer (array_free(&temp_data)); + auto temp_data = array_make(temporary_allocator(), 0, cl->elems.count); // NOTE(bill): Separate value, gep, store into their own chunks for_array(i, cl->elems) { @@ -9232,8 +9230,7 @@ irAddr ir_build_addr(irProcedure *proc, Ast *expr) { irValue *data = ir_emit_array_ep(proc, slice->ConstantSlice.backing_array, v_zero32); - auto temp_data = array_make(heap_allocator(), 0, cl->elems.count); - defer (array_free(&temp_data)); + auto temp_data = array_make(temporary_allocator(), 0, cl->elems.count); for_array(i, cl->elems) { Ast *elem = cl->elems[i]; @@ -10179,7 +10176,7 @@ void ir_build_stmt_internal(irProcedure *proc, Ast *node) { String mangled_name = {}; { - gbString str = gb_string_make_length(heap_allocator(), proc->name.text, proc->name.len); + gbString str = gb_string_make_length(permanent_allocator(), proc->name.text, proc->name.len); str = gb_string_appendc(str, "-"); str = gb_string_append_fmt(str, ".%.*s-%llu", LIT(name), cast(long long)e->id); mangled_name.text = cast(u8 *)str; diff --git a/src/ir_print.cpp b/src/ir_print.cpp index 2d15f176b..1a306365f 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -76,7 +76,8 @@ void ir_write_u64(irFileBuffer *f, u64 i) { } void ir_write_big_int(irFileBuffer *f, BigInt const &x, Type *type, bool swap_endian) { if (x.len == 2) { - gbAllocator a = heap_allocator(); // TODO(bill): Change this allocator + SCOPED_TEMPORARY_BLOCK(); + u64 words[2] = {}; BigInt y = x; if (swap_endian) { @@ -88,9 +89,8 @@ void ir_write_big_int(irFileBuffer *f, BigInt const &x, Type *type, bool swap_en y.d.words = words; } - String s = big_int_to_string(a, &y, 10); + String s = big_int_to_string(temporary_allocator(), &y, 10); ir_write_string(f, s); - gb_free(a, s.text); } else { i64 i = 0; if (x.neg) { diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp index 121917740..0e0fa904b 100644 --- a/src/llvm_backend.cpp +++ b/src/llvm_backend.cpp @@ -781,6 +781,8 @@ void lb_emit_store_union_variant(lbProcedure *p, lbValue parent, lbValue variant void lb_clone_struct_type(LLVMTypeRef dst, LLVMTypeRef src) { + SCOPED_TEMPORARY_BLOCK(); + unsigned field_count = 
LLVMCountStructElementTypes(src); LLVMTypeRef *fields = gb_alloc_array(temporary_allocator(), LLVMTypeRef, field_count); LLVMGetStructElementTypes(src, fields); @@ -1277,9 +1279,10 @@ LLVMTypeRef lb_type_internal(lbModule *m, Type *type) { m->internal_type_level += 1; defer (m->internal_type_level -= 1); + SCOPED_TEMPORARY_BLOCK(); + unsigned field_count = cast(unsigned)(type->Struct.fields.count + offset); LLVMTypeRef *fields = gb_alloc_array(temporary_allocator(), LLVMTypeRef, field_count); - GB_ASSERT(fields != nullptr); for_array(i, type->Struct.fields) { Entity *field = type->Struct.fields[i]; @@ -1338,6 +1341,8 @@ LLVMTypeRef lb_type_internal(lbModule *m, Type *type) { if (type->Tuple.variables.count == 1) { return lb_type(m, type->Tuple.variables[0]->type); } else { + SCOPED_TEMPORARY_BLOCK(); + unsigned field_count = cast(unsigned)(type->Tuple.variables.count); LLVMTypeRef *fields = gb_alloc_array(temporary_allocator(), LLVMTypeRef, field_count); @@ -1437,6 +1442,8 @@ LLVMTypeRef lb_type_internal(lbModule *m, Type *type) { extra_param_count += 1; } + SCOPED_TEMPORARY_BLOCK(); + isize param_count = type->Proc.abi_compat_params.count + extra_param_count; auto param_types = array_make(temporary_allocator(), 0, param_count); @@ -1483,6 +1490,8 @@ LLVMTypeRef lb_type_internal(lbModule *m, Type *type) { { LLVMTypeRef internal_type = nullptr; { + SCOPED_TEMPORARY_BLOCK(); + GB_ASSERT(type->BitField.fields.count == type->BitField.sizes.count); unsigned field_count = cast(unsigned)type->BitField.fields.count; LLVMTypeRef *fields = gb_alloc_array(temporary_allocator(), LLVMTypeRef, field_count); @@ -5279,8 +5288,9 @@ lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bool allow_loc return lb_const_nil(m, original_type); } if (cl->elems[0]->kind == Ast_FieldValue) { - // TODO(bill): This is O(N*M) and will be quite slow; it should probably be sorted before hand + SCOPED_TEMPORARY_BLOCK(); + // TODO(bill): This is O(N*M) and will be quite slow; it should probably be sorted before hand LLVMValueRef *values = gb_alloc_array(temporary_allocator(), LLVMValueRef, type->Array.count); isize value_index = 0; @@ -5338,6 +5348,7 @@ lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bool allow_loc } else { GB_ASSERT_MSG(elem_count == type->Array.count, "%td != %td", elem_count, type->Array.count); + SCOPED_TEMPORARY_BLOCK(); LLVMValueRef *values = gb_alloc_array(temporary_allocator(), LLVMValueRef, type->Array.count); for (isize i = 0; i < elem_count; i++) { @@ -5360,8 +5371,8 @@ lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bool allow_loc return lb_const_nil(m, original_type); } if (cl->elems[0]->kind == Ast_FieldValue) { + SCOPED_TEMPORARY_BLOCK(); // TODO(bill): This is O(N*M) and will be quite slow; it should probably be sorted before hand - LLVMValueRef *values = gb_alloc_array(temporary_allocator(), LLVMValueRef, type->EnumeratedArray.count); isize value_index = 0; @@ -5423,6 +5434,7 @@ lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bool allow_loc } else { GB_ASSERT_MSG(elem_count == type->EnumeratedArray.count, "%td != %td", elem_count, type->EnumeratedArray.count); + SCOPED_TEMPORARY_BLOCK(); LLVMValueRef *values = gb_alloc_array(temporary_allocator(), LLVMValueRef, type->EnumeratedArray.count); for (isize i = 0; i < elem_count; i++) { @@ -5447,6 +5459,8 @@ lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bool allow_loc } GB_ASSERT(elem_type_can_be_constant(elem_type)); + SCOPED_TEMPORARY_BLOCK(); + isize 
total_elem_count = type->SimdVector.count; LLVMValueRef *values = gb_alloc_array(temporary_allocator(), LLVMValueRef, total_elem_count); @@ -5473,6 +5487,8 @@ lbValue lb_const_value(lbModule *m, Type *type, ExactValue value, bool allow_loc offset = 1; } + SCOPED_TEMPORARY_BLOCK(); + isize value_count = type->Struct.fields.count + offset; LLVMValueRef *values = gb_alloc_array(temporary_allocator(), LLVMValueRef, value_count); bool *visited = gb_alloc_array(temporary_allocator(), bool, value_count); @@ -10880,6 +10896,7 @@ lbAddr lb_build_addr(lbProcedure *p, Ast *expr) { if (cl->elems.count > 0) { lb_addr_store(p, v, lb_const_value(p->module, type, exact_value_compound(expr))); + SCOPED_TEMPORARY_BLOCK(); auto temp_data = array_make(temporary_allocator(), 0, cl->elems.count); // NOTE(bill): Separate value, gep, store into their own chunks @@ -10979,6 +10996,7 @@ lbAddr lb_build_addr(lbProcedure *p, Ast *expr) { if (cl->elems.count > 0) { lb_addr_store(p, v, lb_const_value(p->module, type, exact_value_compound(expr))); + SCOPED_TEMPORARY_BLOCK(); auto temp_data = array_make(temporary_allocator(), 0, cl->elems.count); // NOTE(bill): Separate value, gep, store into their own chunks @@ -11087,6 +11105,7 @@ lbAddr lb_build_addr(lbProcedure *p, Ast *expr) { lbValue data = lb_slice_elem(p, slice); + SCOPED_TEMPORARY_BLOCK(); auto temp_data = array_make(temporary_allocator(), 0, cl->elems.count); for_array(i, cl->elems) { @@ -11935,6 +11954,7 @@ void lb_setup_type_info_data(lbProcedure *p) { // NOTE(bill): Setup type_info da str_lit("$enum_values"), cast(i64)entry_index); + SCOPED_TEMPORARY_BLOCK(); LLVMValueRef *name_values = gb_alloc_array(temporary_allocator(), LLVMValueRef, fields.count); LLVMValueRef *value_values = gb_alloc_array(temporary_allocator(), LLVMValueRef, fields.count); @@ -12894,7 +12914,7 @@ void lb_generate_code(lbGenerator *gen) { } - String filepath_ll = concatenate_strings(temporary_allocator(), gen->output_base, STR_LIT(".ll")); + String filepath_ll = concatenate_strings(permanent_allocator(), gen->output_base, STR_LIT(".ll")); TIME_SECTION("LLVM Procedure Generation"); for_array(i, m->procedures_to_generate) { diff --git a/src/main.cpp b/src/main.cpp index d0d2e2bbb..2dbac3390 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -1646,6 +1646,7 @@ int main(int arg_count, char const **arg_ptr) { arena_init(&permanent_arena, heap_allocator()); arena_init(&temporary_arena, heap_allocator()); arena_init(&global_ast_arena, heap_allocator()); + permanent_arena.use_mutex = true; init_string_buffer_memory(); init_string_interner();
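A minimal sketch of the pattern this patch leans on, for readers without common.cpp at hand: SCOPED_TEMPORARY_BLOCK() expands to an RAII guard (SCOPED_TEMP_ARENA_MEMORY) that snapshots the temporary arena's cursor on entry and rewinds it on exit, so scratch allocations made through temporary_allocator() inside the scope are reclaimed wholesale, and the old defer/array_free/gb_free pairs can simply be deleted. The code below is a simplified, self-contained model (one fixed buffer, no block list, no backing allocator), not the compiler's actual Arena; ScratchArena, TempScope, and check_something are illustrative names.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Minimal bump arena: one fixed buffer and a cursor. The real Arena in
// common.cpp also grows by acquiring extra blocks from a backing allocator;
// that detail is omitted here.
struct ScratchArena {
    uint8_t *base;
    size_t   capacity;
    size_t   used;
};

void *arena_alloc(ScratchArena *a, size_t size, size_t align = 16) {
    size_t p = (a->used + align - 1) & ~(align - 1);
    assert(p + size <= a->capacity && "scratch arena exhausted");
    a->used = p + size;
    return a->base + p;
}

// RAII guard mirroring SCOPED_TEMP_ARENA_MEMORY: record the cursor on entry,
// rewind it on exit. Nothing is freed individually.
struct TempScope {
    ScratchArena *arena;
    size_t        saved_used;

    explicit TempScope(ScratchArena *a) : arena(a), saved_used(a->used) {}
    ~TempScope() { arena->used = saved_used; }
};

// Stand-in for a checker routine that needs per-call scratch storage,
// in the spirit of check_named_call_arguments after this change.
void check_something(ScratchArena *temp, int param_count) {
    TempScope scope(temp); // plays the role of SCOPED_TEMPORARY_BLOCK();

    bool *visited = static_cast<bool *>(arena_alloc(temp, sizeof(bool) * param_count));
    int  *ordered = static_cast<int  *>(arena_alloc(temp, sizeof(int)  * param_count));
    for (int i = 0; i < param_count; i++) {
        visited[i] = false;
        ordered[i] = i;
    }
    // ... use the scratch arrays ...
    // No defer/array_free/gb_free needed: ~TempScope rewinds the arena.
}

int main() {
    static uint8_t backing[1 << 20];
    ScratchArena temp = {backing, sizeof(backing), 0};

    check_something(&temp, 8);
    check_something(&temp, 3);
    printf("scratch in use after both calls: %zu bytes\n", temp.used); // prints 0
    return 0;
}

The real guard additionally asserts that the arena is not the mutex-protected one (main.cpp enables permanent_arena.use_mutex), presumably because rewinding a shared arena out from under other threads would free memory they still rely on.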
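The flip side of the change is picking an arena by lifetime: per-call scratch goes through temporary_allocator() under a scoped block, while data that survives checking and code generation (scopes, DeclInfo, entity arrays, link names, the atom-op tables) moves to permanent_allocator() and is never freed piecemeal, which is why the gb_free of ac.atom_op_table can be dropped. A few allocations also move the other way, from temporary to permanent (filepath_ll in lb_generate_code, for example), presumably because the value outlives any scoped temporary block. The sketch below reuses ScratchArena, arena_alloc, and TempScope from the previous example to illustrate that escape hazard; the make_output_path_* helpers are hypothetical, not compiler functions.

#include <cstddef>
#include <cstring>

// BUG sketch: building a path in the temporary arena and letting it escape
// the scoped block. The bytes are rewound (and later reused) as soon as
// `scope` is destroyed, so the caller ends up with a dangling pointer.
const char *make_output_path_wrong(ScratchArena *temp, const char *base) {
    TempScope scope(temp);
    size_t n = strlen(base);
    char *s = static_cast<char *>(arena_alloc(temp, n + 4));
    memcpy(s, base, n);
    memcpy(s + n, ".ll", 4); // copies the trailing '\0' too
    return s;                // dangling once this function returns
}

// Correct version in the spirit of the lb_generate_code change: anything that
// outlives the current scope is built in the permanent arena instead.
const char *make_output_path(ScratchArena *permanent, const char *base) {
    size_t n = strlen(base);
    char *s = static_cast<char *>(arena_alloc(permanent, n + 4));
    memcpy(s, base, n);
    memcpy(s + n, ".ll", 4);
    return s; // lives until the permanent arena itself is torn down
}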