Merge pull request #2288 from odin-lang/compiler-improvements-2023-01

Multithreading Compiler Improvements 2023-01
This commit is contained in:
gingerBill
2023-01-11 22:14:53 +00:00
committed by GitHub
35 changed files with 2669 additions and 2232 deletions

View File

@@ -291,13 +291,14 @@ struct BuildContext {
bool show_error_line;
bool ignore_lazy;
bool ignore_llvm_build;
bool use_subsystem_windows;
bool ignore_microsoft_magic;
bool linker_map_file;
bool use_separate_modules;
bool threaded_checker;
bool no_threaded_checker;
bool show_debug_messages;
@@ -936,16 +937,20 @@ gb_global BlockingMutex fullpath_mutex;
#if defined(GB_SYSTEM_WINDOWS)
gb_internal String path_to_fullpath(gbAllocator a, String s) {
String result = {};
mutex_lock(&fullpath_mutex);
defer (mutex_unlock(&fullpath_mutex));
String16 string16 = string_to_string16(heap_allocator(), s);
defer (gb_free(heap_allocator(), string16.text));
DWORD len = GetFullPathNameW(&string16[0], 0, nullptr, nullptr);
DWORD len;
mutex_lock(&fullpath_mutex);
len = GetFullPathNameW(&string16[0], 0, nullptr, nullptr);
if (len != 0) {
wchar_t *text = gb_alloc_array(permanent_allocator(), wchar_t, len+1);
GetFullPathNameW(&string16[0], len, text, nullptr);
mutex_unlock(&fullpath_mutex);
text[len] = 0;
result = string16_to_string(a, make_string16(text, len));
result = string_trim_whitespace(result);
@@ -956,6 +961,8 @@ gb_internal String path_to_fullpath(gbAllocator a, String s) {
result.text[i] = '/';
}
}
} else {
mutex_unlock(&fullpath_mutex);
}
return result;
@@ -1325,11 +1332,10 @@ gb_internal void enable_target_feature(TokenPos pos, String const &target_featur
gb_internal char const *target_features_set_to_cstring(gbAllocator allocator, bool with_quotes) {
isize len = 0;
isize i = 0;
for (auto const &entry : build_context.target_features_set) {
for (String const &feature : build_context.target_features_set) {
if (i != 0) {
len += 1;
}
String feature = entry.value;
len += feature.len;
if (with_quotes) len += 2;
i += 1;
@@ -1337,13 +1343,12 @@ gb_internal char const *target_features_set_to_cstring(gbAllocator allocator, bo
char *features = gb_alloc_array(allocator, char, len+1);
len = 0;
i = 0;
for (auto const &entry : build_context.target_features_set) {
for (String const &feature : build_context.target_features_set) {
if (i != 0) {
features[len++] = ',';
}
if (with_quotes) features[len++] = '"';
String feature = entry.value;
gb_memmove(features + len, feature.text, feature.len);
len += feature.len;
if (with_quotes) features[len++] = '"';
@@ -1362,8 +1367,7 @@ gb_internal bool init_build_paths(String init_filename) {
// NOTE(Jeroen): We're pre-allocating BuildPathCOUNT slots so that certain paths are always at the same enumerated index.
array_init(&bc->build_paths, permanent_allocator(), BuildPathCOUNT);
string_set_init(&bc->target_features_set, heap_allocator(), 1024);
mutex_init(&bc->target_features_mutex);
string_set_init(&bc->target_features_set, 1024);
// [BuildPathMainPackage] Turn given init path into a `Path`, which includes normalizing it into a full path.
bc->build_paths[BuildPath_Main_Package] = path_from_string(ha, init_filename);

View File

@@ -96,8 +96,7 @@ gb_internal void check_or_else_expr_no_value_error(CheckerContext *c, String con
gbString th = nullptr;
if (type_hint != nullptr) {
GB_ASSERT(bsrc->kind == Type_Union);
for_array(i, bsrc->Union.variants) {
Type *vt = bsrc->Union.variants[i];
for (Type *vt : bsrc->Union.variants) {
if (are_types_identical(vt, type_hint)) {
th = type_to_string(type_hint);
break;
@@ -198,8 +197,7 @@ gb_internal void add_objc_proc_type(CheckerContext *c, Ast *call, Type *return_t
{
auto variables = array_make<Entity *>(permanent_allocator(), 0, param_types.count);
for_array(i, param_types) {
Type *type = param_types[i];
for (Type *type : param_types) {
Entity *param = alloc_entity_param(scope, blank_token, type, false, true);
array_add(&variables, param);
}
@@ -1110,7 +1108,7 @@ gb_internal bool cache_load_file_directive(CheckerContext *c, Ast *call, String
new_cache->path = path;
new_cache->data = data;
new_cache->file_error = file_error;
string_map_init(&new_cache->hashes, heap_allocator(), 32);
string_map_init(&new_cache->hashes, 32);
string_map_set(&c->info->load_file_cache, path, new_cache);
if (cache_) *cache_ = new_cache;
} else {
@@ -1120,8 +1118,7 @@ gb_internal bool cache_load_file_directive(CheckerContext *c, Ast *call, String
}
});
char *c_str = alloc_cstring(heap_allocator(), path);
defer (gb_free(heap_allocator(), c_str));
char *c_str = alloc_cstring(temporary_allocator(), path);
gbFile f = {};
if (cache == nullptr) {
@@ -3071,8 +3068,7 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As
bool first_is_field_value = (ce->args[0]->kind == Ast_FieldValue);
bool fail = false;
for_array(i, ce->args) {
Ast *arg = ce->args[i];
for (Ast *arg : ce->args) {
bool mix = false;
if (first_is_field_value) {
mix = arg->kind != Ast_FieldValue;
@@ -3086,11 +3082,10 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As
}
}
StringSet name_set = {};
string_set_init(&name_set, heap_allocator(), 2*ce->args.count);
string_set_init(&name_set, 2*ce->args.count);
for_array(i, ce->args) {
for (Ast *arg : ce->args) {
String name = {};
Ast *arg = ce->args[i];
if (arg->kind == Ast_FieldValue) {
Ast *ename = arg->FieldValue.field;
if (!fail && ename->kind != Ast_Ident) {
@@ -3577,7 +3572,7 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As
Entity *base_type_entity = alloc_entity_type_name(scope, token, elem, EntityState_Resolved);
add_entity(c, scope, nullptr, base_type_entity);
add_type_info_type(c, soa_struct);
// add_type_info_type(c, soa_struct);
operand->type = soa_struct;
break;
@@ -4987,8 +4982,7 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As
bool is_variant = false;
for_array(i, u->Union.variants) {
Type *vt = u->Union.variants[i];
for (Type *vt : u->Union.variants) {
if (are_types_identical(v, vt)) {
is_variant = true;
break;

View File

@@ -45,7 +45,7 @@ gb_internal Type *check_init_variable(CheckerContext *ctx, Entity *e, Operand *o
if (operand->mode == Addressing_Type) {
if (e->type != nullptr && is_type_typeid(e->type)) {
add_type_info_type(ctx, operand->type);
add_type_and_value(ctx->info, operand->expr, Addressing_Value, e->type, exact_value_typeid(operand->type));
add_type_and_value(ctx, operand->expr, Addressing_Value, e->type, exact_value_typeid(operand->type));
return e->type;
} else {
gbString t = type_to_string(operand->type);
@@ -354,8 +354,7 @@ gb_internal void check_type_decl(CheckerContext *ctx, Entity *e, Ast *init_expr,
Type *t = base_type(e->type);
if (t->kind == Type_Enum) {
for_array(i, t->Enum.fields) {
Entity *f = t->Enum.fields[i];
for (Entity *f : t->Enum.fields) {
if (f->kind != Entity_Constant) {
continue;
}
@@ -382,8 +381,8 @@ gb_internal void override_entity_in_scope(Entity *original_entity, Entity *new_e
if (found_scope == nullptr) {
return;
}
mutex_lock(&found_scope->mutex);
defer (mutex_unlock(&found_scope->mutex));
rw_mutex_lock(&found_scope->mutex);
defer (rw_mutex_unlock(&found_scope->mutex));
// IMPORTANT NOTE(bill, 2021-04-10): Overriding behaviour was flawed in that the
// original entity was still used check checked, but the checking was only
@@ -986,7 +985,7 @@ gb_internal void check_proc_decl(CheckerContext *ctx, Entity *e, DeclInfo *d) {
GB_ASSERT(pl->body->kind == Ast_BlockStmt);
if (!pt->is_polymorphic) {
check_procedure_later(ctx, ctx->file, e->token, d, proc_type, pl->body, pl->tags);
check_procedure_later(ctx->checker, ctx->file, e->token, d, proc_type, pl->body, pl->tags);
}
} else if (!is_foreign) {
if (e->Procedure.is_export) {
@@ -1235,10 +1234,9 @@ gb_internal void check_proc_group_decl(CheckerContext *ctx, Entity *&pg_entity,
pg_entity->type = t_invalid;
PtrSet<Entity *> entity_set = {};
ptr_set_init(&entity_set, heap_allocator(), 2*pg->args.count);
ptr_set_init(&entity_set, 2*pg->args.count);
for_array(i, pg->args) {
Ast *arg = pg->args[i];
for (Ast *arg : pg->args) {
Entity *e = nullptr;
Operand o = {};
if (arg->kind == Ast_Ident) {
@@ -1271,7 +1269,7 @@ gb_internal void check_proc_group_decl(CheckerContext *ctx, Entity *&pg_entity,
ptr_set_destroy(&entity_set);
for_array(j, pge->entities) {
for (isize j = 0; j < pge->entities.count; j++) {
Entity *p = pge->entities[j];
if (p->type == t_invalid) {
// NOTE(bill): This invalid overload has already been handled
@@ -1413,15 +1411,46 @@ end:;
}
// Propagates a child declaration's dependency sets (entity deps and
// type-info deps) up into its parent declaration.
//
// This is only done at the procedure level: when the parent's scope is a
// file, package, or global scope, the propagation is skipped, because those
// scopes track dependencies differently.
//
// Thread-safety: takes a shared (read) lock on the child's mutexes and an
// exclusive (write) lock on the parent's, in a consistent child-then-parent
// order for each set. NOTE(review): assumes no concurrent caller locks these
// in the opposite order — confirm against other lock sites.
gb_internal void add_deps_from_child_to_parent(DeclInfo *decl) {
	if (decl && decl->parent) {
		Scope *ps = decl->parent->scope;
		// BUG FIX: this mask previously used bitwise AND between the flag
		// constants (`ScopeFlag_File & ScopeFlag_Pkg & ScopeFlag_Global`),
		// which evaluates to 0 for distinct single-bit flags, making the
		// early return unreachable. The intent — per the note below — is to
		// skip propagation when the parent scope is any of file/pkg/global,
		// which requires OR-ing the flags into the test mask.
		if (ps->flags & (ScopeFlag_File|ScopeFlag_Pkg|ScopeFlag_Global)) {
			return;
		} else {
			// NOTE(bill): Add the dependencies from the procedure literal (lambda)
			// But only at the procedure level
			rw_mutex_shared_lock(&decl->deps_mutex);
			rw_mutex_lock(&decl->parent->deps_mutex);
			for (Entity *e : decl->deps) {
				ptr_set_add(&decl->parent->deps, e);
			}
			rw_mutex_unlock(&decl->parent->deps_mutex);
			rw_mutex_shared_unlock(&decl->deps_mutex);

			rw_mutex_shared_lock(&decl->type_info_deps_mutex);
			rw_mutex_lock(&decl->parent->type_info_deps_mutex);
			for (Type *t : decl->type_info_deps) {
				ptr_set_add(&decl->parent->type_info_deps, t);
			}
			rw_mutex_unlock(&decl->parent->type_info_deps_mutex);
			rw_mutex_shared_unlock(&decl->type_info_deps_mutex);
		}
	}
}
// Pairs a procedure parameter marked with 'using' together with one of the
// synthetic "using variable" entities generated for a field of that
// parameter's struct type (see alloc_entity_using_variable in
// check_proc_body), so that namespace-collision errors can be reported
// against the original parameter's token.
struct ProcUsingVar {
	Entity *e;    // the 'using' procedure parameter itself
	Entity *uvar; // the using-variable entity created for one of e's struct fields
};
gb_internal void check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *decl, Type *type, Ast *body) {
gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *decl, Type *type, Ast *body) {
if (body == nullptr) {
return;
return false;
}
GB_ASSERT(body->kind == Ast_BlockStmt);
@@ -1462,8 +1491,7 @@ gb_internal void check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de
{
if (type->Proc.param_count > 0) {
TypeTuple *params = &type->Proc.params->Tuple;
for_array(i, params->variables) {
Entity *e = params->variables[i];
for (Entity *e : params->variables) {
if (e->kind != Entity_Variable) {
continue;
}
@@ -1471,7 +1499,7 @@ gb_internal void check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de
continue;
}
if (is_blank_ident(e->token)) {
error(e->token, "'using' a procedure parameter requires a non blank identifier");
error(e->token, "'using' a procedure parameter requires a non blank identifier");
break;
}
@@ -1481,7 +1509,8 @@ gb_internal void check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de
if (t->kind == Type_Struct) {
Scope *scope = t->Struct.scope;
GB_ASSERT(scope != nullptr);
MUTEX_GUARD_BLOCK(scope->mutex) for (auto const &entry : scope->elements) {
rw_mutex_lock(&scope->mutex);
for (auto const &entry : scope->elements) {
Entity *f = entry.value;
if (f->kind == Entity_Variable) {
Entity *uvar = alloc_entity_using_variable(e, f->token, f->type, nullptr);
@@ -1491,6 +1520,7 @@ gb_internal void check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de
array_add(&using_entities, puv);
}
}
rw_mutex_unlock(&scope->mutex);
} else {
error(e->token, "'using' can only be applied to variables of type struct");
break;
@@ -1499,45 +1529,50 @@ gb_internal void check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de
}
}
MUTEX_GUARD_BLOCK(ctx->scope->mutex) for_array(i, using_entities) {
Entity *e = using_entities[i].e;
Entity *uvar = using_entities[i].uvar;
Entity *prev = scope_insert(ctx->scope, uvar, false);
rw_mutex_lock(&ctx->scope->mutex);
for (auto const &entry : using_entities) {
Entity *e = entry.e;
Entity *uvar = entry.uvar;
Entity *prev = scope_insert_no_mutex(ctx->scope, uvar);
if (prev != nullptr) {
error(e->token, "Namespace collision while 'using' procedure argument '%.*s' of: %.*s", LIT(e->token.string), LIT(prev->token.string));
error_line("%.*s != %.*s\n", LIT(uvar->token.string), LIT(prev->token.string));
break;
}
}
rw_mutex_unlock(&ctx->scope->mutex);
bool where_clause_ok = evaluate_where_clauses(ctx, nullptr, decl->scope, &decl->proc_lit->ProcLit.where_clauses, !decl->where_clauses_evaluated);
if (!where_clause_ok) {
// NOTE(bill, 2019-08-31): Don't check the body as the where clauses failed
return;
return false;
}
check_open_scope(ctx, body);
{
for_array(i, using_entities) {
Entity *uvar = using_entities[i].uvar;
for (auto const &entry : using_entities) {
Entity *uvar = entry.uvar;
Entity *prev = scope_insert(ctx->scope, uvar);
gb_unused(prev);
// NOTE(bill): Don't err here
}
GB_ASSERT(decl->defer_use_checked == false);
GB_ASSERT(decl->proc_checked_state != ProcCheckedState_Checked);
if (decl->defer_use_checked) {
GB_ASSERT(is_type_polymorphic(type, true));
error(token, "Defer Use Checked: %.*s", LIT(decl->entity->token.string));
GB_ASSERT(decl->defer_use_checked == false);
}
check_stmt_list(ctx, bs->stmts, Stmt_CheckScopeDecls);
decl->defer_use_checked = true;
for_array(i, bs->stmts) {
Ast *stmt = bs->stmts[i];
for (Ast *stmt : bs->stmts) {
if (stmt->kind == Ast_ValueDecl) {
ast_node(vd, ValueDecl, stmt);
for_array(j, vd->names) {
Ast *name = vd->names[j];
for (Ast *name : vd->names) {
if (!is_blank_ident(name)) {
if (name->kind == Ast_Ident) {
GB_ASSERT(name->Ident.entity != nullptr);
@@ -1572,25 +1607,7 @@ gb_internal void check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de
check_scope_usage(ctx->checker, ctx->scope);
if (decl->parent != nullptr) {
Scope *ps = decl->parent->scope;
if (ps->flags & (ScopeFlag_File & ScopeFlag_Pkg & ScopeFlag_Global)) {
return;
} else {
mutex_lock(&ctx->info->deps_mutex);
add_deps_from_child_to_parent(decl);
// NOTE(bill): Add the dependencies from the procedure literal (lambda)
// But only at the procedure level
for (auto const &entry : decl->deps) {
Entity *e = entry.ptr;
ptr_set_add(&decl->parent->deps, e);
}
for (auto const &entry : decl->type_info_deps) {
Type *t = entry.ptr;
ptr_set_add(&decl->parent->type_info_deps, t);
}
mutex_unlock(&ctx->info->deps_mutex);
}
}
return true;
}

View File

@@ -86,7 +86,6 @@ gb_internal Entity * find_polymorphic_record_entity (CheckerContext *c, Type *or
gb_internal void check_not_tuple (CheckerContext *c, Operand *operand);
gb_internal void convert_to_typed (CheckerContext *c, Operand *operand, Type *target_type);
gb_internal gbString expr_to_string (Ast *expression);
gb_internal void check_proc_body (CheckerContext *c, Token token, DeclInfo *decl, Type *type, Ast *body);
gb_internal void update_untyped_expr_type (CheckerContext *c, Ast *e, Type *type, bool final);
gb_internal bool check_is_terminating (Ast *node, String const &label);
gb_internal bool check_has_break (Ast *stmt, String const &label, bool implicit);
@@ -147,8 +146,8 @@ gb_internal void check_did_you_mean_print(DidYouMeanAnswers *d, char const *pref
auto results = did_you_mean_results(d);
if (results.count != 0) {
error_line("\tSuggestion: Did you mean?\n");
for_array(i, results) {
String const &target = results[i].target;
for (auto const &result : results) {
String const &target = result.target;
error_line("\t\t%s%.*s\n", prefix, LIT(target));
// error_line("\t\t%.*s %td\n", LIT(target), results[i].distance);
}
@@ -167,19 +166,16 @@ gb_internal void populate_check_did_you_mean_objc_entity(StringSet *set, Entity
GB_ASSERT(t->kind == Type_Struct);
if (is_type) {
for_array(i, objc_metadata->type_entries) {
String name = objc_metadata->type_entries[i].name;
string_set_add(set, name);
for (auto const &entry : objc_metadata->type_entries) {
string_set_add(set, entry.name);
}
} else {
for_array(i, objc_metadata->value_entries) {
String name = objc_metadata->value_entries[i].name;
string_set_add(set, name);
for (auto const &entry : objc_metadata->value_entries) {
string_set_add(set, entry.name);
}
}
for_array(i, t->Struct.fields) {
Entity *f = t->Struct.fields[i];
for (Entity *f : t->Struct.fields) {
if (f->flags & EntityFlag_Using && f->type != nullptr) {
if (f->type->kind == Type_Named && f->type->Named.type_name) {
populate_check_did_you_mean_objc_entity(set, f->type->Named.type_name, is_type);
@@ -194,19 +190,17 @@ gb_internal void check_did_you_mean_objc_entity(String const &name, Entity *e, b
GB_ASSERT(e->kind == Entity_TypeName);
GB_ASSERT(e->TypeName.objc_metadata != nullptr);
auto *objc_metadata = e->TypeName.objc_metadata;
mutex_lock(objc_metadata->mutex);
defer (mutex_unlock(objc_metadata->mutex));
MUTEX_GUARD(objc_metadata->mutex);
StringSet set = {};
string_set_init(&set, heap_allocator());
defer (string_set_destroy(&set));
populate_check_did_you_mean_objc_entity(&set, e, is_type);
DidYouMeanAnswers d = did_you_mean_make(heap_allocator(), set.entries.count, name);
defer (did_you_mean_destroy(&d));
for (auto const &entry : set) {
did_you_mean_append(&d, entry.value);
for (String const &target : set) {
did_you_mean_append(&d, target);
}
check_did_you_mean_print(&d, prefix);
}
@@ -217,8 +211,8 @@ gb_internal void check_did_you_mean_type(String const &name, Array<Entity *> con
DidYouMeanAnswers d = did_you_mean_make(heap_allocator(), fields.count, name);
defer (did_you_mean_destroy(&d));
for_array(i, fields) {
did_you_mean_append(&d, fields[i]->token.string);
for (Entity *e : fields) {
did_you_mean_append(&d, e->token.string);
}
check_did_you_mean_print(&d, prefix);
}
@@ -230,8 +224,8 @@ gb_internal void check_did_you_mean_type(String const &name, Slice<Entity *> con
DidYouMeanAnswers d = did_you_mean_make(heap_allocator(), fields.count, name);
defer (did_you_mean_destroy(&d));
for_array(i, fields) {
did_you_mean_append(&d, fields[i]->token.string);
for (Entity *e : fields) {
did_you_mean_append(&d, e->token.string);
}
check_did_you_mean_print(&d, prefix);
}
@@ -242,10 +236,12 @@ gb_internal void check_did_you_mean_scope(String const &name, Scope *scope, char
DidYouMeanAnswers d = did_you_mean_make(heap_allocator(), scope->elements.entries.count, name);
defer (did_you_mean_destroy(&d));
MUTEX_GUARD_BLOCK(&scope->mutex) for (auto const &entry : scope->elements) {
rw_mutex_shared_lock(&scope->mutex);
for (auto const &entry : scope->elements) {
Entity *e = entry.value;
did_you_mean_append(&d, e->token.string);
}
rw_mutex_shared_unlock(&scope->mutex);
check_did_you_mean_print(&d, prefix);
}
@@ -370,9 +366,6 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E
GB_ASSERT(dst == nullptr);
}
mutex_lock(&info->gen_procs_mutex);
defer (mutex_unlock(&info->gen_procs_mutex));
if (!src->Proc.is_polymorphic || src->Proc.is_poly_specialized) {
return false;
}
@@ -417,8 +410,6 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E
CheckerContext nctx = *old_c;
nctx.procs_to_check_queue = old_c->procs_to_check_queue;
Scope *scope = create_scope(info, base_entity->scope);
scope->flags |= ScopeFlag_Proc;
nctx.scope = scope;
@@ -439,21 +430,39 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E
return false;
}
auto *found_gen_procs = map_get(&info->gen_procs, base_entity->identifier.load());
if (found_gen_procs) {
auto procs = *found_gen_procs;
for_array(i, procs) {
Entity *other = procs[i];
GenProcsData *gen_procs = nullptr;
GB_ASSERT(base_entity->identifier.load()->kind == Ast_Ident);
GB_ASSERT(base_entity->kind == Entity_Procedure);
mutex_lock(&base_entity->Procedure.gen_procs_mutex); // @entity-mutex
gen_procs = base_entity->Procedure.gen_procs;
if (gen_procs) {
rw_mutex_shared_lock(&gen_procs->mutex); // @local-mutex
mutex_unlock(&base_entity->Procedure.gen_procs_mutex); // @entity-mutex
for (Entity *other : gen_procs->procs) {
Type *pt = base_type(other->type);
if (are_types_identical(pt, final_proc_type)) {
rw_mutex_shared_unlock(&gen_procs->mutex); // @local-mutex
if (poly_proc_data) {
poly_proc_data->gen_entity = other;
}
return true;
}
}
rw_mutex_shared_unlock(&gen_procs->mutex); // @local-mutex
} else {
gen_procs = gb_alloc_item(permanent_allocator(), GenProcsData);
gen_procs->procs.allocator = heap_allocator();
base_entity->Procedure.gen_procs = gen_procs;
mutex_unlock(&base_entity->Procedure.gen_procs_mutex); // @entity-mutex
}
{
// LEAK TODO(bill): This is technically a memory leak as it has to generate the type twice
bool prev_no_polymorphic_errors = nctx.no_polymorphic_errors;
@@ -466,24 +475,39 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E
// LEAK TODO(bill): Cloning this AST may be leaky
Ast *cloned_proc_type_node = clone_ast(pt->node);
success = check_procedure_type(&nctx, final_proc_type, cloned_proc_type_node, &operands);
if (!success) {
return false;
}
if (found_gen_procs) {
auto procs = *found_gen_procs;
for_array(i, procs) {
Entity *other = procs[i];
Type *pt = base_type(other->type);
if (are_types_identical(pt, final_proc_type)) {
if (poly_proc_data) {
poly_proc_data->gen_entity = other;
}
return true;
rw_mutex_shared_lock(&gen_procs->mutex); // @local-mutex
for (Entity *other : gen_procs->procs) {
Type *pt = base_type(other->type);
if (are_types_identical(pt, final_proc_type)) {
rw_mutex_shared_unlock(&gen_procs->mutex); // @local-mutex
if (poly_proc_data) {
poly_proc_data->gen_entity = other;
}
DeclInfo *decl = other->decl_info;
if (decl->proc_checked_state != ProcCheckedState_Checked) {
ProcInfo *proc_info = gb_alloc_item(permanent_allocator(), ProcInfo);
proc_info->file = other->file;
proc_info->token = other->token;
proc_info->decl = decl;
proc_info->type = other->type;
proc_info->body = decl->proc_lit->ProcLit.body;
proc_info->tags = other->Procedure.tags;;
proc_info->generated_from_polymorphic = true;
proc_info->poly_def_node = poly_def_node;
check_procedure_later(nctx.checker, proc_info);
}
return true;
}
}
rw_mutex_shared_unlock(&gen_procs->mutex); // @local-mutex
}
@@ -520,7 +544,8 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E
d->gen_proc_type = final_proc_type;
d->type_expr = pl->type;
d->proc_lit = proc_lit;
d->proc_checked = false;
d->proc_checked_state = ProcCheckedState_Unchecked;
d->defer_use_checked = false;
Entity *entity = alloc_entity_procedure(nullptr, token, final_proc_type, tags);
entity->identifier = ident;
@@ -530,7 +555,8 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E
entity->scope = scope->parent;
entity->file = base_entity->file;
entity->pkg = base_entity->pkg;
entity->flags &= ~EntityFlag_ProcBodyChecked;
entity->flags = 0;
d->entity = entity;
AstFile *file = nullptr;
{
@@ -541,6 +567,10 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E
}
}
rw_mutex_lock(&gen_procs->mutex); // @local-mutex
array_add(&gen_procs->procs, entity);
rw_mutex_unlock(&gen_procs->mutex); // @local-mutex
ProcInfo *proc_info = gb_alloc_item(permanent_allocator(), ProcInfo);
proc_info->file = file;
proc_info->token = token;
@@ -551,13 +581,6 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E
proc_info->generated_from_polymorphic = true;
proc_info->poly_def_node = poly_def_node;
if (found_gen_procs) {
array_add(found_gen_procs, entity);
} else {
auto array = array_make<Entity *>(heap_allocator());
array_add(&array, entity);
map_set(&info->gen_procs, base_entity->identifier.load(), array);
}
if (poly_proc_data) {
poly_proc_data->gen_entity = entity;
@@ -566,7 +589,7 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E
}
// NOTE(bill): Check the newly generated procedure body
check_procedure_later(&nctx, proc_info);
check_procedure_later(nctx.checker, proc_info);
return true;
}
@@ -755,8 +778,7 @@ gb_internal i64 check_distance_between_types(CheckerContext *c, Operand *operand
}
if (is_type_union(dst)) {
for_array(i, dst->Union.variants) {
Type *vt = dst->Union.variants[i];
for (Type *vt : dst->Union.variants) {
if (are_types_identical(vt, s)) {
return 1;
}
@@ -772,8 +794,7 @@ gb_internal i64 check_distance_between_types(CheckerContext *c, Operand *operand
} else if (is_type_untyped(src)) {
i64 prev_lowest_score = -1;
i64 lowest_score = -1;
for_array(i, dst->Union.variants) {
Type *vt = dst->Union.variants[i];
for (Type *vt : dst->Union.variants) {
i64 score = check_distance_between_types(c, operand, vt);
if (score >= 0) {
if (lowest_score < 0) {
@@ -817,7 +838,7 @@ gb_internal i64 check_distance_between_types(CheckerContext *c, Operand *operand
PolyProcData poly_proc_data = {};
if (check_polymorphic_procedure_assignment(c, operand, type, operand->expr, &poly_proc_data)) {
Entity *e = poly_proc_data.gen_entity;
add_type_and_value(c->info, operand->expr, Addressing_Value, e->type, {});
add_type_and_value(c, operand->expr, Addressing_Value, e->type, {});
add_entity_use(c, operand->expr, e);
return 4;
}
@@ -1005,8 +1026,8 @@ gb_internal void check_assignment(CheckerContext *c, Operand *operand, Type *typ
if (type != nullptr && is_type_proc(type)) {
Array<Entity *> procs = proc_group_entities(c, *operand);
// NOTE(bill): These should be done
for_array(i, procs) {
Type *t = base_type(procs[i]->type);
for (Entity *e : procs) {
Type *t = base_type(e->type);
if (t == t_invalid) {
continue;
}
@@ -1014,7 +1035,6 @@ gb_internal void check_assignment(CheckerContext *c, Operand *operand, Type *typ
x.mode = Addressing_Value;
x.type = t;
if (check_is_assignable_to(c, &x, type)) {
Entity *e = procs[i];
add_entity_use(c, operand->expr, e);
good = true;
break;
@@ -1047,7 +1067,7 @@ gb_internal void check_assignment(CheckerContext *c, Operand *operand, Type *typ
if (check_is_assignable_to(c, operand, type)) {
if (operand->mode == Addressing_Type && is_type_typeid(type)) {
add_type_info_type(c, operand->type);
add_type_and_value(c->info, operand->expr, Addressing_Value, type, exact_value_typeid(operand->type));
add_type_and_value(c, operand->expr, Addressing_Value, type, exact_value_typeid(operand->type));
}
} else {
gbString expr_str = expr_to_string(operand->expr);
@@ -1444,7 +1464,7 @@ gb_internal bool check_cycle(CheckerContext *c, Entity *curr, bool report) {
return false;
}
for_array(i, *c->type_path) {
Entity *prev = (*c->type_path)[i];
Entity *prev = c->type_path->data[i];
if (prev == curr) {
if (report) {
error(curr->token, "Illegal declaration cycle of `%.*s`", LIT(curr->token.string));
@@ -1509,8 +1529,8 @@ gb_internal Entity *check_ident(CheckerContext *c, Operand *o, Ast *n, Type *nam
if (type_hint != nullptr && is_type_proc(type_hint)) {
// NOTE(bill): These should be done
for_array(i, procs) {
Type *t = base_type(procs[i]->type);
for (Entity *proc : procs) {
Type *t = base_type(proc->type);
if (t == t_invalid) {
continue;
}
@@ -1518,7 +1538,7 @@ gb_internal Entity *check_ident(CheckerContext *c, Operand *o, Ast *n, Type *nam
x.mode = Addressing_Value;
x.type = t;
if (check_is_assignable_to(c, &x, type_hint)) {
e = procs[i];
e = proc;
add_entity_use(c, n, e);
skip = true;
break;
@@ -2339,7 +2359,7 @@ gb_internal void check_comparison(CheckerContext *c, Operand *x, Operand *y, Tok
if (x->mode == Addressing_Type && is_type_typeid(y->type)) {
add_type_info_type(c, x->type);
add_type_info_type(c, y->type);
add_type_and_value(c->info, x->expr, Addressing_Value, y->type, exact_value_typeid(x->type));
add_type_and_value(c, x->expr, Addressing_Value, y->type, exact_value_typeid(x->type));
x->mode = Addressing_Value;
x->type = t_untyped_bool;
@@ -2347,7 +2367,7 @@ gb_internal void check_comparison(CheckerContext *c, Operand *x, Operand *y, Tok
} else if (is_type_typeid(x->type) && y->mode == Addressing_Type) {
add_type_info_type(c, x->type);
add_type_info_type(c, y->type);
add_type_and_value(c->info, y->expr, Addressing_Value, x->type, exact_value_typeid(y->type));
add_type_and_value(c, y->expr, Addressing_Value, x->type, exact_value_typeid(y->type));
x->mode = Addressing_Value;
x->type = t_untyped_bool;
@@ -3580,7 +3600,7 @@ gb_internal void update_untyped_expr_type(CheckerContext *c, Ast *e, Type *type,
if (old == nullptr) {
if (type != nullptr && type != t_invalid) {
if (e->tav.type == nullptr || e->tav.type == t_invalid) {
add_type_and_value(c->info, e, e->tav.mode, type ? type : e->tav.type, e->tav.value);
add_type_and_value(c, e, e->tav.mode, type ? type : e->tav.type, e->tav.value);
if (e->kind == Ast_TernaryIfExpr) {
update_untyped_expr_type(c, e->TernaryIfExpr.x, type, final);
update_untyped_expr_type(c, e->TernaryIfExpr.y, type, final);
@@ -3686,7 +3706,7 @@ gb_internal void update_untyped_expr_type(CheckerContext *c, Ast *e, Type *type,
return;
}
add_type_and_value(c->info, e, old->mode, type, old->value);
add_type_and_value(c, e, old->mode, type, old->value);
}
gb_internal void update_untyped_expr_value(CheckerContext *c, Ast *e, ExactValue value) {
@@ -4148,8 +4168,7 @@ gb_internal ExactValue get_constant_field_single(CheckerContext *c, ExactValue v
if (cl->elems[0]->kind == Ast_FieldValue) {
if (is_type_struct(node->tav.type)) {
bool found = false;
for_array(i, cl->elems) {
Ast *elem = cl->elems[i];
for (Ast *elem : cl->elems) {
if (elem->kind != Ast_FieldValue) {
continue;
}
@@ -4168,8 +4187,7 @@ gb_internal ExactValue get_constant_field_single(CheckerContext *c, ExactValue v
value = {};
}
} else if (is_type_array(node->tav.type) || is_type_enumerated_array(node->tav.type)) {
for_array(i, cl->elems) {
Ast *elem = cl->elems[i];
for (Ast *elem : cl->elems) {
if (elem->kind != Ast_FieldValue) {
continue;
}
@@ -4534,7 +4552,7 @@ gb_internal Entity *check_selector(CheckerContext *c, Operand *operand, Ast *nod
operand->mode = Addressing_ProcGroup;
operand->proc_group = entity;
add_type_and_value(c->info, operand->expr, operand->mode, operand->type, operand->value);
add_type_and_value(c, operand->expr, operand->mode, operand->type, operand->value);
return entity;
}
GB_ASSERT_MSG(entity->type != nullptr, "%.*s (%.*s)", LIT(entity->token.string), LIT(entity_strings[entity->kind]));
@@ -4552,8 +4570,7 @@ gb_internal Entity *check_selector(CheckerContext *c, Operand *operand, Ast *nod
if (entity->kind == Entity_ProcGroup) {
Array<Entity *> procs = entity->ProcGroup.entities;
bool skip = false;
for_array(i, procs) {
Entity *p = procs[i];
for (Entity *p : procs) {
Type *t = base_type(p->type);
if (t == t_invalid) {
continue;
@@ -4703,7 +4720,7 @@ gb_internal Entity *check_selector(CheckerContext *c, Operand *operand, Ast *nod
}
Entity *swizzle_entity = alloc_entity_variable(nullptr, make_token_ident(field_name), operand->type, EntityState_Resolved);
add_type_and_value(c->info, operand->expr, operand->mode, operand->type, operand->value);
add_type_and_value(c, operand->expr, operand->mode, operand->type, operand->value);
return swizzle_entity;
}
end_of_array_selector_swizzle:;
@@ -4747,7 +4764,7 @@ gb_internal Entity *check_selector(CheckerContext *c, Operand *operand, Ast *nod
operand->value = field_value;
operand->type = entity->type;
add_entity_use(c, selector, entity);
add_type_and_value(c->info, operand->expr, operand->mode, operand->type, operand->value);
add_type_and_value(c, operand->expr, operand->mode, operand->type, operand->value);
return entity;
}
@@ -4772,7 +4789,7 @@ gb_internal Entity *check_selector(CheckerContext *c, Operand *operand, Ast *nod
operand->value = field_value;
operand->type = entity->type;
add_entity_use(c, selector, entity);
add_type_and_value(c->info, operand->expr, operand->mode, operand->type, operand->value);
add_type_and_value(c, operand->expr, operand->mode, operand->type, operand->value);
return entity;
}
@@ -4860,7 +4877,7 @@ gb_internal Entity *check_selector(CheckerContext *c, Operand *operand, Ast *nod
break;
}
add_type_and_value(c->info, operand->expr, operand->mode, operand->type, operand->value);
add_type_and_value(c, operand->expr, operand->mode, operand->type, operand->value);
return entity;
}
@@ -4909,22 +4926,21 @@ gb_internal bool check_identifier_exists(Scope *s, Ast *node, bool nested = fals
gb_internal isize add_dependencies_from_unpacking(CheckerContext *c, Entity **lhs, isize lhs_count, isize tuple_index, isize tuple_count) {
if (lhs != nullptr && c->decl != nullptr) {
mutex_lock(&c->info->deps_mutex);
for (isize j = 0; (tuple_index + j) < lhs_count && j < tuple_count; j++) {
Entity *e = lhs[tuple_index + j];
if (e != nullptr) {
DeclInfo *decl = decl_info_of_entity(e);
if (decl != nullptr) {
for (auto const &entry : decl->deps) {
Entity *dep = entry.ptr;
rw_mutex_shared_lock(&decl->deps_mutex);
rw_mutex_lock(&c->decl->deps_mutex);
for (Entity *dep : decl->deps) {
ptr_set_add(&c->decl->deps, dep);
}
rw_mutex_unlock(&c->decl->deps_mutex);
rw_mutex_shared_unlock(&decl->deps_mutex);
}
}
}
mutex_unlock(&c->info->deps_mutex);
}
return tuple_count;
}
@@ -4933,7 +4949,7 @@ gb_internal isize add_dependencies_from_unpacking(CheckerContext *c, Entity **lh
gb_internal bool check_assignment_arguments(CheckerContext *ctx, Array<Operand> const &lhs, Array<Operand> *operands, Slice<Ast *> const &rhs) {
bool optional_ok = false;
isize tuple_index = 0;
for_array(i, rhs) {
for (Ast *rhs_expr : rhs) {
CheckerContext c_ = *ctx;
CheckerContext *c = &c_;
@@ -4945,7 +4961,7 @@ gb_internal bool check_assignment_arguments(CheckerContext *ctx, Array<Operand>
type_hint = lhs[tuple_index].type;
}
check_expr_base(c, &o, rhs[i], type_hint);
check_expr_base(c, &o, rhs_expr, type_hint);
if (o.mode == Addressing_NoValue) {
error_operand_no_value(&o);
o.mode = Addressing_Invalid;
@@ -4997,8 +5013,8 @@ gb_internal bool check_assignment_arguments(CheckerContext *ctx, Array<Operand>
}
} else {
TypeTuple *tuple = &o.type->Tuple;
for_array(j, tuple->variables) {
o.type = tuple->variables[j]->type;
for (Entity *e : tuple->variables) {
o.type = e->type;
array_add(operands, o);
}
@@ -5090,8 +5106,8 @@ gb_internal bool check_unpack_arguments(CheckerContext *ctx, Entity **lhs, isize
}
} else {
TypeTuple *tuple = &o.type->Tuple;
for_array(j, tuple->variables) {
o.type = tuple->variables[j]->type;
for (Entity *e : tuple->variables) {
o.type = e->type;
array_add(operands, o);
}
@@ -5326,7 +5342,7 @@ gb_internal CALL_ARGUMENT_CHECKER(check_call_arguments_internal) {
if (o.mode == Addressing_Type && is_type_typeid(e->type)) {
add_type_info_type(c, o.type);
add_type_and_value(c->info, o.expr, Addressing_Value, e->type, exact_value_typeid(o.type));
add_type_and_value(c, o.expr, Addressing_Value, e->type, exact_value_typeid(o.type));
} else if (show_error && is_type_untyped(o.type)) {
update_untyped_expr_type(c, o.expr, t, true);
}
@@ -5377,7 +5393,7 @@ gb_internal CALL_ARGUMENT_CHECKER(check_call_arguments_internal) {
}
if (o.mode == Addressing_Type && is_type_typeid(t)) {
add_type_info_type(c, o.type);
add_type_and_value(c->info, o.expr, Addressing_Value, t, exact_value_typeid(o.type));
add_type_and_value(c, o.expr, Addressing_Value, t, exact_value_typeid(o.type));
} else if (show_error && is_type_untyped(o.type)) {
update_untyped_expr_type(c, o.expr, t, true);
}
@@ -5390,7 +5406,7 @@ gb_internal CALL_ARGUMENT_CHECKER(check_call_arguments_internal) {
data->score = score;
data->result_type = final_proc_type->Proc.results;
data->gen_entity = gen_entity;
add_type_and_value(c->info, ce->proc, Addressing_Value, final_proc_type, {});
add_type_and_value(c, ce->proc, Addressing_Value, final_proc_type, {});
}
return err;
@@ -5434,8 +5450,7 @@ gb_internal CALL_ARGUMENT_CHECKER(check_named_call_arguments) {
bool *visited = gb_alloc_array(temporary_allocator(), bool, param_count);
auto ordered_operands = array_make<Operand>(temporary_allocator(), param_count);
defer ({
for_array(i, ordered_operands) {
Operand const &o = ordered_operands[i];
for (Operand const &o : ordered_operands) {
if (o.expr != nullptr) {
call->viral_state_flags |= o.expr->viral_state_flags;
}
@@ -5590,7 +5605,7 @@ gb_internal CALL_ARGUMENT_CHECKER(check_named_call_arguments) {
if (o->mode == Addressing_Type && is_type_typeid(e->type)) {
add_type_info_type(c, o->type);
add_type_and_value(c->info, o->expr, Addressing_Value, e->type, exact_value_typeid(o->type));
add_type_and_value(c, o->expr, Addressing_Value, e->type, exact_value_typeid(o->type));
}
}
@@ -5598,7 +5613,7 @@ gb_internal CALL_ARGUMENT_CHECKER(check_named_call_arguments) {
data->score = score;
data->result_type = pt->results;
data->gen_entity = gen_entity;
add_type_and_value(c->info, ce->proc, Addressing_Value, proc_type, {});
add_type_and_value(c, ce->proc, Addressing_Value, proc_type, {});
}
return err;
@@ -5727,7 +5742,7 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op
// in order to improve the type inference system
StringMap<Type *> type_hint_map = {}; // Key: String
string_map_init(&type_hint_map, heap_allocator(), 2*args.count);
string_map_init(&type_hint_map, 2*args.count);
defer (string_map_destroy(&type_hint_map));
Type *ptype = nullptr;
@@ -5753,8 +5768,7 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op
param_tuple = &pt->params->Tuple;
}
if (param_tuple != nullptr) {
for_array(i, param_tuple->variables) {
Entity *e = param_tuple->variables[i];
for (Entity *e : param_tuple->variables) {
if (is_blank_ident(e->token)) {
continue;
}
@@ -5764,8 +5778,8 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op
}
} else {
Array<Entity *> procs = proc_group_entities(c, *operand);
for_array(j, procs) {
Type *proc_type = base_type(procs[j]->type);
for (Entity *proc : procs) {
Type *proc_type = base_type(proc->type);
if (is_type_proc(proc_type)) {
TypeProc *pt = &proc_type->Proc;
TypeTuple *param_tuple = nullptr;
@@ -5775,8 +5789,7 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op
if (param_tuple == nullptr) {
continue;
}
for_array(i, param_tuple->variables) {
Entity *e = param_tuple->variables[i];
for (Entity *e : param_tuple->variables) {
if (is_blank_ident(e->token)) {
continue;
}
@@ -5840,10 +5853,10 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op
if (procs.count > 1) {
isize max_arg_count = args.count;
for_array(i, args) {
for (Ast *arg : args) {
// NOTE(bill): The only thing that may have multiple values
// will be a call expression (assuming `or_return` and `()` will be stripped)
Ast *arg = strip_or_return_expr(args[i]);
arg = strip_or_return_expr(arg);
if (arg && arg->kind == Ast_CallExpr) {
max_arg_count = ISIZE_MAX;
break;
@@ -5906,8 +5919,7 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op
// where the same positional parameter has the same type value (and ellipsis)
bool proc_arg_count_all_equal = true;
isize proc_arg_count = -1;
for_array(i, procs) {
Entity *p = procs[i];
for (Entity *p : procs) {
Type *pt = base_type(p->type);
if (pt != nullptr && is_type_proc(pt)) {
if (proc_arg_count < 0) {
@@ -5929,8 +5941,7 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op
lhs = gb_alloc_array(heap_allocator(), Entity *, lhs_count);
for (isize param_index = 0; param_index < lhs_count; param_index++) {
Entity *e = nullptr;
for_array(j, procs) {
Entity *p = procs[j];
for (Entity *p : procs) {
Type *pt = base_type(p->type);
if (pt != nullptr && is_type_proc(pt)) {
if (e == nullptr) {
@@ -5971,8 +5982,8 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op
auto proc_entities = array_make<Entity *>(heap_allocator(), 0, procs.count*2 + 1);
defer (array_free(&proc_entities));
for_array(i, procs) {
array_add(&proc_entities, procs[i]);
for (Entity *proc : procs) {
array_add(&proc_entities, proc);
}
@@ -6062,8 +6073,7 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op
if (procs.count > 0) {
error_line("Did you mean to use one of the following:\n");
}
for_array(i, procs) {
Entity *proc = procs[i];
for (Entity *proc : procs) {
TokenPos pos = proc->token.pos;
Type *t = base_type(proc->type);
if (t == t_invalid) continue;
@@ -6187,7 +6197,7 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op
decl->where_clauses_evaluated = true;
if (ok && (data.gen_entity->flags & EntityFlag_ProcBodyChecked) == 0) {
check_procedure_later(c, e->file, e->token, decl, e->type, decl->proc_lit->ProcLit.body, decl->proc_lit->ProcLit.tags);
check_procedure_later(c->checker, e->file, e->token, decl, e->type, decl->proc_lit->ProcLit.body, decl->proc_lit->ProcLit.tags);
}
}
return data;
@@ -6225,7 +6235,7 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op
decl->where_clauses_evaluated = true;
if (ok && (data.gen_entity->flags & EntityFlag_ProcBodyChecked) == 0) {
check_procedure_later(c, e->file, e->token, decl, e->type, decl->proc_lit->ProcLit.body, decl->proc_lit->ProcLit.tags);
check_procedure_later(c->checker, e->file, e->token, decl, e->type, decl->proc_lit->ProcLit.body, decl->proc_lit->ProcLit.tags);
}
}
return data;
@@ -6600,7 +6610,7 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c
operand->builtin_id = BuiltinProc_DIRECTIVE;
operand->expr = proc;
operand->type = t_invalid;
add_type_and_value(c->info, proc, operand->mode, operand->type, operand->value);
add_type_and_value(c, proc, operand->mode, operand->type, operand->value);
} else {
error(proc, "Unknown directive: #%.*s", LIT(name));
operand->expr = proc;
@@ -6622,8 +6632,7 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c
if (args.count > 0) {
bool fail = false;
bool first_is_field_value = (args[0]->kind == Ast_FieldValue);
for_array(i, args) {
Ast *arg = args[i];
for (Ast *arg : args) {
bool mix = false;
if (first_is_field_value) {
mix = arg->kind != Ast_FieldValue;
@@ -6644,8 +6653,7 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c
}
if (operand->mode == Addressing_Invalid) {
for_array(i, args) {
Ast *arg = args[i];
for (Ast *arg : args) {
if (arg->kind == Ast_FieldValue) {
arg = arg->FieldValue.value;
}
@@ -6678,7 +6686,7 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c
GB_ASSERT(ot->kind == Type_Named);
Entity *e = ot->Named.type_name;
add_entity_use(c, ident, e);
add_type_and_value(c->info, call, Addressing_Type, ot, empty_exact_value);
add_type_and_value(c, call, Addressing_Type, ot, empty_exact_value);
} else {
operand->mode = Addressing_Invalid;
operand->type = t_invalid;
@@ -6850,7 +6858,7 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c
}
}
// add_type_and_value(c->info, operand->expr, operand->mode, operand->type, operand->value);
// add_type_and_value(c, operand->expr, operand->mode, operand->type, operand->value);
return Expr_Expr;
}
@@ -7094,8 +7102,8 @@ gb_internal bool check_range(CheckerContext *c, Ast *node, Operand *x, Operand *
return false;
}
add_type_and_value(c->info, ie->left, x->mode, x->type, x->value);
add_type_and_value(c->info, ie->right, y->mode, y->type, y->value);
add_type_and_value(c, ie->left, x->mode, x->type, x->value);
add_type_and_value(c, ie->right, y->mode, y->type, y->value);
return true;
}
@@ -7111,7 +7119,7 @@ gb_internal bool check_is_operand_compound_lit_constant(CheckerContext *c, Opera
return true;
}
if (expr->kind == Ast_ProcLit) {
add_type_and_value(c->info, expr, Addressing_Constant, type_of_expr(expr), exact_value_procedure(expr));
add_type_and_value(c, expr, Addressing_Constant, type_of_expr(expr), exact_value_procedure(expr));
return true;
}
}
@@ -7141,9 +7149,7 @@ gb_internal bool attempt_implicit_selector_expr(CheckerContext *c, Operand *o, A
Type *union_type = base_type(th);
auto operands = array_make<Operand>(temporary_allocator(), 0, union_type->Union.variants.count);
for_array(i, union_type->Union.variants) {
Type *vt = union_type->Union.variants[i];
for (Type *vt : union_type->Union.variants) {
Operand x = {};
if (attempt_implicit_selector_expr(c, &x, ise, vt)) {
array_add(&operands, x);
@@ -7220,7 +7226,7 @@ gb_internal void check_promote_optional_ok(CheckerContext *c, Operand *x, Type *
Type *pt = base_type(type_of_expr(expr->CallExpr.proc));
if (is_type_proc(pt)) {
Type *tuple = pt->Proc.results;
add_type_and_value(c->info, x->expr, x->mode, tuple, x->value);
add_type_and_value(c, x->expr, x->mode, tuple, x->value);
if (pt->Proc.result_count >= 2) {
if (ok_type_) *ok_type_ = tuple->Tuple.variables[1]->type;
@@ -7233,7 +7239,7 @@ gb_internal void check_promote_optional_ok(CheckerContext *c, Operand *x, Type *
Type *tuple = make_optional_ok_type(x->type);
if (ok_type_) *ok_type_ = tuple->Tuple.variables[1]->type;
add_type_and_value(c->info, x->expr, x->mode, tuple, x->value);
add_type_and_value(c, x->expr, x->mode, tuple, x->value);
x->type = tuple;
GB_ASSERT(is_type_tuple(type_of_expr(x->expr)));
}
@@ -7373,8 +7379,7 @@ gb_internal void add_to_seen_map(CheckerContext *ctx, SeenMap *seen, TokenKind u
}
bool found = false;
for_array(j, bt->Enum.fields) {
Entity *f = bt->Enum.fields[j];
for (Entity *f : bt->Enum.fields) {
GB_ASSERT(f->kind == Entity_Constant);
i64 fv = exact_value_to_i64(f->Constant.value);
@@ -7653,7 +7658,7 @@ gb_internal ExprKind check_or_else_expr(CheckerContext *c, Operand *o, Ast *node
Type *left_type = nullptr;
Type *right_type = nullptr;
check_or_else_split_types(c, &x, name, &left_type, &right_type);
add_type_and_value(&c->checker->info, arg, x.mode, x.type, x.value);
add_type_and_value(c, arg, x.mode, x.type, x.value);
if (left_type != nullptr) {
if (!y_is_diverging) {
@@ -7688,7 +7693,7 @@ gb_internal ExprKind check_or_return_expr(CheckerContext *c, Operand *o, Ast *no
Type *left_type = nullptr;
Type *right_type = nullptr;
check_or_return_split_types(c, &x, name, &left_type, &right_type);
add_type_and_value(&c->checker->info, re->expr, x.mode, x.type, x.value);
add_type_and_value(c, re->expr, x.mode, x.type, x.value);
if (right_type == nullptr) {
check_or_else_expr_no_value_error(c, name, x, type_hint);
@@ -7892,8 +7897,7 @@ gb_internal ExprKind check_compound_literal(CheckerContext *c, Operand *o, Ast *
if (cl->elems[0]->kind == Ast_FieldValue) {
bool *fields_visited = gb_alloc_array(temporary_allocator(), bool, field_count);
for_array(i, cl->elems) {
Ast *elem = cl->elems[i];
for (Ast *elem : cl->elems) {
if (elem->kind != Ast_FieldValue) {
error(elem, "Mixture of 'field = value' and value elements in a literal is not allowed");
continue;
@@ -8045,8 +8049,7 @@ gb_internal ExprKind check_compound_literal(CheckerContext *c, Operand *o, Ast *
RangeCache rc = range_cache_make(heap_allocator());
defer (range_cache_destroy(&rc));
for_array(i, cl->elems) {
Ast *elem = cl->elems[i];
for (Ast *elem : cl->elems) {
if (elem->kind != Ast_FieldValue) {
error(elem, "Mixture of 'field = value' and value elements in a literal is not allowed");
continue;
@@ -8114,7 +8117,7 @@ gb_internal ExprKind check_compound_literal(CheckerContext *c, Operand *o, Ast *
error(elem, "Expected a constant integer as an array field");
continue;
}
// add_type_and_value(c->info, op_index.expr, op_index.mode, op_index.type, op_index.value);
// add_type_and_value(c, op_index.expr, op_index.mode, op_index.type, op_index.value);
i64 index = exact_value_to_i64(op_index.value);
@@ -8227,8 +8230,7 @@ gb_internal ExprKind check_compound_literal(CheckerContext *c, Operand *o, Ast *
{
Type *bt = base_type(index_type);
GB_ASSERT(bt->kind == Type_Enum);
for_array(i, bt->Enum.fields) {
Entity *f = bt->Enum.fields[i];
for (Entity *f : bt->Enum.fields) {
if (f->kind != Entity_Constant) {
continue;
}
@@ -8257,15 +8259,13 @@ gb_internal ExprKind check_compound_literal(CheckerContext *c, Operand *o, Ast *
bool is_partial = cl->tag && (cl->tag->BasicDirective.name.string == "partial");
SeenMap seen = {}; // NOTE(bill): Multimap, Key: ExactValue
map_init(&seen, heap_allocator());
defer (map_destroy(&seen));
if (cl->elems.count > 0 && cl->elems[0]->kind == Ast_FieldValue) {
RangeCache rc = range_cache_make(heap_allocator());
defer (range_cache_destroy(&rc));
for_array(i, cl->elems) {
Ast *elem = cl->elems[i];
for (Ast *elem : cl->elems) {
if (elem->kind != Ast_FieldValue) {
error(elem, "Mixture of 'field = value' and value elements in a literal is not allowed");
continue;
@@ -8429,8 +8429,7 @@ gb_internal ExprKind check_compound_literal(CheckerContext *c, Operand *o, Ast *
auto unhandled = array_make<Entity *>(temporary_allocator(), 0, fields.count);
for_array(i, fields) {
Entity *f = fields[i];
for (Entity *f : fields) {
if (f->kind != Entity_Constant) {
continue;
}
@@ -8556,8 +8555,7 @@ gb_internal ExprKind check_compound_literal(CheckerContext *c, Operand *o, Ast *
bool key_is_typeid = is_type_typeid(t->Map.key);
bool value_is_typeid = is_type_typeid(t->Map.value);
for_array(i, cl->elems) {
Ast *elem = cl->elems[i];
for (Ast *elem : cl->elems) {
if (elem->kind != Ast_FieldValue) {
error(elem, "Only 'field = value' elements are allowed in a map literal");
continue;
@@ -8606,8 +8604,7 @@ gb_internal ExprKind check_compound_literal(CheckerContext *c, Operand *o, Ast *
error(cl->elems[0], "'field = value' in a bit_set a literal is not allowed");
is_constant = false;
} else {
for_array(index, cl->elems) {
Ast *elem = cl->elems[index];
for (Ast *elem : cl->elems) {
if (elem->kind == Ast_FieldValue) {
error(elem, "'field = value' in a bit_set a literal is not allowed");
continue;
@@ -8659,8 +8656,7 @@ gb_internal ExprKind check_compound_literal(CheckerContext *c, Operand *o, Ast *
BigInt one = {};
big_int_from_u64(&one, 1);
for_array(i, cl->elems) {
Ast *e = cl->elems[i];
for (Ast *e : cl->elems) {
GB_ASSERT(e->kind != Ast_FieldValue);
TypeAndValue tav = e->tav;
@@ -8759,8 +8755,7 @@ gb_internal ExprKind check_type_assertion(CheckerContext *c, Operand *o, Ast *no
if (bsrc->Union.variants.count != 1 && type_hint != nullptr) {
bool allowed = false;
for_array(i, bsrc->Union.variants) {
Type *vt = bsrc->Union.variants[i];
for (Type *vt : bsrc->Union.variants) {
if (are_types_identical(vt, type_hint)) {
allowed = true;
add_type_info_type(c, vt);
@@ -8793,8 +8788,7 @@ gb_internal ExprKind check_type_assertion(CheckerContext *c, Operand *o, Ast *no
if (is_type_union(src)) {
bool ok = false;
for_array(i, bsrc->Union.variants) {
Type *vt = bsrc->Union.variants[i];
for (Type *vt : bsrc->Union.variants) {
if (are_types_identical(vt, dst)) {
ok = true;
break;
@@ -8954,8 +8948,7 @@ gb_internal ExprKind check_selector_call_expr(CheckerContext *c, Operand *o, Ast
if (ce->args.count > 0) {
bool fail = false;
bool first_is_field_value = (ce->args[0]->kind == Ast_FieldValue);
for_array(i, ce->args) {
Ast *arg = ce->args[i];
for (Ast *arg : ce->args) {
bool mix = false;
if (first_is_field_value) {
mix = arg->kind != Ast_FieldValue;
@@ -9447,7 +9440,7 @@ gb_internal ExprKind check_expr_base_internal(CheckerContext *c, Operand *o, Ast
}
pl->decl = decl;
check_procedure_later(&ctx, ctx.file, empty_token, decl, type, pl->body, pl->tags);
check_procedure_later(ctx.checker, ctx.file, empty_token, decl, type, pl->body, pl->tags);
}
check_close_scope(&ctx);
@@ -9748,7 +9741,7 @@ gb_internal ExprKind check_expr_base(CheckerContext *c, Operand *o, Ast *node, T
}
check_rtti_type_disallowed(node, o->type, "An expression is using a type, %s, which has been disallowed");
add_type_and_value(c->info, node, o->mode, o->type, o->value);
add_type_and_value(c, node, o->mode, o->type, o->value);
return kind;
}
@@ -9857,12 +9850,9 @@ gb_internal bool is_exact_value_zero(ExactValue const &v) {
if (cl->elems.count == 0) {
return true;
} else {
for_array(i, cl->elems) {
Ast *elem = cl->elems[i];
for (Ast *elem : cl->elems) {
if (elem->tav.mode != Addressing_Constant) {
// if (elem->tav.value.kind != ExactValue_Invalid) {
return false;
// }
}
if (!is_exact_value_zero(elem->tav.value)) {
return false;
@@ -10342,8 +10332,7 @@ gb_internal gbString write_expr_to_string(gbString str, Ast *node, bool shorthan
bool parens_needed = false;
if (pt->results && pt->results->kind == Ast_FieldList) {
for_array(i, pt->results->FieldList.list) {
Ast *field = pt->results->FieldList.list[i];
for (Ast *field : pt->results->FieldList.list) {
ast_node(f, Field, field);
if (f->names.count != 0) {
parens_needed = true;

View File

@@ -622,7 +622,10 @@ gb_internal bool check_using_stmt_entity(CheckerContext *ctx, AstUsingStmt *us,
case Entity_ImportName: {
Scope *scope = e->ImportName.scope;
MUTEX_GUARD_BLOCK(scope->mutex) for (auto const &entry : scope->elements) {
rw_mutex_lock(&scope->mutex);
defer (rw_mutex_unlock(&scope->mutex));
for (auto const &entry : scope->elements) {
String name = entry.key.string;
Entity *decl = entry.value;
if (!is_entity_exported(decl)) continue;
@@ -929,19 +932,17 @@ gb_internal void check_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags
}
SeenMap seen = {}; // NOTE(bill): Multimap, Key: ExactValue
map_init(&seen, heap_allocator());
defer (map_destroy(&seen));
for_array(stmt_index, bs->stmts) {
Ast *stmt = bs->stmts[stmt_index];
for (Ast *stmt : bs->stmts) {
if (stmt->kind != Ast_CaseClause) {
// NOTE(bill): error handled by above multiple default checker
continue;
}
ast_node(cc, CaseClause, stmt);
for_array(j, cc->list) {
Ast *expr = unparen_expr(cc->list[j]);
for (Ast *expr : cc->list) {
expr = unparen_expr(expr);
if (is_ast_range(expr)) {
ast_node(be, BinaryExpr, expr);
@@ -1053,8 +1054,7 @@ gb_internal void check_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags
auto unhandled = array_make<Entity *>(temporary_allocator(), 0, fields.count);
for_array(i, fields) {
Entity *f = fields[i];
for (Entity *f : fields) {
if (f->kind != Entity_Constant) {
continue;
}
@@ -1073,8 +1073,7 @@ gb_internal void check_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags
error_no_newline(node, "Unhandled switch case: %.*s", LIT(unhandled[0]->token.string));
} else {
error(node, "Unhandled switch cases:");
for_array(i, unhandled) {
Entity *f = unhandled[i];
for (Entity *f : unhandled) {
error_line("\t%.*s\n", LIT(f->token.string));
}
}
@@ -1133,7 +1132,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_
check_expr(ctx, &x, rhs);
check_assignment(ctx, &x, nullptr, str_lit("type switch expression"));
add_type_info_type(ctx, x.type);
// add_type_info_type(ctx, x.type);
TypeSwitchKind switch_kind = check_valid_type_switch_type(x.type);
if (switch_kind == TypeSwitch_Invalid) {
@@ -1155,8 +1154,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_
// NOTE(bill): Check for multiple defaults
Ast *first_default = nullptr;
ast_node(bs, BlockStmt, ss->body);
for_array(i, bs->stmts) {
Ast *stmt = bs->stmts[i];
for (Ast *stmt : bs->stmts) {
Ast *default_stmt = nullptr;
if (stmt->kind == Ast_CaseClause) {
ast_node(cc, CaseClause, stmt);
@@ -1185,11 +1183,9 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_
}
PtrSet<Type *> seen = {};
ptr_set_init(&seen, heap_allocator());
defer (ptr_set_destroy(&seen));
for_array(i, bs->stmts) {
Ast *stmt = bs->stmts[i];
for (Ast *stmt : bs->stmts) {
if (stmt->kind != Ast_CaseClause) {
// NOTE(bill): error handled by above multiple default checker
continue;
@@ -1200,8 +1196,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_
Type *bt = base_type(type_deref(x.type));
Type *case_type = nullptr;
for_array(type_index, cc->list) {
Ast *type_expr = cc->list[type_index];
for (Ast *type_expr : cc->list) {
if (type_expr != nullptr) { // Otherwise it's a default expression
Operand y = {};
check_expr_or_type(ctx, &y, type_expr);
@@ -1215,8 +1210,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_
if (switch_kind == TypeSwitch_Union) {
GB_ASSERT(is_type_union(bt));
bool tag_type_found = false;
for_array(j, bt->Union.variants) {
Type *vt = bt->Union.variants[j];
for (Type *vt : bt->Union.variants) {
if (are_types_identical(vt, y.type)) {
tag_type_found = true;
break;
@@ -1229,7 +1223,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_
continue;
}
case_type = y.type;
add_type_info_type(ctx, y.type);
// add_type_info_type(ctx, y.type);
} else if (switch_kind == TypeSwitch_Any) {
case_type = y.type;
add_type_info_type(ctx, y.type);
@@ -1265,7 +1259,9 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_
if (case_type == nullptr) {
case_type = x.type;
}
add_type_info_type(ctx, case_type);
if (switch_kind == TypeSwitch_Any) {
add_type_info_type(ctx, case_type);
}
check_open_scope(ctx, stmt);
{
@@ -1290,10 +1286,10 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_
auto unhandled = array_make<Type *>(temporary_allocator(), 0, variants.count);
for_array(i, variants) {
Type *t = variants[i];
for (Type *t : variants) {
if (!type_ptr_set_exists(&seen, t)) {
array_add(&unhandled, t);
gb_printf_err("HERE: %p %s\n", t, type_to_string(t));
}
}
@@ -1304,8 +1300,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_
gb_string_free(s);
} else {
error_no_newline(node, "Unhandled switch cases:\n");
for_array(i, unhandled) {
Type *t = unhandled[i];
for (Type *t : unhandled) {
gbString s = type_to_string(t);
error_line("\t%s\n", s);
gb_string_free(s);
@@ -1342,8 +1337,7 @@ gb_internal void check_block_stmt_for_errors(CheckerContext *ctx, Ast *body) {
isize stmt_count = 0;
Ast *the_stmt = nullptr;
for_array(i, bs->stmts) {
Ast *stmt = bs->stmts[i];
for (Ast *stmt : bs->stmts) {
GB_ASSERT(stmt != nullptr);
switch (stmt->kind) {
case_ast_node(es, EmptyStmt, stmt);
@@ -1361,8 +1355,7 @@ gb_internal void check_block_stmt_for_errors(CheckerContext *ctx, Ast *body) {
if (stmt_count == 1) {
if (the_stmt->kind == Ast_ValueDecl) {
for_array(i, the_stmt->ValueDecl.names) {
Ast *name = the_stmt->ValueDecl.names[i];
for (Ast *name : the_stmt->ValueDecl.names) {
if (name->kind != Ast_Ident) {
continue;
}
@@ -1378,8 +1371,8 @@ gb_internal void check_block_stmt_for_errors(CheckerContext *ctx, Ast *body) {
gb_internal bool all_operands_valid(Array<Operand> const &operands) {
if (any_errors()) {
for_array(i, operands) {
if (operands[i].type == t_invalid) {
for (Operand const &o : operands) {
if (o.type == t_invalid) {
return false;
}
}
@@ -1550,16 +1543,9 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags)
check_assignment_arguments(ctx, lhs_operands, &rhs_operands, as->rhs);
isize rhs_count = rhs_operands.count;
for_array(i, rhs_operands) {
if (rhs_operands[i].mode == Addressing_Invalid) {
// TODO(bill): Should I ignore invalid parameters?
// rhs_count--;
}
}
auto lhs_to_ignore = array_make<bool>(temporary_allocator(), lhs_count);
isize rhs_count = rhs_operands.count;
isize max = gb_min(lhs_count, rhs_count);
for (isize i = 0; i < max; i++) {
if (lhs_to_ignore[i]) {
@@ -1858,8 +1844,8 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags)
break;
}
for_array(ti, t->Tuple.variables) {
array_add(&vals, t->Tuple.variables[ti]->type);
for (Entity *e : t->Tuple.variables) {
array_add(&vals, e->type);
}
if (rs->vals.count > 1 && rs->vals[1] != nullptr && count < 3) {
@@ -1978,8 +1964,7 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags)
}
}
for_array(i, entities) {
Entity *e = entities[i];
for (Entity *e : entities) {
DeclInfo *d = decl_info_of_entity(e);
GB_ASSERT(d == nullptr);
add_entity(ctx, ctx->scope, e->identifier, e);
@@ -2093,8 +2078,8 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags)
error(us->token, "Empty 'using' list");
return;
}
for_array(i, us->list) {
Ast *expr = unparen_expr(us->list[i]);
for (Ast *expr : us->list) {
expr = unparen_expr(expr);
Entity *e = nullptr;
bool is_selector = false;
@@ -2134,8 +2119,7 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags)
check_decl_attributes(&c, fb->attributes, foreign_block_decl_attribute, nullptr);
ast_node(block, BlockStmt, fb->body);
for_array(i, block->stmts) {
Ast *decl = block->stmts[i];
for (Ast *decl : block->stmts) {
if (decl->kind == Ast_ValueDecl && decl->ValueDecl.is_mutable) {
check_stmt(&c, decl, flags);
}
@@ -2148,8 +2132,7 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags)
isize entity_count = 0;
isize new_name_count = 0;
for_array(i, vd->names) {
Ast *name = vd->names[i];
for (Ast *name : vd->names) {
Entity *entity = nullptr;
if (name->kind != Ast_Ident) {
error(name, "A variable declaration must be an identifier");
@@ -2195,8 +2178,7 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags)
begin_error_block();
error(node, "No new declarations on the left hand side");
bool all_underscore = true;
for_array(i, vd->names) {
Ast *name = vd->names[i];
for (Ast *name : vd->names) {
if (name->kind == Ast_Ident) {
if (!is_blank_ident(name)) {
all_underscore = false;
@@ -2390,8 +2372,7 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags)
} else {
// constant value declaration
// NOTE(bill): Check `_` declarations
for_array(i, vd->names) {
Ast *name = vd->names[i];
for (Ast *name : vd->names) {
if (is_blank_ident(name)) {
Entity *e = name->Ident.entity;
DeclInfo *d = decl_info_of_entity(e);

View File

@@ -257,63 +257,67 @@ gb_internal bool check_custom_align(CheckerContext *ctx, Ast *node, i64 *align_)
gb_internal Entity *find_polymorphic_record_entity(CheckerContext *ctx, Type *original_type, isize param_count, Array<Operand> const &ordered_operands, bool *failure) {
mutex_lock(&ctx->info->gen_types_mutex);
defer (mutex_unlock(&ctx->info->gen_types_mutex));
rw_mutex_shared_lock(&ctx->info->gen_types_mutex); // @@global
auto *found_gen_types = map_get(&ctx->info->gen_types, original_type);
if (found_gen_types != nullptr) {
// GB_ASSERT_MSG(ordered_operands.count >= param_count, "%td >= %td", ordered_operands.count, param_count);
if (found_gen_types == nullptr) {
rw_mutex_shared_unlock(&ctx->info->gen_types_mutex); // @@global
return nullptr;
}
for_array(i, *found_gen_types) {
Entity *e = (*found_gen_types)[i];
Type *t = base_type(e->type);
TypeTuple *tuple = get_record_polymorphic_params(t);
GB_ASSERT(param_count == tuple->variables.count);
rw_mutex_shared_lock(&found_gen_types->mutex); // @@local
defer (rw_mutex_shared_unlock(&found_gen_types->mutex)); // @@local
bool skip = false;
rw_mutex_shared_unlock(&ctx->info->gen_types_mutex); // @@global
for (isize j = 0; j < param_count; j++) {
Entity *p = tuple->variables[j];
Operand o = {};
if (j < ordered_operands.count) {
o = ordered_operands[j];
}
if (o.expr == nullptr) {
continue;
}
Entity *oe = entity_of_node(o.expr);
if (p == oe) {
// NOTE(bill): This is the same type, make sure that it will be be same thing and use that
// Saves on a lot of checking too below
continue;
}
for (Entity *e : found_gen_types->types) {
Type *t = base_type(e->type);
TypeTuple *tuple = get_record_polymorphic_params(t);
GB_ASSERT(param_count == tuple->variables.count);
if (p->kind == Entity_TypeName) {
if (is_type_polymorphic(o.type)) {
// NOTE(bill): Do not add polymorphic version to the gen_types
skip = true;
break;
}
if (!are_types_identical(o.type, p->type)) {
skip = true;
break;
}
} else if (p->kind == Entity_Constant) {
if (!compare_exact_values(Token_CmpEq, o.value, p->Constant.value)) {
skip = true;
break;
}
if (!are_types_identical(o.type, p->type)) {
skip = true;
break;
}
} else {
GB_PANIC("Unknown entity kind");
}
bool skip = false;
for (isize j = 0; j < param_count; j++) {
Entity *p = tuple->variables[j];
Operand o = {};
if (j < ordered_operands.count) {
o = ordered_operands[j];
}
if (!skip) {
return e;
if (o.expr == nullptr) {
continue;
}
Entity *oe = entity_of_node(o.expr);
if (p == oe) {
// NOTE(bill): This is the same type, make sure that it will be be same thing and use that
// Saves on a lot of checking too below
continue;
}
if (p->kind == Entity_TypeName) {
if (is_type_polymorphic(o.type)) {
// NOTE(bill): Do not add polymorphic version to the gen_types
skip = true;
break;
}
if (!are_types_identical(o.type, p->type)) {
skip = true;
break;
}
} else if (p->kind == Entity_Constant) {
if (!compare_exact_values(Token_CmpEq, o.value, p->Constant.value)) {
skip = true;
break;
}
if (!are_types_identical(o.type, p->type)) {
skip = true;
break;
}
} else {
GB_PANIC("Unknown entity kind");
}
}
if (!skip) {
return e;
}
}
return nullptr;
@@ -346,16 +350,19 @@ gb_internal void add_polymorphic_record_entity(CheckerContext *ctx, Ast *node, T
// TODO(bill): Is this even correct? Or should the metadata be copied?
e->TypeName.objc_metadata = original_type->Named.type_name->TypeName.objc_metadata;
mutex_lock(&ctx->info->gen_types_mutex);
rw_mutex_lock(&ctx->info->gen_types_mutex);
auto *found_gen_types = map_get(&ctx->info->gen_types, original_type);
if (found_gen_types) {
array_add(found_gen_types, e);
rw_mutex_lock(&found_gen_types->mutex);
array_add(&found_gen_types->types, e);
rw_mutex_unlock(&found_gen_types->mutex);
} else {
auto array = array_make<Entity *>(heap_allocator());
array_add(&array, e);
map_set(&ctx->info->gen_types, original_type, array);
GenTypesData gen_types = {};
gen_types.types = array_make<Entity *>(heap_allocator());
array_add(&gen_types.types, e);
map_set(&ctx->info->gen_types, original_type, gen_types);
}
mutex_unlock(&ctx->info->gen_types_mutex);
rw_mutex_unlock(&ctx->info->gen_types_mutex);
}
gb_internal Type *check_record_polymorphic_params(CheckerContext *ctx, Ast *polymorphic_params,
@@ -2398,7 +2405,8 @@ gb_internal Type *make_soa_struct_internal(CheckerContext *ctx, Ast *array_typ_e
}
soa_struct->Struct.soa_count = cast(i32)count;
scope = create_scope(ctx->info, ctx->scope, 8);
scope = create_scope(ctx->info, ctx->scope);
string_map_init(&scope->elements, 8);
soa_struct->Struct.scope = scope;
String params_xyzw[4] = {
@@ -3045,7 +3053,7 @@ gb_internal Type *check_type_expr(CheckerContext *ctx, Ast *e, Type *named_type)
#endif
if (is_type_typed(type)) {
add_type_and_value(ctx->info, e, Addressing_Type, type, empty_exact_value);
add_type_and_value(ctx, e, Addressing_Type, type, empty_exact_value);
} else {
gbString name = type_to_string(type);
error(e, "Invalid type definition of %s", name);

File diff suppressed because it is too large Load Diff

View File

@@ -142,9 +142,28 @@ typedef DECL_ATTRIBUTE_PROC(DeclAttributeProc);
gb_internal void check_decl_attributes(CheckerContext *c, Array<Ast *> const &attributes, DeclAttributeProc *proc, AttributeContext *ac);
enum ProcCheckedState : u8 {
ProcCheckedState_Unchecked,
ProcCheckedState_InProgress,
ProcCheckedState_Checked,
ProcCheckedState_COUNT
};
char const *ProcCheckedState_strings[ProcCheckedState_COUNT] {
"Unchecked",
"In Progress",
"Checked",
};
// DeclInfo is used to store information of certain declarations to allow for "any order" usage
struct DeclInfo {
DeclInfo * parent; // NOTE(bill): only used for procedure literals at the moment
BlockingMutex next_mutex;
DeclInfo * next_child;
DeclInfo * next_sibling;
Scope * scope;
Entity *entity;
@@ -157,7 +176,7 @@ struct DeclInfo {
Type * gen_proc_type; // Precalculated
bool is_using;
bool where_clauses_evaluated;
bool proc_checked;
std::atomic<ProcCheckedState> proc_checked_state;
BlockingMutex proc_checked_mutex;
isize defer_used;
bool defer_use_checked;
@@ -165,8 +184,14 @@ struct DeclInfo {
CommentGroup *comment;
CommentGroup *docs;
PtrSet<Entity *> deps;
RwMutex deps_mutex;
PtrSet<Entity *> deps;
RwMutex type_info_deps_mutex;
PtrSet<Type *> type_info_deps;
BlockingMutex type_and_value_mutex;
Array<BlockLabel> labels;
};
@@ -198,7 +223,7 @@ enum ScopeFlag : i32 {
ScopeFlag_ContextDefined = 1<<16,
};
enum { DEFAULT_SCOPE_CAPACITY = 29 };
enum { DEFAULT_SCOPE_CAPACITY = 32 };
struct Scope {
Ast * node;
@@ -206,7 +231,7 @@ struct Scope {
std::atomic<Scope *> next;
std::atomic<Scope *> head_child;
BlockingMutex mutex;
RwMutex mutex;
StringMap<Entity *> elements;
PtrSet<Scope *> imported;
@@ -297,6 +322,16 @@ struct LoadFileCache {
StringMap<u64> hashes;
};
struct GenProcsData {
Array<Entity *> procs;
RwMutex mutex;
};
struct GenTypesData {
Array<Entity *> types;
RwMutex mutex;
};
// CheckerInfo stores all the symbol information for a type-checked program
struct CheckerInfo {
Checker *checker;
@@ -311,7 +346,7 @@ struct CheckerInfo {
Scope * init_scope;
Entity * entry_point;
PtrSet<Entity *> minimum_dependency_set;
PtrSet<isize> minimum_dependency_type_info_set;
PtrMap</*type info index*/isize, /*min dep index*/isize> minimum_dependency_type_info_set;
@@ -324,30 +359,17 @@ struct CheckerInfo {
// Below are accessed within procedures
// NOTE(bill): If the semantic checker (check_proc_body) is to ever to be multithreaded,
// these variables will be of contention
Semaphore collect_semaphore;
RwMutex global_untyped_mutex;
UntypedExprInfoMap global_untyped; // NOTE(bill): This needs to be a map and not on the Ast
// as it needs to be iterated across afterwards
BlockingMutex global_untyped_mutex;
BlockingMutex builtin_mutex;
// NOT recursive & only used at the end of `check_proc_body`
// and in `add_dependency`.
// This is a possible source of contention but probably not
// too much of a problem in practice
BlockingMutex deps_mutex;
BlockingMutex type_and_value_mutex;
RecursiveMutex lazy_mutex; // Mutex required for lazy type checking of specific files
RecursiveMutex gen_procs_mutex;
RecursiveMutex gen_types_mutex;
PtrMap<Ast *, Array<Entity *> > gen_procs; // Key: Ast * | Identifier -> Entity
PtrMap<Type *, Array<Entity *> > gen_types;
RwMutex gen_types_mutex;
PtrMap<Type *, GenTypesData > gen_types;
BlockingMutex type_info_mutex; // NOT recursive
Array<Type *> type_info_types;
@@ -356,11 +378,6 @@ struct CheckerInfo {
BlockingMutex foreign_mutex; // NOT recursive
StringMap<Entity *> foreigns;
// only used by 'odin query'
bool allow_identifier_uses;
BlockingMutex identifier_uses_mutex;
Array<Ast *> identifier_uses;
// NOTE(bill): These are actually MPSC queues
// TODO(bill): Convert them to be MPSC queues
MPMCQueue<Entity *> definition_queue;
@@ -375,6 +392,9 @@ struct CheckerInfo {
BlockingMutex load_file_mutex;
StringMap<LoadFileCache *> load_file_cache;
BlockingMutex all_procedures_mutex;
Array<ProcInfo *> all_procedures;
};
struct CheckerContext {
@@ -418,8 +438,6 @@ struct CheckerContext {
Scope * polymorphic_scope;
Ast *assignment_lhs_hint;
ProcBodyQueue *procs_to_check_queue;
};
@@ -430,9 +448,7 @@ struct Checker {
CheckerContext builtin_ctx;
MPMCQueue<Entity *> procs_with_deferred_to_check;
ProcBodyQueue procs_to_check_queue;
Semaphore procs_to_check_semaphore;
Array<ProcInfo *> procs_to_check;
// TODO(bill): Technically MPSC queue
MPMCQueue<UntypedExprInfo> global_untyped_queue;
@@ -462,10 +478,10 @@ gb_internal Entity *entity_of_node(Ast *expr);
gb_internal Entity *scope_lookup_current(Scope *s, String const &name);
gb_internal Entity *scope_lookup (Scope *s, String const &name);
gb_internal void scope_lookup_parent (Scope *s, String const &name, Scope **scope_, Entity **entity_);
gb_internal Entity *scope_insert (Scope *s, Entity *entity, bool use_mutex=true);
gb_internal Entity *scope_insert (Scope *s, Entity *entity);
gb_internal void add_type_and_value (CheckerInfo *i, Ast *expression, AddressingMode mode, Type *type, ExactValue value);
gb_internal void add_type_and_value (CheckerContext *c, Ast *expression, AddressingMode mode, Type *type, ExactValue value);
gb_internal ExprInfo *check_get_expr_info (CheckerContext *c, Ast *expr);
gb_internal void add_untyped (CheckerContext *c, Ast *expression, AddressingMode mode, Type *basic_type, ExactValue value);
gb_internal void add_entity_use (CheckerContext *c, Ast *identifier, Entity *entity);

View File

@@ -43,9 +43,9 @@ gb_internal void debugf(char const *fmt, ...);
#error Odin on Windows requires a 64-bit build-system. The 'Developer Command Prompt' for VS still defaults to 32-bit shell. The 64-bit shell can be found under the name 'x64 Native Tools Command Prompt' for VS. For more information, please see https://odin-lang.org/docs/install/#for-windows
#endif
#include "threading.cpp"
#include "unicode.cpp"
#include "array.cpp"
#include "threading.cpp"
#include "queue.cpp"
#include "common_memory.cpp"
#include "string.cpp"
@@ -373,7 +373,7 @@ gb_internal char const *string_intern(String const &string) {
}
gb_internal void init_string_interner(void) {
map_init(&string_intern_map, heap_allocator());
map_init(&string_intern_map);
}

View File

@@ -14,36 +14,24 @@ gb_internal gb_inline U const &bit_cast(V const &v) { return reinterpret_cast<U
gb_internal gb_inline i64 align_formula(i64 size, i64 align) {
if (align > 0) {
i64 result = size + align-1;
return result - result%align;
}
return size;
i64 result = size + align-1;
return result - (i64)((u64)result%(u64)align);
}
gb_internal gb_inline isize align_formula_isize(isize size, isize align) {
if (align > 0) {
isize result = size + align-1;
return result - result%align;
}
return size;
isize result = size + align-1;
return result - (isize)((usize)result%(usize)align);
}
gb_internal gb_inline void *align_formula_ptr(void *ptr, isize align) {
if (align > 0) {
uintptr result = (cast(uintptr)ptr) + align-1;
return (void *)(result - result%align);
}
return ptr;
uintptr result = (cast(uintptr)ptr) + align-1;
return (void *)(result - result%align);
}
gb_global BlockingMutex global_memory_block_mutex;
gb_global BlockingMutex global_memory_allocator_mutex;
gb_internal void platform_virtual_memory_init(void);
gb_internal void virtual_memory_init(void) {
mutex_init(&global_memory_block_mutex);
mutex_init(&global_memory_allocator_mutex);
platform_virtual_memory_init();
}
@@ -57,9 +45,9 @@ struct MemoryBlock {
};
struct Arena {
MemoryBlock *curr_block;
isize minimum_block_size;
bool ignore_mutex;
MemoryBlock * curr_block;
isize minimum_block_size;
BlockingMutex mutex;
};
enum { DEFAULT_MINIMUM_BLOCK_SIZE = 8ll*1024ll*1024ll };
@@ -85,10 +73,7 @@ gb_internal isize arena_align_forward_offset(Arena *arena, isize alignment) {
gb_internal void *arena_alloc(Arena *arena, isize min_size, isize alignment) {
GB_ASSERT(gb_is_power_of_two(alignment));
BlockingMutex *mutex = &global_memory_allocator_mutex;
if (!arena->ignore_mutex) {
mutex_lock(mutex);
}
mutex_lock(&arena->mutex);
isize size = 0;
if (arena->curr_block != nullptr) {
@@ -115,9 +100,7 @@ gb_internal void *arena_alloc(Arena *arena, isize min_size, isize alignment) {
curr_block->used += size;
GB_ASSERT(curr_block->used <= curr_block->size);
if (!arena->ignore_mutex) {
mutex_unlock(mutex);
}
mutex_unlock(&arena->mutex);
// NOTE(bill): memory will be zeroed by default due to virtual memory
return ptr;
@@ -306,7 +289,7 @@ gb_internal GB_ALLOCATOR_PROC(arena_allocator_proc) {
}
gb_global gb_thread_local Arena permanent_arena = {nullptr, DEFAULT_MINIMUM_BLOCK_SIZE, true};
gb_global gb_thread_local Arena permanent_arena = {nullptr, DEFAULT_MINIMUM_BLOCK_SIZE};
gb_internal gbAllocator permanent_allocator() {
return arena_allocator(&permanent_arena);
}

View File

@@ -53,13 +53,12 @@ gb_internal void odin_doc_writer_item_tracker_init(OdinDocWriterItemTracker<T> *
gb_internal void odin_doc_writer_prepare(OdinDocWriter *w) {
w->state = OdinDocWriterState_Preparing;
gbAllocator a = heap_allocator();
string_map_init(&w->string_cache, a);
string_map_init(&w->string_cache);
map_init(&w->file_cache, a);
map_init(&w->pkg_cache, a);
map_init(&w->entity_cache, a);
map_init(&w->type_cache, a);
map_init(&w->file_cache);
map_init(&w->pkg_cache);
map_init(&w->entity_cache);
map_init(&w->type_cache);
odin_doc_writer_item_tracker_init(&w->files, 1);
odin_doc_writer_item_tracker_init(&w->pkgs, 1);

View File

@@ -130,7 +130,7 @@ enum EntityConstantFlags : u32 {
EntityConstantFlag_ImplicitEnumValue = 1<<0,
};
enum ProcedureOptimizationMode : u32 {
enum ProcedureOptimizationMode : u8 {
ProcedureOptimizationMode_Default,
ProcedureOptimizationMode_None,
ProcedureOptimizationMode_Minimal,
@@ -154,7 +154,6 @@ struct TypeNameObjCMetadata {
gb_internal TypeNameObjCMetadata *create_type_name_obj_c_metadata() {
TypeNameObjCMetadata *md = gb_alloc_item(permanent_allocator(), TypeNameObjCMetadata);
md->mutex = gb_alloc_item(permanent_allocator(), BlockingMutex);
mutex_init(md->mutex);
array_init(&md->type_entries, heap_allocator());
array_init(&md->value_entries, heap_allocator());
return md;
@@ -234,6 +233,9 @@ struct Entity {
String link_name;
String link_prefix;
DeferredProcedure deferred_procedure;
struct GenProcsData *gen_procs;
BlockingMutex gen_procs_mutex;
ProcedureOptimizationMode optimization_mode;
bool is_foreign : 1;
bool is_export : 1;

View File

@@ -22,10 +22,6 @@ gb_internal bool any_errors(void) {
}
gb_internal void init_global_error_collector(void) {
mutex_init(&global_error_collector.mutex);
mutex_init(&global_error_collector.block_mutex);
mutex_init(&global_error_collector.error_out_mutex);
mutex_init(&global_error_collector.string_mutex);
array_init(&global_error_collector.errors, heap_allocator());
array_init(&global_error_collector.error_buffer, heap_allocator());
array_init(&global_file_path_strings, heap_allocator(), 1, 4096);

File diff suppressed because it is too large Load Diff

View File

@@ -117,6 +117,16 @@ struct lbIncompleteDebugType {
typedef Slice<i32> lbStructFieldRemapping;
enum lbFunctionPassManagerKind {
lbFunctionPassManager_default,
lbFunctionPassManager_default_without_memcpy,
lbFunctionPassManager_minimal,
lbFunctionPassManager_size,
lbFunctionPassManager_speed,
lbFunctionPassManager_COUNT
};
struct lbModule {
LLVMModuleRef mod;
LLVMContextRef ctx;
@@ -132,6 +142,8 @@ struct lbModule {
PtrMap<void *, lbStructFieldRemapping> struct_field_remapping; // Key: LLVMTypeRef or Type *
i32 internal_type_level;
RecursiveMutex values_mutex;
PtrMap<Entity *, lbValue> values;
PtrMap<Entity *, lbAddr> soa_values;
StringMap<lbValue> members;
@@ -151,11 +163,14 @@ struct lbModule {
u32 nested_type_name_guid;
Array<lbProcedure *> procedures_to_generate;
Array<Entity *> global_procedures_and_types_to_create;
lbProcedure *curr_procedure;
LLVMDIBuilderRef debug_builder;
LLVMMetadataRef debug_compile_unit;
RecursiveMutex debug_values_mutex;
PtrMap<void *, LLVMMetadataRef> debug_values;
Array<lbIncompleteDebugType> debug_incomplete_types;
@@ -165,6 +180,8 @@ struct lbModule {
PtrMap<Type *, lbAddr> map_cell_info_map; // address of runtime.Map_Info
PtrMap<Type *, lbAddr> map_info_map; // address of runtime.Map_Cell_Info
LLVMPassManagerRef function_pass_managers[lbFunctionPassManager_COUNT];
};
struct lbGenerator {
@@ -178,6 +195,7 @@ struct lbGenerator {
PtrMap<LLVMContextRef, lbModule *> modules_through_ctx;
lbModule default_module;
BlockingMutex anonymous_proc_lits_mutex;
PtrMap<Ast *, lbProcedure *> anonymous_proc_lits;
BlockingMutex foreign_mutex;
@@ -186,6 +204,10 @@ struct lbGenerator {
std::atomic<u32> global_array_index;
std::atomic<u32> global_generated_index;
lbProcedure *startup_type_info;
lbProcedure *startup_runtime;
lbProcedure *objc_names;
};

View File

@@ -2,7 +2,9 @@ gb_internal LLVMMetadataRef lb_get_llvm_metadata(lbModule *m, void *key) {
if (key == nullptr) {
return nullptr;
}
mutex_lock(&m->debug_values_mutex);
auto found = map_get(&m->debug_values, key);
mutex_unlock(&m->debug_values_mutex);
if (found) {
return *found;
}
@@ -10,7 +12,9 @@ gb_internal LLVMMetadataRef lb_get_llvm_metadata(lbModule *m, void *key) {
}
gb_internal void lb_set_llvm_metadata(lbModule *m, void *key, LLVMMetadataRef value) {
if (key != nullptr) {
mutex_lock(&m->debug_values_mutex);
map_set(&m->debug_values, key, value);
mutex_unlock(&m->debug_values_mutex);
}
}
@@ -491,6 +495,9 @@ gb_internal LLVMMetadataRef lb_get_base_scope_metadata(lbModule *m, Scope *scope
}
gb_internal LLVMMetadataRef lb_debug_type(lbModule *m, Type *type) {
mutex_lock(&m->debug_values_mutex);
defer (mutex_unlock(&m->debug_values_mutex));
GB_ASSERT(type != nullptr);
LLVMMetadataRef found = lb_get_llvm_metadata(m, type);
if (found != nullptr) {

View File

@@ -61,8 +61,7 @@ gb_internal lbValue lb_emit_logical_binary_expr(lbProcedure *p, TokenKind op, As
GB_ASSERT(incoming_values.count > 0);
LLVMTypeRef phi_type = nullptr;
for_array(i, incoming_values) {
LLVMValueRef incoming_value = incoming_values[i];
for (LLVMValueRef incoming_value : incoming_values) {
if (!LLVMIsConstant(incoming_value)) {
phi_type = LLVMTypeOf(incoming_value);
break;
@@ -1921,8 +1920,7 @@ gb_internal lbValue lb_emit_conv(lbProcedure *p, lbValue value, Type *t) {
}
if (is_type_union(dst)) {
for_array(i, dst->Union.variants) {
Type *vt = dst->Union.variants[i];
for (Type *vt : dst->Union.variants) {
if (are_types_identical(vt, src_type)) {
lbAddr parent = lb_add_local_generated(p, t, true);
lb_emit_store_union_variant(p, parent.addr, value, vt);
@@ -3596,8 +3594,7 @@ gb_internal void lb_build_addr_compound_lit_populate(lbProcedure *p, Slice<Ast *
}
}
gb_internal void lb_build_addr_compound_lit_assign_array(lbProcedure *p, Array<lbCompoundLitElemTempData> const &temp_data) {
for_array(i, temp_data) {
auto td = temp_data[i];
for (auto const &td : temp_data) {
if (td.value.value != nullptr) {
if (td.elem_length > 0) {
auto loop_data = lb_loop_start(p, cast(isize)td.elem_length, t_i32);
@@ -4129,8 +4126,7 @@ gb_internal lbAddr lb_build_addr_compound_lit(lbProcedure *p, Ast *expr) {
lbValue err = lb_dynamic_map_reserve(p, v.addr, 2*cl->elems.count, pos);
gb_unused(err);
for_array(field_index, cl->elems) {
Ast *elem = cl->elems[field_index];
for (Ast *elem : cl->elems) {
ast_node(fv, FieldValue, elem);
lbValue key = lb_build_expr(p, fv->field);
@@ -4304,8 +4300,7 @@ gb_internal lbAddr lb_build_addr_compound_lit(lbProcedure *p, Ast *expr) {
lb_addr_store(p, v, lb_const_value(p->module, type, exact_value_compound(expr)));
lbValue lower = lb_const_value(p->module, t_int, exact_value_i64(bt->BitSet.lower));
for_array(i, cl->elems) {
Ast *elem = cl->elems[i];
for (Ast *elem : cl->elems) {
GB_ASSERT(elem->kind != Ast_FieldValue);
if (lb_is_elem_const(elem, et)) {
@@ -4359,8 +4354,7 @@ gb_internal lbAddr lb_build_addr_compound_lit(lbProcedure *p, Ast *expr) {
// TODO(bill): reduce the need for individual `insertelement` if a `shufflevector`
// might be a better option
for_array(i, temp_data) {
auto td = temp_data[i];
for (auto const &td : temp_data) {
if (td.value.value != nullptr) {
if (td.elem_length > 0) {
for (i64 k = 0; k < td.elem_length; k++) {

View File

@@ -55,30 +55,31 @@ gb_internal void lb_init_module(lbModule *m, Checker *c) {
}
gbAllocator a = heap_allocator();
map_init(&m->types, a);
map_init(&m->func_raw_types, a);
map_init(&m->struct_field_remapping, a);
map_init(&m->values, a);
map_init(&m->soa_values, a);
string_map_init(&m->members, a);
map_init(&m->procedure_values, a);
string_map_init(&m->procedures, a);
string_map_init(&m->const_strings, a);
map_init(&m->function_type_map, a);
map_init(&m->equal_procs, a);
map_init(&m->hasher_procs, a);
map_init(&m->map_get_procs, a);
map_init(&m->map_set_procs, a);
map_init(&m->types);
map_init(&m->func_raw_types);
map_init(&m->struct_field_remapping);
map_init(&m->values);
map_init(&m->soa_values);
string_map_init(&m->members);
map_init(&m->procedure_values);
string_map_init(&m->procedures);
string_map_init(&m->const_strings);
map_init(&m->function_type_map);
map_init(&m->equal_procs);
map_init(&m->hasher_procs);
map_init(&m->map_get_procs);
map_init(&m->map_set_procs);
array_init(&m->procedures_to_generate, a, 0, 1024);
array_init(&m->global_procedures_and_types_to_create, a, 0, 1024);
array_init(&m->missing_procedures_to_check, a, 0, 16);
map_init(&m->debug_values, a);
map_init(&m->debug_values);
array_init(&m->debug_incomplete_types, a, 0, 1024);
string_map_init(&m->objc_classes, a);
string_map_init(&m->objc_selectors, a);
string_map_init(&m->objc_classes);
string_map_init(&m->objc_selectors);
map_init(&m->map_info_map, a, 0);
map_init(&m->map_cell_info_map, a, 0);
map_init(&m->map_info_map, 0);
map_init(&m->map_cell_info_map, 0);
}
@@ -127,14 +128,13 @@ gb_internal bool lb_init_generator(lbGenerator *gen, Checker *c) {
gen->info = &c->info;
map_init(&gen->modules, permanent_allocator(), gen->info->packages.entries.count*2);
map_init(&gen->modules_through_ctx, permanent_allocator(), gen->info->packages.entries.count*2);
map_init(&gen->anonymous_proc_lits, heap_allocator(), 1024);
map_init(&gen->modules, gen->info->packages.entries.count*2);
map_init(&gen->modules_through_ctx, gen->info->packages.entries.count*2);
map_init(&gen->anonymous_proc_lits, 1024);
mutex_init(&gen->foreign_mutex);
array_init(&gen->foreign_libraries, heap_allocator(), 0, 1024);
ptr_set_init(&gen->foreign_libraries_set, heap_allocator(), 1024);
ptr_set_init(&gen->foreign_libraries_set, 1024);
if (USE_SEPARATE_MODULES) {
for (auto const &entry : gen->info->packages) {
@@ -317,6 +317,7 @@ gb_internal bool lb_is_instr_terminating(LLVMValueRef instr) {
gb_internal lbModule *lb_pkg_module(lbGenerator *gen, AstPackage *pkg) {
// NOTE(bill): no need for a mutex since it's immutable
auto *found = map_get(&gen->modules, pkg);
if (found) {
return *found;
@@ -1355,7 +1356,7 @@ gb_internal String lb_mangle_name(lbModule *m, Entity *e) {
return mangled_name;
}
gb_internal String lb_set_nested_type_name_ir_mangled_name(Entity *e, lbProcedure *p) {
gb_internal String lb_set_nested_type_name_ir_mangled_name(Entity *e, lbProcedure *p, lbModule *module) {
// NOTE(bill, 2020-03-08): A polymorphic procedure may take a nested type declaration
// and as a result, the declaration does not have time to determine what it should be
@@ -1422,7 +1423,7 @@ gb_internal String lb_get_entity_name(lbModule *m, Entity *e, String default_nam
}
if (e->kind == Entity_TypeName && (e->scope->flags & ScopeFlag_File) == 0) {
return lb_set_nested_type_name_ir_mangled_name(e, nullptr);
return lb_set_nested_type_name_ir_mangled_name(e, nullptr, m);
}
String name = {};
@@ -2165,19 +2166,25 @@ gb_internal void lb_ensure_abi_function_type(lbModule *m, lbProcedure *p) {
gb_internal void lb_add_entity(lbModule *m, Entity *e, lbValue val) {
if (e != nullptr) {
mutex_lock(&m->values_mutex);
map_set(&m->values, e, val);
mutex_unlock(&m->values_mutex);
}
}
gb_internal void lb_add_member(lbModule *m, String const &name, lbValue val) {
if (name.len > 0) {
mutex_lock(&m->values_mutex);
string_map_set(&m->members, name, val);
mutex_unlock(&m->values_mutex);
}
}
gb_internal void lb_add_procedure_value(lbModule *m, lbProcedure *p) {
mutex_lock(&m->values_mutex);
if (p->entity != nullptr) {
map_set(&m->procedure_values, p->value, p->entity);
}
string_map_set(&m->procedures, p->name, p);
mutex_unlock(&m->values_mutex);
}
@@ -2520,6 +2527,8 @@ gb_internal lbValue lb_find_ident(lbProcedure *p, lbModule *m, Entity *e, Ast *e
return *found;
}
}
mutex_lock(&m->values_mutex);
defer (mutex_unlock(&m->values_mutex));
auto *found = map_get(&m->values, e);
if (found) {
@@ -2539,7 +2548,6 @@ gb_internal lbValue lb_find_ident(lbProcedure *p, lbModule *m, Entity *e, Ast *e
if (USE_SEPARATE_MODULES) {
lbModule *other_module = lb_pkg_module(m->gen, e->pkg);
if (other_module != m) {
String name = lb_get_entity_name(other_module, e);
lb_set_entity_from_other_modules_linkage_correctly(other_module, e, name);
@@ -2570,6 +2578,9 @@ gb_internal lbValue lb_find_procedure_value_from_entity(lbModule *m, Entity *e)
e = strip_entity_wrapping(e);
GB_ASSERT(e != nullptr);
mutex_lock(&m->values_mutex);
defer (mutex_unlock(&m->values_mutex));
auto *found = map_get(&m->values, e);
if (found) {
return *found;
@@ -2658,6 +2669,10 @@ gb_internal lbValue lb_find_value_from_entity(lbModule *m, Entity *e) {
return lb_find_procedure_value_from_entity(m, e);
}
mutex_lock(&m->values_mutex);
defer (mutex_unlock(&m->values_mutex));
auto *found = map_get(&m->values, e);
if (found) {
return *found;
@@ -2715,7 +2730,6 @@ gb_internal lbValue lb_find_value_from_entity(lbModule *m, Entity *e) {
return g;
}
}
GB_PANIC("\n\tError in: %s, missing value '%.*s'\n", token_pos_to_string(e->token.pos), LIT(e->token.string));
return {};
}

View File

@@ -359,6 +359,9 @@ gb_internal void lb_run_remove_dead_instruction_pass(lbProcedure *p) {
gb_internal void lb_run_function_pass_manager(LLVMPassManagerRef fpm, lbProcedure *p) {
if (p == nullptr) {
return;
}
LLVMRunFunctionPassManager(fpm, p->value);
// NOTE(bill): LLVMAddDCEPass doesn't seem to be exported in the official DLL's for LLVM
// which means we cannot rely upon it

View File

@@ -68,7 +68,7 @@ gb_internal lbProcedure *lb_create_procedure(lbModule *m, Entity *entity, bool i
GB_ASSERT(entity != nullptr);
GB_ASSERT(entity->kind == Entity_Procedure);
if (!entity->Procedure.is_foreign) {
GB_ASSERT_MSG(entity->flags & EntityFlag_ProcBodyChecked, "%.*s :: %s", LIT(entity->token.string), type_to_string(entity->type));
GB_ASSERT_MSG(entity->flags & EntityFlag_ProcBodyChecked, "%.*s :: %s (was parapoly: %d)", LIT(entity->token.string), type_to_string(entity->type), is_type_polymorphic(entity->type, true));
}
String link_name = {};
@@ -119,9 +119,9 @@ gb_internal lbProcedure *lb_create_procedure(lbModule *m, Entity *entity, bool i
p->branch_blocks.allocator = a;
p->context_stack.allocator = a;
p->scope_stack.allocator = a;
map_init(&p->selector_values, a, 0);
map_init(&p->selector_addr, a, 0);
map_init(&p->tuple_fix_map, a, 0);
map_init(&p->selector_values, 0);
map_init(&p->selector_addr, 0);
map_init(&p->tuple_fix_map, 0);
if (p->is_foreign) {
lb_add_foreign_library_path(p->module, entity->Procedure.foreign_library);
@@ -345,7 +345,7 @@ gb_internal lbProcedure *lb_create_dummy_procedure(lbModule *m, String link_name
p->blocks.allocator = a;
p->branch_blocks.allocator = a;
p->context_stack.allocator = a;
map_init(&p->tuple_fix_map, a, 0);
map_init(&p->tuple_fix_map, 0);
char *c_link_name = alloc_cstring(permanent_allocator(), p->name);
@@ -486,7 +486,7 @@ gb_internal void lb_begin_procedure_body(lbProcedure *p) {
p->entry_block = lb_create_block(p, "entry", true);
lb_start_block(p, p->entry_block);
map_init(&p->direct_parameters, heap_allocator());
map_init(&p->direct_parameters);
GB_ASSERT(p->type != nullptr);

View File

@@ -7,8 +7,7 @@ gb_internal void lb_build_constant_value_decl(lbProcedure *p, AstValueDecl *vd)
static i32 global_guid = 0;
for_array(i, vd->names) {
Ast *ident = vd->names[i];
for (Ast *ident : vd->names) {
GB_ASSERT(ident->kind == Ast_Ident);
Entity *e = entity_of_node(ident);
GB_ASSERT(e != nullptr);
@@ -33,7 +32,7 @@ gb_internal void lb_build_constant_value_decl(lbProcedure *p, AstValueDecl *vd)
continue;
}
lb_set_nested_type_name_ir_mangled_name(e, p);
lb_set_nested_type_name_ir_mangled_name(e, p, p->module);
}
for_array(i, vd->names) {
@@ -51,21 +50,20 @@ gb_internal void lb_build_constant_value_decl(lbProcedure *p, AstValueDecl *vd)
continue; // It's an alias
}
CheckerInfo *info = p->module->info;
DeclInfo *decl = decl_info_of_entity(e);
ast_node(pl, ProcLit, decl->proc_lit);
if (pl->body != nullptr) {
auto *found = map_get(&info->gen_procs, ident);
if (found) {
auto procs = *found;
for_array(i, procs) {
Entity *e = procs[i];
GenProcsData *gpd = e->Procedure.gen_procs;
if (gpd) {
rw_mutex_shared_lock(&gpd->mutex);
for (Entity *e : gpd->procs) {
if (!ptr_set_exists(min_dep_set, e)) {
continue;
}
DeclInfo *d = decl_info_of_entity(e);
lb_build_nested_proc(p, &d->proc_lit->ProcLit, e);
}
rw_mutex_shared_unlock(&gpd->mutex);
} else {
lb_build_nested_proc(p, pl, e);
}
@@ -106,8 +104,7 @@ gb_internal void lb_build_constant_value_decl(lbProcedure *p, AstValueDecl *vd)
gb_internal void lb_build_stmt_list(lbProcedure *p, Slice<Ast *> const &stmts) {
for_array(i, stmts) {
Ast *stmt = stmts[i];
for (Ast *stmt : stmts) {
switch (stmt->kind) {
case_ast_node(vd, ValueDecl, stmt);
lb_build_constant_value_decl(p, vd);
@@ -118,8 +115,8 @@ gb_internal void lb_build_stmt_list(lbProcedure *p, Slice<Ast *> const &stmts) {
case_end;
}
}
for_array(i, stmts) {
lb_build_stmt(p, stmts[i]);
for (Ast *stmt : stmts) {
lb_build_stmt(p, stmt);
}
}
@@ -129,10 +126,9 @@ gb_internal lbBranchBlocks lb_lookup_branch_blocks(lbProcedure *p, Ast *ident) {
GB_ASSERT(ident->kind == Ast_Ident);
Entity *e = entity_of_node(ident);
GB_ASSERT(e->kind == Entity_Label);
for_array(i, p->branch_blocks) {
lbBranchBlocks *b = &p->branch_blocks[i];
if (b->label == e->Label.node) {
return *b;
for (lbBranchBlocks const &b : p->branch_blocks) {
if (b.label == e->Label.node) {
return b;
}
}
@@ -153,13 +149,12 @@ gb_internal lbTargetList *lb_push_target_list(lbProcedure *p, Ast *label, lbBloc
if (label != nullptr) { // Set label blocks
GB_ASSERT(label->kind == Ast_Label);
for_array(i, p->branch_blocks) {
lbBranchBlocks *b = &p->branch_blocks[i];
GB_ASSERT(b->label != nullptr && label != nullptr);
GB_ASSERT(b->label->kind == Ast_Label);
if (b->label == label) {
b->break_ = break_;
b->continue_ = continue_;
for (lbBranchBlocks &b : p->branch_blocks) {
GB_ASSERT(b.label != nullptr && label != nullptr);
GB_ASSERT(b.label->kind == Ast_Label);
if (b.label == label) {
b.break_ = break_;
b.continue_ = continue_;
return tl;
}
}
@@ -1095,8 +1090,7 @@ gb_internal bool lb_switch_stmt_can_be_trivial_jump_table(AstSwitchStmt *ss, boo
}
ast_node(body, BlockStmt, ss->body);
for_array(i, body->stmts) {
Ast *clause = body->stmts[i];
for (Ast *clause : body->stmts) {
ast_node(cc, CaseClause, clause);
if (cc->list.count == 0) {
@@ -1104,8 +1098,8 @@ gb_internal bool lb_switch_stmt_can_be_trivial_jump_table(AstSwitchStmt *ss, boo
continue;
}
for_array(j, cc->list) {
Ast *expr = unparen_expr(cc->list[j]);
for (Ast *expr : cc->list) {
expr = unparen_expr(expr);
if (is_ast_range(expr)) {
return false;
}
@@ -1166,8 +1160,7 @@ gb_internal void lb_build_switch_stmt(lbProcedure *p, AstSwitchStmt *ss, Scope *
LLVMValueRef switch_instr = nullptr;
if (is_trivial) {
isize num_cases = 0;
for_array(i, body->stmts) {
Ast *clause = body->stmts[i];
for (Ast *clause : body->stmts) {
ast_node(cc, CaseClause, clause);
num_cases += cc->list.count;
}
@@ -1204,8 +1197,8 @@ gb_internal void lb_build_switch_stmt(lbProcedure *p, AstSwitchStmt *ss, Scope *
}
lbBlock *next_cond = nullptr;
for_array(j, cc->list) {
Ast *expr = unparen_expr(cc->list[j]);
for (Ast *expr : cc->list) {
expr = unparen_expr(expr);
if (switch_instr != nullptr) {
lbValue on_val = {};
@@ -1384,8 +1377,7 @@ gb_internal void lb_build_type_switch_stmt(lbProcedure *p, AstTypeSwitchStmt *ss
lbBlock *default_block = nullptr;
isize num_cases = 0;
for_array(i, body->stmts) {
Ast *clause = body->stmts[i];
for (Ast *clause : body->stmts) {
ast_node(cc, CaseClause, clause);
num_cases += cc->list.count;
if (cc->list.count == 0) {
@@ -1405,8 +1397,7 @@ gb_internal void lb_build_type_switch_stmt(lbProcedure *p, AstTypeSwitchStmt *ss
switch_instr = LLVMBuildSwitch(p->builder, tag.value, else_block->block, cast(unsigned)num_cases);
}
for_array(i, body->stmts) {
Ast *clause = body->stmts[i];
for (Ast *clause : body->stmts) {
ast_node(cc, CaseClause, clause);
lb_open_scope(p, cc->scope);
if (cc->list.count == 0) {
@@ -1420,9 +1411,8 @@ gb_internal void lb_build_type_switch_stmt(lbProcedure *p, AstTypeSwitchStmt *ss
if (p->debug_info != nullptr) {
LLVMSetCurrentDebugLocation2(p->builder, lb_debug_location_from_ast(p, clause));
}
Type *case_type = nullptr;
for_array(type_index, cc->list) {
case_type = type_of_expr(cc->list[type_index]);
for (Ast *type_expr : cc->list) {
Type *case_type = type_of_expr(type_expr);
lbValue on_val = {};
if (switch_kind == TypeSwitch_Union) {
Type *ut = base_type(type_deref(parent.type));
@@ -1538,8 +1528,8 @@ gb_internal void lb_append_tuple_values(lbProcedure *p, Array<lbValue> *dst_valu
if (t->kind == Type_Tuple) {
lbTupleFix *tf = map_get(&p->tuple_fix_map, src_value.value);
if (tf) {
for_array(j, tf->values) {
array_add(dst_values, tf->values[j]);
for (lbValue const &value : tf->values) {
array_add(dst_values, value);
}
} else {
for_array(i, t->Tuple.variables) {
@@ -1560,8 +1550,7 @@ gb_internal void lb_build_assignment(lbProcedure *p, Array<lbAddr> &lvals, Slice
auto inits = array_make<lbValue>(permanent_allocator(), 0, lvals.count);
for_array(i, values) {
Ast *rhs = values[i];
for (Ast *rhs : values) {
lbValue init = lb_build_expr(p, rhs);
lb_append_tuple_values(p, &inits, init);
}
@@ -1971,8 +1960,7 @@ gb_internal void lb_build_assign_stmt_array(lbProcedure *p, TokenKind op, lbAddr
auto indices_handled = slice_make<bool>(temporary_allocator(), bt->Array.count);
auto indices = slice_make<i32>(temporary_allocator(), bt->Array.count);
i32 index_count = 0;
for_array(i, lhs.swizzle_large.indices) {
i32 index = lhs.swizzle_large.indices[i];
for (i32 index : lhs.swizzle_large.indices) {
if (indices_handled[index]) {
continue;
}
@@ -2049,8 +2037,7 @@ gb_internal void lb_build_assign_stmt(lbProcedure *p, AstAssignStmt *as) {
if (as->op.kind == Token_Eq) {
auto lvals = array_make<lbAddr>(permanent_allocator(), 0, as->lhs.count);
for_array(i, as->lhs) {
Ast *lhs = as->lhs[i];
for (Ast *lhs : as->lhs) {
lbAddr lval = {};
if (!is_blank_ident(lhs)) {
lval = lb_build_addr(p, lhs);
@@ -2185,12 +2172,12 @@ gb_internal void lb_build_stmt(lbProcedure *p, Ast *node) {
bool is_static = false;
if (vd->names.count > 0) {
for_array(i, vd->names) {
Ast *name = vd->names[i];
for (Ast *name : vd->names) {
if (!is_blank_ident(name)) {
GB_ASSERT(name->kind == Ast_Ident);
Entity *e = entity_of_node(name);
TokenPos pos = ast_token(name).pos;
GB_ASSERT_MSG(e != nullptr, "%s", token_pos_to_string(pos));
GB_ASSERT_MSG(e != nullptr, "\n%s missing entity for %.*s", token_pos_to_string(pos), LIT(name->Ident.token.string));
if (e->flags & EntityFlag_Static) {
// NOTE(bill): If one of the entities is static, they all are
is_static = true;
@@ -2207,8 +2194,7 @@ gb_internal void lb_build_stmt(lbProcedure *p, Ast *node) {
auto lvals = array_make<lbAddr>(permanent_allocator(), 0, vd->names.count);
for_array(i, vd->names) {
Ast *name = vd->names[i];
for (Ast *name : vd->names) {
lbAddr lval = {};
if (!is_blank_ident(name)) {
Entity *e = entity_of_node(name);

View File

@@ -2,9 +2,10 @@ gb_internal isize lb_type_info_index(CheckerInfo *info, Type *type, bool err_on_
auto *set = &info->minimum_dependency_type_info_set;
isize index = type_info_index(info, type, err_on_not_found);
if (index >= 0) {
isize i = ptr_entry_index(set, index);
if (i >= 0) {
return i+1;
auto *found = map_get(set, index);
if (found) {
GB_ASSERT(*found >= 0);
return *found + 1;
}
}
if (err_on_not_found) {
@@ -185,7 +186,7 @@ gb_internal void lb_setup_type_info_data(lbProcedure *p) { // NOTE(bill): Setup
if (entry_index <= 0) {
continue;
}
if (entries_handled[entry_index]) {
continue;
}

View File

@@ -13,17 +13,16 @@
#endif
#include "exact_value.cpp"
#include "build_settings.cpp"
gb_global ThreadPool global_thread_pool;
gb_internal void init_global_thread_pool(void) {
isize thread_count = gb_max(build_context.thread_count, 1);
isize worker_count = thread_count-1; // NOTE(bill): The main thread will also be used for work
isize worker_count = thread_count-1;
thread_pool_init(&global_thread_pool, permanent_allocator(), worker_count, "ThreadPoolWorker");
}
gb_internal bool global_thread_pool_add_task(WorkerTaskProc *proc, void *data) {
gb_internal bool thread_pool_add_task(WorkerTaskProc *proc, void *data) {
return thread_pool_add_task(&global_thread_pool, proc, data);
}
gb_internal void global_thread_pool_wait(void) {
gb_internal void thread_pool_wait(void) {
thread_pool_wait(&global_thread_pool);
}
@@ -213,11 +212,11 @@ gb_internal i32 linker_stage(lbGenerator *gen) {
StringSet libs = {};
string_set_init(&libs, heap_allocator(), 64);
string_set_init(&libs, 64);
defer (string_set_destroy(&libs));
StringSet asm_files = {};
string_set_init(&asm_files, heap_allocator(), 64);
string_set_init(&asm_files, 64);
defer (string_set_destroy(&asm_files));
for_array(j, gen->foreign_libraries) {
@@ -372,7 +371,7 @@ gb_internal i32 linker_stage(lbGenerator *gen) {
defer (gb_string_free(lib_str));
StringSet libs = {};
string_set_init(&libs, heap_allocator(), 64);
string_set_init(&libs, 64);
defer (string_set_destroy(&libs));
for_array(j, gen->foreign_libraries) {
@@ -618,7 +617,6 @@ enum BuildFlagKind {
BuildFlag_NoEntryPoint,
BuildFlag_UseLLD,
BuildFlag_UseSeparateModules,
BuildFlag_ThreadedChecker,
BuildFlag_NoThreadedChecker,
BuildFlag_ShowDebugMessages,
BuildFlag_Vet,
@@ -660,6 +658,7 @@ enum BuildFlagKind {
// internal use only
BuildFlag_InternalIgnoreLazy,
BuildFlag_InternalIgnoreLLVMBuild,
#if defined(GB_SYSTEM_WINDOWS)
BuildFlag_IgnoreVsSearch,
@@ -793,7 +792,6 @@ gb_internal bool parse_build_flags(Array<String> args) {
add_flag(&build_flags, BuildFlag_NoEntryPoint, str_lit("no-entry-point"), BuildFlagParam_None, Command__does_check &~ Command_test);
add_flag(&build_flags, BuildFlag_UseLLD, str_lit("lld"), BuildFlagParam_None, Command__does_build);
add_flag(&build_flags, BuildFlag_UseSeparateModules, str_lit("use-separate-modules"), BuildFlagParam_None, Command__does_build);
add_flag(&build_flags, BuildFlag_ThreadedChecker, str_lit("threaded-checker"), BuildFlagParam_None, Command__does_check);
add_flag(&build_flags, BuildFlag_NoThreadedChecker, str_lit("no-threaded-checker"), BuildFlagParam_None, Command__does_check);
add_flag(&build_flags, BuildFlag_ShowDebugMessages, str_lit("show-debug-messages"), BuildFlagParam_None, Command_all);
add_flag(&build_flags, BuildFlag_Vet, str_lit("vet"), BuildFlagParam_None, Command__does_check);
@@ -832,6 +830,7 @@ gb_internal bool parse_build_flags(Array<String> args) {
add_flag(&build_flags, BuildFlag_ErrorPosStyle, str_lit("error-pos-style"), BuildFlagParam_String, Command_all);
add_flag(&build_flags, BuildFlag_InternalIgnoreLazy, str_lit("internal-ignore-lazy"), BuildFlagParam_None, Command_all);
add_flag(&build_flags, BuildFlag_InternalIgnoreLLVMBuild, str_lit("internal-ignore-llvm-build"),BuildFlagParam_None, Command_all);
#if defined(GB_SYSTEM_WINDOWS)
add_flag(&build_flags, BuildFlag_IgnoreVsSearch, str_lit("ignore-vs-search"), BuildFlagParam_None, Command__does_build);
@@ -1310,20 +1309,8 @@ gb_internal bool parse_build_flags(Array<String> args) {
case BuildFlag_UseSeparateModules:
build_context.use_separate_modules = true;
break;
case BuildFlag_ThreadedChecker: {
#if defined(DEFAULT_TO_THREADED_CHECKER)
gb_printf_err("-threaded-checker is the default on this platform\n");
bad_flags = true;
#endif
build_context.threaded_checker = true;
break;
}
case BuildFlag_NoThreadedChecker: {
#if !defined(DEFAULT_TO_THREADED_CHECKER)
gb_printf_err("-no-threaded-checker is the default on this platform\n");
bad_flags = true;
#endif
build_context.threaded_checker = false;
build_context.no_threaded_checker = true;
break;
}
case BuildFlag_ShowDebugMessages:
@@ -1491,6 +1478,9 @@ gb_internal bool parse_build_flags(Array<String> args) {
case BuildFlag_InternalIgnoreLazy:
build_context.ignore_lazy = true;
break;
case BuildFlag_InternalIgnoreLLVMBuild:
build_context.ignore_llvm_build = true;
break;
#if defined(GB_SYSTEM_WINDOWS)
case BuildFlag_IgnoreVsSearch: {
GB_ASSERT(value.kind == ExactValue_Invalid);
@@ -2498,15 +2488,10 @@ int main(int arg_count, char const **arg_ptr) {
MAIN_TIME_SECTION("initialization");
virtual_memory_init();
mutex_init(&fullpath_mutex);
mutex_init(&hash_exact_value_mutex);
mutex_init(&global_type_name_objc_metadata_mutex);
init_string_buffer_memory();
init_string_interner();
init_global_error_collector();
init_keyword_hash_table();
init_type_mutex();
if (!check_env()) {
return 1;
@@ -2517,9 +2502,9 @@ int main(int arg_count, char const **arg_ptr) {
add_library_collection(str_lit("core"), get_fullpath_relative(heap_allocator(), odin_root_dir(), str_lit("core")));
add_library_collection(str_lit("vendor"), get_fullpath_relative(heap_allocator(), odin_root_dir(), str_lit("vendor")));
map_init(&build_context.defined_values, heap_allocator());
map_init(&build_context.defined_values);
build_context.extra_packages.allocator = heap_allocator();
string_set_init(&build_context.test_names, heap_allocator());
string_set_init(&build_context.test_names);
Array<String> args = setup_args(arg_count, arg_ptr);
@@ -2785,19 +2770,19 @@ int main(int arg_count, char const **arg_ptr) {
if (!lb_init_generator(gen, checker)) {
return 1;
}
lb_generate_code(gen);
switch (build_context.build_mode) {
case BuildMode_Executable:
case BuildMode_DynamicLibrary:
i32 result = linker_stage(gen);
if (result) {
if (build_context.show_timings) {
show_timings(checker, &global_timings);
if (lb_generate_code(gen)) {
switch (build_context.build_mode) {
case BuildMode_Executable:
case BuildMode_DynamicLibrary:
i32 result = linker_stage(gen);
if (result) {
if (build_context.show_timings) {
show_timings(checker, &global_timings);
}
return result;
}
return result;
break;
}
break;
}
remove_temp_files(gen);

View File

@@ -64,11 +64,9 @@ gb_global std::atomic<isize> global_total_node_memory_allocated;
// NOTE(bill): And this below is why is I/we need a new language! Discriminated unions are a pain in C/C++
gb_internal Ast *alloc_ast_node(AstFile *f, AstKind kind) {
gbAllocator a = ast_allocator(f);
isize size = ast_node_size(kind);
Ast *node = cast(Ast *)gb_alloc(a, size);
Ast *node = cast(Ast *)arena_alloc(&global_thread_local_ast_arena, size, 16);
node->kind = kind;
node->file_id = f ? f->id : 0;
@@ -77,33 +75,35 @@ gb_internal Ast *alloc_ast_node(AstFile *f, AstKind kind) {
return node;
}
gb_internal Ast *clone_ast(Ast *node);
gb_internal Array<Ast *> clone_ast_array(Array<Ast *> const &array) {
gb_internal Ast *clone_ast(Ast *node, AstFile *f = nullptr);
gb_internal Array<Ast *> clone_ast_array(Array<Ast *> const &array, AstFile *f) {
Array<Ast *> result = {};
if (array.count > 0) {
result = array_make<Ast *>(ast_allocator(nullptr), array.count);
for_array(i, array) {
result[i] = clone_ast(array[i]);
result[i] = clone_ast(array[i], f);
}
}
return result;
}
gb_internal Slice<Ast *> clone_ast_array(Slice<Ast *> const &array) {
gb_internal Slice<Ast *> clone_ast_array(Slice<Ast *> const &array, AstFile *f) {
Slice<Ast *> result = {};
if (array.count > 0) {
result = slice_clone(permanent_allocator(), array);
for_array(i, array) {
result[i] = clone_ast(array[i]);
result[i] = clone_ast(array[i], f);
}
}
return result;
}
gb_internal Ast *clone_ast(Ast *node) {
gb_internal Ast *clone_ast(Ast *node, AstFile *f) {
if (node == nullptr) {
return nullptr;
}
AstFile *f = node->thread_safe_file();
if (f == nullptr) {
f = node->thread_safe_file();
}
Ast *n = alloc_ast_node(f, node->kind);
gb_memmove(n, node, ast_node_size(node->kind));
@@ -120,279 +120,279 @@ gb_internal Ast *clone_ast(Ast *node) {
case Ast_BasicDirective: break;
case Ast_PolyType:
n->PolyType.type = clone_ast(n->PolyType.type);
n->PolyType.specialization = clone_ast(n->PolyType.specialization);
n->PolyType.type = clone_ast(n->PolyType.type, f);
n->PolyType.specialization = clone_ast(n->PolyType.specialization, f);
break;
case Ast_Ellipsis:
n->Ellipsis.expr = clone_ast(n->Ellipsis.expr);
n->Ellipsis.expr = clone_ast(n->Ellipsis.expr, f);
break;
case Ast_ProcGroup:
n->ProcGroup.args = clone_ast_array(n->ProcGroup.args);
n->ProcGroup.args = clone_ast_array(n->ProcGroup.args, f);
break;
case Ast_ProcLit:
n->ProcLit.type = clone_ast(n->ProcLit.type);
n->ProcLit.body = clone_ast(n->ProcLit.body);
n->ProcLit.where_clauses = clone_ast_array(n->ProcLit.where_clauses);
n->ProcLit.type = clone_ast(n->ProcLit.type, f);
n->ProcLit.body = clone_ast(n->ProcLit.body, f);
n->ProcLit.where_clauses = clone_ast_array(n->ProcLit.where_clauses, f);
break;
case Ast_CompoundLit:
n->CompoundLit.type = clone_ast(n->CompoundLit.type);
n->CompoundLit.elems = clone_ast_array(n->CompoundLit.elems);
n->CompoundLit.type = clone_ast(n->CompoundLit.type, f);
n->CompoundLit.elems = clone_ast_array(n->CompoundLit.elems, f);
break;
case Ast_BadExpr: break;
case Ast_TagExpr:
n->TagExpr.expr = clone_ast(n->TagExpr.expr);
n->TagExpr.expr = clone_ast(n->TagExpr.expr, f);
break;
case Ast_UnaryExpr:
n->UnaryExpr.expr = clone_ast(n->UnaryExpr.expr);
n->UnaryExpr.expr = clone_ast(n->UnaryExpr.expr, f);
break;
case Ast_BinaryExpr:
n->BinaryExpr.left = clone_ast(n->BinaryExpr.left);
n->BinaryExpr.right = clone_ast(n->BinaryExpr.right);
n->BinaryExpr.left = clone_ast(n->BinaryExpr.left, f);
n->BinaryExpr.right = clone_ast(n->BinaryExpr.right, f);
break;
case Ast_ParenExpr:
n->ParenExpr.expr = clone_ast(n->ParenExpr.expr);
n->ParenExpr.expr = clone_ast(n->ParenExpr.expr, f);
break;
case Ast_SelectorExpr:
n->SelectorExpr.expr = clone_ast(n->SelectorExpr.expr);
n->SelectorExpr.selector = clone_ast(n->SelectorExpr.selector);
n->SelectorExpr.expr = clone_ast(n->SelectorExpr.expr, f);
n->SelectorExpr.selector = clone_ast(n->SelectorExpr.selector, f);
break;
case Ast_ImplicitSelectorExpr:
n->ImplicitSelectorExpr.selector = clone_ast(n->ImplicitSelectorExpr.selector);
n->ImplicitSelectorExpr.selector = clone_ast(n->ImplicitSelectorExpr.selector, f);
break;
case Ast_SelectorCallExpr:
n->SelectorCallExpr.expr = clone_ast(n->SelectorCallExpr.expr);
n->SelectorCallExpr.call = clone_ast(n->SelectorCallExpr.call);
n->SelectorCallExpr.expr = clone_ast(n->SelectorCallExpr.expr, f);
n->SelectorCallExpr.call = clone_ast(n->SelectorCallExpr.call, f);
break;
case Ast_IndexExpr:
n->IndexExpr.expr = clone_ast(n->IndexExpr.expr);
n->IndexExpr.index = clone_ast(n->IndexExpr.index);
n->IndexExpr.expr = clone_ast(n->IndexExpr.expr, f);
n->IndexExpr.index = clone_ast(n->IndexExpr.index, f);
break;
case Ast_MatrixIndexExpr:
n->MatrixIndexExpr.expr = clone_ast(n->MatrixIndexExpr.expr);
n->MatrixIndexExpr.row_index = clone_ast(n->MatrixIndexExpr.row_index);
n->MatrixIndexExpr.column_index = clone_ast(n->MatrixIndexExpr.column_index);
n->MatrixIndexExpr.expr = clone_ast(n->MatrixIndexExpr.expr, f);
n->MatrixIndexExpr.row_index = clone_ast(n->MatrixIndexExpr.row_index, f);
n->MatrixIndexExpr.column_index = clone_ast(n->MatrixIndexExpr.column_index, f);
break;
case Ast_DerefExpr:
n->DerefExpr.expr = clone_ast(n->DerefExpr.expr);
n->DerefExpr.expr = clone_ast(n->DerefExpr.expr, f);
break;
case Ast_SliceExpr:
n->SliceExpr.expr = clone_ast(n->SliceExpr.expr);
n->SliceExpr.low = clone_ast(n->SliceExpr.low);
n->SliceExpr.high = clone_ast(n->SliceExpr.high);
n->SliceExpr.expr = clone_ast(n->SliceExpr.expr, f);
n->SliceExpr.low = clone_ast(n->SliceExpr.low, f);
n->SliceExpr.high = clone_ast(n->SliceExpr.high, f);
break;
case Ast_CallExpr:
n->CallExpr.proc = clone_ast(n->CallExpr.proc);
n->CallExpr.args = clone_ast_array(n->CallExpr.args);
n->CallExpr.proc = clone_ast(n->CallExpr.proc, f);
n->CallExpr.args = clone_ast_array(n->CallExpr.args, f);
break;
case Ast_FieldValue:
n->FieldValue.field = clone_ast(n->FieldValue.field);
n->FieldValue.value = clone_ast(n->FieldValue.value);
n->FieldValue.field = clone_ast(n->FieldValue.field, f);
n->FieldValue.value = clone_ast(n->FieldValue.value, f);
break;
case Ast_EnumFieldValue:
n->EnumFieldValue.name = clone_ast(n->EnumFieldValue.name);
n->EnumFieldValue.value = clone_ast(n->EnumFieldValue.value);
n->EnumFieldValue.name = clone_ast(n->EnumFieldValue.name, f);
n->EnumFieldValue.value = clone_ast(n->EnumFieldValue.value, f);
break;
case Ast_TernaryIfExpr:
n->TernaryIfExpr.x = clone_ast(n->TernaryIfExpr.x);
n->TernaryIfExpr.cond = clone_ast(n->TernaryIfExpr.cond);
n->TernaryIfExpr.y = clone_ast(n->TernaryIfExpr.y);
n->TernaryIfExpr.x = clone_ast(n->TernaryIfExpr.x, f);
n->TernaryIfExpr.cond = clone_ast(n->TernaryIfExpr.cond, f);
n->TernaryIfExpr.y = clone_ast(n->TernaryIfExpr.y, f);
break;
case Ast_TernaryWhenExpr:
n->TernaryWhenExpr.x = clone_ast(n->TernaryWhenExpr.x);
n->TernaryWhenExpr.cond = clone_ast(n->TernaryWhenExpr.cond);
n->TernaryWhenExpr.y = clone_ast(n->TernaryWhenExpr.y);
n->TernaryWhenExpr.x = clone_ast(n->TernaryWhenExpr.x, f);
n->TernaryWhenExpr.cond = clone_ast(n->TernaryWhenExpr.cond, f);
n->TernaryWhenExpr.y = clone_ast(n->TernaryWhenExpr.y, f);
break;
case Ast_OrElseExpr:
n->OrElseExpr.x = clone_ast(n->OrElseExpr.x);
n->OrElseExpr.y = clone_ast(n->OrElseExpr.y);
n->OrElseExpr.x = clone_ast(n->OrElseExpr.x, f);
n->OrElseExpr.y = clone_ast(n->OrElseExpr.y, f);
break;
case Ast_OrReturnExpr:
n->OrReturnExpr.expr = clone_ast(n->OrReturnExpr.expr);
n->OrReturnExpr.expr = clone_ast(n->OrReturnExpr.expr, f);
break;
case Ast_TypeAssertion:
n->TypeAssertion.expr = clone_ast(n->TypeAssertion.expr);
n->TypeAssertion.type = clone_ast(n->TypeAssertion.type);
n->TypeAssertion.expr = clone_ast(n->TypeAssertion.expr, f);
n->TypeAssertion.type = clone_ast(n->TypeAssertion.type, f);
break;
case Ast_TypeCast:
n->TypeCast.type = clone_ast(n->TypeCast.type);
n->TypeCast.expr = clone_ast(n->TypeCast.expr);
n->TypeCast.type = clone_ast(n->TypeCast.type, f);
n->TypeCast.expr = clone_ast(n->TypeCast.expr, f);
break;
case Ast_AutoCast:
n->AutoCast.expr = clone_ast(n->AutoCast.expr);
n->AutoCast.expr = clone_ast(n->AutoCast.expr, f);
break;
case Ast_InlineAsmExpr:
n->InlineAsmExpr.param_types = clone_ast_array(n->InlineAsmExpr.param_types);
n->InlineAsmExpr.return_type = clone_ast(n->InlineAsmExpr.return_type);
n->InlineAsmExpr.asm_string = clone_ast(n->InlineAsmExpr.asm_string);
n->InlineAsmExpr.constraints_string = clone_ast(n->InlineAsmExpr.constraints_string);
n->InlineAsmExpr.param_types = clone_ast_array(n->InlineAsmExpr.param_types, f);
n->InlineAsmExpr.return_type = clone_ast(n->InlineAsmExpr.return_type, f);
n->InlineAsmExpr.asm_string = clone_ast(n->InlineAsmExpr.asm_string, f);
n->InlineAsmExpr.constraints_string = clone_ast(n->InlineAsmExpr.constraints_string, f);
break;
case Ast_BadStmt: break;
case Ast_EmptyStmt: break;
case Ast_ExprStmt:
n->ExprStmt.expr = clone_ast(n->ExprStmt.expr);
n->ExprStmt.expr = clone_ast(n->ExprStmt.expr, f);
break;
case Ast_AssignStmt:
n->AssignStmt.lhs = clone_ast_array(n->AssignStmt.lhs);
n->AssignStmt.rhs = clone_ast_array(n->AssignStmt.rhs);
n->AssignStmt.lhs = clone_ast_array(n->AssignStmt.lhs, f);
n->AssignStmt.rhs = clone_ast_array(n->AssignStmt.rhs, f);
break;
case Ast_BlockStmt:
n->BlockStmt.label = clone_ast(n->BlockStmt.label);
n->BlockStmt.stmts = clone_ast_array(n->BlockStmt.stmts);
n->BlockStmt.label = clone_ast(n->BlockStmt.label, f);
n->BlockStmt.stmts = clone_ast_array(n->BlockStmt.stmts, f);
break;
case Ast_IfStmt:
n->IfStmt.label = clone_ast(n->IfStmt.label);
n->IfStmt.init = clone_ast(n->IfStmt.init);
n->IfStmt.cond = clone_ast(n->IfStmt.cond);
n->IfStmt.body = clone_ast(n->IfStmt.body);
n->IfStmt.else_stmt = clone_ast(n->IfStmt.else_stmt);
n->IfStmt.label = clone_ast(n->IfStmt.label, f);
n->IfStmt.init = clone_ast(n->IfStmt.init, f);
n->IfStmt.cond = clone_ast(n->IfStmt.cond, f);
n->IfStmt.body = clone_ast(n->IfStmt.body, f);
n->IfStmt.else_stmt = clone_ast(n->IfStmt.else_stmt, f);
break;
case Ast_WhenStmt:
n->WhenStmt.cond = clone_ast(n->WhenStmt.cond);
n->WhenStmt.body = clone_ast(n->WhenStmt.body);
n->WhenStmt.else_stmt = clone_ast(n->WhenStmt.else_stmt);
n->WhenStmt.cond = clone_ast(n->WhenStmt.cond, f);
n->WhenStmt.body = clone_ast(n->WhenStmt.body, f);
n->WhenStmt.else_stmt = clone_ast(n->WhenStmt.else_stmt, f);
break;
case Ast_ReturnStmt:
n->ReturnStmt.results = clone_ast_array(n->ReturnStmt.results);
n->ReturnStmt.results = clone_ast_array(n->ReturnStmt.results, f);
break;
case Ast_ForStmt:
n->ForStmt.label = clone_ast(n->ForStmt.label);
n->ForStmt.init = clone_ast(n->ForStmt.init);
n->ForStmt.cond = clone_ast(n->ForStmt.cond);
n->ForStmt.post = clone_ast(n->ForStmt.post);
n->ForStmt.body = clone_ast(n->ForStmt.body);
n->ForStmt.label = clone_ast(n->ForStmt.label, f);
n->ForStmt.init = clone_ast(n->ForStmt.init, f);
n->ForStmt.cond = clone_ast(n->ForStmt.cond, f);
n->ForStmt.post = clone_ast(n->ForStmt.post, f);
n->ForStmt.body = clone_ast(n->ForStmt.body, f);
break;
case Ast_RangeStmt:
n->RangeStmt.label = clone_ast(n->RangeStmt.label);
n->RangeStmt.vals = clone_ast_array(n->RangeStmt.vals);
n->RangeStmt.expr = clone_ast(n->RangeStmt.expr);
n->RangeStmt.body = clone_ast(n->RangeStmt.body);
n->RangeStmt.label = clone_ast(n->RangeStmt.label, f);
n->RangeStmt.vals = clone_ast_array(n->RangeStmt.vals, f);
n->RangeStmt.expr = clone_ast(n->RangeStmt.expr, f);
n->RangeStmt.body = clone_ast(n->RangeStmt.body, f);
break;
case Ast_UnrollRangeStmt:
n->UnrollRangeStmt.val0 = clone_ast(n->UnrollRangeStmt.val0);
n->UnrollRangeStmt.val1 = clone_ast(n->UnrollRangeStmt.val1);
n->UnrollRangeStmt.expr = clone_ast(n->UnrollRangeStmt.expr);
n->UnrollRangeStmt.body = clone_ast(n->UnrollRangeStmt.body);
n->UnrollRangeStmt.val0 = clone_ast(n->UnrollRangeStmt.val0, f);
n->UnrollRangeStmt.val1 = clone_ast(n->UnrollRangeStmt.val1, f);
n->UnrollRangeStmt.expr = clone_ast(n->UnrollRangeStmt.expr, f);
n->UnrollRangeStmt.body = clone_ast(n->UnrollRangeStmt.body, f);
break;
case Ast_CaseClause:
n->CaseClause.list = clone_ast_array(n->CaseClause.list);
n->CaseClause.stmts = clone_ast_array(n->CaseClause.stmts);
n->CaseClause.list = clone_ast_array(n->CaseClause.list, f);
n->CaseClause.stmts = clone_ast_array(n->CaseClause.stmts, f);
n->CaseClause.implicit_entity = nullptr;
break;
case Ast_SwitchStmt:
n->SwitchStmt.label = clone_ast(n->SwitchStmt.label);
n->SwitchStmt.init = clone_ast(n->SwitchStmt.init);
n->SwitchStmt.tag = clone_ast(n->SwitchStmt.tag);
n->SwitchStmt.body = clone_ast(n->SwitchStmt.body);
n->SwitchStmt.label = clone_ast(n->SwitchStmt.label, f);
n->SwitchStmt.init = clone_ast(n->SwitchStmt.init, f);
n->SwitchStmt.tag = clone_ast(n->SwitchStmt.tag, f);
n->SwitchStmt.body = clone_ast(n->SwitchStmt.body, f);
break;
case Ast_TypeSwitchStmt:
n->TypeSwitchStmt.label = clone_ast(n->TypeSwitchStmt.label);
n->TypeSwitchStmt.tag = clone_ast(n->TypeSwitchStmt.tag);
n->TypeSwitchStmt.body = clone_ast(n->TypeSwitchStmt.body);
n->TypeSwitchStmt.label = clone_ast(n->TypeSwitchStmt.label, f);
n->TypeSwitchStmt.tag = clone_ast(n->TypeSwitchStmt.tag, f);
n->TypeSwitchStmt.body = clone_ast(n->TypeSwitchStmt.body, f);
break;
case Ast_DeferStmt:
n->DeferStmt.stmt = clone_ast(n->DeferStmt.stmt);
n->DeferStmt.stmt = clone_ast(n->DeferStmt.stmt, f);
break;
case Ast_BranchStmt:
n->BranchStmt.label = clone_ast(n->BranchStmt.label);
n->BranchStmt.label = clone_ast(n->BranchStmt.label, f);
break;
case Ast_UsingStmt:
n->UsingStmt.list = clone_ast_array(n->UsingStmt.list);
n->UsingStmt.list = clone_ast_array(n->UsingStmt.list, f);
break;
case Ast_BadDecl: break;
case Ast_ForeignBlockDecl:
n->ForeignBlockDecl.foreign_library = clone_ast(n->ForeignBlockDecl.foreign_library);
n->ForeignBlockDecl.body = clone_ast(n->ForeignBlockDecl.body);
n->ForeignBlockDecl.attributes = clone_ast_array(n->ForeignBlockDecl.attributes);
n->ForeignBlockDecl.foreign_library = clone_ast(n->ForeignBlockDecl.foreign_library, f);
n->ForeignBlockDecl.body = clone_ast(n->ForeignBlockDecl.body, f);
n->ForeignBlockDecl.attributes = clone_ast_array(n->ForeignBlockDecl.attributes, f);
break;
case Ast_Label:
n->Label.name = clone_ast(n->Label.name);
n->Label.name = clone_ast(n->Label.name, f);
break;
case Ast_ValueDecl:
n->ValueDecl.names = clone_ast_array(n->ValueDecl.names);
n->ValueDecl.type = clone_ast(n->ValueDecl.type);
n->ValueDecl.values = clone_ast_array(n->ValueDecl.values);
n->ValueDecl.attributes = clone_ast_array(n->ValueDecl.attributes);
n->ValueDecl.names = clone_ast_array(n->ValueDecl.names, f);
n->ValueDecl.type = clone_ast(n->ValueDecl.type, f);
n->ValueDecl.values = clone_ast_array(n->ValueDecl.values, f);
n->ValueDecl.attributes = clone_ast_array(n->ValueDecl.attributes, f);
break;
case Ast_Attribute:
n->Attribute.elems = clone_ast_array(n->Attribute.elems);
n->Attribute.elems = clone_ast_array(n->Attribute.elems, f);
break;
case Ast_Field:
n->Field.names = clone_ast_array(n->Field.names);
n->Field.type = clone_ast(n->Field.type);
n->Field.names = clone_ast_array(n->Field.names, f);
n->Field.type = clone_ast(n->Field.type, f);
break;
case Ast_FieldList:
n->FieldList.list = clone_ast_array(n->FieldList.list);
n->FieldList.list = clone_ast_array(n->FieldList.list, f);
break;
case Ast_TypeidType:
n->TypeidType.specialization = clone_ast(n->TypeidType.specialization);
n->TypeidType.specialization = clone_ast(n->TypeidType.specialization, f);
break;
case Ast_HelperType:
n->HelperType.type = clone_ast(n->HelperType.type);
n->HelperType.type = clone_ast(n->HelperType.type, f);
break;
case Ast_DistinctType:
n->DistinctType.type = clone_ast(n->DistinctType.type);
n->DistinctType.type = clone_ast(n->DistinctType.type, f);
break;
case Ast_ProcType:
n->ProcType.params = clone_ast(n->ProcType.params);
n->ProcType.results = clone_ast(n->ProcType.results);
n->ProcType.params = clone_ast(n->ProcType.params, f);
n->ProcType.results = clone_ast(n->ProcType.results, f);
break;
case Ast_RelativeType:
n->RelativeType.tag = clone_ast(n->RelativeType.tag);
n->RelativeType.type = clone_ast(n->RelativeType.type);
n->RelativeType.tag = clone_ast(n->RelativeType.tag, f);
n->RelativeType.type = clone_ast(n->RelativeType.type, f);
break;
case Ast_PointerType:
n->PointerType.type = clone_ast(n->PointerType.type);
n->PointerType.tag = clone_ast(n->PointerType.tag);
n->PointerType.type = clone_ast(n->PointerType.type, f);
n->PointerType.tag = clone_ast(n->PointerType.tag, f);
break;
case Ast_MultiPointerType:
n->MultiPointerType.type = clone_ast(n->MultiPointerType.type);
n->MultiPointerType.type = clone_ast(n->MultiPointerType.type, f);
break;
case Ast_ArrayType:
n->ArrayType.count = clone_ast(n->ArrayType.count);
n->ArrayType.elem = clone_ast(n->ArrayType.elem);
n->ArrayType.tag = clone_ast(n->ArrayType.tag);
n->ArrayType.count = clone_ast(n->ArrayType.count, f);
n->ArrayType.elem = clone_ast(n->ArrayType.elem, f);
n->ArrayType.tag = clone_ast(n->ArrayType.tag, f);
break;
case Ast_DynamicArrayType:
n->DynamicArrayType.elem = clone_ast(n->DynamicArrayType.elem);
n->DynamicArrayType.elem = clone_ast(n->DynamicArrayType.elem, f);
break;
case Ast_StructType:
n->StructType.fields = clone_ast_array(n->StructType.fields);
n->StructType.polymorphic_params = clone_ast(n->StructType.polymorphic_params);
n->StructType.align = clone_ast(n->StructType.align);
n->StructType.where_clauses = clone_ast_array(n->StructType.where_clauses);
n->StructType.fields = clone_ast_array(n->StructType.fields, f);
n->StructType.polymorphic_params = clone_ast(n->StructType.polymorphic_params, f);
n->StructType.align = clone_ast(n->StructType.align, f);
n->StructType.where_clauses = clone_ast_array(n->StructType.where_clauses, f);
break;
case Ast_UnionType:
n->UnionType.variants = clone_ast_array(n->UnionType.variants);
n->UnionType.polymorphic_params = clone_ast(n->UnionType.polymorphic_params);
n->UnionType.where_clauses = clone_ast_array(n->UnionType.where_clauses);
n->UnionType.variants = clone_ast_array(n->UnionType.variants, f);
n->UnionType.polymorphic_params = clone_ast(n->UnionType.polymorphic_params, f);
n->UnionType.where_clauses = clone_ast_array(n->UnionType.where_clauses, f);
break;
case Ast_EnumType:
n->EnumType.base_type = clone_ast(n->EnumType.base_type);
n->EnumType.fields = clone_ast_array(n->EnumType.fields);
n->EnumType.base_type = clone_ast(n->EnumType.base_type, f);
n->EnumType.fields = clone_ast_array(n->EnumType.fields, f);
break;
case Ast_BitSetType:
n->BitSetType.elem = clone_ast(n->BitSetType.elem);
n->BitSetType.underlying = clone_ast(n->BitSetType.underlying);
n->BitSetType.elem = clone_ast(n->BitSetType.elem, f);
n->BitSetType.underlying = clone_ast(n->BitSetType.underlying, f);
break;
case Ast_MapType:
n->MapType.count = clone_ast(n->MapType.count);
n->MapType.key = clone_ast(n->MapType.key);
n->MapType.value = clone_ast(n->MapType.value);
n->MapType.count = clone_ast(n->MapType.count, f);
n->MapType.key = clone_ast(n->MapType.key, f);
n->MapType.value = clone_ast(n->MapType.value, f);
break;
case Ast_MatrixType:
n->MatrixType.row_count = clone_ast(n->MatrixType.row_count);
n->MatrixType.column_count = clone_ast(n->MatrixType.column_count);
n->MatrixType.elem = clone_ast(n->MatrixType.elem);
n->MatrixType.row_count = clone_ast(n->MatrixType.row_count, f);
n->MatrixType.column_count = clone_ast(n->MatrixType.column_count, f);
n->MatrixType.elem = clone_ast(n->MatrixType.elem, f);
break;
}
@@ -1905,13 +1905,11 @@ gb_internal void check_polymorphic_params_for_type(AstFile *f, Ast *polymorphic_
return;
}
ast_node(fl, FieldList, polymorphic_params);
for_array(fi, fl->list) {
Ast *field = fl->list[fi];
for (Ast *field : fl->list) {
if (field->kind != Ast_Field) {
continue;
}
for_array(i, field->Field.names) {
Ast *name = field->Field.names[i];
for (Ast *name : field->Field.names) {
if (name->kind != field->Field.names[0]->kind) {
syntax_error(name, "Mixture of polymorphic names using both $ and not for %.*s parameters", LIT(token.string));
return;
@@ -3473,16 +3471,14 @@ gb_internal Ast *parse_proc_type(AstFile *f, Token proc_token) {
u64 tags = 0;
bool is_generic = false;
for_array(i, params->FieldList.list) {
Ast *param = params->FieldList.list[i];
for (Ast *param : params->FieldList.list) {
ast_node(field, Field, param);
if (field->type != nullptr) {
if (field->type->kind == Ast_PolyType) {
is_generic = true;
goto end;
}
for_array(j, field->names) {
Ast *name = field->names[j];
for (Ast *name : field->names) {
if (name->kind == Ast_PolyType) {
is_generic = true;
goto end;
@@ -3646,8 +3642,9 @@ struct AstAndFlags {
gb_internal Array<Ast *> convert_to_ident_list(AstFile *f, Array<AstAndFlags> list, bool ignore_flags, bool allow_poly_names) {
auto idents = array_make<Ast *>(heap_allocator(), 0, list.count);
// Convert to ident list
for_array(i, list) {
Ast *ident = list[i].node;
isize i = 0;
for (AstAndFlags const &item : list) {
Ast *ident = item.node;
if (!ignore_flags) {
if (i != 0) {
@@ -3678,6 +3675,7 @@ gb_internal Array<Ast *> convert_to_ident_list(AstFile *f, Array<AstAndFlags> li
break;
}
array_add(&idents, ident);
i += 1;
}
return idents;
}
@@ -3919,8 +3917,8 @@ gb_internal Ast *parse_field_list(AstFile *f, isize *name_count_, u32 allowed_fl
return ast_field_list(f, start_token, params);
}
for_array(i, list) {
Ast *type = list[i].node;
for (AstAndFlags const &item : list) {
Ast *type = item.node;
Token token = blank_token;
if (allowed_flags&FieldFlag_Results) {
// NOTE(bill): Make this nothing and not `_`
@@ -3930,9 +3928,9 @@ gb_internal Ast *parse_field_list(AstFile *f, isize *name_count_, u32 allowed_fl
auto names = array_make<Ast *>(heap_allocator(), 1);
token.pos = ast_token(type).pos;
names[0] = ast_ident(f, token);
u32 flags = check_field_prefixes(f, list.count, allowed_flags, list[i].flags);
u32 flags = check_field_prefixes(f, list.count, allowed_flags, item.flags);
Token tag = {};
Ast *param = ast_field(f, names, list[i].node, nullptr, flags, tag, docs, f->line_comment);
Ast *param = ast_field(f, names, item.node, nullptr, flags, tag, docs, f->line_comment);
array_add(&params, param);
}
@@ -4856,40 +4854,31 @@ gb_internal void destroy_ast_file(AstFile *f) {
gb_internal bool init_parser(Parser *p) {
GB_ASSERT(p != nullptr);
string_set_init(&p->imported_files, heap_allocator());
string_set_init(&p->imported_files);
array_init(&p->packages, heap_allocator());
mutex_init(&p->imported_files_mutex);
mutex_init(&p->file_decl_mutex);
mutex_init(&p->packages_mutex);
mutex_init(&p->file_error_mutex);
return true;
}
gb_internal void destroy_parser(Parser *p) {
GB_ASSERT(p != nullptr);
// TODO(bill): Fix memory leak
for_array(i, p->packages) {
AstPackage *pkg = p->packages[i];
for_array(j, pkg->files) {
destroy_ast_file(pkg->files[j]);
for (AstPackage *pkg : p->packages) {
for (AstFile *file : pkg->files) {
destroy_ast_file(file);
}
array_free(&pkg->files);
array_free(&pkg->foreign_files);
}
array_free(&p->packages);
string_set_destroy(&p->imported_files);
mutex_destroy(&p->imported_files_mutex);
mutex_destroy(&p->file_decl_mutex);
mutex_destroy(&p->packages_mutex);
mutex_destroy(&p->file_error_mutex);
}
gb_internal void parser_add_package(Parser *p, AstPackage *pkg) {
mutex_lock(&p->packages_mutex);
pkg->id = p->packages.count+1;
array_add(&p->packages, pkg);
mutex_unlock(&p->packages_mutex);
MUTEX_GUARD_BLOCK(&p->packages_mutex) {
pkg->id = p->packages.count+1;
array_add(&p->packages, pkg);
}
}
gb_internal ParseFileError process_imported_file(Parser *p, ImportedFile imported_file);
@@ -4901,15 +4890,15 @@ gb_internal WORKER_TASK_PROC(parser_worker_proc) {
auto *node = gb_alloc_item(permanent_allocator(), ParseFileErrorNode);
node->err = err;
mutex_lock(&wd->parser->file_error_mutex);
if (wd->parser->file_error_tail != nullptr) {
wd->parser->file_error_tail->next = node;
MUTEX_GUARD_BLOCK(&wd->parser->file_error_mutex) {
if (wd->parser->file_error_tail != nullptr) {
wd->parser->file_error_tail->next = node;
}
wd->parser->file_error_tail = node;
if (wd->parser->file_error_head == nullptr) {
wd->parser->file_error_head = node;
}
}
wd->parser->file_error_tail = node;
if (wd->parser->file_error_head == nullptr) {
wd->parser->file_error_head = node;
}
mutex_unlock(&wd->parser->file_error_mutex);
}
return cast(isize)err;
}
@@ -4921,7 +4910,7 @@ gb_internal void parser_add_file_to_process(Parser *p, AstPackage *pkg, FileInfo
auto wd = gb_alloc_item(permanent_allocator(), ParserWorkerData);
wd->parser = p;
wd->imported_file = f;
global_thread_pool_add_task(parser_worker_proc, wd);
thread_pool_add_task(parser_worker_proc, wd);
}
gb_internal WORKER_TASK_PROC(foreign_file_worker_proc) {
@@ -4945,9 +4934,9 @@ gb_internal WORKER_TASK_PROC(foreign_file_worker_proc) {
// TODO(bill): Actually do something with it
break;
}
mutex_lock(&pkg->foreign_files_mutex);
array_add(&pkg->foreign_files, foreign_file);
mutex_unlock(&pkg->foreign_files_mutex);
MUTEX_GUARD_BLOCK(&pkg->foreign_files_mutex) {
array_add(&pkg->foreign_files, foreign_file);
}
return 0;
}
@@ -4959,7 +4948,7 @@ gb_internal void parser_add_foreign_file_to_process(Parser *p, AstPackage *pkg,
wd->parser = p;
wd->imported_file = f;
wd->foreign_kind = kind;
global_thread_pool_add_task(foreign_file_worker_proc, wd);
thread_pool_add_task(foreign_file_worker_proc, wd);
}
@@ -4978,19 +4967,16 @@ gb_internal AstPackage *try_add_import_path(Parser *p, String const &path, Strin
pkg->fullpath = path;
array_init(&pkg->files, heap_allocator());
pkg->foreign_files.allocator = heap_allocator();
mutex_init(&pkg->files_mutex);
mutex_init(&pkg->foreign_files_mutex);
// NOTE(bill): Single file initial package
if (kind == Package_Init && string_ends_with(path, FILE_EXT)) {
FileInfo fi = {};
fi.name = filename_from_path(path);
fi.fullpath = path;
fi.size = get_file_size(path);
fi.is_dir = false;
array_reserve(&pkg->files, 1);
pkg->is_single_file = true;
parser_add_package(p, pkg);
parser_add_file_to_process(p, pkg, fi, pos);
@@ -5028,8 +5014,17 @@ gb_internal AstPackage *try_add_import_path(Parser *p, String const &path, Strin
return nullptr;
}
for_array(list_index, list) {
FileInfo fi = list[list_index];
isize files_to_reserve = 1; // always reserve 1
for (FileInfo fi : list) {
String name = fi.name;
String ext = path_extension(name);
if (ext == FILE_EXT && !is_excluded_target_filename(name)) {
files_to_reserve += 1;
}
}
array_reserve(&pkg->files, files_to_reserve);
for (FileInfo fi : list) {
String name = fi.name;
String ext = path_extension(name);
if (ext == FILE_EXT) {
@@ -5322,14 +5317,14 @@ gb_internal void parse_setup_file_decls(Parser *p, AstFile *f, String const &bas
auto fullpaths = array_make<String>(permanent_allocator(), 0, fl->filepaths.count);
for_array(fp_idx, fl->filepaths) {
String file_str = string_trim_whitespace(string_value_from_token(f, fl->filepaths[fp_idx]));
for (Token const &fp : fl->filepaths) {
String file_str = string_trim_whitespace(string_value_from_token(f, fp));
String fullpath = file_str;
if (allow_check_foreign_filepath()) {
String foreign_path = {};
bool ok = determine_path_from_string(&p->file_decl_mutex, node, base_dir, file_str, &foreign_path);
if (!ok) {
decls[i] = ast_bad_decl(f, fl->filepaths[fp_idx], fl->filepaths[fl->filepaths.count-1]);
decls[i] = ast_bad_decl(f, fp, fl->filepaths[fl->filepaths.count-1]);
goto end;
}
fullpath = foreign_path;
@@ -5454,8 +5449,8 @@ gb_internal isize calc_decl_count(Ast *decl) {
isize count = 0;
switch (decl->kind) {
case Ast_BlockStmt:
for_array(i, decl->BlockStmt.stmts) {
count += calc_decl_count(decl->BlockStmt.stmts.data[i]);
for (Ast *stmt : decl->BlockStmt.stmts) {
count += calc_decl_count(stmt);
}
break;
case Ast_WhenStmt:
@@ -5575,8 +5570,8 @@ gb_internal bool parse_file(Parser *p, AstFile *f) {
f->package_name = package_name.string;
if (!f->pkg->is_single_file && docs != nullptr && docs->list.count > 0) {
for_array(i, docs->list) {
Token tok = docs->list[i]; GB_ASSERT(tok.kind == Token_Comment);
for (Token const &tok : docs->list) {
GB_ASSERT(tok.kind == Token_Comment);
String str = tok.string;
if (string_starts_with(str, str_lit("//"))) {
String lc = string_trim_whitespace(substring(str, 2, str.len));
@@ -5589,6 +5584,8 @@ gb_internal bool parse_file(Parser *p, AstFile *f) {
if (!parse_build_tag(tok, lc)) {
return false;
}
} else if (string_starts_with(lc, str_lit("+ignore"))) {
return false;
} else if (string_starts_with(lc, str_lit("+private"))) {
f->flags |= AstFile_IsPrivatePkg;
String command = string_trim_starts_with(lc, str_lit("+private "));
@@ -5787,8 +5784,7 @@ gb_internal ParseFileError parse_packages(Parser *p, String init_filename) {
}
for_array(i, build_context.extra_packages) {
String path = build_context.extra_packages[i];
for (String const &path : build_context.extra_packages) {
String fullpath = path_to_full_path(heap_allocator(), path); // LEAK?
if (!path_is_directory(fullpath)) {
String const ext = str_lit(".odin");
@@ -5804,7 +5800,7 @@ gb_internal ParseFileError parse_packages(Parser *p, String init_filename) {
}
}
global_thread_pool_wait();
thread_pool_wait();
for (ParseFileErrorNode *node = p->file_error_head; node != nullptr; node = node->next) {
if (node->err != ParseFile_None) {

View File

@@ -174,6 +174,7 @@ struct AstPackage {
BlockingMutex files_mutex;
BlockingMutex foreign_files_mutex;
BlockingMutex type_and_value_mutex;
MPMCQueue<AstPackageExportedEntity> exported_entity_queue;
@@ -820,9 +821,8 @@ gb_internal gb_inline bool is_ast_when_stmt(Ast *node) {
gb_global gb_thread_local Arena global_thread_local_ast_arena = {};
gb_internal gbAllocator ast_allocator(AstFile *f) {
Arena *arena = &global_thread_local_ast_arena;
return arena_allocator(arena);
gb_internal gb_inline gbAllocator ast_allocator(AstFile *f) {
return arena_allocator(&global_thread_local_ast_arena);
}
gb_internal Ast *alloc_ast_node(AstFile *f, AstKind kind);

View File

@@ -225,7 +225,6 @@ gb_internal i64 get_file_size(String path) {
gb_internal ReadDirectoryError read_directory(String path, Array<FileInfo> *fi) {
GB_ASSERT(fi != nullptr);
gbAllocator a = heap_allocator();
while (path.len > 0) {
Rune end = path[path.len-1];
@@ -242,9 +241,7 @@ gb_internal ReadDirectoryError read_directory(String path, Array<FileInfo> *fi)
return ReadDirectory_InvalidPath;
}
{
char *c_str = alloc_cstring(a, path);
defer (gb_free(a, c_str));
char *c_str = alloc_cstring(temporary_allocator(), path);
gbFile f = {};
gbFileError file_err = gb_file_open(&f, c_str);
defer (gb_file_close(&f));
@@ -261,6 +258,7 @@ gb_internal ReadDirectoryError read_directory(String path, Array<FileInfo> *fi)
}
gbAllocator a = heap_allocator();
char *new_path = gb_alloc_array(a, char, path.len+3);
defer (gb_free(a, new_path));
@@ -283,8 +281,8 @@ gb_internal ReadDirectoryError read_directory(String path, Array<FileInfo> *fi)
do {
wchar_t *filename_w = file_data.cFileName;
i64 size = cast(i64)file_data.nFileSizeLow;
size |= (cast(i64)file_data.nFileSizeHigh) << 32;
u64 size = cast(u64)file_data.nFileSizeLow;
size |= (cast(u64)file_data.nFileSizeHigh) << 32;
String name = string16_to_string(a, make_string16_c(filename_w));
if (name == "." || name == "..") {
gb_free(a, name.text);
@@ -302,7 +300,7 @@ gb_internal ReadDirectoryError read_directory(String path, Array<FileInfo> *fi)
FileInfo info = {};
info.name = name;
info.fullpath = path_to_full_path(a, filepath);
info.size = size;
info.size = cast(i64)size;
info.is_dir = (file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0;
array_add(fi, info);
} while (FindNextFileW(find_file, &file_data));

View File

@@ -27,6 +27,7 @@ struct PtrMap {
gb_internal gb_inline u32 ptr_map_hash_key(uintptr key) {
u32 res;
#if defined(GB_ARCH_64_BIT)
key = (~key) + (key << 21);
key = key ^ (key >> 24);
@@ -34,22 +35,24 @@ gb_internal gb_inline u32 ptr_map_hash_key(uintptr key) {
key = key ^ (key >> 14);
key = (key + (key << 2)) + (key << 4);
key = key ^ (key << 28);
return cast(u32)key;
res = cast(u32)key;
#elif defined(GB_ARCH_32_BIT)
u32 state = ((u32)key) * 747796405u + 2891336453u;
u32 word = ((state >> ((state >> 28u) + 4u)) ^ state) * 277803737u;
return (word >> 22u) ^ word;
res = (word >> 22u) ^ word;
#endif
return res;
}
gb_internal gb_inline u32 ptr_map_hash_key(void const *key) {
return ptr_map_hash_key((uintptr)key);
}
template <typename K, typename V> gb_internal void map_init (PtrMap<K, V> *h, gbAllocator a, isize capacity = 16);
template <typename K, typename V> gb_internal void map_init (PtrMap<K, V> *h, isize capacity = 16);
template <typename K, typename V> gb_internal void map_destroy (PtrMap<K, V> *h);
template <typename K, typename V> gb_internal V * map_get (PtrMap<K, V> *h, K key);
template <typename K, typename V> gb_internal void map_set (PtrMap<K, V> *h, K key, V const &value);
template <typename K, typename V> gb_internal bool map_set_if_not_previously_exists(PtrMap<K, V> *h, K key, V const &value); // returns true if it previously existed
template <typename K, typename V> gb_internal void map_remove (PtrMap<K, V> *h, K key);
template <typename K, typename V> gb_internal void map_clear (PtrMap<K, V> *h);
template <typename K, typename V> gb_internal void map_grow (PtrMap<K, V> *h);
@@ -68,11 +71,15 @@ template <typename K, typename V> gb_internal void multi_map_remove (PtrMap<
template <typename K, typename V> gb_internal void multi_map_remove_all(PtrMap<K, V> *h, K key);
#endif
gb_internal gbAllocator map_allocator(void) {
return heap_allocator();
}
template <typename K, typename V>
gb_internal gb_inline void map_init(PtrMap<K, V> *h, gbAllocator a, isize capacity) {
gb_internal gb_inline void map_init(PtrMap<K, V> *h, isize capacity) {
capacity = next_pow2_isize(capacity);
slice_init(&h->hashes, a, capacity);
array_init(&h->entries, a, 0, capacity);
slice_init(&h->hashes, map_allocator(), capacity);
array_init(&h->entries, map_allocator(), 0, capacity);
for (isize i = 0; i < capacity; i++) {
h->hashes.data[i] = MAP_SENTINEL;
}
@@ -80,6 +87,9 @@ gb_internal gb_inline void map_init(PtrMap<K, V> *h, gbAllocator a, isize capaci
template <typename K, typename V>
gb_internal gb_inline void map_destroy(PtrMap<K, V> *h) {
if (h->entries.allocator.proc == nullptr) {
h->entries.allocator = map_allocator();
}
slice_free(&h->hashes, h->entries.allocator);
array_free(&h->entries);
}
@@ -103,11 +113,12 @@ gb_internal MapFindResult map__find(PtrMap<K, V> *h, K key) {
fr.hash_index = cast(MapIndex)(hash & (h->hashes.count-1));
fr.entry_index = h->hashes.data[fr.hash_index];
while (fr.entry_index != MAP_SENTINEL) {
if (h->entries.data[fr.entry_index].key == key) {
auto *entry = &h->entries.data[fr.entry_index];
if (entry->key == key) {
return fr;
}
fr.entry_prev = fr.entry_index;
fr.entry_index = h->entries.data[fr.entry_index].next;
fr.entry_index = entry->next;
}
return fr;
}
@@ -162,6 +173,9 @@ gb_internal void map_reset_entries(PtrMap<K, V> *h) {
template <typename K, typename V>
gb_internal void map_reserve(PtrMap<K, V> *h, isize cap) {
if (h->entries.allocator.proc == nullptr) {
h->entries.allocator = map_allocator();
}
array_reserve(&h->entries, cap);
if (h->entries.count*2 < h->hashes.count) {
return;
@@ -178,18 +192,64 @@ gb_internal void map_rehash(PtrMap<K, V> *h, isize new_count) {
template <typename K, typename V>
gb_internal V *map_get(PtrMap<K, V> *h, K key) {
MapIndex index = map__find(h, key).entry_index;
if (index != MAP_SENTINEL) {
return &h->entries.data[index].value;
MapIndex hash_index = MAP_SENTINEL;
MapIndex entry_prev = MAP_SENTINEL;
MapIndex entry_index = MAP_SENTINEL;
if (h->hashes.count != 0) {
u32 hash = ptr_map_hash_key(key);
hash_index = cast(MapIndex)(hash & (h->hashes.count-1));
entry_index = h->hashes.data[hash_index];
while (entry_index != MAP_SENTINEL) {
auto *entry = &h->entries.data[entry_index];
if (entry->key == key) {
return &entry->value;
}
entry_prev = entry_index;
entry_index = entry->next;
}
}
return nullptr;
}
template <typename K, typename V>
gb_internal V *map_try_get(PtrMap<K, V> *h, K key, MapFindResult *fr_) {
MapFindResult fr = {MAP_SENTINEL, MAP_SENTINEL, MAP_SENTINEL};
if (h->hashes.count != 0) {
u32 hash = ptr_map_hash_key(key);
fr.hash_index = cast(MapIndex)(hash & (h->hashes.count-1));
fr.entry_index = h->hashes.data[fr.hash_index];
while (fr.entry_index != MAP_SENTINEL) {
auto *entry = &h->entries.data[fr.entry_index];
if (entry->key == key) {
return &entry->value;
}
fr.entry_prev = fr.entry_index;
fr.entry_index = entry->next;
}
}
if (h->hashes.count == 0 || map__full(h)) {
map_grow(h);
}
if (fr_) *fr_ = fr;
return nullptr;
}
template <typename K, typename V>
gb_internal void map_set_internal_from_try_get(PtrMap<K, V> *h, K key, V const &value, MapFindResult const &fr) {
MapIndex index = map__add_entry(h, key);
if (fr.entry_prev != MAP_SENTINEL) {
h->entries.data[fr.entry_prev].next = index;
} else {
h->hashes.data[fr.hash_index] = index;
}
h->entries.data[index].value = value;
}
template <typename K, typename V>
gb_internal V &map_must_get(PtrMap<K, V> *h, K key) {
MapIndex index = map__find(h, key).entry_index;
GB_ASSERT(index != MAP_SENTINEL);
return h->entries.data[index].value;
V *ptr = map_get(h, key);
GB_ASSERT(ptr != nullptr);
return *ptr;
}
template <typename K, typename V>
@@ -217,6 +277,33 @@ gb_internal void map_set(PtrMap<K, V> *h, K key, V const &value) {
}
}
// returns true if it previously existed
template <typename K, typename V>
gb_internal bool map_set_if_not_previously_exists(PtrMap<K, V> *h, K key, V const &value) {
MapIndex index;
MapFindResult fr;
if (h->hashes.count == 0) {
map_grow(h);
}
fr = map__find(h, key);
if (fr.entry_index != MAP_SENTINEL) {
return true;
} else {
index = map__add_entry(h, key);
if (fr.entry_prev != MAP_SENTINEL) {
h->entries.data[fr.entry_prev].next = index;
} else {
h->hashes.data[fr.hash_index] = index;
}
}
h->entries.data[index].value = value;
if (map__full(h)) {
map_grow(h);
}
return false;
}
template <typename K, typename V>
gb_internal void map__erase(PtrMap<K, V> *h, MapFindResult const &fr) {

View File

@@ -1,256 +1,215 @@
template <typename T>
struct PtrSetEntry {
T ptr;
MapIndex next;
struct TypeIsPointer {
enum {value = false};
};
template <typename T>
struct TypeIsPointer<T *> {
enum {value = true};
};
template <typename T>
struct PtrSet {
Slice<MapIndex> hashes;
Array<PtrSetEntry<T>> entries;
static_assert(TypeIsPointer<T>::value, "PtrSet::T must be a pointer");
static constexpr uintptr TOMBSTONE = ~(uintptr)(0ull);
T * keys;
usize count;
usize capacity;
};
template <typename T> gb_internal void ptr_set_init (PtrSet<T> *s, gbAllocator a, isize capacity = 16);
template <typename T> gb_internal void ptr_set_init (PtrSet<T> *s, isize capacity = 16);
template <typename T> gb_internal void ptr_set_destroy(PtrSet<T> *s);
template <typename T> gb_internal T ptr_set_add (PtrSet<T> *s, T ptr);
template <typename T> gb_internal bool ptr_set_update (PtrSet<T> *s, T ptr); // returns true if it previously existed
template <typename T> gb_internal bool ptr_set_exists (PtrSet<T> *s, T ptr);
template <typename T> gb_internal void ptr_set_remove (PtrSet<T> *s, T ptr);
template <typename T> gb_internal void ptr_set_clear (PtrSet<T> *s);
template <typename T> gb_internal void ptr_set_grow (PtrSet<T> *s);
template <typename T> gb_internal void ptr_set_rehash (PtrSet<T> *s, isize new_count);
template <typename T> gb_internal void ptr_set_reserve(PtrSet<T> *h, isize cap);
gb_internal gbAllocator ptr_set_allocator(void) {
return heap_allocator();
}
template <typename T>
gb_internal void ptr_set_init(PtrSet<T> *s, gbAllocator a, isize capacity) {
gb_internal void ptr_set_init(PtrSet<T> *s, isize capacity) {
GB_ASSERT(s->keys == nullptr);
if (capacity != 0) {
capacity = next_pow2_isize(gb_max(16, capacity));
s->keys = gb_alloc_array(ptr_set_allocator(), T, capacity);
// This memory will be zeroed, no need to explicitly zero it
}
slice_init(&s->hashes, a, capacity);
array_init(&s->entries, a, 0, capacity);
for (isize i = 0; i < capacity; i++) {
s->hashes.data[i] = MAP_SENTINEL;
}
s->count = 0;
s->capacity = capacity;
}
template <typename T>
gb_internal void ptr_set_destroy(PtrSet<T> *s) {
slice_free(&s->hashes, s->entries.allocator);
array_free(&s->entries);
gb_free(ptr_set_allocator(), s->keys);
s->keys = nullptr;
s->count = 0;
s->capacity = 0;
}
template <typename T>
gb_internal MapIndex ptr_set__add_entry(PtrSet<T> *s, T ptr) {
PtrSetEntry<T> e = {};
e.ptr = ptr;
e.next = MAP_SENTINEL;
array_add(&s->entries, e);
return cast(MapIndex)(s->entries.count-1);
}
template <typename T>
gb_internal MapFindResult ptr_set__find(PtrSet<T> *s, T ptr) {
MapFindResult fr = {MAP_SENTINEL, MAP_SENTINEL, MAP_SENTINEL};
if (s->hashes.count != 0) {
gb_internal isize ptr_set__find(PtrSet<T> *s, T ptr) {
GB_ASSERT(ptr != nullptr);
if (s->count != 0) {
#if 0
for (usize i = 0; i < s->capacity; i++) {
if (s->keys[i] == ptr) {
return i;
}
}
#else
u32 hash = ptr_map_hash_key(ptr);
fr.hash_index = cast(MapIndex)(hash & (s->hashes.count-1));
fr.entry_index = s->hashes.data[fr.hash_index];
while (fr.entry_index != MAP_SENTINEL) {
if (s->entries.data[fr.entry_index].ptr == ptr) {
return fr;
usize mask = s->capacity-1;
usize hash_index = cast(usize)hash & mask;
for (usize i = 0; i < s->capacity; i++) {
T key = s->keys[hash_index];
if (key == ptr) {
return hash_index;
} else if (key == nullptr) {
return -1;
}
fr.entry_prev = fr.entry_index;
fr.entry_index = s->entries.data[fr.entry_index].next;
hash_index = (hash_index+1)&mask;
}
}
return fr;
}
template <typename T>
gb_internal MapFindResult ptr_set__find_from_entry(PtrSet<T> *s, PtrSetEntry<T> *e) {
MapFindResult fr = {MAP_SENTINEL, MAP_SENTINEL, MAP_SENTINEL};
if (s->hashes.count != 0) {
u32 hash = ptr_map_hash_key(e->ptr);
fr.hash_index = cast(MapIndex)(hash & (s->hashes.count-1));
fr.entry_index = s->hashes.data[fr.hash_index];
while (fr.entry_index != MAP_SENTINEL) {
if (&s->entries.data[fr.entry_index] == e) {
return fr;
}
fr.entry_prev = fr.entry_index;
fr.entry_index = s->entries.data[fr.entry_index].next;
}
}
return fr;
}
template <typename T>
gb_internal bool ptr_set__full(PtrSet<T> *s) {
return 0.75f * s->hashes.count <= s->entries.count;
}
template <typename T>
gb_internal gb_inline void ptr_set_grow(PtrSet<T> *s) {
isize new_count = gb_max(s->hashes.count<<1, 16);
ptr_set_rehash(s, new_count);
}
template <typename T>
gb_internal void ptr_set_reset_entries(PtrSet<T> *s) {
for (isize i = 0; i < s->hashes.count; i++) {
s->hashes.data[i] = MAP_SENTINEL;
}
for (isize i = 0; i < s->entries.count; i++) {
MapFindResult fr;
PtrSetEntry<T> *e = &s->entries.data[i];
e->next = MAP_SENTINEL;
fr = ptr_set__find_from_entry(s, e);
if (fr.entry_prev == MAP_SENTINEL) {
s->hashes[fr.hash_index] = cast(MapIndex)i;
} else {
s->entries[fr.entry_prev].next = cast(MapIndex)i;
}
}
}
template <typename T>
gb_internal void ptr_set_reserve(PtrSet<T> *s, isize cap) {
array_reserve(&s->entries, cap);
if (s->entries.count*2 < s->hashes.count) {
return;
}
slice_resize(&s->hashes, s->entries.allocator, cap*2);
ptr_set_reset_entries(s);
}
template <typename T>
gb_internal void ptr_set_rehash(PtrSet<T> *s, isize new_count) {
ptr_set_reserve(s, new_count);
}
template <typename T>
gb_internal gb_inline bool ptr_set_exists(PtrSet<T> *s, T ptr) {
isize index = ptr_set__find(s, ptr).entry_index;
return index != MAP_SENTINEL;
}
template <typename T>
gb_internal gb_inline isize ptr_entry_index(PtrSet<T> *s, T ptr) {
isize index = ptr_set__find(s, ptr).entry_index;
if (index != MAP_SENTINEL) {
return index;
#endif
}
return -1;
}
// Returns true if it already exists
template <typename T>
gb_internal T ptr_set_add(PtrSet<T> *s, T ptr) {
MapIndex index;
MapFindResult fr;
if (s->hashes.count == 0) {
ptr_set_grow(s);
}
fr = ptr_set__find(s, ptr);
if (fr.entry_index == MAP_SENTINEL) {
index = ptr_set__add_entry(s, ptr);
if (fr.entry_prev != MAP_SENTINEL) {
s->entries.data[fr.entry_prev].next = index;
} else {
s->hashes.data[fr.hash_index] = index;
}
}
if (ptr_set__full(s)) {
ptr_set_grow(s);
}
return ptr;
gb_internal bool ptr_set__full(PtrSet<T> *s) {
return 0.75f * s->capacity <= s->count;
}
template <typename T>
gb_internal gb_inline void ptr_set_grow(PtrSet<T> *old_set) {
if (old_set->capacity == 0) {
ptr_set_init(old_set);
return;
}
PtrSet<T> new_set = {};
ptr_set_init(&new_set, gb_max(old_set->capacity<<1, 16));
for (T ptr : *old_set) {
bool was_new = ptr_set_update(&new_set, ptr);
GB_ASSERT(!was_new);
}
GB_ASSERT(old_set->count == new_set.count);
ptr_set_destroy(old_set);
*old_set = new_set;
}
template <typename T>
gb_internal gb_inline bool ptr_set_exists(PtrSet<T> *s, T ptr) {
return ptr_set__find(s, ptr) >= 0;
}
template <typename T>
gb_internal bool ptr_set_update(PtrSet<T> *s, T ptr) { // returns true if it previously existsed
bool exists = false;
MapIndex index;
MapFindResult fr;
if (s->hashes.count == 0) {
if (ptr_set_exists(s, ptr)) {
return true;
}
if (s->keys == nullptr) {
ptr_set_init(s);
} else if (ptr_set__full(s)) {
ptr_set_grow(s);
}
fr = ptr_set__find(s, ptr);
if (fr.entry_index != MAP_SENTINEL) {
exists = true;
} else {
index = ptr_set__add_entry(s, ptr);
if (fr.entry_prev != MAP_SENTINEL) {
s->entries.data[fr.entry_prev].next = index;
} else {
s->hashes.data[fr.hash_index] = index;
GB_ASSERT(s->count < s->capacity);
GB_ASSERT(s->capacity >= 0);
usize mask = s->capacity-1;
u32 hash = ptr_map_hash_key(ptr);
usize hash_index = (cast(usize)hash) & mask;
GB_ASSERT(hash_index < s->capacity);
for (usize i = 0; i < s->capacity; i++) {
T *key = &s->keys[hash_index];
GB_ASSERT(*key != ptr);
if (*key == (T)PtrSet<T>::TOMBSTONE || *key == nullptr) {
*key = ptr;
s->count++;
return false;
}
hash_index = (hash_index+1)&mask;
}
if (ptr_set__full(s)) {
ptr_set_grow(s);
}
return exists;
GB_PANIC("ptr set out of memory");
return false;
}
template <typename T>
gb_internal void ptr_set__erase(PtrSet<T> *s, MapFindResult fr) {
MapFindResult last;
if (fr.entry_prev == MAP_SENTINEL) {
s->hashes.data[fr.hash_index] = s->entries.data[fr.entry_index].next;
} else {
s->entries.data[fr.entry_prev].next = s->entries.data[fr.entry_index].next;
}
if (cast(isize)fr.entry_index == s->entries.count-1) {
array_pop(&s->entries);
return;
}
s->entries.data[fr.entry_index] = s->entries.data[s->entries.count-1];
last = ptr_set__find(s, s->entries.data[fr.entry_index].ptr);
if (last.entry_prev != MAP_SENTINEL) {
s->entries.data[last.entry_prev].next = fr.entry_index;
} else {
s->hashes.data[last.hash_index] = fr.entry_index;
}
gb_internal T ptr_set_add(PtrSet<T> *s, T ptr) {
ptr_set_update(s, ptr);
return ptr;
}
template <typename T>
gb_internal void ptr_set_remove(PtrSet<T> *s, T ptr) {
MapFindResult fr = ptr_set__find(s, ptr);
if (fr.entry_index != MAP_SENTINEL) {
ptr_set__erase(s, fr);
isize index = ptr_set__find(s, ptr);
if (index >= 0) {
GB_ASSERT(s->count > 0);
s->keys[index] = (T)PtrSet<T>::TOMBSTONE;
s->count--;
}
}
template <typename T>
gb_internal gb_inline void ptr_set_clear(PtrSet<T> *s) {
array_clear(&s->entries);
for (isize i = 0; i < s->hashes.count; i++) {
s->hashes.data[i] = MAP_SENTINEL;
s->count = 0;
gb_zero_size(s->keys, s->capacity*gb_size_of(T));
}
template <typename T>
struct PtrSetIterator {
PtrSet<T> *set;
usize index;
PtrSetIterator<T> &operator++() noexcept {
for (;;) {
++index;
if (set->capacity == index) {
return *this;
}
T key = set->keys[index];
if (key != nullptr && key != (T)PtrSet<T>::TOMBSTONE) {
return *this;
}
}
}
}
bool operator==(PtrSetIterator<T> const &other) const noexcept {
return this->set == other.set && this->index == other.index;
}
operator T *() const {
return &set->keys[index];
}
};
template <typename T>
gb_internal PtrSetEntry<T> *begin(PtrSet<T> &m) {
return m.entries.data;
gb_internal PtrSetIterator<T> begin(PtrSet<T> &set) noexcept {
usize index = 0;
while (index < set.capacity) {
T key = set.keys[index];
if (key != nullptr && key != (T)PtrSet<T>::TOMBSTONE) {
break;
}
index++;
}
return PtrSetIterator<T>{&set, index};
}
template <typename T>
gb_internal PtrSetEntry<T> const *begin(PtrSet<T> const &m) {
return m.entries.data;
}
template <typename T>
gb_internal PtrSetEntry<T> *end(PtrSet<T> &m) {
return m.entries.data + m.entries.count;
}
template <typename T>
gb_internal PtrSetEntry<T> const *end(PtrSet<T> const &m) {
return m.entries.data + m.entries.count;
gb_internal PtrSetIterator<T> end(PtrSet<T> &set) noexcept {
return PtrSetIterator<T>{&set, set.capacity};
}

View File

@@ -52,7 +52,6 @@ gb_internal void mpmc_init(MPMCQueue<T> *q, gbAllocator a, isize size_i) {
size = next_pow2(size);
GB_ASSERT(gb_is_power_of_two(size));
mutex_init(&q->mutex);
q->mask = size-1;
q->allocator = a;
q->nodes = gb_alloc_array(a, T, size);
@@ -65,7 +64,6 @@ gb_internal void mpmc_init(MPMCQueue<T> *q, gbAllocator a, isize size_i) {
template <typename T>
gb_internal void mpmc_destroy(MPMCQueue<T> *q) {
mutex_destroy(&q->mutex);
gb_free(q->allocator, q->nodes);
gb_free(q->allocator, q->indices);
}

View File

@@ -1,10 +1,5 @@
gb_global BlockingMutex string_buffer_mutex = {};
gb_internal void init_string_buffer_memory(void) {
mutex_init(&string_buffer_mutex);
}
// NOTE(bill): Used for UTF-8 strings
struct String {
u8 * text;

View File

@@ -1,6 +1,13 @@
struct StringHashKey {
u32 hash;
String string;
operator String() const noexcept {
return this->string;
}
operator String const &() const noexcept {
return this->string;
}
};
gb_internal gb_inline StringHashKey string_hash_string(String const &s) {
@@ -35,7 +42,7 @@ struct StringMap {
};
template <typename T> gb_internal void string_map_init (StringMap<T> *h, gbAllocator a, isize capacity = 16);
template <typename T> gb_internal void string_map_init (StringMap<T> *h, isize capacity = 16);
template <typename T> gb_internal void string_map_destroy (StringMap<T> *h);
template <typename T> gb_internal T * string_map_get (StringMap<T> *h, char const *key);
@@ -56,11 +63,15 @@ template <typename T> gb_internal void string_map_grow (StringMap<T>
template <typename T> gb_internal void string_map_rehash (StringMap<T> *h, isize new_count);
template <typename T> gb_internal void string_map_reserve (StringMap<T> *h, isize cap);
gb_internal gbAllocator string_map_allocator(void) {
return heap_allocator();
}
template <typename T>
gb_internal gb_inline void string_map_init(StringMap<T> *h, gbAllocator a, isize capacity) {
gb_internal gb_inline void string_map_init(StringMap<T> *h, isize capacity) {
capacity = next_pow2_isize(capacity);
slice_init(&h->hashes, a, capacity);
array_init(&h->entries, a, 0, capacity);
slice_init(&h->hashes, string_map_allocator(), capacity);
array_init(&h->entries, string_map_allocator(), 0, capacity);
for (isize i = 0; i < capacity; i++) {
h->hashes.data[i] = MAP_SENTINEL;
}
@@ -68,6 +79,9 @@ gb_internal gb_inline void string_map_init(StringMap<T> *h, gbAllocator a, isize
template <typename T>
gb_internal gb_inline void string_map_destroy(StringMap<T> *h) {
if (h->entries.allocator.proc == nullptr) {
h->entries.allocator = string_map_allocator();
}
slice_free(&h->hashes, h->entries.allocator);
array_free(&h->entries);
}
@@ -147,6 +161,9 @@ gb_internal void string_map_reset_entries(StringMap<T> *h) {
template <typename T>
gb_internal void string_map_reserve(StringMap<T> *h, isize cap) {
if (h->entries.allocator.proc == nullptr) {
h->entries.allocator = string_map_allocator();
}
array_reserve(&h->entries, cap);
if (h->entries.count*2 < h->hashes.count) {
return;
@@ -163,9 +180,18 @@ gb_internal void string_map_rehash(StringMap<T> *h, isize new_count) {
template <typename T>
gb_internal T *string_map_get(StringMap<T> *h, StringHashKey const &key) {
isize index = string_map__find(h, key).entry_index;
if (index != MAP_SENTINEL) {
return &h->entries.data[index].value;
MapFindResult fr = {MAP_SENTINEL, MAP_SENTINEL, MAP_SENTINEL};
if (h->hashes.count != 0) {
fr.hash_index = cast(MapIndex)(key.hash & (h->hashes.count-1));
fr.entry_index = h->hashes.data[fr.hash_index];
while (fr.entry_index != MAP_SENTINEL) {
auto *entry = &h->entries.data[fr.entry_index];
if (string_hash_key_equal(entry->key, key)) {
return &entry->value;
}
fr.entry_prev = fr.entry_index;
fr.entry_index = entry->next;
}
}
return nullptr;
}
@@ -273,11 +299,11 @@ gb_internal gb_inline void string_map_clear(StringMap<T> *h) {
template <typename T>
gb_internal StringMapEntry<T> *begin(StringMap<T> &m) {
gb_internal StringMapEntry<T> *begin(StringMap<T> &m) noexcept {
return m.entries.data;
}
template <typename T>
gb_internal StringMapEntry<T> const *begin(StringMap<T> const &m) {
gb_internal StringMapEntry<T> const *begin(StringMap<T> const &m) noexcept {
return m.entries.data;
}
@@ -288,6 +314,6 @@ gb_internal StringMapEntry<T> *end(StringMap<T> &m) {
}
template <typename T>
gb_internal StringMapEntry<T> const *end(StringMap<T> const &m) {
gb_internal StringMapEntry<T> const *end(StringMap<T> const &m) noexcept {
return m.entries.data + m.entries.count;
}

View File

@@ -2,6 +2,13 @@ struct StringSetEntry {
u32 hash;
MapIndex next;
String value;
operator String const() const noexcept {
return this->value;
}
operator String const &() const noexcept {
return this->value;
}
};
struct StringSet {
@@ -10,7 +17,7 @@ struct StringSet {
};
gb_internal void string_set_init (StringSet *s, gbAllocator a, isize capacity = 16);
gb_internal void string_set_init (StringSet *s, isize capacity = 16);
gb_internal void string_set_destroy(StringSet *s);
gb_internal void string_set_add (StringSet *s, String const &str);
gb_internal bool string_set_update (StringSet *s, String const &str); // returns true if it previously existed
@@ -20,18 +27,24 @@ gb_internal void string_set_clear (StringSet *s);
gb_internal void string_set_grow (StringSet *s);
gb_internal void string_set_rehash (StringSet *s, isize new_count);
gb_internal gbAllocator string_set_allocator(void) {
return heap_allocator();
}
gb_internal gb_inline void string_set_init(StringSet *s, gbAllocator a, isize capacity) {
gb_internal gb_inline void string_set_init(StringSet *s, isize capacity) {
capacity = next_pow2_isize(gb_max(16, capacity));
slice_init(&s->hashes, a, capacity);
array_init(&s->entries, a, 0, capacity);
slice_init(&s->hashes, string_set_allocator(), capacity);
array_init(&s->entries, string_set_allocator(), 0, capacity);
for (isize i = 0; i < capacity; i++) {
s->hashes.data[i] = MAP_SENTINEL;
}
}
gb_internal gb_inline void string_set_destroy(StringSet *s) {
if (s->entries.allocator.proc == nullptr) {
s->entries.allocator = string_set_allocator();
}
slice_free(&s->hashes, s->entries.allocator);
array_free(&s->entries);
}
@@ -106,6 +119,9 @@ gb_internal void string_set_reset_entries(StringSet *s) {
}
gb_internal void string_set_reserve(StringSet *s, isize cap) {
if (s->entries.allocator.proc == nullptr) {
s->entries.allocator = string_set_allocator();
}
array_reserve(&s->entries, cap);
if (s->entries.count*2 < s->hashes.count) {
return;
@@ -217,19 +233,18 @@ gb_internal gb_inline void string_set_clear(StringSet *s) {
}
gb_internal StringSetEntry *begin(StringSet &m) {
gb_internal StringSetEntry *begin(StringSet &m) noexcept {
return m.entries.data;
}
gb_internal StringSetEntry const *begin(StringSet const &m) {
gb_internal StringSetEntry const *begin(StringSet const &m) noexcept {
return m.entries.data;
}
gb_internal StringSetEntry *end(StringSet &m) {
gb_internal StringSetEntry *end(StringSet &m) noexcept {
return m.entries.data + m.entries.count;
}
gb_internal StringSetEntry const *end(StringSet const &m) {
gb_internal StringSetEntry const *end(StringSet const &m) noexcept {
return m.entries.data + m.entries.count;
}

View File

@@ -5,7 +5,7 @@ struct ThreadPool;
gb_thread_local Thread *current_thread;
gb_internal void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize thread_count, char const *worker_name);
gb_internal void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize worker_count, char const *worker_name);
gb_internal void thread_pool_destroy(ThreadPool *pool);
gb_internal bool thread_pool_add_task(ThreadPool *pool, WorkerTaskProc *proc, void *data);
gb_internal void thread_pool_wait(ThreadPool *pool);
@@ -16,18 +16,21 @@ struct ThreadPool {
Slice<Thread> threads;
std::atomic<bool> running;
BlockingMutex task_lock;
Condition tasks_available;
Futex tasks_available;
Futex tasks_left;
};
gb_internal void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize thread_count, char const *worker_name) {
mutex_init(&pool->task_lock);
condition_init(&pool->tasks_available);
gb_internal isize current_thread_index(void) {
return current_thread ? current_thread->idx : 0;
}
gb_internal void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize worker_count, char const *worker_name) {
pool->allocator = a;
slice_init(&pool->threads, a, thread_count + 1);
slice_init(&pool->threads, a, worker_count + 1);
// NOTE: this needs to be initialized before any thread starts
pool->running.store(true, std::memory_order_seq_cst);
// setup the main thread
thread_init(pool, &pool->threads[0], 0);
@@ -37,62 +40,55 @@ gb_internal void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize
Thread *t = &pool->threads[i];
thread_init_and_start(pool, t, i);
}
pool->running = true;
}
gb_internal void thread_pool_destroy(ThreadPool *pool) {
pool->running = false;
pool->running.store(false, std::memory_order_seq_cst);
for_array_off(i, 1, pool->threads) {
Thread *t = &pool->threads[i];
condition_broadcast(&pool->tasks_available);
pool->tasks_available.fetch_add(1, std::memory_order_relaxed);
futex_broadcast(&pool->tasks_available);
thread_join_and_destroy(t);
}
for_array(i, pool->threads) {
free(pool->threads[i].queue);
}
gb_free(pool->allocator, pool->threads.data);
mutex_destroy(&pool->task_lock);
condition_destroy(&pool->tasks_available);
}
void thread_pool_queue_push(Thread *thread, WorkerTask task) {
uint64_t capture;
uint64_t new_capture;
u64 capture;
u64 new_capture;
do {
capture = thread->head_and_tail.load();
uint64_t mask = thread->capacity - 1;
uint64_t head = (capture >> 32) & mask;
uint64_t tail = ((uint32_t)capture) & mask;
u64 mask = thread->capacity - 1;
u64 head = (capture >> 32) & mask;
u64 tail = ((u32)capture) & mask;
uint64_t new_head = (head + 1) & mask;
if (new_head == tail) {
GB_PANIC("Thread Queue Full!\n");
}
u64 new_head = (head + 1) & mask;
GB_ASSERT_MSG(new_head != tail, "Thread Queue Full!");
// This *must* be done in here, to avoid a potential race condition where we no longer own the slot by the time we're assigning
thread->queue[head] = task;
new_capture = (new_head << 32) | tail;
} while (!thread->head_and_tail.compare_exchange_weak(capture, new_capture));
thread->pool->tasks_left.fetch_add(1);
condition_broadcast(&thread->pool->tasks_available);
thread->pool->tasks_left.fetch_add(1, std::memory_order_release);
thread->pool->tasks_available.fetch_add(1, std::memory_order_relaxed);
futex_broadcast(&thread->pool->tasks_available);
}
bool thread_pool_queue_pop(Thread *thread, WorkerTask *task) {
uint64_t capture;
uint64_t new_capture;
u64 capture;
u64 new_capture;
do {
capture = thread->head_and_tail.load();
capture = thread->head_and_tail.load(std::memory_order_acquire);
uint64_t mask = thread->capacity - 1;
uint64_t head = (capture >> 32) & mask;
uint64_t tail = ((uint32_t)capture) & mask;
u64 mask = thread->capacity - 1;
u64 head = (capture >> 32) & mask;
u64 tail = ((u32)capture) & mask;
uint64_t new_tail = (tail + 1) & mask;
u64 new_tail = (tail + 1) & mask;
if (tail == head) {
return false;
}
@@ -101,7 +97,7 @@ bool thread_pool_queue_pop(Thread *thread, WorkerTask *task) {
*task = thread->queue[tail];
new_capture = (head << 32) | new_tail;
} while (!thread->head_and_tail.compare_exchange_weak(capture, new_capture));
} while (!thread->head_and_tail.compare_exchange_weak(capture, new_capture, std::memory_order_release));
return true;
}
@@ -118,12 +114,11 @@ gb_internal bool thread_pool_add_task(ThreadPool *pool, WorkerTaskProc *proc, vo
gb_internal void thread_pool_wait(ThreadPool *pool) {
WorkerTask task;
while (pool->tasks_left) {
while (pool->tasks_left.load(std::memory_order_acquire)) {
// if we've got tasks on our queue, run them
while (thread_pool_queue_pop(current_thread, &task)) {
task.do_work(task.data);
pool->tasks_left.fetch_sub(1);
pool->tasks_left.fetch_sub(1, std::memory_order_release);
}
@@ -131,12 +126,12 @@ gb_internal void thread_pool_wait(ThreadPool *pool) {
// This *must* be executed in this order, so the futex wakes immediately
// if rem_tasks has changed since we checked last, otherwise the program
// will permanently sleep
Footex rem_tasks = pool->tasks_left.load();
if (!rem_tasks) {
break;
Footex rem_tasks = pool->tasks_left.load(std::memory_order_acquire);
if (rem_tasks == 0) {
return;
}
tpool_wait_on_addr(&pool->tasks_left, rem_tasks);
futex_wait(&pool->tasks_left, rem_tasks);
}
}
@@ -144,56 +139,53 @@ gb_internal THREAD_PROC(thread_pool_thread_proc) {
WorkerTask task;
current_thread = thread;
ThreadPool *pool = current_thread->pool;
// debugf("worker id: %td\n", current_thread->idx);
for (;;) {
work_start:
if (!pool->running) {
break;
}
while (pool->running.load(std::memory_order_seq_cst)) {
// If we've got tasks to process, work through them
size_t finished_tasks = 0;
usize finished_tasks = 0;
i32 state;
while (thread_pool_queue_pop(current_thread, &task)) {
task.do_work(task.data);
pool->tasks_left.fetch_sub(1);
pool->tasks_left.fetch_sub(1, std::memory_order_release);
finished_tasks += 1;
}
if (finished_tasks > 0 && !pool->tasks_left) {
tpool_wake_addr(&pool->tasks_left);
if (finished_tasks > 0 && pool->tasks_left.load(std::memory_order_acquire) == 0) {
futex_signal(&pool->tasks_left);
}
// If there's still work somewhere and we don't have it, steal it
if (pool->tasks_left) {
isize idx = current_thread->idx;
if (pool->tasks_left.load(std::memory_order_acquire)) {
usize idx = cast(usize)current_thread->idx;
for_array(i, pool->threads) {
if (!pool->tasks_left) {
if (pool->tasks_left.load(std::memory_order_acquire) == 0) {
break;
}
idx = (idx + 1) % pool->threads.count;
Thread *thread = &pool->threads[idx];
idx = (idx + 1) % cast(usize)pool->threads.count;
Thread *thread = &pool->threads.data[idx];
WorkerTask task;
if (!thread_pool_queue_pop(thread, &task)) {
continue;
if (thread_pool_queue_pop(thread, &task)) {
task.do_work(task.data);
pool->tasks_left.fetch_sub(1, std::memory_order_release);
if (pool->tasks_left.load(std::memory_order_acquire) == 0) {
futex_signal(&pool->tasks_left);
}
goto main_loop_continue;
}
task.do_work(task.data);
pool->tasks_left.fetch_sub(1);
if (!pool->tasks_left) {
tpool_wake_addr(&pool->tasks_left);
}
goto work_start;
}
}
// if we've done all our work, and there's nothing to steal, go to sleep
mutex_lock(&pool->task_lock);
condition_wait(&pool->tasks_available, &pool->task_lock);
mutex_unlock(&pool->task_lock);
state = pool->tasks_available.load(std::memory_order_acquire);
futex_wait(&pool->tasks_available, state);
main_loop_continue:;
}
return 0;

View File

@@ -8,10 +8,12 @@
struct BlockingMutex;
struct RecursiveMutex;
struct RwMutex;
struct Semaphore;
struct Condition;
struct Thread;
struct ThreadPool;
struct Parker;
#define THREAD_PROC(name) isize name(struct Thread *thread)
gb_internal THREAD_PROC(thread_pool_thread_proc);
@@ -41,31 +43,40 @@ struct Thread {
struct ThreadPool *pool;
};
typedef std::atomic<i32> Futex;
typedef volatile i32 Footex;
gb_internal void futex_wait(Futex *addr, Footex val);
gb_internal void futex_signal(Futex *addr);
gb_internal void futex_broadcast(Futex *addr);
gb_internal void mutex_init (BlockingMutex *m);
gb_internal void mutex_destroy (BlockingMutex *m);
gb_internal void mutex_lock (BlockingMutex *m);
gb_internal bool mutex_try_lock(BlockingMutex *m);
gb_internal void mutex_unlock (BlockingMutex *m);
gb_internal void mutex_init (RecursiveMutex *m);
gb_internal void mutex_destroy (RecursiveMutex *m);
gb_internal void mutex_lock (RecursiveMutex *m);
gb_internal bool mutex_try_lock(RecursiveMutex *m);
gb_internal void mutex_unlock (RecursiveMutex *m);
gb_internal void semaphore_init (Semaphore *s);
gb_internal void semaphore_destroy(Semaphore *s);
gb_internal void rw_mutex_lock (RwMutex *m);
gb_internal bool rw_mutex_try_lock (RwMutex *m);
gb_internal void rw_mutex_unlock (RwMutex *m);
gb_internal void rw_mutex_shared_lock (RwMutex *m);
gb_internal bool rw_mutex_try_shared_lock(RwMutex *m);
gb_internal void rw_mutex_shared_unlock (RwMutex *m);
gb_internal void semaphore_post (Semaphore *s, i32 count);
gb_internal void semaphore_wait (Semaphore *s);
gb_internal void semaphore_release(Semaphore *s) { semaphore_post(s, 1); }
gb_internal void condition_init(Condition *c);
gb_internal void condition_destroy(Condition *c);
gb_internal void condition_broadcast(Condition *c);
gb_internal void condition_signal(Condition *c);
gb_internal void condition_wait(Condition *c, BlockingMutex *m);
gb_internal void condition_wait_with_timeout(Condition *c, BlockingMutex *m, u32 timeout_in_ms);
gb_internal void park(Parker *p);
gb_internal void unpark_one(Parker *p);
gb_internal void unpark_all(Parker *p);
gb_internal u32 thread_current_id(void);
@@ -79,22 +90,23 @@ gb_internal void yield_process(void);
struct MutexGuard {
MutexGuard() = delete;
MutexGuard() = delete;
MutexGuard(MutexGuard const &) = delete;
MutexGuard(MutexGuard &&) = delete;
MutexGuard(BlockingMutex *bm) : bm{bm} {
explicit MutexGuard(BlockingMutex *bm) noexcept : bm{bm} {
mutex_lock(this->bm);
}
MutexGuard(RecursiveMutex *rm) : rm{rm} {
explicit MutexGuard(RecursiveMutex *rm) noexcept : rm{rm} {
mutex_lock(this->rm);
}
MutexGuard(BlockingMutex &bm) : bm{&bm} {
explicit MutexGuard(BlockingMutex &bm) noexcept : bm{&bm} {
mutex_lock(this->bm);
}
MutexGuard(RecursiveMutex &rm) : rm{&rm} {
explicit MutexGuard(RecursiveMutex &rm) noexcept : rm{&rm} {
mutex_lock(this->rm);
}
~MutexGuard() {
~MutexGuard() noexcept {
if (this->bm) {
mutex_unlock(this->bm);
} else if (this->rm) {
@@ -102,24 +114,87 @@ struct MutexGuard {
}
}
operator bool() const { return true; }
operator bool() const noexcept { return true; }
BlockingMutex *bm;
RecursiveMutex *rm;
};
#define MUTEX_GUARD_BLOCK(m) if (MutexGuard GB_DEFER_3(_mutex_guard_){m})
#define MUTEX_GUARD(m) MutexGuard GB_DEFER_3(_mutex_guard_){m}
#define MUTEX_GUARD(m) mutex_lock(m); defer (mutex_unlock(m))
struct RecursiveMutex {
Futex owner;
i32 recursion;
};
gb_internal void mutex_lock(RecursiveMutex *m) {
Futex tid;
tid.store(cast(i32)thread_current_id());
for (;;) {
i32 prev_owner = 0;
m->owner.compare_exchange_strong(prev_owner, tid, std::memory_order_acquire, std::memory_order_acquire);
if (prev_owner == 0 || prev_owner == tid) {
m->recursion++;
// inside the lock
return;
}
futex_wait(&m->owner, prev_owner);
}
}
gb_internal bool mutex_try_lock(RecursiveMutex *m) {
Futex tid;
tid.store(cast(i32)thread_current_id());
i32 prev_owner = 0;
m->owner.compare_exchange_strong(prev_owner, tid, std::memory_order_acquire, std::memory_order_acquire);
if (prev_owner == 0 || prev_owner == tid) {
m->recursion++;
// inside the lock
return true;
}
return false;
}
gb_internal void mutex_unlock(RecursiveMutex *m) {
m->recursion--;
if (m->recursion != 0) {
return;
}
m->owner.exchange(0, std::memory_order_release);
futex_signal(&m->owner);
// outside the lock
}
struct Semaphore {
Futex count;
};
gb_internal void semaphore_post(Semaphore *s, i32 count) {
s->count.fetch_add(count, std::memory_order_release);
if (s->count == 1) {
futex_signal(&s->count);
} else {
futex_broadcast(&s->count);
}
}
gb_internal void semaphore_wait(Semaphore *s) {
for (;;) {
i32 original_count = s->count.load(std::memory_order_relaxed);
while (original_count == 0) {
futex_wait(&s->count, original_count);
original_count = s->count;
}
if (!s->count.compare_exchange_strong(original_count, original_count-1, std::memory_order_acquire, std::memory_order_acquire)) {
return;
}
}
}
#if defined(GB_SYSTEM_WINDOWS)
struct BlockingMutex {
SRWLOCK srwlock;
};
gb_internal void mutex_init(BlockingMutex *m) {
}
gb_internal void mutex_destroy(BlockingMutex *m) {
}
gb_internal void mutex_lock(BlockingMutex *m) {
AcquireSRWLockExclusive(&m->srwlock);
}
@@ -130,50 +205,10 @@ struct MutexGuard {
ReleaseSRWLockExclusive(&m->srwlock);
}
struct RecursiveMutex {
CRITICAL_SECTION win32_critical_section;
};
gb_internal void mutex_init(RecursiveMutex *m) {
InitializeCriticalSection(&m->win32_critical_section);
}
gb_internal void mutex_destroy(RecursiveMutex *m) {
DeleteCriticalSection(&m->win32_critical_section);
}
gb_internal void mutex_lock(RecursiveMutex *m) {
EnterCriticalSection(&m->win32_critical_section);
}
gb_internal bool mutex_try_lock(RecursiveMutex *m) {
return TryEnterCriticalSection(&m->win32_critical_section) != 0;
}
gb_internal void mutex_unlock(RecursiveMutex *m) {
LeaveCriticalSection(&m->win32_critical_section);
}
struct Semaphore {
void *win32_handle;
};
gb_internal void semaphore_init(Semaphore *s) {
s->win32_handle = CreateSemaphoreA(NULL, 0, I32_MAX, NULL);
}
gb_internal void semaphore_destroy(Semaphore *s) {
CloseHandle(s->win32_handle);
}
gb_internal void semaphore_post(Semaphore *s, i32 count) {
ReleaseSemaphore(s->win32_handle, count, NULL);
}
gb_internal void semaphore_wait(Semaphore *s) {
WaitForSingleObjectEx(s->win32_handle, INFINITE, FALSE);
}
struct Condition {
CONDITION_VARIABLE cond;
};
gb_internal void condition_init(Condition *c) {
}
gb_internal void condition_destroy(Condition *c) {
}
gb_internal void condition_broadcast(Condition *c) {
WakeAllConditionVariable(&c->cond);
}
@@ -183,103 +218,192 @@ struct MutexGuard {
gb_internal void condition_wait(Condition *c, BlockingMutex *m) {
SleepConditionVariableSRW(&c->cond, &m->srwlock, INFINITE, 0);
}
gb_internal void condition_wait_with_timeout(Condition *c, BlockingMutex *m, u32 timeout_in_ms) {
SleepConditionVariableSRW(&c->cond, &m->srwlock, timeout_in_ms, 0);
struct RwMutex {
SRWLOCK srwlock;
};
gb_internal void rw_mutex_lock(RwMutex *m) {
AcquireSRWLockExclusive(&m->srwlock);
}
gb_internal bool rw_mutex_try_lock(RwMutex *m) {
return !!TryAcquireSRWLockExclusive(&m->srwlock);
}
gb_internal void rw_mutex_unlock(RwMutex *m) {
ReleaseSRWLockExclusive(&m->srwlock);
}
gb_internal void rw_mutex_shared_lock(RwMutex *m) {
AcquireSRWLockShared(&m->srwlock);
}
gb_internal bool rw_mutex_try_shared_lock(RwMutex *m) {
return !!TryAcquireSRWLockShared(&m->srwlock);
}
gb_internal void rw_mutex_shared_unlock(RwMutex *m) {
ReleaseSRWLockShared(&m->srwlock);
}
#else
struct BlockingMutex {
pthread_mutex_t pthread_mutex;
enum Internal_Mutex_State : i32 {
Internal_Mutex_State_Unlocked = 0,
Internal_Mutex_State_Locked = 1,
Internal_Mutex_State_Waiting = 2,
};
gb_internal void mutex_init(BlockingMutex *m) {
pthread_mutex_init(&m->pthread_mutex, nullptr);
}
gb_internal void mutex_destroy(BlockingMutex *m) {
pthread_mutex_destroy(&m->pthread_mutex);
struct BlockingMutex {
i32 state_;
Futex &state() {
return *(Futex *)&this->state_;
}
Futex const &state() const {
return *(Futex const *)&this->state_;
}
};
gb_no_inline gb_internal void mutex_lock_slow(BlockingMutex *m, i32 curr_state) {
i32 new_state = curr_state;
for (i32 spin = 0; spin < 100; spin++) {
i32 state = Internal_Mutex_State_Unlocked;
bool ok = m->state().compare_exchange_weak(state, new_state, std::memory_order_acquire, std::memory_order_consume);
if (ok) {
return;
}
if (state == Internal_Mutex_State_Waiting) {
break;
}
for (i32 i = gb_min(spin+1, 32); i > 0; i--) {
yield_thread();
}
}
// Set just in case 100 iterations did not do it
new_state = Internal_Mutex_State_Waiting;
for (;;) {
if (m->state().exchange(Internal_Mutex_State_Waiting, std::memory_order_acquire) == Internal_Mutex_State_Unlocked) {
return;
}
futex_wait(&m->state(), new_state);
yield_thread();
}
}
gb_internal void mutex_lock(BlockingMutex *m) {
pthread_mutex_lock(&m->pthread_mutex);
i32 v = m->state().exchange(Internal_Mutex_State_Locked, std::memory_order_acquire);
if (v != Internal_Mutex_State_Unlocked) {
mutex_lock_slow(m, v);
}
}
gb_internal bool mutex_try_lock(BlockingMutex *m) {
return pthread_mutex_trylock(&m->pthread_mutex) == 0;
i32 v = m->state().exchange(Internal_Mutex_State_Locked, std::memory_order_acquire);
return v == Internal_Mutex_State_Unlocked;
}
gb_no_inline gb_internal void mutex_unlock_slow(BlockingMutex *m) {
futex_signal(&m->state());
}
gb_internal void mutex_unlock(BlockingMutex *m) {
pthread_mutex_unlock(&m->pthread_mutex);
i32 v = m->state().exchange(Internal_Mutex_State_Unlocked, std::memory_order_release);
switch (v) {
case Internal_Mutex_State_Unlocked:
GB_PANIC("Unreachable");
break;
case Internal_Mutex_State_Locked:
// Okay
break;
case Internal_Mutex_State_Waiting:
mutex_unlock_slow(m);
break;
}
}
struct RecursiveMutex {
pthread_mutex_t pthread_mutex;
pthread_mutexattr_t pthread_mutexattr;
};
gb_internal void mutex_init(RecursiveMutex *m) {
pthread_mutexattr_init(&m->pthread_mutexattr);
pthread_mutexattr_settype(&m->pthread_mutexattr, PTHREAD_MUTEX_RECURSIVE);
pthread_mutex_init(&m->pthread_mutex, &m->pthread_mutexattr);
}
gb_internal void mutex_destroy(RecursiveMutex *m) {
pthread_mutex_destroy(&m->pthread_mutex);
}
gb_internal void mutex_lock(RecursiveMutex *m) {
pthread_mutex_lock(&m->pthread_mutex);
}
gb_internal bool mutex_try_lock(RecursiveMutex *m) {
return pthread_mutex_trylock(&m->pthread_mutex) == 0;
}
gb_internal void mutex_unlock(RecursiveMutex *m) {
pthread_mutex_unlock(&m->pthread_mutex);
}
#if defined(GB_SYSTEM_OSX)
struct Semaphore {
semaphore_t osx_handle;
};
gb_internal void semaphore_init (Semaphore *s) { semaphore_create(mach_task_self(), &s->osx_handle, SYNC_POLICY_FIFO, 0); }
gb_internal void semaphore_destroy(Semaphore *s) { semaphore_destroy(mach_task_self(), s->osx_handle); }
gb_internal void semaphore_post (Semaphore *s, i32 count) { while (count --> 0) semaphore_signal(s->osx_handle); }
gb_internal void semaphore_wait (Semaphore *s) { semaphore_wait(s->osx_handle); }
#elif defined(GB_SYSTEM_UNIX)
struct Semaphore {
sem_t unix_handle;
};
gb_internal void semaphore_init (Semaphore *s) { sem_init(&s->unix_handle, 0, 0); }
gb_internal void semaphore_destroy(Semaphore *s) { sem_destroy(&s->unix_handle); }
gb_internal void semaphore_post (Semaphore *s, i32 count) { while (count --> 0) sem_post(&s->unix_handle); }
void semaphore_wait (Semaphore *s) { int i; do { i = sem_wait(&s->unix_handle); } while (i == -1 && errno == EINTR); }
#else
#error Implement Semaphore for this platform
#endif
struct Condition {
pthread_cond_t pthread_cond;
i32 state_;
Futex &state() {
return *(Futex *)&this->state_;
}
Futex const &state() const {
return *(Futex const *)&this->state_;
}
};
gb_internal void condition_init(Condition *c) {
pthread_cond_init(&c->pthread_cond, NULL);
}
gb_internal void condition_destroy(Condition *c) {
pthread_cond_destroy(&c->pthread_cond);
}
gb_internal void condition_broadcast(Condition *c) {
pthread_cond_broadcast(&c->pthread_cond);
c->state().fetch_add(1, std::memory_order_release);
futex_broadcast(&c->state());
}
gb_internal void condition_signal(Condition *c) {
pthread_cond_signal(&c->pthread_cond);
c->state().fetch_add(1, std::memory_order_release);
futex_signal(&c->state());
}
gb_internal void condition_wait(Condition *c, BlockingMutex *m) {
pthread_cond_wait(&c->pthread_cond, &m->pthread_mutex);
i32 state = c->state().load(std::memory_order_relaxed);
mutex_unlock(m);
futex_wait(&c->state(), state);
mutex_lock(m);
}
gb_internal void condition_wait_with_timeout(Condition *c, BlockingMutex *m, u32 timeout_in_ms) {
struct timespec abstime = {};
abstime.tv_sec = timeout_in_ms/1000;
abstime.tv_nsec = cast(long)(timeout_in_ms%1000)*1e6;
pthread_cond_timedwait(&c->pthread_cond, &m->pthread_mutex, &abstime);
struct RwMutex {
// TODO(bill): make this a proper RW mutex
BlockingMutex mutex;
};
gb_internal void rw_mutex_lock(RwMutex *m) {
mutex_lock(&m->mutex);
}
gb_internal bool rw_mutex_try_lock(RwMutex *m) {
return mutex_try_lock(&m->mutex);
}
gb_internal void rw_mutex_unlock(RwMutex *m) {
mutex_unlock(&m->mutex);
}
gb_internal void rw_mutex_shared_lock(RwMutex *m) {
mutex_lock(&m->mutex);
}
gb_internal bool rw_mutex_try_shared_lock(RwMutex *m) {
return mutex_try_lock(&m->mutex);
}
gb_internal void rw_mutex_shared_unlock(RwMutex *m) {
mutex_unlock(&m->mutex);
}
#endif
struct Parker {
Futex state;
};
enum ParkerState : u32 {
ParkerState_Empty = 0,
ParkerState_Notified = 1,
ParkerState_Parked = UINT32_MAX,
};
gb_internal void park(Parker *p) {
if (p->state.fetch_sub(1, std::memory_order_acquire) == ParkerState_Notified) {
return;
}
for (;;) {
futex_wait(&p->state, ParkerState_Parked);
i32 notified = ParkerState_Empty;
if (p->state.compare_exchange_strong(notified, ParkerState_Empty, std::memory_order_acquire, std::memory_order_acquire)) {
return;
}
}
}
gb_internal void unpark_one(Parker *p) {
if (p->state.exchange(ParkerState_Notified, std::memory_order_release) == ParkerState_Parked) {
futex_signal(&p->state);
}
}
gb_internal void unpark_all(Parker *p) {
if (p->state.exchange(ParkerState_Notified, std::memory_order_release) == ParkerState_Parked) {
futex_broadcast(&p->state);
}
}
gb_internal u32 thread_current_id(void) {
u32 thread_id;
@@ -364,12 +488,13 @@ gb_internal void thread_init(ThreadPool *pool, Thread *t, isize idx) {
#endif
t->capacity = 1 << 14; // must be a power of 2
t->queue = (WorkerTask *)calloc(sizeof(WorkerTask), t->capacity);
t->queue = gb_alloc_array(heap_allocator(), WorkerTask, t->capacity);
t->head_and_tail = 0;
t->pool = pool;
t->idx = idx;
}
gb_internal void thread_init_and_start(ThreadPool *pool, Thread *t, isize idx) {
thread_init(pool, t, idx);
isize stack_size = 0;
@@ -400,6 +525,8 @@ gb_internal void thread_join_and_destroy(Thread *t) {
pthread_join(t->posix_handle, NULL);
t->posix_handle = 0;
#endif
gb_free(heap_allocator(), t->queue);
}
gb_internal void thread_set_name(Thread *t, char const *name) {
@@ -441,24 +568,25 @@ gb_internal void thread_set_name(Thread *t, char const *name) {
#include <linux/futex.h>
#include <sys/syscall.h>
typedef std::atomic<int32_t> Futex;
typedef volatile int32_t Footex;
gb_internal void tpool_wake_addr(Futex *addr) {
for (;;) {
int ret = syscall(SYS_futex, addr, FUTEX_WAKE, 1, NULL, NULL, 0);
if (ret == -1) {
perror("Futex wake");
GB_PANIC("Failed in futex wake!\n");
} else if (ret > 0) {
return;
}
gb_internal void futex_signal(Futex *addr) {
int ret = syscall(SYS_futex, addr, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1, NULL, NULL, 0);
if (ret == -1) {
perror("Futex wake");
GB_PANIC("Failed in futex wake!\n");
}
}
gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) {
gb_internal void futex_broadcast(Futex *addr) {
int ret = syscall(SYS_futex, addr, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, INT32_MAX, NULL, NULL, 0);
if (ret == -1) {
perror("Futex wake");
GB_PANIC("Failed in futex wake!\n");
}
}
gb_internal void futex_wait(Futex *addr, Footex val) {
for (;;) {
int ret = syscall(SYS_futex, addr, FUTEX_WAIT, val, NULL, NULL, 0);
int ret = syscall(SYS_futex, addr, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, NULL, NULL, 0);
if (ret == -1) {
if (errno != EAGAIN) {
perror("Futex wait");
@@ -479,14 +607,15 @@ gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) {
#include <sys/types.h>
#include <sys/umtx.h>
typedef std::atomic<int32_t> Futex;
typedef volatile int32_t Footex;
gb_internal void tpool_wake_addr(Futex *addr) {
gb_internal void futex_signal(Futex *addr) {
_umtx_op(addr, UMTX_OP_WAKE, 1, 0, 0);
}
gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) {
gb_internal void futex_broadcast(Futex *addr) {
_umtx_op(addr, UMTX_OP_WAKE, INT32_MAX, 0, 0);
}
gb_internal void futex_wait(Futex *addr, Footex val) {
for (;;) {
int ret = _umtx_op(addr, UMTX_OP_WAIT_UINT, val, 0, NULL);
if (ret == 0) {
@@ -508,12 +637,9 @@ gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) {
#include <sys/futex.h>
typedef std::atomic<int32_t> Futex;
typedef volatile int32_t Footex;
gb_internal void tpool_wake_addr(Futex *addr) {
gb_internal void futex_signal(Futex *f) {
for (;;) {
int ret = futex((volatile uint32_t *)addr, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1, NULL, NULL);
int ret = futex((volatile uint32_t *)f, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1, NULL, NULL);
if (ret == -1) {
if (errno == ETIMEDOUT || errno == EINTR) {
continue;
@@ -527,11 +653,28 @@ gb_internal void tpool_wake_addr(Futex *addr) {
}
}
gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) {
gb_internal void futex_broadcast(Futex *f) {
for (;;) {
int ret = futex((volatile uint32_t *)addr, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, NULL, NULL);
int ret = futex((volatile uint32_t *)f, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, INT32_MAX, NULL, NULL);
if (ret == -1) {
if (*addr != val) {
if (errno == ETIMEDOUT || errno == EINTR) {
continue;
}
perror("Futex wake");
GB_PANIC("futex wake fail");
} else if (ret == 1) {
return;
}
}
}
gb_internal void futex_wait(Futex *f, Footex val) {
for (;;) {
int ret = futex((volatile uint32_t *)f, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, NULL, NULL);
if (ret == -1) {
if (*f != val) {
return;
}
@@ -547,46 +690,58 @@ gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) {
#elif defined(GB_SYSTEM_OSX)
typedef std::atomic<int64_t> Futex;
typedef volatile int64_t Footex;
#define UL_COMPARE_AND_WAIT 0x00000001
#define ULF_NO_ERRNO 0x01000000
extern "C" int __ulock_wait(uint32_t operation, void *addr, uint64_t value, uint32_t timeout); /* timeout is specified in microseconds */
extern "C" int __ulock_wake(uint32_t operation, void *addr, uint64_t wake_value);
gb_internal void tpool_wake_addr(Futex *addr) {
gb_internal void futex_signal(Futex *f) {
for (;;) {
int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, addr, 0);
int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, f, 0);
if (ret >= 0) {
return;
}
ret = -ret;
if (ret == EINTR || ret == EFAULT) {
if (ret == -EINTR || ret == -EFAULT) {
continue;
}
if (ret == ENOENT) {
if (ret == -ENOENT) {
return;
}
GB_PANIC("Failed in futex wake!\n");
}
}
gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) {
gb_internal void futex_broadcast(Futex *f) {
for (;;) {
int ret = __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, addr, val, 0);
enum { ULF_WAKE_ALL = 0x00000100 };
int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO | ULF_WAKE_ALL, f, 0);
if (ret == 0) {
return;
}
if (ret == -EINTR || ret == -EFAULT) {
continue;
}
if (ret == -ENOENT) {
return;
}
GB_PANIC("Failed in futex wake!\n");
}
}
gb_internal void futex_wait(Futex *f, Footex val) {
for (;;) {
int ret = __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, f, val, 0);
if (ret >= 0) {
if (*addr != val) {
if (*f != val) {
return;
}
continue;
}
ret = -ret;
if (ret == EINTR || ret == EFAULT) {
continue;
if (ret == -EINTR || ret == -EFAULT) {continue;
ret = -ret;
}
if (ret == ENOENT) {
if (ret == -ENOENT) {
return;
}
@@ -594,18 +749,19 @@ gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) {
}
}
#elif defined(GB_SYSTEM_WINDOWS)
typedef std::atomic<int64_t> Futex;
typedef volatile int64_t Footex;
gb_internal void tpool_wake_addr(Futex *addr) {
WakeByAddressSingle((void *)addr);
gb_internal void futex_signal(Futex *f) {
WakeByAddressSingle(f);
}
gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) {
for (;;) {
WaitOnAddress(addr, (void *)&val, sizeof(val), INFINITE);
if (*addr != val) break;
}
gb_internal void futex_broadcast(Futex *f) {
WakeByAddressAll(f);
}
gb_internal void futex_wait(Futex *f, Footex val) {
do {
WaitOnAddress(f, (void *)&val, sizeof(val), INFINITE);
} while (f->load() == val);
}
#endif

View File

@@ -748,6 +748,7 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path);
// IMPORTANT TODO(bill): SHould this TypePath code be removed since type cycle checking is handled much earlier on?
struct TypePath {
RecursiveMutex mutex;
Array<Entity *> path; // Entity_TypeName;
bool failure;
};
@@ -758,7 +759,9 @@ gb_internal void type_path_init(TypePath *tp) {
}
gb_internal void type_path_free(TypePath *tp) {
mutex_lock(&tp->mutex);
array_free(&tp->path);
mutex_unlock(&tp->mutex);
}
gb_internal void type_path_print_illegal_cycle(TypePath *tp, isize start_index) {
@@ -787,6 +790,8 @@ gb_internal bool type_path_push(TypePath *tp, Type *t) {
}
Entity *e = t->Named.type_name;
mutex_lock(&tp->mutex);
for (isize i = 0; i < tp->path.count; i++) {
Entity *p = tp->path[i];
if (p == e) {
@@ -795,12 +800,19 @@ gb_internal bool type_path_push(TypePath *tp, Type *t) {
}
array_add(&tp->path, e);
mutex_unlock(&tp->mutex);
return true;
}
gb_internal void type_path_pop(TypePath *tp) {
if (tp != nullptr && tp->path.count > 0) {
array_pop(&tp->path);
if (tp != nullptr) {
mutex_lock(&tp->mutex);
if (tp->path.count > 0) {
array_pop(&tp->path);
}
mutex_unlock(&tp->mutex);
}
}
@@ -808,10 +820,6 @@ gb_internal void type_path_pop(TypePath *tp) {
#define FAILURE_SIZE 0
#define FAILURE_ALIGNMENT 0
gb_internal void init_type_mutex(void) {
mutex_init(&g_type_mutex);
}
gb_internal bool type_ptr_set_update(PtrSet<Type *> *s, Type *t) {
if (ptr_set_exists(s, t)) {
return true;
@@ -827,8 +835,7 @@ gb_internal bool type_ptr_set_exists(PtrSet<Type *> *s, Type *t) {
// TODO(bill, 2019-10-05): This is very slow and it's probably a lot
// faster to cache types correctly
for (auto const &entry : *s) {
Type *f = entry.ptr;
for (Type *f : *s) {
if (are_types_identical(t, f)) {
ptr_set_add(s, t);
return true;
@@ -2521,14 +2528,6 @@ gb_internal bool lookup_subtype_polymorphic_selection(Type *dst, Type *src, Sele
gb_internal bool are_types_identical_internal(Type *x, Type *y, bool check_tuple_names);
gb_internal bool are_types_identical(Type *x, Type *y) {
return are_types_identical_internal(x, y, false);
}
gb_internal bool are_types_identical_unique_tuples(Type *x, Type *y) {
return are_types_identical_internal(x, y, true);
}
gb_internal bool are_types_identical_internal(Type *x, Type *y, bool check_tuple_names) {
if (x == y) {
return true;
}
@@ -2540,13 +2539,13 @@ gb_internal bool are_types_identical_internal(Type *x, Type *y, bool check_tuple
if (x->kind == Type_Named) {
Entity *e = x->Named.type_name;
if (e != nullptr && e->kind == Entity_TypeName && e->TypeName.is_type_alias) {
if (e->TypeName.is_type_alias) {
x = x->Named.base;
}
}
if (y->kind == Type_Named) {
Entity *e = y->Named.type_name;
if (e != nullptr && e->kind == Entity_TypeName && e->TypeName.is_type_alias) {
if (e->TypeName.is_type_alias) {
y = y->Named.base;
}
}
@@ -2554,6 +2553,64 @@ gb_internal bool are_types_identical_internal(Type *x, Type *y, bool check_tuple
return false;
}
return are_types_identical_internal(x, y, false);
}
gb_internal bool are_types_identical_unique_tuples(Type *x, Type *y) {
if (x == y) {
return true;
}
if (!x | !y) {
return false;
}
if (x->kind == Type_Named) {
Entity *e = x->Named.type_name;
if (e->TypeName.is_type_alias) {
x = x->Named.base;
}
}
if (y->kind == Type_Named) {
Entity *e = y->Named.type_name;
if (e->TypeName.is_type_alias) {
y = y->Named.base;
}
}
if (x->kind != y->kind) {
return false;
}
return are_types_identical_internal(x, y, true);
}
gb_internal bool are_types_identical_internal(Type *x, Type *y, bool check_tuple_names) {
if (x == y) {
return true;
}
if (!x | !y) {
return false;
}
#if 0
if (x->kind == Type_Named) {
Entity *e = x->Named.type_name;
if (e->TypeName.is_type_alias) {
x = x->Named.base;
}
}
if (y->kind == Type_Named) {
Entity *e = y->Named.type_name;
if (e->TypeName.is_type_alias) {
y = y->Named.base;
}
}
if (x->kind != y->kind) {
return false;
}
#endif
switch (x->kind) {
case Type_Generic:
return are_types_identical(x->Generic.specialized, y->Generic.specialized);
@@ -3350,35 +3407,55 @@ gb_internal i64 type_size_of(Type *t) {
if (t == nullptr) {
return 0;
}
// NOTE(bill): Always calculate the size when it is a Type_Basic
if (t->kind == Type_Named && t->cached_size >= 0) {
i64 size = -1;
if (t->kind == Type_Basic) {
GB_ASSERT_MSG(is_type_typed(t), "%s", type_to_string(t));
switch (t->Basic.kind) {
case Basic_string: size = 2*build_context.word_size; break;
case Basic_cstring: size = build_context.word_size; break;
case Basic_any: size = 2*build_context.word_size; break;
case Basic_typeid: size = build_context.word_size; break;
} else if (t->kind != Type_Basic && t->cached_size >= 0) {
return t->cached_size;
case Basic_int: case Basic_uint: case Basic_uintptr: case Basic_rawptr:
size = build_context.word_size;
break;
default:
size = t->Basic.size;
break;
}
t->cached_size.store(size);
return size;
} else if (t->kind != Type_Named && t->cached_size >= 0) {
return t->cached_size.load();
} else {
TypePath path{};
type_path_init(&path);
{
MUTEX_GUARD(&g_type_mutex);
size = type_size_of_internal(t, &path);
t->cached_size.store(size);
}
type_path_free(&path);
return size;
}
TypePath path = {0};
type_path_init(&path);
t->cached_size = type_size_of_internal(t, &path);
type_path_free(&path);
return t->cached_size;
}
// Returns the alignment in bytes of type t, caching the result on the type.
//
// Defect repaired: this span was botched-merge residue — duplicate fast-path
// conditions, a mismatched brace, and two competing TypePath declarations
// from interleaved old/new lines. Reconstructed as the coherent thread-safe
// version (atomic cached_align + MUTEX_GUARD), mirroring type_size_of.
gb_internal i64 type_align_of(Type *t) {
	if (t == nullptr) {
		return 1;
	}
	// Fast path: non-named types may reuse a previously computed alignment
	// without taking the lock (cached_align is atomic).
	if (t->kind != Type_Named && t->cached_align > 0) {
		return t->cached_align.load();
	}

	TypePath path{};
	type_path_init(&path);
	{
		// Serialize the possibly recursive computation across threads.
		MUTEX_GUARD(&g_type_mutex);
		t->cached_align.store(type_align_of_internal(t, &path));
	}
	type_path_free(&path);
	return t->cached_align.load();
}
@@ -3387,8 +3464,6 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) {
if (t->failure) {
return FAILURE_ALIGNMENT;
}
mutex_lock(&g_type_mutex);
defer (mutex_unlock(&g_type_mutex));
t = base_type(t);
@@ -3485,39 +3560,25 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) {
if (t->Struct.custom_align > 0) {
return gb_max(t->Struct.custom_align, 1);
}
if (t->Struct.is_raw_union) {
i64 max = 1;
for_array(i, t->Struct.fields) {
Type *field_type = t->Struct.fields[i]->type;
bool pop = type_path_push(path, field_type);
if (path->failure) {
return FAILURE_ALIGNMENT;
}
i64 align = type_align_of_internal(field_type, path);
if (pop) type_path_pop(path);
if (max < align) {
max = align;
}
}
return max;
} else if (t->Struct.fields.count > 0) {
i64 max = 1;
// NOTE(bill): Check the fields to check for cyclic definitions
for_array(i, t->Struct.fields) {
Type *field_type = t->Struct.fields[i]->type;
bool pop = type_path_push(path, field_type);
if (path->failure) return FAILURE_ALIGNMENT;
i64 align = type_align_of_internal(field_type, path);
if (pop) type_path_pop(path);
if (max < align) {
max = align;
}
}
if (t->Struct.is_packed) {
return 1;
}
return max;
if (t->Struct.is_packed) {
return 1;
}
i64 max = 1;
for_array(i, t->Struct.fields) {
Type *field_type = t->Struct.fields[i]->type;
bool pop = type_path_push(path, field_type);
if (path->failure) {
return FAILURE_ALIGNMENT;
}
i64 align = type_align_of_internal(field_type, path);
if (pop) type_path_pop(path);
if (max < align) {
max = align;
}
}
return max;
} break;
case Type_BitSet: {
@@ -3583,8 +3644,7 @@ gb_internal i64 *type_set_offsets_of(Slice<Entity *> const &fields, bool is_pack
}
gb_internal bool type_set_offsets(Type *t) {
mutex_lock(&g_type_mutex);
defer (mutex_unlock(&g_type_mutex));
MUTEX_GUARD(&g_type_mutex); // TODO(bill): only per struct
t = base_type(t);
if (t->kind == Type_Struct) {
@@ -3613,9 +3673,6 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) {
if (t->failure) {
return FAILURE_SIZE;
}
mutex_lock(&g_type_mutex);
defer (mutex_unlock(&g_type_mutex));
switch (t->kind) {
case Type_Named: {