Multithread min dep set by removing the set itself

This commit is contained in:
gingerBill
2025-09-10 17:29:11 +01:00
parent 60684ff028
commit 1e0902677f
13 changed files with 265 additions and 125 deletions

View File

@@ -1712,7 +1712,7 @@ gb_internal void check_remove_expr_info(CheckerContext *c, Ast *e) {
}
gb_internal isize type_info_index(CheckerInfo *info, TypeInfoPair pair, bool error_on_failure) {
mutex_lock(&info->minimum_dependency_type_info_mutex);
rw_mutex_shared_lock(&info->minimum_dependency_type_info_mutex);
isize entry_index = -1;
u64 hash = pair.hash;
@@ -1720,7 +1720,7 @@ gb_internal isize type_info_index(CheckerInfo *info, TypeInfoPair pair, bool err
if (found_entry_index) {
entry_index = *found_entry_index;
}
mutex_unlock(&info->minimum_dependency_type_info_mutex);
rw_mutex_shared_unlock(&info->minimum_dependency_type_info_mutex);
if (error_on_failure && entry_index < 0) {
compiler_error("Type_Info for '%s' could not be found", type_to_string(pair.type));
@@ -2377,11 +2377,8 @@ gb_internal void add_min_dep_type_info(Checker *c, Type *t) {
return;
}
{
MUTEX_GUARD(&c->info.minimum_dependency_type_info_mutex);
if (type_set_update(&c->info.min_dep_type_info_set, t)) {
return;
}
if (type_set_update_with_mutex(&c->info.min_dep_type_info_set, t, &c->info.min_dep_type_info_set_mutex)) {
return;
}
// Add nested types
@@ -2555,13 +2552,15 @@ gb_internal void add_min_dep_type_info(Checker *c, Type *t) {
}
}
gb_internal void add_dependency_to_set_threaded(Checker *c, Entity *entity);
gb_global std::atomic<Checker *> global_checker_ptr;
gb_internal void add_dependency_to_set(Checker *c, Entity *entity) {
if (entity == nullptr) {
return;
}
CheckerInfo *info = &c->info;
if (entity->type != nullptr &&
is_type_polymorphic(entity->type)) {
DeclInfo *decl = decl_info_of_entity(entity);
@@ -2570,11 +2569,8 @@ gb_internal void add_dependency_to_set(Checker *c, Entity *entity) {
}
}
{
MUTEX_GUARD(&info->minimum_dependency_type_info_mutex);
if (ptr_set_update(&info->minimum_dependency_set, entity)) {
return;
}
if (entity->min_dep_count.fetch_add(1, std::memory_order_relaxed) > 0) {
return;
}
DeclInfo *decl = decl_info_of_entity(entity);
@@ -2584,46 +2580,45 @@ gb_internal void add_dependency_to_set(Checker *c, Entity *entity) {
for (TypeInfoPair const tt : decl->type_info_deps) {
add_min_dep_type_info(c, tt.type);
}
for (Entity *e : decl->deps) {
switch (e->kind) {
case Entity_Procedure:
if (e->Procedure.is_foreign) {
Entity *fl = e->Procedure.foreign_library;
if (fl != nullptr) {
GB_ASSERT_MSG(fl->kind == Entity_LibraryName &&
(fl->flags&EntityFlag_Used),
"%.*s", LIT(entity->token.string));
add_dependency_to_set(c, fl);
}
}
break;
case Entity_Variable:
if (e->Variable.is_foreign) {
Entity *fl = e->Variable.foreign_library;
if (fl != nullptr) {
GB_ASSERT_MSG(fl->kind == Entity_LibraryName &&
(fl->flags&EntityFlag_Used),
"%.*s", LIT(entity->token.string));
add_dependency_to_set(c, fl);
}
}
break;
}
}
for (Entity *e : decl->deps) {
add_dependency_to_set(c, e);
if (e->kind == Entity_Procedure && e->Procedure.is_foreign) {
Entity *fl = e->Procedure.foreign_library;
if (fl != nullptr) {
GB_ASSERT_MSG(fl->kind == Entity_LibraryName &&
(fl->flags&EntityFlag_Used),
"%.*s", LIT(entity->token.string));
add_dependency_to_set(c, fl);
}
} else if (e->kind == Entity_Variable && e->Variable.is_foreign) {
Entity *fl = e->Variable.foreign_library;
if (fl != nullptr) {
GB_ASSERT_MSG(fl->kind == Entity_LibraryName &&
(fl->flags&EntityFlag_Used),
"%.*s", LIT(entity->token.string));
add_dependency_to_set(c, fl);
}
}
}
}
// Task payload handed to add_dependency_to_set_worker: the checker instance
// plus the entity whose dependency closure should be added to the minimum
// dependency set.
// NOTE(review): the type name misspells "Dependency" ("Dependecy") — renaming
// would touch all usage sites, so it is only flagged here.
struct AddDependecyToSetWorkerData {
	Checker *c;
	Entity *entity;
};
gb_internal void add_dependency_to_set_threaded(Checker *c, Entity *entity);
gb_internal WORKER_TASK_PROC(add_dependency_to_set_worker) {
AddDependecyToSetWorkerData *wd = cast(AddDependecyToSetWorkerData *)data;
Checker *c = wd->c;
Entity *entity = wd->entity;
Checker *c = global_checker_ptr.load(std::memory_order_relaxed);
Entity *entity = cast(Entity *)data;
if (entity == nullptr) {
return 0;
}
CheckerInfo *info = &c->info;
if (entity->type != nullptr &&
is_type_polymorphic(entity->type)) {
DeclInfo *decl = decl_info_of_entity(entity);
@@ -2632,11 +2627,8 @@ gb_internal WORKER_TASK_PROC(add_dependency_to_set_worker) {
}
}
{
MUTEX_GUARD(&info->minimum_dependency_type_info_mutex);
if (ptr_set_update(&info->minimum_dependency_set, entity)) {
return 0;
}
if (entity->min_dep_count.fetch_add(1, std::memory_order_relaxed) > 0) {
return 0;
}
DeclInfo *decl = decl_info_of_entity(entity);
@@ -2648,25 +2640,36 @@ gb_internal WORKER_TASK_PROC(add_dependency_to_set_worker) {
}
for (Entity *e : decl->deps) {
add_dependency_to_set(c, e);
if (e->kind == Entity_Procedure && e->Procedure.is_foreign) {
Entity *fl = e->Procedure.foreign_library;
if (fl != nullptr) {
GB_ASSERT_MSG(fl->kind == Entity_LibraryName &&
(fl->flags&EntityFlag_Used),
"%.*s", LIT(entity->token.string));
add_dependency_to_set_threaded(c, fl);
switch (e->kind) {
case Entity_Procedure:
if (e->Procedure.is_foreign) {
Entity *fl = e->Procedure.foreign_library;
if (fl != nullptr) {
GB_ASSERT_MSG(fl->kind == Entity_LibraryName &&
(fl->flags&EntityFlag_Used),
"%.*s", LIT(entity->token.string));
add_dependency_to_set_threaded(c, fl);
}
}
} else if (e->kind == Entity_Variable && e->Variable.is_foreign) {
Entity *fl = e->Variable.foreign_library;
if (fl != nullptr) {
GB_ASSERT_MSG(fl->kind == Entity_LibraryName &&
(fl->flags&EntityFlag_Used),
"%.*s", LIT(entity->token.string));
add_dependency_to_set_threaded(c, fl);
break;
case Entity_Variable:
if (e->Variable.is_foreign) {
Entity *fl = e->Variable.foreign_library;
if (fl != nullptr) {
GB_ASSERT_MSG(fl->kind == Entity_LibraryName &&
(fl->flags&EntityFlag_Used),
"%.*s", LIT(entity->token.string));
add_dependency_to_set_threaded(c, fl);
}
}
break;
}
}
for (Entity *e : decl->deps) {
add_dependency_to_set_threaded(c, e);
}
return 0;
}
@@ -2675,11 +2678,7 @@ gb_internal void add_dependency_to_set_threaded(Checker *c, Entity *entity) {
if (entity == nullptr) {
return;
}
AddDependecyToSetWorkerData *wd = gb_alloc_item(temporary_allocator(), AddDependecyToSetWorkerData);
wd->c = c;
wd->entity = entity;
thread_pool_add_task(add_dependency_to_set_worker, wd);
thread_pool_add_task(add_dependency_to_set_worker, entity);
}
@@ -2732,27 +2731,35 @@ gb_internal void collect_testing_procedures_of_package(Checker *c, AstPackage *p
}
gb_internal void generate_minimum_dependency_set_internal(Checker *c, Entity *start) {
// auto const &add_to_set = add_dependency_to_set;
auto const &add_to_set = add_dependency_to_set_threaded;
Scope *builtin_scope = builtin_pkg->scope;
for_array(i, c->info.definitions) {
Entity *e = c->info.definitions[i];
if (e->scope == builtin_pkg->scope) {
if (e->scope == builtin_scope) {
if (e->type == nullptr) {
add_dependency_to_set_threaded(c, e);
add_to_set(c, e);
}
} else if (e->kind == Entity_Procedure) {
if (e->Procedure.is_export) {
add_to_set(c, e);
}
} else if (e->kind == Entity_Variable) {
if (e->Variable.is_export) {
add_to_set(c, e);
}
} else if (e->kind == Entity_Procedure && e->Procedure.is_export) {
add_dependency_to_set_threaded(c, e);
} else if (e->kind == Entity_Variable && e->Variable.is_export) {
add_dependency_to_set_threaded(c, e);
}
}
for (Entity *e; mpsc_dequeue(&c->info.required_foreign_imports_through_force_queue, &e); /**/) {
array_add(&c->info.required_foreign_imports_through_force, e);
add_dependency_to_set_threaded(c, e);
add_to_set(c, e);
}
for (Entity *e; mpsc_dequeue(&c->info.required_global_variable_queue, &e); /**/) {
e->flags |= EntityFlag_Used;
add_dependency_to_set_threaded(c, e);
add_to_set(c, e);
}
for_array(i, c->info.entities) {
@@ -2760,16 +2767,16 @@ gb_internal void generate_minimum_dependency_set_internal(Checker *c, Entity *st
switch (e->kind) {
case Entity_Variable:
if (e->Variable.is_export) {
add_dependency_to_set_threaded(c, e);
add_to_set(c, e);
} else if (e->flags & EntityFlag_Require) {
add_dependency_to_set_threaded(c, e);
add_to_set(c, e);
}
break;
case Entity_Procedure:
if (e->Procedure.is_export) {
add_dependency_to_set_threaded(c, e);
add_to_set(c, e);
} else if (e->flags & EntityFlag_Require) {
add_dependency_to_set_threaded(c, e);
add_to_set(c, e);
}
if (e->flags & EntityFlag_Init) {
Type *t = base_type(e->type);
@@ -2809,7 +2816,7 @@ gb_internal void generate_minimum_dependency_set_internal(Checker *c, Entity *st
if (is_init) {
add_dependency_to_set_threaded(c, e);
add_to_set(c, e);
array_add(&c->info.init_procedures, e);
}
} else if (e->flags & EntityFlag_Fini) {
@@ -2844,7 +2851,7 @@ gb_internal void generate_minimum_dependency_set_internal(Checker *c, Entity *st
}
if (is_fini) {
add_dependency_to_set_threaded(c, e);
add_to_set(c, e);
array_add(&c->info.fini_procedures, e);
}
}
@@ -2861,7 +2868,7 @@ gb_internal void generate_minimum_dependency_set_internal(Checker *c, Entity *st
Entity *e = entry.value;
if (e != nullptr) {
e->flags |= EntityFlag_Used;
add_dependency_to_set_threaded(c, e);
add_to_set(c, e);
}
}
@@ -2876,16 +2883,11 @@ gb_internal void generate_minimum_dependency_set_internal(Checker *c, Entity *st
}
} else if (start != nullptr) {
start->flags |= EntityFlag_Used;
add_dependency_to_set_threaded(c, start);
add_to_set(c, start);
}
}
gb_internal void generate_minimum_dependency_set(Checker *c, Entity *start) {
isize entity_count = c->info.entities.count;
isize min_dep_set_cap = next_pow2_isize(entity_count*4); // empirically determined factor
ptr_set_init(&c->info.minimum_dependency_set, min_dep_set_cap);
#define FORCE_ADD_RUNTIME_ENTITIES(condition, ...) do { \
if (condition) { \
String entities[] = {__VA_ARGS__}; \
@@ -6267,8 +6269,10 @@ gb_internal void check_unchecked_bodies(Checker *c) {
// use the `procs_to_check` array
global_procedure_body_in_worker_queue = false;
for (Entity *e : c->info.minimum_dependency_set) {
check_procedure_later_from_entity(c, e, "check_unchecked_bodies");
for (Entity *e : c->info.entities) {
if (e->min_dep_count.load(std::memory_order_relaxed) > 0) {
check_procedure_later_from_entity(c, e, "check_unchecked_bodies");
}
}
if (!global_procedure_body_in_worker_queue) {
@@ -7042,6 +7046,7 @@ gb_internal void check_merge_queues_into_arrays(Checker *c) {
}
check_add_entities_from_queues(c);
check_add_definitions_from_queues(c);
thread_pool_wait();
}
gb_internal GB_COMPARE_PROC(init_procedures_cmp) {
@@ -7100,7 +7105,7 @@ gb_internal void add_type_info_for_type_definitions(Checker *c) {
Entity *e = c->info.definitions[i];
if (e->kind == Entity_TypeName && e->type != nullptr && is_type_typed(e->type)) {
i64 align = type_align_of(e->type);
if (align > 0 && ptr_set_exists(&c->info.minimum_dependency_set, e)) {
if (align > 0 && e->min_dep_count.load(std::memory_order_relaxed) > 0) {
add_type_info_type(&c->builtin_ctx, e->type);
}
}
@@ -7170,6 +7175,8 @@ gb_internal void check_update_dependency_tree_for_procedures(Checker *c) {
gb_internal void check_parsed_files(Checker *c) {
global_checker_ptr.store(c, std::memory_order_relaxed);
TIME_SECTION("map full filepaths to scope");
add_type_info_type(&c->builtin_ctx, t_invalid);
@@ -7312,11 +7319,9 @@ gb_internal void check_parsed_files(Checker *c) {
check_unchecked_bodies(c);
TIME_SECTION("check #soa types");
check_merge_queues_into_arrays(c);
thread_pool_wait();
TIME_SECTION("update minimum dependency set");
TIME_SECTION("update minimum dependency set again");
generate_minimum_dependency_set_internal(c, c->info.entry_point);
// NOTE(laytan): has to be ran after generate_minimum_dependency_set,

View File

@@ -449,11 +449,10 @@ struct CheckerInfo {
Scope * init_scope;
Entity * entry_point;
BlockingMutex minimum_dependency_set_mutex;
PtrSet<Entity *> minimum_dependency_set;
BlockingMutex minimum_dependency_type_info_mutex;
RwMutex minimum_dependency_type_info_mutex;
PtrMap</*type info hash*/u64, /*min dep index*/isize> min_dep_type_info_index_map;
RWSpinLock min_dep_type_info_set_mutex;
TypeSet min_dep_type_info_set;
Array<TypeInfoPair> type_info_types_hash_map; // 2 * type_info_types.count

View File

@@ -164,6 +164,7 @@ struct Entity {
u64 id;
std::atomic<u64> flags;
std::atomic<EntityState> state;
std::atomic<i32> min_dep_count;
Token token;
Scope * scope;
Type * type;

View File

@@ -86,7 +86,7 @@ gb_internal char *token_pos_to_string(TokenPos const &pos);
gb_internal bool set_file_path_string(i32 index, String const &path) {
bool ok = false;
GB_ASSERT(index >= 0);
mutex_lock(&global_error_collector.path_mutex);
// mutex_lock(&global_error_collector.path_mutex);
mutex_lock(&global_files_mutex);
if (index >= global_file_path_strings.count) {
@@ -99,14 +99,14 @@ gb_internal bool set_file_path_string(i32 index, String const &path) {
}
mutex_unlock(&global_files_mutex);
mutex_unlock(&global_error_collector.path_mutex);
// mutex_unlock(&global_error_collector.path_mutex);
return ok;
}
gb_internal bool thread_safe_set_ast_file_from_id(i32 index, AstFile *file) {
bool ok = false;
GB_ASSERT(index >= 0);
mutex_lock(&global_error_collector.path_mutex);
// mutex_lock(&global_error_collector.path_mutex);
mutex_lock(&global_files_mutex);
if (index >= global_files.count) {
@@ -118,13 +118,13 @@ gb_internal bool thread_safe_set_ast_file_from_id(i32 index, AstFile *file) {
ok = true;
}
mutex_unlock(&global_files_mutex);
mutex_unlock(&global_error_collector.path_mutex);
// mutex_unlock(&global_error_collector.path_mutex);
return ok;
}
gb_internal String get_file_path_string(i32 index) {
GB_ASSERT(index >= 0);
mutex_lock(&global_error_collector.path_mutex);
// mutex_lock(&global_error_collector.path_mutex);
mutex_lock(&global_files_mutex);
String path = {};
@@ -133,13 +133,13 @@ gb_internal String get_file_path_string(i32 index) {
}
mutex_unlock(&global_files_mutex);
mutex_unlock(&global_error_collector.path_mutex);
// mutex_unlock(&global_error_collector.path_mutex);
return path;
}
gb_internal AstFile *thread_safe_get_ast_file_from_id(i32 index) {
GB_ASSERT(index >= 0);
mutex_lock(&global_error_collector.path_mutex);
// mutex_lock(&global_error_collector.path_mutex);
mutex_lock(&global_files_mutex);
AstFile *file = nullptr;
@@ -148,7 +148,7 @@ gb_internal AstFile *thread_safe_get_ast_file_from_id(i32 index) {
}
mutex_unlock(&global_files_mutex);
mutex_unlock(&global_error_collector.path_mutex);
// mutex_unlock(&global_error_collector.path_mutex);
return file;
}

View File

@@ -2097,8 +2097,6 @@ gb_internal GB_COMPARE_PROC(llvm_global_entity_cmp) {
}
gb_internal void lb_create_global_procedures_and_types(lbGenerator *gen, CheckerInfo *info, bool do_threading) {
auto *min_dep_set = &info->minimum_dependency_set;
for (Entity *e : info->entities) {
String name = e->token.string;
Scope * scope = e->scope;
@@ -2135,11 +2133,16 @@ gb_internal void lb_create_global_procedures_and_types(lbGenerator *gen, Checker
}
}
if (!polymorphic_struct && !ptr_set_exists(min_dep_set, e)) {
if (!polymorphic_struct && e->min_dep_count.load(std::memory_order_relaxed) == 0) {
// NOTE(bill): Nothing depends upon it so doesn't need to be built
continue;
}
// if (!polymorphic_struct && !ptr_set_exists(min_dep_set, e)) {
// // NOTE(bill): Nothing depends upon it so doesn't need to be built
// continue;
// }
lbModule *m = &gen->default_module;
if (USE_SEPARATE_MODULES) {
m = lb_module_of_entity(gen, e);
@@ -2845,8 +2848,6 @@ gb_internal bool lb_generate_code(lbGenerator *gen) {
lbModule *default_module = &gen->default_module;
CheckerInfo *info = gen->info;
auto *min_dep_set = &info->minimum_dependency_set;
switch (build_context.metrics.arch) {
case TargetArch_amd64:
case TargetArch_i386:
@@ -3184,10 +3185,14 @@ gb_internal bool lb_generate_code(lbGenerator *gen) {
continue;
}
if (!ptr_set_exists(min_dep_set, e)) {
if (e->min_dep_count.load(std::memory_order_relaxed) == 0) {
continue;
}
// if (!ptr_set_exists(min_dep_set, e)) {
// continue;
// }
DeclInfo *decl = decl_info_of_entity(e);
if (decl == nullptr) {
continue;

View File

@@ -798,9 +798,8 @@ gb_internal void lb_end_procedure_body(lbProcedure *p) {
gb_internal void lb_build_nested_proc(lbProcedure *p, AstProcLit *pd, Entity *e) {
GB_ASSERT(pd->body != nullptr);
lbModule *m = p->module;
auto *min_dep_set = &m->info->minimum_dependency_set;
if (ptr_set_exists(min_dep_set, e) == false) {
if (e->min_dep_count.load(std::memory_order_relaxed) == 0) {
// NOTE(bill): Nothing depends upon it so doesn't need to be built
return;
}

View File

@@ -3,8 +3,6 @@ gb_internal void lb_build_constant_value_decl(lbProcedure *p, AstValueDecl *vd)
return;
}
auto *min_dep_set = &p->module->info->minimum_dependency_set;
for (Ast *ident : vd->names) {
GB_ASSERT(ident->kind == Ast_Ident);
Entity *e = entity_of_node(ident);
@@ -21,7 +19,7 @@ gb_internal void lb_build_constant_value_decl(lbProcedure *p, AstValueDecl *vd)
}
}
if (!polymorphic_struct && !ptr_set_exists(min_dep_set, e)) {
if (!polymorphic_struct && e->min_dep_count.load(std::memory_order_relaxed) == 0) {
continue;
}
@@ -56,7 +54,7 @@ gb_internal void lb_build_constant_value_decl(lbProcedure *p, AstValueDecl *vd)
if (gpd) {
rw_mutex_shared_lock(&gpd->mutex);
for (Entity *e : gpd->procs) {
if (!ptr_set_exists(min_dep_set, e)) {
if (e->min_dep_count.load(std::memory_order_relaxed) == 0) {
continue;
}
DeclInfo *d = decl_info_of_entity(e);

View File

@@ -3050,7 +3050,8 @@ gb_internal void print_show_unused(Checker *c) {
if (e->token.string == "_") {
continue;
}
if (ptr_set_exists(&info->minimum_dependency_set, e)) {
if (e->min_dep_count.load(std::memory_order_relaxed) > 0) {
continue;
}
array_add(&unused, e);

View File

@@ -57,10 +57,13 @@ gb_internal isize type_set__find(TypeSet *s, TypeInfoPair pair) {
usize mask = s->capacity-1;
usize hash_index = cast(usize)hash & mask;
for (usize i = 0; i < s->capacity; i++) {
Type *key = s->keys[hash_index].type;
if (are_types_identical_unique_tuples(key, pair.type)) {
auto *e = &s->keys[hash_index];
u64 hash = e->hash;
Type *key = e->type;
if (hash == pair.hash &&
are_types_identical_unique_tuples(key, pair.type)) {
return hash_index;
} else if (key == 0) {
} else if (key == nullptr) {
return -1;
}
hash_index = (hash_index+1)&mask;
@@ -164,6 +167,48 @@ gb_internal bool type_set_update(TypeSet *s, Type *ptr) { // returns true if it
return type_set_update(s, pair);
}
// Inserts `pair` into the type set if it is not already present, guarded by
// an upgradeable spin lock so concurrent membership checks can overlap.
// Returns true if the pair previously existed (nothing was inserted).
gb_internal bool type_set_update_with_mutex(TypeSet *s, TypeInfoPair pair, RWSpinLock *m) { // returns true if it previously existed
	// Fast path: hold only the upgrade slot while probing for membership.
	rwlock_acquire_upgrade(m);
	if (type_set_exists(s, pair)) {
		rwlock_release_upgrade(m);
		return true;
	}
	// Slow path: promote to an exclusive write lock before mutating. The
	// upgrade slot is held continuously across the promotion, so no other
	// thread can have inserted `pair` in between the check and the insert.
	rwlock_release_upgrade_and_acquire_write(m);
	defer (rwlock_release_write(m));

	if (s->keys == nullptr) {
		type_set_init(s);
	} else if (type_set__full(s)) {
		type_set_grow(s);
	}
	GB_ASSERT(s->count < s->capacity);
	GB_ASSERT(s->capacity >= 0);

	// Linear probe for the first free (empty or tombstone) slot.
	usize mask = s->capacity-1;
	usize hash = cast(usize)pair.hash;
	usize hash_index = (cast(usize)hash) & mask;
	GB_ASSERT(hash_index < s->capacity);
	for (usize i = 0; i < s->capacity; i++) {
		TypeInfoPair *key = &s->keys[hash_index];
		// The membership check above already failed, so hitting an identical
		// type here would indicate a race or a corrupted table.
		GB_ASSERT(!are_types_identical_unique_tuples(key->type, pair.type));
		if (key->hash == TYPE_SET_TOMBSTONE || key->hash == 0) {
			*key = pair;
			s->count++;
			return false;
		}
		hash_index = (hash_index+1)&mask;
	}
	// Fixed message: previously said "ptr set out of memory" — copy/paste
	// from ptr_set_update; this is the type set.
	GB_PANIC("type set out of memory");
	return false;
}
// Convenience overload: compute the canonical hash for `ptr` and forward to
// the pair-based variant. Returns true if the type previously existed.
gb_internal bool type_set_update_with_mutex(TypeSet *s, Type *ptr, RWSpinLock *m) { // returns true if it previously existed
	TypeInfoPair entry = {ptr, type_hash_canonical_type(ptr)};
	return type_set_update_with_mutex(s, entry, m);
}
gb_internal Type *type_set_add(TypeSet *s, Type *ptr) {
type_set_update(s, ptr);
@@ -328,12 +373,20 @@ gb_internal u64 type_hash_canonical_type(Type *type) {
if (type == nullptr) {
return 0;
}
u64 prev_hash = type->canonical_hash.load(std::memory_order_relaxed);
if (prev_hash != 0) {
return prev_hash;
}
u64 hash = fnv64a(nullptr, 0);
TypeWriter w = {};
type_writer_make_hasher(&w, &hash);
write_type_to_canonical_string(&w, type);
hash = hash ? hash : 1;
return hash ? hash : 1;
type->canonical_hash.store(hash, std::memory_order_relaxed);
return hash;
}
gb_internal String type_to_canonical_string(gbAllocator allocator, Type *type) {

View File

@@ -102,6 +102,8 @@ gb_internal Type *type_set_add (TypeSet *s, Type *ptr);
gb_internal Type *type_set_add (TypeSet *s, TypeInfoPair pair);
gb_internal bool type_set_update (TypeSet *s, Type *ptr); // returns true if it previously existed
gb_internal bool type_set_update (TypeSet *s, TypeInfoPair pair); // returns true if it previously existed
gb_internal bool type_set_update_with_mutex(TypeSet *s, TypeInfoPair pair, RWSpinLock *m);
gb_internal bool type_set_update_with_mutex(TypeSet *s, Type *ptr, RWSpinLock *m);
gb_internal bool type_set_exists (TypeSet *s, Type *ptr);
gb_internal void type_set_remove (TypeSet *s, Type *ptr);
gb_internal void type_set_clear (TypeSet *s);

View File

@@ -134,6 +134,44 @@ gb_internal bool ptr_set_update(PtrSet<T> *s, T ptr) { // returns true if it pre
return false;
}
// Inserts `ptr` into the set if absent, guarded by an upgradeable spin lock
// (same scheme as type_set_update_with_mutex): membership is checked while
// holding only the upgrade slot, and the lock is promoted to exclusive only
// when an actual insert is required.
// Returns true if the pointer previously existed (nothing was inserted).
template <typename T>
gb_internal bool ptr_set_update_with_mutex(PtrSet<T> *s, T ptr, RWSpinLock *m) { // returns true if it previously existed
	rwlock_acquire_upgrade(m);
	if (ptr_set_exists(s, ptr)) {
		rwlock_release_upgrade(m);
		return true;
	}
	// Promote to exclusive; the upgrade slot is held across the promotion,
	// so no other thread can insert `ptr` between the check and the insert.
	rwlock_release_upgrade_and_acquire_write(m);
	defer (rwlock_release_write(m));

	if (s->keys == nullptr) {
		ptr_set_init(s);
	} else if (ptr_set__full(s)) {
		ptr_set_grow(s);
	}
	GB_ASSERT(s->count < s->capacity);
	GB_ASSERT(s->capacity >= 0);

	// Linear probe for the first free (empty or tombstone) slot.
	usize mask = s->capacity-1;
	u32 hash = ptr_map_hash_key(ptr);
	usize hash_index = (cast(usize)hash) & mask;
	GB_ASSERT(hash_index < s->capacity);
	for (usize i = 0; i < s->capacity; i++) {
		T *key = &s->keys[hash_index];
		// Membership check above already failed; a duplicate here is a bug.
		GB_ASSERT(*key != ptr);
		if (*key == (T)PtrSet<T>::TOMBSTONE || *key == 0) {
			*key = ptr;
			s->count++;
			return false;
		}
		hash_index = (hash_index+1)&mask;
	}
	GB_PANIC("ptr set out of memory");
	return false;
}
template <typename T>
gb_internal T ptr_set_add(PtrSet<T> *s, T ptr) {
ptr_set_update(s, ptr);

View File

@@ -448,6 +448,44 @@ gb_internal void semaphore_wait(Semaphore *s) {
}
#endif
// Lock-state bits packed into a single Futex word.
// WRITER and UPGRADED are exclusive-ownership bits; READER is presumably
// added once per active shared holder (the shared-lock paths are not visible
// in this view — TODO confirm against the rw_mutex_shared_* implementation).
static const int RWLOCK_WRITER   = 1;
static const int RWLOCK_UPGRADED = 2;
static const int RWLOCK_READER   = 4;

// Small reader/writer spin lock with an "upgradeable" (intent-to-write)
// intermediate state, built on a single Futex word.
struct RWSpinLock {
	Futex bits;
};
// Releases a write lock: clears both WRITER and UPGRADED in one atomic
// and-with-complement (release order publishes the writes made under the
// lock), then wakes waiter(s) parked on the futex word.
// NOTE(review): whether futex_signal wakes one waiter or all is defined by
// the project's futex layer — confirm it is sufficient for all waiter kinds.
void rwlock_release_write(RWSpinLock *l) {
	l->bits.fetch_and(~(RWLOCK_WRITER | RWLOCK_UPGRADED), std::memory_order_release);
	futex_signal(&l->bits);
}
// Attempts to claim the UPGRADED (intent-to-write) slot. fetch_or sets the
// bit unconditionally; the claim only succeeded if neither UPGRADED nor
// WRITER was already held at that instant. On failure the bit may have been
// set redundantly — presumably harmless since the current owner clears both
// ownership bits on release, but worth confirming against all release paths.
bool rwlock_try_acquire_upgrade(RWSpinLock *l) {
	int value = l->bits.fetch_or(RWLOCK_UPGRADED, std::memory_order_acquire);
	return (value & (RWLOCK_UPGRADED | RWLOCK_WRITER)) == 0;
}
// Blocks until the UPGRADED slot is acquired. futex_wait only sleeps while
// the word equals exactly RWLOCK_UPGRADED; with any other bits set it
// returns immediately, degrading to a busy retry loop.
// NOTE(review): an owner that drops the slot without a futex wake could
// leave a sleeper parked here — verify every release path signals.
void rwlock_acquire_upgrade(RWSpinLock *l) {
	while (!rwlock_try_acquire_upgrade(l)) {
		futex_wait(&l->bits, RWLOCK_UPGRADED);
	}
}
// Drops the UPGRADED (intent-to-write) slot without taking the write lock —
// the fast path when the check made under the upgrade slot found nothing to
// insert.
//
// Fix: wake any thread parked in rwlock_acquire_upgrade. A waiter there
// sleeps via futex_wait while the word equals RWLOCK_UPGRADED; clearing the
// bit without a wake can leave such a sleeper parked indefinitely (lost
// wakeup). A spurious wake is harmless — woken waiters simply retry the
// try-acquire loop.
void rwlock_release_upgrade(RWSpinLock *l) {
	l->bits.fetch_add(-RWLOCK_UPGRADED, std::memory_order_acq_rel);
	futex_signal(&l->bits);
}
// Atomically converts an UPGRADED hold into a full WRITER hold. The CAS only
// succeeds when the word is exactly RWLOCK_UPGRADED — i.e. no reader bits
// remain — so a successful transition implies exclusive access.
bool rwlock_try_release_upgrade_and_acquire_write(RWSpinLock *l) {
	int expect = RWLOCK_UPGRADED;
	return l->bits.compare_exchange_strong(expect, RWLOCK_WRITER, std::memory_order_acq_rel);
}
// Spins until the UPGRADED -> WRITER transition succeeds (i.e. until any
// remaining readers have drained). futex_wait(…, RWLOCK_WRITER) only sleeps
// if the word happens to equal RWLOCK_WRITER exactly; in other states it
// returns at once, so while readers are active this is a busy retry loop.
void rwlock_release_upgrade_and_acquire_write(RWSpinLock *l) {
	while (!rwlock_try_release_upgrade_and_acquire_write(l)) {
		futex_wait(&l->bits, RWLOCK_WRITER);
	}
}
// Thread parking primitive backed by a single Futex word.
// (Only the declaration is visible in this view; the park/unpark operations
// are defined elsewhere.)
struct Parker {
	Futex state;
};

View File

@@ -334,6 +334,7 @@ struct Type {
// NOTE(bill): These need to be at the end to not affect the unionized data
std::atomic<i64> cached_size;
std::atomic<i64> cached_align;
std::atomic<u64> canonical_hash;
std::atomic<u32> flags; // TypeFlag
bool failure;
};