From c7a704d345e9bda38da18807a1d7cd5bc5accc17 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Tue, 3 Jan 2023 15:26:47 +0000
Subject: [PATCH] Use `RwMutex` for the `Scope`

---
 src/check_decl.cpp  | 12 ++++--
 src/check_expr.cpp  |  4 +-
 src/check_stmt.cpp  |  5 ++-
 src/checker.cpp     | 21 ++++++----
 src/checker.hpp     |  2 +-
 src/thread_pool.cpp | 27 ++++++-------
 src/threading.cpp   | 96 +++++++++++++++++++++++++++++++++++++++++++++
 7 files changed, 138 insertions(+), 29 deletions(-)
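Notes:

The change below is mostly mechanical, but the intent is worth spelling out: every `Scope` access used to serialize on a single `BlockingMutex`, even though most accesses only read `scope->elements`. With an `RwMutex`, readers (`scope_lookup_parent`, `check_did_you_mean_scope`, `check_scope_usage`) take the shared lock and can proceed concurrently, while writers (`scope_insert_with_name`, `scope_reset`, `override_entity_in_scope`) take the exclusive lock. A minimal sketch of the same discipline, using `std::shared_mutex` as a stand-in for the compiler's own `RwMutex` — the types and helper names here are illustrative, not the compiler's API:

// Sketch only: the reader/writer discipline this patch applies to Scope,
// shown with std::shared_mutex standing in for the compiler's RwMutex.
#include <shared_mutex>
#include <string>
#include <unordered_map>

struct Entity;

struct ScopeSketch {
	std::shared_mutex mutex;                             // RwMutex in the patch
	std::unordered_map<std::string, Entity *> elements;  // StringMap<Entity *> in the patch
};

// Writers (scope_insert_with_name, scope_reset, ...) take the exclusive lock.
Entity *sketch_insert(ScopeSketch *s, std::string const &name, Entity *e) {
	std::unique_lock lock(s->mutex);  // rw_mutex_lock / rw_mutex_unlock
	auto [it, inserted] = s->elements.try_emplace(name, e);
	return inserted ? nullptr : it->second;  // previously inserted entity, if any
}

// Readers (scope_lookup_parent, check_did_you_mean_scope, ...) take the shared
// lock, so concurrent lookups no longer serialize against each other.
Entity *sketch_lookup(ScopeSketch *s, std::string const &name) {
	std::shared_lock lock(s->mutex);  // rw_mutex_shared_lock / rw_mutex_shared_unlock
	auto it = s->elements.find(name);
	return it == s->elements.end() ? nullptr : it->second;
}

Because `MUTEX_GUARD_BLOCK`'s RAII scope is gone, each early exit has to fall through to the matching unlock, which the hunks below arrange by unlocking after the loop, at the `end:` label, or via `defer`.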
diff --git a/src/check_decl.cpp b/src/check_decl.cpp
index 66f16546c..4afde6e51 100644
--- a/src/check_decl.cpp
+++ b/src/check_decl.cpp
@@ -381,8 +381,8 @@ gb_internal void override_entity_in_scope(Entity *original_entity, Entity *new_e
 	if (found_scope == nullptr) {
 		return;
 	}
-	mutex_lock(&found_scope->mutex);
-	defer (mutex_unlock(&found_scope->mutex));
+	rw_mutex_lock(&found_scope->mutex);
+	defer (rw_mutex_unlock(&found_scope->mutex));
 
 	// IMPORTANT NOTE(bill, 2021-04-10): Overriding behaviour was flawed in that the
 	// original entity was still used check checked, but the checking was only
@@ -1478,7 +1478,8 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de
 			if (t->kind == Type_Struct) {
 				Scope *scope = t->Struct.scope;
 				GB_ASSERT(scope != nullptr);
-				MUTEX_GUARD_BLOCK(scope->mutex) for (auto const &entry : scope->elements) {
+				rw_mutex_lock(&scope->mutex);
+				for (auto const &entry : scope->elements) {
 					Entity *f = entry.value;
 					if (f->kind == Entity_Variable) {
 						Entity *uvar = alloc_entity_using_variable(e, f->token, f->type, nullptr);
@@ -1488,6 +1489,7 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de
 						array_add(&using_entities, puv);
 					}
 				}
+				rw_mutex_unlock(&scope->mutex);
 			} else {
 				error(e->token, "'using' can only be applied to variables of type struct");
 				break;
@@ -1496,7 +1498,8 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de
 		}
 	}
 
-	MUTEX_GUARD_BLOCK(ctx->scope->mutex) for (auto const &entry : using_entities) {
+	rw_mutex_lock(&ctx->scope->mutex);
+	for (auto const &entry : using_entities) {
 		Entity *e = entry.e;
 		Entity *uvar = entry.uvar;
 		Entity *prev = scope_insert_no_mutex(ctx->scope, uvar);
@@ -1506,6 +1509,7 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de
 			break;
 		}
 	}
+	rw_mutex_unlock(&ctx->scope->mutex);
 
 	bool where_clause_ok = evaluate_where_clauses(ctx, nullptr, decl->scope, &decl->proc_lit->ProcLit.where_clauses, !decl->where_clauses_evaluated);
diff --git a/src/check_expr.cpp b/src/check_expr.cpp
index c1787e7b6..d9ab328cb 100644
--- a/src/check_expr.cpp
+++ b/src/check_expr.cpp
@@ -236,10 +236,12 @@ gb_internal void check_did_you_mean_scope(String const &name, Scope *scope, char
 	DidYouMeanAnswers d = did_you_mean_make(heap_allocator(), scope->elements.entries.count, name);
 	defer (did_you_mean_destroy(&d));
 
-	MUTEX_GUARD_BLOCK(&scope->mutex) for (auto const &entry : scope->elements) {
+	rw_mutex_shared_lock(&scope->mutex);
+	for (auto const &entry : scope->elements) {
 		Entity *e = entry.value;
 		did_you_mean_append(&d, e->token.string);
 	}
+	rw_mutex_shared_unlock(&scope->mutex);
 	check_did_you_mean_print(&d, prefix);
 }
 
diff --git a/src/check_stmt.cpp b/src/check_stmt.cpp
index e075297a4..6e84d0789 100644
--- a/src/check_stmt.cpp
+++ b/src/check_stmt.cpp
@@ -622,7 +622,10 @@ gb_internal bool check_using_stmt_entity(CheckerContext *ctx, AstUsingStmt *us,
 	case Entity_ImportName: {
 		Scope *scope = e->ImportName.scope;
-		MUTEX_GUARD_BLOCK(scope->mutex) for (auto const &entry : scope->elements) {
+		rw_mutex_lock(&scope->mutex);
+		defer (rw_mutex_unlock(&scope->mutex));
+
+		for (auto const &entry : scope->elements) {
 			String name = entry.key.string;
 			Entity *decl = entry.value;
 			if (!is_entity_exported(decl)) continue;
diff --git a/src/checker.cpp b/src/checker.cpp
index 0075fa543..1d536074d 100644
--- a/src/checker.cpp
+++ b/src/checker.cpp
@@ -51,10 +51,11 @@ gb_internal bool check_rtti_type_disallowed(Ast *expr, Type *type, char const *f
 gb_internal void scope_reset(Scope *scope) {
 	if (scope == nullptr) return;
 
-	MUTEX_GUARD(&scope->mutex);
+	rw_mutex_lock(&scope->mutex);
 	scope->head_child.store(nullptr, std::memory_order_relaxed);
 	string_map_clear(&scope->elements);
 	ptr_set_clear(&scope->imported);
+	rw_mutex_unlock(&scope->mutex);
 }
 
 gb_internal void scope_reserve(Scope *scope, isize capacity) {
@@ -180,9 +181,9 @@ gb_internal void init_decl_info(DeclInfo *d, Scope *scope, DeclInfo *parent) {
 	gb_zero_item(d);
 	d->parent = parent;
 	d->scope  = scope;
-	ptr_set_init(&d->deps);
-	ptr_set_init(&d->type_info_deps);
-	array_init  (&d->labels, heap_allocator());
+	ptr_set_init(&d->deps, 0);
+	ptr_set_init(&d->type_info_deps, 0);
+	d->labels.allocator = heap_allocator();
 }
 
 gb_internal DeclInfo *make_decl_info(Scope *scope, DeclInfo *parent) {
@@ -394,9 +395,9 @@ gb_internal void scope_lookup_parent(Scope *scope, String const &name, Scope **s
 	StringHashKey key = string_hash_string(name);
 	for (Scope *s = scope; s != nullptr; s = s->parent) {
 		Entity **found = nullptr;
-		mutex_lock(&s->mutex);
+		rw_mutex_shared_lock(&s->mutex);
 		found = string_map_get(&s->elements, key);
-		mutex_unlock(&s->mutex);
+		rw_mutex_shared_unlock(&s->mutex);
 		if (found) {
 			Entity *e = *found;
 			if (gone_thru_proc) {
@@ -482,7 +483,7 @@ gb_internal Entity *scope_insert_with_name(Scope *s, String const &name, Entity
 	Entity **found = nullptr;
 	Entity *result = nullptr;
 
-	MUTEX_GUARD(&s->mutex);
+	rw_mutex_lock(&s->mutex);
 
 	found = string_map_get(&s->elements, key);
@@ -509,6 +510,8 @@ gb_internal Entity *scope_insert_with_name(Scope *s, String const &name, Entity
 		entity->scope = s;
 	}
 end:;
+	rw_mutex_unlock(&s->mutex);
+
 	return result;
 }
 
@@ -669,7 +672,8 @@ gb_internal void check_scope_usage(Checker *c, Scope *scope) {
 	Array<VettedEntity> vetted_entities = {};
 	array_init(&vetted_entities, heap_allocator());
 
-	MUTEX_GUARD_BLOCK(scope->mutex) for (auto const &entry : scope->elements) {
+	rw_mutex_shared_lock(&scope->mutex);
+	for (auto const &entry : scope->elements) {
 		Entity *e = entry.value;
 		if (e == nullptr) continue;
 		VettedEntity ve_unused = {};
@@ -686,6 +690,7 @@ gb_internal void check_scope_usage(Checker *c, Scope *scope) {
 			array_add(&vetted_entities, ve_shadowed);
 		}
 	}
+	rw_mutex_shared_unlock(&scope->mutex);
 
 	gb_sort(vetted_entities.data, vetted_entities.count, gb_size_of(VettedEntity), vetted_entity_variable_pos_cmp);
diff --git a/src/checker.hpp b/src/checker.hpp
index cc92fce28..53052d5cd 100644
--- a/src/checker.hpp
+++ b/src/checker.hpp
@@ -224,7 +224,7 @@ struct Scope {
 	std::atomic<Scope *> next;
 	std::atomic<Scope *> head_child;
 
-	BlockingMutex mutex;
+	RwMutex mutex;
 	StringMap<Entity *> elements;
 	PtrSet<Scope *> imported;
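The thread_pool.cpp hunks below make two adjustments alongside the mutex work: the `fetch_add` on `tasks_available` before a `futex_broadcast` drops from release to relaxed (the counter seemingly serves only as a wake-up ticket; the acquire load taken before `futex_wait` is what orders the worker's re-check), and the queue's compare-exchange loop gets explicit acquire/release orders. That queue packs head and tail into one atomic u64 so both indices move under a single CAS. A sketch of that packing, assuming a power-of-two capacity as the pool requires; the names and the wrap-around bounds handling (which the hunks elide) are illustrative, not the compiler's API:

// Sketch of the head/tail packing used by thread_pool_queue_push/pop below:
// head lives in the upper 32 bits, tail in the lower 32, and both indices are
// reduced with a mask, so capacity must be a power of two.
#include <atomic>
#include <cstdint>

constexpr uint64_t kCapacity = 1 << 10;  // power of two, as the pool requires
constexpr uint64_t kMask     = kCapacity - 1;

inline uint64_t pack(uint64_t head, uint64_t tail) { return (head << 32) | tail; }
inline uint64_t head_of(uint64_t capture)          { return (capture >> 32) & kMask; }
inline uint64_t tail_of(uint64_t capture)          { return capture & kMask; }

// Pop advances tail with a single CAS; if a racing push or pop changed either
// index, the CAS fails, `capture` is refreshed, and the loop retries.
inline bool try_pop_index(std::atomic<uint64_t> &head_and_tail, uint64_t *out_tail) {
	uint64_t capture = head_and_tail.load(std::memory_order_acquire);
	for (;;) {
		uint64_t head = head_of(capture);
		uint64_t tail = tail_of(capture);
		if (head == tail) {
			return false;  // queue is empty
		}
		*out_tail = tail;
		uint64_t next = pack(head, (tail + 1) & kMask);
		if (head_and_tail.compare_exchange_weak(capture, next, std::memory_order_release)) {
			return true;
		}
	}
}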
diff --git a/src/thread_pool.cpp b/src/thread_pool.cpp
index 276e93dff..2c369eaad 100644
--- a/src/thread_pool.cpp
+++ b/src/thread_pool.cpp
@@ -47,7 +47,7 @@ gb_internal void thread_pool_destroy(ThreadPool *pool) {
 	for_array_off(i, 1, pool->threads) {
 		Thread *t = &pool->threads[i];
-		pool->tasks_available.fetch_add(1, std::memory_order_release);
+		pool->tasks_available.fetch_add(1, std::memory_order_relaxed);
 		futex_broadcast(&pool->tasks_available);
 		thread_join_and_destroy(t);
 	}
 
@@ -74,7 +74,7 @@ void thread_pool_queue_push(Thread *thread, WorkerTask task) {
 	} while (!thread->head_and_tail.compare_exchange_weak(capture, new_capture));
 
 	thread->pool->tasks_left.fetch_add(1, std::memory_order_release);
-	thread->pool->tasks_available.fetch_add(1, std::memory_order_release);
+	thread->pool->tasks_available.fetch_add(1, std::memory_order_relaxed);
 	futex_broadcast(&thread->pool->tasks_available);
 }
 
@@ -82,7 +82,7 @@ bool thread_pool_queue_pop(Thread *thread, WorkerTask *task) {
 	u64 capture;
 	u64 new_capture;
 	do {
-		capture = thread->head_and_tail.load();
+		capture = thread->head_and_tail.load(std::memory_order_acquire);
 
 		u64 mask = thread->capacity - 1;
 		u64 head = (capture >> 32) & mask;
@@ -97,7 +97,7 @@ bool thread_pool_queue_pop(Thread *thread, WorkerTask *task) {
 		*task = thread->queue[tail];
 
 		new_capture = (head << 32) | new_tail;
-	} while (!thread->head_and_tail.compare_exchange_weak(capture, new_capture));
+	} while (!thread->head_and_tail.compare_exchange_weak(capture, new_capture, std::memory_order_release));
 
 	return true;
 }
@@ -168,22 +168,21 @@ gb_internal THREAD_PROC(thread_pool_thread_proc) {
 			Thread *thread = &pool->threads.data[idx];
 
 			WorkerTask task;
-			if (!thread_pool_queue_pop(thread, &task)) {
-				continue;
-			}
-			task.do_work(task.data);
-			pool->tasks_left.fetch_sub(1, std::memory_order_release);
+			if (thread_pool_queue_pop(thread, &task)) {
+				task.do_work(task.data);
+				pool->tasks_left.fetch_sub(1, std::memory_order_release);
 
-			if (pool->tasks_left.load(std::memory_order_acquire) == 0) {
-				futex_signal(&pool->tasks_left);
-			}
+				if (pool->tasks_left.load(std::memory_order_acquire) == 0) {
+					futex_signal(&pool->tasks_left);
+				}
 
-			goto main_loop_continue;
+				goto main_loop_continue;
+			}
 		}
 	}
 
 	// if we've done all our work, and there's nothing to steal, go to sleep
-	state = pool->tasks_available.load();
+	state = pool->tasks_available.load(std::memory_order_acquire);
 	futex_wait(&pool->tasks_available, state);
 
 	main_loop_continue:;
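The `Parker` added to threading.cpp below is the usual tri-state futex parker (the same protocol as Rust's std thread parker): the state is Empty, Notified, or Parked; `park` either consumes a pending notification or sleeps, and `unpark_one`/`unpark_all` publish a notification and wake the sleeper. A sketch of the protocol, with C++20 atomic wait/notify standing in for the compiler's `futex_wait`/`futex_signal`; names are illustrative:

// Sketch of the tri-state parker protocol added to threading.cpp below.
#include <atomic>
#include <cstdint>

enum : int32_t {
	kEmpty    = 0,
	kNotified = 1,
	kParked   = -1,  // same bit pattern as ParkerState_Parked = UINT32_MAX
};

struct ParkerSketch {
	std::atomic<int32_t> state{kEmpty};

	void park() {
		// Fast path: consume a pending notification (Notified -> Empty)
		// without sleeping; otherwise Empty -> Parked.
		if (state.fetch_sub(1, std::memory_order_acquire) == kNotified) {
			return;
		}
		for (;;) {
			state.wait(kParked, std::memory_order_acquire);  // futex_wait
			// Consume the notification: Notified -> Empty. On a spurious wake
			// the state is still Parked, the exchange fails, and we sleep again.
			int32_t expected = kNotified;
			if (state.compare_exchange_strong(expected, kEmpty, std::memory_order_acquire)) {
				return;
			}
		}
	}

	void unpark_one() {
		// Publish the notification first, then wake the sleeper if there was one.
		if (state.exchange(kNotified, std::memory_order_release) == kParked) {
			state.notify_one();  // futex_signal
		}
	}
};

A notification queued before the thread parks is never lost: if `unpark_one` runs first, the subsequent `park` consumes the Notified state on its fast path without sleeping.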
diff --git a/src/threading.cpp b/src/threading.cpp
index 78943150e..27a17112e 100644
--- a/src/threading.cpp
+++ b/src/threading.cpp
@@ -8,10 +8,12 @@
 struct BlockingMutex;
 struct RecursiveMutex;
+struct RwMutex;
 struct Semaphore;
 struct Condition;
 struct Thread;
 struct ThreadPool;
+struct Parker;
 
 #define THREAD_PROC(name) isize name(struct Thread *thread)
 gb_internal THREAD_PROC(thread_pool_thread_proc);
 
@@ -56,6 +58,13 @@ gb_internal void mutex_lock    (RecursiveMutex *m);
 gb_internal bool mutex_try_lock(RecursiveMutex *m);
 gb_internal void mutex_unlock  (RecursiveMutex *m);
 
+gb_internal void rw_mutex_lock           (RwMutex *m);
+gb_internal bool rw_mutex_try_lock       (RwMutex *m);
+gb_internal void rw_mutex_unlock         (RwMutex *m);
+gb_internal void rw_mutex_shared_lock    (RwMutex *m);
+gb_internal bool rw_mutex_try_shared_lock(RwMutex *m);
+gb_internal void rw_mutex_shared_unlock  (RwMutex *m);
+
 gb_internal void semaphore_post   (Semaphore *s, i32 count);
 gb_internal void semaphore_wait   (Semaphore *s);
 gb_internal void semaphore_release(Semaphore *s) { semaphore_post(s, 1); }
@@ -65,6 +74,10 @@ gb_internal void condition_broadcast(Condition *c);
 gb_internal void condition_signal(Condition *c);
 gb_internal void condition_wait(Condition *c, BlockingMutex *m);
 
+gb_internal void park(Parker *p);
+gb_internal void unpark_one(Parker *p);
+gb_internal void unpark_all(Parker *p);
+
 gb_internal u32  thread_current_id(void);
 
 gb_internal void thread_init(ThreadPool *pool, Thread *t, isize idx);
@@ -205,6 +218,30 @@ gb_internal void semaphore_wait(Semaphore *s) {
 	gb_internal void condition_wait(Condition *c, BlockingMutex *m) {
 		SleepConditionVariableSRW(&c->cond, &m->srwlock, INFINITE, 0);
 	}
+
+	struct RwMutex {
+		SRWLOCK srwlock;
+	};
+
+	gb_internal void rw_mutex_lock(RwMutex *m) {
+		AcquireSRWLockExclusive(&m->srwlock);
+	}
+	gb_internal bool rw_mutex_try_lock(RwMutex *m) {
+		return !!TryAcquireSRWLockExclusive(&m->srwlock);
+	}
+	gb_internal void rw_mutex_unlock(RwMutex *m) {
+		ReleaseSRWLockExclusive(&m->srwlock);
+	}
+
+	gb_internal void rw_mutex_shared_lock(RwMutex *m) {
+		AcquireSRWLockShared(&m->srwlock);
+	}
+	gb_internal bool rw_mutex_try_shared_lock(RwMutex *m) {
+		return !!TryAcquireSRWLockShared(&m->srwlock);
+	}
+	gb_internal void rw_mutex_shared_unlock(RwMutex *m) {
+		ReleaseSRWLockShared(&m->srwlock);
+	}
 #else
 enum Internal_Mutex_State : i32 {
 	Internal_Mutex_State_Unlocked = 0,
@@ -306,8 +343,67 @@ gb_internal void semaphore_wait(Semaphore *s) {
 		futex_wait(&c->state(), state);
 		mutex_lock(m);
 	}
+
+	struct RwMutex {
+		// TODO(bill): make this a proper RW mutex
+		BlockingMutex mutex;
+	};
+
+	gb_internal void rw_mutex_lock(RwMutex *m) {
+		mutex_lock(&m->mutex);
+	}
+	gb_internal bool rw_mutex_try_lock(RwMutex *m) {
+		return mutex_try_lock(&m->mutex);
+	}
+	gb_internal void rw_mutex_unlock(RwMutex *m) {
+		mutex_unlock(&m->mutex);
+	}
+
+	gb_internal void rw_mutex_shared_lock(RwMutex *m) {
+		mutex_lock(&m->mutex);
+	}
+	gb_internal bool rw_mutex_try_shared_lock(RwMutex *m) {
+		return mutex_try_lock(&m->mutex);
+	}
+	gb_internal void rw_mutex_shared_unlock(RwMutex *m) {
+		mutex_unlock(&m->mutex);
+	}
 #endif
 
+struct Parker {
+	Futex state;
+};
+enum ParkerState : u32 {
+	ParkerState_Empty    = 0,
+	ParkerState_Notified = 1,
+	ParkerState_Parked   = UINT32_MAX,
+};
+
+gb_internal void park(Parker *p) {
+	if (p->state.fetch_sub(1, std::memory_order_acquire) == ParkerState_Notified) {
+		return;
+	}
+	for (;;) {
+		futex_wait(&p->state, ParkerState_Parked);
+		i32 notified = ParkerState_Notified;
+		if (p->state.compare_exchange_strong(notified, ParkerState_Empty, std::memory_order_acquire, std::memory_order_acquire)) {
+			return;
+		}
+	}
+}
+
+gb_internal void unpark_one(Parker *p) {
+	if (p->state.exchange(ParkerState_Notified, std::memory_order_release) == ParkerState_Parked) {
+		futex_signal(&p->state);
+	}
+}
+
+gb_internal void unpark_all(Parker *p) {
+	if (p->state.exchange(ParkerState_Notified, std::memory_order_release) == ParkerState_Parked) {
+		futex_broadcast(&p->state);
+	}
+}
+
 gb_internal u32 thread_current_id(void) {
 	u32 thread_id;