From 60d0390ef8ceabb0567ee1ba968fdaf2024d34bf Mon Sep 17 00:00:00 2001 From: gingerBill Date: Sun, 1 Jan 2023 14:48:31 +0000 Subject: [PATCH 01/78] Unify compiler `Futex` interface --- src/thread_pool.cpp | 6 +++--- src/threading.cpp | 43 +++++++++++++++++-------------------------- 2 files changed, 20 insertions(+), 29 deletions(-) diff --git a/src/thread_pool.cpp b/src/thread_pool.cpp index 3565ef25a..57ed5e3c5 100644 --- a/src/thread_pool.cpp +++ b/src/thread_pool.cpp @@ -136,7 +136,7 @@ gb_internal void thread_pool_wait(ThreadPool *pool) { break; } - tpool_wait_on_addr(&pool->tasks_left, rem_tasks); + futex_wait(&pool->tasks_left, rem_tasks); } } @@ -160,7 +160,7 @@ work_start: finished_tasks += 1; } if (finished_tasks > 0 && !pool->tasks_left) { - tpool_wake_addr(&pool->tasks_left); + futex_signal(&pool->tasks_left); } // If there's still work somewhere and we don't have it, steal it @@ -183,7 +183,7 @@ work_start: pool->tasks_left.fetch_sub(1); if (!pool->tasks_left) { - tpool_wake_addr(&pool->tasks_left); + futex_signal(&pool->tasks_left); } goto work_start; diff --git a/src/threading.cpp b/src/threading.cpp index 493e57c91..cb3150c2a 100644 --- a/src/threading.cpp +++ b/src/threading.cpp @@ -41,6 +41,11 @@ struct Thread { struct ThreadPool *pool; }; +typedef std::atomic Futex; +typedef volatile int32_t Footex; + +gb_internal void futex_wait(Futex *addr, Footex val); +gb_internal void futex_signal(Futex *addr); gb_internal void mutex_init (BlockingMutex *m); gb_internal void mutex_destroy (BlockingMutex *m); @@ -441,12 +446,9 @@ gb_internal void thread_set_name(Thread *t, char const *name) { #include #include -typedef std::atomic Futex; -typedef volatile int32_t Footex; - -gb_internal void tpool_wake_addr(Futex *addr) { +gb_internal void futex_signal(Futex *addr) { for (;;) { - int ret = syscall(SYS_futex, addr, FUTEX_WAKE, 1, NULL, NULL, 0); + int ret = syscall(SYS_futex, addr, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1, NULL, NULL, 0); if (ret == -1) { 
perror("Futex wake"); GB_PANIC("Failed in futex wake!\n"); @@ -456,9 +458,9 @@ gb_internal void tpool_wake_addr(Futex *addr) { } } -gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) { +gb_internal void futex_wait(Futex *addr, Footex val) { for (;;) { - int ret = syscall(SYS_futex, addr, FUTEX_WAIT, val, NULL, NULL, 0); + int ret = syscall(SYS_futex, addr, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, NULL, NULL, 0); if (ret == -1) { if (errno != EAGAIN) { perror("Futex wait"); @@ -479,14 +481,11 @@ gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) { #include #include -typedef std::atomic Futex; -typedef volatile int32_t Footex; - -gb_internal void tpool_wake_addr(Futex *addr) { +gb_internal void futex_signal(Futex *addr) { _umtx_op(addr, UMTX_OP_WAKE, 1, 0, 0); } -gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) { +gb_internal void futex_wait(Futex *addr, Footex val) { for (;;) { int ret = _umtx_op(addr, UMTX_OP_WAIT_UINT, val, 0, NULL); if (ret == 0) { @@ -508,10 +507,7 @@ gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) { #include -typedef std::atomic Futex; -typedef volatile int32_t Footex; - -gb_internal void tpool_wake_addr(Futex *addr) { +gb_internal void futex_signal(Futex *addr) { for (;;) { int ret = futex((volatile uint32_t *)addr, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1, NULL, NULL); if (ret == -1) { @@ -527,7 +523,7 @@ gb_internal void tpool_wake_addr(Futex *addr) { } } -gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) { +gb_internal void futex_wait(Futex *addr, Footex val) { for (;;) { int ret = futex((volatile uint32_t *)addr, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, NULL, NULL); if (ret == -1) { @@ -547,16 +543,13 @@ gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) { #elif defined(GB_SYSTEM_OSX) -typedef std::atomic Futex; -typedef volatile int64_t Footex; - #define UL_COMPARE_AND_WAIT 0x00000001 #define ULF_NO_ERRNO 0x01000000 extern "C" int __ulock_wait(uint32_t operation, void *addr, uint64_t 
value, uint32_t timeout); /* timeout is specified in microseconds */ extern "C" int __ulock_wake(uint32_t operation, void *addr, uint64_t wake_value); -gb_internal void tpool_wake_addr(Futex *addr) { +gb_internal void futex_signal(Futex *addr) { for (;;) { int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, addr, 0); if (ret >= 0) { @@ -572,7 +565,7 @@ gb_internal void tpool_wake_addr(Futex *addr) { } } -gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) { +gb_internal void futex_wait(Futex *addr, Footex val) { for (;;) { int ret = __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, addr, val, 0); if (ret >= 0) { @@ -592,14 +585,12 @@ gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) { } } #elif defined(GB_SYSTEM_WINDOWS) -typedef std::atomic Futex; -typedef volatile int64_t Footex; -gb_internal void tpool_wake_addr(Futex *addr) { +gb_internal void futex_signal(Futex *addr) { WakeByAddressSingle((void *)addr); } -gb_internal void tpool_wait_on_addr(Futex *addr, Footex val) { +gb_internal void futex_wait(Futex *addr, Footex val) { for (;;) { WaitOnAddress(addr, (void *)&val, sizeof(val), INFINITE); if (*addr != val) break; From 20d451396d68d749b6c7ce762bc14e44f219e299 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Sun, 1 Jan 2023 15:06:57 +0000 Subject: [PATCH 02/78] Begin work on futex-ifying the threading primitives --- src/threading.cpp | 172 +++++++++++++++++++++++++++++++++++----------- 1 file changed, 133 insertions(+), 39 deletions(-) diff --git a/src/threading.cpp b/src/threading.cpp index cb3150c2a..646c0e93b 100644 --- a/src/threading.cpp +++ b/src/threading.cpp @@ -41,11 +41,12 @@ struct Thread { struct ThreadPool *pool; }; -typedef std::atomic Futex; -typedef volatile int32_t Footex; +typedef std::atomic Futex; +typedef volatile i32 Footex; gb_internal void futex_wait(Futex *addr, Footex val); gb_internal void futex_signal(Futex *addr); +gb_internal void futex_broadcast(Futex *addr); gb_internal void mutex_init (BlockingMutex 
*m); gb_internal void mutex_destroy (BlockingMutex *m); @@ -117,6 +118,82 @@ struct MutexGuard { #define MUTEX_GUARD(m) MutexGuard GB_DEFER_3(_mutex_guard_){m} +struct RecursiveMutex { + Futex owner; + i32 recursion; +}; +gb_internal void mutex_init(RecursiveMutex *m) { + +} +gb_internal void mutex_destroy(RecursiveMutex *m) { + +} +gb_internal void mutex_lock(RecursiveMutex *m) { + Futex tid = cast(i32)thread_current_id(); + for (;;) { + i32 prev_owner = 0; + m->owner.compare_exchange_strong(prev_owner, tid, std::memory_order_acquire, std::memory_order_acquire); + if (prev_owner == 0 || prev_owner == tid) { + m->recursion++; + // inside the lock + return; + } + futex_wait(&m->owner, prev_owner); + } +} +gb_internal bool mutex_try_lock(RecursiveMutex *m) { + Futex tid = cast(i32)thread_current_id(); + i32 prev_owner = 0; + m->owner.compare_exchange_strong(prev_owner, tid, std::memory_order_acquire, std::memory_order_acquire); + if (prev_owner == 0 || prev_owner == tid) { + m->recursion++; + // inside the lock + return true; + } + return false; +} +gb_internal void mutex_unlock(RecursiveMutex *m) { + m->recursion--; + if (m->recursion != 0) { + return; + } + m->owner.exchange(0, std::memory_order_release); + futex_signal(&m->owner); + // outside the lock +} + +struct Semaphore { + Futex count; +}; + +gb_internal void semaphore_init(Semaphore *s) { + +} +gb_internal void semaphore_destroy(Semaphore *s) { + +} +gb_internal void semaphore_post(Semaphore *s, i32 count) { + s->count.fetch_add(count, std::memory_order_release); + if (s->count == 1) { + futex_signal(&s->count); + } else { + futex_broadcast(&s->count); + } +} +gb_internal void semaphore_wait(Semaphore *s) { + for (;;) { + i32 original_count = s->count.load(std::memory_order_relaxed); + while (original_count == 0) { + futex_wait(&s->count, original_count); + original_count = s->count; + } + + if (!s->count.compare_exchange_strong(original_count, original_count-1, std::memory_order_acquire, 
std::memory_order_acquire)) { + return; + } + } +} + #if defined(GB_SYSTEM_WINDOWS) struct BlockingMutex { SRWLOCK srwlock; @@ -135,42 +212,6 @@ struct MutexGuard { ReleaseSRWLockExclusive(&m->srwlock); } - struct RecursiveMutex { - CRITICAL_SECTION win32_critical_section; - }; - gb_internal void mutex_init(RecursiveMutex *m) { - InitializeCriticalSection(&m->win32_critical_section); - } - gb_internal void mutex_destroy(RecursiveMutex *m) { - DeleteCriticalSection(&m->win32_critical_section); - } - gb_internal void mutex_lock(RecursiveMutex *m) { - EnterCriticalSection(&m->win32_critical_section); - } - gb_internal bool mutex_try_lock(RecursiveMutex *m) { - return TryEnterCriticalSection(&m->win32_critical_section) != 0; - } - gb_internal void mutex_unlock(RecursiveMutex *m) { - LeaveCriticalSection(&m->win32_critical_section); - } - - struct Semaphore { - void *win32_handle; - }; - - gb_internal void semaphore_init(Semaphore *s) { - s->win32_handle = CreateSemaphoreA(NULL, 0, I32_MAX, NULL); - } - gb_internal void semaphore_destroy(Semaphore *s) { - CloseHandle(s->win32_handle); - } - gb_internal void semaphore_post(Semaphore *s, i32 count) { - ReleaseSemaphore(s->win32_handle, count, NULL); - } - gb_internal void semaphore_wait(Semaphore *s) { - WaitForSingleObjectEx(s->win32_handle, INFINITE, FALSE); - } - struct Condition { CONDITION_VARIABLE cond; }; @@ -458,6 +499,18 @@ gb_internal void futex_signal(Futex *addr) { } } +gb_internal void futex_broadcast(Futex *addr) { + for (;;) { + int ret = syscall(SYS_futex, addr, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, INT32_MAX, NULL, NULL, 0); + if (ret == -1) { + perror("Futex wake"); + GB_PANIC("Failed in futex wake!\n"); + } else if (ret > 0) { + return; + } + } +} + gb_internal void futex_wait(Futex *addr, Footex val) { for (;;) { int ret = syscall(SYS_futex, addr, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, NULL, NULL, 0); @@ -485,6 +538,10 @@ gb_internal void futex_signal(Futex *addr) { _umtx_op(addr, UMTX_OP_WAKE, 1, 0, 0); } 
+gb_internal void futex_broadcast(Futex *addr) { + _umtx_op(addr, UMTX_OP_WAKE, INT32_MAX, 0, 0); +} + gb_internal void futex_wait(Futex *addr, Footex val) { for (;;) { int ret = _umtx_op(addr, UMTX_OP_WAIT_UINT, val, 0, NULL); @@ -523,6 +580,23 @@ gb_internal void futex_signal(Futex *addr) { } } + +gb_internal void futex_broadcast(Futex *addr) { + for (;;) { + int ret = futex((volatile uint32_t *)addr, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, INT32_MAX, NULL, NULL); + if (ret == -1) { + if (errno == ETIMEDOUT || errno == EINTR) { + continue; + } + + perror("Futex wake"); + GB_PANIC("futex wake fail"); + } else if (ret == 1) { + return; + } + } +} + gb_internal void futex_wait(Futex *addr, Footex val) { for (;;) { int ret = futex((volatile uint32_t *)addr, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, NULL, NULL); @@ -565,9 +639,25 @@ gb_internal void futex_signal(Futex *addr) { } } +gb_internal void futex_broadcast(Futex *addr) { + for (;;) { + int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, addr, 0); + if (ret >= 0) { + return; + } + if (ret == EINTR || ret == EFAULT) { + continue; + } + if (ret == ENOENT) { + return; + } + GB_PANIC("Failed in futex wake!\n"); + } +} + gb_internal void futex_wait(Futex *addr, Footex val) { for (;;) { - int ret = __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, addr, val, 0); + int ret = __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO | ULF_WAKE_ALL, addr, val, 0); if (ret >= 0) { if (*addr != val) { return; @@ -590,6 +680,10 @@ gb_internal void futex_signal(Futex *addr) { WakeByAddressSingle((void *)addr); } +gb_internal void futex_broadcast(Futex *addr) { + WakeByAddressAll((void *)addr); +} + gb_internal void futex_wait(Futex *addr, Footex val) { for (;;) { WaitOnAddress(addr, (void *)&val, sizeof(val), INFINITE); From 74e6d9144e9a0afd9c29b0edec8c6ed2960efde4 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Sun, 1 Jan 2023 16:15:35 +0000 Subject: [PATCH 03/78] Get around the std::atomic issue --- src/threading.cpp | 162 
+++++++++++++++++++++++++--------------------- 1 file changed, 89 insertions(+), 73 deletions(-) diff --git a/src/threading.cpp b/src/threading.cpp index 646c0e93b..7dd1247e7 100644 --- a/src/threading.cpp +++ b/src/threading.cpp @@ -234,95 +234,111 @@ gb_internal void semaphore_wait(Semaphore *s) { } #else - struct BlockingMutex { - pthread_mutex_t pthread_mutex; + enum Internal_Mutex_State : i32 { + Internal_Mutex_State_Unlocked = 0, + Internal_Mutex_State_Locked = 1, + Internal_Mutex_State_Waiting = 2, }; - gb_internal void mutex_init(BlockingMutex *m) { - pthread_mutex_init(&m->pthread_mutex, nullptr); - } - gb_internal void mutex_destroy(BlockingMutex *m) { - pthread_mutex_destroy(&m->pthread_mutex); + + struct BlockingMutex { + i32 state_; + + Futex &state() { + return *(Futex *)&this->state_; + } + Futex const &state() const { + return *(Futex const *)&this->state_; + } + }; + + gb_internal void mutex_init(BlockingMutex *m) {}; + gb_internal void mutex_destroy(BlockingMutex *m) {}; + + gb_no_inline gb_internal void mutex_lock_slow(BlockingMutex *m, i32 curr_state) { + i32 new_state = curr_state; + for (i32 spin = 0; spin < 100; spin++) { + i32 state = Internal_Mutex_State_Unlocked; + bool ok = m->state().compare_exchange_weak(state, new_state, std::memory_order_acquire, std::memory_order_consume); + if (ok) { + return; + } + if (state == Internal_Mutex_State_Waiting) { + break; + } + for (i32 i = gb_min(spin+1, 32); i > 0; i--) { + yield_thread(); + } + } + + // Set just in case 100 iterations did not do it + new_state = Internal_Mutex_State_Waiting; + + for (;;) { + if (m->state().exchange(Internal_Mutex_State_Waiting, std::memory_order_acquire) == Internal_Mutex_State_Unlocked) { + return; + } + futex_wait(&m->state(), new_state); + yield_thread(); + } } + gb_internal void mutex_lock(BlockingMutex *m) { - pthread_mutex_lock(&m->pthread_mutex); + i32 v = m->state().exchange(Internal_Mutex_State_Locked, std::memory_order_acquire); + if (v != 
Internal_Mutex_State_Unlocked) { + mutex_lock_slow(m, v); + } } gb_internal bool mutex_try_lock(BlockingMutex *m) { - return pthread_mutex_trylock(&m->pthread_mutex) == 0; + i32 v = m->state().exchange(Internal_Mutex_State_Locked, std::memory_order_acquire); + return v == Internal_Mutex_State_Unlocked; } + + gb_no_inline gb_internal void mutex_unlock_slow(BlockingMutex *m) { + futex_signal(&m->state()); + } + gb_internal void mutex_unlock(BlockingMutex *m) { - pthread_mutex_unlock(&m->pthread_mutex); + i32 v = m->state().exchange(Internal_Mutex_State_Unlocked, std::memory_order_release); + switch (v) { + case Internal_Mutex_State_Unlocked: + GB_PANIC("Unreachable"); + break; + case Internal_Mutex_State_Locked: + // Okay + break; + case Internal_Mutex_State_Waiting: + mutex_unlock_slow(m); + break; + } } - - struct RecursiveMutex { - pthread_mutex_t pthread_mutex; - pthread_mutexattr_t pthread_mutexattr; - }; - gb_internal void mutex_init(RecursiveMutex *m) { - pthread_mutexattr_init(&m->pthread_mutexattr); - pthread_mutexattr_settype(&m->pthread_mutexattr, PTHREAD_MUTEX_RECURSIVE); - pthread_mutex_init(&m->pthread_mutex, &m->pthread_mutexattr); - } - gb_internal void mutex_destroy(RecursiveMutex *m) { - pthread_mutex_destroy(&m->pthread_mutex); - } - gb_internal void mutex_lock(RecursiveMutex *m) { - pthread_mutex_lock(&m->pthread_mutex); - } - gb_internal bool mutex_try_lock(RecursiveMutex *m) { - return pthread_mutex_trylock(&m->pthread_mutex) == 0; - } - gb_internal void mutex_unlock(RecursiveMutex *m) { - pthread_mutex_unlock(&m->pthread_mutex); - } - - #if defined(GB_SYSTEM_OSX) - struct Semaphore { - semaphore_t osx_handle; - }; - - gb_internal void semaphore_init (Semaphore *s) { semaphore_create(mach_task_self(), &s->osx_handle, SYNC_POLICY_FIFO, 0); } - gb_internal void semaphore_destroy(Semaphore *s) { semaphore_destroy(mach_task_self(), s->osx_handle); } - gb_internal void semaphore_post (Semaphore *s, i32 count) { while (count --> 0) 
semaphore_signal(s->osx_handle); } - gb_internal void semaphore_wait (Semaphore *s) { semaphore_wait(s->osx_handle); } - #elif defined(GB_SYSTEM_UNIX) - struct Semaphore { - sem_t unix_handle; - }; - - gb_internal void semaphore_init (Semaphore *s) { sem_init(&s->unix_handle, 0, 0); } - gb_internal void semaphore_destroy(Semaphore *s) { sem_destroy(&s->unix_handle); } - gb_internal void semaphore_post (Semaphore *s, i32 count) { while (count --> 0) sem_post(&s->unix_handle); } - void semaphore_wait (Semaphore *s) { int i; do { i = sem_wait(&s->unix_handle); } while (i == -1 && errno == EINTR); } - #else - #error Implement Semaphore for this platform - #endif - struct Condition { - pthread_cond_t pthread_cond; + i32 state_; + + Futex &state() { + return *(Futex *)&this->state_; + } + Futex const &state() const { + return *(Futex const *)&this->state_; + } }; - - gb_internal void condition_init(Condition *c) { - pthread_cond_init(&c->pthread_cond, NULL); - } - gb_internal void condition_destroy(Condition *c) { - pthread_cond_destroy(&c->pthread_cond); - } + + gb_internal void condition_init(Condition *c) {} + gb_internal void condition_destroy(Condition *c) {} + gb_internal void condition_broadcast(Condition *c) { - pthread_cond_broadcast(&c->pthread_cond); + c->state().fetch_add(1, std::memory_order_release); + futex_broadcast(&c->state()); } gb_internal void condition_signal(Condition *c) { - pthread_cond_signal(&c->pthread_cond); + c->state().fetch_add(1, std::memory_order_release); + futex_signal(&c->state()); } gb_internal void condition_wait(Condition *c, BlockingMutex *m) { - pthread_cond_wait(&c->pthread_cond, &m->pthread_mutex); - } - gb_internal void condition_wait_with_timeout(Condition *c, BlockingMutex *m, u32 timeout_in_ms) { - struct timespec abstime = {}; - abstime.tv_sec = timeout_in_ms/1000; - abstime.tv_nsec = cast(long)(timeout_in_ms%1000)*1e6; - pthread_cond_timedwait(&c->pthread_cond, &m->pthread_mutex, &abstime); - + i32 state = 
c->state().load(std::memory_order_relaxed); + mutex_unlock(m); + futex_wait(&c->state(), state); + mutex_lock(m); } #endif From 5c519f0e8dada6b15166a257d22a07f2316a394f Mon Sep 17 00:00:00 2001 From: gingerBill Date: Sun, 1 Jan 2023 16:19:21 +0000 Subject: [PATCH 04/78] Remove the synchronization primitive init/destroy calls --- src/build_settings.cpp | 1 - src/checker.cpp | 36 ------------------------------------ src/common_memory.cpp | 2 -- src/entity.cpp | 1 - src/error.cpp | 4 ---- src/llvm_backend_general.cpp | 1 - src/main.cpp | 5 ----- src/parser.cpp | 11 ----------- src/queue.cpp | 2 -- src/string.cpp | 5 ----- src/thread_pool.cpp | 5 ----- src/threading.cpp | 35 +---------------------------------- src/types.cpp | 4 ---- 13 files changed, 1 insertion(+), 111 deletions(-) diff --git a/src/build_settings.cpp b/src/build_settings.cpp index 080e9dddc..97b512b81 100644 --- a/src/build_settings.cpp +++ b/src/build_settings.cpp @@ -1363,7 +1363,6 @@ gb_internal bool init_build_paths(String init_filename) { array_init(&bc->build_paths, permanent_allocator(), BuildPathCOUNT); string_set_init(&bc->target_features_set, heap_allocator(), 1024); - mutex_init(&bc->target_features_mutex); // [BuildPathMainPackage] Turn given init path into a `Path`, which includes normalizing it into a full path. 
bc->build_paths[BuildPath_Main_Package] = path_from_string(ha, init_filename); diff --git a/src/checker.cpp b/src/checker.cpp index b78da2827..7141b0698 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -184,7 +184,6 @@ gb_internal void init_decl_info(DeclInfo *d, Scope *scope, DeclInfo *parent) { ptr_set_init(&d->deps, heap_allocator()); ptr_set_init(&d->type_info_deps, heap_allocator()); array_init (&d->labels, heap_allocator()); - mutex_init(&d->proc_checked_mutex); } gb_internal DeclInfo *make_decl_info(Scope *scope, DeclInfo *parent) { @@ -225,7 +224,6 @@ gb_internal Scope *create_scope(CheckerInfo *info, Scope *parent, isize init_ele s->parent = parent; string_map_init(&s->elements, heap_allocator(), init_elements_capacity); ptr_set_init(&s->imported, heap_allocator(), 0); - mutex_init(&s->mutex); if (parent != nullptr && parent != builtin_pkg->scope) { Scope *prev_head_child = parent->head_child.exchange(s, std::memory_order_acq_rel); @@ -306,7 +304,6 @@ gb_internal void destroy_scope(Scope *scope) { string_map_destroy(&scope->elements); ptr_set_destroy(&scope->imported); - mutex_destroy(&scope->mutex); // NOTE(bill): No need to free scope as it "should" be allocated in an arena (except for the global scope) } @@ -1134,24 +1131,9 @@ gb_internal void init_checker_info(CheckerInfo *i) { TIME_SECTION("checker info: mutexes"); - mutex_init(&i->gen_procs_mutex); - mutex_init(&i->gen_types_mutex); - mutex_init(&i->lazy_mutex); - mutex_init(&i->builtin_mutex); - mutex_init(&i->global_untyped_mutex); - mutex_init(&i->type_info_mutex); - mutex_init(&i->deps_mutex); - mutex_init(&i->type_and_value_mutex); - mutex_init(&i->identifier_uses_mutex); - mutex_init(&i->foreign_mutex); - - semaphore_init(&i->collect_semaphore); - mpmc_init(&i->intrinsics_entry_point_usage, a, 1<<10); // just waste some memory here, even if it probably never used - mutex_init(&i->objc_types_mutex); map_init(&i->objc_msgSend_types, a); - mutex_init(&i->load_file_mutex); 
string_map_init(&i->load_file_cache, a); } @@ -1175,20 +1157,7 @@ gb_internal void destroy_checker_info(CheckerInfo *i) { mpmc_destroy(&i->required_global_variable_queue); mpmc_destroy(&i->required_foreign_imports_through_force_queue); - mutex_destroy(&i->gen_procs_mutex); - mutex_destroy(&i->gen_types_mutex); - mutex_destroy(&i->lazy_mutex); - mutex_destroy(&i->builtin_mutex); - mutex_destroy(&i->global_untyped_mutex); - mutex_destroy(&i->type_info_mutex); - mutex_destroy(&i->deps_mutex); - mutex_destroy(&i->type_and_value_mutex); - mutex_destroy(&i->identifier_uses_mutex); - mutex_destroy(&i->foreign_mutex); - - mutex_destroy(&i->objc_types_mutex); map_destroy(&i->objc_msgSend_types); - mutex_init(&i->load_file_mutex); string_map_destroy(&i->load_file_cache); } @@ -1201,11 +1170,9 @@ gb_internal CheckerContext make_checker_context(Checker *c) { ctx.type_path = new_checker_type_path(); ctx.type_level = 0; - mutex_init(&ctx.mutex); return ctx; } gb_internal void destroy_checker_context(CheckerContext *ctx) { - mutex_destroy(&ctx->mutex); destroy_checker_type_path(ctx->type_path); } @@ -1264,7 +1231,6 @@ gb_internal void init_checker(Checker *c) { // NOTE(bill): 1 Mi elements should be enough on average mpmc_init(&c->procs_to_check_queue, heap_allocator(), 1<<20); - semaphore_init(&c->procs_to_check_semaphore); mpmc_init(&c->global_untyped_queue, a, 1<<20); @@ -1277,8 +1243,6 @@ gb_internal void destroy_checker(Checker *c) { destroy_checker_context(&c->builtin_ctx); mpmc_destroy(&c->procs_to_check_queue); - semaphore_destroy(&c->procs_to_check_semaphore); - mpmc_destroy(&c->global_untyped_queue); } diff --git a/src/common_memory.cpp b/src/common_memory.cpp index c8a62756a..2022554cf 100644 --- a/src/common_memory.cpp +++ b/src/common_memory.cpp @@ -42,8 +42,6 @@ gb_global BlockingMutex global_memory_allocator_mutex; gb_internal void platform_virtual_memory_init(void); gb_internal void virtual_memory_init(void) { - mutex_init(&global_memory_block_mutex); - 
mutex_init(&global_memory_allocator_mutex); platform_virtual_memory_init(); } diff --git a/src/entity.cpp b/src/entity.cpp index 0605a293a..f82a2fb05 100644 --- a/src/entity.cpp +++ b/src/entity.cpp @@ -154,7 +154,6 @@ struct TypeNameObjCMetadata { gb_internal TypeNameObjCMetadata *create_type_name_obj_c_metadata() { TypeNameObjCMetadata *md = gb_alloc_item(permanent_allocator(), TypeNameObjCMetadata); md->mutex = gb_alloc_item(permanent_allocator(), BlockingMutex); - mutex_init(md->mutex); array_init(&md->type_entries, heap_allocator()); array_init(&md->value_entries, heap_allocator()); return md; diff --git a/src/error.cpp b/src/error.cpp index 085e1a8dd..a0bb4ad5b 100644 --- a/src/error.cpp +++ b/src/error.cpp @@ -22,10 +22,6 @@ gb_internal bool any_errors(void) { } gb_internal void init_global_error_collector(void) { - mutex_init(&global_error_collector.mutex); - mutex_init(&global_error_collector.block_mutex); - mutex_init(&global_error_collector.error_out_mutex); - mutex_init(&global_error_collector.string_mutex); array_init(&global_error_collector.errors, heap_allocator()); array_init(&global_error_collector.error_buffer, heap_allocator()); array_init(&global_file_path_strings, heap_allocator(), 1, 4096); diff --git a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp index e5aa95f10..0508c6171 100644 --- a/src/llvm_backend_general.cpp +++ b/src/llvm_backend_general.cpp @@ -132,7 +132,6 @@ gb_internal bool lb_init_generator(lbGenerator *gen, Checker *c) { map_init(&gen->anonymous_proc_lits, heap_allocator(), 1024); - mutex_init(&gen->foreign_mutex); array_init(&gen->foreign_libraries, heap_allocator(), 0, 1024); ptr_set_init(&gen->foreign_libraries_set, heap_allocator(), 1024); diff --git a/src/main.cpp b/src/main.cpp index 6d910c7bf..184ab471e 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -2498,15 +2498,10 @@ int main(int arg_count, char const **arg_ptr) { MAIN_TIME_SECTION("initialization"); virtual_memory_init(); - mutex_init(&fullpath_mutex); 
- mutex_init(&hash_exact_value_mutex); - mutex_init(&global_type_name_objc_metadata_mutex); - init_string_buffer_memory(); init_string_interner(); init_global_error_collector(); init_keyword_hash_table(); - init_type_mutex(); if (!check_env()) { return 1; diff --git a/src/parser.cpp b/src/parser.cpp index e07f26004..344dcb20d 100644 --- a/src/parser.cpp +++ b/src/parser.cpp @@ -4858,10 +4858,6 @@ gb_internal bool init_parser(Parser *p) { GB_ASSERT(p != nullptr); string_set_init(&p->imported_files, heap_allocator()); array_init(&p->packages, heap_allocator()); - mutex_init(&p->imported_files_mutex); - mutex_init(&p->file_decl_mutex); - mutex_init(&p->packages_mutex); - mutex_init(&p->file_error_mutex); return true; } @@ -4878,10 +4874,6 @@ gb_internal void destroy_parser(Parser *p) { } array_free(&p->packages); string_set_destroy(&p->imported_files); - mutex_destroy(&p->imported_files_mutex); - mutex_destroy(&p->file_decl_mutex); - mutex_destroy(&p->packages_mutex); - mutex_destroy(&p->file_error_mutex); } @@ -4978,9 +4970,6 @@ gb_internal AstPackage *try_add_import_path(Parser *p, String const &path, Strin pkg->fullpath = path; array_init(&pkg->files, heap_allocator()); pkg->foreign_files.allocator = heap_allocator(); - mutex_init(&pkg->files_mutex); - mutex_init(&pkg->foreign_files_mutex); - // NOTE(bill): Single file initial package if (kind == Package_Init && string_ends_with(path, FILE_EXT)) { diff --git a/src/queue.cpp b/src/queue.cpp index 4de5ac5e5..8f279bb21 100644 --- a/src/queue.cpp +++ b/src/queue.cpp @@ -52,7 +52,6 @@ gb_internal void mpmc_init(MPMCQueue *q, gbAllocator a, isize size_i) { size = next_pow2(size); GB_ASSERT(gb_is_power_of_two(size)); - mutex_init(&q->mutex); q->mask = size-1; q->allocator = a; q->nodes = gb_alloc_array(a, T, size); @@ -65,7 +64,6 @@ gb_internal void mpmc_init(MPMCQueue *q, gbAllocator a, isize size_i) { template gb_internal void mpmc_destroy(MPMCQueue *q) { - mutex_destroy(&q->mutex); gb_free(q->allocator, q->nodes); 
gb_free(q->allocator, q->indices); } diff --git a/src/string.cpp b/src/string.cpp index 8cce0f1ef..a2254d100 100644 --- a/src/string.cpp +++ b/src/string.cpp @@ -1,10 +1,5 @@ gb_global BlockingMutex string_buffer_mutex = {}; -gb_internal void init_string_buffer_memory(void) { - mutex_init(&string_buffer_mutex); -} - - // NOTE(bill): Used for UTF-8 strings struct String { u8 * text; diff --git a/src/thread_pool.cpp b/src/thread_pool.cpp index 57ed5e3c5..522b96d09 100644 --- a/src/thread_pool.cpp +++ b/src/thread_pool.cpp @@ -23,9 +23,6 @@ struct ThreadPool { }; gb_internal void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize thread_count, char const *worker_name) { - mutex_init(&pool->task_lock); - condition_init(&pool->tasks_available); - pool->allocator = a; slice_init(&pool->threads, a, thread_count + 1); @@ -54,8 +51,6 @@ gb_internal void thread_pool_destroy(ThreadPool *pool) { } gb_free(pool->allocator, pool->threads.data); - mutex_destroy(&pool->task_lock); - condition_destroy(&pool->tasks_available); } void thread_pool_queue_push(Thread *thread, WorkerTask task) { diff --git a/src/threading.cpp b/src/threading.cpp index 7dd1247e7..fb71a2c29 100644 --- a/src/threading.cpp +++ b/src/threading.cpp @@ -48,30 +48,22 @@ gb_internal void futex_wait(Futex *addr, Footex val); gb_internal void futex_signal(Futex *addr); gb_internal void futex_broadcast(Futex *addr); -gb_internal void mutex_init (BlockingMutex *m); -gb_internal void mutex_destroy (BlockingMutex *m); gb_internal void mutex_lock (BlockingMutex *m); gb_internal bool mutex_try_lock(BlockingMutex *m); gb_internal void mutex_unlock (BlockingMutex *m); -gb_internal void mutex_init (RecursiveMutex *m); -gb_internal void mutex_destroy (RecursiveMutex *m); + gb_internal void mutex_lock (RecursiveMutex *m); gb_internal bool mutex_try_lock(RecursiveMutex *m); gb_internal void mutex_unlock (RecursiveMutex *m); -gb_internal void semaphore_init (Semaphore *s); -gb_internal void 
semaphore_destroy(Semaphore *s); gb_internal void semaphore_post (Semaphore *s, i32 count); gb_internal void semaphore_wait (Semaphore *s); gb_internal void semaphore_release(Semaphore *s) { semaphore_post(s, 1); } -gb_internal void condition_init(Condition *c); -gb_internal void condition_destroy(Condition *c); gb_internal void condition_broadcast(Condition *c); gb_internal void condition_signal(Condition *c); gb_internal void condition_wait(Condition *c, BlockingMutex *m); -gb_internal void condition_wait_with_timeout(Condition *c, BlockingMutex *m, u32 timeout_in_ms); gb_internal u32 thread_current_id(void); @@ -122,12 +114,7 @@ struct RecursiveMutex { Futex owner; i32 recursion; }; -gb_internal void mutex_init(RecursiveMutex *m) { -} -gb_internal void mutex_destroy(RecursiveMutex *m) { - -} gb_internal void mutex_lock(RecursiveMutex *m) { Futex tid = cast(i32)thread_current_id(); for (;;) { @@ -166,12 +153,6 @@ struct Semaphore { Futex count; }; -gb_internal void semaphore_init(Semaphore *s) { - -} -gb_internal void semaphore_destroy(Semaphore *s) { - -} gb_internal void semaphore_post(Semaphore *s, i32 count) { s->count.fetch_add(count, std::memory_order_release); if (s->count == 1) { @@ -198,10 +179,6 @@ gb_internal void semaphore_wait(Semaphore *s) { struct BlockingMutex { SRWLOCK srwlock; }; - gb_internal void mutex_init(BlockingMutex *m) { - } - gb_internal void mutex_destroy(BlockingMutex *m) { - } gb_internal void mutex_lock(BlockingMutex *m) { AcquireSRWLockExclusive(&m->srwlock); } @@ -229,10 +206,6 @@ gb_internal void semaphore_wait(Semaphore *s) { gb_internal void condition_wait(Condition *c, BlockingMutex *m) { SleepConditionVariableSRW(&c->cond, &m->srwlock, INFINITE, 0); } - gb_internal void condition_wait_with_timeout(Condition *c, BlockingMutex *m, u32 timeout_in_ms) { - SleepConditionVariableSRW(&c->cond, &m->srwlock, timeout_in_ms, 0); - } - #else enum Internal_Mutex_State : i32 { Internal_Mutex_State_Unlocked = 0, @@ -251,9 +224,6 @@ 
gb_internal void semaphore_wait(Semaphore *s) { } }; - gb_internal void mutex_init(BlockingMutex *m) {}; - gb_internal void mutex_destroy(BlockingMutex *m) {}; - gb_no_inline gb_internal void mutex_lock_slow(BlockingMutex *m, i32 curr_state) { i32 new_state = curr_state; for (i32 spin = 0; spin < 100; spin++) { @@ -323,9 +293,6 @@ gb_internal void semaphore_wait(Semaphore *s) { } }; - gb_internal void condition_init(Condition *c) {} - gb_internal void condition_destroy(Condition *c) {} - gb_internal void condition_broadcast(Condition *c) { c->state().fetch_add(1, std::memory_order_release); futex_broadcast(&c->state()); diff --git a/src/types.cpp b/src/types.cpp index 5bddfc79e..afe0b7d5d 100644 --- a/src/types.cpp +++ b/src/types.cpp @@ -808,10 +808,6 @@ gb_internal void type_path_pop(TypePath *tp) { #define FAILURE_SIZE 0 #define FAILURE_ALIGNMENT 0 -gb_internal void init_type_mutex(void) { - mutex_init(&g_type_mutex); -} - gb_internal bool type_ptr_set_update(PtrSet *s, Type *t) { if (ptr_set_exists(s, t)) { return true; From d16ddf7926935e66057239194bbcb699409fafdf Mon Sep 17 00:00:00 2001 From: gingerBill Date: Sun, 1 Jan 2023 16:32:51 +0000 Subject: [PATCH 05/78] Use C++ style `for` loop over `for_array` macro in parser.cpp where posible --- src/parser.cpp | 95 ++++++++++++++++++++++++++------------------------ 1 file changed, 50 insertions(+), 45 deletions(-) diff --git a/src/parser.cpp b/src/parser.cpp index 344dcb20d..4a5ba78a9 100644 --- a/src/parser.cpp +++ b/src/parser.cpp @@ -1905,13 +1905,11 @@ gb_internal void check_polymorphic_params_for_type(AstFile *f, Ast *polymorphic_ return; } ast_node(fl, FieldList, polymorphic_params); - for_array(fi, fl->list) { - Ast *field = fl->list[fi]; + for (Ast *field : fl->list) { if (field->kind != Ast_Field) { continue; } - for_array(i, field->Field.names) { - Ast *name = field->Field.names[i]; + for (Ast *name : field->Field.names) { if (name->kind != field->Field.names[0]->kind) { syntax_error(name, "Mixture of 
polymorphic names using both $ and not for %.*s parameters", LIT(token.string)); return; @@ -3473,16 +3471,14 @@ gb_internal Ast *parse_proc_type(AstFile *f, Token proc_token) { u64 tags = 0; bool is_generic = false; - for_array(i, params->FieldList.list) { - Ast *param = params->FieldList.list[i]; + for (Ast *param : params->FieldList.list) { ast_node(field, Field, param); if (field->type != nullptr) { if (field->type->kind == Ast_PolyType) { is_generic = true; goto end; } - for_array(j, field->names) { - Ast *name = field->names[j]; + for (Ast *name : field->names) { if (name->kind == Ast_PolyType) { is_generic = true; goto end; @@ -3646,8 +3642,9 @@ struct AstAndFlags { gb_internal Array convert_to_ident_list(AstFile *f, Array list, bool ignore_flags, bool allow_poly_names) { auto idents = array_make(heap_allocator(), 0, list.count); // Convert to ident list - for_array(i, list) { - Ast *ident = list[i].node; + isize i = 0; + for (AstAndFlags const &item : list) { + Ast *ident = item.node; if (!ignore_flags) { if (i != 0) { @@ -3678,6 +3675,7 @@ gb_internal Array convert_to_ident_list(AstFile *f, Array li break; } array_add(&idents, ident); + i += 1; } return idents; } @@ -3919,8 +3917,8 @@ gb_internal Ast *parse_field_list(AstFile *f, isize *name_count_, u32 allowed_fl return ast_field_list(f, start_token, params); } - for_array(i, list) { - Ast *type = list[i].node; + for (AstAndFlags const &item : list) { + Ast *type = item.node; Token token = blank_token; if (allowed_flags&FieldFlag_Results) { // NOTE(bill): Make this nothing and not `_` @@ -3930,9 +3928,9 @@ gb_internal Ast *parse_field_list(AstFile *f, isize *name_count_, u32 allowed_fl auto names = array_make(heap_allocator(), 1); token.pos = ast_token(type).pos; names[0] = ast_ident(f, token); - u32 flags = check_field_prefixes(f, list.count, allowed_flags, list[i].flags); + u32 flags = check_field_prefixes(f, list.count, allowed_flags, item.flags); Token tag = {}; - Ast *param = ast_field(f, names, 
list[i].node, nullptr, flags, tag, docs, f->line_comment); + Ast *param = ast_field(f, names, item.node, nullptr, flags, tag, docs, f->line_comment); array_add(¶ms, param); } @@ -4864,10 +4862,9 @@ gb_internal bool init_parser(Parser *p) { gb_internal void destroy_parser(Parser *p) { GB_ASSERT(p != nullptr); // TODO(bill): Fix memory leak - for_array(i, p->packages) { - AstPackage *pkg = p->packages[i]; - for_array(j, pkg->files) { - destroy_ast_file(pkg->files[j]); + for (AstPackage *pkg : p->packages) { + for (AstFile *file : pkg->files) { + destroy_ast_file(file); } array_free(&pkg->files); array_free(&pkg->foreign_files); @@ -4878,10 +4875,10 @@ gb_internal void destroy_parser(Parser *p) { gb_internal void parser_add_package(Parser *p, AstPackage *pkg) { - mutex_lock(&p->packages_mutex); - pkg->id = p->packages.count+1; - array_add(&p->packages, pkg); - mutex_unlock(&p->packages_mutex); + MUTEX_GUARD_BLOCK(&p->packages_mutex) { + pkg->id = p->packages.count+1; + array_add(&p->packages, pkg); + } } gb_internal ParseFileError process_imported_file(Parser *p, ImportedFile imported_file); @@ -4893,15 +4890,15 @@ gb_internal WORKER_TASK_PROC(parser_worker_proc) { auto *node = gb_alloc_item(permanent_allocator(), ParseFileErrorNode); node->err = err; - mutex_lock(&wd->parser->file_error_mutex); - if (wd->parser->file_error_tail != nullptr) { - wd->parser->file_error_tail->next = node; + MUTEX_GUARD_BLOCK(&wd->parser->file_error_mutex) { + if (wd->parser->file_error_tail != nullptr) { + wd->parser->file_error_tail->next = node; + } + wd->parser->file_error_tail = node; + if (wd->parser->file_error_head == nullptr) { + wd->parser->file_error_head = node; + } } - wd->parser->file_error_tail = node; - if (wd->parser->file_error_head == nullptr) { - wd->parser->file_error_head = node; - } - mutex_unlock(&wd->parser->file_error_mutex); } return cast(isize)err; } @@ -4937,9 +4934,9 @@ gb_internal WORKER_TASK_PROC(foreign_file_worker_proc) { // TODO(bill): Actually do 
something with it break; } - mutex_lock(&pkg->foreign_files_mutex); - array_add(&pkg->foreign_files, foreign_file); - mutex_unlock(&pkg->foreign_files_mutex); + MUTEX_GUARD_BLOCK(&pkg->foreign_files_mutex) { + array_add(&pkg->foreign_files, foreign_file); + } return 0; } @@ -4973,13 +4970,13 @@ gb_internal AstPackage *try_add_import_path(Parser *p, String const &path, Strin // NOTE(bill): Single file initial package if (kind == Package_Init && string_ends_with(path, FILE_EXT)) { - FileInfo fi = {}; fi.name = filename_from_path(path); fi.fullpath = path; fi.size = get_file_size(path); fi.is_dir = false; + array_reserve(&pkg->files, 1); pkg->is_single_file = true; parser_add_package(p, pkg); parser_add_file_to_process(p, pkg, fi, pos); @@ -5017,8 +5014,17 @@ gb_internal AstPackage *try_add_import_path(Parser *p, String const &path, Strin return nullptr; } - for_array(list_index, list) { - FileInfo fi = list[list_index]; + isize files_to_reserve = 1; // always reserve 1 + for (FileInfo fi : list) { + String name = fi.name; + String ext = path_extension(name); + if (ext == FILE_EXT && !is_excluded_target_filename(name)) { + files_to_reserve += 1; + } + } + + array_reserve(&pkg->files, files_to_reserve); + for (FileInfo fi : list) { String name = fi.name; String ext = path_extension(name); if (ext == FILE_EXT) { @@ -5311,14 +5317,14 @@ gb_internal void parse_setup_file_decls(Parser *p, AstFile *f, String const &bas auto fullpaths = array_make(permanent_allocator(), 0, fl->filepaths.count); - for_array(fp_idx, fl->filepaths) { - String file_str = string_trim_whitespace(string_value_from_token(f, fl->filepaths[fp_idx])); + for (Token const &fp : fl->filepaths) { + String file_str = string_trim_whitespace(string_value_from_token(f, fp)); String fullpath = file_str; if (allow_check_foreign_filepath()) { String foreign_path = {}; bool ok = determine_path_from_string(&p->file_decl_mutex, node, base_dir, file_str, &foreign_path); if (!ok) { - decls[i] = ast_bad_decl(f, 
fl->filepaths[fp_idx], fl->filepaths[fl->filepaths.count-1]); + decls[i] = ast_bad_decl(f, fp, fl->filepaths[fl->filepaths.count-1]); goto end; } fullpath = foreign_path; @@ -5443,8 +5449,8 @@ gb_internal isize calc_decl_count(Ast *decl) { isize count = 0; switch (decl->kind) { case Ast_BlockStmt: - for_array(i, decl->BlockStmt.stmts) { - count += calc_decl_count(decl->BlockStmt.stmts.data[i]); + for (Ast *stmt : decl->BlockStmt.stmts) { + count += calc_decl_count(stmt); } break; case Ast_WhenStmt: @@ -5564,8 +5570,8 @@ gb_internal bool parse_file(Parser *p, AstFile *f) { f->package_name = package_name.string; if (!f->pkg->is_single_file && docs != nullptr && docs->list.count > 0) { - for_array(i, docs->list) { - Token tok = docs->list[i]; GB_ASSERT(tok.kind == Token_Comment); + for (Token const &tok : docs->list) { + GB_ASSERT(tok.kind == Token_Comment); String str = tok.string; if (string_starts_with(str, str_lit("//"))) { String lc = string_trim_whitespace(substring(str, 2, str.len)); @@ -5776,8 +5782,7 @@ gb_internal ParseFileError parse_packages(Parser *p, String init_filename) { } - for_array(i, build_context.extra_packages) { - String path = build_context.extra_packages[i]; + for (String const &path : build_context.extra_packages) { String fullpath = path_to_full_path(heap_allocator(), path); // LEAK? 
if (!path_is_directory(fullpath)) { String const ext = str_lit(".odin"); From 3c90a059571cb879a468a00c0ca26c9a35090c38 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 00:26:17 +0000 Subject: [PATCH 06/78] Replace condition+mutex with futex --- src/checker.cpp | 3 +- src/thread_pool.cpp | 72 +++++++++++++++++++++------------------------ src/threading.cpp | 4 ++- 3 files changed, 39 insertions(+), 40 deletions(-) diff --git a/src/checker.cpp b/src/checker.cpp index 7141b0698..03ff901eb 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -1935,7 +1935,7 @@ gb_internal void add_type_info_type_internal(CheckerContext *c, Type *t) { -gb_global bool global_procedure_body_in_worker_queue = false; +gb_global std::atomic global_procedure_body_in_worker_queue = false; gb_internal void check_procedure_later(CheckerContext *c, ProcInfo *info) { GB_ASSERT(info != nullptr); @@ -5264,6 +5264,7 @@ gb_internal WORKER_TASK_PROC(thread_proc_body) { gb_internal void check_procedure_bodies(Checker *c) { GB_ASSERT(c != nullptr); + u32 thread_count = cast(u32)gb_max(build_context.thread_count, 1); u32 worker_count = thread_count-1; // NOTE(bill): The main thread will also be used for work if (!build_context.threaded_checker) { diff --git a/src/thread_pool.cpp b/src/thread_pool.cpp index 522b96d09..768a92645 100644 --- a/src/thread_pool.cpp +++ b/src/thread_pool.cpp @@ -16,8 +16,7 @@ struct ThreadPool { Slice threads; std::atomic running; - BlockingMutex task_lock; - Condition tasks_available; + Futex tasks_available; Futex tasks_left; }; @@ -43,27 +42,25 @@ gb_internal void thread_pool_destroy(ThreadPool *pool) { for_array_off(i, 1, pool->threads) { Thread *t = &pool->threads[i]; - condition_broadcast(&pool->tasks_available); + pool->tasks_available.fetch_add(1, std::memory_order_release); + futex_broadcast(&pool->tasks_available); thread_join_and_destroy(t); } - for_array(i, pool->threads) { - free(pool->threads[i].queue); - } gb_free(pool->allocator, 
pool->threads.data); } void thread_pool_queue_push(Thread *thread, WorkerTask task) { - uint64_t capture; - uint64_t new_capture; + u64 capture; + u64 new_capture; do { capture = thread->head_and_tail.load(); - uint64_t mask = thread->capacity - 1; - uint64_t head = (capture >> 32) & mask; - uint64_t tail = ((uint32_t)capture) & mask; + u64 mask = thread->capacity - 1; + u64 head = (capture >> 32) & mask; + u64 tail = ((u32)capture) & mask; - uint64_t new_head = (head + 1) & mask; + u64 new_head = (head + 1) & mask; if (new_head == tail) { GB_PANIC("Thread Queue Full!\n"); } @@ -73,21 +70,22 @@ void thread_pool_queue_push(Thread *thread, WorkerTask task) { new_capture = (new_head << 32) | tail; } while (!thread->head_and_tail.compare_exchange_weak(capture, new_capture)); - thread->pool->tasks_left.fetch_add(1); - condition_broadcast(&thread->pool->tasks_available); + thread->pool->tasks_left.fetch_add(1, std::memory_order_release); + thread->pool->tasks_available.fetch_add(1, std::memory_order_release); + futex_broadcast(&thread->pool->tasks_available); } bool thread_pool_queue_pop(Thread *thread, WorkerTask *task) { - uint64_t capture; - uint64_t new_capture; + u64 capture; + u64 new_capture; do { capture = thread->head_and_tail.load(); - uint64_t mask = thread->capacity - 1; - uint64_t head = (capture >> 32) & mask; - uint64_t tail = ((uint32_t)capture) & mask; + u64 mask = thread->capacity - 1; + u64 head = (capture >> 32) & mask; + u64 tail = ((u32)capture) & mask; - uint64_t new_tail = (tail + 1) & mask; + u64 new_tail = (tail + 1) & mask; if (tail == head) { return false; } @@ -113,12 +111,11 @@ gb_internal bool thread_pool_add_task(ThreadPool *pool, WorkerTaskProc *proc, vo gb_internal void thread_pool_wait(ThreadPool *pool) { WorkerTask task; - while (pool->tasks_left) { - + while (pool->tasks_left.load()) { // if we've got tasks on our queue, run them while (thread_pool_queue_pop(current_thread, &task)) { task.do_work(task.data); - 
pool->tasks_left.fetch_sub(1); + pool->tasks_left.fetch_sub(1, std::memory_order_release); } @@ -127,8 +124,8 @@ gb_internal void thread_pool_wait(ThreadPool *pool) { // if rem_tasks has changed since we checked last, otherwise the program // will permanently sleep Footex rem_tasks = pool->tasks_left.load(); - if (!rem_tasks) { - break; + if (rem_tasks == 0) { + return; } futex_wait(&pool->tasks_left, rem_tasks); @@ -147,37 +144,37 @@ work_start: } // If we've got tasks to process, work through them - size_t finished_tasks = 0; + usize finished_tasks = 0; while (thread_pool_queue_pop(current_thread, &task)) { task.do_work(task.data); - pool->tasks_left.fetch_sub(1); + pool->tasks_left.fetch_sub(1, std::memory_order_release); finished_tasks += 1; } - if (finished_tasks > 0 && !pool->tasks_left) { + if (finished_tasks > 0 && pool->tasks_left.load() == 0) { futex_signal(&pool->tasks_left); } // If there's still work somewhere and we don't have it, steal it - if (pool->tasks_left) { - isize idx = current_thread->idx; + if (pool->tasks_left.load()) { + usize idx = cast(usize)current_thread->idx; for_array(i, pool->threads) { - if (!pool->tasks_left) { + if (pool->tasks_left.load() == 0) { break; } - idx = (idx + 1) % pool->threads.count; - Thread *thread = &pool->threads[idx]; + idx = (idx + 1) % cast(usize)pool->threads.count; + Thread *thread = &pool->threads.data[idx]; WorkerTask task; if (!thread_pool_queue_pop(thread, &task)) { continue; } task.do_work(task.data); - pool->tasks_left.fetch_sub(1); + pool->tasks_left.fetch_sub(1, std::memory_order_release); - if (!pool->tasks_left) { + if (pool->tasks_left.load() == 0) { futex_signal(&pool->tasks_left); } @@ -186,9 +183,8 @@ work_start: } // if we've done all our work, and there's nothing to steal, go to sleep - mutex_lock(&pool->task_lock); - condition_wait(&pool->tasks_available, &pool->task_lock); - mutex_unlock(&pool->task_lock); + i32 state = pool->tasks_available.load(); + futex_wait(&pool->tasks_available, 
state); } return 0; diff --git a/src/threading.cpp b/src/threading.cpp index fb71a2c29..e3f26a8a0 100644 --- a/src/threading.cpp +++ b/src/threading.cpp @@ -393,7 +393,7 @@ gb_internal void thread_init(ThreadPool *pool, Thread *t, isize idx) { #endif t->capacity = 1 << 14; // must be a power of 2 - t->queue = (WorkerTask *)calloc(sizeof(WorkerTask), t->capacity); + t->queue = gb_alloc_array(heap_allocator(), WorkerTask, t->capacity); t->head_and_tail = 0; t->pool = pool; t->idx = idx; @@ -429,6 +429,8 @@ gb_internal void thread_join_and_destroy(Thread *t) { pthread_join(t->posix_handle, NULL); t->posix_handle = 0; #endif + + gb_free(heap_allocator(), t->queue); } gb_internal void thread_set_name(Thread *t, char const *name) { From da479c7628d827d4343f82954c7d09adff31876c Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 00:35:12 +0000 Subject: [PATCH 07/78] Minor style change --- src/thread_pool.cpp | 6 ++---- src/threading.cpp | 4 ---- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/src/thread_pool.cpp b/src/thread_pool.cpp index 768a92645..9ac1af039 100644 --- a/src/thread_pool.cpp +++ b/src/thread_pool.cpp @@ -61,9 +61,7 @@ void thread_pool_queue_push(Thread *thread, WorkerTask task) { u64 tail = ((u32)capture) & mask; u64 new_head = (head + 1) & mask; - if (new_head == tail) { - GB_PANIC("Thread Queue Full!\n"); - } + GB_ASSERT_MSG(new_head != tail, "Thread Queue Full!"); // This *must* be done in here, to avoid a potential race condition where we no longer own the slot by the time we're assigning thread->queue[head] = task; @@ -139,7 +137,7 @@ gb_internal THREAD_PROC(thread_pool_thread_proc) { for (;;) { work_start: - if (!pool->running) { + if (!pool->running.load()) { break; } diff --git a/src/threading.cpp b/src/threading.cpp index e3f26a8a0..4c7aa8f92 100644 --- a/src/threading.cpp +++ b/src/threading.cpp @@ -193,10 +193,6 @@ gb_internal void semaphore_wait(Semaphore *s) { CONDITION_VARIABLE cond; }; - gb_internal void 
condition_init(Condition *c) { - } - gb_internal void condition_destroy(Condition *c) { - } gb_internal void condition_broadcast(Condition *c) { WakeAllConditionVariable(&c->cond); } From 54f89dd84baaf296f7f744e1d2702b2ec867b5ae Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 00:53:11 +0000 Subject: [PATCH 08/78] Multithread `check_collect_entities_all` using new thread pool --- src/checker.cpp | 75 ++++++++++++++++++++++++++++++++----------------- 1 file changed, 50 insertions(+), 25 deletions(-) diff --git a/src/checker.cpp b/src/checker.cpp index 03ff901eb..e70643940 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -4660,40 +4660,65 @@ gb_internal void check_with_workers(Checker *c, WorkerTaskProc *proc, isize tota semaphore_wait(&c->info.collect_semaphore); } +struct CollectEntityWorkerData { + Checker *c; + CheckerContext ctx; + UntypedExprInfoMap untyped; +}; -gb_internal WORKER_TASK_PROC(thread_proc_collect_entities) { - auto *cs = cast(ThreadProcCheckerSection *)data; - Checker *c = cs->checker; - CheckerContext collect_entity_ctx = make_checker_context(c); - defer (destroy_checker_context(&collect_entity_ctx)); +gb_global CollectEntityWorkerData *collect_entity_worker_data; - CheckerContext *ctx = &collect_entity_ctx; - - UntypedExprInfoMap untyped = {}; - map_init(&untyped, heap_allocator()); - - isize offset = cs->offset; - isize file_end = gb_min(offset+cs->count, c->info.files.entries.count); - - for (isize i = offset; i < file_end; i++) { - AstFile *f = c->info.files.entries[i].value; - reset_checker_context(ctx, f, &untyped); - - check_collect_entities(ctx, f->decls); - GB_ASSERT(ctx->collect_delayed_decls == false); - - add_untyped_expressions(&c->info, ctx->untyped); +WORKER_TASK_PROC(check_collect_entities_all_worker_proc) { + isize thread_idx = 0; + if (current_thread) { + thread_idx = current_thread->idx; } + CollectEntityWorkerData *wd = &collect_entity_worker_data[thread_idx]; - map_destroy(&untyped); + Checker *c = wd->c; 
+ CheckerContext *ctx = &wd->ctx; + UntypedExprInfoMap *untyped = &wd->untyped; + + AstFile *f = cast(AstFile *)data; + reset_checker_context(ctx, f, untyped); + + check_collect_entities(ctx, f->decls); + GB_ASSERT(ctx->collect_delayed_decls == false); + + add_untyped_expressions(&c->info, ctx->untyped); - semaphore_release(&c->info.collect_semaphore); return 0; } - gb_internal void check_collect_entities_all(Checker *c) { - check_with_workers(c, thread_proc_collect_entities, c->info.files.entries.count); + isize thread_count = global_thread_pool.threads.count; + + collect_entity_worker_data = gb_alloc_array(permanent_allocator(), CollectEntityWorkerData, thread_count); + for (isize i = 0; i < thread_count; i++) { + auto *wd = &collect_entity_worker_data[i]; + wd->c = c; + wd->ctx = make_checker_context(c); + map_init(&wd->untyped, heap_allocator()); + } + + if (build_context.threaded_checker) { + for (auto const &entry : c->info.files.entries) { + AstFile *f = entry.value; + global_thread_pool_add_task(check_collect_entities_all_worker_proc, f); + } + global_thread_pool_wait(); + } else { + for (auto const &entry : c->info.files.entries) { + AstFile *f = entry.value; + check_collect_entities_all_worker_proc(f); + } + } + + for (isize i = 0; i < thread_count; i++) { + auto *wd = &collect_entity_worker_data[i]; + map_destroy(&wd->untyped); + destroy_checker_context(&wd->ctx); + } } gb_internal void check_export_entities_in_pkg(CheckerContext *ctx, AstPackage *pkg, UntypedExprInfoMap *untyped) { From bfdcf900ef25566e57e46ec46683f8b6f2a9515a Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 00:56:06 +0000 Subject: [PATCH 09/78] Remove `global_` prefix from `global_thread_pool_*` procedures --- src/checker.cpp | 12 ++++++------ src/llvm_backend.cpp | 2 +- src/main.cpp | 4 ++-- src/parser.cpp | 6 +++--- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/checker.cpp b/src/checker.cpp index e70643940..a25d78d3d 100644 --- a/src/checker.cpp 
+++ b/src/checker.cpp @@ -4654,9 +4654,9 @@ gb_internal void check_with_workers(Checker *c, WorkerTaskProc *proc, isize tota for (isize i = 0; i < thread_count; i++) { - global_thread_pool_add_task(proc, thread_data+i); + thread_pool_add_task(proc, thread_data+i); } - global_thread_pool_wait(); + thread_pool_wait(); semaphore_wait(&c->info.collect_semaphore); } @@ -4704,9 +4704,9 @@ gb_internal void check_collect_entities_all(Checker *c) { if (build_context.threaded_checker) { for (auto const &entry : c->info.files.entries) { AstFile *f = entry.value; - global_thread_pool_add_task(check_collect_entities_all_worker_proc, f); + thread_pool_add_task(check_collect_entities_all_worker_proc, f); } - global_thread_pool_wait(); + thread_pool_wait(); } else { for (auto const &entry : c->info.files.entries) { AstFile *f = entry.value; @@ -5350,9 +5350,9 @@ gb_internal void check_procedure_bodies(Checker *c) { semaphore_post(&c->procs_to_check_semaphore, cast(i32)thread_count); for (isize i = 0; i < thread_count; i++) { - global_thread_pool_add_task(thread_proc_body, thread_data+i); + thread_pool_add_task(thread_proc_body, thread_data+i); } - global_thread_pool_wait(); + thread_pool_wait(); semaphore_wait(&c->procs_to_check_semaphore); isize global_remaining = c->procs_to_check_queue.count.load(std::memory_order_relaxed); diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp index 146cb2944..1c401552e 100644 --- a/src/llvm_backend.cpp +++ b/src/llvm_backend.cpp @@ -2272,7 +2272,7 @@ gb_internal void lb_generate_code(lbGenerator *gen) { wd->code_gen_file_type = code_gen_file_type; wd->filepath_obj = filepath_obj; wd->m = m; - global_thread_pool_add_task(lb_llvm_emit_worker_proc, wd); + thread_pool_add_task(lb_llvm_emit_worker_proc, wd); } thread_pool_wait(&global_thread_pool); diff --git a/src/main.cpp b/src/main.cpp index 184ab471e..3ad0e160f 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -20,10 +20,10 @@ gb_internal void init_global_thread_pool(void) { isize worker_count 
= thread_count-1; // NOTE(bill): The main thread will also be used for work thread_pool_init(&global_thread_pool, permanent_allocator(), worker_count, "ThreadPoolWorker"); } -gb_internal bool global_thread_pool_add_task(WorkerTaskProc *proc, void *data) { +gb_internal bool thread_pool_add_task(WorkerTaskProc *proc, void *data) { return thread_pool_add_task(&global_thread_pool, proc, data); } -gb_internal void global_thread_pool_wait(void) { +gb_internal void thread_pool_wait(void) { thread_pool_wait(&global_thread_pool); } diff --git a/src/parser.cpp b/src/parser.cpp index 4a5ba78a9..4d2a8ecf4 100644 --- a/src/parser.cpp +++ b/src/parser.cpp @@ -4910,7 +4910,7 @@ gb_internal void parser_add_file_to_process(Parser *p, AstPackage *pkg, FileInfo auto wd = gb_alloc_item(permanent_allocator(), ParserWorkerData); wd->parser = p; wd->imported_file = f; - global_thread_pool_add_task(parser_worker_proc, wd); + thread_pool_add_task(parser_worker_proc, wd); } gb_internal WORKER_TASK_PROC(foreign_file_worker_proc) { @@ -4948,7 +4948,7 @@ gb_internal void parser_add_foreign_file_to_process(Parser *p, AstPackage *pkg, wd->parser = p; wd->imported_file = f; wd->foreign_kind = kind; - global_thread_pool_add_task(foreign_file_worker_proc, wd); + thread_pool_add_task(foreign_file_worker_proc, wd); } @@ -5798,7 +5798,7 @@ gb_internal ParseFileError parse_packages(Parser *p, String init_filename) { } } - global_thread_pool_wait(); + thread_pool_wait(); for (ParseFileErrorNode *node = p->file_error_head; node != nullptr; node = node->next) { if (node->err != ParseFile_None) { From a5ce8a8c0bc33afb6a4cf7baa16528c0a551d8e0 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 01:31:14 +0000 Subject: [PATCH 10/78] Multi thread `check_export_entities` --- src/checker.cpp | 192 ++++++++++++++++++++++-------------------------- 1 file changed, 89 insertions(+), 103 deletions(-) diff --git a/src/checker.cpp b/src/checker.cpp index a25d78d3d..1e40f04a6 100644 --- a/src/checker.cpp +++ 
b/src/checker.cpp @@ -1,3 +1,5 @@ +#define MULTITHREAD_CHECKER 1 + #include "entity.cpp" #include "types.cpp" @@ -1937,16 +1939,22 @@ gb_internal void add_type_info_type_internal(CheckerContext *c, Type *t) { gb_global std::atomic global_procedure_body_in_worker_queue = false; +gb_internal WORKER_TASK_PROC(check_proc_info_worker_proc); + gb_internal void check_procedure_later(CheckerContext *c, ProcInfo *info) { GB_ASSERT(info != nullptr); GB_ASSERT(info->decl != nullptr); - if (build_context.threaded_checker && global_procedure_body_in_worker_queue) { - GB_ASSERT(c->procs_to_check_queue != nullptr); - } + if (MULTITHREAD_CHECKER && global_procedure_body_in_worker_queue) { + thread_pool_add_task(check_proc_info_worker_proc, info); + } else { + if (build_context.threaded_checker && global_procedure_body_in_worker_queue) { + GB_ASSERT(c->procs_to_check_queue != nullptr); + } - auto *queue = c->procs_to_check_queue ? c->procs_to_check_queue : &c->checker->procs_to_check_queue; - mpmc_enqueue(queue, info); + auto *queue = c->procs_to_check_queue ? 
c->procs_to_check_queue : &c->checker->procs_to_check_queue; + mpmc_enqueue(queue, info); + } } gb_internal void check_procedure_later(CheckerContext *c, AstFile *file, Token token, DeclInfo *decl, Type *type, Ast *body, u64 tags) { @@ -4623,7 +4631,7 @@ struct ThreadProcCheckerSection { gb_internal void check_with_workers(Checker *c, WorkerTaskProc *proc, isize total_count) { - isize thread_count = gb_max(build_context.thread_count, 1); + isize thread_count = global_thread_pool.threads.count; isize worker_count = thread_count-1; // NOTE(bill): The main thread will also be used for work if (!build_context.threaded_checker) { worker_count = 0; @@ -4668,7 +4676,7 @@ struct CollectEntityWorkerData { gb_global CollectEntityWorkerData *collect_entity_worker_data; -WORKER_TASK_PROC(check_collect_entities_all_worker_proc) { +gb_internal WORKER_TASK_PROC(check_collect_entities_all_worker_proc) { isize thread_idx = 0; if (current_thread) { thread_idx = current_thread->idx; @@ -4701,7 +4709,7 @@ gb_internal void check_collect_entities_all(Checker *c) { map_init(&wd->untyped, heap_allocator()); } - if (build_context.threaded_checker) { + if (MULTITHREAD_CHECKER || build_context.threaded_checker) { for (auto const &entry : c->info.files.entries) { AstFile *f = entry.value; thread_pool_add_task(check_collect_entities_all_worker_proc, f); @@ -4713,12 +4721,6 @@ gb_internal void check_collect_entities_all(Checker *c) { check_collect_entities_all_worker_proc(f); } } - - for (isize i = 0; i < thread_count; i++) { - auto *wd = &collect_entity_worker_data[i]; - map_destroy(&wd->untyped); - destroy_checker_context(&wd->ctx); - } } gb_internal void check_export_entities_in_pkg(CheckerContext *ctx, AstPackage *pkg, UntypedExprInfoMap *untyped) { @@ -4735,30 +4737,32 @@ gb_internal void check_export_entities_in_pkg(CheckerContext *ctx, AstPackage *p } } -gb_internal WORKER_TASK_PROC(thread_proc_check_export_entities) { - auto cs = cast(ThreadProcCheckerSection *)data; - Checker *c = 
cs->checker; +gb_internal WORKER_TASK_PROC(check_export_entities_worker_proc) { + isize thread_idx = current_thread ? current_thread->idx : 0; - CheckerContext ctx = make_checker_context(c); - defer (destroy_checker_context(&ctx)); - - UntypedExprInfoMap untyped = {}; - map_init(&untyped, heap_allocator()); - - isize end = gb_min(cs->offset + cs->count, c->info.packages.entries.count); - for (isize i = cs->offset; i < end; i++) { - AstPackage *pkg = c->info.packages.entries[i].value; - check_export_entities_in_pkg(&ctx, pkg, &untyped); - } - - map_destroy(&untyped); - - semaphore_release(&c->info.collect_semaphore); + AstPackage *pkg = (AstPackage *)data; + auto *wd = &collect_entity_worker_data[thread_idx]; + check_export_entities_in_pkg(&wd->ctx, pkg, &wd->untyped); return 0; } + gb_internal void check_export_entities(Checker *c) { - check_with_workers(c, thread_proc_check_export_entities, c->info.packages.entries.count); + isize thread_count = global_thread_pool.threads.count; + + // NOTE(bill): reuse `collect_entity_worker_data` + + for (isize i = 0; i < thread_count; i++) { + auto *wd = &collect_entity_worker_data[i]; + map_clear(&wd->untyped); + wd->ctx = make_checker_context(c); + } + + for (auto const &entry : c->info.packages.entries) { + AstPackage *pkg = entry.value; + thread_pool_add_task(check_export_entities_worker_proc, pkg); + } + thread_pool_wait(); } gb_internal void check_import_entities(Checker *c) { @@ -5087,8 +5091,10 @@ gb_internal bool check_proc_info(Checker *c, ProcInfo *pi, UntypedExprInfoMap *u defer (destroy_checker_context(&ctx)); reset_checker_context(&ctx, pi->file, untyped); ctx.decl = pi->decl; + + GB_ASSERT(procs_to_check_queue != nullptr || MULTITHREAD_CHECKER); + ctx.procs_to_check_queue = procs_to_check_queue; - GB_ASSERT(procs_to_check_queue != nullptr); GB_ASSERT(pi->type->kind == Type_Proc); TypeProc *pt = &pi->type->Proc; @@ -5201,6 +5207,7 @@ gb_internal void check_unchecked_bodies(Checker *c) { } } + thread_pool_wait(); } 
gb_internal void check_test_procedures(Checker *c) { @@ -5258,106 +5265,85 @@ gb_internal bool consume_proc_info_queue(Checker *c, ProcInfo *pi, ProcBodyQueue return ok; } -struct ThreadProcBodyData { - Checker *checker; - ProcBodyQueue *queue; - u32 thread_index; - u32 thread_count; - ThreadProcBodyData *all_data; +struct CheckProcedureBodyWorkerData { + Checker *c; + UntypedExprInfoMap untyped; }; -gb_internal WORKER_TASK_PROC(thread_proc_body) { - ThreadProcBodyData *bd = cast(ThreadProcBodyData *)data; - Checker *c = bd->checker; - GB_ASSERT(c != nullptr); - ProcBodyQueue *this_queue = bd->queue; +gb_global CheckProcedureBodyWorkerData *check_procedure_bodies_worker_data; - UntypedExprInfoMap untyped = {}; - map_init(&untyped, heap_allocator()); - - for (ProcInfo *pi; mpmc_dequeue(this_queue, &pi); /**/) { - consume_proc_info_queue(c, pi, this_queue, &untyped); +gb_internal WORKER_TASK_PROC(check_proc_info_worker_proc) { + isize thread_idx = 0; + if (current_thread) { + thread_idx = current_thread->idx; } + UntypedExprInfoMap *untyped = &check_procedure_bodies_worker_data[thread_idx].untyped; + Checker *c = check_procedure_bodies_worker_data[thread_idx].c; - map_destroy(&untyped); + ProcInfo *pi = cast(ProcInfo *)data; - semaphore_release(&c->procs_to_check_semaphore); - - return 0; + GB_ASSERT(pi->decl != nullptr); + if (pi->decl->parent && pi->decl->parent->entity) { + Entity *parent = pi->decl->parent->entity; + // NOTE(bill): Only check a nested procedure if its parent's body has been checked first + // This is prevent any possible race conditions in evaluation when multithreaded + // NOTE(bill): In single threaded mode, this should never happen + if (parent->kind == Entity_Procedure && (parent->flags & EntityFlag_ProcBodyChecked) == 0) { + thread_pool_add_task(check_proc_info_worker_proc, pi); + return 1; + } + } + map_clear(untyped); + bool ok = check_proc_info(c, pi, untyped, nullptr); + total_bodies_checked.fetch_add(1, std::memory_order_relaxed); + 
return !ok; } + gb_internal void check_procedure_bodies(Checker *c) { GB_ASSERT(c != nullptr); - u32 thread_count = cast(u32)gb_max(build_context.thread_count, 1); - u32 worker_count = thread_count-1; // NOTE(bill): The main thread will also be used for work + u32 thread_count = cast(u32)global_thread_pool.threads.count; if (!build_context.threaded_checker) { - worker_count = 0; + thread_count = 1; } - if (worker_count == 0) { + + check_procedure_bodies_worker_data = gb_alloc_array(permanent_allocator(), CheckProcedureBodyWorkerData, thread_count); + + for (isize i = 0; i < thread_count; i++) { + check_procedure_bodies_worker_data[i].c = c; + map_init(&check_procedure_bodies_worker_data[i].untyped, heap_allocator()); + } + + defer (for (isize i = 0; i < thread_count; i++) { + map_destroy(&check_procedure_bodies_worker_data[i].untyped); + }); + + if (thread_count == 1) { auto *this_queue = &c->procs_to_check_queue; - UntypedExprInfoMap untyped = {}; - map_init(&untyped, heap_allocator()); + UntypedExprInfoMap *untyped = &check_procedure_bodies_worker_data[0].untyped; for (ProcInfo *pi = nullptr; mpmc_dequeue(this_queue, &pi); /**/) { - consume_proc_info_queue(c, pi, this_queue, &untyped); + consume_proc_info_queue(c, pi, this_queue, untyped); } - map_destroy(&untyped); - debugf("Total Procedure Bodies Checked: %td\n", total_bodies_checked.load(std::memory_order_relaxed)); return; } global_procedure_body_in_worker_queue = true; - isize original_queue_count = c->procs_to_check_queue.count.load(std::memory_order_relaxed); - isize load_count = (original_queue_count+thread_count-1)/thread_count; - - ThreadProcBodyData *thread_data = gb_alloc_array(permanent_allocator(), ThreadProcBodyData, thread_count); - for (u32 i = 0; i < thread_count; i++) { - ThreadProcBodyData *data = thread_data + i; - data->checker = c; - data->queue = gb_alloc_item(permanent_allocator(), ProcBodyQueue); - data->thread_index = i; - data->thread_count = thread_count; - data->all_data = 
thread_data; - // NOTE(bill) 2x the amount assumes on average only 1 nested procedure - // TODO(bill): Determine a good heuristic - mpmc_init(data->queue, heap_allocator(), next_pow2_isize(load_count*2)); + for (ProcInfo *pi = nullptr; mpmc_dequeue(&c->procs_to_check_queue, &pi); /**/) { + thread_pool_add_task(check_proc_info_worker_proc, pi); } - // Distibute the work load into multiple queues - for (isize j = 0; j < load_count; j++) { - for (isize i = 0; i < thread_count; i++) { - ProcBodyQueue *queue = thread_data[i].queue; - ProcInfo *pi = nullptr; - if (!mpmc_dequeue(&c->procs_to_check_queue, &pi)) { - break; - } - mpmc_enqueue(queue, pi); - } - } - isize total_queued = 0; - for (isize i = 0; i < thread_count; i++) { - ProcBodyQueue *queue = thread_data[i].queue; - total_queued += queue->count.load(); - } - GB_ASSERT(total_queued == original_queue_count); - - semaphore_post(&c->procs_to_check_semaphore, cast(i32)thread_count); - - for (isize i = 0; i < thread_count; i++) { - thread_pool_add_task(thread_proc_body, thread_data+i); - } - thread_pool_wait(); - semaphore_wait(&c->procs_to_check_semaphore); - isize global_remaining = c->procs_to_check_queue.count.load(std::memory_order_relaxed); GB_ASSERT(global_remaining == 0); + thread_pool_wait(); + debugf("Total Procedure Bodies Checked: %td\n", total_bodies_checked.load(std::memory_order_relaxed)); From 015fe924b8f9a1d8cb78d307a4f8ef6791402bea Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 12:28:38 +0000 Subject: [PATCH 11/78] Remove use of queues for procedure checking. 
--- src/check_decl.cpp | 2 +- src/check_expr.cpp | 10 ++-- src/checker.cpp | 108 +++++++++++++++++--------------------------- src/checker.hpp | 6 +-- src/thread_pool.cpp | 4 ++ 5 files changed, 52 insertions(+), 78 deletions(-) diff --git a/src/check_decl.cpp b/src/check_decl.cpp index 59beae56d..4e3c1b405 100644 --- a/src/check_decl.cpp +++ b/src/check_decl.cpp @@ -986,7 +986,7 @@ gb_internal void check_proc_decl(CheckerContext *ctx, Entity *e, DeclInfo *d) { GB_ASSERT(pl->body->kind == Ast_BlockStmt); if (!pt->is_polymorphic) { - check_procedure_later(ctx, ctx->file, e->token, d, proc_type, pl->body, pl->tags); + check_procedure_later(ctx->checker, ctx->file, e->token, d, proc_type, pl->body, pl->tags); } } else if (!is_foreign) { if (e->Procedure.is_export) { diff --git a/src/check_expr.cpp b/src/check_expr.cpp index ed1ddd1f1..eb9f76547 100644 --- a/src/check_expr.cpp +++ b/src/check_expr.cpp @@ -417,8 +417,6 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E CheckerContext nctx = *old_c; - nctx.procs_to_check_queue = old_c->procs_to_check_queue; - Scope *scope = create_scope(info, base_entity->scope); scope->flags |= ScopeFlag_Proc; nctx.scope = scope; @@ -566,7 +564,7 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E } // NOTE(bill): Check the newly generated procedure body - check_procedure_later(&nctx, proc_info); + check_procedure_later(nctx.checker, proc_info); return true; } @@ -6187,7 +6185,7 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op decl->where_clauses_evaluated = true; if (ok && (data.gen_entity->flags & EntityFlag_ProcBodyChecked) == 0) { - check_procedure_later(c, e->file, e->token, decl, e->type, decl->proc_lit->ProcLit.body, decl->proc_lit->ProcLit.tags); + check_procedure_later(c->checker, e->file, e->token, decl, e->type, decl->proc_lit->ProcLit.body, decl->proc_lit->ProcLit.tags); } } return data; @@ -6225,7 +6223,7 @@ gb_internal 
CallArgumentData check_call_arguments(CheckerContext *c, Operand *op decl->where_clauses_evaluated = true; if (ok && (data.gen_entity->flags & EntityFlag_ProcBodyChecked) == 0) { - check_procedure_later(c, e->file, e->token, decl, e->type, decl->proc_lit->ProcLit.body, decl->proc_lit->ProcLit.tags); + check_procedure_later(c->checker, e->file, e->token, decl, e->type, decl->proc_lit->ProcLit.body, decl->proc_lit->ProcLit.tags); } } return data; @@ -9447,7 +9445,7 @@ gb_internal ExprKind check_expr_base_internal(CheckerContext *c, Operand *o, Ast } pl->decl = decl; - check_procedure_later(&ctx, ctx.file, empty_token, decl, type, pl->body, pl->tags); + check_procedure_later(ctx.checker, ctx.file, empty_token, decl, type, pl->body, pl->tags); } check_close_scope(&ctx); diff --git a/src/checker.cpp b/src/checker.cpp index 1e40f04a6..30e7409f9 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -1195,7 +1195,6 @@ gb_internal void reset_checker_context(CheckerContext *ctx, AstFile *file, Untyp GB_ASSERT(ctx->checker != nullptr); mutex_lock(&ctx->mutex); - auto *queue = ctx->procs_to_check_queue; auto type_path = ctx->type_path; array_clear(type_path); @@ -1211,7 +1210,6 @@ gb_internal void reset_checker_context(CheckerContext *ctx, AstFile *file, Untyp add_curr_ast_file(ctx, file); - ctx->procs_to_check_queue = queue; ctx->untyped = untyped; mutex_unlock(&ctx->mutex); @@ -1232,7 +1230,7 @@ gb_internal void init_checker(Checker *c) { mpmc_init(&c->procs_with_deferred_to_check, a, 1<<10); // NOTE(bill): 1 Mi elements should be enough on average - mpmc_init(&c->procs_to_check_queue, heap_allocator(), 1<<20); + array_init(&c->procs_to_check, heap_allocator(), 0, 1<<20); mpmc_init(&c->global_untyped_queue, a, 1<<20); @@ -1244,7 +1242,7 @@ gb_internal void destroy_checker(Checker *c) { destroy_checker_context(&c->builtin_ctx); - mpmc_destroy(&c->procs_to_check_queue); + array_free(&c->procs_to_check); mpmc_destroy(&c->global_untyped_queue); } @@ -1941,23 +1939,19 @@ 
gb_global std::atomic global_procedure_body_in_worker_queue = false; gb_internal WORKER_TASK_PROC(check_proc_info_worker_proc); -gb_internal void check_procedure_later(CheckerContext *c, ProcInfo *info) { +gb_internal void check_procedure_later(Checker *c, ProcInfo *info) { GB_ASSERT(info != nullptr); GB_ASSERT(info->decl != nullptr); if (MULTITHREAD_CHECKER && global_procedure_body_in_worker_queue) { thread_pool_add_task(check_proc_info_worker_proc, info); } else { - if (build_context.threaded_checker && global_procedure_body_in_worker_queue) { - GB_ASSERT(c->procs_to_check_queue != nullptr); - } - - auto *queue = c->procs_to_check_queue ? c->procs_to_check_queue : &c->checker->procs_to_check_queue; - mpmc_enqueue(queue, info); + GB_ASSERT(global_procedure_body_in_worker_queue == false); + array_add(&c->procs_to_check, info); } } -gb_internal void check_procedure_later(CheckerContext *c, AstFile *file, Token token, DeclInfo *decl, Type *type, Ast *body, u64 tags) { +gb_internal void check_procedure_later(Checker *c, AstFile *file, Token token, DeclInfo *decl, Type *type, Ast *body, u64 tags) { ProcInfo *info = gb_alloc_item(permanent_allocator(), ProcInfo); info->file = file; info->token = token; @@ -4677,11 +4671,7 @@ struct CollectEntityWorkerData { gb_global CollectEntityWorkerData *collect_entity_worker_data; gb_internal WORKER_TASK_PROC(check_collect_entities_all_worker_proc) { - isize thread_idx = 0; - if (current_thread) { - thread_idx = current_thread->idx; - } - CollectEntityWorkerData *wd = &collect_entity_worker_data[thread_idx]; + CollectEntityWorkerData *wd = &collect_entity_worker_data[current_thread_index()]; Checker *c = wd->c; CheckerContext *ctx = &wd->ctx; @@ -4738,10 +4728,8 @@ gb_internal void check_export_entities_in_pkg(CheckerContext *ctx, AstPackage *p } gb_internal WORKER_TASK_PROC(check_export_entities_worker_proc) { - isize thread_idx = current_thread ? 
current_thread->idx : 0; - AstPackage *pkg = (AstPackage *)data; - auto *wd = &collect_entity_worker_data[thread_idx]; + auto *wd = &collect_entity_worker_data[current_thread_index()]; check_export_entities_in_pkg(&wd->ctx, pkg, &wd->untyped); return 0; } @@ -5069,7 +5057,7 @@ gb_internal void calculate_global_init_order(Checker *c) { } -gb_internal bool check_proc_info(Checker *c, ProcInfo *pi, UntypedExprInfoMap *untyped, ProcBodyQueue *procs_to_check_queue) { +gb_internal bool check_proc_info(Checker *c, ProcInfo *pi, UntypedExprInfoMap *untyped) { if (pi == nullptr) { return false; } @@ -5085,17 +5073,16 @@ gb_internal bool check_proc_info(Checker *c, ProcInfo *pi, UntypedExprInfoMap *u } return true; } + if (e != nullptr && (e->flags & EntityFlag_ProcBodyChecked) != 0) { + GB_ASSERT(pi->decl->proc_checked); + return true; + } + pi->decl->proc_checked = true; + if (e != nullptr) { + e->flags |= EntityFlag_ProcBodyChecked; + } } - CheckerContext ctx = make_checker_context(c); - defer (destroy_checker_context(&ctx)); - reset_checker_context(&ctx, pi->file, untyped); - ctx.decl = pi->decl; - - GB_ASSERT(procs_to_check_queue != nullptr || MULTITHREAD_CHECKER); - - ctx.procs_to_check_queue = procs_to_check_queue; - GB_ASSERT(pi->type->kind == Type_Proc); TypeProc *pt = &pi->type->Proc; String name = pi->token.string; @@ -5116,6 +5103,12 @@ gb_internal bool check_proc_info(Checker *c, ProcInfo *pi, UntypedExprInfoMap *u } } + + CheckerContext ctx = make_checker_context(c); + defer (destroy_checker_context(&ctx)); + reset_checker_context(&ctx, pi->file, untyped); + ctx.decl = pi->decl; + bool bounds_check = (pi->tags & ProcTag_bounds_check) != 0; bool no_bounds_check = (pi->tags & ProcTag_no_bounds_check) != 0; @@ -5138,24 +5131,14 @@ gb_internal bool check_proc_info(Checker *c, ProcInfo *pi, UntypedExprInfoMap *u ctx.state_flags &= ~StateFlag_type_assert; } - if (pi->body != nullptr && e != nullptr) { - GB_ASSERT((e->flags & EntityFlag_ProcBodyChecked) == 0); - } - 
check_proc_body(&ctx, pi->token, pi->decl, pi->type, pi->body); - MUTEX_GUARD_BLOCK(&pi->decl->proc_checked_mutex) { - if (e != nullptr) { - e->flags |= EntityFlag_ProcBodyChecked; - } - pi->decl->proc_checked = true; - } add_untyped_expressions(&c->info, ctx.untyped); return true; } GB_STATIC_ASSERT(sizeof(isize) == sizeof(void *)); -gb_internal bool consume_proc_info_queue(Checker *c, ProcInfo *pi, ProcBodyQueue *q, UntypedExprInfoMap *untyped); +gb_internal bool consume_proc_info_queue(Checker *c, ProcInfo *pi, UntypedExprInfoMap *untyped); gb_internal void check_unchecked_bodies(Checker *c) { // NOTE(2021-02-26, bill): Sanity checker @@ -5193,15 +5176,13 @@ gb_internal void check_unchecked_bodies(Checker *c) { } debugf("unchecked: %.*s\n", LIT(e->token.string)); - mpmc_enqueue(&c->procs_to_check_queue, pi); + array_add(&c->procs_to_check, pi); } } - auto *q = &c->procs_to_check_queue; - ProcInfo *pi = nullptr; - while (mpmc_dequeue(q, &pi)) { + for (ProcInfo *pi : c->procs_to_check) { Entity *e = pi->decl->entity; - if (consume_proc_info_queue(c, pi, q, &untyped)) { + if (consume_proc_info_queue(c, pi, &untyped)) { add_dependency_to_set(c, e); GB_ASSERT(e->flags & EntityFlag_ProcBodyChecked); } @@ -5245,7 +5226,7 @@ gb_internal void check_test_procedures(Checker *c) { gb_global std::atomic total_bodies_checked; -gb_internal bool consume_proc_info_queue(Checker *c, ProcInfo *pi, ProcBodyQueue *q, UntypedExprInfoMap *untyped) { +gb_internal bool consume_proc_info_queue(Checker *c, ProcInfo *pi, UntypedExprInfoMap *untyped) { GB_ASSERT(pi->decl != nullptr); if (pi->decl->parent && pi->decl->parent->entity) { Entity *parent = pi->decl->parent->entity; @@ -5253,14 +5234,14 @@ gb_internal bool consume_proc_info_queue(Checker *c, ProcInfo *pi, ProcBodyQueue // This is prevent any possible race conditions in evaluation when multithreaded // NOTE(bill): In single threaded mode, this should never happen if (parent->kind == Entity_Procedure && (parent->flags & 
EntityFlag_ProcBodyChecked) == 0) { - mpmc_enqueue(q, pi); + check_procedure_later(c, pi); return false; } } if (untyped) { map_clear(untyped); } - bool ok = check_proc_info(c, pi, untyped, q); + bool ok = check_proc_info(c, pi, untyped); total_bodies_checked.fetch_add(1, std::memory_order_relaxed); return ok; } @@ -5273,12 +5254,9 @@ struct CheckProcedureBodyWorkerData { gb_global CheckProcedureBodyWorkerData *check_procedure_bodies_worker_data; gb_internal WORKER_TASK_PROC(check_proc_info_worker_proc) { - isize thread_idx = 0; - if (current_thread) { - thread_idx = current_thread->idx; - } - UntypedExprInfoMap *untyped = &check_procedure_bodies_worker_data[thread_idx].untyped; - Checker *c = check_procedure_bodies_worker_data[thread_idx].c; + auto *wd = &check_procedure_bodies_worker_data[current_thread_index()]; + UntypedExprInfoMap *untyped = &wd->untyped; + Checker *c = wd->c; ProcInfo *pi = cast(ProcInfo *)data; @@ -5294,7 +5272,7 @@ gb_internal WORKER_TASK_PROC(check_proc_info_worker_proc) { } } map_clear(untyped); - bool ok = check_proc_info(c, pi, untyped, nullptr); + bool ok = check_proc_info(c, pi, untyped); total_bodies_checked.fetch_add(1, std::memory_order_relaxed); return !ok; } @@ -5321,13 +5299,11 @@ gb_internal void check_procedure_bodies(Checker *c) { }); if (thread_count == 1) { - auto *this_queue = &c->procs_to_check_queue; - UntypedExprInfoMap *untyped = &check_procedure_bodies_worker_data[0].untyped; - - for (ProcInfo *pi = nullptr; mpmc_dequeue(this_queue, &pi); /**/) { - consume_proc_info_queue(c, pi, this_queue, untyped); + for_array(i, c->procs_to_check) { + consume_proc_info_queue(c, c->procs_to_check[i], untyped); } + array_clear(&c->procs_to_check); debugf("Total Procedure Bodies Checked: %td\n", total_bodies_checked.load(std::memory_order_relaxed)); return; @@ -5335,12 +5311,12 @@ gb_internal void check_procedure_bodies(Checker *c) { global_procedure_body_in_worker_queue = true; - for (ProcInfo *pi = nullptr; 
mpmc_dequeue(&c->procs_to_check_queue, &pi); /**/) { - thread_pool_add_task(check_proc_info_worker_proc, pi); + isize prev_procs_to_check_count = c->procs_to_check.count; + for_array(i, c->procs_to_check) { + thread_pool_add_task(check_proc_info_worker_proc, c->procs_to_check[i]); } - - isize global_remaining = c->procs_to_check_queue.count.load(std::memory_order_relaxed); - GB_ASSERT(global_remaining == 0); + GB_ASSERT(prev_procs_to_check_count == c->procs_to_check.count); + array_clear(&c->procs_to_check); thread_pool_wait(); diff --git a/src/checker.hpp b/src/checker.hpp index 1d6019b79..eaad1fa63 100644 --- a/src/checker.hpp +++ b/src/checker.hpp @@ -418,8 +418,6 @@ struct CheckerContext { Scope * polymorphic_scope; Ast *assignment_lhs_hint; - - ProcBodyQueue *procs_to_check_queue; }; @@ -430,9 +428,7 @@ struct Checker { CheckerContext builtin_ctx; MPMCQueue procs_with_deferred_to_check; - - ProcBodyQueue procs_to_check_queue; - Semaphore procs_to_check_semaphore; + Array procs_to_check; // TODO(bill): Technically MPSC queue MPMCQueue global_untyped_queue; diff --git a/src/thread_pool.cpp b/src/thread_pool.cpp index 9ac1af039..939d3c533 100644 --- a/src/thread_pool.cpp +++ b/src/thread_pool.cpp @@ -21,6 +21,10 @@ struct ThreadPool { Futex tasks_left; }; +gb_internal isize current_thread_index(void) { + return current_thread ? 
current_thread->idx : 0; +} + gb_internal void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize thread_count, char const *worker_name) { pool->allocator = a; slice_init(&pool->threads, a, thread_count + 1); From f01cff7ff0d61a4bd222be159243775b5d9bf3e7 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 12:31:00 +0000 Subject: [PATCH 12/78] Multithread checker --- src/checker.cpp | 64 ++++--------------------------------------------- 1 file changed, 5 insertions(+), 59 deletions(-) diff --git a/src/checker.cpp b/src/checker.cpp index 30e7409f9..d3a9c3d2c 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -1,5 +1,3 @@ -#define MULTITHREAD_CHECKER 1 - #include "entity.cpp" #include "types.cpp" @@ -1943,7 +1941,7 @@ gb_internal void check_procedure_later(Checker *c, ProcInfo *info) { GB_ASSERT(info != nullptr); GB_ASSERT(info->decl != nullptr); - if (MULTITHREAD_CHECKER && global_procedure_body_in_worker_queue) { + if (global_procedure_body_in_worker_queue) { thread_pool_add_task(check_proc_info_worker_proc, info); } else { GB_ASSERT(global_procedure_body_in_worker_queue == false); @@ -4617,51 +4615,6 @@ gb_internal void check_create_file_scopes(Checker *c) { } } -struct ThreadProcCheckerSection { - Checker *checker; - isize offset; - isize count; -}; - - -gb_internal void check_with_workers(Checker *c, WorkerTaskProc *proc, isize total_count) { - isize thread_count = global_thread_pool.threads.count; - isize worker_count = thread_count-1; // NOTE(bill): The main thread will also be used for work - if (!build_context.threaded_checker) { - worker_count = 0; - } - - semaphore_post(&c->info.collect_semaphore, cast(i32)thread_count); - if (worker_count == 0) { - ThreadProcCheckerSection section_all = {}; - section_all.checker = c; - section_all.offset = 0; - section_all.count = total_count; - proc(§ion_all); - return; - } - - isize file_load_count = (total_count+thread_count-1)/thread_count; - isize remaining_count = total_count; - - 
ThreadProcCheckerSection *thread_data = gb_alloc_array(permanent_allocator(), ThreadProcCheckerSection, thread_count); - for (isize i = 0; i < thread_count; i++) { - ThreadProcCheckerSection *data = thread_data + i; - data->checker = c; - data->offset = total_count-remaining_count; - data->count = file_load_count; - remaining_count -= file_load_count; - } - GB_ASSERT(remaining_count <= 0); - - - for (isize i = 0; i < thread_count; i++) { - thread_pool_add_task(proc, thread_data+i); - } - thread_pool_wait(); - semaphore_wait(&c->info.collect_semaphore); -} - struct CollectEntityWorkerData { Checker *c; CheckerContext ctx; @@ -4699,18 +4652,11 @@ gb_internal void check_collect_entities_all(Checker *c) { map_init(&wd->untyped, heap_allocator()); } - if (MULTITHREAD_CHECKER || build_context.threaded_checker) { - for (auto const &entry : c->info.files.entries) { - AstFile *f = entry.value; - thread_pool_add_task(check_collect_entities_all_worker_proc, f); - } - thread_pool_wait(); - } else { - for (auto const &entry : c->info.files.entries) { - AstFile *f = entry.value; - check_collect_entities_all_worker_proc(f); - } + for (auto const &entry : c->info.files.entries) { + AstFile *f = entry.value; + thread_pool_add_task(check_collect_entities_all_worker_proc, f); } + thread_pool_wait(); } gb_internal void check_export_entities_in_pkg(CheckerContext *ctx, AstPackage *pkg, UntypedExprInfoMap *untyped) { From 529383f5b17d74f66bebb8679820a69476635b6a Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 15:30:04 +0000 Subject: [PATCH 13/78] Correct a race condition when checking the procedure body --- src/build_settings.cpp | 1 + src/check_decl.cpp | 25 ++-- src/check_expr.cpp | 23 +++- src/checker.cpp | 228 +++++++++++++++++++++++++++-------- src/checker.hpp | 21 +++- src/llvm_backend.cpp | 6 + src/llvm_backend.hpp | 1 + src/llvm_backend_expr.cpp | 6 +- src/llvm_backend_general.cpp | 5 +- src/llvm_backend_proc.cpp | 3 +- src/llvm_backend_stmt.cpp | 6 +- 
src/main.cpp | 5 + 12 files changed, 262 insertions(+), 68 deletions(-) diff --git a/src/build_settings.cpp b/src/build_settings.cpp index 97b512b81..f59b5c0f7 100644 --- a/src/build_settings.cpp +++ b/src/build_settings.cpp @@ -291,6 +291,7 @@ struct BuildContext { bool show_error_line; bool ignore_lazy; + bool ignore_llvm_build; bool use_subsystem_windows; bool ignore_microsoft_magic; diff --git a/src/check_decl.cpp b/src/check_decl.cpp index 4e3c1b405..8f95c1a49 100644 --- a/src/check_decl.cpp +++ b/src/check_decl.cpp @@ -1419,9 +1419,9 @@ struct ProcUsingVar { }; -gb_internal void check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *decl, Type *type, Ast *body) { +gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *decl, Type *type, Ast *body) { if (body == nullptr) { - return; + return false; } GB_ASSERT(body->kind == Ast_BlockStmt); @@ -1502,7 +1502,7 @@ gb_internal void check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de MUTEX_GUARD_BLOCK(ctx->scope->mutex) for_array(i, using_entities) { Entity *e = using_entities[i].e; Entity *uvar = using_entities[i].uvar; - Entity *prev = scope_insert(ctx->scope, uvar, false); + Entity *prev = scope_insert_no_mutex(ctx->scope, uvar); if (prev != nullptr) { error(e->token, "Namespace collision while 'using' procedure argument '%.*s' of: %.*s", LIT(e->token.string), LIT(prev->token.string)); error_line("%.*s != %.*s\n", LIT(uvar->token.string), LIT(prev->token.string)); @@ -1514,7 +1514,7 @@ gb_internal void check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de bool where_clause_ok = evaluate_where_clauses(ctx, nullptr, decl->scope, &decl->proc_lit->ProcLit.where_clauses, !decl->where_clauses_evaluated); if (!where_clause_ok) { // NOTE(bill, 2019-08-31): Don't check the body as the where clauses failed - return; + return false; } check_open_scope(ctx, body); @@ -1526,7 +1526,12 @@ gb_internal void check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de // 
NOTE(bill): Don't err here } - GB_ASSERT(decl->defer_use_checked == false); + GB_ASSERT(decl->proc_checked_state != ProcCheckedState_Checked); + if (decl->defer_use_checked) { + GB_ASSERT(is_type_polymorphic(type, true)); + error(token, "Defer Use Checked: %.*s", LIT(decl->entity->token.string)); + GB_ASSERT(decl->defer_use_checked == false); + } check_stmt_list(ctx, bs->stmts, Stmt_CheckScopeDecls); @@ -1575,10 +1580,8 @@ gb_internal void check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de if (decl->parent != nullptr) { Scope *ps = decl->parent->scope; if (ps->flags & (ScopeFlag_File & ScopeFlag_Pkg & ScopeFlag_Global)) { - return; - } else { - mutex_lock(&ctx->info->deps_mutex); - + return true; + } else MUTEX_GUARD_BLOCK(&ctx->info->deps_mutex) { // NOTE(bill): Add the dependencies from the procedure literal (lambda) // But only at the procedure level for (auto const &entry : decl->deps) { @@ -1589,8 +1592,8 @@ gb_internal void check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de Type *t = entry.ptr; ptr_set_add(&decl->parent->type_info_deps, t); } - - mutex_unlock(&ctx->info->deps_mutex); } } + + return true; } diff --git a/src/check_expr.cpp b/src/check_expr.cpp index eb9f76547..5445e73c7 100644 --- a/src/check_expr.cpp +++ b/src/check_expr.cpp @@ -86,7 +86,6 @@ gb_internal Entity * find_polymorphic_record_entity (CheckerContext *c, Type *or gb_internal void check_not_tuple (CheckerContext *c, Operand *operand); gb_internal void convert_to_typed (CheckerContext *c, Operand *operand, Type *target_type); gb_internal gbString expr_to_string (Ast *expression); -gb_internal void check_proc_body (CheckerContext *c, Token token, DeclInfo *decl, Type *type, Ast *body); gb_internal void update_untyped_expr_type (CheckerContext *c, Ast *e, Type *type, bool final); gb_internal bool check_is_terminating (Ast *node, String const &label); gb_internal bool check_has_break (Ast *stmt, String const &label, bool implicit); @@ -478,6 +477,22 @@ gb_internal 
bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E if (poly_proc_data) { poly_proc_data->gen_entity = other; } + + DeclInfo *decl = other->decl_info; + if (decl->proc_checked_state != ProcCheckedState_Checked) { + ProcInfo *proc_info = gb_alloc_item(permanent_allocator(), ProcInfo); + proc_info->file = other->file; + proc_info->token = other->token; + proc_info->decl = decl; + proc_info->type = other->type; + proc_info->body = decl->proc_lit->ProcLit.body; + proc_info->tags = other->Procedure.tags;; + proc_info->generated_from_polymorphic = true; + proc_info->poly_def_node = poly_def_node; + + check_procedure_later(nctx.checker, proc_info); + } + return true; } } @@ -518,7 +533,8 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E d->gen_proc_type = final_proc_type; d->type_expr = pl->type; d->proc_lit = proc_lit; - d->proc_checked = false; + d->proc_checked_state = ProcCheckedState_Unchecked; + d->defer_use_checked = false; Entity *entity = alloc_entity_procedure(nullptr, token, final_proc_type, tags); entity->identifier = ident; @@ -528,7 +544,8 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E entity->scope = scope->parent; entity->file = base_entity->file; entity->pkg = base_entity->pkg; - entity->flags &= ~EntityFlag_ProcBodyChecked; + entity->flags = 0; + d->entity = entity; AstFile *file = nullptr; { diff --git a/src/checker.cpp b/src/checker.cpp index d3a9c3d2c..f4c9b6822 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -1,3 +1,5 @@ +#define DEBUG_CHECK_ALL_PROCEDURES 1 + #include "entity.cpp" #include "types.cpp" @@ -179,6 +181,7 @@ gb_internal void import_graph_node_swap(ImportGraphNode **data, isize i, isize j gb_internal void init_decl_info(DeclInfo *d, Scope *scope, DeclInfo *parent) { + gb_zero_item(d); d->parent = parent; d->scope = scope; ptr_set_init(&d->deps, heap_allocator()); @@ -438,9 +441,7 @@ gb_internal Entity *scope_lookup(Scope *s, String const &name) 
{ return entity; } - - -gb_internal Entity *scope_insert_with_name(Scope *s, String const &name, Entity *entity, bool use_mutex=true) { +gb_internal Entity *scope_insert_with_name_no_mutex(Scope *s, String const &name, Entity *entity) { if (name == "") { return nullptr; } @@ -448,9 +449,6 @@ gb_internal Entity *scope_insert_with_name(Scope *s, String const &name, Entity Entity **found = nullptr; Entity *result = nullptr; - if (use_mutex) mutex_lock(&s->mutex); - defer (if (use_mutex) mutex_unlock(&s->mutex)); - found = string_map_get(&s->elements, key); if (found) { @@ -479,9 +477,53 @@ end:; return result; } -gb_internal Entity *scope_insert(Scope *s, Entity *entity, bool use_mutex) { + +gb_internal Entity *scope_insert_with_name(Scope *s, String const &name, Entity *entity) { + if (name == "") { + return nullptr; + } + StringHashKey key = string_hash_string(name); + Entity **found = nullptr; + Entity *result = nullptr; + + MUTEX_GUARD(&s->mutex); + + found = string_map_get(&s->elements, key); + + if (found) { + if (entity != *found) { + result = *found; + } + goto end; + } + if (s->parent != nullptr && (s->parent->flags & ScopeFlag_Proc) != 0) { + found = string_map_get(&s->parent->elements, key); + if (found) { + if ((*found)->flags & EntityFlag_Result) { + if (entity != *found) { + result = *found; + } + goto end; + } + } + } + + string_map_set(&s->elements, key, entity); + if (entity->scope == nullptr) { + entity->scope = s; + } +end:; + return result; +} + +gb_internal Entity *scope_insert(Scope *s, Entity *entity) { String name = entity->token.string; - return scope_insert_with_name(s, name, entity, use_mutex); + return scope_insert_with_name(s, name, entity); +} + +gb_internal Entity *scope_insert_no_mutex(Scope *s, Entity *entity) { + String name = entity->token.string; + return scope_insert_with_name_no_mutex(s, name, entity); } @@ -1135,6 +1177,9 @@ gb_internal void init_checker_info(CheckerInfo *i) { map_init(&i->objc_msgSend_types, a); 
string_map_init(&i->load_file_cache, a); + + array_init(&i->all_procedures, heap_allocator()); + } gb_internal void destroy_checker_info(CheckerInfo *i) { @@ -1934,6 +1979,7 @@ gb_internal void add_type_info_type_internal(CheckerContext *c, Type *t) { gb_global std::atomic global_procedure_body_in_worker_queue = false; +gb_global std::atomic global_after_checking_procedure_bodies = false; gb_internal WORKER_TASK_PROC(check_proc_info_worker_proc); @@ -1941,12 +1987,24 @@ gb_internal void check_procedure_later(Checker *c, ProcInfo *info) { GB_ASSERT(info != nullptr); GB_ASSERT(info->decl != nullptr); - if (global_procedure_body_in_worker_queue) { + if (global_after_checking_procedure_bodies) { + Entity *e = info->decl->entity; + debugf("CHECK PROCEDURE LATER! %.*s :: %s {...}\n", LIT(e->token.string), type_to_string(e->type)); + } + + if (global_procedure_body_in_worker_queue.load()) { thread_pool_add_task(check_proc_info_worker_proc, info); } else { - GB_ASSERT(global_procedure_body_in_worker_queue == false); array_add(&c->procs_to_check, info); } + + if (DEBUG_CHECK_ALL_PROCEDURES) { + MUTEX_GUARD_BLOCK(&c->info.all_procedures_mutex) { + GB_ASSERT(info != nullptr); + GB_ASSERT(info->decl != nullptr); + array_add(&c->info.all_procedures, info); + } + } } gb_internal void check_procedure_later(Checker *c, AstFile *file, Token token, DeclInfo *decl, Type *type, Ast *body, u64 tags) { @@ -5010,24 +5068,26 @@ gb_internal bool check_proc_info(Checker *c, ProcInfo *pi, UntypedExprInfoMap *u if (pi->type == nullptr) { return false; } - Entity *e = pi->decl->entity; - MUTEX_GUARD_BLOCK(&pi->decl->proc_checked_mutex) { - if (pi->decl->proc_checked) { - if (e != nullptr) { - GB_ASSERT(e->flags & EntityFlag_ProcBodyChecked); - } - return true; + MUTEX_GUARD(&pi->decl->proc_checked_mutex); + + Entity *e = pi->decl->entity; + switch (pi->decl->proc_checked_state.load()) { + case ProcCheckedState_InProgress: + if (e) { + GB_ASSERT(global_procedure_body_in_worker_queue.load()); } 
- if (e != nullptr && (e->flags & EntityFlag_ProcBodyChecked) != 0) { - GB_ASSERT(pi->decl->proc_checked); - return true; - } - pi->decl->proc_checked = true; + return false; + case ProcCheckedState_Checked: if (e != nullptr) { - e->flags |= EntityFlag_ProcBodyChecked; + GB_ASSERT(e->flags & EntityFlag_ProcBodyChecked); } + return true; + case ProcCheckedState_Unchecked: + // okay + break; } + pi->decl->proc_checked_state.store(ProcCheckedState_InProgress); GB_ASSERT(pi->type->kind == Type_Proc); TypeProc *pt = &pi->type->Proc; @@ -5039,17 +5099,21 @@ gb_internal bool check_proc_info(Checker *c, ProcInfo *pi, UntypedExprInfoMap *u token = ast_token(pi->poly_def_node); } error(token, "Unspecialized polymorphic procedure '%.*s'", LIT(name)); + pi->decl->proc_checked_state.store(ProcCheckedState_Unchecked); return false; } if (pt->is_polymorphic && pt->is_poly_specialized) { + Entity *e = pi->decl->entity; + GB_ASSERT(e != nullptr); if ((e->flags & EntityFlag_Used) == 0) { // NOTE(bill, 2019-08-31): It was never used, don't check + // NOTE(bill, 2023-01-02): This may need to be checked again if it is used elsewhere? 
+ pi->decl->proc_checked_state.store(ProcCheckedState_Unchecked); return false; } } - CheckerContext ctx = make_checker_context(c); defer (destroy_checker_context(&ctx)); reset_checker_context(&ctx, pi->file, untyped); @@ -5077,14 +5141,34 @@ gb_internal bool check_proc_info(Checker *c, ProcInfo *pi, UntypedExprInfoMap *u ctx.state_flags &= ~StateFlag_type_assert; } - check_proc_body(&ctx, pi->token, pi->decl, pi->type, pi->body); + bool body_was_checked = check_proc_body(&ctx, pi->token, pi->decl, pi->type, pi->body); + + if (body_was_checked) { + pi->decl->proc_checked_state.store(ProcCheckedState_Checked); + if (pi->body) { + Entity *e = pi->decl->entity; + if (e != nullptr) { + e->flags |= EntityFlag_ProcBodyChecked; + } + } + } else { + pi->decl->proc_checked_state.store(ProcCheckedState_Unchecked); + if (pi->body) { + Entity *e = pi->decl->entity; + if (e != nullptr) { + e->flags &= ~EntityFlag_ProcBodyChecked; + } + } + } + add_untyped_expressions(&c->info, ctx.untyped); + return true; } GB_STATIC_ASSERT(sizeof(isize) == sizeof(void *)); -gb_internal bool consume_proc_info_queue(Checker *c, ProcInfo *pi, UntypedExprInfoMap *untyped); +gb_internal bool consume_proc_info(Checker *c, ProcInfo *pi, UntypedExprInfoMap *untyped); gb_internal void check_unchecked_bodies(Checker *c) { // NOTE(2021-02-26, bill): Sanity checker @@ -5092,10 +5176,15 @@ gb_internal void check_unchecked_bodies(Checker *c) { // even ones which should not exist, due to the multithreaded nature of the parser // HACK TODO(2021-02-26, bill): Actually fix this race condition + GB_ASSERT(c->procs_to_check.count == 0); + UntypedExprInfoMap untyped = {}; map_init(&untyped, heap_allocator()); defer (map_destroy(&untyped)); + // use the `procs_to_check` array + global_procedure_body_in_worker_queue = false; + for (auto const &entry : c->info.minimum_dependency_set) { Entity *e = entry.ptr; if (e == nullptr || e->kind != Entity_Procedure) { @@ -5122,19 +5211,22 @@ gb_internal void 
check_unchecked_bodies(Checker *c) { } debugf("unchecked: %.*s\n", LIT(e->token.string)); - array_add(&c->procs_to_check, pi); + check_procedure_later(c, pi); } } - for (ProcInfo *pi : c->procs_to_check) { - Entity *e = pi->decl->entity; - if (consume_proc_info_queue(c, pi, &untyped)) { - add_dependency_to_set(c, e); - GB_ASSERT(e->flags & EntityFlag_ProcBodyChecked); + if (!global_procedure_body_in_worker_queue) { + for_array(i, c->procs_to_check) { + ProcInfo *pi = c->procs_to_check[i]; + consume_proc_info(c, pi, &untyped); } + array_clear(&c->procs_to_check); + } else { + thread_pool_wait(); } - thread_pool_wait(); + global_procedure_body_in_worker_queue = false; + global_after_checking_procedure_bodies = true; } gb_internal void check_test_procedures(Checker *c) { @@ -5172,8 +5264,15 @@ gb_internal void check_test_procedures(Checker *c) { gb_global std::atomic total_bodies_checked; -gb_internal bool consume_proc_info_queue(Checker *c, ProcInfo *pi, UntypedExprInfoMap *untyped) { +gb_internal bool consume_proc_info(Checker *c, ProcInfo *pi, UntypedExprInfoMap *untyped) { GB_ASSERT(pi->decl != nullptr); + switch (pi->decl->proc_checked_state.load()) { + case ProcCheckedState_InProgress: + return false; + case ProcCheckedState_Checked: + return true; + } + if (pi->decl->parent && pi->decl->parent->entity) { Entity *parent = pi->decl->parent->entity; // NOTE(bill): Only check a nested procedure if its parent's body has been checked first @@ -5187,9 +5286,11 @@ gb_internal bool consume_proc_info_queue(Checker *c, ProcInfo *pi, UntypedExprIn if (untyped) { map_clear(untyped); } - bool ok = check_proc_info(c, pi, untyped); - total_bodies_checked.fetch_add(1, std::memory_order_relaxed); - return ok; + if (check_proc_info(c, pi, untyped)) { + total_bodies_checked.fetch_add(1, std::memory_order_relaxed); + return true; + } + return false; } struct CheckProcedureBodyWorkerData { @@ -5218,9 +5319,11 @@ gb_internal WORKER_TASK_PROC(check_proc_info_worker_proc) { } } 
map_clear(untyped); - bool ok = check_proc_info(c, pi, untyped); - total_bodies_checked.fetch_add(1, std::memory_order_relaxed); - return !ok; + if (check_proc_info(c, pi, untyped)) { + total_bodies_checked.fetch_add(1, std::memory_order_relaxed); + return 0; + } + return 1; } @@ -5247,7 +5350,7 @@ gb_internal void check_procedure_bodies(Checker *c) { if (thread_count == 1) { UntypedExprInfoMap *untyped = &check_procedure_bodies_worker_data[0].untyped; for_array(i, c->procs_to_check) { - consume_proc_info_queue(c, c->procs_to_check[i], untyped); + consume_proc_info(c, c->procs_to_check[i], untyped); } array_clear(&c->procs_to_check); @@ -5266,9 +5369,6 @@ gb_internal void check_procedure_bodies(Checker *c) { thread_pool_wait(); - debugf("Total Procedure Bodies Checked: %td\n", total_bodies_checked.load(std::memory_order_relaxed)); - - global_procedure_body_in_worker_queue = false; } gb_internal void add_untyped_expressions(CheckerInfo *cinfo, UntypedExprInfoMap *untyped) { @@ -5662,9 +5762,6 @@ gb_internal void check_parsed_files(Checker *c) { TIME_SECTION("check test procedures"); check_test_procedures(c); - TIME_SECTION("check bodies have all been checked"); - check_unchecked_bodies(c); - TIME_SECTION("add type info for type definitions"); add_type_info_for_type_definitions(c); check_merge_queues_into_arrays(c); @@ -5672,6 +5769,12 @@ gb_internal void check_parsed_files(Checker *c) { TIME_SECTION("generate minimum dependency set"); generate_minimum_dependency_set(c, c->info.entry_point); + TIME_SECTION("check bodies have all been checked"); + check_unchecked_bodies(c); + + check_merge_queues_into_arrays(c); + + TIME_SECTION("check entry point"); if (build_context.build_mode == BuildMode_Executable && !build_context.no_entry_point && build_context.command_kind != Command_test) { Scope *s = c->info.init_scope; @@ -5694,11 +5797,39 @@ gb_internal void check_parsed_files(Checker *c) { } } + thread_pool_wait(); + GB_ASSERT(c->procs_to_check.count == 0); + + if 
(DEBUG_CHECK_ALL_PROCEDURES) { + UntypedExprInfoMap untyped = {}; + map_init(&untyped, heap_allocator()); + defer (map_destroy(&untyped)); + + for_array(i, c->info.all_procedures) { + ProcInfo *pi = c->info.all_procedures[i]; + GB_ASSERT(pi != nullptr); + GB_ASSERT(pi->decl != nullptr); + Entity *e = pi->decl->entity; + auto proc_checked_state = pi->decl->proc_checked_state.load(); + if (e && ((e->flags & EntityFlag_ProcBodyChecked) == 0)) { + if ((e->flags & EntityFlag_Used) != 0) { + debugf("%.*s :: %s\n", LIT(e->token.string), type_to_string(e->type)); + debugf("proc body unchecked\n"); + debugf("Checked State: %s\n\n", ProcCheckedState_strings[proc_checked_state]); + + consume_proc_info(c, pi, &untyped); + } + } + } + } + + debugf("Total Procedure Bodies Checked: %td\n", total_bodies_checked.load(std::memory_order_relaxed)); + TIME_SECTION("check unique package names"); check_unique_package_names(c); - TIME_SECTION("sanity checks"); + check_merge_queues_into_arrays(c); GB_ASSERT(c->info.entity_queue.count.load(std::memory_order_relaxed) == 0); GB_ASSERT(c->info.definition_queue.count.load(std::memory_order_relaxed) == 0); @@ -5717,5 +5848,6 @@ gb_internal void check_parsed_files(Checker *c) { } } + TIME_SECTION("type check finish"); } diff --git a/src/checker.hpp b/src/checker.hpp index eaad1fa63..37aff41b1 100644 --- a/src/checker.hpp +++ b/src/checker.hpp @@ -142,6 +142,20 @@ typedef DECL_ATTRIBUTE_PROC(DeclAttributeProc); gb_internal void check_decl_attributes(CheckerContext *c, Array const &attributes, DeclAttributeProc *proc, AttributeContext *ac); +enum ProcCheckedState : u8 { + ProcCheckedState_Unchecked, + ProcCheckedState_InProgress, + ProcCheckedState_Checked, + + ProcCheckedState_COUNT +}; + +char const *ProcCheckedState_strings[ProcCheckedState_COUNT] { + "Unchecked", + "In Progress", + "Checked", +}; + // DeclInfo is used to store information of certain declarations to allow for "any order" usage struct DeclInfo { DeclInfo * parent; // NOTE(bill): 
only used for procedure literals at the moment @@ -157,7 +171,7 @@ struct DeclInfo { Type * gen_proc_type; // Precalculated bool is_using; bool where_clauses_evaluated; - bool proc_checked; + std::atomic proc_checked_state; BlockingMutex proc_checked_mutex; isize defer_used; bool defer_use_checked; @@ -375,6 +389,9 @@ struct CheckerInfo { BlockingMutex load_file_mutex; StringMap load_file_cache; + + BlockingMutex all_procedures_mutex;; + Array all_procedures; }; struct CheckerContext { @@ -458,7 +475,7 @@ gb_internal Entity *entity_of_node(Ast *expr); gb_internal Entity *scope_lookup_current(Scope *s, String const &name); gb_internal Entity *scope_lookup (Scope *s, String const &name); gb_internal void scope_lookup_parent (Scope *s, String const &name, Scope **scope_, Entity **entity_); -gb_internal Entity *scope_insert (Scope *s, Entity *entity, bool use_mutex=true); +gb_internal Entity *scope_insert (Scope *s, Entity *entity); gb_internal void add_type_and_value (CheckerInfo *i, Ast *expression, AddressingMode mode, Type *type, ExactValue value); diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp index 1c401552e..3e62f678a 100644 --- a/src/llvm_backend.cpp +++ b/src/llvm_backend.cpp @@ -2255,6 +2255,12 @@ gb_internal void lb_generate_code(lbGenerator *gen) { } } + if (build_context.ignore_llvm_build) { + gb_printf_err("LLVM SUCCESS!\n"); + gb_exit(1); + return; + } + if (do_threading && non_empty_module_count > 1) { for (auto const &entry : gen->modules) { lbModule *m = entry.value; diff --git a/src/llvm_backend.hpp b/src/llvm_backend.hpp index 9f7caa3bb..50fb5701f 100644 --- a/src/llvm_backend.hpp +++ b/src/llvm_backend.hpp @@ -301,6 +301,7 @@ struct lbProcedure { lbBlock * curr_block; lbTargetList * target_list; PtrMap direct_parameters; + PtrMap local_entity_map; Ast *curr_stmt; diff --git a/src/llvm_backend_expr.cpp b/src/llvm_backend_expr.cpp index d574caf4c..28a68b065 100644 --- a/src/llvm_backend_expr.cpp +++ b/src/llvm_backend_expr.cpp @@ -3404,7 
+3404,11 @@ gb_internal lbAddr lb_build_addr_from_entity(lbProcedure *p, Entity *e, Ast *exp lbValue v = {}; - lbValue *found = map_get(&p->module->values, e); + lbValue *found = nullptr; + found = map_get(&p->local_entity_map, e); + if (found == nullptr) { + found = map_get(&p->module->values, e); + } if (found) { v = *found; } else if (e->kind == Entity_Variable && e->flags & EntityFlag_Using) { diff --git a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp index 0508c6171..940c94a13 100644 --- a/src/llvm_backend_general.cpp +++ b/src/llvm_backend_general.cpp @@ -2714,7 +2714,6 @@ gb_internal lbValue lb_find_value_from_entity(lbModule *m, Entity *e) { return g; } } - GB_PANIC("\n\tError in: %s, missing value '%.*s'\n", token_pos_to_string(e->token.pos), LIT(e->token.string)); return {}; } @@ -2845,6 +2844,10 @@ gb_internal lbAddr lb_add_local(lbProcedure *p, Type *type, Entity *e, bool zero lb_mem_zero_ptr(p, ptr, type, alignment); } + if (e != nullptr) { + map_set(&p->local_entity_map, e, val); + } + return lb_addr(val); } diff --git a/src/llvm_backend_proc.cpp b/src/llvm_backend_proc.cpp index 384d29ca7..9691afebc 100644 --- a/src/llvm_backend_proc.cpp +++ b/src/llvm_backend_proc.cpp @@ -68,7 +68,7 @@ gb_internal lbProcedure *lb_create_procedure(lbModule *m, Entity *entity, bool i GB_ASSERT(entity != nullptr); GB_ASSERT(entity->kind == Entity_Procedure); if (!entity->Procedure.is_foreign) { - GB_ASSERT_MSG(entity->flags & EntityFlag_ProcBodyChecked, "%.*s :: %s", LIT(entity->token.string), type_to_string(entity->type)); + GB_ASSERT_MSG(entity->flags & EntityFlag_ProcBodyChecked, "%.*s :: %s (was parapoly: %d)", LIT(entity->token.string), type_to_string(entity->type), is_type_polymorphic(entity->type, true)); } String link_name = {}; @@ -487,6 +487,7 @@ gb_internal void lb_begin_procedure_body(lbProcedure *p) { lb_start_block(p, p->entry_block); map_init(&p->direct_parameters, heap_allocator()); + map_init(&p->local_entity_map, heap_allocator()); 
GB_ASSERT(p->type != nullptr); diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp index 6400a8a9d..cdfb28aa7 100644 --- a/src/llvm_backend_stmt.cpp +++ b/src/llvm_backend_stmt.cpp @@ -1531,6 +1531,9 @@ gb_internal void lb_build_static_variables(lbProcedure *p, AstValueDecl *vd) { lbValue global_val = {global, alloc_type_pointer(e->type)}; lb_add_entity(p->module, e, global_val); lb_add_member(p->module, mangled_name, global_val); + if (e) { + map_set(&p->local_entity_map, e, global_val); + } } } gb_internal void lb_append_tuple_values(lbProcedure *p, Array *dst_values, lbValue src_value) { @@ -2188,9 +2191,10 @@ gb_internal void lb_build_stmt(lbProcedure *p, Ast *node) { for_array(i, vd->names) { Ast *name = vd->names[i]; if (!is_blank_ident(name)) { + GB_ASSERT(name->kind == Ast_Ident); Entity *e = entity_of_node(name); TokenPos pos = ast_token(name).pos; - GB_ASSERT_MSG(e != nullptr, "%s", token_pos_to_string(pos)); + GB_ASSERT_MSG(e != nullptr, "\n%s missing entity for %.*s", token_pos_to_string(pos), LIT(name->Ident.token.string)); if (e->flags & EntityFlag_Static) { // NOTE(bill): If one of the entities is static, they all are is_static = true; diff --git a/src/main.cpp b/src/main.cpp index 3ad0e160f..42d6f8e87 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -660,6 +660,7 @@ enum BuildFlagKind { // internal use only BuildFlag_InternalIgnoreLazy, + BuildFlag_InternalIgnoreLLVMBuild, #if defined(GB_SYSTEM_WINDOWS) BuildFlag_IgnoreVsSearch, @@ -832,6 +833,7 @@ gb_internal bool parse_build_flags(Array args) { add_flag(&build_flags, BuildFlag_ErrorPosStyle, str_lit("error-pos-style"), BuildFlagParam_String, Command_all); add_flag(&build_flags, BuildFlag_InternalIgnoreLazy, str_lit("internal-ignore-lazy"), BuildFlagParam_None, Command_all); + add_flag(&build_flags, BuildFlag_InternalIgnoreLLVMBuild, str_lit("internal-ignore-llvm-build"),BuildFlagParam_None, Command_all); #if defined(GB_SYSTEM_WINDOWS) add_flag(&build_flags, BuildFlag_IgnoreVsSearch, 
str_lit("ignore-vs-search"), BuildFlagParam_None, Command__does_build); @@ -1491,6 +1493,9 @@ gb_internal bool parse_build_flags(Array args) { case BuildFlag_InternalIgnoreLazy: build_context.ignore_lazy = true; break; + case BuildFlag_InternalIgnoreLLVMBuild: + build_context.ignore_llvm_build = true; + break; #if defined(GB_SYSTEM_WINDOWS) case BuildFlag_IgnoreVsSearch: { GB_ASSERT(value.kind == ExactValue_Invalid); From fa562ec5d60319f5cd7e85bb337bd21feb7ceeb8 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 15:40:25 +0000 Subject: [PATCH 14/78] Remove unneeded `local_entity_map` --- src/llvm_backend.hpp | 1 - src/llvm_backend_expr.cpp | 6 +----- src/llvm_backend_general.cpp | 4 ---- src/llvm_backend_proc.cpp | 1 - src/llvm_backend_stmt.cpp | 3 --- 5 files changed, 1 insertion(+), 14 deletions(-) diff --git a/src/llvm_backend.hpp b/src/llvm_backend.hpp index 50fb5701f..9f7caa3bb 100644 --- a/src/llvm_backend.hpp +++ b/src/llvm_backend.hpp @@ -301,7 +301,6 @@ struct lbProcedure { lbBlock * curr_block; lbTargetList * target_list; PtrMap direct_parameters; - PtrMap local_entity_map; Ast *curr_stmt; diff --git a/src/llvm_backend_expr.cpp b/src/llvm_backend_expr.cpp index 28a68b065..d574caf4c 100644 --- a/src/llvm_backend_expr.cpp +++ b/src/llvm_backend_expr.cpp @@ -3404,11 +3404,7 @@ gb_internal lbAddr lb_build_addr_from_entity(lbProcedure *p, Entity *e, Ast *exp lbValue v = {}; - lbValue *found = nullptr; - found = map_get(&p->local_entity_map, e); - if (found == nullptr) { - found = map_get(&p->module->values, e); - } + lbValue *found = map_get(&p->module->values, e); if (found) { v = *found; } else if (e->kind == Entity_Variable && e->flags & EntityFlag_Using) { diff --git a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp index 940c94a13..22628e895 100644 --- a/src/llvm_backend_general.cpp +++ b/src/llvm_backend_general.cpp @@ -2844,10 +2844,6 @@ gb_internal lbAddr lb_add_local(lbProcedure *p, Type *type, Entity *e, bool zero 
lb_mem_zero_ptr(p, ptr, type, alignment); } - if (e != nullptr) { - map_set(&p->local_entity_map, e, val); - } - return lb_addr(val); } diff --git a/src/llvm_backend_proc.cpp b/src/llvm_backend_proc.cpp index 9691afebc..7245bdd80 100644 --- a/src/llvm_backend_proc.cpp +++ b/src/llvm_backend_proc.cpp @@ -487,7 +487,6 @@ gb_internal void lb_begin_procedure_body(lbProcedure *p) { lb_start_block(p, p->entry_block); map_init(&p->direct_parameters, heap_allocator()); - map_init(&p->local_entity_map, heap_allocator()); GB_ASSERT(p->type != nullptr); diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp index cdfb28aa7..06abebc78 100644 --- a/src/llvm_backend_stmt.cpp +++ b/src/llvm_backend_stmt.cpp @@ -1531,9 +1531,6 @@ gb_internal void lb_build_static_variables(lbProcedure *p, AstValueDecl *vd) { lbValue global_val = {global, alloc_type_pointer(e->type)}; lb_add_entity(p->module, e, global_val); lb_add_member(p->module, mangled_name, global_val); - if (e) { - map_set(&p->local_entity_map, e, global_val); - } } } gb_internal void lb_append_tuple_values(lbProcedure *p, Array *dst_values, lbValue src_value) { From c293f5b7ebc3b733e996a97c6e32d678f13b3ee5 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 16:56:05 +0000 Subject: [PATCH 15/78] Remove unneeded mutex --- src/check_expr.cpp | 33 +++++++++++++++++---------------- src/checker.hpp | 7 ++++++- src/common_memory.cpp | 18 ++++++------------ src/llvm_backend_stmt.cpp | 5 ++--- src/main.cpp | 3 +-- src/threading.cpp | 17 +++++++++-------- 6 files changed, 41 insertions(+), 42 deletions(-) diff --git a/src/check_expr.cpp b/src/check_expr.cpp index 5445e73c7..f47361c27 100644 --- a/src/check_expr.cpp +++ b/src/check_expr.cpp @@ -193,8 +193,7 @@ gb_internal void check_did_you_mean_objc_entity(String const &name, Entity *e, b GB_ASSERT(e->kind == Entity_TypeName); GB_ASSERT(e->TypeName.objc_metadata != nullptr); auto *objc_metadata = e->TypeName.objc_metadata; - mutex_lock(objc_metadata->mutex); - 
defer (mutex_unlock(objc_metadata->mutex)); + MUTEX_GUARD(objc_metadata->mutex); StringSet set = {}; string_set_init(&set, heap_allocator()); @@ -369,8 +368,7 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E GB_ASSERT(dst == nullptr); } - mutex_lock(&info->gen_procs_mutex); - defer (mutex_unlock(&info->gen_procs_mutex)); + // MUTEX_GUARD(&info->gen_procs_mutex); if (!src->Proc.is_polymorphic || src->Proc.is_poly_specialized) { return false; @@ -436,11 +434,14 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E return false; } - auto *found_gen_procs = map_get(&info->gen_procs, base_entity->identifier.load()); + GenProcsData *found_gen_procs = nullptr; + + MUTEX_GUARD(&info->gen_procs_mutex); + + found_gen_procs = map_get(&info->gen_procs, base_entity->identifier.load()); if (found_gen_procs) { - auto procs = *found_gen_procs; - for_array(i, procs) { - Entity *other = procs[i]; + MUTEX_GUARD(&found_gen_procs->mutex); + for (Entity *other : found_gen_procs->procs) { Type *pt = base_type(other->type); if (are_types_identical(pt, final_proc_type)) { if (poly_proc_data) { @@ -463,15 +464,13 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E // LEAK TODO(bill): Cloning this AST may be leaky Ast *cloned_proc_type_node = clone_ast(pt->node); success = check_procedure_type(&nctx, final_proc_type, cloned_proc_type_node, &operands); - if (!success) { return false; } if (found_gen_procs) { - auto procs = *found_gen_procs; - for_array(i, procs) { - Entity *other = procs[i]; + MUTEX_GUARD(&found_gen_procs->mutex); + for (Entity *other : found_gen_procs->procs) { Type *pt = base_type(other->type); if (are_types_identical(pt, final_proc_type)) { if (poly_proc_data) { @@ -567,11 +566,13 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E proc_info->poly_def_node = poly_def_node; if (found_gen_procs) { - array_add(found_gen_procs, entity); + 
MUTEX_GUARD(&found_gen_procs->mutex); + array_add(&found_gen_procs->procs, entity); } else { - auto array = array_make(heap_allocator()); - array_add(&array, entity); - map_set(&info->gen_procs, base_entity->identifier.load(), array); + GenProcsData gen_proc_data = {}; + gen_proc_data.procs = array_make(heap_allocator()); + array_add(&gen_proc_data.procs, entity); + map_set(&info->gen_procs, base_entity->identifier.load(), gen_proc_data); } if (poly_proc_data) { diff --git a/src/checker.hpp b/src/checker.hpp index 37aff41b1..56f8707d3 100644 --- a/src/checker.hpp +++ b/src/checker.hpp @@ -311,6 +311,11 @@ struct LoadFileCache { StringMap hashes; }; +struct GenProcsData { + Array procs; + BlockingMutex mutex; +}; + // CheckerInfo stores all the symbol information for a type-checked program struct CheckerInfo { Checker *checker; @@ -360,7 +365,7 @@ struct CheckerInfo { RecursiveMutex gen_procs_mutex; RecursiveMutex gen_types_mutex; - PtrMap > gen_procs; // Key: Ast * | Identifier -> Entity + PtrMap gen_procs; // Key: Ast * | Identifier -> Entity PtrMap > gen_types; BlockingMutex type_info_mutex; // NOT recursive diff --git a/src/common_memory.cpp b/src/common_memory.cpp index 2022554cf..cdf2281fe 100644 --- a/src/common_memory.cpp +++ b/src/common_memory.cpp @@ -37,7 +37,6 @@ gb_internal gb_inline void *align_formula_ptr(void *ptr, isize align) { gb_global BlockingMutex global_memory_block_mutex; -gb_global BlockingMutex global_memory_allocator_mutex; gb_internal void platform_virtual_memory_init(void); @@ -55,9 +54,9 @@ struct MemoryBlock { }; struct Arena { - MemoryBlock *curr_block; - isize minimum_block_size; - bool ignore_mutex; + MemoryBlock * curr_block; + isize minimum_block_size; + BlockingMutex mutex; }; enum { DEFAULT_MINIMUM_BLOCK_SIZE = 8ll*1024ll*1024ll }; @@ -83,10 +82,7 @@ gb_internal isize arena_align_forward_offset(Arena *arena, isize alignment) { gb_internal void *arena_alloc(Arena *arena, isize min_size, isize alignment) { 
GB_ASSERT(gb_is_power_of_two(alignment)); - BlockingMutex *mutex = &global_memory_allocator_mutex; - if (!arena->ignore_mutex) { - mutex_lock(mutex); - } + mutex_lock(&arena->mutex); isize size = 0; if (arena->curr_block != nullptr) { @@ -113,9 +109,7 @@ gb_internal void *arena_alloc(Arena *arena, isize min_size, isize alignment) { curr_block->used += size; GB_ASSERT(curr_block->used <= curr_block->size); - if (!arena->ignore_mutex) { - mutex_unlock(mutex); - } + mutex_unlock(&arena->mutex); // NOTE(bill): memory will be zeroed by default due to virtual memory return ptr; @@ -304,7 +298,7 @@ gb_internal GB_ALLOCATOR_PROC(arena_allocator_proc) { } -gb_global gb_thread_local Arena permanent_arena = {nullptr, DEFAULT_MINIMUM_BLOCK_SIZE, true}; +gb_global gb_thread_local Arena permanent_arena = {nullptr, DEFAULT_MINIMUM_BLOCK_SIZE}; gb_internal gbAllocator permanent_allocator() { return arena_allocator(&permanent_arena); } diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp index 06abebc78..8742423a5 100644 --- a/src/llvm_backend_stmt.cpp +++ b/src/llvm_backend_stmt.cpp @@ -57,9 +57,8 @@ gb_internal void lb_build_constant_value_decl(lbProcedure *p, AstValueDecl *vd) if (pl->body != nullptr) { auto *found = map_get(&info->gen_procs, ident); if (found) { - auto procs = *found; - for_array(i, procs) { - Entity *e = procs[i]; + MUTEX_GUARD(&found->mutex); + for (Entity *e : found->procs) { if (!ptr_set_exists(min_dep_set, e)) { continue; } diff --git a/src/main.cpp b/src/main.cpp index 42d6f8e87..ad9d6b0ef 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -17,8 +17,7 @@ gb_global ThreadPool global_thread_pool; gb_internal void init_global_thread_pool(void) { isize thread_count = gb_max(build_context.thread_count, 1); - isize worker_count = thread_count-1; // NOTE(bill): The main thread will also be used for work - thread_pool_init(&global_thread_pool, permanent_allocator(), worker_count, "ThreadPoolWorker"); + thread_pool_init(&global_thread_pool, 
permanent_allocator(), thread_count, "ThreadPoolWorker"); } gb_internal bool thread_pool_add_task(WorkerTaskProc *proc, void *data) { return thread_pool_add_task(&global_thread_pool, proc, data); diff --git a/src/threading.cpp b/src/threading.cpp index 4c7aa8f92..c5db5d1b4 100644 --- a/src/threading.cpp +++ b/src/threading.cpp @@ -77,22 +77,23 @@ gb_internal void yield_process(void); struct MutexGuard { - MutexGuard() = delete; + MutexGuard() = delete; MutexGuard(MutexGuard const &) = delete; + MutexGuard(MutexGuard &&) = delete; - MutexGuard(BlockingMutex *bm) : bm{bm} { + explicit MutexGuard(BlockingMutex *bm) noexcept : bm{bm} { mutex_lock(this->bm); } - MutexGuard(RecursiveMutex *rm) : rm{rm} { + explicit MutexGuard(RecursiveMutex *rm) noexcept : rm{rm} { mutex_lock(this->rm); } - MutexGuard(BlockingMutex &bm) : bm{&bm} { + explicit MutexGuard(BlockingMutex &bm) noexcept : bm{&bm} { mutex_lock(this->bm); } - MutexGuard(RecursiveMutex &rm) : rm{&rm} { + explicit MutexGuard(RecursiveMutex &rm) noexcept : rm{&rm} { mutex_lock(this->rm); } - ~MutexGuard() { + ~MutexGuard() noexcept { if (this->bm) { mutex_unlock(this->bm); } else if (this->rm) { @@ -100,14 +101,14 @@ struct MutexGuard { } } - operator bool() const { return true; } + operator bool() const noexcept { return true; } BlockingMutex *bm; RecursiveMutex *rm; }; #define MUTEX_GUARD_BLOCK(m) if (MutexGuard GB_DEFER_3(_mutex_guard_){m}) -#define MUTEX_GUARD(m) MutexGuard GB_DEFER_3(_mutex_guard_){m} +#define MUTEX_GUARD(m) mutex_lock(m); defer (mutex_unlock(m)) struct RecursiveMutex { From c38650911267a4ebd12063e69aefa24b783121c7 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 17:06:29 +0000 Subject: [PATCH 16/78] Minor clean up of thread pool code --- src/thread_pool.cpp | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/src/thread_pool.cpp b/src/thread_pool.cpp index 939d3c533..12a2f9292 100644 --- a/src/thread_pool.cpp +++ b/src/thread_pool.cpp @@ -38,11 
+38,11 @@ gb_internal void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize thread_init_and_start(pool, t, i); } - pool->running = true; + pool->running.store(true); } gb_internal void thread_pool_destroy(ThreadPool *pool) { - pool->running = false; + pool->running.store(false); for_array_off(i, 1, pool->threads) { Thread *t = &pool->threads[i]; @@ -139,12 +139,7 @@ gb_internal THREAD_PROC(thread_pool_thread_proc) { current_thread = thread; ThreadPool *pool = current_thread->pool; - for (;;) { -work_start: - if (!pool->running.load()) { - break; - } - + while (pool->running.load()) { // If we've got tasks to process, work through them usize finished_tasks = 0; while (thread_pool_queue_pop(current_thread, &task)) { @@ -180,13 +175,15 @@ work_start: futex_signal(&pool->tasks_left); } - goto work_start; + goto main_loop_continue; } } // if we've done all our work, and there's nothing to steal, go to sleep i32 state = pool->tasks_available.load(); futex_wait(&pool->tasks_available, state); + + main_loop_continue:; } return 0; From ad52003077d579600d810b1337ca4d7904a1fc9b Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 17:15:29 +0000 Subject: [PATCH 17/78] Remove some unneeded checks --- src/checker.cpp | 4 ++-- src/thread_pool.cpp | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/checker.cpp b/src/checker.cpp index f4c9b6822..c9e84a35b 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -1978,8 +1978,8 @@ gb_internal void add_type_info_type_internal(CheckerContext *c, Type *t) { -gb_global std::atomic global_procedure_body_in_worker_queue = false; -gb_global std::atomic global_after_checking_procedure_bodies = false; +gb_global std::atomic global_procedure_body_in_worker_queue; +gb_global std::atomic global_after_checking_procedure_bodies; gb_internal WORKER_TASK_PROC(check_proc_info_worker_proc); diff --git a/src/thread_pool.cpp b/src/thread_pool.cpp index 12a2f9292..f1f19b275 100644 --- a/src/thread_pool.cpp 
+++ b/src/thread_pool.cpp @@ -142,6 +142,8 @@ gb_internal THREAD_PROC(thread_pool_thread_proc) { while (pool->running.load()) { // If we've got tasks to process, work through them usize finished_tasks = 0; + i32 state; + while (thread_pool_queue_pop(current_thread, &task)) { task.do_work(task.data); pool->tasks_left.fetch_sub(1, std::memory_order_release); @@ -180,7 +182,7 @@ gb_internal THREAD_PROC(thread_pool_thread_proc) { } // if we've done all our work, and there's nothing to steal, go to sleep - i32 state = pool->tasks_available.load(); + state = pool->tasks_available.load(); futex_wait(&pool->tasks_available, state); main_loop_continue:; From 9737b65d9c2c4c09fd1a0ec1daa3dd7dcdeb7dc5 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 17:18:59 +0000 Subject: [PATCH 18/78] Explicitly call `store` for futex --- src/threading.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/threading.cpp b/src/threading.cpp index c5db5d1b4..169b9ff43 100644 --- a/src/threading.cpp +++ b/src/threading.cpp @@ -117,7 +117,8 @@ struct RecursiveMutex { }; gb_internal void mutex_lock(RecursiveMutex *m) { - Futex tid = cast(i32)thread_current_id(); + Futex tid; + tid.store(cast(i32)thread_current_id()); for (;;) { i32 prev_owner = 0; m->owner.compare_exchange_strong(prev_owner, tid, std::memory_order_acquire, std::memory_order_acquire); @@ -130,7 +131,8 @@ gb_internal void mutex_lock(RecursiveMutex *m) { } } gb_internal bool mutex_try_lock(RecursiveMutex *m) { - Futex tid = cast(i32)thread_current_id(); + Futex tid; + tid.store(cast(i32)thread_current_id()); i32 prev_owner = 0; m->owner.compare_exchange_strong(prev_owner, tid, std::memory_order_acquire, std::memory_order_acquire); if (prev_owner == 0 || prev_owner == tid) { From 0e040be9411328d2e82556b7a37fa1cf90b666bd Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 17:49:16 +0000 Subject: [PATCH 19/78] Add define for darwin --- src/threading.cpp | 1 + 1 file changed, 1 
insertion(+) diff --git a/src/threading.cpp b/src/threading.cpp index 169b9ff43..bcbdaf083 100644 --- a/src/threading.cpp +++ b/src/threading.cpp @@ -641,6 +641,7 @@ gb_internal void futex_broadcast(Futex *addr) { gb_internal void futex_wait(Futex *addr, Footex val) { for (;;) { + enum { ULF_WAKE_ALL = 0x00000100 }; int ret = __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO | ULF_WAKE_ALL, addr, val, 0); if (ret >= 0) { if (*addr != val) { From 1568971732bd04a7f68a52277c0b2ab0cb5009c2 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 18:04:16 +0000 Subject: [PATCH 20/78] Fix pool running --- src/thread_pool.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/thread_pool.cpp b/src/thread_pool.cpp index f1f19b275..a429e47ff 100644 --- a/src/thread_pool.cpp +++ b/src/thread_pool.cpp @@ -29,6 +29,9 @@ gb_internal void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize pool->allocator = a; slice_init(&pool->threads, a, thread_count + 1); + // NOTE: this needs to be initialized before any thread starts + pool->running.store(true); + // setup the main thread thread_init(pool, &pool->threads[0], 0); current_thread = &pool->threads[0]; @@ -37,8 +40,6 @@ gb_internal void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize Thread *t = &pool->threads[i]; thread_init_and_start(pool, t, i); } - - pool->running.store(true); } gb_internal void thread_pool_destroy(ThreadPool *pool) { @@ -138,6 +139,7 @@ gb_internal THREAD_PROC(thread_pool_thread_proc) { WorkerTask task; current_thread = thread; ThreadPool *pool = current_thread->pool; + // debugf("worker id: %td\n", current_thread->idx); while (pool->running.load()) { // If we've got tasks to process, work through them From 0d87b2e8db045b33cc6da18f5c53d8c3266c12b3 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 19:39:35 +0000 Subject: [PATCH 21/78] Use local mutexes rather than a global one for the dependency insertion --- src/checker.cpp | 30 
++++++------------------------ src/checker.hpp | 11 +++++------ 2 files changed, 11 insertions(+), 30 deletions(-) diff --git a/src/checker.cpp b/src/checker.cpp index c9e84a35b..daebe0d31 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -743,21 +743,17 @@ gb_internal void check_scope_usage(Checker *c, Scope *scope) { gb_internal void add_dependency(CheckerInfo *info, DeclInfo *d, Entity *e) { - mutex_lock(&info->deps_mutex); + mutex_lock(&d->deps_mutex); ptr_set_add(&d->deps, e); - mutex_unlock(&info->deps_mutex); + mutex_unlock(&d->deps_mutex); } -gb_internal void add_type_info_dependency(CheckerInfo *info, DeclInfo *d, Type *type, bool require_mutex) { +gb_internal void add_type_info_dependency(CheckerInfo *info, DeclInfo *d, Type *type) { if (d == nullptr) { return; } - if (require_mutex) { - mutex_lock(&info->deps_mutex); - } + mutex_lock(&d->type_info_deps_mutex); ptr_set_add(&d->type_info_deps, type); - if (require_mutex) { - mutex_unlock(&info->deps_mutex); - } + mutex_unlock(&d->type_info_deps_mutex); } gb_internal AstPackage *get_core_package(CheckerInfo *info, String name) { @@ -1157,13 +1153,6 @@ gb_internal void init_checker_info(CheckerInfo *i) { array_init(&i->required_foreign_imports_through_force, a, 0, 0); - - i->allow_identifier_uses = false; - if (i->allow_identifier_uses) { - array_init(&i->identifier_uses, a); - } - - TIME_SECTION("checker info: mpmc queues"); mpmc_init(&i->entity_queue, a, 1<<20); @@ -1194,7 +1183,6 @@ gb_internal void destroy_checker_info(CheckerInfo *i) { string_map_destroy(&i->files); string_map_destroy(&i->packages); array_free(&i->variable_init_order); - array_free(&i->identifier_uses); array_free(&i->required_foreign_imports_through_force); mpmc_destroy(&i->entity_queue); @@ -1597,12 +1585,6 @@ gb_internal void add_entity_use(CheckerContext *c, Ast *identifier, Entity *enti identifier->Ident.entity = entity; - if (c->info->allow_identifier_uses) { - mutex_lock(&c->info->identifier_uses_mutex); - 
array_add(&c->info->identifier_uses, identifier); - mutex_unlock(&c->info->identifier_uses_mutex); - } - String dmsg = entity->deprecated_message; if (dmsg.len > 0) { warning(identifier, "%.*s is deprecated: %.*s", LIT(entity->token.string), LIT(dmsg)); @@ -1767,7 +1749,7 @@ gb_internal void add_type_info_type_internal(CheckerContext *c, Type *t) { return; } - add_type_info_dependency(c->info, c->decl, t, false); + add_type_info_dependency(c->info, c->decl, t); auto found = map_get(&c->info->type_info_map, t); if (found != nullptr) { diff --git a/src/checker.hpp b/src/checker.hpp index 56f8707d3..62f655f95 100644 --- a/src/checker.hpp +++ b/src/checker.hpp @@ -179,8 +179,12 @@ struct DeclInfo { CommentGroup *comment; CommentGroup *docs; - PtrSet deps; + BlockingMutex deps_mutex; + PtrSet deps; + + BlockingMutex type_info_deps_mutex; PtrSet type_info_deps; + Array labels; }; @@ -375,11 +379,6 @@ struct CheckerInfo { BlockingMutex foreign_mutex; // NOT recursive StringMap foreigns; - // only used by 'odin query' - bool allow_identifier_uses; - BlockingMutex identifier_uses_mutex; - Array identifier_uses; - // NOTE(bill): These are actually MPSC queues // TODO(bill): Convert them to be MPSC queues MPMCQueue definition_queue; From d2ec2d1606013eb28f608d2ecaec3a654ec3598f Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 19:46:55 +0000 Subject: [PATCH 22/78] Remove another use of a global mutex --- src/check_decl.cpp | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/src/check_decl.cpp b/src/check_decl.cpp index 8f95c1a49..32d50e36d 100644 --- a/src/check_decl.cpp +++ b/src/check_decl.cpp @@ -1581,16 +1581,21 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de Scope *ps = decl->parent->scope; if (ps->flags & (ScopeFlag_File & ScopeFlag_Pkg & ScopeFlag_Global)) { return true; - } else MUTEX_GUARD_BLOCK(&ctx->info->deps_mutex) { + } else { // NOTE(bill): Add the dependencies from the 
procedure literal (lambda) // But only at the procedure level - for (auto const &entry : decl->deps) { - Entity *e = entry.ptr; - ptr_set_add(&decl->parent->deps, e); + + MUTEX_GUARD_BLOCK(decl->deps_mutex) MUTEX_GUARD_BLOCK(decl->parent->deps_mutex) { + for (auto const &entry : decl->deps) { + Entity *e = entry.ptr; + ptr_set_add(&decl->parent->deps, e); + } } - for (auto const &entry : decl->type_info_deps) { - Type *t = entry.ptr; - ptr_set_add(&decl->parent->type_info_deps, t); + MUTEX_GUARD_BLOCK(decl->type_info_deps_mutex) MUTEX_GUARD_BLOCK(decl->parent->type_info_deps_mutex) { + for (auto const &entry : decl->type_info_deps) { + Type *t = entry.ptr; + ptr_set_add(&decl->parent->type_info_deps, t); + } } } } From 09c26e6be006cb285aed4b0780bee368516fd272 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 20:38:37 +0000 Subject: [PATCH 23/78] Narrow type info mutex usage --- src/checker.cpp | 55 ++++++++++++++++++++++++------------------------- 1 file changed, 27 insertions(+), 28 deletions(-) diff --git a/src/checker.cpp b/src/checker.cpp index daebe0d31..dc6a49bcf 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -1731,10 +1731,7 @@ gb_internal void add_type_info_type(CheckerContext *c, Type *t) { if (build_context.disallow_rtti) { return; } - - mutex_lock(&c->info->type_info_mutex); add_type_info_type_internal(c, t); - mutex_unlock(&c->info->type_info_mutex); } gb_internal void add_type_info_type_internal(CheckerContext *c, Type *t) { @@ -1751,33 +1748,35 @@ gb_internal void add_type_info_type_internal(CheckerContext *c, Type *t) { add_type_info_dependency(c->info, c->decl, t); - auto found = map_get(&c->info->type_info_map, t); - if (found != nullptr) { - // Types have already been added - return; - } - - bool prev = false; - isize ti_index = -1; - for (auto const &e : c->info->type_info_map) { - if (are_types_identical_unique_tuples(t, e.key)) { - // Duplicate entry - ti_index = e.value; - prev = true; - break; + 
MUTEX_GUARD_BLOCK(&c->info->type_info_mutex) { + auto found = map_get(&c->info->type_info_map, t); + if (found != nullptr) { + // Types have already been added + return; } - } - if (ti_index < 0) { - // Unique entry - // NOTE(bill): map entries grow linearly and in order - ti_index = c->info->type_info_types.count; - array_add(&c->info->type_info_types, t); - } - map_set(&c->checker->info.type_info_map, t, ti_index); - if (prev) { - // NOTE(bill): If a previous one exists already, no need to continue - return; + bool prev = false; + isize ti_index = -1; + for (auto const &e : c->info->type_info_map) { + if (are_types_identical_unique_tuples(t, e.key)) { + // Duplicate entry + ti_index = e.value; + prev = true; + break; + } + } + if (ti_index < 0) { + // Unique entry + // NOTE(bill): map entries grow linearly and in order + ti_index = c->info->type_info_types.count; + array_add(&c->info->type_info_types, t); + } + map_set(&c->checker->info.type_info_map, t, ti_index); + + if (prev) { + // NOTE(bill): If a previous one exists already, no need to continue + return; + } } // Add nested types From df2767311f11255da311a23bc240077fa8f933b1 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 20:42:22 +0000 Subject: [PATCH 24/78] Use `mutex_try_lock` in `check_proc_info` --- src/checker.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/checker.cpp b/src/checker.cpp index dc6a49bcf..f80ea9e4c 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -5050,7 +5050,10 @@ gb_internal bool check_proc_info(Checker *c, ProcInfo *pi, UntypedExprInfoMap *u return false; } - MUTEX_GUARD(&pi->decl->proc_checked_mutex); + if (!mutex_try_lock(&pi->decl->proc_checked_mutex)) { + return false; + } + defer (mutex_unlock(&pi->decl->proc_checked_mutex)); Entity *e = pi->decl->entity; switch (pi->decl->proc_checked_state.load()) { From 5b335bb88c9045961a2a20d021ec5f4b5acf96ce Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 20:48:24 +0000 
Subject: [PATCH 25/78] Narrow `g_type_mutex` usage --- src/types.cpp | 80 ++++++++++++++++++++++++++------------------------- 1 file changed, 41 insertions(+), 39 deletions(-) diff --git a/src/types.cpp b/src/types.cpp index afe0b7d5d..c49f43f7c 100644 --- a/src/types.cpp +++ b/src/types.cpp @@ -3383,8 +3383,6 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { if (t->failure) { return FAILURE_ALIGNMENT; } - mutex_lock(&g_type_mutex); - defer (mutex_unlock(&g_type_mutex)); t = base_type(t); @@ -3408,6 +3406,8 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { } break; case Type_Array: { + MUTEX_GUARD(&g_type_mutex); + Type *elem = t->Array.elem; bool pop = type_path_push(path, elem); if (path->failure) { @@ -3419,6 +3419,8 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { } case Type_EnumeratedArray: { + MUTEX_GUARD(&g_type_mutex); + Type *elem = t->EnumeratedArray.elem; bool pop = type_path_push(path, elem); if (path->failure) { @@ -3438,6 +3440,8 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { case Type_Tuple: { + MUTEX_GUARD(&g_type_mutex); + i64 max = 1; for_array(i, t->Tuple.variables) { i64 align = type_align_of_internal(t->Tuple.variables[i]->type, path); @@ -3461,6 +3465,8 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { return gb_max(t->Union.custom_align, 1); } + MUTEX_GUARD(&g_type_mutex); + i64 max = 1; for_array(i, t->Union.variants) { Type *variant = t->Union.variants[i]; @@ -3481,39 +3487,27 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { if (t->Struct.custom_align > 0) { return gb_max(t->Struct.custom_align, 1); } - if (t->Struct.is_raw_union) { - i64 max = 1; - for_array(i, t->Struct.fields) { - Type *field_type = t->Struct.fields[i]->type; - bool pop = type_path_push(path, field_type); - if (path->failure) { - return FAILURE_ALIGNMENT; - } - i64 align = type_align_of_internal(field_type, path); - if (pop) type_path_pop(path); - if 
(max < align) { - max = align; - } - } - return max; - } else if (t->Struct.fields.count > 0) { - i64 max = 1; - // NOTE(bill): Check the fields to check for cyclic definitions - for_array(i, t->Struct.fields) { - Type *field_type = t->Struct.fields[i]->type; - bool pop = type_path_push(path, field_type); - if (path->failure) return FAILURE_ALIGNMENT; - i64 align = type_align_of_internal(field_type, path); - if (pop) type_path_pop(path); - if (max < align) { - max = align; - } - } - if (t->Struct.is_packed) { - return 1; - } - return max; + + if (t->Struct.is_packed) { + return 1; } + + MUTEX_GUARD(&g_type_mutex); + + i64 max = 1; + for_array(i, t->Struct.fields) { + Type *field_type = t->Struct.fields[i]->type; + bool pop = type_path_push(path, field_type); + if (path->failure) { + return FAILURE_ALIGNMENT; + } + i64 align = type_align_of_internal(field_type, path); + if (pop) type_path_pop(path); + if (max < align) { + max = align; + } + } + return max; } break; case Type_BitSet: { @@ -3579,8 +3573,7 @@ gb_internal i64 *type_set_offsets_of(Slice const &fields, bool is_pack } gb_internal bool type_set_offsets(Type *t) { - mutex_lock(&g_type_mutex); - defer (mutex_unlock(&g_type_mutex)); + MUTEX_GUARD(&g_type_mutex); // TODO(bill): only per struct t = base_type(t); if (t->kind == Type_Struct) { @@ -3609,12 +3602,11 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { if (t->failure) { return FAILURE_SIZE; } - mutex_lock(&g_type_mutex); - defer (mutex_unlock(&g_type_mutex)); - switch (t->kind) { case Type_Named: { + MUTEX_GUARD(&g_type_mutex); + bool pop = type_path_push(path, t); if (path->failure) { return FAILURE_ALIGNMENT; @@ -3652,6 +3644,8 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { return build_context.word_size*2; case Type_Array: { + MUTEX_GUARD(&g_type_mutex); + i64 count, align, size, alignment; count = t->Array.count; if (count == 0) { @@ -3667,6 +3661,8 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath 
*path) { } break; case Type_EnumeratedArray: { + MUTEX_GUARD(&g_type_mutex); + i64 count, align, size, alignment; count = t->EnumeratedArray.count; if (count == 0) { @@ -3699,6 +3695,8 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { return (1 + 1 + 2)*build_context.word_size; case Type_Tuple: { + MUTEX_GUARD(&g_type_mutex); + i64 count, align, size; count = t->Tuple.variables.count; if (count == 0) { @@ -3717,6 +3715,8 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { if (t->Union.variants.count == 0) { return 0; } + MUTEX_GUARD(&g_type_mutex); + i64 align = type_align_of_internal(t, path); if (path->failure) { return FAILURE_SIZE; @@ -3754,6 +3754,8 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { case Type_Struct: { + MUTEX_GUARD(&g_type_mutex); + if (t->Struct.is_raw_union) { i64 count = t->Struct.fields.count; i64 align = type_align_of_internal(t, path); From f16d8e77b3f9b3ddf82e97672ca38dc19264eb5e Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 20:55:49 +0000 Subject: [PATCH 26/78] Narrow `fullpath_mutex` usage --- src/build_settings.cpp | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/build_settings.cpp b/src/build_settings.cpp index f59b5c0f7..04d1ada93 100644 --- a/src/build_settings.cpp +++ b/src/build_settings.cpp @@ -937,16 +937,20 @@ gb_global BlockingMutex fullpath_mutex; #if defined(GB_SYSTEM_WINDOWS) gb_internal String path_to_fullpath(gbAllocator a, String s) { String result = {}; - mutex_lock(&fullpath_mutex); - defer (mutex_unlock(&fullpath_mutex)); String16 string16 = string_to_string16(heap_allocator(), s); defer (gb_free(heap_allocator(), string16.text)); - DWORD len = GetFullPathNameW(&string16[0], 0, nullptr, nullptr); + DWORD len; + + mutex_lock(&fullpath_mutex); + + len = GetFullPathNameW(&string16[0], 0, nullptr, nullptr); if (len != 0) { wchar_t *text = gb_alloc_array(permanent_allocator(), wchar_t, len+1); 
GetFullPathNameW(&string16[0], len, text, nullptr); + mutex_unlock(&fullpath_mutex); + text[len] = 0; result = string16_to_string(a, make_string16(text, len)); result = string_trim_whitespace(result); @@ -957,6 +961,8 @@ gb_internal String path_to_fullpath(gbAllocator a, String s) { result.text[i] = '/'; } } + } else { + mutex_unlock(&fullpath_mutex); } return result; From 7ffffeecccc6a1fa1b26238f8ed4608d93ec9bb0 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 21:35:40 +0000 Subject: [PATCH 27/78] Comment out many mutex guards in `type_(size|align)_of_internal` --- src/thread_pool.cpp | 21 ++++++++++----------- src/types.cpp | 22 +++++++++++----------- 2 files changed, 21 insertions(+), 22 deletions(-) diff --git a/src/thread_pool.cpp b/src/thread_pool.cpp index a429e47ff..b89e00454 100644 --- a/src/thread_pool.cpp +++ b/src/thread_pool.cpp @@ -30,7 +30,7 @@ gb_internal void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize slice_init(&pool->threads, a, thread_count + 1); // NOTE: this needs to be initialized before any thread starts - pool->running.store(true); + pool->running.store(true, std::memory_order_seq_cst); // setup the main thread thread_init(pool, &pool->threads[0], 0); @@ -43,7 +43,7 @@ gb_internal void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize } gb_internal void thread_pool_destroy(ThreadPool *pool) { - pool->running.store(false); + pool->running.store(false, std::memory_order_seq_cst); for_array_off(i, 1, pool->threads) { Thread *t = &pool->threads[i]; @@ -114,7 +114,7 @@ gb_internal bool thread_pool_add_task(ThreadPool *pool, WorkerTaskProc *proc, vo gb_internal void thread_pool_wait(ThreadPool *pool) { WorkerTask task; - while (pool->tasks_left.load()) { + while (pool->tasks_left.load(std::memory_order_acquire)) { // if we've got tasks on our queue, run them while (thread_pool_queue_pop(current_thread, &task)) { task.do_work(task.data); @@ -126,7 +126,7 @@ gb_internal void 
thread_pool_wait(ThreadPool *pool) { // This *must* be executed in this order, so the futex wakes immediately // if rem_tasks has changed since we checked last, otherwise the program // will permanently sleep - Footex rem_tasks = pool->tasks_left.load(); + Footex rem_tasks = pool->tasks_left.load(std::memory_order_acquire); if (rem_tasks == 0) { return; } @@ -141,7 +141,7 @@ gb_internal THREAD_PROC(thread_pool_thread_proc) { ThreadPool *pool = current_thread->pool; // debugf("worker id: %td\n", current_thread->idx); - while (pool->running.load()) { + while (pool->running.load(std::memory_order_seq_cst)) { // If we've got tasks to process, work through them usize finished_tasks = 0; i32 state; @@ -152,30 +152,29 @@ gb_internal THREAD_PROC(thread_pool_thread_proc) { finished_tasks += 1; } - if (finished_tasks > 0 && pool->tasks_left.load() == 0) { + if (finished_tasks > 0 && pool->tasks_left.load(std::memory_order_acquire) == 0) { futex_signal(&pool->tasks_left); } // If there's still work somewhere and we don't have it, steal it - if (pool->tasks_left.load()) { + if (pool->tasks_left.load(std::memory_order_acquire)) { usize idx = cast(usize)current_thread->idx; for_array(i, pool->threads) { - if (pool->tasks_left.load() == 0) { + if (pool->tasks_left.load(std::memory_order_acquire) == 0) { break; } idx = (idx + 1) % cast(usize)pool->threads.count; Thread *thread = &pool->threads.data[idx]; - WorkerTask task; + WorkerTask task, another_task; if (!thread_pool_queue_pop(thread, &task)) { continue; } - task.do_work(task.data); pool->tasks_left.fetch_sub(1, std::memory_order_release); - if (pool->tasks_left.load() == 0) { + if (pool->tasks_left.load(std::memory_order_acquire) == 0) { futex_signal(&pool->tasks_left); } diff --git a/src/types.cpp b/src/types.cpp index c49f43f7c..1e2d85ac6 100644 --- a/src/types.cpp +++ b/src/types.cpp @@ -3406,7 +3406,7 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { } break; case Type_Array: { - 
MUTEX_GUARD(&g_type_mutex); + // MUTEX_GUARD(&g_type_mutex); Type *elem = t->Array.elem; bool pop = type_path_push(path, elem); @@ -3419,7 +3419,7 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { } case Type_EnumeratedArray: { - MUTEX_GUARD(&g_type_mutex); + // MUTEX_GUARD(&g_type_mutex); Type *elem = t->EnumeratedArray.elem; bool pop = type_path_push(path, elem); @@ -3440,7 +3440,7 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { case Type_Tuple: { - MUTEX_GUARD(&g_type_mutex); + // MUTEX_GUARD(&g_type_mutex); i64 max = 1; for_array(i, t->Tuple.variables) { @@ -3465,7 +3465,7 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { return gb_max(t->Union.custom_align, 1); } - MUTEX_GUARD(&g_type_mutex); + // MUTEX_GUARD(&g_type_mutex); i64 max = 1; for_array(i, t->Union.variants) { @@ -3492,7 +3492,7 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { return 1; } - MUTEX_GUARD(&g_type_mutex); + // MUTEX_GUARD(&g_type_mutex); i64 max = 1; for_array(i, t->Struct.fields) { @@ -3605,7 +3605,7 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { switch (t->kind) { case Type_Named: { - MUTEX_GUARD(&g_type_mutex); + // MUTEX_GUARD(&g_type_mutex); bool pop = type_path_push(path, t); if (path->failure) { @@ -3644,7 +3644,7 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { return build_context.word_size*2; case Type_Array: { - MUTEX_GUARD(&g_type_mutex); + // MUTEX_GUARD(&g_type_mutex); i64 count, align, size, alignment; count = t->Array.count; @@ -3661,7 +3661,7 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { } break; case Type_EnumeratedArray: { - MUTEX_GUARD(&g_type_mutex); + // MUTEX_GUARD(&g_type_mutex); i64 count, align, size, alignment; count = t->EnumeratedArray.count; @@ -3695,7 +3695,7 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { return (1 + 1 + 2)*build_context.word_size; case Type_Tuple: { - MUTEX_GUARD(&g_type_mutex); 
+ // MUTEX_GUARD(&g_type_mutex); i64 count, align, size; count = t->Tuple.variables.count; @@ -3715,7 +3715,7 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { if (t->Union.variants.count == 0) { return 0; } - MUTEX_GUARD(&g_type_mutex); + // MUTEX_GUARD(&g_type_mutex); i64 align = type_align_of_internal(t, path); if (path->failure) { @@ -3754,7 +3754,7 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { case Type_Struct: { - MUTEX_GUARD(&g_type_mutex); + // MUTEX_GUARD(&g_type_mutex); if (t->Struct.is_raw_union) { i64 count = t->Struct.fields.count; From 318d92f9a8651e75da2e0846e0e5d60a4a137a25 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 21:37:21 +0000 Subject: [PATCH 28/78] Comment out `type_and_value_mutex` usage --- src/checker.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/checker.cpp b/src/checker.cpp index f80ea9e4c..e78a4f65f 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -1451,7 +1451,7 @@ gb_internal void add_type_and_value(CheckerInfo *i, Ast *expr, AddressingMode mo return; } - mutex_lock(&i->type_and_value_mutex); + // mutex_lock(&i->type_and_value_mutex); Ast *prev_expr = nullptr; while (prev_expr != expr) { prev_expr = expr; @@ -1473,7 +1473,7 @@ gb_internal void add_type_and_value(CheckerInfo *i, Ast *expr, AddressingMode mo expr = unparen_expr(expr); } - mutex_unlock(&i->type_and_value_mutex); + // mutex_unlock(&i->type_and_value_mutex); } gb_internal void add_entity_definition(CheckerInfo *i, Ast *identifier, Entity *entity) { From 52b319dbfd94a223f205c1078fb98d93fc6a60e0 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 21:53:41 +0000 Subject: [PATCH 29/78] Fix darwin's futex implementation in the compiler --- src/threading.cpp | 73 +++++++++++++++++++++++------------------------ 1 file changed, 36 insertions(+), 37 deletions(-) diff --git a/src/threading.cpp b/src/threading.cpp index bcbdaf083..cda8fe89b 100644 --- a/src/threading.cpp +++ 
b/src/threading.cpp @@ -548,9 +548,9 @@ gb_internal void futex_wait(Futex *addr, Footex val) { #include -gb_internal void futex_signal(Futex *addr) { +gb_internal void futex_signal(Futex *f) { for (;;) { - int ret = futex((volatile uint32_t *)addr, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1, NULL, NULL); + int ret = futex((volatile uint32_t *)f, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1, NULL, NULL); if (ret == -1) { if (errno == ETIMEDOUT || errno == EINTR) { continue; @@ -565,9 +565,9 @@ gb_internal void futex_signal(Futex *addr) { } -gb_internal void futex_broadcast(Futex *addr) { +gb_internal void futex_broadcast(Futex *f) { for (;;) { - int ret = futex((volatile uint32_t *)addr, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, INT32_MAX, NULL, NULL); + int ret = futex((volatile uint32_t *)f, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, INT32_MAX, NULL, NULL); if (ret == -1) { if (errno == ETIMEDOUT || errno == EINTR) { continue; @@ -581,11 +581,11 @@ gb_internal void futex_broadcast(Futex *addr) { } } -gb_internal void futex_wait(Futex *addr, Footex val) { +gb_internal void futex_wait(Futex *f, Footex val) { for (;;) { - int ret = futex((volatile uint32_t *)addr, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, NULL, NULL); + int ret = futex((volatile uint32_t *)f, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, NULL, NULL); if (ret == -1) { - if (*addr != val) { + if (*f != val) { return; } @@ -607,9 +607,9 @@ gb_internal void futex_wait(Futex *addr, Footex val) { extern "C" int __ulock_wait(uint32_t operation, void *addr, uint64_t value, uint32_t timeout); /* timeout is specified in microseconds */ extern "C" int __ulock_wake(uint32_t operation, void *addr, uint64_t wake_value); -gb_internal void futex_signal(Futex *addr) { +gb_internal void futex_signal(Futex *f) { for (;;) { - int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, addr, 0); + int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, f, 0); if (ret >= 0) { return; } @@ -623,28 +623,28 @@ gb_internal void futex_signal(Futex *addr) { } } -gb_internal 
void futex_broadcast(Futex *addr) { - for (;;) { - int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, addr, 0); - if (ret >= 0) { - return; - } - if (ret == EINTR || ret == EFAULT) { - continue; - } - if (ret == ENOENT) { - return; - } - GB_PANIC("Failed in futex wake!\n"); - } -} - -gb_internal void futex_wait(Futex *addr, Footex val) { +gb_internal void futex_broadcast(Futex *f) { for (;;) { enum { ULF_WAKE_ALL = 0x00000100 }; - int ret = __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO | ULF_WAKE_ALL, addr, val, 0); + int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO | ULF_WAKE_ALL, f, 0); if (ret >= 0) { - if (*addr != val) { + return; + } + if (ret == EINTR || ret == EFAULT) { + continue; + } + if (ret == ENOENT) { + return; + } + GB_PANIC("Failed in futex wake!\n"); + } +} + +gb_internal void futex_wait(Futex *f, Footex val) { + for (;;) { + int ret = __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, f, val, 0); + if (ret >= 0) { + if (*f != val) { return; } continue; @@ -661,19 +661,18 @@ gb_internal void futex_wait(Futex *addr, Footex val) { } #elif defined(GB_SYSTEM_WINDOWS) -gb_internal void futex_signal(Futex *addr) { - WakeByAddressSingle((void *)addr); +gb_internal void futex_signal(Futex *f) { + WakeByAddressSingle(f); } -gb_internal void futex_broadcast(Futex *addr) { - WakeByAddressAll((void *)addr); +gb_internal void futex_broadcast(Futex *f) { + WakeByAddressAll(f); } -gb_internal void futex_wait(Futex *addr, Footex val) { - for (;;) { - WaitOnAddress(addr, (void *)&val, sizeof(val), INFINITE); - if (*addr != val) break; - } +gb_internal void futex_wait(Futex *f, Footex val) { + do { + WaitOnAddress(f, (void *)&val, sizeof(val), INFINITE); + } while (f->load() == val); } #endif From d36c3c2590d28bbfc8bc887b9dd0aebc3ac92667 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 22:06:05 +0000 Subject: [PATCH 30/78] Re enable `type_and_value_mutex` --- src/checker.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) 
diff --git a/src/checker.cpp b/src/checker.cpp index e78a4f65f..f80ea9e4c 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -1451,7 +1451,7 @@ gb_internal void add_type_and_value(CheckerInfo *i, Ast *expr, AddressingMode mo return; } - // mutex_lock(&i->type_and_value_mutex); + mutex_lock(&i->type_and_value_mutex); Ast *prev_expr = nullptr; while (prev_expr != expr) { prev_expr = expr; @@ -1473,7 +1473,7 @@ gb_internal void add_type_and_value(CheckerInfo *i, Ast *expr, AddressingMode mo expr = unparen_expr(expr); } - // mutex_unlock(&i->type_and_value_mutex); + mutex_unlock(&i->type_and_value_mutex); } gb_internal void add_entity_definition(CheckerInfo *i, Ast *identifier, Entity *entity) { From bc9ee8e1a4ce797a894e5648fa92216c212b6999 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 22:13:49 +0000 Subject: [PATCH 31/78] Remove loops within futex signals on Linux --- src/check_decl.cpp | 2 +- src/threading.cpp | 24 ++++++++---------------- 2 files changed, 9 insertions(+), 17 deletions(-) diff --git a/src/check_decl.cpp b/src/check_decl.cpp index 32d50e36d..2b6868f05 100644 --- a/src/check_decl.cpp +++ b/src/check_decl.cpp @@ -1471,7 +1471,7 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de continue; } if (is_blank_ident(e->token)) { - error(e->token, "'using' a procedure parameter requires a non blank identifier"); + error(e->token, "'using' a procedure parameter requires a non blank identifier"); break; } diff --git a/src/threading.cpp b/src/threading.cpp index cda8fe89b..aca77cd8f 100644 --- a/src/threading.cpp +++ b/src/threading.cpp @@ -472,26 +472,18 @@ gb_internal void thread_set_name(Thread *t, char const *name) { #include gb_internal void futex_signal(Futex *addr) { - for (;;) { - int ret = syscall(SYS_futex, addr, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1, NULL, NULL, 0); - if (ret == -1) { - perror("Futex wake"); - GB_PANIC("Failed in futex wake!\n"); - } else if (ret > 0) { - return; - } + int ret = 
syscall(SYS_futex, addr, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1, NULL, NULL, 0); + if (ret == -1) { + perror("Futex wake"); + GB_PANIC("Failed in futex wake!\n"); } } gb_internal void futex_broadcast(Futex *addr) { - for (;;) { - int ret = syscall(SYS_futex, addr, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, INT32_MAX, NULL, NULL, 0); - if (ret == -1) { - perror("Futex wake"); - GB_PANIC("Failed in futex wake!\n"); - } else if (ret > 0) { - return; - } + int ret = syscall(SYS_futex, addr, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, INT32_MAX, NULL, NULL, 0); + if (ret == -1) { + perror("Futex wake"); + GB_PANIC("Failed in futex wake!\n"); } } From 6bd3a9d422d0567ead89e2465f46068d52f34f20 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 22:23:49 +0000 Subject: [PATCH 32/78] Be very explicit where the gen_procs_mutex can be unlock --- src/check_expr.cpp | 39 +++++++++++++++++++++++++++++---------- 1 file changed, 29 insertions(+), 10 deletions(-) diff --git a/src/check_expr.cpp b/src/check_expr.cpp index f47361c27..b40c48459 100644 --- a/src/check_expr.cpp +++ b/src/check_expr.cpp @@ -436,14 +436,19 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E GenProcsData *found_gen_procs = nullptr; - MUTEX_GUARD(&info->gen_procs_mutex); + // @@GPM + mutex_lock(&info->gen_procs_mutex); found_gen_procs = map_get(&info->gen_procs, base_entity->identifier.load()); + if (found_gen_procs) { MUTEX_GUARD(&found_gen_procs->mutex); for (Entity *other : found_gen_procs->procs) { Type *pt = base_type(other->type); if (are_types_identical(pt, final_proc_type)) { + // @@GPM + mutex_unlock(&info->gen_procs_mutex); + if (poly_proc_data) { poly_proc_data->gen_entity = other; } @@ -465,6 +470,8 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E Ast *cloned_proc_type_node = clone_ast(pt->node); success = check_procedure_type(&nctx, final_proc_type, cloned_proc_type_node, &operands); if (!success) { + // @@GPM + 
mutex_unlock(&info->gen_procs_mutex); return false; } @@ -473,6 +480,9 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E for (Entity *other : found_gen_procs->procs) { Type *pt = base_type(other->type); if (are_types_identical(pt, final_proc_type)) { + // @@GPM + mutex_unlock(&info->gen_procs_mutex); + if (poly_proc_data) { poly_proc_data->gen_entity = other; } @@ -498,6 +508,11 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E } } + if (found_gen_procs) { + // @@GPM + mutex_unlock(&info->gen_procs_mutex); + } + Ast *proc_lit = clone_ast(old_decl->proc_lit); ast_node(pl, ProcLit, proc_lit); @@ -555,6 +570,19 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E } } + if (found_gen_procs) { + MUTEX_GUARD(&found_gen_procs->mutex); + array_add(&found_gen_procs->procs, entity); + } else { + GenProcsData gen_proc_data = {}; + gen_proc_data.procs = array_make(heap_allocator()); + array_add(&gen_proc_data.procs, entity); + map_set(&info->gen_procs, base_entity->identifier.load(), gen_proc_data); + + // @@GPM + mutex_unlock(&info->gen_procs_mutex); + } + ProcInfo *proc_info = gb_alloc_item(permanent_allocator(), ProcInfo); proc_info->file = file; proc_info->token = token; @@ -565,15 +593,6 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E proc_info->generated_from_polymorphic = true; proc_info->poly_def_node = poly_def_node; - if (found_gen_procs) { - MUTEX_GUARD(&found_gen_procs->mutex); - array_add(&found_gen_procs->procs, entity); - } else { - GenProcsData gen_proc_data = {}; - gen_proc_data.procs = array_make(heap_allocator()); - array_add(&gen_proc_data.procs, entity); - map_set(&info->gen_procs, base_entity->identifier.load(), gen_proc_data); - } if (poly_proc_data) { poly_proc_data->gen_entity = entity; From 69b075782bac981ceeea5eea8f544e346f0fe6b5 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 22:40:28 +0000 Subject: 
[PATCH 33/78] Use a package local mutex for `add_type_and_value` --- src/check_decl.cpp | 9 +++++--- src/check_expr.cpp | 56 +++++++++++++++++++++++----------------------- src/check_type.cpp | 2 +- src/checker.cpp | 13 +++++++---- src/checker.hpp | 2 +- src/parser.hpp | 1 + 6 files changed, 46 insertions(+), 37 deletions(-) diff --git a/src/check_decl.cpp b/src/check_decl.cpp index 2b6868f05..e3486924c 100644 --- a/src/check_decl.cpp +++ b/src/check_decl.cpp @@ -45,7 +45,7 @@ gb_internal Type *check_init_variable(CheckerContext *ctx, Entity *e, Operand *o if (operand->mode == Addressing_Type) { if (e->type != nullptr && is_type_typeid(e->type)) { add_type_info_type(ctx, operand->type); - add_type_and_value(ctx->info, operand->expr, Addressing_Value, e->type, exact_value_typeid(operand->type)); + add_type_and_value(ctx, operand->expr, Addressing_Value, e->type, exact_value_typeid(operand->type)); return e->type; } else { gbString t = type_to_string(operand->type); @@ -1585,13 +1585,16 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de // NOTE(bill): Add the dependencies from the procedure literal (lambda) // But only at the procedure level - MUTEX_GUARD_BLOCK(decl->deps_mutex) MUTEX_GUARD_BLOCK(decl->parent->deps_mutex) { + MUTEX_GUARD_BLOCK(decl->deps_mutex) + MUTEX_GUARD_BLOCK(decl->parent->deps_mutex) { for (auto const &entry : decl->deps) { Entity *e = entry.ptr; ptr_set_add(&decl->parent->deps, e); } } - MUTEX_GUARD_BLOCK(decl->type_info_deps_mutex) MUTEX_GUARD_BLOCK(decl->parent->type_info_deps_mutex) { + + MUTEX_GUARD_BLOCK(decl->type_info_deps_mutex) + MUTEX_GUARD_BLOCK(decl->parent->type_info_deps_mutex) { for (auto const &entry : decl->type_info_deps) { Type *t = entry.ptr; ptr_set_add(&decl->parent->type_info_deps, t); diff --git a/src/check_expr.cpp b/src/check_expr.cpp index b40c48459..9feac93ea 100644 --- a/src/check_expr.cpp +++ b/src/check_expr.cpp @@ -852,7 +852,7 @@ gb_internal i64 
check_distance_between_types(CheckerContext *c, Operand *operand PolyProcData poly_proc_data = {}; if (check_polymorphic_procedure_assignment(c, operand, type, operand->expr, &poly_proc_data)) { Entity *e = poly_proc_data.gen_entity; - add_type_and_value(c->info, operand->expr, Addressing_Value, e->type, {}); + add_type_and_value(c, operand->expr, Addressing_Value, e->type, {}); add_entity_use(c, operand->expr, e); return 4; } @@ -1082,7 +1082,7 @@ gb_internal void check_assignment(CheckerContext *c, Operand *operand, Type *typ if (check_is_assignable_to(c, operand, type)) { if (operand->mode == Addressing_Type && is_type_typeid(type)) { add_type_info_type(c, operand->type); - add_type_and_value(c->info, operand->expr, Addressing_Value, type, exact_value_typeid(operand->type)); + add_type_and_value(c, operand->expr, Addressing_Value, type, exact_value_typeid(operand->type)); } } else { gbString expr_str = expr_to_string(operand->expr); @@ -2374,7 +2374,7 @@ gb_internal void check_comparison(CheckerContext *c, Operand *x, Operand *y, Tok if (x->mode == Addressing_Type && is_type_typeid(y->type)) { add_type_info_type(c, x->type); add_type_info_type(c, y->type); - add_type_and_value(c->info, x->expr, Addressing_Value, y->type, exact_value_typeid(x->type)); + add_type_and_value(c, x->expr, Addressing_Value, y->type, exact_value_typeid(x->type)); x->mode = Addressing_Value; x->type = t_untyped_bool; @@ -2382,7 +2382,7 @@ gb_internal void check_comparison(CheckerContext *c, Operand *x, Operand *y, Tok } else if (is_type_typeid(x->type) && y->mode == Addressing_Type) { add_type_info_type(c, x->type); add_type_info_type(c, y->type); - add_type_and_value(c->info, y->expr, Addressing_Value, x->type, exact_value_typeid(y->type)); + add_type_and_value(c, y->expr, Addressing_Value, x->type, exact_value_typeid(y->type)); x->mode = Addressing_Value; x->type = t_untyped_bool; @@ -3615,7 +3615,7 @@ gb_internal void update_untyped_expr_type(CheckerContext *c, Ast *e, Type *type, if 
(old == nullptr) { if (type != nullptr && type != t_invalid) { if (e->tav.type == nullptr || e->tav.type == t_invalid) { - add_type_and_value(c->info, e, e->tav.mode, type ? type : e->tav.type, e->tav.value); + add_type_and_value(c, e, e->tav.mode, type ? type : e->tav.type, e->tav.value); if (e->kind == Ast_TernaryIfExpr) { update_untyped_expr_type(c, e->TernaryIfExpr.x, type, final); update_untyped_expr_type(c, e->TernaryIfExpr.y, type, final); @@ -3721,7 +3721,7 @@ gb_internal void update_untyped_expr_type(CheckerContext *c, Ast *e, Type *type, return; } - add_type_and_value(c->info, e, old->mode, type, old->value); + add_type_and_value(c, e, old->mode, type, old->value); } gb_internal void update_untyped_expr_value(CheckerContext *c, Ast *e, ExactValue value) { @@ -4569,7 +4569,7 @@ gb_internal Entity *check_selector(CheckerContext *c, Operand *operand, Ast *nod operand->mode = Addressing_ProcGroup; operand->proc_group = entity; - add_type_and_value(c->info, operand->expr, operand->mode, operand->type, operand->value); + add_type_and_value(c, operand->expr, operand->mode, operand->type, operand->value); return entity; } GB_ASSERT_MSG(entity->type != nullptr, "%.*s (%.*s)", LIT(entity->token.string), LIT(entity_strings[entity->kind])); @@ -4738,7 +4738,7 @@ gb_internal Entity *check_selector(CheckerContext *c, Operand *operand, Ast *nod } Entity *swizzle_entity = alloc_entity_variable(nullptr, make_token_ident(field_name), operand->type, EntityState_Resolved); - add_type_and_value(c->info, operand->expr, operand->mode, operand->type, operand->value); + add_type_and_value(c, operand->expr, operand->mode, operand->type, operand->value); return swizzle_entity; } end_of_array_selector_swizzle:; @@ -4782,7 +4782,7 @@ gb_internal Entity *check_selector(CheckerContext *c, Operand *operand, Ast *nod operand->value = field_value; operand->type = entity->type; add_entity_use(c, selector, entity); - add_type_and_value(c->info, operand->expr, operand->mode, operand->type, 
operand->value); + add_type_and_value(c, operand->expr, operand->mode, operand->type, operand->value); return entity; } @@ -4807,7 +4807,7 @@ gb_internal Entity *check_selector(CheckerContext *c, Operand *operand, Ast *nod operand->value = field_value; operand->type = entity->type; add_entity_use(c, selector, entity); - add_type_and_value(c->info, operand->expr, operand->mode, operand->type, operand->value); + add_type_and_value(c, operand->expr, operand->mode, operand->type, operand->value); return entity; } @@ -4895,7 +4895,7 @@ gb_internal Entity *check_selector(CheckerContext *c, Operand *operand, Ast *nod break; } - add_type_and_value(c->info, operand->expr, operand->mode, operand->type, operand->value); + add_type_and_value(c, operand->expr, operand->mode, operand->type, operand->value); return entity; } @@ -5361,7 +5361,7 @@ gb_internal CALL_ARGUMENT_CHECKER(check_call_arguments_internal) { if (o.mode == Addressing_Type && is_type_typeid(e->type)) { add_type_info_type(c, o.type); - add_type_and_value(c->info, o.expr, Addressing_Value, e->type, exact_value_typeid(o.type)); + add_type_and_value(c, o.expr, Addressing_Value, e->type, exact_value_typeid(o.type)); } else if (show_error && is_type_untyped(o.type)) { update_untyped_expr_type(c, o.expr, t, true); } @@ -5412,7 +5412,7 @@ gb_internal CALL_ARGUMENT_CHECKER(check_call_arguments_internal) { } if (o.mode == Addressing_Type && is_type_typeid(t)) { add_type_info_type(c, o.type); - add_type_and_value(c->info, o.expr, Addressing_Value, t, exact_value_typeid(o.type)); + add_type_and_value(c, o.expr, Addressing_Value, t, exact_value_typeid(o.type)); } else if (show_error && is_type_untyped(o.type)) { update_untyped_expr_type(c, o.expr, t, true); } @@ -5425,7 +5425,7 @@ gb_internal CALL_ARGUMENT_CHECKER(check_call_arguments_internal) { data->score = score; data->result_type = final_proc_type->Proc.results; data->gen_entity = gen_entity; - add_type_and_value(c->info, ce->proc, Addressing_Value, final_proc_type, 
{}); + add_type_and_value(c, ce->proc, Addressing_Value, final_proc_type, {}); } return err; @@ -5625,7 +5625,7 @@ gb_internal CALL_ARGUMENT_CHECKER(check_named_call_arguments) { if (o->mode == Addressing_Type && is_type_typeid(e->type)) { add_type_info_type(c, o->type); - add_type_and_value(c->info, o->expr, Addressing_Value, e->type, exact_value_typeid(o->type)); + add_type_and_value(c, o->expr, Addressing_Value, e->type, exact_value_typeid(o->type)); } } @@ -5633,7 +5633,7 @@ gb_internal CALL_ARGUMENT_CHECKER(check_named_call_arguments) { data->score = score; data->result_type = pt->results; data->gen_entity = gen_entity; - add_type_and_value(c->info, ce->proc, Addressing_Value, proc_type, {}); + add_type_and_value(c, ce->proc, Addressing_Value, proc_type, {}); } return err; @@ -6635,7 +6635,7 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c operand->builtin_id = BuiltinProc_DIRECTIVE; operand->expr = proc; operand->type = t_invalid; - add_type_and_value(c->info, proc, operand->mode, operand->type, operand->value); + add_type_and_value(c, proc, operand->mode, operand->type, operand->value); } else { error(proc, "Unknown directive: #%.*s", LIT(name)); operand->expr = proc; @@ -6713,7 +6713,7 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c GB_ASSERT(ot->kind == Type_Named); Entity *e = ot->Named.type_name; add_entity_use(c, ident, e); - add_type_and_value(c->info, call, Addressing_Type, ot, empty_exact_value); + add_type_and_value(c, call, Addressing_Type, ot, empty_exact_value); } else { operand->mode = Addressing_Invalid; operand->type = t_invalid; @@ -6885,7 +6885,7 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c } } - // add_type_and_value(c->info, operand->expr, operand->mode, operand->type, operand->value); + // add_type_and_value(c, operand->expr, operand->mode, operand->type, operand->value); return Expr_Expr; } @@ -7129,8 +7129,8 @@ gb_internal bool 
check_range(CheckerContext *c, Ast *node, Operand *x, Operand * return false; } - add_type_and_value(c->info, ie->left, x->mode, x->type, x->value); - add_type_and_value(c->info, ie->right, y->mode, y->type, y->value); + add_type_and_value(c, ie->left, x->mode, x->type, x->value); + add_type_and_value(c, ie->right, y->mode, y->type, y->value); return true; } @@ -7146,7 +7146,7 @@ gb_internal bool check_is_operand_compound_lit_constant(CheckerContext *c, Opera return true; } if (expr->kind == Ast_ProcLit) { - add_type_and_value(c->info, expr, Addressing_Constant, type_of_expr(expr), exact_value_procedure(expr)); + add_type_and_value(c, expr, Addressing_Constant, type_of_expr(expr), exact_value_procedure(expr)); return true; } } @@ -7255,7 +7255,7 @@ gb_internal void check_promote_optional_ok(CheckerContext *c, Operand *x, Type * Type *pt = base_type(type_of_expr(expr->CallExpr.proc)); if (is_type_proc(pt)) { Type *tuple = pt->Proc.results; - add_type_and_value(c->info, x->expr, x->mode, tuple, x->value); + add_type_and_value(c, x->expr, x->mode, tuple, x->value); if (pt->Proc.result_count >= 2) { if (ok_type_) *ok_type_ = tuple->Tuple.variables[1]->type; @@ -7268,7 +7268,7 @@ gb_internal void check_promote_optional_ok(CheckerContext *c, Operand *x, Type * Type *tuple = make_optional_ok_type(x->type); if (ok_type_) *ok_type_ = tuple->Tuple.variables[1]->type; - add_type_and_value(c->info, x->expr, x->mode, tuple, x->value); + add_type_and_value(c, x->expr, x->mode, tuple, x->value); x->type = tuple; GB_ASSERT(is_type_tuple(type_of_expr(x->expr))); } @@ -7688,7 +7688,7 @@ gb_internal ExprKind check_or_else_expr(CheckerContext *c, Operand *o, Ast *node Type *left_type = nullptr; Type *right_type = nullptr; check_or_else_split_types(c, &x, name, &left_type, &right_type); - add_type_and_value(&c->checker->info, arg, x.mode, x.type, x.value); + add_type_and_value(c, arg, x.mode, x.type, x.value); if (left_type != nullptr) { if (!y_is_diverging) { @@ -7723,7 +7723,7 @@ 
gb_internal ExprKind check_or_return_expr(CheckerContext *c, Operand *o, Ast *no Type *left_type = nullptr; Type *right_type = nullptr; check_or_return_split_types(c, &x, name, &left_type, &right_type); - add_type_and_value(&c->checker->info, re->expr, x.mode, x.type, x.value); + add_type_and_value(c, re->expr, x.mode, x.type, x.value); if (right_type == nullptr) { check_or_else_expr_no_value_error(c, name, x, type_hint); @@ -8149,7 +8149,7 @@ gb_internal ExprKind check_compound_literal(CheckerContext *c, Operand *o, Ast * error(elem, "Expected a constant integer as an array field"); continue; } - // add_type_and_value(c->info, op_index.expr, op_index.mode, op_index.type, op_index.value); + // add_type_and_value(c, op_index.expr, op_index.mode, op_index.type, op_index.value); i64 index = exact_value_to_i64(op_index.value); @@ -9783,7 +9783,7 @@ gb_internal ExprKind check_expr_base(CheckerContext *c, Operand *o, Ast *node, T } check_rtti_type_disallowed(node, o->type, "An expression is using a type, %s, which has been disallowed"); - add_type_and_value(c->info, node, o->mode, o->type, o->value); + add_type_and_value(c, node, o->mode, o->type, o->value); return kind; } diff --git a/src/check_type.cpp b/src/check_type.cpp index 4634e1fbe..05fdbf4d3 100644 --- a/src/check_type.cpp +++ b/src/check_type.cpp @@ -3045,7 +3045,7 @@ gb_internal Type *check_type_expr(CheckerContext *ctx, Ast *e, Type *named_type) #endif if (is_type_typed(type)) { - add_type_and_value(ctx->info, e, Addressing_Type, type, empty_exact_value); + add_type_and_value(ctx, e, Addressing_Type, type, empty_exact_value); } else { gbString name = type_to_string(type); error(e, "Invalid type definition of %s", name); diff --git a/src/checker.cpp b/src/checker.cpp index f80ea9e4c..e48142c82 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -1440,7 +1440,7 @@ gb_internal void add_untyped(CheckerContext *c, Ast *expr, AddressingMode mode, check_set_expr_info(c, expr, mode, type, value); } -gb_internal 
void add_type_and_value(CheckerInfo *i, Ast *expr, AddressingMode mode, Type *type, ExactValue value) { +gb_internal void add_type_and_value(CheckerContext *ctx, Ast *expr, AddressingMode mode, Type *type, ExactValue value) { if (expr == nullptr) { return; } @@ -1451,7 +1451,12 @@ gb_internal void add_type_and_value(CheckerInfo *i, Ast *expr, AddressingMode mo return; } - mutex_lock(&i->type_and_value_mutex); + BlockingMutex *mutex = &ctx->info->type_and_value_mutex; + if (ctx->pkg) { + mutex = &ctx->pkg->type_and_value_mutex; + } + + mutex_lock(mutex); Ast *prev_expr = nullptr; while (prev_expr != expr) { prev_expr = expr; @@ -1473,7 +1478,7 @@ gb_internal void add_type_and_value(CheckerInfo *i, Ast *expr, AddressingMode mo expr = unparen_expr(expr); } - mutex_unlock(&i->type_and_value_mutex); + mutex_unlock(mutex); } gb_internal void add_entity_definition(CheckerInfo *i, Ast *identifier, Entity *entity) { @@ -5701,7 +5706,7 @@ gb_internal void check_parsed_files(Checker *c) { if (is_type_typed(u.info->type)) { compiler_error("%s (type %s) is typed!", expr_to_string(u.expr), type_to_string(u.info->type)); } - add_type_and_value(&c->info, u.expr, u.info->mode, u.info->type, u.info->value); + add_type_and_value(&c->builtin_ctx, u.expr, u.info->mode, u.info->type, u.info->value); } diff --git a/src/checker.hpp b/src/checker.hpp index 62f655f95..04cb1e311 100644 --- a/src/checker.hpp +++ b/src/checker.hpp @@ -482,7 +482,7 @@ gb_internal void scope_lookup_parent (Scope *s, String const &name, Scope **s gb_internal Entity *scope_insert (Scope *s, Entity *entity); -gb_internal void add_type_and_value (CheckerInfo *i, Ast *expression, AddressingMode mode, Type *type, ExactValue value); +gb_internal void add_type_and_value (CheckerContext *c, Ast *expression, AddressingMode mode, Type *type, ExactValue value); gb_internal ExprInfo *check_get_expr_info (CheckerContext *c, Ast *expr); gb_internal void add_untyped (CheckerContext *c, Ast *expression, AddressingMode mode, Type 
*basic_type, ExactValue value); gb_internal void add_entity_use (CheckerContext *c, Ast *identifier, Entity *entity); diff --git a/src/parser.hpp b/src/parser.hpp index c33739ebe..b492cfa85 100644 --- a/src/parser.hpp +++ b/src/parser.hpp @@ -174,6 +174,7 @@ struct AstPackage { BlockingMutex files_mutex; BlockingMutex foreign_files_mutex; + BlockingMutex type_and_value_mutex; MPMCQueue exported_entity_queue; From 8ece92f1f69217ae5b143cf0e7d812c0f857fa8d Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 23:21:16 +0000 Subject: [PATCH 34/78] Minimize the parapoly mutex usage a bit --- src/check_expr.cpp | 98 ++++++++++++++++++--------------------- src/checker.hpp | 2 +- src/llvm_backend_stmt.cpp | 5 +- 3 files changed, 48 insertions(+), 57 deletions(-) diff --git a/src/check_expr.cpp b/src/check_expr.cpp index 9feac93ea..65a411dc1 100644 --- a/src/check_expr.cpp +++ b/src/check_expr.cpp @@ -434,20 +434,22 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E return false; } - GenProcsData *found_gen_procs = nullptr; + GenProcsData *gen_procs = nullptr; - // @@GPM + // @@GPM ////////////////////////// mutex_lock(&info->gen_procs_mutex); + /////////////////////////////////// - found_gen_procs = map_get(&info->gen_procs, base_entity->identifier.load()); - - if (found_gen_procs) { - MUTEX_GUARD(&found_gen_procs->mutex); - for (Entity *other : found_gen_procs->procs) { + auto *found = map_get(&info->gen_procs, base_entity->identifier.load()); + if (found) { + gen_procs = *found; + MUTEX_GUARD(&gen_procs->mutex); + for (Entity *other : gen_procs->procs) { Type *pt = base_type(other->type); if (are_types_identical(pt, final_proc_type)) { - // @@GPM + // @@GPM //////////////////////////// mutex_unlock(&info->gen_procs_mutex); + ///////////////////////////////////// if (poly_proc_data) { poly_proc_data->gen_entity = other; @@ -455,6 +457,10 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E return 
true; } } + } else { + gen_procs = gb_alloc_item(permanent_allocator(), GenProcsData); + gen_procs->procs.allocator = heap_allocator(); + map_set(&info->gen_procs, base_entity->identifier.load(), gen_procs); } { @@ -469,50 +475,43 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E // LEAK TODO(bill): Cloning this AST may be leaky Ast *cloned_proc_type_node = clone_ast(pt->node); success = check_procedure_type(&nctx, final_proc_type, cloned_proc_type_node, &operands); + + // @@GPM //////////////////////////// + mutex_unlock(&info->gen_procs_mutex); + ///////////////////////////////////// + if (!success) { - // @@GPM - mutex_unlock(&info->gen_procs_mutex); return false; } - if (found_gen_procs) { - MUTEX_GUARD(&found_gen_procs->mutex); - for (Entity *other : found_gen_procs->procs) { - Type *pt = base_type(other->type); - if (are_types_identical(pt, final_proc_type)) { - // @@GPM - mutex_unlock(&info->gen_procs_mutex); - - if (poly_proc_data) { - poly_proc_data->gen_entity = other; - } - - DeclInfo *decl = other->decl_info; - if (decl->proc_checked_state != ProcCheckedState_Checked) { - ProcInfo *proc_info = gb_alloc_item(permanent_allocator(), ProcInfo); - proc_info->file = other->file; - proc_info->token = other->token; - proc_info->decl = decl; - proc_info->type = other->type; - proc_info->body = decl->proc_lit->ProcLit.body; - proc_info->tags = other->Procedure.tags;; - proc_info->generated_from_polymorphic = true; - proc_info->poly_def_node = poly_def_node; - - check_procedure_later(nctx.checker, proc_info); - } - - return true; + MUTEX_GUARD(&gen_procs->mutex); + for (Entity *other : gen_procs->procs) { + Type *pt = base_type(other->type); + if (are_types_identical(pt, final_proc_type)) { + if (poly_proc_data) { + poly_proc_data->gen_entity = other; } + + DeclInfo *decl = other->decl_info; + if (decl->proc_checked_state != ProcCheckedState_Checked) { + ProcInfo *proc_info = gb_alloc_item(permanent_allocator(), ProcInfo); + 
proc_info->file = other->file; + proc_info->token = other->token; + proc_info->decl = decl; + proc_info->type = other->type; + proc_info->body = decl->proc_lit->ProcLit.body; + proc_info->tags = other->Procedure.tags;; + proc_info->generated_from_polymorphic = true; + proc_info->poly_def_node = poly_def_node; + + check_procedure_later(nctx.checker, proc_info); + } + + return true; } } } - if (found_gen_procs) { - // @@GPM - mutex_unlock(&info->gen_procs_mutex); - } - Ast *proc_lit = clone_ast(old_decl->proc_lit); ast_node(pl, ProcLit, proc_lit); @@ -570,17 +569,8 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E } } - if (found_gen_procs) { - MUTEX_GUARD(&found_gen_procs->mutex); - array_add(&found_gen_procs->procs, entity); - } else { - GenProcsData gen_proc_data = {}; - gen_proc_data.procs = array_make(heap_allocator()); - array_add(&gen_proc_data.procs, entity); - map_set(&info->gen_procs, base_entity->identifier.load(), gen_proc_data); - - // @@GPM - mutex_unlock(&info->gen_procs_mutex); + MUTEX_GUARD_BLOCK(&gen_procs->mutex) { + array_add(&gen_procs->procs, entity); } ProcInfo *proc_info = gb_alloc_item(permanent_allocator(), ProcInfo); diff --git a/src/checker.hpp b/src/checker.hpp index 04cb1e311..cc92fce28 100644 --- a/src/checker.hpp +++ b/src/checker.hpp @@ -369,7 +369,7 @@ struct CheckerInfo { RecursiveMutex gen_procs_mutex; RecursiveMutex gen_types_mutex; - PtrMap gen_procs; // Key: Ast * | Identifier -> Entity + PtrMap gen_procs; // Key: Ast * | Identifier -> Entity PtrMap > gen_types; BlockingMutex type_info_mutex; // NOT recursive diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp index 8742423a5..2703c511a 100644 --- a/src/llvm_backend_stmt.cpp +++ b/src/llvm_backend_stmt.cpp @@ -57,8 +57,9 @@ gb_internal void lb_build_constant_value_decl(lbProcedure *p, AstValueDecl *vd) if (pl->body != nullptr) { auto *found = map_get(&info->gen_procs, ident); if (found) { - MUTEX_GUARD(&found->mutex); - for 
(Entity *e : found->procs) { + GenProcsData *gpd = *found; + MUTEX_GUARD(&gpd->mutex); + for (Entity *e : gpd->procs) { if (!ptr_set_exists(min_dep_set, e)) { continue; } From fd62ee14cdfe48be93b041aeadaf4d5eedce0447 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 23:31:38 +0000 Subject: [PATCH 35/78] Code moving around --- src/checker.cpp | 47 +++++++++++++++++++++++++++-------------------- 1 file changed, 27 insertions(+), 20 deletions(-) diff --git a/src/checker.cpp b/src/checker.cpp index e48142c82..c2cd1163a 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -5218,6 +5218,31 @@ gb_internal void check_unchecked_bodies(Checker *c) { global_after_checking_procedure_bodies = true; } +gb_internal void check_safety_all_procedures_for_unchecked(Checker *c) { + GB_ASSERT(DEBUG_CHECK_ALL_PROCEDURES); + UntypedExprInfoMap untyped = {}; + map_init(&untyped, heap_allocator()); + defer (map_destroy(&untyped)); + + + for_array(i, c->info.all_procedures) { + ProcInfo *pi = c->info.all_procedures[i]; + GB_ASSERT(pi != nullptr); + GB_ASSERT(pi->decl != nullptr); + Entity *e = pi->decl->entity; + auto proc_checked_state = pi->decl->proc_checked_state.load(); + if (e && ((e->flags & EntityFlag_ProcBodyChecked) == 0)) { + if ((e->flags & EntityFlag_Used) != 0) { + debugf("%.*s :: %s\n", LIT(e->token.string), type_to_string(e->type)); + debugf("proc body unchecked\n"); + debugf("Checked State: %s\n\n", ProcCheckedState_strings[proc_checked_state]); + + consume_proc_info(c, pi, &untyped); + } + } + } +} + gb_internal void check_test_procedures(Checker *c) { if (build_context.test_names.entries.count == 0) { return; @@ -5790,26 +5815,8 @@ gb_internal void check_parsed_files(Checker *c) { GB_ASSERT(c->procs_to_check.count == 0); if (DEBUG_CHECK_ALL_PROCEDURES) { - UntypedExprInfoMap untyped = {}; - map_init(&untyped, heap_allocator()); - defer (map_destroy(&untyped)); - - for_array(i, c->info.all_procedures) { - ProcInfo *pi = c->info.all_procedures[i]; - 
GB_ASSERT(pi != nullptr); - GB_ASSERT(pi->decl != nullptr); - Entity *e = pi->decl->entity; - auto proc_checked_state = pi->decl->proc_checked_state.load(); - if (e && ((e->flags & EntityFlag_ProcBodyChecked) == 0)) { - if ((e->flags & EntityFlag_Used) != 0) { - debugf("%.*s :: %s\n", LIT(e->token.string), type_to_string(e->type)); - debugf("proc body unchecked\n"); - debugf("Checked State: %s\n\n", ProcCheckedState_strings[proc_checked_state]); - - consume_proc_info(c, pi, &untyped); - } - } - } + TIME_SECTION("check unchecked (safety measure)"); + check_safety_all_procedures_for_unchecked(c); } debugf("Total Procedure Bodies Checked: %td\n", total_bodies_checked.load(std::memory_order_relaxed)); From e10fe91ebacdf6256608672a805de9d376e698fe Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 23:50:48 +0000 Subject: [PATCH 36/78] Narrow global `gen_procs_mutex` further --- src/check_expr.cpp | 9 +++------ src/checker.cpp | 1 + src/thread_pool.cpp | 2 +- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/src/check_expr.cpp b/src/check_expr.cpp index 65a411dc1..e3c55870c 100644 --- a/src/check_expr.cpp +++ b/src/check_expr.cpp @@ -439,7 +439,6 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E // @@GPM ////////////////////////// mutex_lock(&info->gen_procs_mutex); /////////////////////////////////// - auto *found = map_get(&info->gen_procs, base_entity->identifier.load()); if (found) { gen_procs = *found; @@ -462,6 +461,9 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E gen_procs->procs.allocator = heap_allocator(); map_set(&info->gen_procs, base_entity->identifier.load(), gen_procs); } + // @@GPM //////////////////////////// + mutex_unlock(&info->gen_procs_mutex); + ///////////////////////////////////// { // LEAK TODO(bill): This is technically a memory leak as it has to generate the type twice @@ -475,11 +477,6 @@ gb_internal bool 
find_or_generate_polymorphic_procedure(CheckerContext *old_c, E // LEAK TODO(bill): Cloning this AST may be leaky Ast *cloned_proc_type_node = clone_ast(pt->node); success = check_procedure_type(&nctx, final_proc_type, cloned_proc_type_node, &operands); - - // @@GPM //////////////////////////// - mutex_unlock(&info->gen_procs_mutex); - ///////////////////////////////////// - if (!success) { return false; } diff --git a/src/checker.cpp b/src/checker.cpp index c2cd1163a..ccd0f3627 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -51,6 +51,7 @@ gb_internal bool check_rtti_type_disallowed(Ast *expr, Type *type, char const *f gb_internal void scope_reset(Scope *scope) { if (scope == nullptr) return; + MUTEX_GUARD(&scope->mutex); scope->head_child.store(nullptr, std::memory_order_relaxed); string_map_clear(&scope->elements); ptr_set_clear(&scope->imported); diff --git a/src/thread_pool.cpp b/src/thread_pool.cpp index b89e00454..07ab3d323 100644 --- a/src/thread_pool.cpp +++ b/src/thread_pool.cpp @@ -167,7 +167,7 @@ gb_internal THREAD_PROC(thread_pool_thread_proc) { idx = (idx + 1) % cast(usize)pool->threads.count; Thread *thread = &pool->threads.data[idx]; - WorkerTask task, another_task; + WorkerTask task; if (!thread_pool_queue_pop(thread, &task)) { continue; } From 670274ad8fc5ba52ea11c2d864d1915e107cc0e2 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Mon, 2 Jan 2023 23:56:37 +0000 Subject: [PATCH 37/78] More explicit uses of mutexes --- src/check_expr.cpp | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/check_expr.cpp b/src/check_expr.cpp index e3c55870c..3c998fc44 100644 --- a/src/check_expr.cpp +++ b/src/check_expr.cpp @@ -442,10 +442,11 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E auto *found = map_get(&info->gen_procs, base_entity->identifier.load()); if (found) { gen_procs = *found; - MUTEX_GUARD(&gen_procs->mutex); + mutex_lock(&gen_procs->mutex); // @local-mutex for (Entity *other 
: gen_procs->procs) { Type *pt = base_type(other->type); if (are_types_identical(pt, final_proc_type)) { + mutex_unlock(&gen_procs->mutex); // @local-mutex // @@GPM //////////////////////////// mutex_unlock(&info->gen_procs_mutex); ///////////////////////////////////// @@ -456,6 +457,7 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E return true; } } + mutex_unlock(&gen_procs->mutex); // @local-mutex } else { gen_procs = gb_alloc_item(permanent_allocator(), GenProcsData); gen_procs->procs.allocator = heap_allocator(); @@ -481,10 +483,12 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E return false; } - MUTEX_GUARD(&gen_procs->mutex); + mutex_lock(&gen_procs->mutex); // @local-mutex for (Entity *other : gen_procs->procs) { Type *pt = base_type(other->type); if (are_types_identical(pt, final_proc_type)) { + mutex_unlock(&gen_procs->mutex); // @local-mutex + if (poly_proc_data) { poly_proc_data->gen_entity = other; } @@ -507,6 +511,7 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E return true; } } + mutex_unlock(&gen_procs->mutex); // @local-mutex } @@ -566,9 +571,9 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E } } - MUTEX_GUARD_BLOCK(&gen_procs->mutex) { + mutex_lock(&gen_procs->mutex); // @local-mutex array_add(&gen_procs->procs, entity); - } + mutex_unlock(&gen_procs->mutex); // @local-mutex ProcInfo *proc_info = gb_alloc_item(permanent_allocator(), ProcInfo); proc_info->file = file; From 600f2b7284b8974a18827242c18e790dab0cf06a Mon Sep 17 00:00:00 2001 From: gingerBill Date: Tue, 3 Jan 2023 11:53:59 +0000 Subject: [PATCH 38/78] Use heap_allocator for all hash set types --- src/build_settings.cpp | 2 +- src/check_builtin.cpp | 2 +- src/check_decl.cpp | 2 +- src/check_expr.cpp | 1 - src/check_stmt.cpp | 1 - src/checker.cpp | 24 +++++++----------------- src/llvm_backend_general.cpp | 2 +- src/llvm_backend_type.cpp | 2 +- 
src/main.cpp | 8 ++++---- src/parser.cpp | 2 +- src/ptr_set.cpp | 21 +++++++++++++++------ src/string_set.cpp | 18 +++++++++++++----- 12 files changed, 45 insertions(+), 40 deletions(-) diff --git a/src/build_settings.cpp b/src/build_settings.cpp index 04d1ada93..75615a901 100644 --- a/src/build_settings.cpp +++ b/src/build_settings.cpp @@ -1369,7 +1369,7 @@ gb_internal bool init_build_paths(String init_filename) { // NOTE(Jeroen): We're pre-allocating BuildPathCOUNT slots so that certain paths are always at the same enumerated index. array_init(&bc->build_paths, permanent_allocator(), BuildPathCOUNT); - string_set_init(&bc->target_features_set, heap_allocator(), 1024); + string_set_init(&bc->target_features_set, 1024); // [BuildPathMainPackage] Turn given init path into a `Path`, which includes normalizing it into a full path. bc->build_paths[BuildPath_Main_Package] = path_from_string(ha, init_filename); diff --git a/src/check_builtin.cpp b/src/check_builtin.cpp index 99d956f5e..36dc9b7a1 100644 --- a/src/check_builtin.cpp +++ b/src/check_builtin.cpp @@ -3086,7 +3086,7 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As } } StringSet name_set = {}; - string_set_init(&name_set, heap_allocator(), 2*ce->args.count); + string_set_init(&name_set, 2*ce->args.count); for_array(i, ce->args) { String name = {}; diff --git a/src/check_decl.cpp b/src/check_decl.cpp index e3486924c..0c1a7c325 100644 --- a/src/check_decl.cpp +++ b/src/check_decl.cpp @@ -1235,7 +1235,7 @@ gb_internal void check_proc_group_decl(CheckerContext *ctx, Entity *&pg_entity, pg_entity->type = t_invalid; PtrSet entity_set = {}; - ptr_set_init(&entity_set, heap_allocator(), 2*pg->args.count); + ptr_set_init(&entity_set, 2*pg->args.count); for_array(i, pg->args) { Ast *arg = pg->args[i]; diff --git a/src/check_expr.cpp b/src/check_expr.cpp index 3c998fc44..7a00b5353 100644 --- a/src/check_expr.cpp +++ b/src/check_expr.cpp @@ -196,7 +196,6 @@ gb_internal void 
check_did_you_mean_objc_entity(String const &name, Entity *e, b MUTEX_GUARD(objc_metadata->mutex); StringSet set = {}; - string_set_init(&set, heap_allocator()); defer (string_set_destroy(&set)); populate_check_did_you_mean_objc_entity(&set, e, is_type); diff --git a/src/check_stmt.cpp b/src/check_stmt.cpp index cf111e84c..945ba8f02 100644 --- a/src/check_stmt.cpp +++ b/src/check_stmt.cpp @@ -1185,7 +1185,6 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_ } PtrSet seen = {}; - ptr_set_init(&seen, heap_allocator()); defer (ptr_set_destroy(&seen)); for_array(i, bs->stmts) { diff --git a/src/checker.cpp b/src/checker.cpp index ccd0f3627..8da659461 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -72,7 +72,7 @@ gb_internal void entity_graph_node_set_destroy(EntityGraphNodeSet *s) { gb_internal void entity_graph_node_set_add(EntityGraphNodeSet *s, EntityGraphNode *n) { if (s->hashes.data == nullptr) { - ptr_set_init(s, heap_allocator()); + ptr_set_init(s); } ptr_set_add(s, n); } @@ -118,15 +118,10 @@ gb_internal void entity_graph_node_swap(EntityGraphNode **data, isize i, isize j gb_internal void import_graph_node_set_destroy(ImportGraphNodeSet *s) { - if (s->hashes.data != nullptr) { - ptr_set_destroy(s); - } + ptr_set_destroy(s); } gb_internal void import_graph_node_set_add(ImportGraphNodeSet *s, ImportGraphNode *n) { - if (s->hashes.data == nullptr) { - ptr_set_init(s, heap_allocator()); - } ptr_set_add(s, n); } @@ -185,8 +180,8 @@ gb_internal void init_decl_info(DeclInfo *d, Scope *scope, DeclInfo *parent) { gb_zero_item(d); d->parent = parent; d->scope = scope; - ptr_set_init(&d->deps, heap_allocator()); - ptr_set_init(&d->type_info_deps, heap_allocator()); + ptr_set_init(&d->deps); + ptr_set_init(&d->type_info_deps); array_init (&d->labels, heap_allocator()); } @@ -227,7 +222,7 @@ gb_internal Scope *create_scope(CheckerInfo *info, Scope *parent, isize init_ele Scope *s = gb_alloc_item(permanent_allocator(), Scope); s->parent 
= parent; string_map_init(&s->elements, heap_allocator(), init_elements_capacity); - ptr_set_init(&s->imported, heap_allocator(), 0); + ptr_set_init(&s->imported, 0); if (parent != nullptr && parent != builtin_pkg->scope) { Scope *prev_head_child = parent->head_child.exchange(s, std::memory_order_acq_rel); @@ -2270,8 +2265,8 @@ gb_internal void generate_minimum_dependency_set(Checker *c, Entity *start) { isize entity_count = c->info.entities.count; isize min_dep_set_cap = next_pow2_isize(entity_count*4); // empirically determined factor - ptr_set_init(&c->info.minimum_dependency_set, heap_allocator(), min_dep_set_cap); - ptr_set_init(&c->info.minimum_dependency_type_info_set, heap_allocator()); + ptr_set_init(&c->info.minimum_dependency_set, min_dep_set_cap); + ptr_set_init(&c->info.minimum_dependency_type_info_set); #define FORCE_ADD_RUNTIME_ENTITIES(condition, ...) do { \ if (condition) { \ @@ -3388,7 +3383,6 @@ gb_internal void check_decl_attributes(CheckerContext *c, Array const &at } StringSet set = {}; - string_set_init(&set, heap_allocator()); defer (string_set_destroy(&set)); for_array(i, attributes) { @@ -4759,7 +4753,6 @@ gb_internal void check_import_entities(Checker *c) { auto pq = priority_queue_create(dep_graph, import_graph_node_cmp, import_graph_node_swap); PtrSet emitted = {}; - ptr_set_init(&emitted, heap_allocator()); defer (ptr_set_destroy(&emitted)); Array package_order = {}; @@ -4773,7 +4766,6 @@ gb_internal void check_import_entities(Checker *c) { if (n->dep_count > 0) { PtrSet visited = {}; - ptr_set_init(&visited, heap_allocator()); defer (ptr_set_destroy(&visited)); auto path = find_import_path(c, pkg, pkg, &visited); @@ -4927,7 +4919,6 @@ gb_internal Array find_entity_path(Entity *start, Entity *end, PtrSet< bool made_visited = false; if (visited == nullptr) { made_visited = true; - ptr_set_init(&visited_, heap_allocator()); visited = &visited_; } defer (if (made_visited) { @@ -4990,7 +4981,6 @@ gb_internal void 
calculate_global_init_order(Checker *c) { auto pq = priority_queue_create(dep_graph, entity_graph_node_cmp, entity_graph_node_swap); PtrSet emitted = {}; - ptr_set_init(&emitted, heap_allocator()); defer (ptr_set_destroy(&emitted)); TIME_SECTION("calculate_global_init_order: queue sort"); diff --git a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp index 22628e895..75675474a 100644 --- a/src/llvm_backend_general.cpp +++ b/src/llvm_backend_general.cpp @@ -133,7 +133,7 @@ gb_internal bool lb_init_generator(lbGenerator *gen, Checker *c) { array_init(&gen->foreign_libraries, heap_allocator(), 0, 1024); - ptr_set_init(&gen->foreign_libraries_set, heap_allocator(), 1024); + ptr_set_init(&gen->foreign_libraries_set, 1024); if (USE_SEPARATE_MODULES) { for (auto const &entry : gen->info->packages) { diff --git a/src/llvm_backend_type.cpp b/src/llvm_backend_type.cpp index 26bb614e6..c306cdead 100644 --- a/src/llvm_backend_type.cpp +++ b/src/llvm_backend_type.cpp @@ -2,7 +2,7 @@ gb_internal isize lb_type_info_index(CheckerInfo *info, Type *type, bool err_on_ auto *set = &info->minimum_dependency_type_info_set; isize index = type_info_index(info, type, err_on_not_found); if (index >= 0) { - isize i = ptr_entry_index(set, index); + isize i = ptr_set_entry_index(set, index); if (i >= 0) { return i+1; } diff --git a/src/main.cpp b/src/main.cpp index ad9d6b0ef..91dcbdb01 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -212,11 +212,11 @@ gb_internal i32 linker_stage(lbGenerator *gen) { StringSet libs = {}; - string_set_init(&libs, heap_allocator(), 64); + string_set_init(&libs, 64); defer (string_set_destroy(&libs)); StringSet asm_files = {}; - string_set_init(&asm_files, heap_allocator(), 64); + string_set_init(&asm_files, 64); defer (string_set_destroy(&asm_files)); for_array(j, gen->foreign_libraries) { @@ -371,7 +371,7 @@ gb_internal i32 linker_stage(lbGenerator *gen) { defer (gb_string_free(lib_str)); StringSet libs = {}; - string_set_init(&libs, heap_allocator(), 
64); + string_set_init(&libs, 64); defer (string_set_destroy(&libs)); for_array(j, gen->foreign_libraries) { @@ -2518,7 +2518,7 @@ int main(int arg_count, char const **arg_ptr) { map_init(&build_context.defined_values, heap_allocator()); build_context.extra_packages.allocator = heap_allocator(); - string_set_init(&build_context.test_names, heap_allocator()); + string_set_init(&build_context.test_names); Array args = setup_args(arg_count, arg_ptr); diff --git a/src/parser.cpp b/src/parser.cpp index 4d2a8ecf4..046469c16 100644 --- a/src/parser.cpp +++ b/src/parser.cpp @@ -4854,7 +4854,7 @@ gb_internal void destroy_ast_file(AstFile *f) { gb_internal bool init_parser(Parser *p) { GB_ASSERT(p != nullptr); - string_set_init(&p->imported_files, heap_allocator()); + string_set_init(&p->imported_files); array_init(&p->packages, heap_allocator()); return true; } diff --git a/src/ptr_set.cpp b/src/ptr_set.cpp index 9ecf1043e..affde5c2f 100644 --- a/src/ptr_set.cpp +++ b/src/ptr_set.cpp @@ -6,11 +6,11 @@ struct PtrSetEntry { template struct PtrSet { - Slice hashes; + Slice hashes; Array> entries; }; -template gb_internal void ptr_set_init (PtrSet *s, gbAllocator a, isize capacity = 16); +template gb_internal void ptr_set_init (PtrSet *s, isize capacity = 16); template gb_internal void ptr_set_destroy(PtrSet *s); template gb_internal T ptr_set_add (PtrSet *s, T ptr); template gb_internal bool ptr_set_update (PtrSet *s, T ptr); // returns true if it previously existed @@ -21,15 +21,18 @@ template gb_internal void ptr_set_grow (PtrSet *s); template gb_internal void ptr_set_rehash (PtrSet *s, isize new_count); template gb_internal void ptr_set_reserve(PtrSet *h, isize cap); +gb_internal gbAllocator ptr_set_allocator(void) { + return heap_allocator(); +} template -gb_internal void ptr_set_init(PtrSet *s, gbAllocator a, isize capacity) { +gb_internal void ptr_set_init(PtrSet *s, isize capacity) { if (capacity != 0) { capacity = next_pow2_isize(gb_max(16, capacity)); } - 
slice_init(&s->hashes, a, capacity); - array_init(&s->entries, a, 0, capacity); + slice_init(&s->hashes, ptr_set_allocator(), capacity); + array_init(&s->entries, ptr_set_allocator(), 0, capacity); for (isize i = 0; i < capacity; i++) { s->hashes.data[i] = MAP_SENTINEL; } @@ -37,6 +40,9 @@ gb_internal void ptr_set_init(PtrSet *s, gbAllocator a, isize capacity) { template gb_internal void ptr_set_destroy(PtrSet *s) { + if (s->entries.allocator.proc == nullptr) { + s->entries.allocator = ptr_set_allocator(); + } slice_free(&s->hashes, s->entries.allocator); array_free(&s->entries); } @@ -118,6 +124,9 @@ gb_internal void ptr_set_reset_entries(PtrSet *s) { template gb_internal void ptr_set_reserve(PtrSet *s, isize cap) { + if (s->entries.allocator.proc == nullptr) { + s->entries.allocator = ptr_set_allocator(); + } array_reserve(&s->entries, cap); if (s->entries.count*2 < s->hashes.count) { return; @@ -139,7 +148,7 @@ gb_internal gb_inline bool ptr_set_exists(PtrSet *s, T ptr) { } template -gb_internal gb_inline isize ptr_entry_index(PtrSet *s, T ptr) { +gb_internal gb_inline isize ptr_set_entry_index(PtrSet *s, T ptr) { isize index = ptr_set__find(s, ptr).entry_index; if (index != MAP_SENTINEL) { return index; diff --git a/src/string_set.cpp b/src/string_set.cpp index 1c97d253e..753afa9bf 100644 --- a/src/string_set.cpp +++ b/src/string_set.cpp @@ -10,7 +10,7 @@ struct StringSet { }; -gb_internal void string_set_init (StringSet *s, gbAllocator a, isize capacity = 16); +gb_internal void string_set_init (StringSet *s, isize capacity = 16); gb_internal void string_set_destroy(StringSet *s); gb_internal void string_set_add (StringSet *s, String const &str); gb_internal bool string_set_update (StringSet *s, String const &str); // returns true if it previously existed @@ -20,18 +20,24 @@ gb_internal void string_set_clear (StringSet *s); gb_internal void string_set_grow (StringSet *s); gb_internal void string_set_rehash (StringSet *s, isize new_count); +gb_internal 
gbAllocator string_set_allocator(void) { + return heap_allocator(); +} -gb_internal gb_inline void string_set_init(StringSet *s, gbAllocator a, isize capacity) { +gb_internal gb_inline void string_set_init(StringSet *s, isize capacity) { capacity = next_pow2_isize(gb_max(16, capacity)); - slice_init(&s->hashes, a, capacity); - array_init(&s->entries, a, 0, capacity); + slice_init(&s->hashes, string_set_allocator(), capacity); + array_init(&s->entries, string_set_allocator(), 0, capacity); for (isize i = 0; i < capacity; i++) { s->hashes.data[i] = MAP_SENTINEL; } } gb_internal gb_inline void string_set_destroy(StringSet *s) { + if (s->entries.allocator.proc == nullptr) { + s->entries.allocator = string_set_allocator(); + } slice_free(&s->hashes, s->entries.allocator); array_free(&s->entries); } @@ -106,6 +112,9 @@ gb_internal void string_set_reset_entries(StringSet *s) { } gb_internal void string_set_reserve(StringSet *s, isize cap) { + if (s->entries.allocator.proc == nullptr) { + s->entries.allocator = string_set_allocator(); + } array_reserve(&s->entries, cap); if (s->entries.count*2 < s->hashes.count) { return; @@ -217,7 +226,6 @@ gb_internal gb_inline void string_set_clear(StringSet *s) { } - gb_internal StringSetEntry *begin(StringSet &m) { return m.entries.data; } From 252be0fb417f9cdde5e9c4b348cd995a20433aea Mon Sep 17 00:00:00 2001 From: gingerBill Date: Tue, 3 Jan 2023 11:59:52 +0000 Subject: [PATCH 39/78] Make all maps use heap allocator implicitly --- src/check_builtin.cpp | 2 +- src/check_expr.cpp | 3 +-- src/check_stmt.cpp | 1 - src/checker.cpp | 33 ++++++++++++--------------- src/common.cpp | 2 +- src/docs_writer.cpp | 11 ++++----- src/llvm_backend_general.cpp | 44 ++++++++++++++++++------------------ src/llvm_backend_proc.cpp | 10 ++++---- src/main.cpp | 2 +- src/ptr_map.cpp | 18 +++++++++++---- src/string_map.cpp | 18 +++++++++++---- 11 files changed, 79 insertions(+), 65 deletions(-) diff --git a/src/check_builtin.cpp b/src/check_builtin.cpp index 
36dc9b7a1..af196234e 100644 --- a/src/check_builtin.cpp +++ b/src/check_builtin.cpp @@ -1110,7 +1110,7 @@ gb_internal bool cache_load_file_directive(CheckerContext *c, Ast *call, String new_cache->path = path; new_cache->data = data; new_cache->file_error = file_error; - string_map_init(&new_cache->hashes, heap_allocator(), 32); + string_map_init(&new_cache->hashes, 32); string_map_set(&c->info->load_file_cache, path, new_cache); if (cache_) *cache_ = new_cache; } else { diff --git a/src/check_expr.cpp b/src/check_expr.cpp index 7a00b5353..030bfb8e6 100644 --- a/src/check_expr.cpp +++ b/src/check_expr.cpp @@ -5753,7 +5753,7 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op // in order to improve the type inference system StringMap type_hint_map = {}; // Key: String - string_map_init(&type_hint_map, heap_allocator(), 2*args.count); + string_map_init(&type_hint_map, 2*args.count); defer (string_map_destroy(&type_hint_map)); Type *ptype = nullptr; @@ -8283,7 +8283,6 @@ gb_internal ExprKind check_compound_literal(CheckerContext *c, Operand *o, Ast * bool is_partial = cl->tag && (cl->tag->BasicDirective.name.string == "partial"); SeenMap seen = {}; // NOTE(bill): Multimap, Key: ExactValue - map_init(&seen, heap_allocator()); defer (map_destroy(&seen)); if (cl->elems.count > 0 && cl->elems[0]->kind == Ast_FieldValue) { diff --git a/src/check_stmt.cpp b/src/check_stmt.cpp index 945ba8f02..7192b16b5 100644 --- a/src/check_stmt.cpp +++ b/src/check_stmt.cpp @@ -929,7 +929,6 @@ gb_internal void check_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags } SeenMap seen = {}; // NOTE(bill): Multimap, Key: ExactValue - map_init(&seen, heap_allocator()); defer (map_destroy(&seen)); for_array(stmt_index, bs->stmts) { diff --git a/src/checker.cpp b/src/checker.cpp index 8da659461..8779d9d45 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -221,7 +221,7 @@ gb_internal DeclInfo *make_decl_info(Scope *scope, DeclInfo *parent) { gb_internal 
Scope *create_scope(CheckerInfo *info, Scope *parent, isize init_elements_capacity=DEFAULT_SCOPE_CAPACITY) { Scope *s = gb_alloc_item(permanent_allocator(), Scope); s->parent = parent; - string_map_init(&s->elements, heap_allocator(), init_elements_capacity); + string_map_init(&s->elements, init_elements_capacity); ptr_set_init(&s->imported, 0); if (parent != nullptr && parent != builtin_pkg->scope) { @@ -1135,14 +1135,14 @@ gb_internal void init_checker_info(CheckerInfo *i) { array_init(&i->definitions, a); array_init(&i->entities, a); - map_init(&i->global_untyped, a); - string_map_init(&i->foreigns, a); - map_init(&i->gen_procs, a); - map_init(&i->gen_types, a); + map_init(&i->global_untyped); + string_map_init(&i->foreigns); + map_init(&i->gen_procs); + map_init(&i->gen_types); array_init(&i->type_info_types, a); - map_init(&i->type_info_map, a); - string_map_init(&i->files, a); - string_map_init(&i->packages, a); + map_init(&i->type_info_map); + string_map_init(&i->files); + string_map_init(&i->packages); array_init(&i->variable_init_order, a); array_init(&i->testing_procedures, a, 0, 0); array_init(&i->init_procedures, a, 0, 0); @@ -1160,8 +1160,8 @@ gb_internal void init_checker_info(CheckerInfo *i) { mpmc_init(&i->intrinsics_entry_point_usage, a, 1<<10); // just waste some memory here, even if it probably never used - map_init(&i->objc_msgSend_types, a); - string_map_init(&i->load_file_cache, a); + map_init(&i->objc_msgSend_types); + string_map_init(&i->load_file_cache); array_init(&i->all_procedures, heap_allocator()); @@ -2490,7 +2490,7 @@ gb_internal bool is_entity_a_dependency(Entity *e) { gb_internal Array generate_entity_dependency_graph(CheckerInfo *info, gbAllocator allocator) { PtrMap M = {}; - map_init(&M, allocator, info->entities.count); + map_init(&M, info->entities.count); defer (map_destroy(&M)); for_array(i, info->entities) { Entity *e = info->entities[i]; @@ -4200,7 +4200,7 @@ gb_internal void add_import_dependency_node(Checker *c, Ast 
*decl, PtrMap generate_import_dependency_graph(Checker *c) { PtrMap M = {}; - map_init(&M, heap_allocator(), 2*c->parser->packages.count); + map_init(&M, 2*c->parser->packages.count); defer (map_destroy(&M)); for_array(i, c->parser->packages) { @@ -4688,7 +4688,7 @@ gb_internal void check_collect_entities_all(Checker *c) { auto *wd = &collect_entity_worker_data[i]; wd->c = c; wd->ctx = make_checker_context(c); - map_init(&wd->untyped, heap_allocator()); + map_init(&wd->untyped); } for (auto const &entry : c->info.files.entries) { @@ -4804,7 +4804,6 @@ gb_internal void check_import_entities(Checker *c) { CheckerContext ctx = make_checker_context(c); UntypedExprInfoMap untyped = {}; - map_init(&untyped, heap_allocator()); defer (map_destroy(&untyped)); isize min_pkg_index = 0; @@ -5159,7 +5158,6 @@ gb_internal void check_unchecked_bodies(Checker *c) { GB_ASSERT(c->procs_to_check.count == 0); UntypedExprInfoMap untyped = {}; - map_init(&untyped, heap_allocator()); defer (map_destroy(&untyped)); // use the `procs_to_check` array @@ -5212,7 +5210,6 @@ gb_internal void check_unchecked_bodies(Checker *c) { gb_internal void check_safety_all_procedures_for_unchecked(Checker *c) { GB_ASSERT(DEBUG_CHECK_ALL_PROCEDURES); UntypedExprInfoMap untyped = {}; - map_init(&untyped, heap_allocator()); defer (map_destroy(&untyped)); @@ -5345,7 +5342,7 @@ gb_internal void check_procedure_bodies(Checker *c) { for (isize i = 0; i < thread_count; i++) { check_procedure_bodies_worker_data[i].c = c; - map_init(&check_procedure_bodies_worker_data[i].untyped, heap_allocator()); + map_init(&check_procedure_bodies_worker_data[i].untyped); } defer (for (isize i = 0; i < thread_count; i++) { @@ -5545,7 +5542,7 @@ gb_internal void check_deferred_procedures(Checker *c) { gb_internal void check_unique_package_names(Checker *c) { StringMap pkgs = {}; // Key: package name - string_map_init(&pkgs, heap_allocator(), 2*c->info.packages.entries.count); + string_map_init(&pkgs, 
2*c->info.packages.entries.count); defer (string_map_destroy(&pkgs)); for (auto const &entry : c->info.packages) { diff --git a/src/common.cpp b/src/common.cpp index 3b6ea59e8..199a263a1 100644 --- a/src/common.cpp +++ b/src/common.cpp @@ -373,7 +373,7 @@ gb_internal char const *string_intern(String const &string) { } gb_internal void init_string_interner(void) { - map_init(&string_intern_map, heap_allocator()); + map_init(&string_intern_map); } diff --git a/src/docs_writer.cpp b/src/docs_writer.cpp index bab97158d..2aefe29eb 100644 --- a/src/docs_writer.cpp +++ b/src/docs_writer.cpp @@ -53,13 +53,12 @@ gb_internal void odin_doc_writer_item_tracker_init(OdinDocWriterItemTracker * gb_internal void odin_doc_writer_prepare(OdinDocWriter *w) { w->state = OdinDocWriterState_Preparing; - gbAllocator a = heap_allocator(); - string_map_init(&w->string_cache, a); + string_map_init(&w->string_cache); - map_init(&w->file_cache, a); - map_init(&w->pkg_cache, a); - map_init(&w->entity_cache, a); - map_init(&w->type_cache, a); + map_init(&w->file_cache); + map_init(&w->pkg_cache); + map_init(&w->entity_cache); + map_init(&w->type_cache); odin_doc_writer_item_tracker_init(&w->files, 1); odin_doc_writer_item_tracker_init(&w->pkgs, 1); diff --git a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp index 75675474a..a849929f0 100644 --- a/src/llvm_backend_general.cpp +++ b/src/llvm_backend_general.cpp @@ -55,30 +55,30 @@ gb_internal void lb_init_module(lbModule *m, Checker *c) { } gbAllocator a = heap_allocator(); - map_init(&m->types, a); - map_init(&m->func_raw_types, a); - map_init(&m->struct_field_remapping, a); - map_init(&m->values, a); - map_init(&m->soa_values, a); - string_map_init(&m->members, a); - map_init(&m->procedure_values, a); - string_map_init(&m->procedures, a); - string_map_init(&m->const_strings, a); - map_init(&m->function_type_map, a); - map_init(&m->equal_procs, a); - map_init(&m->hasher_procs, a); - map_init(&m->map_get_procs, a); - 
map_init(&m->map_set_procs, a); + map_init(&m->types); + map_init(&m->func_raw_types); + map_init(&m->struct_field_remapping); + map_init(&m->values); + map_init(&m->soa_values); + string_map_init(&m->members); + map_init(&m->procedure_values); + string_map_init(&m->procedures); + string_map_init(&m->const_strings); + map_init(&m->function_type_map); + map_init(&m->equal_procs); + map_init(&m->hasher_procs); + map_init(&m->map_get_procs); + map_init(&m->map_set_procs); array_init(&m->procedures_to_generate, a, 0, 1024); array_init(&m->missing_procedures_to_check, a, 0, 16); - map_init(&m->debug_values, a); + map_init(&m->debug_values); array_init(&m->debug_incomplete_types, a, 0, 1024); - string_map_init(&m->objc_classes, a); - string_map_init(&m->objc_selectors, a); + string_map_init(&m->objc_classes); + string_map_init(&m->objc_selectors); - map_init(&m->map_info_map, a, 0); - map_init(&m->map_cell_info_map, a, 0); + map_init(&m->map_info_map, 0); + map_init(&m->map_cell_info_map, 0); } @@ -127,9 +127,9 @@ gb_internal bool lb_init_generator(lbGenerator *gen, Checker *c) { gen->info = &c->info; - map_init(&gen->modules, permanent_allocator(), gen->info->packages.entries.count*2); - map_init(&gen->modules_through_ctx, permanent_allocator(), gen->info->packages.entries.count*2); - map_init(&gen->anonymous_proc_lits, heap_allocator(), 1024); + map_init(&gen->modules, gen->info->packages.entries.count*2); + map_init(&gen->modules_through_ctx, gen->info->packages.entries.count*2); + map_init(&gen->anonymous_proc_lits, 1024); array_init(&gen->foreign_libraries, heap_allocator(), 0, 1024); diff --git a/src/llvm_backend_proc.cpp b/src/llvm_backend_proc.cpp index 7245bdd80..c66462bc1 100644 --- a/src/llvm_backend_proc.cpp +++ b/src/llvm_backend_proc.cpp @@ -119,9 +119,9 @@ gb_internal lbProcedure *lb_create_procedure(lbModule *m, Entity *entity, bool i p->branch_blocks.allocator = a; p->context_stack.allocator = a; p->scope_stack.allocator = a; - 
map_init(&p->selector_values, a, 0); - map_init(&p->selector_addr, a, 0); - map_init(&p->tuple_fix_map, a, 0); + map_init(&p->selector_values, 0); + map_init(&p->selector_addr, 0); + map_init(&p->tuple_fix_map, 0); if (p->is_foreign) { lb_add_foreign_library_path(p->module, entity->Procedure.foreign_library); @@ -345,7 +345,7 @@ gb_internal lbProcedure *lb_create_dummy_procedure(lbModule *m, String link_name p->blocks.allocator = a; p->branch_blocks.allocator = a; p->context_stack.allocator = a; - map_init(&p->tuple_fix_map, a, 0); + map_init(&p->tuple_fix_map, 0); char *c_link_name = alloc_cstring(permanent_allocator(), p->name); @@ -486,7 +486,7 @@ gb_internal void lb_begin_procedure_body(lbProcedure *p) { p->entry_block = lb_create_block(p, "entry", true); lb_start_block(p, p->entry_block); - map_init(&p->direct_parameters, heap_allocator()); + map_init(&p->direct_parameters); GB_ASSERT(p->type != nullptr); diff --git a/src/main.cpp b/src/main.cpp index 91dcbdb01..7ac78241e 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -2516,7 +2516,7 @@ int main(int arg_count, char const **arg_ptr) { add_library_collection(str_lit("core"), get_fullpath_relative(heap_allocator(), odin_root_dir(), str_lit("core"))); add_library_collection(str_lit("vendor"), get_fullpath_relative(heap_allocator(), odin_root_dir(), str_lit("vendor"))); - map_init(&build_context.defined_values, heap_allocator()); + map_init(&build_context.defined_values); build_context.extra_packages.allocator = heap_allocator(); string_set_init(&build_context.test_names); diff --git a/src/ptr_map.cpp b/src/ptr_map.cpp index 434680e91..083cd6697 100644 --- a/src/ptr_map.cpp +++ b/src/ptr_map.cpp @@ -46,7 +46,7 @@ gb_internal gb_inline u32 ptr_map_hash_key(void const *key) { } -template gb_internal void map_init (PtrMap *h, gbAllocator a, isize capacity = 16); +template gb_internal void map_init (PtrMap *h, isize capacity = 16); template gb_internal void map_destroy (PtrMap *h); template gb_internal V * map_get 
(PtrMap *h, K key); template gb_internal void map_set (PtrMap *h, K key, V const &value); @@ -68,11 +68,15 @@ template gb_internal void multi_map_remove (PtrMap< template gb_internal void multi_map_remove_all(PtrMap *h, K key); #endif +gb_internal gbAllocator map_allocator(void) { + return heap_allocator(); +} + template -gb_internal gb_inline void map_init(PtrMap *h, gbAllocator a, isize capacity) { +gb_internal gb_inline void map_init(PtrMap *h, isize capacity) { capacity = next_pow2_isize(capacity); - slice_init(&h->hashes, a, capacity); - array_init(&h->entries, a, 0, capacity); + slice_init(&h->hashes, map_allocator(), capacity); + array_init(&h->entries, map_allocator(), 0, capacity); for (isize i = 0; i < capacity; i++) { h->hashes.data[i] = MAP_SENTINEL; } @@ -80,6 +84,9 @@ gb_internal gb_inline void map_init(PtrMap *h, gbAllocator a, isize capaci template gb_internal gb_inline void map_destroy(PtrMap *h) { + if (h->entries.allocator.proc == nullptr) { + h->entries.allocator = map_allocator(); + } slice_free(&h->hashes, h->entries.allocator); array_free(&h->entries); } @@ -162,6 +169,9 @@ gb_internal void map_reset_entries(PtrMap *h) { template gb_internal void map_reserve(PtrMap *h, isize cap) { + if (h->entries.allocator.proc == nullptr) { + h->entries.allocator = map_allocator(); + } array_reserve(&h->entries, cap); if (h->entries.count*2 < h->hashes.count) { return; diff --git a/src/string_map.cpp b/src/string_map.cpp index 9f9374ece..b5db63e90 100644 --- a/src/string_map.cpp +++ b/src/string_map.cpp @@ -35,7 +35,7 @@ struct StringMap { }; -template gb_internal void string_map_init (StringMap *h, gbAllocator a, isize capacity = 16); +template gb_internal void string_map_init (StringMap *h, isize capacity = 16); template gb_internal void string_map_destroy (StringMap *h); template gb_internal T * string_map_get (StringMap *h, char const *key); @@ -56,11 +56,15 @@ template gb_internal void string_map_grow (StringMap template gb_internal void 
string_map_rehash (StringMap *h, isize new_count); template gb_internal void string_map_reserve (StringMap *h, isize cap); +gb_internal gbAllocator string_map_allocator(void) { + return heap_allocator(); +} + template -gb_internal gb_inline void string_map_init(StringMap *h, gbAllocator a, isize capacity) { +gb_internal gb_inline void string_map_init(StringMap *h, isize capacity) { capacity = next_pow2_isize(capacity); - slice_init(&h->hashes, a, capacity); - array_init(&h->entries, a, 0, capacity); + slice_init(&h->hashes, string_map_allocator(), capacity); + array_init(&h->entries, string_map_allocator(), 0, capacity); for (isize i = 0; i < capacity; i++) { h->hashes.data[i] = MAP_SENTINEL; } @@ -68,6 +72,9 @@ gb_internal gb_inline void string_map_init(StringMap *h, gbAllocator a, isize template gb_internal gb_inline void string_map_destroy(StringMap *h) { + if (h->entries.allocator.proc == nullptr) { + h->entries.allocator = string_map_allocator(); + } slice_free(&h->hashes, h->entries.allocator); array_free(&h->entries); } @@ -147,6 +154,9 @@ gb_internal void string_map_reset_entries(StringMap *h) { template gb_internal void string_map_reserve(StringMap *h, isize cap) { + if (h->entries.allocator.proc == nullptr) { + h->entries.allocator = string_map_allocator(); + } array_reserve(&h->entries, cap); if (h->entries.count*2 < h->hashes.count) { return; From 747a11a954824da7960a247299986f56d1316773 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Tue, 3 Jan 2023 12:18:35 +0000 Subject: [PATCH 40/78] Allow all set entry types to be implicitly cast to their key/value type to allow for easier iteration --- src/build_settings.cpp | 6 ++---- src/check_decl.cpp | 6 ++---- src/check_expr.cpp | 7 +++---- src/checker.cpp | 38 +++++++++++++------------------------- src/ptr_set.cpp | 12 ++++++++---- src/string_map.cpp | 13 ++++++++++--- src/string_set.cpp | 15 +++++++++++---- src/types.cpp | 3 +-- 8 files changed, 50 insertions(+), 50 deletions(-) diff --git 
a/src/build_settings.cpp b/src/build_settings.cpp index 75615a901..1dff5f43e 100644 --- a/src/build_settings.cpp +++ b/src/build_settings.cpp @@ -1332,11 +1332,10 @@ gb_internal void enable_target_feature(TokenPos pos, String const &target_featur gb_internal char const *target_features_set_to_cstring(gbAllocator allocator, bool with_quotes) { isize len = 0; isize i = 0; - for (auto const &entry : build_context.target_features_set) { + for (String const &feature : build_context.target_features_set) { if (i != 0) { len += 1; } - String feature = entry.value; len += feature.len; if (with_quotes) len += 2; i += 1; @@ -1344,13 +1343,12 @@ gb_internal char const *target_features_set_to_cstring(gbAllocator allocator, bo char *features = gb_alloc_array(allocator, char, len+1); len = 0; i = 0; - for (auto const &entry : build_context.target_features_set) { + for (String const &feature : build_context.target_features_set) { if (i != 0) { features[len++] = ','; } if (with_quotes) features[len++] = '"'; - String feature = entry.value; gb_memmove(features + len, feature.text, feature.len); len += feature.len; if (with_quotes) features[len++] = '"'; diff --git a/src/check_decl.cpp b/src/check_decl.cpp index 0c1a7c325..7b229db08 100644 --- a/src/check_decl.cpp +++ b/src/check_decl.cpp @@ -1587,16 +1587,14 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de MUTEX_GUARD_BLOCK(decl->deps_mutex) MUTEX_GUARD_BLOCK(decl->parent->deps_mutex) { - for (auto const &entry : decl->deps) { - Entity *e = entry.ptr; + for (Entity *e : decl->deps) { ptr_set_add(&decl->parent->deps, e); } } MUTEX_GUARD_BLOCK(decl->type_info_deps_mutex) MUTEX_GUARD_BLOCK(decl->parent->type_info_deps_mutex) { - for (auto const &entry : decl->type_info_deps) { - Type *t = entry.ptr; + for (Type *t : decl->type_info_deps) { ptr_set_add(&decl->parent->type_info_deps, t); } } diff --git a/src/check_expr.cpp b/src/check_expr.cpp index 030bfb8e6..2924f9d13 100644 --- a/src/check_expr.cpp 
+++ b/src/check_expr.cpp @@ -202,8 +202,8 @@ gb_internal void check_did_you_mean_objc_entity(String const &name, Entity *e, b DidYouMeanAnswers d = did_you_mean_make(heap_allocator(), set.entries.count, name); defer (did_you_mean_destroy(&d)); - for (auto const &entry : set) { - did_you_mean_append(&d, entry.value); + for (String const &target : set) { + did_you_mean_append(&d, target); } check_did_you_mean_print(&d, prefix); } @@ -4942,8 +4942,7 @@ gb_internal isize add_dependencies_from_unpacking(CheckerContext *c, Entity **lh if (e != nullptr) { DeclInfo *decl = decl_info_of_entity(e); if (decl != nullptr) { - for (auto const &entry : decl->deps) { - Entity *dep = entry.ptr; + for (Entity *dep : decl->deps) { ptr_set_add(&c->decl->deps, dep); } } diff --git a/src/checker.cpp b/src/checker.cpp index 8779d9d45..0075fa543 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -2222,12 +2222,11 @@ gb_internal void add_dependency_to_set(Checker *c, Entity *entity) { return; } - for (auto const &entry : decl->type_info_deps) { - add_min_dep_type_info(c, entry.ptr); + for (Type *t : decl->type_info_deps) { + add_min_dep_type_info(c, t); } - for (auto const &entry : decl->deps) { - Entity *e = entry.ptr; + for (Entity *e : decl->deps) { add_dependency_to_set(c, e); if (e->kind == Entity_Procedure && e->Procedure.is_foreign) { Entity *fl = e->Procedure.foreign_library; @@ -2510,8 +2509,7 @@ gb_internal Array generate_entity_dependency_graph(CheckerInf DeclInfo *decl = decl_info_of_entity(e); GB_ASSERT(decl != nullptr); - for (auto const &entry : decl->deps) { - Entity *dep = entry.ptr; + for (Entity *dep : decl->deps) { if (dep->flags & EntityFlag_Field) { continue; } @@ -2537,15 +2535,12 @@ gb_internal Array generate_entity_dependency_graph(CheckerInf if (e->kind == Entity_Procedure) { // Connect each pred 'p' of 'n' with each succ 's' and from // the procedure node - for (auto const &p_entry : n->pred) { - EntityGraphNode *p = p_entry.ptr; - + for (EntityGraphNode *p : 
n->pred) { // Ignore self-cycles if (p != n) { // Each succ 's' of 'n' becomes a succ of 'p', and // each pred 'p' of 'n' becomes a pred of 's' - for (auto const &s_entry : n->succ) { - EntityGraphNode *s = s_entry.ptr; + for (EntityGraphNode *s : n->succ) { // Ignore self-cycles if (s != n) { if (p->entity->kind == Entity_Procedure && @@ -4784,8 +4779,7 @@ gb_internal void check_import_entities(Checker *c) { } } - for (auto const &entry : n->pred) { - ImportGraphNode *p = entry.ptr; + for (ImportGraphNode *p : n->pred) { p->dep_count = gb_max(p->dep_count-1, 0); priority_queue_fix(&pq, p->index); } @@ -4893,8 +4887,7 @@ gb_internal bool find_entity_path_tuple(Type *tuple, Entity *end, PtrSetdeps) { - Entity *dep = entry.ptr; + for (Entity *dep : var_decl->deps) { if (dep == end) { auto path = array_make(heap_allocator()); array_add(&path, dep); @@ -4944,8 +4937,7 @@ gb_internal Array find_entity_path(Entity *start, Entity *end, PtrSet< return path; } } else { - for (auto const &entry : decl->deps) { - Entity *dep = entry.ptr; + for (Entity *dep : decl->deps) { if (dep == end) { auto path = array_make(heap_allocator()); array_add(&path, dep); @@ -5002,8 +4994,7 @@ gb_internal void calculate_global_init_order(Checker *c) { } } - for (auto const &entry : n->pred) { - EntityGraphNode *p = entry.ptr; + for (EntityGraphNode *p : n->pred) { p->dep_count -= 1; p->dep_count = gb_max(p->dep_count, 0); priority_queue_fix(&pq, p->index); @@ -5163,8 +5154,7 @@ gb_internal void check_unchecked_bodies(Checker *c) { // use the `procs_to_check` array global_procedure_body_in_worker_queue = false; - for (auto const &entry : c->info.minimum_dependency_set) { - Entity *e = entry.ptr; + for (Entity *e : c->info.minimum_dependency_set) { if (e == nullptr || e->kind != Entity_Procedure) { continue; } @@ -5239,8 +5229,7 @@ gb_internal void check_test_procedures(Checker *c) { AstPackage *pkg = c->info.init_package; Scope *s = pkg->scope; - for (auto const &entry : 
build_context.test_names) { - String name = entry.value; + for (String const &name : build_context.test_names) { Entity *e = scope_lookup(s, name); if (e == nullptr) { Token tok = {}; @@ -5744,8 +5733,7 @@ gb_internal void check_parsed_files(Checker *c) { DeclInfo *decl = e->decl_info; ast_node(pl, ProcLit, decl->proc_lit); if (pl->inlining == ProcInlining_inline) { - for (auto const &entry : decl->deps) { - Entity *dep = entry.ptr; + for (Entity *dep : decl->deps) { if (dep == e) { error(e->token, "Cannot inline recursive procedure '%.*s'", LIT(e->token.string)); break; diff --git a/src/ptr_set.cpp b/src/ptr_set.cpp index affde5c2f..303bde07e 100644 --- a/src/ptr_set.cpp +++ b/src/ptr_set.cpp @@ -2,6 +2,10 @@ template struct PtrSetEntry { T ptr; MapIndex next; + + operator T() const noexcept { + return this->ptr; + } }; template @@ -245,21 +249,21 @@ gb_internal gb_inline void ptr_set_clear(PtrSet *s) { template -gb_internal PtrSetEntry *begin(PtrSet &m) { +gb_internal PtrSetEntry *begin(PtrSet &m) noexcept { return m.entries.data; } template -gb_internal PtrSetEntry const *begin(PtrSet const &m) { +gb_internal PtrSetEntry const *begin(PtrSet const &m) noexcept { return m.entries.data; } template -gb_internal PtrSetEntry *end(PtrSet &m) { +gb_internal PtrSetEntry *end(PtrSet &m) noexcept { return m.entries.data + m.entries.count; } template -gb_internal PtrSetEntry const *end(PtrSet const &m) { +gb_internal PtrSetEntry const *end(PtrSet const &m) noexcept { return m.entries.data + m.entries.count; } \ No newline at end of file diff --git a/src/string_map.cpp b/src/string_map.cpp index b5db63e90..74a16de73 100644 --- a/src/string_map.cpp +++ b/src/string_map.cpp @@ -1,6 +1,13 @@ struct StringHashKey { u32 hash; String string; + + operator String() const noexcept { + return this->string; + } + operator String const &() const noexcept { + return this->string; + } }; gb_internal gb_inline StringHashKey string_hash_string(String const &s) { @@ -283,11 +290,11 @@ 
gb_internal gb_inline void string_map_clear(StringMap *h) { template -gb_internal StringMapEntry *begin(StringMap &m) { +gb_internal StringMapEntry *begin(StringMap &m) noexcept { return m.entries.data; } template -gb_internal StringMapEntry const *begin(StringMap const &m) { +gb_internal StringMapEntry const *begin(StringMap const &m) noexcept { return m.entries.data; } @@ -298,6 +305,6 @@ gb_internal StringMapEntry *end(StringMap &m) { } template -gb_internal StringMapEntry const *end(StringMap const &m) { +gb_internal StringMapEntry const *end(StringMap const &m) noexcept { return m.entries.data + m.entries.count; } \ No newline at end of file diff --git a/src/string_set.cpp b/src/string_set.cpp index 753afa9bf..fb4640c20 100644 --- a/src/string_set.cpp +++ b/src/string_set.cpp @@ -2,6 +2,13 @@ struct StringSetEntry { u32 hash; MapIndex next; String value; + + operator String const() const noexcept { + return this->value; + } + operator String const &() const noexcept { + return this->value; + } }; struct StringSet { @@ -226,18 +233,18 @@ gb_internal gb_inline void string_set_clear(StringSet *s) { } -gb_internal StringSetEntry *begin(StringSet &m) { +gb_internal StringSetEntry *begin(StringSet &m) noexcept { return m.entries.data; } -gb_internal StringSetEntry const *begin(StringSet const &m) { +gb_internal StringSetEntry const *begin(StringSet const &m) noexcept { return m.entries.data; } -gb_internal StringSetEntry *end(StringSet &m) { +gb_internal StringSetEntry *end(StringSet &m) noexcept { return m.entries.data + m.entries.count; } -gb_internal StringSetEntry const *end(StringSet const &m) { +gb_internal StringSetEntry const *end(StringSet const &m) noexcept { return m.entries.data + m.entries.count; } \ No newline at end of file diff --git a/src/types.cpp b/src/types.cpp index 1e2d85ac6..d33c36e94 100644 --- a/src/types.cpp +++ b/src/types.cpp @@ -823,8 +823,7 @@ gb_internal bool type_ptr_set_exists(PtrSet *s, Type *t) { // TODO(bill, 2019-10-05): This is 
very slow and it's probably a lot // faster to cache types correctly - for (auto const &entry : *s) { - Type *f = entry.ptr; + for (Type *f : *s) { if (are_types_identical(t, f)) { ptr_set_add(s, t); return true; From 7380b7e61be55e4c2371f62401d6175c51ea0c6f Mon Sep 17 00:00:00 2001 From: gingerBill Date: Tue, 3 Jan 2023 12:37:41 +0000 Subject: [PATCH 41/78] Add more uses of C++ style `for` loops over `for_array` macro --- src/check_expr.cpp | 149 +++++++++++++++++---------------------------- 1 file changed, 57 insertions(+), 92 deletions(-) diff --git a/src/check_expr.cpp b/src/check_expr.cpp index 2924f9d13..c1787e7b6 100644 --- a/src/check_expr.cpp +++ b/src/check_expr.cpp @@ -146,8 +146,8 @@ gb_internal void check_did_you_mean_print(DidYouMeanAnswers *d, char const *pref auto results = did_you_mean_results(d); if (results.count != 0) { error_line("\tSuggestion: Did you mean?\n"); - for_array(i, results) { - String const &target = results[i].target; + for (auto const &result : results) { + String const &target = result.target; error_line("\t\t%s%.*s\n", prefix, LIT(target)); // error_line("\t\t%.*s %td\n", LIT(target), results[i].distance); } @@ -166,19 +166,16 @@ gb_internal void populate_check_did_you_mean_objc_entity(StringSet *set, Entity GB_ASSERT(t->kind == Type_Struct); if (is_type) { - for_array(i, objc_metadata->type_entries) { - String name = objc_metadata->type_entries[i].name; - string_set_add(set, name); + for (auto const &entry : objc_metadata->type_entries) { + string_set_add(set, entry.name); } } else { - for_array(i, objc_metadata->value_entries) { - String name = objc_metadata->value_entries[i].name; - string_set_add(set, name); + for (auto const &entry : objc_metadata->value_entries) { + string_set_add(set, entry.name); } } - for_array(i, t->Struct.fields) { - Entity *f = t->Struct.fields[i]; + for (Entity *f : t->Struct.fields) { if (f->flags & EntityFlag_Using && f->type != nullptr) { if (f->type->kind == Type_Named && 
f->type->Named.type_name) { populate_check_did_you_mean_objc_entity(set, f->type->Named.type_name, is_type); @@ -214,8 +211,8 @@ gb_internal void check_did_you_mean_type(String const &name, Array con DidYouMeanAnswers d = did_you_mean_make(heap_allocator(), fields.count, name); defer (did_you_mean_destroy(&d)); - for_array(i, fields) { - did_you_mean_append(&d, fields[i]->token.string); + for (Entity *e : fields) { + did_you_mean_append(&d, e->token.string); } check_did_you_mean_print(&d, prefix); } @@ -227,8 +224,8 @@ gb_internal void check_did_you_mean_type(String const &name, Slice con DidYouMeanAnswers d = did_you_mean_make(heap_allocator(), fields.count, name); defer (did_you_mean_destroy(&d)); - for_array(i, fields) { - did_you_mean_append(&d, fields[i]->token.string); + for (Entity *e : fields) { + did_you_mean_append(&d, e->token.string); } check_did_you_mean_print(&d, prefix); } @@ -781,8 +778,7 @@ gb_internal i64 check_distance_between_types(CheckerContext *c, Operand *operand } if (is_type_union(dst)) { - for_array(i, dst->Union.variants) { - Type *vt = dst->Union.variants[i]; + for (Type *vt : dst->Union.variants) { if (are_types_identical(vt, s)) { return 1; } @@ -798,8 +794,7 @@ gb_internal i64 check_distance_between_types(CheckerContext *c, Operand *operand } else if (is_type_untyped(src)) { i64 prev_lowest_score = -1; i64 lowest_score = -1; - for_array(i, dst->Union.variants) { - Type *vt = dst->Union.variants[i]; + for (Type *vt : dst->Union.variants) { i64 score = check_distance_between_types(c, operand, vt); if (score >= 0) { if (lowest_score < 0) { @@ -1031,8 +1026,8 @@ gb_internal void check_assignment(CheckerContext *c, Operand *operand, Type *typ if (type != nullptr && is_type_proc(type)) { Array procs = proc_group_entities(c, *operand); // NOTE(bill): These should be done - for_array(i, procs) { - Type *t = base_type(procs[i]->type); + for (Entity *e : procs) { + Type *t = base_type(e->type); if (t == t_invalid) { continue; } @@ -1040,7 
+1035,6 @@ gb_internal void check_assignment(CheckerContext *c, Operand *operand, Type *typ x.mode = Addressing_Value; x.type = t; if (check_is_assignable_to(c, &x, type)) { - Entity *e = procs[i]; add_entity_use(c, operand->expr, e); good = true; break; @@ -1470,7 +1464,7 @@ gb_internal bool check_cycle(CheckerContext *c, Entity *curr, bool report) { return false; } for_array(i, *c->type_path) { - Entity *prev = (*c->type_path)[i]; + Entity *prev = c->type_path->data[i]; if (prev == curr) { if (report) { error(curr->token, "Illegal declaration cycle of `%.*s`", LIT(curr->token.string)); @@ -1535,8 +1529,8 @@ gb_internal Entity *check_ident(CheckerContext *c, Operand *o, Ast *n, Type *nam if (type_hint != nullptr && is_type_proc(type_hint)) { // NOTE(bill): These should be done - for_array(i, procs) { - Type *t = base_type(procs[i]->type); + for (Entity *proc : procs) { + Type *t = base_type(proc->type); if (t == t_invalid) { continue; } @@ -1544,7 +1538,7 @@ gb_internal Entity *check_ident(CheckerContext *c, Operand *o, Ast *n, Type *nam x.mode = Addressing_Value; x.type = t; if (check_is_assignable_to(c, &x, type_hint)) { - e = procs[i]; + e = proc; add_entity_use(c, n, e); skip = true; break; @@ -4174,8 +4168,7 @@ gb_internal ExactValue get_constant_field_single(CheckerContext *c, ExactValue v if (cl->elems[0]->kind == Ast_FieldValue) { if (is_type_struct(node->tav.type)) { bool found = false; - for_array(i, cl->elems) { - Ast *elem = cl->elems[i]; + for (Ast *elem : cl->elems) { if (elem->kind != Ast_FieldValue) { continue; } @@ -4194,8 +4187,7 @@ gb_internal ExactValue get_constant_field_single(CheckerContext *c, ExactValue v value = {}; } } else if (is_type_array(node->tav.type) || is_type_enumerated_array(node->tav.type)) { - for_array(i, cl->elems) { - Ast *elem = cl->elems[i]; + for (Ast *elem : cl->elems) { if (elem->kind != Ast_FieldValue) { continue; } @@ -4578,8 +4570,7 @@ gb_internal Entity *check_selector(CheckerContext *c, Operand *operand, Ast *nod 
if (entity->kind == Entity_ProcGroup) { Array procs = entity->ProcGroup.entities; bool skip = false; - for_array(i, procs) { - Entity *p = procs[i]; + for (Entity *p : procs) { Type *t = base_type(p->type); if (t == t_invalid) { continue; @@ -4958,7 +4949,7 @@ gb_internal isize add_dependencies_from_unpacking(CheckerContext *c, Entity **lh gb_internal bool check_assignment_arguments(CheckerContext *ctx, Array const &lhs, Array *operands, Slice const &rhs) { bool optional_ok = false; isize tuple_index = 0; - for_array(i, rhs) { + for (Ast *rhs_expr : rhs) { CheckerContext c_ = *ctx; CheckerContext *c = &c_; @@ -4970,7 +4961,7 @@ gb_internal bool check_assignment_arguments(CheckerContext *ctx, Array type_hint = lhs[tuple_index].type; } - check_expr_base(c, &o, rhs[i], type_hint); + check_expr_base(c, &o, rhs_expr, type_hint); if (o.mode == Addressing_NoValue) { error_operand_no_value(&o); o.mode = Addressing_Invalid; @@ -5022,8 +5013,8 @@ gb_internal bool check_assignment_arguments(CheckerContext *ctx, Array } } else { TypeTuple *tuple = &o.type->Tuple; - for_array(j, tuple->variables) { - o.type = tuple->variables[j]->type; + for (Entity *e : tuple->variables) { + o.type = e->type; array_add(operands, o); } @@ -5115,8 +5106,8 @@ gb_internal bool check_unpack_arguments(CheckerContext *ctx, Entity **lhs, isize } } else { TypeTuple *tuple = &o.type->Tuple; - for_array(j, tuple->variables) { - o.type = tuple->variables[j]->type; + for (Entity *e : tuple->variables) { + o.type = e->type; array_add(operands, o); } @@ -5459,8 +5450,7 @@ gb_internal CALL_ARGUMENT_CHECKER(check_named_call_arguments) { bool *visited = gb_alloc_array(temporary_allocator(), bool, param_count); auto ordered_operands = array_make(temporary_allocator(), param_count); defer ({ - for_array(i, ordered_operands) { - Operand const &o = ordered_operands[i]; + for (Operand const &o : ordered_operands) { if (o.expr != nullptr) { call->viral_state_flags |= o.expr->viral_state_flags; } @@ -5778,8 +5768,7 @@ 
gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op param_tuple = &pt->params->Tuple; } if (param_tuple != nullptr) { - for_array(i, param_tuple->variables) { - Entity *e = param_tuple->variables[i]; + for (Entity *e : param_tuple->variables) { if (is_blank_ident(e->token)) { continue; } @@ -5789,8 +5778,8 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op } } else { Array procs = proc_group_entities(c, *operand); - for_array(j, procs) { - Type *proc_type = base_type(procs[j]->type); + for (Entity *proc : procs) { + Type *proc_type = base_type(proc->type); if (is_type_proc(proc_type)) { TypeProc *pt = &proc_type->Proc; TypeTuple *param_tuple = nullptr; @@ -5800,8 +5789,7 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op if (param_tuple == nullptr) { continue; } - for_array(i, param_tuple->variables) { - Entity *e = param_tuple->variables[i]; + for (Entity *e : param_tuple->variables) { if (is_blank_ident(e->token)) { continue; } @@ -5865,10 +5853,10 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op if (procs.count > 1) { isize max_arg_count = args.count; - for_array(i, args) { + for (Ast *arg : args) { // NOTE(bill): The only thing that may have multiple values // will be a call expression (assuming `or_return` and `()` will be stripped) - Ast *arg = strip_or_return_expr(args[i]); + arg = strip_or_return_expr(arg); if (arg && arg->kind == Ast_CallExpr) { max_arg_count = ISIZE_MAX; break; @@ -5931,8 +5919,7 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op // where the same positional parameter has the same type value (and ellipsis) bool proc_arg_count_all_equal = true; isize proc_arg_count = -1; - for_array(i, procs) { - Entity *p = procs[i]; + for (Entity *p : procs) { Type *pt = base_type(p->type); if (pt != nullptr && is_type_proc(pt)) { if (proc_arg_count < 0) { @@ -5954,8 +5941,7 @@ gb_internal 
CallArgumentData check_call_arguments(CheckerContext *c, Operand *op lhs = gb_alloc_array(heap_allocator(), Entity *, lhs_count); for (isize param_index = 0; param_index < lhs_count; param_index++) { Entity *e = nullptr; - for_array(j, procs) { - Entity *p = procs[j]; + for (Entity *p : procs) { Type *pt = base_type(p->type); if (pt != nullptr && is_type_proc(pt)) { if (e == nullptr) { @@ -5996,8 +5982,8 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op auto proc_entities = array_make(heap_allocator(), 0, procs.count*2 + 1); defer (array_free(&proc_entities)); - for_array(i, procs) { - array_add(&proc_entities, procs[i]); + for (Entity *proc : procs) { + array_add(&proc_entities, proc); } @@ -6087,8 +6073,7 @@ gb_internal CallArgumentData check_call_arguments(CheckerContext *c, Operand *op if (procs.count > 0) { error_line("Did you mean to use one of the following:\n"); } - for_array(i, procs) { - Entity *proc = procs[i]; + for (Entity *proc : procs) { TokenPos pos = proc->token.pos; Type *t = base_type(proc->type); if (t == t_invalid) continue; @@ -6647,8 +6632,7 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c if (args.count > 0) { bool fail = false; bool first_is_field_value = (args[0]->kind == Ast_FieldValue); - for_array(i, args) { - Ast *arg = args[i]; + for (Ast *arg : args) { bool mix = false; if (first_is_field_value) { mix = arg->kind != Ast_FieldValue; @@ -6669,8 +6653,7 @@ gb_internal ExprKind check_call_expr(CheckerContext *c, Operand *operand, Ast *c } if (operand->mode == Addressing_Invalid) { - for_array(i, args) { - Ast *arg = args[i]; + for (Ast *arg : args) { if (arg->kind == Ast_FieldValue) { arg = arg->FieldValue.value; } @@ -7166,9 +7149,7 @@ gb_internal bool attempt_implicit_selector_expr(CheckerContext *c, Operand *o, A Type *union_type = base_type(th); auto operands = array_make(temporary_allocator(), 0, union_type->Union.variants.count); - for_array(i, 
union_type->Union.variants) { - Type *vt = union_type->Union.variants[i]; - + for (Type *vt : union_type->Union.variants) { Operand x = {}; if (attempt_implicit_selector_expr(c, &x, ise, vt)) { array_add(&operands, x); @@ -7398,8 +7379,7 @@ gb_internal void add_to_seen_map(CheckerContext *ctx, SeenMap *seen, TokenKind u } bool found = false; - for_array(j, bt->Enum.fields) { - Entity *f = bt->Enum.fields[j]; + for (Entity *f : bt->Enum.fields) { GB_ASSERT(f->kind == Entity_Constant); i64 fv = exact_value_to_i64(f->Constant.value); @@ -7917,8 +7897,7 @@ gb_internal ExprKind check_compound_literal(CheckerContext *c, Operand *o, Ast * if (cl->elems[0]->kind == Ast_FieldValue) { bool *fields_visited = gb_alloc_array(temporary_allocator(), bool, field_count); - for_array(i, cl->elems) { - Ast *elem = cl->elems[i]; + for (Ast *elem : cl->elems) { if (elem->kind != Ast_FieldValue) { error(elem, "Mixture of 'field = value' and value elements in a literal is not allowed"); continue; @@ -8070,8 +8049,7 @@ gb_internal ExprKind check_compound_literal(CheckerContext *c, Operand *o, Ast * RangeCache rc = range_cache_make(heap_allocator()); defer (range_cache_destroy(&rc)); - for_array(i, cl->elems) { - Ast *elem = cl->elems[i]; + for (Ast *elem : cl->elems) { if (elem->kind != Ast_FieldValue) { error(elem, "Mixture of 'field = value' and value elements in a literal is not allowed"); continue; @@ -8252,8 +8230,7 @@ gb_internal ExprKind check_compound_literal(CheckerContext *c, Operand *o, Ast * { Type *bt = base_type(index_type); GB_ASSERT(bt->kind == Type_Enum); - for_array(i, bt->Enum.fields) { - Entity *f = bt->Enum.fields[i]; + for (Entity *f : bt->Enum.fields) { if (f->kind != Entity_Constant) { continue; } @@ -8288,8 +8265,7 @@ gb_internal ExprKind check_compound_literal(CheckerContext *c, Operand *o, Ast * RangeCache rc = range_cache_make(heap_allocator()); defer (range_cache_destroy(&rc)); - for_array(i, cl->elems) { - Ast *elem = cl->elems[i]; + for (Ast *elem : 
cl->elems) { if (elem->kind != Ast_FieldValue) { error(elem, "Mixture of 'field = value' and value elements in a literal is not allowed"); continue; @@ -8453,8 +8429,7 @@ gb_internal ExprKind check_compound_literal(CheckerContext *c, Operand *o, Ast * auto unhandled = array_make(temporary_allocator(), 0, fields.count); - for_array(i, fields) { - Entity *f = fields[i]; + for (Entity *f : fields) { if (f->kind != Entity_Constant) { continue; } @@ -8580,8 +8555,7 @@ gb_internal ExprKind check_compound_literal(CheckerContext *c, Operand *o, Ast * bool key_is_typeid = is_type_typeid(t->Map.key); bool value_is_typeid = is_type_typeid(t->Map.value); - for_array(i, cl->elems) { - Ast *elem = cl->elems[i]; + for (Ast *elem : cl->elems) { if (elem->kind != Ast_FieldValue) { error(elem, "Only 'field = value' elements are allowed in a map literal"); continue; @@ -8630,8 +8604,7 @@ gb_internal ExprKind check_compound_literal(CheckerContext *c, Operand *o, Ast * error(cl->elems[0], "'field = value' in a bit_set a literal is not allowed"); is_constant = false; } else { - for_array(index, cl->elems) { - Ast *elem = cl->elems[index]; + for (Ast *elem : cl->elems) { if (elem->kind == Ast_FieldValue) { error(elem, "'field = value' in a bit_set a literal is not allowed"); continue; @@ -8683,8 +8656,7 @@ gb_internal ExprKind check_compound_literal(CheckerContext *c, Operand *o, Ast * BigInt one = {}; big_int_from_u64(&one, 1); - for_array(i, cl->elems) { - Ast *e = cl->elems[i]; + for (Ast *e : cl->elems) { GB_ASSERT(e->kind != Ast_FieldValue); TypeAndValue tav = e->tav; @@ -8783,8 +8755,7 @@ gb_internal ExprKind check_type_assertion(CheckerContext *c, Operand *o, Ast *no if (bsrc->Union.variants.count != 1 && type_hint != nullptr) { bool allowed = false; - for_array(i, bsrc->Union.variants) { - Type *vt = bsrc->Union.variants[i]; + for (Type *vt : bsrc->Union.variants) { if (are_types_identical(vt, type_hint)) { allowed = true; add_type_info_type(c, vt); @@ -8817,8 +8788,7 @@ 
gb_internal ExprKind check_type_assertion(CheckerContext *c, Operand *o, Ast *no if (is_type_union(src)) { bool ok = false; - for_array(i, bsrc->Union.variants) { - Type *vt = bsrc->Union.variants[i]; + for (Type *vt : bsrc->Union.variants) { if (are_types_identical(vt, dst)) { ok = true; break; @@ -8978,8 +8948,7 @@ gb_internal ExprKind check_selector_call_expr(CheckerContext *c, Operand *o, Ast if (ce->args.count > 0) { bool fail = false; bool first_is_field_value = (ce->args[0]->kind == Ast_FieldValue); - for_array(i, ce->args) { - Ast *arg = ce->args[i]; + for (Ast *arg : ce->args) { bool mix = false; if (first_is_field_value) { mix = arg->kind != Ast_FieldValue; @@ -9881,12 +9850,9 @@ gb_internal bool is_exact_value_zero(ExactValue const &v) { if (cl->elems.count == 0) { return true; } else { - for_array(i, cl->elems) { - Ast *elem = cl->elems[i]; + for (Ast *elem : cl->elems) { if (elem->tav.mode != Addressing_Constant) { - // if (elem->tav.value.kind != ExactValue_Invalid) { return false; - // } } if (!is_exact_value_zero(elem->tav.value)) { return false; @@ -10366,8 +10332,7 @@ gb_internal gbString write_expr_to_string(gbString str, Ast *node, bool shorthan bool parens_needed = false; if (pt->results && pt->results->kind == Ast_FieldList) { - for_array(i, pt->results->FieldList.list) { - Ast *field = pt->results->FieldList.list[i]; + for (Ast *field : pt->results->FieldList.list) { ast_node(f, Field, field); if (f->names.count != 0) { parens_needed = true; From 69934c3b0b1b8ad0a499574c39c1ab177a1fe30a Mon Sep 17 00:00:00 2001 From: gingerBill Date: Tue, 3 Jan 2023 13:04:09 +0000 Subject: [PATCH 42/78] More `for_array(i, y)` to `for (x : y)` translations --- src/check_builtin.cpp | 15 +++----- src/check_decl.cpp | 27 ++++++-------- src/check_stmt.cpp | 73 +++++++++++++------------------------- src/llvm_backend.cpp | 9 ++--- src/llvm_backend_expr.cpp | 18 ++++------ src/llvm_backend_stmt.cpp | 74 ++++++++++++++++----------------------- 6 files changed, 80 
insertions(+), 136 deletions(-) diff --git a/src/check_builtin.cpp b/src/check_builtin.cpp index af196234e..7c5521dde 100644 --- a/src/check_builtin.cpp +++ b/src/check_builtin.cpp @@ -96,8 +96,7 @@ gb_internal void check_or_else_expr_no_value_error(CheckerContext *c, String con gbString th = nullptr; if (type_hint != nullptr) { GB_ASSERT(bsrc->kind == Type_Union); - for_array(i, bsrc->Union.variants) { - Type *vt = bsrc->Union.variants[i]; + for (Type *vt : bsrc->Union.variants) { if (are_types_identical(vt, type_hint)) { th = type_to_string(type_hint); break; @@ -198,8 +197,7 @@ gb_internal void add_objc_proc_type(CheckerContext *c, Ast *call, Type *return_t { auto variables = array_make(permanent_allocator(), 0, param_types.count); - for_array(i, param_types) { - Type *type = param_types[i]; + for (Type *type : param_types) { Entity *param = alloc_entity_param(scope, blank_token, type, false, true); array_add(&variables, param); } @@ -3071,8 +3069,7 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As bool first_is_field_value = (ce->args[0]->kind == Ast_FieldValue); bool fail = false; - for_array(i, ce->args) { - Ast *arg = ce->args[i]; + for (Ast *arg : ce->args) { bool mix = false; if (first_is_field_value) { mix = arg->kind != Ast_FieldValue; @@ -3088,9 +3085,8 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As StringSet name_set = {}; string_set_init(&name_set, 2*ce->args.count); - for_array(i, ce->args) { + for (Ast *arg : ce->args) { String name = {}; - Ast *arg = ce->args[i]; if (arg->kind == Ast_FieldValue) { Ast *ename = arg->FieldValue.field; if (!fail && ename->kind != Ast_Ident) { @@ -4987,8 +4983,7 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As bool is_variant = false; - for_array(i, u->Union.variants) { - Type *vt = u->Union.variants[i]; + for (Type *vt : u->Union.variants) { if (are_types_identical(v, vt)) { is_variant = true; break; diff --git 
a/src/check_decl.cpp b/src/check_decl.cpp index 7b229db08..66f16546c 100644 --- a/src/check_decl.cpp +++ b/src/check_decl.cpp @@ -354,8 +354,7 @@ gb_internal void check_type_decl(CheckerContext *ctx, Entity *e, Ast *init_expr, Type *t = base_type(e->type); if (t->kind == Type_Enum) { - for_array(i, t->Enum.fields) { - Entity *f = t->Enum.fields[i]; + for (Entity *f : t->Enum.fields) { if (f->kind != Entity_Constant) { continue; } @@ -1237,8 +1236,7 @@ gb_internal void check_proc_group_decl(CheckerContext *ctx, Entity *&pg_entity, PtrSet entity_set = {}; ptr_set_init(&entity_set, 2*pg->args.count); - for_array(i, pg->args) { - Ast *arg = pg->args[i]; + for (Ast *arg : pg->args) { Entity *e = nullptr; Operand o = {}; if (arg->kind == Ast_Ident) { @@ -1271,7 +1269,7 @@ gb_internal void check_proc_group_decl(CheckerContext *ctx, Entity *&pg_entity, ptr_set_destroy(&entity_set); - for_array(j, pge->entities) { + for (isize j = 0; j < pge->entities.count; j++) { Entity *p = pge->entities[j]; if (p->type == t_invalid) { // NOTE(bill): This invalid overload has already been handled @@ -1462,8 +1460,7 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de { if (type->Proc.param_count > 0) { TypeTuple *params = &type->Proc.params->Tuple; - for_array(i, params->variables) { - Entity *e = params->variables[i]; + for (Entity *e : params->variables) { if (e->kind != Entity_Variable) { continue; } @@ -1499,9 +1496,9 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de } } - MUTEX_GUARD_BLOCK(ctx->scope->mutex) for_array(i, using_entities) { - Entity *e = using_entities[i].e; - Entity *uvar = using_entities[i].uvar; + MUTEX_GUARD_BLOCK(ctx->scope->mutex) for (auto const &entry : using_entities) { + Entity *e = entry.e; + Entity *uvar = entry.uvar; Entity *prev = scope_insert_no_mutex(ctx->scope, uvar); if (prev != nullptr) { error(e->token, "Namespace collision while 'using' procedure argument '%.*s' of: %.*s", 
LIT(e->token.string), LIT(prev->token.string)); @@ -1519,8 +1516,8 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de check_open_scope(ctx, body); { - for_array(i, using_entities) { - Entity *uvar = using_entities[i].uvar; + for (auto const &entry : using_entities) { + Entity *uvar = entry.uvar; Entity *prev = scope_insert(ctx->scope, uvar); gb_unused(prev); // NOTE(bill): Don't err here @@ -1537,12 +1534,10 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de decl->defer_use_checked = true; - for_array(i, bs->stmts) { - Ast *stmt = bs->stmts[i]; + for (Ast *stmt : bs->stmts) { if (stmt->kind == Ast_ValueDecl) { ast_node(vd, ValueDecl, stmt); - for_array(j, vd->names) { - Ast *name = vd->names[j]; + for (Ast *name : vd->names) { if (!is_blank_ident(name)) { if (name->kind == Ast_Ident) { GB_ASSERT(name->Ident.entity != nullptr); diff --git a/src/check_stmt.cpp b/src/check_stmt.cpp index 7192b16b5..e075297a4 100644 --- a/src/check_stmt.cpp +++ b/src/check_stmt.cpp @@ -931,16 +931,15 @@ gb_internal void check_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags SeenMap seen = {}; // NOTE(bill): Multimap, Key: ExactValue defer (map_destroy(&seen)); - for_array(stmt_index, bs->stmts) { - Ast *stmt = bs->stmts[stmt_index]; + for (Ast *stmt : bs->stmts) { if (stmt->kind != Ast_CaseClause) { // NOTE(bill): error handled by above multiple default checker continue; } ast_node(cc, CaseClause, stmt); - for_array(j, cc->list) { - Ast *expr = unparen_expr(cc->list[j]); + for (Ast *expr : cc->list) { + expr = unparen_expr(expr); if (is_ast_range(expr)) { ast_node(be, BinaryExpr, expr); @@ -1052,8 +1051,7 @@ gb_internal void check_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags auto unhandled = array_make(temporary_allocator(), 0, fields.count); - for_array(i, fields) { - Entity *f = fields[i]; + for (Entity *f : fields) { if (f->kind != Entity_Constant) { continue; } @@ -1072,8 +1070,7 @@ gb_internal 
void check_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_flags error_no_newline(node, "Unhandled switch case: %.*s", LIT(unhandled[0]->token.string)); } else { error(node, "Unhandled switch cases:"); - for_array(i, unhandled) { - Entity *f = unhandled[i]; + for (Entity *f : unhandled) { error_line("\t%.*s\n", LIT(f->token.string)); } } @@ -1154,8 +1151,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_ // NOTE(bill): Check for multiple defaults Ast *first_default = nullptr; ast_node(bs, BlockStmt, ss->body); - for_array(i, bs->stmts) { - Ast *stmt = bs->stmts[i]; + for (Ast *stmt : bs->stmts) { Ast *default_stmt = nullptr; if (stmt->kind == Ast_CaseClause) { ast_node(cc, CaseClause, stmt); @@ -1186,8 +1182,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_ PtrSet seen = {}; defer (ptr_set_destroy(&seen)); - for_array(i, bs->stmts) { - Ast *stmt = bs->stmts[i]; + for (Ast *stmt : bs->stmts) { if (stmt->kind != Ast_CaseClause) { // NOTE(bill): error handled by above multiple default checker continue; @@ -1198,8 +1193,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_ Type *bt = base_type(type_deref(x.type)); Type *case_type = nullptr; - for_array(type_index, cc->list) { - Ast *type_expr = cc->list[type_index]; + for (Ast *type_expr : cc->list) { if (type_expr != nullptr) { // Otherwise it's a default expression Operand y = {}; check_expr_or_type(ctx, &y, type_expr); @@ -1213,8 +1207,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_ if (switch_kind == TypeSwitch_Union) { GB_ASSERT(is_type_union(bt)); bool tag_type_found = false; - for_array(j, bt->Union.variants) { - Type *vt = bt->Union.variants[j]; + for (Type *vt : bt->Union.variants) { if (are_types_identical(vt, y.type)) { tag_type_found = true; break; @@ -1288,8 +1281,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_ auto unhandled 
= array_make(temporary_allocator(), 0, variants.count); - for_array(i, variants) { - Type *t = variants[i]; + for (Type *t : variants) { if (!type_ptr_set_exists(&seen, t)) { array_add(&unhandled, t); } @@ -1302,8 +1294,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_ gb_string_free(s); } else { error_no_newline(node, "Unhandled switch cases:\n"); - for_array(i, unhandled) { - Type *t = unhandled[i]; + for (Type *t : unhandled) { gbString s = type_to_string(t); error_line("\t%s\n", s); gb_string_free(s); @@ -1340,8 +1331,7 @@ gb_internal void check_block_stmt_for_errors(CheckerContext *ctx, Ast *body) { isize stmt_count = 0; Ast *the_stmt = nullptr; - for_array(i, bs->stmts) { - Ast *stmt = bs->stmts[i]; + for (Ast *stmt : bs->stmts) { GB_ASSERT(stmt != nullptr); switch (stmt->kind) { case_ast_node(es, EmptyStmt, stmt); @@ -1359,8 +1349,7 @@ gb_internal void check_block_stmt_for_errors(CheckerContext *ctx, Ast *body) { if (stmt_count == 1) { if (the_stmt->kind == Ast_ValueDecl) { - for_array(i, the_stmt->ValueDecl.names) { - Ast *name = the_stmt->ValueDecl.names[i]; + for (Ast *name : the_stmt->ValueDecl.names) { if (name->kind != Ast_Ident) { continue; } @@ -1376,8 +1365,8 @@ gb_internal void check_block_stmt_for_errors(CheckerContext *ctx, Ast *body) { gb_internal bool all_operands_valid(Array const &operands) { if (any_errors()) { - for_array(i, operands) { - if (operands[i].type == t_invalid) { + for (Operand const &o : operands) { + if (o.type == t_invalid) { return false; } } @@ -1548,16 +1537,9 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) check_assignment_arguments(ctx, lhs_operands, &rhs_operands, as->rhs); - isize rhs_count = rhs_operands.count; - for_array(i, rhs_operands) { - if (rhs_operands[i].mode == Addressing_Invalid) { - // TODO(bill): Should I ignore invalid parameters? 
- // rhs_count--; - } - } - auto lhs_to_ignore = array_make(temporary_allocator(), lhs_count); + isize rhs_count = rhs_operands.count; isize max = gb_min(lhs_count, rhs_count); for (isize i = 0; i < max; i++) { if (lhs_to_ignore[i]) { @@ -1856,8 +1838,8 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) break; } - for_array(ti, t->Tuple.variables) { - array_add(&vals, t->Tuple.variables[ti]->type); + for (Entity *e : t->Tuple.variables) { + array_add(&vals, e->type); } if (rs->vals.count > 1 && rs->vals[1] != nullptr && count < 3) { @@ -1976,8 +1958,7 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) } } - for_array(i, entities) { - Entity *e = entities[i]; + for (Entity *e : entities) { DeclInfo *d = decl_info_of_entity(e); GB_ASSERT(d == nullptr); add_entity(ctx, ctx->scope, e->identifier, e); @@ -2091,8 +2072,8 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) error(us->token, "Empty 'using' list"); return; } - for_array(i, us->list) { - Ast *expr = unparen_expr(us->list[i]); + for (Ast *expr : us->list) { + expr = unparen_expr(expr); Entity *e = nullptr; bool is_selector = false; @@ -2132,8 +2113,7 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) check_decl_attributes(&c, fb->attributes, foreign_block_decl_attribute, nullptr); ast_node(block, BlockStmt, fb->body); - for_array(i, block->stmts) { - Ast *decl = block->stmts[i]; + for (Ast *decl : block->stmts) { if (decl->kind == Ast_ValueDecl && decl->ValueDecl.is_mutable) { check_stmt(&c, decl, flags); } @@ -2146,8 +2126,7 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) isize entity_count = 0; isize new_name_count = 0; - for_array(i, vd->names) { - Ast *name = vd->names[i]; + for (Ast *name : vd->names) { Entity *entity = nullptr; if (name->kind != Ast_Ident) { error(name, "A variable declaration must be an identifier"); @@ -2193,8 +2172,7 @@ 
gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) begin_error_block(); error(node, "No new declarations on the left hand side"); bool all_underscore = true; - for_array(i, vd->names) { - Ast *name = vd->names[i]; + for (Ast *name : vd->names) { if (name->kind == Ast_Ident) { if (!is_blank_ident(name)) { all_underscore = false; @@ -2388,8 +2366,7 @@ gb_internal void check_stmt_internal(CheckerContext *ctx, Ast *node, u32 flags) } else { // constant value declaration // NOTE(bill): Check `_` declarations - for_array(i, vd->names) { - Ast *name = vd->names[i]; + for (Ast *name : vd->names) { if (is_blank_ident(name)) { Entity *e = name->Ident.entity; DeclInfo *d = decl_info_of_entity(e); diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp index 3e62f678a..304e5ef36 100644 --- a/src/llvm_backend.cpp +++ b/src/llvm_backend.cpp @@ -240,11 +240,10 @@ gb_internal lbValue lb_equal_proc_for_type(lbModule *m, Type *type) { LLVMValueRef v_switch = LLVMBuildSwitch(p->builder, left_tag.value, block_false->block, cast(unsigned)type->Union.variants.count); - for_array(i, type->Union.variants) { + for (Type *v : type->Union.variants) { lbBlock *case_block = lb_create_block(p, "bcase"); lb_start_block(p, case_block); - Type *v = type->Union.variants[i]; lbValue case_tag = lb_const_union_tag(p->module, type, v); Type *vp = alloc_type_pointer(v); @@ -374,11 +373,10 @@ gb_internal lbValue lb_hasher_proc_for_type(lbModule *m, Type *type) { LLVMValueRef v_switch = LLVMBuildSwitch(p->builder, tag.value, end_block->block, cast(unsigned)type->Union.variants.count); - for_array(i, type->Union.variants) { + for (Type *v : type->Union.variants) { lbBlock *case_block = lb_create_block(p, "bcase"); lb_start_block(p, case_block); - Type *v = type->Union.variants[i]; lbValue case_tag = lb_const_union_tag(p->module, type, v); lbValue variant_hasher = lb_hasher_proc_for_type(m, v); @@ -2235,8 +2233,7 @@ gb_internal void lb_generate_code(lbGenerator *gen) { for 
(auto const &entry : gen->modules) { lbModule *m = entry.value; - for_array(i, m->info->required_foreign_imports_through_force) { - Entity *e = m->info->required_foreign_imports_through_force[i]; + for (Entity *e : m->info->required_foreign_imports_through_force) { lb_add_foreign_library_path(m, e); } diff --git a/src/llvm_backend_expr.cpp b/src/llvm_backend_expr.cpp index d574caf4c..c28e9fb2b 100644 --- a/src/llvm_backend_expr.cpp +++ b/src/llvm_backend_expr.cpp @@ -61,8 +61,7 @@ gb_internal lbValue lb_emit_logical_binary_expr(lbProcedure *p, TokenKind op, As GB_ASSERT(incoming_values.count > 0); LLVMTypeRef phi_type = nullptr; - for_array(i, incoming_values) { - LLVMValueRef incoming_value = incoming_values[i]; + for (LLVMValueRef incoming_value : incoming_values) { if (!LLVMIsConstant(incoming_value)) { phi_type = LLVMTypeOf(incoming_value); break; @@ -1921,8 +1920,7 @@ gb_internal lbValue lb_emit_conv(lbProcedure *p, lbValue value, Type *t) { } if (is_type_union(dst)) { - for_array(i, dst->Union.variants) { - Type *vt = dst->Union.variants[i]; + for (Type *vt : dst->Union.variants) { if (are_types_identical(vt, src_type)) { lbAddr parent = lb_add_local_generated(p, t, true); lb_emit_store_union_variant(p, parent.addr, value, vt); @@ -3596,8 +3594,7 @@ gb_internal void lb_build_addr_compound_lit_populate(lbProcedure *p, Slice const &temp_data) { - for_array(i, temp_data) { - auto td = temp_data[i]; + for (auto const &td : temp_data) { if (td.value.value != nullptr) { if (td.elem_length > 0) { auto loop_data = lb_loop_start(p, cast(isize)td.elem_length, t_i32); @@ -4129,8 +4126,7 @@ gb_internal lbAddr lb_build_addr_compound_lit(lbProcedure *p, Ast *expr) { lbValue err = lb_dynamic_map_reserve(p, v.addr, 2*cl->elems.count, pos); gb_unused(err); - for_array(field_index, cl->elems) { - Ast *elem = cl->elems[field_index]; + for (Ast *elem : cl->elems) { ast_node(fv, FieldValue, elem); lbValue key = lb_build_expr(p, fv->field); @@ -4304,8 +4300,7 @@ gb_internal lbAddr 
lb_build_addr_compound_lit(lbProcedure *p, Ast *expr) { lb_addr_store(p, v, lb_const_value(p->module, type, exact_value_compound(expr))); lbValue lower = lb_const_value(p->module, t_int, exact_value_i64(bt->BitSet.lower)); - for_array(i, cl->elems) { - Ast *elem = cl->elems[i]; + for (Ast *elem : cl->elems) { GB_ASSERT(elem->kind != Ast_FieldValue); if (lb_is_elem_const(elem, et)) { @@ -4359,8 +4354,7 @@ gb_internal lbAddr lb_build_addr_compound_lit(lbProcedure *p, Ast *expr) { // TODO(bill): reduce the need for individual `insertelement` if a `shufflevector` // might be a better option - for_array(i, temp_data) { - auto td = temp_data[i]; + for (auto const &td : temp_data) { if (td.value.value != nullptr) { if (td.elem_length > 0) { for (i64 k = 0; k < td.elem_length; k++) { diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp index 2703c511a..c48115079 100644 --- a/src/llvm_backend_stmt.cpp +++ b/src/llvm_backend_stmt.cpp @@ -7,8 +7,7 @@ gb_internal void lb_build_constant_value_decl(lbProcedure *p, AstValueDecl *vd) static i32 global_guid = 0; - for_array(i, vd->names) { - Ast *ident = vd->names[i]; + for (Ast *ident : vd->names) { GB_ASSERT(ident->kind == Ast_Ident); Entity *e = entity_of_node(ident); GB_ASSERT(e != nullptr); @@ -106,8 +105,7 @@ gb_internal void lb_build_constant_value_decl(lbProcedure *p, AstValueDecl *vd) gb_internal void lb_build_stmt_list(lbProcedure *p, Slice const &stmts) { - for_array(i, stmts) { - Ast *stmt = stmts[i]; + for (Ast *stmt : stmts) { switch (stmt->kind) { case_ast_node(vd, ValueDecl, stmt); lb_build_constant_value_decl(p, vd); @@ -118,8 +116,8 @@ gb_internal void lb_build_stmt_list(lbProcedure *p, Slice const &stmts) { case_end; } } - for_array(i, stmts) { - lb_build_stmt(p, stmts[i]); + for (Ast *stmt : stmts) { + lb_build_stmt(p, stmt); } } @@ -129,10 +127,9 @@ gb_internal lbBranchBlocks lb_lookup_branch_blocks(lbProcedure *p, Ast *ident) { GB_ASSERT(ident->kind == Ast_Ident); Entity *e = 
entity_of_node(ident); GB_ASSERT(e->kind == Entity_Label); - for_array(i, p->branch_blocks) { - lbBranchBlocks *b = &p->branch_blocks[i]; - if (b->label == e->Label.node) { - return *b; + for (lbBranchBlocks const &b : p->branch_blocks) { + if (b.label == e->Label.node) { + return b; } } @@ -153,13 +150,12 @@ gb_internal lbTargetList *lb_push_target_list(lbProcedure *p, Ast *label, lbBloc if (label != nullptr) { // Set label blocks GB_ASSERT(label->kind == Ast_Label); - for_array(i, p->branch_blocks) { - lbBranchBlocks *b = &p->branch_blocks[i]; - GB_ASSERT(b->label != nullptr && label != nullptr); - GB_ASSERT(b->label->kind == Ast_Label); - if (b->label == label) { - b->break_ = break_; - b->continue_ = continue_; + for (lbBranchBlocks &b : p->branch_blocks) { + GB_ASSERT(b.label != nullptr && label != nullptr); + GB_ASSERT(b.label->kind == Ast_Label); + if (b.label == label) { + b.break_ = break_; + b.continue_ = continue_; return tl; } } @@ -1095,8 +1091,7 @@ gb_internal bool lb_switch_stmt_can_be_trivial_jump_table(AstSwitchStmt *ss, boo } ast_node(body, BlockStmt, ss->body); - for_array(i, body->stmts) { - Ast *clause = body->stmts[i]; + for (Ast *clause : body->stmts) { ast_node(cc, CaseClause, clause); if (cc->list.count == 0) { @@ -1104,8 +1099,8 @@ gb_internal bool lb_switch_stmt_can_be_trivial_jump_table(AstSwitchStmt *ss, boo continue; } - for_array(j, cc->list) { - Ast *expr = unparen_expr(cc->list[j]); + for (Ast *expr : cc->list) { + expr = unparen_expr(expr); if (is_ast_range(expr)) { return false; } @@ -1166,8 +1161,7 @@ gb_internal void lb_build_switch_stmt(lbProcedure *p, AstSwitchStmt *ss, Scope * LLVMValueRef switch_instr = nullptr; if (is_trivial) { isize num_cases = 0; - for_array(i, body->stmts) { - Ast *clause = body->stmts[i]; + for (Ast *clause : body->stmts) { ast_node(cc, CaseClause, clause); num_cases += cc->list.count; } @@ -1204,8 +1198,8 @@ gb_internal void lb_build_switch_stmt(lbProcedure *p, AstSwitchStmt *ss, Scope * } lbBlock 
*next_cond = nullptr; - for_array(j, cc->list) { - Ast *expr = unparen_expr(cc->list[j]); + for (Ast *expr : cc->list) { + expr = unparen_expr(expr); if (switch_instr != nullptr) { lbValue on_val = {}; @@ -1384,8 +1378,7 @@ gb_internal void lb_build_type_switch_stmt(lbProcedure *p, AstTypeSwitchStmt *ss lbBlock *default_block = nullptr; isize num_cases = 0; - for_array(i, body->stmts) { - Ast *clause = body->stmts[i]; + for (Ast *clause : body->stmts) { ast_node(cc, CaseClause, clause); num_cases += cc->list.count; if (cc->list.count == 0) { @@ -1405,8 +1398,7 @@ gb_internal void lb_build_type_switch_stmt(lbProcedure *p, AstTypeSwitchStmt *ss switch_instr = LLVMBuildSwitch(p->builder, tag.value, else_block->block, cast(unsigned)num_cases); } - for_array(i, body->stmts) { - Ast *clause = body->stmts[i]; + for (Ast *clause : body->stmts) { ast_node(cc, CaseClause, clause); lb_open_scope(p, cc->scope); if (cc->list.count == 0) { @@ -1420,9 +1412,8 @@ gb_internal void lb_build_type_switch_stmt(lbProcedure *p, AstTypeSwitchStmt *ss if (p->debug_info != nullptr) { LLVMSetCurrentDebugLocation2(p->builder, lb_debug_location_from_ast(p, clause)); } - Type *case_type = nullptr; - for_array(type_index, cc->list) { - case_type = type_of_expr(cc->list[type_index]); + for (Ast *type_expr : cc->list) { + Type *case_type = type_of_expr(type_expr); lbValue on_val = {}; if (switch_kind == TypeSwitch_Union) { Type *ut = base_type(type_deref(parent.type)); @@ -1538,8 +1529,8 @@ gb_internal void lb_append_tuple_values(lbProcedure *p, Array *dst_valu if (t->kind == Type_Tuple) { lbTupleFix *tf = map_get(&p->tuple_fix_map, src_value.value); if (tf) { - for_array(j, tf->values) { - array_add(dst_values, tf->values[j]); + for (lbValue const &value : tf->values) { + array_add(dst_values, value); } } else { for_array(i, t->Tuple.variables) { @@ -1560,8 +1551,7 @@ gb_internal void lb_build_assignment(lbProcedure *p, Array &lvals, Slice auto inits = array_make(permanent_allocator(), 0, 
lvals.count); - for_array(i, values) { - Ast *rhs = values[i]; + for (Ast *rhs : values) { lbValue init = lb_build_expr(p, rhs); lb_append_tuple_values(p, &inits, init); } @@ -1971,8 +1961,7 @@ gb_internal void lb_build_assign_stmt_array(lbProcedure *p, TokenKind op, lbAddr auto indices_handled = slice_make(temporary_allocator(), bt->Array.count); auto indices = slice_make(temporary_allocator(), bt->Array.count); i32 index_count = 0; - for_array(i, lhs.swizzle_large.indices) { - i32 index = lhs.swizzle_large.indices[i]; + for (i32 index : lhs.swizzle_large.indices) { if (indices_handled[index]) { continue; } @@ -2049,8 +2038,7 @@ gb_internal void lb_build_assign_stmt(lbProcedure *p, AstAssignStmt *as) { if (as->op.kind == Token_Eq) { auto lvals = array_make(permanent_allocator(), 0, as->lhs.count); - for_array(i, as->lhs) { - Ast *lhs = as->lhs[i]; + for (Ast *lhs : as->lhs) { lbAddr lval = {}; if (!is_blank_ident(lhs)) { lval = lb_build_addr(p, lhs); @@ -2185,8 +2173,7 @@ gb_internal void lb_build_stmt(lbProcedure *p, Ast *node) { bool is_static = false; if (vd->names.count > 0) { - for_array(i, vd->names) { - Ast *name = vd->names[i]; + for (Ast *name : vd->names) { if (!is_blank_ident(name)) { GB_ASSERT(name->kind == Ast_Ident); Entity *e = entity_of_node(name); @@ -2208,8 +2195,7 @@ gb_internal void lb_build_stmt(lbProcedure *p, Ast *node) { auto lvals = array_make(permanent_allocator(), 0, vd->names.count); - for_array(i, vd->names) { - Ast *name = vd->names[i]; + for (Ast *name : vd->names) { lbAddr lval = {}; if (!is_blank_ident(name)) { Entity *e = entity_of_node(name); From 0fb3032b731b640a2d0d1d62b9f8dd548e224b0e Mon Sep 17 00:00:00 2001 From: gingerBill Date: Tue, 3 Jan 2023 14:45:09 +0000 Subject: [PATCH 43/78] General improves to `alloc_ast_node` and other unnecessary checks --- src/common.cpp | 2 +- src/main.cpp | 4 ++-- src/parser.cpp | 4 +--- src/parser.hpp | 5 ++--- src/ptr_map.cpp | 6 ++++-- src/thread_pool.cpp | 6 +++--- src/threading.cpp | 1 + 
src/types.cpp | 4 ++-- 8 files changed, 16 insertions(+), 16 deletions(-) diff --git a/src/common.cpp b/src/common.cpp index 199a263a1..988a992d0 100644 --- a/src/common.cpp +++ b/src/common.cpp @@ -43,9 +43,9 @@ gb_internal void debugf(char const *fmt, ...); #error Odin on Windows requires a 64-bit build-system. The 'Developer Command Prompt' for VS still defaults to 32-bit shell. The 64-bit shell can be found under the name 'x64 Native Tools Command Prompt' for VS. For more information, please see https://odin-lang.org/docs/install/#for-windows #endif -#include "threading.cpp" #include "unicode.cpp" #include "array.cpp" +#include "threading.cpp" #include "queue.cpp" #include "common_memory.cpp" #include "string.cpp" diff --git a/src/main.cpp b/src/main.cpp index 7ac78241e..c07d2c400 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -13,11 +13,11 @@ #endif #include "exact_value.cpp" #include "build_settings.cpp" - gb_global ThreadPool global_thread_pool; gb_internal void init_global_thread_pool(void) { isize thread_count = gb_max(build_context.thread_count, 1); - thread_pool_init(&global_thread_pool, permanent_allocator(), thread_count, "ThreadPoolWorker"); + isize worker_count = thread_count-1; + thread_pool_init(&global_thread_pool, permanent_allocator(), worker_count, "ThreadPoolWorker"); } gb_internal bool thread_pool_add_task(WorkerTaskProc *proc, void *data) { return thread_pool_add_task(&global_thread_pool, proc, data); diff --git a/src/parser.cpp b/src/parser.cpp index 046469c16..c6f35d326 100644 --- a/src/parser.cpp +++ b/src/parser.cpp @@ -64,11 +64,9 @@ gb_global std::atomic global_total_node_memory_allocated; // NOTE(bill): And this below is why is I/we need a new language! 
Discriminated unions are a pain in C/C++ gb_internal Ast *alloc_ast_node(AstFile *f, AstKind kind) { - gbAllocator a = ast_allocator(f); - isize size = ast_node_size(kind); - Ast *node = cast(Ast *)gb_alloc(a, size); + Ast *node = cast(Ast *)arena_alloc(&global_thread_local_ast_arena, size, 16); node->kind = kind; node->file_id = f ? f->id : 0; diff --git a/src/parser.hpp b/src/parser.hpp index b492cfa85..d81194831 100644 --- a/src/parser.hpp +++ b/src/parser.hpp @@ -821,9 +821,8 @@ gb_internal gb_inline bool is_ast_when_stmt(Ast *node) { gb_global gb_thread_local Arena global_thread_local_ast_arena = {}; -gb_internal gbAllocator ast_allocator(AstFile *f) { - Arena *arena = &global_thread_local_ast_arena; - return arena_allocator(arena); +gb_internal gb_inline gbAllocator ast_allocator(AstFile *f) { + return arena_allocator(&global_thread_local_ast_arena); } gb_internal Ast *alloc_ast_node(AstFile *f, AstKind kind); diff --git a/src/ptr_map.cpp b/src/ptr_map.cpp index 083cd6697..264136881 100644 --- a/src/ptr_map.cpp +++ b/src/ptr_map.cpp @@ -27,6 +27,7 @@ struct PtrMap { gb_internal gb_inline u32 ptr_map_hash_key(uintptr key) { + u32 res; #if defined(GB_ARCH_64_BIT) key = (~key) + (key << 21); key = key ^ (key >> 24); @@ -34,12 +35,13 @@ gb_internal gb_inline u32 ptr_map_hash_key(uintptr key) { key = key ^ (key >> 14); key = (key + (key << 2)) + (key << 4); key = key ^ (key << 28); - return cast(u32)key; + res = cast(u32)key; #elif defined(GB_ARCH_32_BIT) u32 state = ((u32)key) * 747796405u + 2891336453u; u32 word = ((state >> ((state >> 28u) + 4u)) ^ state) * 277803737u; - return (word >> 22u) ^ word; + res = (word >> 22u) ^ word; #endif + return res ^ (res == MAP_SENTINEL); } gb_internal gb_inline u32 ptr_map_hash_key(void const *key) { return ptr_map_hash_key((uintptr)key); diff --git a/src/thread_pool.cpp b/src/thread_pool.cpp index 07ab3d323..276e93dff 100644 --- a/src/thread_pool.cpp +++ b/src/thread_pool.cpp @@ -5,7 +5,7 @@ struct ThreadPool; 
gb_thread_local Thread *current_thread; -gb_internal void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize thread_count, char const *worker_name); +gb_internal void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize worker_count, char const *worker_name); gb_internal void thread_pool_destroy(ThreadPool *pool); gb_internal bool thread_pool_add_task(ThreadPool *pool, WorkerTaskProc *proc, void *data); gb_internal void thread_pool_wait(ThreadPool *pool); @@ -25,9 +25,9 @@ gb_internal isize current_thread_index(void) { return current_thread ? current_thread->idx : 0; } -gb_internal void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize thread_count, char const *worker_name) { +gb_internal void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize worker_count, char const *worker_name) { pool->allocator = a; - slice_init(&pool->threads, a, thread_count + 1); + slice_init(&pool->threads, a, worker_count + 1); // NOTE: this needs to be initialized before any thread starts pool->running.store(true, std::memory_order_seq_cst); diff --git a/src/threading.cpp b/src/threading.cpp index aca77cd8f..78943150e 100644 --- a/src/threading.cpp +++ b/src/threading.cpp @@ -398,6 +398,7 @@ gb_internal void thread_init(ThreadPool *pool, Thread *t, isize idx) { t->idx = idx; } + gb_internal void thread_init_and_start(ThreadPool *pool, Thread *t, isize idx) { thread_init(pool, t, idx); isize stack_size = 0; diff --git a/src/types.cpp b/src/types.cpp index d33c36e94..fa7c1d7f7 100644 --- a/src/types.cpp +++ b/src/types.cpp @@ -2535,13 +2535,13 @@ gb_internal bool are_types_identical_internal(Type *x, Type *y, bool check_tuple if (x->kind == Type_Named) { Entity *e = x->Named.type_name; - if (e != nullptr && e->kind == Entity_TypeName && e->TypeName.is_type_alias) { + if (e->TypeName.is_type_alias) { x = x->Named.base; } } if (y->kind == Type_Named) { Entity *e = y->Named.type_name; - if (e != nullptr && e->kind == Entity_TypeName && 
e->TypeName.is_type_alias) { + if (e->TypeName.is_type_alias) { y = y->Named.base; } } From c7a704d345e9bda38da18807a1d7cd5bc5accc17 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Tue, 3 Jan 2023 15:26:47 +0000 Subject: [PATCH 44/78] Use `RwMutex` for the `Scope` --- src/check_decl.cpp | 12 ++++-- src/check_expr.cpp | 4 +- src/check_stmt.cpp | 5 ++- src/checker.cpp | 21 ++++++---- src/checker.hpp | 2 +- src/thread_pool.cpp | 27 ++++++------- src/threading.cpp | 96 +++++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 138 insertions(+), 29 deletions(-) diff --git a/src/check_decl.cpp b/src/check_decl.cpp index 66f16546c..4afde6e51 100644 --- a/src/check_decl.cpp +++ b/src/check_decl.cpp @@ -381,8 +381,8 @@ gb_internal void override_entity_in_scope(Entity *original_entity, Entity *new_e if (found_scope == nullptr) { return; } - mutex_lock(&found_scope->mutex); - defer (mutex_unlock(&found_scope->mutex)); + rw_mutex_lock(&found_scope->mutex); + defer (rw_mutex_unlock(&found_scope->mutex)); // IMPORTANT NOTE(bill, 2021-04-10): Overriding behaviour was flawed in that the // original entity was still used check checked, but the checking was only @@ -1478,7 +1478,8 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de if (t->kind == Type_Struct) { Scope *scope = t->Struct.scope; GB_ASSERT(scope != nullptr); - MUTEX_GUARD_BLOCK(scope->mutex) for (auto const &entry : scope->elements) { + rw_mutex_lock(&scope->mutex); + for (auto const &entry : scope->elements) { Entity *f = entry.value; if (f->kind == Entity_Variable) { Entity *uvar = alloc_entity_using_variable(e, f->token, f->type, nullptr); @@ -1488,6 +1489,7 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de array_add(&using_entities, puv); } } + rw_mutex_unlock(&scope->mutex); } else { error(e->token, "'using' can only be applied to variables of type struct"); break; @@ -1496,7 +1498,8 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, 
Token token, DeclInfo *de } } - MUTEX_GUARD_BLOCK(ctx->scope->mutex) for (auto const &entry : using_entities) { + rw_mutex_lock(&ctx->scope->mutex); + for (auto const &entry : using_entities) { Entity *e = entry.e; Entity *uvar = entry.uvar; Entity *prev = scope_insert_no_mutex(ctx->scope, uvar); @@ -1506,6 +1509,7 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de break; } } + rw_mutex_unlock(&ctx->scope->mutex); bool where_clause_ok = evaluate_where_clauses(ctx, nullptr, decl->scope, &decl->proc_lit->ProcLit.where_clauses, !decl->where_clauses_evaluated); diff --git a/src/check_expr.cpp b/src/check_expr.cpp index c1787e7b6..d9ab328cb 100644 --- a/src/check_expr.cpp +++ b/src/check_expr.cpp @@ -236,10 +236,12 @@ gb_internal void check_did_you_mean_scope(String const &name, Scope *scope, char DidYouMeanAnswers d = did_you_mean_make(heap_allocator(), scope->elements.entries.count, name); defer (did_you_mean_destroy(&d)); - MUTEX_GUARD_BLOCK(&scope->mutex) for (auto const &entry : scope->elements) { + rw_mutex_shared_lock(&scope->mutex); + for (auto const &entry : scope->elements) { Entity *e = entry.value; did_you_mean_append(&d, e->token.string); } + rw_mutex_shared_unlock(&scope->mutex); check_did_you_mean_print(&d, prefix); } diff --git a/src/check_stmt.cpp b/src/check_stmt.cpp index e075297a4..6e84d0789 100644 --- a/src/check_stmt.cpp +++ b/src/check_stmt.cpp @@ -622,7 +622,10 @@ gb_internal bool check_using_stmt_entity(CheckerContext *ctx, AstUsingStmt *us, case Entity_ImportName: { Scope *scope = e->ImportName.scope; - MUTEX_GUARD_BLOCK(scope->mutex) for (auto const &entry : scope->elements) { + rw_mutex_lock(&scope->mutex); + defer (rw_mutex_unlock(&scope->mutex)); + + for (auto const &entry : scope->elements) { String name = entry.key.string; Entity *decl = entry.value; if (!is_entity_exported(decl)) continue; diff --git a/src/checker.cpp b/src/checker.cpp index 0075fa543..1d536074d 100644 --- a/src/checker.cpp +++ 
b/src/checker.cpp @@ -51,10 +51,11 @@ gb_internal bool check_rtti_type_disallowed(Ast *expr, Type *type, char const *f gb_internal void scope_reset(Scope *scope) { if (scope == nullptr) return; - MUTEX_GUARD(&scope->mutex); + rw_mutex_lock(&scope->mutex); scope->head_child.store(nullptr, std::memory_order_relaxed); string_map_clear(&scope->elements); ptr_set_clear(&scope->imported); + rw_mutex_unlock(&scope->mutex); } gb_internal void scope_reserve(Scope *scope, isize capacity) { @@ -180,9 +181,9 @@ gb_internal void init_decl_info(DeclInfo *d, Scope *scope, DeclInfo *parent) { gb_zero_item(d); d->parent = parent; d->scope = scope; - ptr_set_init(&d->deps); - ptr_set_init(&d->type_info_deps); - array_init (&d->labels, heap_allocator()); + ptr_set_init(&d->deps, 0); + ptr_set_init(&d->type_info_deps, 0); + d->labels.allocator = heap_allocator(); } gb_internal DeclInfo *make_decl_info(Scope *scope, DeclInfo *parent) { @@ -394,9 +395,9 @@ gb_internal void scope_lookup_parent(Scope *scope, String const &name, Scope **s StringHashKey key = string_hash_string(name); for (Scope *s = scope; s != nullptr; s = s->parent) { Entity **found = nullptr; - mutex_lock(&s->mutex); + rw_mutex_shared_lock(&s->mutex); found = string_map_get(&s->elements, key); - mutex_unlock(&s->mutex); + rw_mutex_shared_unlock(&s->mutex); if (found) { Entity *e = *found; if (gone_thru_proc) { @@ -482,7 +483,7 @@ gb_internal Entity *scope_insert_with_name(Scope *s, String const &name, Entity Entity **found = nullptr; Entity *result = nullptr; - MUTEX_GUARD(&s->mutex); + rw_mutex_lock(&s->mutex); found = string_map_get(&s->elements, key); @@ -509,6 +510,8 @@ gb_internal Entity *scope_insert_with_name(Scope *s, String const &name, Entity entity->scope = s; } end:; + rw_mutex_unlock(&s->mutex); + return result; } @@ -669,7 +672,8 @@ gb_internal void check_scope_usage(Checker *c, Scope *scope) { Array vetted_entities = {}; array_init(&vetted_entities, heap_allocator()); - MUTEX_GUARD_BLOCK(scope->mutex) for 
(auto const &entry : scope->elements) { + rw_mutex_shared_lock(&scope->mutex); + for (auto const &entry : scope->elements) { Entity *e = entry.value; if (e == nullptr) continue; VettedEntity ve_unused = {}; @@ -686,6 +690,7 @@ gb_internal void check_scope_usage(Checker *c, Scope *scope) { array_add(&vetted_entities, ve_shadowed); } } + rw_mutex_shared_unlock(&scope->mutex); gb_sort(vetted_entities.data, vetted_entities.count, gb_size_of(VettedEntity), vetted_entity_variable_pos_cmp); diff --git a/src/checker.hpp b/src/checker.hpp index cc92fce28..53052d5cd 100644 --- a/src/checker.hpp +++ b/src/checker.hpp @@ -224,7 +224,7 @@ struct Scope { std::atomic next; std::atomic head_child; - BlockingMutex mutex; + RwMutex mutex; StringMap elements; PtrSet imported; diff --git a/src/thread_pool.cpp b/src/thread_pool.cpp index 276e93dff..2c369eaad 100644 --- a/src/thread_pool.cpp +++ b/src/thread_pool.cpp @@ -47,7 +47,7 @@ gb_internal void thread_pool_destroy(ThreadPool *pool) { for_array_off(i, 1, pool->threads) { Thread *t = &pool->threads[i]; - pool->tasks_available.fetch_add(1, std::memory_order_release); + pool->tasks_available.fetch_add(1, std::memory_order_relaxed); futex_broadcast(&pool->tasks_available); thread_join_and_destroy(t); } @@ -74,7 +74,7 @@ void thread_pool_queue_push(Thread *thread, WorkerTask task) { } while (!thread->head_and_tail.compare_exchange_weak(capture, new_capture)); thread->pool->tasks_left.fetch_add(1, std::memory_order_release); - thread->pool->tasks_available.fetch_add(1, std::memory_order_release); + thread->pool->tasks_available.fetch_add(1, std::memory_order_relaxed); futex_broadcast(&thread->pool->tasks_available); } @@ -82,7 +82,7 @@ bool thread_pool_queue_pop(Thread *thread, WorkerTask *task) { u64 capture; u64 new_capture; do { - capture = thread->head_and_tail.load(); + capture = thread->head_and_tail.load(std::memory_order_acquire); u64 mask = thread->capacity - 1; u64 head = (capture >> 32) & mask; @@ -97,7 +97,7 @@ bool 
thread_pool_queue_pop(Thread *thread, WorkerTask *task) { *task = thread->queue[tail]; new_capture = (head << 32) | new_tail; - } while (!thread->head_and_tail.compare_exchange_weak(capture, new_capture)); + } while (!thread->head_and_tail.compare_exchange_weak(capture, new_capture, std::memory_order_release)); return true; } @@ -168,22 +168,21 @@ gb_internal THREAD_PROC(thread_pool_thread_proc) { Thread *thread = &pool->threads.data[idx]; WorkerTask task; - if (!thread_pool_queue_pop(thread, &task)) { - continue; - } - task.do_work(task.data); - pool->tasks_left.fetch_sub(1, std::memory_order_release); + if (thread_pool_queue_pop(thread, &task)) { + task.do_work(task.data); + pool->tasks_left.fetch_sub(1, std::memory_order_release); - if (pool->tasks_left.load(std::memory_order_acquire) == 0) { - futex_signal(&pool->tasks_left); - } + if (pool->tasks_left.load(std::memory_order_acquire) == 0) { + futex_signal(&pool->tasks_left); + } - goto main_loop_continue; + goto main_loop_continue; + } } } // if we've done all our work, and there's nothing to steal, go to sleep - state = pool->tasks_available.load(); + state = pool->tasks_available.load(std::memory_order_acquire); futex_wait(&pool->tasks_available, state); main_loop_continue:; diff --git a/src/threading.cpp b/src/threading.cpp index 78943150e..27a17112e 100644 --- a/src/threading.cpp +++ b/src/threading.cpp @@ -8,10 +8,12 @@ struct BlockingMutex; struct RecursiveMutex; +struct RwMutex; struct Semaphore; struct Condition; struct Thread; struct ThreadPool; +struct Parker; #define THREAD_PROC(name) isize name(struct Thread *thread) gb_internal THREAD_PROC(thread_pool_thread_proc); @@ -56,6 +58,13 @@ gb_internal void mutex_lock (RecursiveMutex *m); gb_internal bool mutex_try_lock(RecursiveMutex *m); gb_internal void mutex_unlock (RecursiveMutex *m); +gb_internal void rw_mutex_lock (RwMutex *m); +gb_internal bool rw_mutex_try_lock (RwMutex *m); +gb_internal void rw_mutex_unlock (RwMutex *m); +gb_internal void 
rw_mutex_shared_lock (RwMutex *m); +gb_internal bool rw_mutex_try_shared_lock(RwMutex *m); +gb_internal void rw_mutex_shared_unlock (RwMutex *m); + gb_internal void semaphore_post (Semaphore *s, i32 count); gb_internal void semaphore_wait (Semaphore *s); gb_internal void semaphore_release(Semaphore *s) { semaphore_post(s, 1); } @@ -65,6 +74,10 @@ gb_internal void condition_broadcast(Condition *c); gb_internal void condition_signal(Condition *c); gb_internal void condition_wait(Condition *c, BlockingMutex *m); +gb_internal void park(Parker *p); +gb_internal void unpark_one(Parker *p); +gb_internal void unpark_all(Parker *p); + gb_internal u32 thread_current_id(void); gb_internal void thread_init (ThreadPool *pool, Thread *t, isize idx); @@ -205,6 +218,30 @@ gb_internal void semaphore_wait(Semaphore *s) { gb_internal void condition_wait(Condition *c, BlockingMutex *m) { SleepConditionVariableSRW(&c->cond, &m->srwlock, INFINITE, 0); } + + struct RwMutex { + SRWLOCK srwlock; + }; + + gb_internal void rw_mutex_lock(RwMutex *m) { + AcquireSRWLockExclusive(&m->srwlock); + } + gb_internal bool rw_mutex_try_lock(RwMutex *m) { + return !!TryAcquireSRWLockExclusive(&m->srwlock); + } + gb_internal void rw_mutex_unlock(RwMutex *m) { + ReleaseSRWLockExclusive(&m->srwlock); + } + + gb_internal void rw_mutex_shared_lock(RwMutex *m) { + AcquireSRWLockShared(&m->srwlock); + } + gb_internal bool rw_mutex_try_shared_lock(RwMutex *m) { + return !!TryAcquireSRWLockShared(&m->srwlock); + } + gb_internal void rw_mutex_shared_unlock(RwMutex *m) { + ReleaseSRWLockShared(&m->srwlock); + } #else enum Internal_Mutex_State : i32 { Internal_Mutex_State_Unlocked = 0, @@ -306,8 +343,67 @@ gb_internal void semaphore_wait(Semaphore *s) { futex_wait(&c->state(), state); mutex_lock(m); } + + struct RwMutex { + // TODO(bill): make this a proper RW mutex + BlockingMutex mutex; + }; + + gb_internal void rw_mutex_lock(RwMutex *m) { + mutex_lock(&m->mutex); + } + gb_internal bool rw_mutex_try_lock(RwMutex 
*m) { + return mutex_try_lock(&m->mutex); + } + gb_internal void rw_mutex_unlock(RwMutex *m) { + mutex_unlock(&m->mutex); + } + + gb_internal void rw_mutex_shared_lock(RwMutex *m) { + mutex_lock(&m->mutex); + } + gb_internal bool rw_mutex_try_shared_lock(RwMutex *m) { + return mutex_try_lock(&m->mutex); + } + gb_internal void rw_mutex_shared_unlock(RwMutex *m) { + mutex_unlock(&m->mutex); + } #endif +struct Parker { + Futex state; +}; +enum ParkerState : u32 { + ParkerState_Empty = 0, + ParkerState_Notified = 1, + ParkerState_Parked = UINT32_MAX, +}; + +gb_internal void park(Parker *p) { + if (p->state.fetch_sub(1, std::memory_order_acquire) == ParkerState_Notified) { + return; + } + for (;;) { + futex_wait(&p->state, ParkerState_Parked); + i32 notified = ParkerState_Empty; + if (p->state.compare_exchange_strong(notified, ParkerState_Empty, std::memory_order_acquire, std::memory_order_acquire)) { + return; + } + } +} + +gb_internal void unpark_one(Parker *p) { + if (p->state.exchange(ParkerState_Notified, std::memory_order_release) == ParkerState_Parked) { + futex_signal(&p->state); + } +} + +gb_internal void unpark_all(Parker *p) { + if (p->state.exchange(ParkerState_Notified, std::memory_order_release) == ParkerState_Parked) { + futex_broadcast(&p->state); + } +} + gb_internal u32 thread_current_id(void) { u32 thread_id; From 3dee3205b299b2a2e803a3bc248f0a9ffdcce69e Mon Sep 17 00:00:00 2001 From: gingerBill Date: Tue, 3 Jan 2023 15:34:52 +0000 Subject: [PATCH 45/78] Use `RwMutex` for `DeclInfo` `deps --- src/check_decl.cpp | 27 +++++++++++++++++---------- src/check_expr.cpp | 6 ++++-- src/checker.cpp | 8 ++++---- src/checker.hpp | 4 ++-- 4 files changed, 27 insertions(+), 18 deletions(-) diff --git a/src/check_decl.cpp b/src/check_decl.cpp index 4afde6e51..72c69b5dc 100644 --- a/src/check_decl.cpp +++ b/src/check_decl.cpp @@ -1584,19 +1584,26 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de // NOTE(bill): Add the dependencies 
from the procedure literal (lambda) // But only at the procedure level - MUTEX_GUARD_BLOCK(decl->deps_mutex) - MUTEX_GUARD_BLOCK(decl->parent->deps_mutex) { - for (Entity *e : decl->deps) { - ptr_set_add(&decl->parent->deps, e); - } + rw_mutex_shared_lock(&decl->deps_mutex); + rw_mutex_lock(&decl->parent->deps_mutex); + + for (Entity *e : decl->deps) { + ptr_set_add(&decl->parent->deps, e); } - MUTEX_GUARD_BLOCK(decl->type_info_deps_mutex) - MUTEX_GUARD_BLOCK(decl->parent->type_info_deps_mutex) { - for (Type *t : decl->type_info_deps) { - ptr_set_add(&decl->parent->type_info_deps, t); - } + rw_mutex_unlock(&decl->parent->deps_mutex); + rw_mutex_shared_unlock(&decl->deps_mutex); + + + rw_mutex_shared_lock(&decl->type_info_deps_mutex); + rw_mutex_lock(&decl->parent->type_info_deps_mutex); + + for (Type *t : decl->type_info_deps) { + ptr_set_add(&decl->parent->type_info_deps, t); } + + rw_mutex_unlock(&decl->parent->type_info_deps_mutex); + rw_mutex_shared_unlock(&decl->type_info_deps_mutex); } } diff --git a/src/check_expr.cpp b/src/check_expr.cpp index d9ab328cb..e3cd66db6 100644 --- a/src/check_expr.cpp +++ b/src/check_expr.cpp @@ -4928,21 +4928,23 @@ gb_internal bool check_identifier_exists(Scope *s, Ast *node, bool nested = fals gb_internal isize add_dependencies_from_unpacking(CheckerContext *c, Entity **lhs, isize lhs_count, isize tuple_index, isize tuple_count) { if (lhs != nullptr && c->decl != nullptr) { - mutex_lock(&c->info->deps_mutex); + // mutex_lock(&c->info->deps_mutex); for (isize j = 0; (tuple_index + j) < lhs_count && j < tuple_count; j++) { Entity *e = lhs[tuple_index + j]; if (e != nullptr) { DeclInfo *decl = decl_info_of_entity(e); if (decl != nullptr) { + rw_mutex_lock(&c->decl->deps_mutex); for (Entity *dep : decl->deps) { ptr_set_add(&c->decl->deps, dep); } + rw_mutex_unlock(&c->decl->deps_mutex); } } } - mutex_unlock(&c->info->deps_mutex); + // mutex_unlock(&c->info->deps_mutex); } return tuple_count; } diff --git a/src/checker.cpp 
b/src/checker.cpp index 1d536074d..fd80d07de 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -744,17 +744,17 @@ gb_internal void check_scope_usage(Checker *c, Scope *scope) { gb_internal void add_dependency(CheckerInfo *info, DeclInfo *d, Entity *e) { - mutex_lock(&d->deps_mutex); + rw_mutex_lock(&d->deps_mutex); ptr_set_add(&d->deps, e); - mutex_unlock(&d->deps_mutex); + rw_mutex_unlock(&d->deps_mutex); } gb_internal void add_type_info_dependency(CheckerInfo *info, DeclInfo *d, Type *type) { if (d == nullptr) { return; } - mutex_lock(&d->type_info_deps_mutex); + rw_mutex_lock(&d->type_info_deps_mutex); ptr_set_add(&d->type_info_deps, type); - mutex_unlock(&d->type_info_deps_mutex); + rw_mutex_unlock(&d->type_info_deps_mutex); } gb_internal AstPackage *get_core_package(CheckerInfo *info, String name) { diff --git a/src/checker.hpp b/src/checker.hpp index 53052d5cd..50f4a204c 100644 --- a/src/checker.hpp +++ b/src/checker.hpp @@ -179,10 +179,10 @@ struct DeclInfo { CommentGroup *comment; CommentGroup *docs; - BlockingMutex deps_mutex; + RwMutex deps_mutex; PtrSet deps; - BlockingMutex type_info_deps_mutex; + RwMutex type_info_deps_mutex; PtrSet type_info_deps; Array labels; From 485c6066724b374de5b94115411f529493f799f7 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Tue, 3 Jan 2023 15:37:35 +0000 Subject: [PATCH 46/78] Clarify `RwLock`s for `add_dependenies_from_unpacking` --- src/check_expr.cpp | 6 ++---- src/checker.hpp | 6 ------ 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/src/check_expr.cpp b/src/check_expr.cpp index e3cd66db6..38fe33c24 100644 --- a/src/check_expr.cpp +++ b/src/check_expr.cpp @@ -4928,23 +4928,21 @@ gb_internal bool check_identifier_exists(Scope *s, Ast *node, bool nested = fals gb_internal isize add_dependencies_from_unpacking(CheckerContext *c, Entity **lhs, isize lhs_count, isize tuple_index, isize tuple_count) { if (lhs != nullptr && c->decl != nullptr) { - // mutex_lock(&c->info->deps_mutex); - for (isize j = 0; 
(tuple_index + j) < lhs_count && j < tuple_count; j++) { Entity *e = lhs[tuple_index + j]; if (e != nullptr) { DeclInfo *decl = decl_info_of_entity(e); if (decl != nullptr) { + rw_mutex_shared_lock(&decl->deps_mutex); rw_mutex_lock(&c->decl->deps_mutex); for (Entity *dep : decl->deps) { ptr_set_add(&c->decl->deps, dep); } rw_mutex_unlock(&c->decl->deps_mutex); + rw_mutex_shared_unlock(&decl->deps_mutex); } } } - - // mutex_unlock(&c->info->deps_mutex); } return tuple_count; } diff --git a/src/checker.hpp b/src/checker.hpp index 50f4a204c..554c8ddf4 100644 --- a/src/checker.hpp +++ b/src/checker.hpp @@ -357,12 +357,6 @@ struct CheckerInfo { BlockingMutex global_untyped_mutex; BlockingMutex builtin_mutex; - // NOT recursive & only used at the end of `check_proc_body` - // and in `add_dependency`. - // This is a possible source of contention but probably not - // too much of a problem in practice - BlockingMutex deps_mutex; - BlockingMutex type_and_value_mutex; RecursiveMutex lazy_mutex; // Mutex required for lazy type checking of specific files From 774fea1e63941473b9899a50585e0f171184a147 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Tue, 3 Jan 2023 15:47:25 +0000 Subject: [PATCH 47/78] Use `RwMutex` for `gen_procs` --- src/check_expr.cpp | 16 ++++++++-------- src/checker.cpp | 13 +++++++------ src/checker.hpp | 11 +++-------- src/llvm_backend_stmt.cpp | 3 ++- 4 files changed, 20 insertions(+), 23 deletions(-) diff --git a/src/check_expr.cpp b/src/check_expr.cpp index 38fe33c24..746a29ce0 100644 --- a/src/check_expr.cpp +++ b/src/check_expr.cpp @@ -440,11 +440,11 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E auto *found = map_get(&info->gen_procs, base_entity->identifier.load()); if (found) { gen_procs = *found; - mutex_lock(&gen_procs->mutex); // @local-mutex + rw_mutex_shared_lock(&gen_procs->mutex); // @local-mutex for (Entity *other : gen_procs->procs) { Type *pt = base_type(other->type); if (are_types_identical(pt, 
final_proc_type)) { - mutex_unlock(&gen_procs->mutex); // @local-mutex + rw_mutex_shared_unlock(&gen_procs->mutex); // @local-mutex // @@GPM //////////////////////////// mutex_unlock(&info->gen_procs_mutex); ///////////////////////////////////// @@ -455,7 +455,7 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E return true; } } - mutex_unlock(&gen_procs->mutex); // @local-mutex + rw_mutex_shared_unlock(&gen_procs->mutex); // @local-mutex } else { gen_procs = gb_alloc_item(permanent_allocator(), GenProcsData); gen_procs->procs.allocator = heap_allocator(); @@ -481,11 +481,11 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E return false; } - mutex_lock(&gen_procs->mutex); // @local-mutex + rw_mutex_shared_lock(&gen_procs->mutex); // @local-mutex for (Entity *other : gen_procs->procs) { Type *pt = base_type(other->type); if (are_types_identical(pt, final_proc_type)) { - mutex_unlock(&gen_procs->mutex); // @local-mutex + rw_mutex_shared_unlock(&gen_procs->mutex); // @local-mutex if (poly_proc_data) { poly_proc_data->gen_entity = other; @@ -509,7 +509,7 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E return true; } } - mutex_unlock(&gen_procs->mutex); // @local-mutex + rw_mutex_shared_unlock(&gen_procs->mutex); // @local-mutex } @@ -569,9 +569,9 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E } } - mutex_lock(&gen_procs->mutex); // @local-mutex + rw_mutex_lock(&gen_procs->mutex); // @local-mutex array_add(&gen_procs->procs, entity); - mutex_unlock(&gen_procs->mutex); // @local-mutex + rw_mutex_unlock(&gen_procs->mutex); // @local-mutex ProcInfo *proc_info = gb_alloc_item(permanent_allocator(), ProcInfo); proc_info->file = file; diff --git a/src/checker.cpp b/src/checker.cpp index fd80d07de..3f5c2892f 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -1356,9 +1356,9 @@ gb_internal ExprInfo *check_get_expr_info(CheckerContext *c, 
Ast *expr) { } return nullptr; } else { - mutex_lock(&c->info->global_untyped_mutex); - defer (mutex_unlock(&c->info->global_untyped_mutex)); + rw_mutex_shared_lock(&c->info->global_untyped_mutex); ExprInfo **found = map_get(&c->info->global_untyped, expr); + rw_mutex_shared_unlock(&c->info->global_untyped_mutex); if (found) { return *found; } @@ -1370,9 +1370,9 @@ gb_internal void check_set_expr_info(CheckerContext *c, Ast *expr, AddressingMod if (c->untyped != nullptr) { map_set(c->untyped, expr, make_expr_info(mode, type, value, false)); } else { - mutex_lock(&c->info->global_untyped_mutex); + rw_mutex_lock(&c->info->global_untyped_mutex); map_set(&c->info->global_untyped, expr, make_expr_info(mode, type, value, false)); - mutex_unlock(&c->info->global_untyped_mutex); + rw_mutex_unlock(&c->info->global_untyped_mutex); } } @@ -1382,10 +1382,10 @@ gb_internal void check_remove_expr_info(CheckerContext *c, Ast *e) { GB_ASSERT(map_get(c->untyped, e) == nullptr); } else { auto *untyped = &c->info->global_untyped; - mutex_lock(&c->info->global_untyped_mutex); + rw_mutex_lock(&c->info->global_untyped_mutex); map_remove(untyped, e); GB_ASSERT(map_get(untyped, e) == nullptr); - mutex_unlock(&c->info->global_untyped_mutex); + rw_mutex_unlock(&c->info->global_untyped_mutex); } } @@ -1454,6 +1454,7 @@ gb_internal void add_type_and_value(CheckerContext *ctx, Ast *expr, AddressingMo BlockingMutex *mutex = &ctx->info->type_and_value_mutex; if (ctx->pkg) { + // TODO(bill): is a per package mutex is a good idea here? 
mutex = &ctx->pkg->type_and_value_mutex; } diff --git a/src/checker.hpp b/src/checker.hpp index 554c8ddf4..226f69c1c 100644 --- a/src/checker.hpp +++ b/src/checker.hpp @@ -317,7 +317,7 @@ struct LoadFileCache { struct GenProcsData { Array procs; - BlockingMutex mutex; + RwMutex mutex; }; // CheckerInfo stores all the symbol information for a type-checked program @@ -347,14 +347,9 @@ struct CheckerInfo { // Below are accessed within procedures - // NOTE(bill): If the semantic checker (check_proc_body) is to ever to be multithreaded, - // these variables will be of contention - - Semaphore collect_semaphore; - + RwMutex global_untyped_mutex; UntypedExprInfoMap global_untyped; // NOTE(bill): This needs to be a map and not on the Ast // as it needs to be iterated across afterwards - BlockingMutex global_untyped_mutex; BlockingMutex builtin_mutex; BlockingMutex type_and_value_mutex; @@ -388,7 +383,7 @@ struct CheckerInfo { BlockingMutex load_file_mutex; StringMap load_file_cache; - BlockingMutex all_procedures_mutex;; + BlockingMutex all_procedures_mutex; Array all_procedures; }; diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp index c48115079..1660d3487 100644 --- a/src/llvm_backend_stmt.cpp +++ b/src/llvm_backend_stmt.cpp @@ -57,7 +57,7 @@ gb_internal void lb_build_constant_value_decl(lbProcedure *p, AstValueDecl *vd) auto *found = map_get(&info->gen_procs, ident); if (found) { GenProcsData *gpd = *found; - MUTEX_GUARD(&gpd->mutex); + rw_mutex_shared_lock(&gpd->mutex); for (Entity *e : gpd->procs) { if (!ptr_set_exists(min_dep_set, e)) { continue; @@ -65,6 +65,7 @@ gb_internal void lb_build_constant_value_decl(lbProcedure *p, AstValueDecl *vd) DeclInfo *d = decl_info_of_entity(e); lb_build_nested_proc(p, &d->proc_lit->ProcLit, e); } + rw_mutex_shared_unlock(&gpd->mutex); } else { lb_build_nested_proc(p, pl, e); } From dc317c8cd83ca183cf53b2bb3d4c7dbbd0f16668 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Tue, 3 Jan 2023 15:50:31 +0000 Subject: 
[PATCH 48/78] Make `BlockingMutex` --- src/checker.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/checker.hpp b/src/checker.hpp index 226f69c1c..d647cd67f 100644 --- a/src/checker.hpp +++ b/src/checker.hpp @@ -356,7 +356,7 @@ struct CheckerInfo { RecursiveMutex lazy_mutex; // Mutex required for lazy type checking of specific files - RecursiveMutex gen_procs_mutex; + BlockingMutex gen_procs_mutex; RecursiveMutex gen_types_mutex; PtrMap gen_procs; // Key: Ast * | Identifier -> Entity PtrMap > gen_types; From 85e390deba48437eb8268e2a1067e2c377613352 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Tue, 3 Jan 2023 15:55:22 +0000 Subject: [PATCH 49/78] Minimize calling of `Ast::thread_safe_file()` when cloning --- src/parser.cpp | 284 +++++++++++++++++++++++++------------------------ 1 file changed, 143 insertions(+), 141 deletions(-) diff --git a/src/parser.cpp b/src/parser.cpp index c6f35d326..bc0fcf6be 100644 --- a/src/parser.cpp +++ b/src/parser.cpp @@ -75,33 +75,35 @@ gb_internal Ast *alloc_ast_node(AstFile *f, AstKind kind) { return node; } -gb_internal Ast *clone_ast(Ast *node); -gb_internal Array clone_ast_array(Array const &array) { +gb_internal Ast *clone_ast(Ast *node, AstFile *f = nullptr); +gb_internal Array clone_ast_array(Array const &array, AstFile *f) { Array result = {}; if (array.count > 0) { result = array_make(ast_allocator(nullptr), array.count); for_array(i, array) { - result[i] = clone_ast(array[i]); + result[i] = clone_ast(array[i], f); } } return result; } -gb_internal Slice clone_ast_array(Slice const &array) { +gb_internal Slice clone_ast_array(Slice const &array, AstFile *f) { Slice result = {}; if (array.count > 0) { result = slice_clone(permanent_allocator(), array); for_array(i, array) { - result[i] = clone_ast(array[i]); + result[i] = clone_ast(array[i], f); } } return result; } -gb_internal Ast *clone_ast(Ast *node) { +gb_internal Ast *clone_ast(Ast *node, AstFile *f) { if (node == nullptr) { return nullptr; } 
- AstFile *f = node->thread_safe_file(); + if (f == nullptr) { + f = node->thread_safe_file(); + } Ast *n = alloc_ast_node(f, node->kind); gb_memmove(n, node, ast_node_size(node->kind)); @@ -118,279 +120,279 @@ gb_internal Ast *clone_ast(Ast *node) { case Ast_BasicDirective: break; case Ast_PolyType: - n->PolyType.type = clone_ast(n->PolyType.type); - n->PolyType.specialization = clone_ast(n->PolyType.specialization); + n->PolyType.type = clone_ast(n->PolyType.type, f); + n->PolyType.specialization = clone_ast(n->PolyType.specialization, f); break; case Ast_Ellipsis: - n->Ellipsis.expr = clone_ast(n->Ellipsis.expr); + n->Ellipsis.expr = clone_ast(n->Ellipsis.expr, f); break; case Ast_ProcGroup: - n->ProcGroup.args = clone_ast_array(n->ProcGroup.args); + n->ProcGroup.args = clone_ast_array(n->ProcGroup.args, f); break; case Ast_ProcLit: - n->ProcLit.type = clone_ast(n->ProcLit.type); - n->ProcLit.body = clone_ast(n->ProcLit.body); - n->ProcLit.where_clauses = clone_ast_array(n->ProcLit.where_clauses); + n->ProcLit.type = clone_ast(n->ProcLit.type, f); + n->ProcLit.body = clone_ast(n->ProcLit.body, f); + n->ProcLit.where_clauses = clone_ast_array(n->ProcLit.where_clauses, f); break; case Ast_CompoundLit: - n->CompoundLit.type = clone_ast(n->CompoundLit.type); - n->CompoundLit.elems = clone_ast_array(n->CompoundLit.elems); + n->CompoundLit.type = clone_ast(n->CompoundLit.type, f); + n->CompoundLit.elems = clone_ast_array(n->CompoundLit.elems, f); break; case Ast_BadExpr: break; case Ast_TagExpr: - n->TagExpr.expr = clone_ast(n->TagExpr.expr); + n->TagExpr.expr = clone_ast(n->TagExpr.expr, f); break; case Ast_UnaryExpr: - n->UnaryExpr.expr = clone_ast(n->UnaryExpr.expr); + n->UnaryExpr.expr = clone_ast(n->UnaryExpr.expr, f); break; case Ast_BinaryExpr: - n->BinaryExpr.left = clone_ast(n->BinaryExpr.left); - n->BinaryExpr.right = clone_ast(n->BinaryExpr.right); + n->BinaryExpr.left = clone_ast(n->BinaryExpr.left, f); + n->BinaryExpr.right = 
clone_ast(n->BinaryExpr.right, f); break; case Ast_ParenExpr: - n->ParenExpr.expr = clone_ast(n->ParenExpr.expr); + n->ParenExpr.expr = clone_ast(n->ParenExpr.expr, f); break; case Ast_SelectorExpr: - n->SelectorExpr.expr = clone_ast(n->SelectorExpr.expr); - n->SelectorExpr.selector = clone_ast(n->SelectorExpr.selector); + n->SelectorExpr.expr = clone_ast(n->SelectorExpr.expr, f); + n->SelectorExpr.selector = clone_ast(n->SelectorExpr.selector, f); break; case Ast_ImplicitSelectorExpr: - n->ImplicitSelectorExpr.selector = clone_ast(n->ImplicitSelectorExpr.selector); + n->ImplicitSelectorExpr.selector = clone_ast(n->ImplicitSelectorExpr.selector, f); break; case Ast_SelectorCallExpr: - n->SelectorCallExpr.expr = clone_ast(n->SelectorCallExpr.expr); - n->SelectorCallExpr.call = clone_ast(n->SelectorCallExpr.call); + n->SelectorCallExpr.expr = clone_ast(n->SelectorCallExpr.expr, f); + n->SelectorCallExpr.call = clone_ast(n->SelectorCallExpr.call, f); break; case Ast_IndexExpr: - n->IndexExpr.expr = clone_ast(n->IndexExpr.expr); - n->IndexExpr.index = clone_ast(n->IndexExpr.index); + n->IndexExpr.expr = clone_ast(n->IndexExpr.expr, f); + n->IndexExpr.index = clone_ast(n->IndexExpr.index, f); break; case Ast_MatrixIndexExpr: - n->MatrixIndexExpr.expr = clone_ast(n->MatrixIndexExpr.expr); - n->MatrixIndexExpr.row_index = clone_ast(n->MatrixIndexExpr.row_index); - n->MatrixIndexExpr.column_index = clone_ast(n->MatrixIndexExpr.column_index); + n->MatrixIndexExpr.expr = clone_ast(n->MatrixIndexExpr.expr, f); + n->MatrixIndexExpr.row_index = clone_ast(n->MatrixIndexExpr.row_index, f); + n->MatrixIndexExpr.column_index = clone_ast(n->MatrixIndexExpr.column_index, f); break; case Ast_DerefExpr: - n->DerefExpr.expr = clone_ast(n->DerefExpr.expr); + n->DerefExpr.expr = clone_ast(n->DerefExpr.expr, f); break; case Ast_SliceExpr: - n->SliceExpr.expr = clone_ast(n->SliceExpr.expr); - n->SliceExpr.low = clone_ast(n->SliceExpr.low); - n->SliceExpr.high = clone_ast(n->SliceExpr.high); 
+ n->SliceExpr.expr = clone_ast(n->SliceExpr.expr, f); + n->SliceExpr.low = clone_ast(n->SliceExpr.low, f); + n->SliceExpr.high = clone_ast(n->SliceExpr.high, f); break; case Ast_CallExpr: - n->CallExpr.proc = clone_ast(n->CallExpr.proc); - n->CallExpr.args = clone_ast_array(n->CallExpr.args); + n->CallExpr.proc = clone_ast(n->CallExpr.proc, f); + n->CallExpr.args = clone_ast_array(n->CallExpr.args, f); break; case Ast_FieldValue: - n->FieldValue.field = clone_ast(n->FieldValue.field); - n->FieldValue.value = clone_ast(n->FieldValue.value); + n->FieldValue.field = clone_ast(n->FieldValue.field, f); + n->FieldValue.value = clone_ast(n->FieldValue.value, f); break; case Ast_EnumFieldValue: - n->EnumFieldValue.name = clone_ast(n->EnumFieldValue.name); - n->EnumFieldValue.value = clone_ast(n->EnumFieldValue.value); + n->EnumFieldValue.name = clone_ast(n->EnumFieldValue.name, f); + n->EnumFieldValue.value = clone_ast(n->EnumFieldValue.value, f); break; case Ast_TernaryIfExpr: - n->TernaryIfExpr.x = clone_ast(n->TernaryIfExpr.x); - n->TernaryIfExpr.cond = clone_ast(n->TernaryIfExpr.cond); - n->TernaryIfExpr.y = clone_ast(n->TernaryIfExpr.y); + n->TernaryIfExpr.x = clone_ast(n->TernaryIfExpr.x, f); + n->TernaryIfExpr.cond = clone_ast(n->TernaryIfExpr.cond, f); + n->TernaryIfExpr.y = clone_ast(n->TernaryIfExpr.y, f); break; case Ast_TernaryWhenExpr: - n->TernaryWhenExpr.x = clone_ast(n->TernaryWhenExpr.x); - n->TernaryWhenExpr.cond = clone_ast(n->TernaryWhenExpr.cond); - n->TernaryWhenExpr.y = clone_ast(n->TernaryWhenExpr.y); + n->TernaryWhenExpr.x = clone_ast(n->TernaryWhenExpr.x, f); + n->TernaryWhenExpr.cond = clone_ast(n->TernaryWhenExpr.cond, f); + n->TernaryWhenExpr.y = clone_ast(n->TernaryWhenExpr.y, f); break; case Ast_OrElseExpr: - n->OrElseExpr.x = clone_ast(n->OrElseExpr.x); - n->OrElseExpr.y = clone_ast(n->OrElseExpr.y); + n->OrElseExpr.x = clone_ast(n->OrElseExpr.x, f); + n->OrElseExpr.y = clone_ast(n->OrElseExpr.y, f); break; case Ast_OrReturnExpr: - 
n->OrReturnExpr.expr = clone_ast(n->OrReturnExpr.expr); + n->OrReturnExpr.expr = clone_ast(n->OrReturnExpr.expr, f); break; case Ast_TypeAssertion: - n->TypeAssertion.expr = clone_ast(n->TypeAssertion.expr); - n->TypeAssertion.type = clone_ast(n->TypeAssertion.type); + n->TypeAssertion.expr = clone_ast(n->TypeAssertion.expr, f); + n->TypeAssertion.type = clone_ast(n->TypeAssertion.type, f); break; case Ast_TypeCast: - n->TypeCast.type = clone_ast(n->TypeCast.type); - n->TypeCast.expr = clone_ast(n->TypeCast.expr); + n->TypeCast.type = clone_ast(n->TypeCast.type, f); + n->TypeCast.expr = clone_ast(n->TypeCast.expr, f); break; case Ast_AutoCast: - n->AutoCast.expr = clone_ast(n->AutoCast.expr); + n->AutoCast.expr = clone_ast(n->AutoCast.expr, f); break; case Ast_InlineAsmExpr: - n->InlineAsmExpr.param_types = clone_ast_array(n->InlineAsmExpr.param_types); - n->InlineAsmExpr.return_type = clone_ast(n->InlineAsmExpr.return_type); - n->InlineAsmExpr.asm_string = clone_ast(n->InlineAsmExpr.asm_string); - n->InlineAsmExpr.constraints_string = clone_ast(n->InlineAsmExpr.constraints_string); + n->InlineAsmExpr.param_types = clone_ast_array(n->InlineAsmExpr.param_types, f); + n->InlineAsmExpr.return_type = clone_ast(n->InlineAsmExpr.return_type, f); + n->InlineAsmExpr.asm_string = clone_ast(n->InlineAsmExpr.asm_string, f); + n->InlineAsmExpr.constraints_string = clone_ast(n->InlineAsmExpr.constraints_string, f); break; case Ast_BadStmt: break; case Ast_EmptyStmt: break; case Ast_ExprStmt: - n->ExprStmt.expr = clone_ast(n->ExprStmt.expr); + n->ExprStmt.expr = clone_ast(n->ExprStmt.expr, f); break; case Ast_AssignStmt: - n->AssignStmt.lhs = clone_ast_array(n->AssignStmt.lhs); - n->AssignStmt.rhs = clone_ast_array(n->AssignStmt.rhs); + n->AssignStmt.lhs = clone_ast_array(n->AssignStmt.lhs, f); + n->AssignStmt.rhs = clone_ast_array(n->AssignStmt.rhs, f); break; case Ast_BlockStmt: - n->BlockStmt.label = clone_ast(n->BlockStmt.label); - n->BlockStmt.stmts = 
clone_ast_array(n->BlockStmt.stmts); + n->BlockStmt.label = clone_ast(n->BlockStmt.label, f); + n->BlockStmt.stmts = clone_ast_array(n->BlockStmt.stmts, f); break; case Ast_IfStmt: - n->IfStmt.label = clone_ast(n->IfStmt.label); - n->IfStmt.init = clone_ast(n->IfStmt.init); - n->IfStmt.cond = clone_ast(n->IfStmt.cond); - n->IfStmt.body = clone_ast(n->IfStmt.body); - n->IfStmt.else_stmt = clone_ast(n->IfStmt.else_stmt); + n->IfStmt.label = clone_ast(n->IfStmt.label, f); + n->IfStmt.init = clone_ast(n->IfStmt.init, f); + n->IfStmt.cond = clone_ast(n->IfStmt.cond, f); + n->IfStmt.body = clone_ast(n->IfStmt.body, f); + n->IfStmt.else_stmt = clone_ast(n->IfStmt.else_stmt, f); break; case Ast_WhenStmt: - n->WhenStmt.cond = clone_ast(n->WhenStmt.cond); - n->WhenStmt.body = clone_ast(n->WhenStmt.body); - n->WhenStmt.else_stmt = clone_ast(n->WhenStmt.else_stmt); + n->WhenStmt.cond = clone_ast(n->WhenStmt.cond, f); + n->WhenStmt.body = clone_ast(n->WhenStmt.body, f); + n->WhenStmt.else_stmt = clone_ast(n->WhenStmt.else_stmt, f); break; case Ast_ReturnStmt: - n->ReturnStmt.results = clone_ast_array(n->ReturnStmt.results); + n->ReturnStmt.results = clone_ast_array(n->ReturnStmt.results, f); break; case Ast_ForStmt: - n->ForStmt.label = clone_ast(n->ForStmt.label); - n->ForStmt.init = clone_ast(n->ForStmt.init); - n->ForStmt.cond = clone_ast(n->ForStmt.cond); - n->ForStmt.post = clone_ast(n->ForStmt.post); - n->ForStmt.body = clone_ast(n->ForStmt.body); + n->ForStmt.label = clone_ast(n->ForStmt.label, f); + n->ForStmt.init = clone_ast(n->ForStmt.init, f); + n->ForStmt.cond = clone_ast(n->ForStmt.cond, f); + n->ForStmt.post = clone_ast(n->ForStmt.post, f); + n->ForStmt.body = clone_ast(n->ForStmt.body, f); break; case Ast_RangeStmt: - n->RangeStmt.label = clone_ast(n->RangeStmt.label); - n->RangeStmt.vals = clone_ast_array(n->RangeStmt.vals); - n->RangeStmt.expr = clone_ast(n->RangeStmt.expr); - n->RangeStmt.body = clone_ast(n->RangeStmt.body); + n->RangeStmt.label = 
clone_ast(n->RangeStmt.label, f); + n->RangeStmt.vals = clone_ast_array(n->RangeStmt.vals, f); + n->RangeStmt.expr = clone_ast(n->RangeStmt.expr, f); + n->RangeStmt.body = clone_ast(n->RangeStmt.body, f); break; case Ast_UnrollRangeStmt: - n->UnrollRangeStmt.val0 = clone_ast(n->UnrollRangeStmt.val0); - n->UnrollRangeStmt.val1 = clone_ast(n->UnrollRangeStmt.val1); - n->UnrollRangeStmt.expr = clone_ast(n->UnrollRangeStmt.expr); - n->UnrollRangeStmt.body = clone_ast(n->UnrollRangeStmt.body); + n->UnrollRangeStmt.val0 = clone_ast(n->UnrollRangeStmt.val0, f); + n->UnrollRangeStmt.val1 = clone_ast(n->UnrollRangeStmt.val1, f); + n->UnrollRangeStmt.expr = clone_ast(n->UnrollRangeStmt.expr, f); + n->UnrollRangeStmt.body = clone_ast(n->UnrollRangeStmt.body, f); break; case Ast_CaseClause: - n->CaseClause.list = clone_ast_array(n->CaseClause.list); - n->CaseClause.stmts = clone_ast_array(n->CaseClause.stmts); + n->CaseClause.list = clone_ast_array(n->CaseClause.list, f); + n->CaseClause.stmts = clone_ast_array(n->CaseClause.stmts, f); n->CaseClause.implicit_entity = nullptr; break; case Ast_SwitchStmt: - n->SwitchStmt.label = clone_ast(n->SwitchStmt.label); - n->SwitchStmt.init = clone_ast(n->SwitchStmt.init); - n->SwitchStmt.tag = clone_ast(n->SwitchStmt.tag); - n->SwitchStmt.body = clone_ast(n->SwitchStmt.body); + n->SwitchStmt.label = clone_ast(n->SwitchStmt.label, f); + n->SwitchStmt.init = clone_ast(n->SwitchStmt.init, f); + n->SwitchStmt.tag = clone_ast(n->SwitchStmt.tag, f); + n->SwitchStmt.body = clone_ast(n->SwitchStmt.body, f); break; case Ast_TypeSwitchStmt: - n->TypeSwitchStmt.label = clone_ast(n->TypeSwitchStmt.label); - n->TypeSwitchStmt.tag = clone_ast(n->TypeSwitchStmt.tag); - n->TypeSwitchStmt.body = clone_ast(n->TypeSwitchStmt.body); + n->TypeSwitchStmt.label = clone_ast(n->TypeSwitchStmt.label, f); + n->TypeSwitchStmt.tag = clone_ast(n->TypeSwitchStmt.tag, f); + n->TypeSwitchStmt.body = clone_ast(n->TypeSwitchStmt.body, f); break; case Ast_DeferStmt: - 
n->DeferStmt.stmt = clone_ast(n->DeferStmt.stmt); + n->DeferStmt.stmt = clone_ast(n->DeferStmt.stmt, f); break; case Ast_BranchStmt: - n->BranchStmt.label = clone_ast(n->BranchStmt.label); + n->BranchStmt.label = clone_ast(n->BranchStmt.label, f); break; case Ast_UsingStmt: - n->UsingStmt.list = clone_ast_array(n->UsingStmt.list); + n->UsingStmt.list = clone_ast_array(n->UsingStmt.list, f); break; case Ast_BadDecl: break; case Ast_ForeignBlockDecl: - n->ForeignBlockDecl.foreign_library = clone_ast(n->ForeignBlockDecl.foreign_library); - n->ForeignBlockDecl.body = clone_ast(n->ForeignBlockDecl.body); - n->ForeignBlockDecl.attributes = clone_ast_array(n->ForeignBlockDecl.attributes); + n->ForeignBlockDecl.foreign_library = clone_ast(n->ForeignBlockDecl.foreign_library, f); + n->ForeignBlockDecl.body = clone_ast(n->ForeignBlockDecl.body, f); + n->ForeignBlockDecl.attributes = clone_ast_array(n->ForeignBlockDecl.attributes, f); break; case Ast_Label: - n->Label.name = clone_ast(n->Label.name); + n->Label.name = clone_ast(n->Label.name, f); break; case Ast_ValueDecl: - n->ValueDecl.names = clone_ast_array(n->ValueDecl.names); - n->ValueDecl.type = clone_ast(n->ValueDecl.type); - n->ValueDecl.values = clone_ast_array(n->ValueDecl.values); - n->ValueDecl.attributes = clone_ast_array(n->ValueDecl.attributes); + n->ValueDecl.names = clone_ast_array(n->ValueDecl.names, f); + n->ValueDecl.type = clone_ast(n->ValueDecl.type, f); + n->ValueDecl.values = clone_ast_array(n->ValueDecl.values, f); + n->ValueDecl.attributes = clone_ast_array(n->ValueDecl.attributes, f); break; case Ast_Attribute: - n->Attribute.elems = clone_ast_array(n->Attribute.elems); + n->Attribute.elems = clone_ast_array(n->Attribute.elems, f); break; case Ast_Field: - n->Field.names = clone_ast_array(n->Field.names); - n->Field.type = clone_ast(n->Field.type); + n->Field.names = clone_ast_array(n->Field.names, f); + n->Field.type = clone_ast(n->Field.type, f); break; case Ast_FieldList: - n->FieldList.list = 
clone_ast_array(n->FieldList.list); + n->FieldList.list = clone_ast_array(n->FieldList.list, f); break; case Ast_TypeidType: - n->TypeidType.specialization = clone_ast(n->TypeidType.specialization); + n->TypeidType.specialization = clone_ast(n->TypeidType.specialization, f); break; case Ast_HelperType: - n->HelperType.type = clone_ast(n->HelperType.type); + n->HelperType.type = clone_ast(n->HelperType.type, f); break; case Ast_DistinctType: - n->DistinctType.type = clone_ast(n->DistinctType.type); + n->DistinctType.type = clone_ast(n->DistinctType.type, f); break; case Ast_ProcType: - n->ProcType.params = clone_ast(n->ProcType.params); - n->ProcType.results = clone_ast(n->ProcType.results); + n->ProcType.params = clone_ast(n->ProcType.params, f); + n->ProcType.results = clone_ast(n->ProcType.results, f); break; case Ast_RelativeType: - n->RelativeType.tag = clone_ast(n->RelativeType.tag); - n->RelativeType.type = clone_ast(n->RelativeType.type); + n->RelativeType.tag = clone_ast(n->RelativeType.tag, f); + n->RelativeType.type = clone_ast(n->RelativeType.type, f); break; case Ast_PointerType: - n->PointerType.type = clone_ast(n->PointerType.type); - n->PointerType.tag = clone_ast(n->PointerType.tag); + n->PointerType.type = clone_ast(n->PointerType.type, f); + n->PointerType.tag = clone_ast(n->PointerType.tag, f); break; case Ast_MultiPointerType: - n->MultiPointerType.type = clone_ast(n->MultiPointerType.type); + n->MultiPointerType.type = clone_ast(n->MultiPointerType.type, f); break; case Ast_ArrayType: - n->ArrayType.count = clone_ast(n->ArrayType.count); - n->ArrayType.elem = clone_ast(n->ArrayType.elem); - n->ArrayType.tag = clone_ast(n->ArrayType.tag); + n->ArrayType.count = clone_ast(n->ArrayType.count, f); + n->ArrayType.elem = clone_ast(n->ArrayType.elem, f); + n->ArrayType.tag = clone_ast(n->ArrayType.tag, f); break; case Ast_DynamicArrayType: - n->DynamicArrayType.elem = clone_ast(n->DynamicArrayType.elem); + n->DynamicArrayType.elem = 
clone_ast(n->DynamicArrayType.elem, f); break; case Ast_StructType: - n->StructType.fields = clone_ast_array(n->StructType.fields); - n->StructType.polymorphic_params = clone_ast(n->StructType.polymorphic_params); - n->StructType.align = clone_ast(n->StructType.align); - n->StructType.where_clauses = clone_ast_array(n->StructType.where_clauses); + n->StructType.fields = clone_ast_array(n->StructType.fields, f); + n->StructType.polymorphic_params = clone_ast(n->StructType.polymorphic_params, f); + n->StructType.align = clone_ast(n->StructType.align, f); + n->StructType.where_clauses = clone_ast_array(n->StructType.where_clauses, f); break; case Ast_UnionType: - n->UnionType.variants = clone_ast_array(n->UnionType.variants); - n->UnionType.polymorphic_params = clone_ast(n->UnionType.polymorphic_params); - n->UnionType.where_clauses = clone_ast_array(n->UnionType.where_clauses); + n->UnionType.variants = clone_ast_array(n->UnionType.variants, f); + n->UnionType.polymorphic_params = clone_ast(n->UnionType.polymorphic_params, f); + n->UnionType.where_clauses = clone_ast_array(n->UnionType.where_clauses, f); break; case Ast_EnumType: - n->EnumType.base_type = clone_ast(n->EnumType.base_type); - n->EnumType.fields = clone_ast_array(n->EnumType.fields); + n->EnumType.base_type = clone_ast(n->EnumType.base_type, f); + n->EnumType.fields = clone_ast_array(n->EnumType.fields, f); break; case Ast_BitSetType: - n->BitSetType.elem = clone_ast(n->BitSetType.elem); - n->BitSetType.underlying = clone_ast(n->BitSetType.underlying); + n->BitSetType.elem = clone_ast(n->BitSetType.elem, f); + n->BitSetType.underlying = clone_ast(n->BitSetType.underlying, f); break; case Ast_MapType: - n->MapType.count = clone_ast(n->MapType.count); - n->MapType.key = clone_ast(n->MapType.key); - n->MapType.value = clone_ast(n->MapType.value); + n->MapType.count = clone_ast(n->MapType.count, f); + n->MapType.key = clone_ast(n->MapType.key, f); + n->MapType.value = clone_ast(n->MapType.value, f); break; 
case Ast_MatrixType: - n->MatrixType.row_count = clone_ast(n->MatrixType.row_count); - n->MatrixType.column_count = clone_ast(n->MatrixType.column_count); - n->MatrixType.elem = clone_ast(n->MatrixType.elem); + n->MatrixType.row_count = clone_ast(n->MatrixType.row_count, f); + n->MatrixType.column_count = clone_ast(n->MatrixType.column_count, f); + n->MatrixType.elem = clone_ast(n->MatrixType.elem, f); break; } From bb80c1b05922a1d1a76e96f0bc5edba09292b1e9 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Tue, 3 Jan 2023 17:07:53 +0000 Subject: [PATCH 50/78] Add `type_and_value_mutex` to `DeclInfo` --- src/checker.cpp | 8 ++++++-- src/checker.hpp | 2 ++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/src/checker.cpp b/src/checker.cpp index 3f5c2892f..11e0dfa47 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -1453,7 +1453,9 @@ gb_internal void add_type_and_value(CheckerContext *ctx, Ast *expr, AddressingMo } BlockingMutex *mutex = &ctx->info->type_and_value_mutex; - if (ctx->pkg) { + if (ctx->decl) { + mutex = &ctx->decl->type_and_value_mutex; + } else if (ctx->pkg) { // TODO(bill): is a per package mutex is a good idea here? 
mutex = &ctx->pkg->type_and_value_mutex; } @@ -1749,7 +1751,7 @@ gb_internal void add_type_info_type_internal(CheckerContext *c, Type *t) { if (is_type_untyped(t)) { return; // Could be nil } - if (is_type_polymorphic(base_type(t))) { + if (is_type_polymorphic(t)) { return; } @@ -1764,6 +1766,8 @@ gb_internal void add_type_info_type_internal(CheckerContext *c, Type *t) { bool prev = false; isize ti_index = -1; + // NOTE(bill): this is a linear lookup, and is most likely very costly + // as this map keeps growing linearly for (auto const &e : c->info->type_info_map) { if (are_types_identical_unique_tuples(t, e.key)) { // Duplicate entry diff --git a/src/checker.hpp b/src/checker.hpp index d647cd67f..60800349b 100644 --- a/src/checker.hpp +++ b/src/checker.hpp @@ -185,6 +185,8 @@ struct DeclInfo { RwMutex type_info_deps_mutex; PtrSet type_info_deps; + BlockingMutex type_and_value_mutex; + Array labels; }; From 2720e9812778c8cf28ead9c41c46b0d578f6a7b3 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Tue, 3 Jan 2023 17:25:51 +0000 Subject: [PATCH 51/78] Add `+ignore` along with `+build ignore` --- src/parser.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/parser.cpp b/src/parser.cpp index bc0fcf6be..0eb7e5fc1 100644 --- a/src/parser.cpp +++ b/src/parser.cpp @@ -5584,6 +5584,8 @@ gb_internal bool parse_file(Parser *p, AstFile *f) { if (!parse_build_tag(tok, lc)) { return false; } + } else if (string_starts_with(lc, str_lit("+ignore"))) { + return false; } else if (string_starts_with(lc, str_lit("+private"))) { f->flags |= AstFile_IsPrivatePkg; String command = string_trim_starts_with(lc, str_lit("+private ")); From 855ebceadcc4612a6451f268ab6d6693838ed5f4 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Tue, 3 Jan 2023 17:26:05 +0000 Subject: [PATCH 52/78] Minimize `add_type_info_type` usage --- src/check_builtin.cpp | 2 +- src/check_expr.cpp | 8 ++++---- src/check_stmt.cpp | 8 +++++--- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git 
a/src/check_builtin.cpp b/src/check_builtin.cpp index 7c5521dde..606283c32 100644 --- a/src/check_builtin.cpp +++ b/src/check_builtin.cpp @@ -3573,7 +3573,7 @@ gb_internal bool check_builtin_procedure(CheckerContext *c, Operand *operand, As Entity *base_type_entity = alloc_entity_type_name(scope, token, elem, EntityState_Resolved); add_entity(c, scope, nullptr, base_type_entity); - add_type_info_type(c, soa_struct); + // add_type_info_type(c, soa_struct); operand->type = soa_struct; break; diff --git a/src/check_expr.cpp b/src/check_expr.cpp index 746a29ce0..5f28504a2 100644 --- a/src/check_expr.cpp +++ b/src/check_expr.cpp @@ -8779,8 +8779,8 @@ gb_internal ExprKind check_type_assertion(CheckerContext *c, Operand *o, Ast *no return kind; } - add_type_info_type(c, o->type); - add_type_info_type(c, bsrc->Union.variants[0]); + // add_type_info_type(c, o->type); + // add_type_info_type(c, bsrc->Union.variants[0]); o->type = bsrc->Union.variants[0]; o->mode = Addressing_OptionalOk; @@ -8812,8 +8812,8 @@ gb_internal ExprKind check_type_assertion(CheckerContext *c, Operand *o, Ast *no return kind; } - add_type_info_type(c, o->type); - add_type_info_type(c, t); + // add_type_info_type(c, o->type); + // add_type_info_type(c, t); o->type = t; o->mode = Addressing_OptionalOk; diff --git a/src/check_stmt.cpp b/src/check_stmt.cpp index 6e84d0789..9547035d0 100644 --- a/src/check_stmt.cpp +++ b/src/check_stmt.cpp @@ -1132,7 +1132,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_ check_expr(ctx, &x, rhs); check_assignment(ctx, &x, nullptr, str_lit("type switch expression")); - add_type_info_type(ctx, x.type); + // add_type_info_type(ctx, x.type); TypeSwitchKind switch_kind = check_valid_type_switch_type(x.type); if (switch_kind == TypeSwitch_Invalid) { @@ -1223,7 +1223,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_ continue; } case_type = y.type; - add_type_info_type(ctx, y.type); + // 
add_type_info_type(ctx, y.type); } else if (switch_kind == TypeSwitch_Any) { case_type = y.type; add_type_info_type(ctx, y.type); @@ -1259,7 +1259,9 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_ if (case_type == nullptr) { case_type = x.type; } - add_type_info_type(ctx, case_type); + if (switch_kind == TypeSwitch_Any) { + add_type_info_type(ctx, case_type); + } check_open_scope(ctx, stmt); { From 17fa8cb6ef4424e4c7cff2439e2d52220f440660 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Tue, 3 Jan 2023 18:21:42 +0000 Subject: [PATCH 53/78] Add extra mutex to TypePth just in case --- src/ptr_set.cpp | 3 +++ src/types.cpp | 16 ++++++++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/src/ptr_set.cpp b/src/ptr_set.cpp index 303bde07e..9b8b678f8 100644 --- a/src/ptr_set.cpp +++ b/src/ptr_set.cpp @@ -1,5 +1,7 @@ template struct PtrSetEntry { + static_assert(sizeof(T) == sizeof(void *), "Key size must be pointer size"); + T ptr; MapIndex next; @@ -10,6 +12,7 @@ struct PtrSetEntry { template struct PtrSet { + Slice hashes; Array> entries; }; diff --git a/src/types.cpp b/src/types.cpp index fa7c1d7f7..ec7adab5a 100644 --- a/src/types.cpp +++ b/src/types.cpp @@ -748,6 +748,7 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path); // IMPORTANT TODO(bill): SHould this TypePath code be removed since type cycle checking is handled much earlier on? 
struct TypePath { + RecursiveMutex mutex; Array path; // Entity_TypeName; bool failure; }; @@ -758,7 +759,9 @@ gb_internal void type_path_init(TypePath *tp) { } gb_internal void type_path_free(TypePath *tp) { + mutex_lock(&tp->mutex); array_free(&tp->path); + mutex_unlock(&tp->mutex); } gb_internal void type_path_print_illegal_cycle(TypePath *tp, isize start_index) { @@ -787,6 +790,8 @@ gb_internal bool type_path_push(TypePath *tp, Type *t) { } Entity *e = t->Named.type_name; + mutex_lock(&tp->mutex); + for (isize i = 0; i < tp->path.count; i++) { Entity *p = tp->path[i]; if (p == e) { @@ -795,12 +800,19 @@ gb_internal bool type_path_push(TypePath *tp, Type *t) { } array_add(&tp->path, e); + + mutex_unlock(&tp->mutex); + return true; } gb_internal void type_path_pop(TypePath *tp) { - if (tp != nullptr && tp->path.count > 0) { - array_pop(&tp->path); + if (tp != nullptr) { + mutex_lock(&tp->mutex); + if (tp->path.count > 0) { + array_pop(&tp->path); + } + mutex_unlock(&tp->mutex); } } From ec69101101d56e90ee183e449eb5bb3605e3afbe Mon Sep 17 00:00:00 2001 From: gingerBill Date: Tue, 3 Jan 2023 18:39:37 +0000 Subject: [PATCH 54/78] Convert `minimum_dependency_type_info_set` to use a `PtrMap` --- src/checker.cpp | 19 +++++++------------ src/checker.hpp | 2 +- src/llvm_backend_type.cpp | 7 ++++--- src/ptr_map.cpp | 1 - src/ptr_set.cpp | 10 ---------- 5 files changed, 12 insertions(+), 27 deletions(-) diff --git a/src/checker.cpp b/src/checker.cpp index 11e0dfa47..78f96e47f 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -1153,6 +1153,9 @@ gb_internal void init_checker_info(CheckerInfo *i) { array_init(&i->init_procedures, a, 0, 0); array_init(&i->required_foreign_imports_through_force, a, 0, 0); + map_init(&i->objc_msgSend_types); + string_map_init(&i->load_file_cache); + array_init(&i->all_procedures, heap_allocator()); TIME_SECTION("checker info: mpmc queues"); @@ -1160,16 +1163,7 @@ gb_internal void init_checker_info(CheckerInfo *i) { 
mpmc_init(&i->definition_queue, a, 1<<20); mpmc_init(&i->required_global_variable_queue, a, 1<<10); mpmc_init(&i->required_foreign_imports_through_force_queue, a, 1<<10); - - TIME_SECTION("checker info: mutexes"); - mpmc_init(&i->intrinsics_entry_point_usage, a, 1<<10); // just waste some memory here, even if it probably never used - - map_init(&i->objc_msgSend_types); - string_map_init(&i->load_file_cache); - - array_init(&i->all_procedures, heap_allocator()); - } gb_internal void destroy_checker_info(CheckerInfo *i) { @@ -2031,10 +2025,11 @@ gb_internal void add_min_dep_type_info(Checker *c, Type *t) { ti_index = type_info_index(&c->info, t, false); } GB_ASSERT(ti_index >= 0); - if (ptr_set_update(set, ti_index)) { - // Type Already exists + if (map_get(set, ti_index)) { + // Type already exists; return; } + map_set(set, ti_index, set->entries.count); // Add nested types if (t->kind == Type_Named) { @@ -2275,7 +2270,7 @@ gb_internal void generate_minimum_dependency_set(Checker *c, Entity *start) { isize min_dep_set_cap = next_pow2_isize(entity_count*4); // empirically determined factor ptr_set_init(&c->info.minimum_dependency_set, min_dep_set_cap); - ptr_set_init(&c->info.minimum_dependency_type_info_set); + map_init(&c->info.minimum_dependency_type_info_set); #define FORCE_ADD_RUNTIME_ENTITIES(condition, ...) 
do { \ if (condition) { \ diff --git a/src/checker.hpp b/src/checker.hpp index 60800349b..bb870e077 100644 --- a/src/checker.hpp +++ b/src/checker.hpp @@ -336,7 +336,7 @@ struct CheckerInfo { Scope * init_scope; Entity * entry_point; PtrSet minimum_dependency_set; - PtrSet minimum_dependency_type_info_set; + PtrMap minimum_dependency_type_info_set; diff --git a/src/llvm_backend_type.cpp b/src/llvm_backend_type.cpp index c306cdead..b9b450404 100644 --- a/src/llvm_backend_type.cpp +++ b/src/llvm_backend_type.cpp @@ -2,9 +2,10 @@ gb_internal isize lb_type_info_index(CheckerInfo *info, Type *type, bool err_on_ auto *set = &info->minimum_dependency_type_info_set; isize index = type_info_index(info, type, err_on_not_found); if (index >= 0) { - isize i = ptr_set_entry_index(set, index); - if (i >= 0) { - return i+1; + auto *found = map_get(set, index); + if (found) { + GB_ASSERT(*found >= 0); + return *found + 1; } } if (err_on_not_found) { diff --git a/src/ptr_map.cpp b/src/ptr_map.cpp index 264136881..ae3cd4b40 100644 --- a/src/ptr_map.cpp +++ b/src/ptr_map.cpp @@ -229,7 +229,6 @@ gb_internal void map_set(PtrMap *h, K key, V const &value) { } } - template gb_internal void map__erase(PtrMap *h, MapFindResult const &fr) { MapFindResult last; diff --git a/src/ptr_set.cpp b/src/ptr_set.cpp index 9b8b678f8..f730a47ff 100644 --- a/src/ptr_set.cpp +++ b/src/ptr_set.cpp @@ -12,7 +12,6 @@ struct PtrSetEntry { template struct PtrSet { - Slice hashes; Array> entries; }; @@ -154,15 +153,6 @@ gb_internal gb_inline bool ptr_set_exists(PtrSet *s, T ptr) { return index != MAP_SENTINEL; } -template -gb_internal gb_inline isize ptr_set_entry_index(PtrSet *s, T ptr) { - isize index = ptr_set__find(s, ptr).entry_index; - if (index != MAP_SENTINEL) { - return index; - } - return -1; -} - // Returns true if it already exists template gb_internal T ptr_set_add(PtrSet *s, T ptr) { From b3a55b8b6f54b71bb527c2b2b1cbe8b01e28d8a2 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Tue, 3 Jan 2023 
18:42:13 +0000 Subject: [PATCH 55/78] Remove unused procedures --- src/ptr_set.cpp | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/src/ptr_set.cpp b/src/ptr_set.cpp index f730a47ff..e2b3f2372 100644 --- a/src/ptr_set.cpp +++ b/src/ptr_set.cpp @@ -23,9 +23,6 @@ template gb_internal bool ptr_set_update (PtrSet *s, T ptr); // template gb_internal bool ptr_set_exists (PtrSet *s, T ptr); template gb_internal void ptr_set_remove (PtrSet *s, T ptr); template gb_internal void ptr_set_clear (PtrSet *s); -template gb_internal void ptr_set_grow (PtrSet *s); -template gb_internal void ptr_set_rehash (PtrSet *s, isize new_count); -template gb_internal void ptr_set_reserve(PtrSet *h, isize cap); gb_internal gbAllocator ptr_set_allocator(void) { return heap_allocator(); @@ -104,12 +101,6 @@ gb_internal bool ptr_set__full(PtrSet *s) { return 0.75f * s->hashes.count <= s->entries.count; } -template -gb_internal gb_inline void ptr_set_grow(PtrSet *s) { - isize new_count = gb_max(s->hashes.count<<1, 16); - ptr_set_rehash(s, new_count); -} - template gb_internal void ptr_set_reset_entries(PtrSet *s) { for (isize i = 0; i < s->hashes.count; i++) { @@ -141,12 +132,13 @@ gb_internal void ptr_set_reserve(PtrSet *s, isize cap) { ptr_set_reset_entries(s); } - template -gb_internal void ptr_set_rehash(PtrSet *s, isize new_count) { +gb_internal gb_inline void ptr_set_grow(PtrSet *s) { + isize new_count = gb_max(s->hashes.count<<1, 16); ptr_set_reserve(s, new_count); } + template gb_internal gb_inline bool ptr_set_exists(PtrSet *s, T ptr) { isize index = ptr_set__find(s, ptr).entry_index; From d06a0e7093c3f06a474a040385f1b9dfdfce29ad Mon Sep 17 00:00:00 2001 From: gingerBill Date: Wed, 4 Jan 2023 13:30:27 +0000 Subject: [PATCH 56/78] Improve the `PtrSet` to be as simple and small as possible --- src/check_stmt.cpp | 1 + src/checker.cpp | 33 ++--- src/ptr_map.cpp | 2 +- src/ptr_set.cpp | 313 ++++++++++++++++++++------------------------- src/threading.cpp | 20 
+-- 5 files changed, 162 insertions(+), 207 deletions(-) diff --git a/src/check_stmt.cpp b/src/check_stmt.cpp index 9547035d0..b4dd4cd7d 100644 --- a/src/check_stmt.cpp +++ b/src/check_stmt.cpp @@ -1289,6 +1289,7 @@ gb_internal void check_type_switch_stmt(CheckerContext *ctx, Ast *node, u32 mod_ for (Type *t : variants) { if (!type_ptr_set_exists(&seen, t)) { array_add(&unhandled, t); + gb_printf_err("HERE: %p %s\n", t, type_to_string(t)); } } diff --git a/src/checker.cpp b/src/checker.cpp index 78f96e47f..b8709f15e 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -66,15 +66,10 @@ gb_internal void scope_reserve(Scope *scope, isize capacity) { } gb_internal void entity_graph_node_set_destroy(EntityGraphNodeSet *s) { - if (s->hashes.data != nullptr) { - ptr_set_destroy(s); - } + ptr_set_destroy(s); } gb_internal void entity_graph_node_set_add(EntityGraphNodeSet *s, EntityGraphNode *n) { - if (s->hashes.data == nullptr) { - ptr_set_init(s); - } ptr_set_add(s, n); } @@ -2556,7 +2551,6 @@ gb_internal Array generate_entity_dependency_graph(CheckerInf } // IMPORTANT NOTE/TODO(bill, 2020-11-15): These three calls take the majority of the // the time to process - entity_graph_node_set_add(&p->succ, s); entity_graph_node_set_add(&s->pred, p); // Remove edge to 'n' @@ -2577,7 +2571,7 @@ gb_internal Array generate_entity_dependency_graph(CheckerInf for_array(i, G) { EntityGraphNode *n = G[i]; n->index = i; - n->dep_count = n->succ.entries.count; + n->dep_count = n->succ.count; GB_ASSERT(n->dep_count >= 0); } @@ -4228,7 +4222,7 @@ gb_internal Array generate_import_dependency_graph(Checker *c for (auto const &entry : M) { auto n = entry.value; n->index = i++; - n->dep_count = n->succ.entries.count; + n->dep_count = n->succ.count; GB_ASSERT(n->dep_count >= 0); array_add(&G, n); } @@ -5706,17 +5700,6 @@ gb_internal void check_parsed_files(Checker *c) { check_scope_usage(c, f->scope); } - TIME_SECTION("add untyped expression values"); - // Add untyped expression values - for 
(UntypedExprInfo u = {}; mpmc_dequeue(&c->global_untyped_queue, &u); /**/) { - GB_ASSERT(u.expr != nullptr && u.info != nullptr); - if (is_type_typed(u.info->type)) { - compiler_error("%s (type %s) is typed!", expr_to_string(u.expr), type_to_string(u.info->type)); - } - add_type_and_value(&c->builtin_ctx, u.expr, u.info->mode, u.info->type, u.info->value); - } - - TIME_SECTION("add basic type information"); // Add "Basic" type information for (isize i = 0; i < Basic_COUNT; i++) { @@ -5810,6 +5793,16 @@ gb_internal void check_parsed_files(Checker *c) { GB_ASSERT(c->info.entity_queue.count.load(std::memory_order_relaxed) == 0); GB_ASSERT(c->info.definition_queue.count.load(std::memory_order_relaxed) == 0); + TIME_SECTION("add untyped expression values"); + // Add untyped expression values + for (UntypedExprInfo u = {}; mpmc_dequeue(&c->global_untyped_queue, &u); /**/) { + GB_ASSERT(u.expr != nullptr && u.info != nullptr); + if (is_type_typed(u.info->type)) { + compiler_error("%s (type %s) is typed!", expr_to_string(u.expr), type_to_string(u.info->type)); + } + add_type_and_value(&c->builtin_ctx, u.expr, u.info->mode, u.info->type, u.info->value); + } + TIME_SECTION("sort init procedures"); check_sort_init_procedures(c); diff --git a/src/ptr_map.cpp b/src/ptr_map.cpp index ae3cd4b40..8869bf3fe 100644 --- a/src/ptr_map.cpp +++ b/src/ptr_map.cpp @@ -41,7 +41,7 @@ gb_internal gb_inline u32 ptr_map_hash_key(uintptr key) { u32 word = ((state >> ((state >> 28u) + 4u)) ^ state) * 277803737u; res = (word >> 22u) ^ word; #endif - return res ^ (res == MAP_SENTINEL); + return res; } gb_internal gb_inline u32 ptr_map_hash_key(void const *key) { return ptr_map_hash_key((uintptr)key); diff --git a/src/ptr_set.cpp b/src/ptr_set.cpp index e2b3f2372..8be2b0524 100644 --- a/src/ptr_set.cpp +++ b/src/ptr_set.cpp @@ -1,19 +1,22 @@ template -struct PtrSetEntry { - static_assert(sizeof(T) == sizeof(void *), "Key size must be pointer size"); - - T ptr; - MapIndex next; - - operator T() 
const noexcept { - return this->ptr; - } +struct TypeIsPointer { + enum {value = false}; }; +template +struct TypeIsPointer { + enum {value = true}; +}; + + template struct PtrSet { - Slice hashes; - Array> entries; + static_assert(TypeIsPointer::value, "PtrSet::T must be a pointer"); + static constexpr T TOMBSTONE = (T)(~uintptr(0)); + + T * keys; + usize count; + usize capacity; }; template gb_internal void ptr_set_init (PtrSet *s, isize capacity = 16); @@ -30,225 +33,183 @@ gb_internal gbAllocator ptr_set_allocator(void) { template gb_internal void ptr_set_init(PtrSet *s, isize capacity) { + GB_ASSERT(s->keys == nullptr); if (capacity != 0) { capacity = next_pow2_isize(gb_max(16, capacity)); + s->keys = gb_alloc_array(ptr_set_allocator(), T, capacity); + // This memory will be zeroed, no need to explicitly zero it } - - slice_init(&s->hashes, ptr_set_allocator(), capacity); - array_init(&s->entries, ptr_set_allocator(), 0, capacity); - for (isize i = 0; i < capacity; i++) { - s->hashes.data[i] = MAP_SENTINEL; - } + s->count = 0; + s->capacity = capacity; } template gb_internal void ptr_set_destroy(PtrSet *s) { - if (s->entries.allocator.proc == nullptr) { - s->entries.allocator = ptr_set_allocator(); - } - slice_free(&s->hashes, s->entries.allocator); - array_free(&s->entries); + gb_free(ptr_set_allocator(), s->keys); + s->keys = nullptr; + s->count = 0; + s->capacity = 0; } template -gb_internal MapIndex ptr_set__add_entry(PtrSet *s, T ptr) { - PtrSetEntry e = {}; - e.ptr = ptr; - e.next = MAP_SENTINEL; - array_add(&s->entries, e); - return cast(MapIndex)(s->entries.count-1); -} - - -template -gb_internal MapFindResult ptr_set__find(PtrSet *s, T ptr) { - MapFindResult fr = {MAP_SENTINEL, MAP_SENTINEL, MAP_SENTINEL}; - if (s->hashes.count != 0) { +gb_internal isize ptr_set__find(PtrSet *s, T ptr) { + GB_ASSERT(ptr != nullptr); + if (s->count != 0) { + #if 0 + for (usize i = 0; i < s->capacity; i++) { + if (s->keys[i] == ptr) { + return i; + } + } + #else u32 
hash = ptr_map_hash_key(ptr); - fr.hash_index = cast(MapIndex)(hash & (s->hashes.count-1)); - fr.entry_index = s->hashes.data[fr.hash_index]; - while (fr.entry_index != MAP_SENTINEL) { - if (s->entries.data[fr.entry_index].ptr == ptr) { - return fr; + usize mask = s->capacity-1; + usize hash_index = cast(usize)hash & mask; + for (usize i = 0; i < s->capacity; i++) { + T key = s->keys[hash_index]; + if (key == ptr) { + return hash_index; + } else if (key == nullptr) { + return -1; } - fr.entry_prev = fr.entry_index; - fr.entry_index = s->entries.data[fr.entry_index].next; + hash_index = (hash_index+1)&mask; } + #endif } - return fr; -} - -template -gb_internal MapFindResult ptr_set__find_from_entry(PtrSet *s, PtrSetEntry *e) { - MapFindResult fr = {MAP_SENTINEL, MAP_SENTINEL, MAP_SENTINEL}; - if (s->hashes.count != 0) { - u32 hash = ptr_map_hash_key(e->ptr); - fr.hash_index = cast(MapIndex)(hash & (s->hashes.count-1)); - fr.entry_index = s->hashes.data[fr.hash_index]; - while (fr.entry_index != MAP_SENTINEL) { - if (&s->entries.data[fr.entry_index] == e) { - return fr; - } - fr.entry_prev = fr.entry_index; - fr.entry_index = s->entries.data[fr.entry_index].next; - } - } - return fr; + return -1; } template gb_internal bool ptr_set__full(PtrSet *s) { - return 0.75f * s->hashes.count <= s->entries.count; + return 0.75f * s->capacity <= s->count; } template -gb_internal void ptr_set_reset_entries(PtrSet *s) { - for (isize i = 0; i < s->hashes.count; i++) { - s->hashes.data[i] = MAP_SENTINEL; - } - for (isize i = 0; i < s->entries.count; i++) { - MapFindResult fr; - PtrSetEntry *e = &s->entries.data[i]; - e->next = MAP_SENTINEL; - fr = ptr_set__find_from_entry(s, e); - if (fr.entry_prev == MAP_SENTINEL) { - s->hashes[fr.hash_index] = cast(MapIndex)i; - } else { - s->entries[fr.entry_prev].next = cast(MapIndex)i; - } - } -} - -template -gb_internal void ptr_set_reserve(PtrSet *s, isize cap) { - if (s->entries.allocator.proc == nullptr) { - s->entries.allocator = 
ptr_set_allocator(); - } - array_reserve(&s->entries, cap); - if (s->entries.count*2 < s->hashes.count) { +gb_internal gb_inline void ptr_set_grow(PtrSet *old_set) { + if (old_set->capacity == 0) { + ptr_set_init(old_set); return; } - slice_resize(&s->hashes, s->entries.allocator, cap*2); - ptr_set_reset_entries(s); -} -template -gb_internal gb_inline void ptr_set_grow(PtrSet *s) { - isize new_count = gb_max(s->hashes.count<<1, 16); - ptr_set_reserve(s, new_count); + PtrSet new_set = {}; + ptr_set_init(&new_set, gb_max(old_set->capacity<<1, 16)); + + for (T ptr : *old_set) { + bool was_new = ptr_set_update(&new_set, ptr); + GB_ASSERT(!was_new); + } + GB_ASSERT(old_set->count == new_set.count); + + ptr_set_destroy(old_set); + + *old_set = new_set; } template gb_internal gb_inline bool ptr_set_exists(PtrSet *s, T ptr) { - isize index = ptr_set__find(s, ptr).entry_index; - return index != MAP_SENTINEL; + return ptr_set__find(s, ptr) >= 0; } -// Returns true if it already exists -template -gb_internal T ptr_set_add(PtrSet *s, T ptr) { - MapIndex index; - MapFindResult fr; - if (s->hashes.count == 0) { - ptr_set_grow(s); - } - fr = ptr_set__find(s, ptr); - if (fr.entry_index == MAP_SENTINEL) { - index = ptr_set__add_entry(s, ptr); - if (fr.entry_prev != MAP_SENTINEL) { - s->entries.data[fr.entry_prev].next = index; - } else { - s->hashes.data[fr.hash_index] = index; - } - } - if (ptr_set__full(s)) { - ptr_set_grow(s); - } - return ptr; -} template gb_internal bool ptr_set_update(PtrSet *s, T ptr) { // returns true if it previously existsed - bool exists = false; - MapIndex index; - MapFindResult fr; - if (s->hashes.count == 0) { + if (ptr_set_exists(s, ptr)) { + return true; + } + + if (s->keys == nullptr) { + ptr_set_init(s); + } else if (ptr_set__full(s)) { ptr_set_grow(s); } - fr = ptr_set__find(s, ptr); - if (fr.entry_index != MAP_SENTINEL) { - exists = true; - } else { - index = ptr_set__add_entry(s, ptr); - if (fr.entry_prev != MAP_SENTINEL) { - 
s->entries.data[fr.entry_prev].next = index; - } else { - s->hashes.data[fr.hash_index] = index; + GB_ASSERT(s->count < s->capacity); + GB_ASSERT(s->capacity >= 0); + + usize mask = s->capacity-1; + u32 hash = ptr_map_hash_key(ptr); + usize hash_index = (cast(usize)hash) & mask; + GB_ASSERT(hash_index < s->capacity); + for (usize i = 0; i < s->capacity; i++) { + T *key = &s->keys[hash_index]; + GB_ASSERT(*key != ptr); + if (*key == PtrSet::TOMBSTONE || *key == nullptr) { + *key = ptr; + s->count++; + return false; } + hash_index = (hash_index+1)&mask; } - if (ptr_set__full(s)) { - ptr_set_grow(s); - } - return exists; + + GB_PANIC("ptr set out of memory"); + return false; } - - template -gb_internal void ptr_set__erase(PtrSet *s, MapFindResult fr) { - MapFindResult last; - if (fr.entry_prev == MAP_SENTINEL) { - s->hashes.data[fr.hash_index] = s->entries.data[fr.entry_index].next; - } else { - s->entries.data[fr.entry_prev].next = s->entries.data[fr.entry_index].next; - } - if (cast(isize)fr.entry_index == s->entries.count-1) { - array_pop(&s->entries); - return; - } - s->entries.data[fr.entry_index] = s->entries.data[s->entries.count-1]; - last = ptr_set__find(s, s->entries.data[fr.entry_index].ptr); - if (last.entry_prev != MAP_SENTINEL) { - s->entries.data[last.entry_prev].next = fr.entry_index; - } else { - s->hashes.data[last.hash_index] = fr.entry_index; - } +gb_internal T ptr_set_add(PtrSet *s, T ptr) { + ptr_set_update(s, ptr); + return ptr; } + template gb_internal void ptr_set_remove(PtrSet *s, T ptr) { - MapFindResult fr = ptr_set__find(s, ptr); - if (fr.entry_index != MAP_SENTINEL) { - ptr_set__erase(s, fr); + isize index = ptr_set__find(s, ptr); + if (index >= 0) { + GB_ASSERT(s->count > 0); + s->keys[index] = PtrSet::TOMBSTONE; + s->count--; } } template gb_internal gb_inline void ptr_set_clear(PtrSet *s) { - array_clear(&s->entries); - for (isize i = 0; i < s->hashes.count; i++) { - s->hashes.data[i] = MAP_SENTINEL; + s->count = 0; + 
gb_zero_size(s->keys, s->capacity*gb_size_of(T)); +} + +template +struct PtrSetIterator { + PtrSet *set; + usize index; + + PtrSetIterator &operator++() noexcept { + for (;;) { + ++index; + if (set->capacity == index) { + return *this; + } + T key = set->keys[index]; + if (key != nullptr && key != PtrSet::TOMBSTONE) { + return *this; + } + } } -} + + bool operator==(PtrSetIterator const &other) const noexcept { + return this->set == other.set && this->index == other.index; + } + + + operator T *() const { + return &set->keys[index]; + } +}; template -gb_internal PtrSetEntry *begin(PtrSet &m) noexcept { - return m.entries.data; +gb_internal PtrSetIterator begin(PtrSet &set) noexcept { + usize index = 0; + while (index < set.capacity) { + T key = set.keys[index]; + if (key != nullptr && key != PtrSet::TOMBSTONE) { + break; + } + index++; + } + return PtrSetIterator{&set, index}; } template -gb_internal PtrSetEntry const *begin(PtrSet const &m) noexcept { - return m.entries.data; -} - - -template -gb_internal PtrSetEntry *end(PtrSet &m) noexcept { - return m.entries.data + m.entries.count; -} - -template -gb_internal PtrSetEntry const *end(PtrSet const &m) noexcept { - return m.entries.data + m.entries.count; +gb_internal PtrSetIterator end(PtrSet &set) noexcept { + return PtrSetIterator{&set, set.capacity}; } \ No newline at end of file diff --git a/src/threading.cpp b/src/threading.cpp index 27a17112e..bf298e024 100644 --- a/src/threading.cpp +++ b/src/threading.cpp @@ -699,13 +699,13 @@ extern "C" int __ulock_wake(uint32_t operation, void *addr, uint64_t wake_value) gb_internal void futex_signal(Futex *f) { for (;;) { int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, f, 0); - if (ret >= 0) { + if (ret == 0) { return; } - if (ret == EINTR || ret == EFAULT) { + if (ret == -EINTR || ret == -EFAULT) { continue; } - if (ret == ENOENT) { + if (ret == -ENOENT) { return; } GB_PANIC("Failed in futex wake!\n"); @@ -716,13 +716,13 @@ gb_internal void 
futex_broadcast(Futex *f) { for (;;) { enum { ULF_WAKE_ALL = 0x00000100 }; int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO | ULF_WAKE_ALL, f, 0); - if (ret >= 0) { + if (ret == 0) { return; } - if (ret == EINTR || ret == EFAULT) { + if (ret == -EINTR || ret == -EFAULT) { continue; } - if (ret == ENOENT) { + if (ret == -ENOENT) { return; } GB_PANIC("Failed in futex wake!\n"); } } @@ -732,16 +732,16 @@ gb_internal void futex_broadcast(Futex *f) { gb_internal void futex_wait(Futex *f, Footex val) { for (;;) { int ret = __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, f, val, 0); - if (ret >= 0) { + if (ret == 0) { if (*f != val) { return; } continue; } - if (ret == EINTR || ret == EFAULT) { - continue; + if (ret == -EINTR || ret == -EFAULT) { + continue; } - if (ret == ENOENT) { + if (ret == -ENOENT) { return; } From d4e18109da5fa051d689be84a6ecf1e77348c74e Mon Sep 17 00:00:00 2001 From: gingerBill Date: Wed, 4 Jan 2023 13:52:38 +0000 Subject: [PATCH 57/78] Move walking of dependencies for procedures to just before calculating the min dep set --- src/check_builtin.cpp | 3 +- src/check_decl.cpp | 31 ------------------ src/checker.cpp | 76 ++++++++++++++++++++++++++++++++++++------- src/checker.hpp | 5 +++ 4 files changed, 71 insertions(+), 44 deletions(-) diff --git a/src/check_builtin.cpp b/src/check_builtin.cpp index 606283c32..294bc7da8 100644 --- a/src/check_builtin.cpp +++ b/src/check_builtin.cpp @@ -1118,8 +1118,7 @@ gb_internal bool cache_load_file_directive(CheckerContext *c, Ast *call, String } }); - char *c_str = alloc_cstring(heap_allocator(), path); - defer (gb_free(heap_allocator(), c_str)); + char *c_str = alloc_cstring(temporary_allocator(), path); gbFile f = {}; if (cache == nullptr) { diff --git a/src/check_decl.cpp b/src/check_decl.cpp index 72c69b5dc..07b547feb 100644 --- a/src/check_decl.cpp +++ b/src/check_decl.cpp @@ -1576,36 +1576,5 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de
check_scope_usage(ctx->checker, ctx->scope); - if (decl->parent != nullptr) { - Scope *ps = decl->parent->scope; - if (ps->flags & (ScopeFlag_File & ScopeFlag_Pkg & ScopeFlag_Global)) { - return true; - } else { - // NOTE(bill): Add the dependencies from the procedure literal (lambda) - // But only at the procedure level - - rw_mutex_shared_lock(&decl->deps_mutex); - rw_mutex_lock(&decl->parent->deps_mutex); - - for (Entity *e : decl->deps) { - ptr_set_add(&decl->parent->deps, e); - } - - rw_mutex_unlock(&decl->parent->deps_mutex); - rw_mutex_shared_unlock(&decl->deps_mutex); - - - rw_mutex_shared_lock(&decl->type_info_deps_mutex); - rw_mutex_lock(&decl->parent->type_info_deps_mutex); - - for (Type *t : decl->type_info_deps) { - ptr_set_add(&decl->parent->type_info_deps, t); - } - - rw_mutex_unlock(&decl->parent->type_info_deps_mutex); - rw_mutex_shared_unlock(&decl->type_info_deps_mutex); - } - } - return true; } diff --git a/src/checker.cpp b/src/checker.cpp index b8709f15e..5fc9a5551 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -174,6 +174,12 @@ gb_internal void import_graph_node_swap(ImportGraphNode **data, isize i, isize j gb_internal void init_decl_info(DeclInfo *d, Scope *scope, DeclInfo *parent) { gb_zero_item(d); + if (parent) { + mutex_lock(&parent->next_mutex); + d->next_sibling = parent->next_child; + parent->next_child = d; + mutex_unlock(&parent->next_mutex); + } d->parent = parent; d->scope = scope; ptr_set_init(&d->deps, 0); @@ -5316,15 +5322,8 @@ gb_internal WORKER_TASK_PROC(check_proc_info_worker_proc) { return 1; } - -gb_internal void check_procedure_bodies(Checker *c) { - GB_ASSERT(c != nullptr); - - +gb_internal void check_init_worker_data(Checker *c) { u32 thread_count = cast(u32)global_thread_pool.threads.count; - if (!build_context.threaded_checker) { - thread_count = 1; - } check_procedure_bodies_worker_data = gb_alloc_array(permanent_allocator(), CheckProcedureBodyWorkerData, thread_count); @@ -5332,10 +5331,15 @@ gb_internal void 
check_procedure_bodies(Checker *c) { check_procedure_bodies_worker_data[i].c = c; map_init(&check_procedure_bodies_worker_data[i].untyped); } +} - defer (for (isize i = 0; i < thread_count; i++) { - map_destroy(&check_procedure_bodies_worker_data[i].untyped); - }); +gb_internal void check_procedure_bodies(Checker *c) { + GB_ASSERT(c != nullptr); + + u32 thread_count = cast(u32)global_thread_pool.threads.count; + if (!build_context.threaded_checker) { + thread_count = 1; + } if (thread_count == 1) { UntypedExprInfoMap *untyped = &check_procedure_bodies_worker_data[0].untyped; @@ -5636,6 +5640,50 @@ gb_internal void add_type_info_for_type_definitions(Checker *c) { } } +gb_internal void check_walk_all_dependencies(DeclInfo *decl) { + if (decl == nullptr) { + return; + } + for (DeclInfo *child = decl->next_child; child != nullptr; child = child->next_sibling) { + check_walk_all_dependencies(child); + } + if (decl->parent && decl->parent->entity && decl->parent->entity->kind == Entity_Procedure) { + Scope *ps = decl->parent->scope; + if (ps->flags & (ScopeFlag_File & ScopeFlag_Pkg & ScopeFlag_Global)) { + return; + } else { + // NOTE(bill): Add the dependencies from the procedure literal (lambda) + // But only at the procedure level + rw_mutex_shared_lock(&decl->deps_mutex); + rw_mutex_lock(&decl->parent->deps_mutex); + + for (Entity *e : decl->deps) { + ptr_set_add(&decl->parent->deps, e); + } + + rw_mutex_unlock(&decl->parent->deps_mutex); + rw_mutex_shared_unlock(&decl->deps_mutex); + + rw_mutex_shared_lock(&decl->type_info_deps_mutex); + rw_mutex_lock(&decl->parent->type_info_deps_mutex); + + for (Type *t : decl->type_info_deps) { + ptr_set_add(&decl->parent->type_info_deps, t); + } + + rw_mutex_unlock(&decl->parent->type_info_deps_mutex); + rw_mutex_shared_unlock(&decl->type_info_deps_mutex); + } + } +} + +gb_internal void check_update_dependency_tree_for_procedures(Checker *c) { + for (Entity *e : c->info.entities) { + DeclInfo *decl = e->decl_info; + 
check_walk_all_dependencies(decl); + } +} + gb_internal void check_parsed_files(Checker *c) { TIME_SECTION("map full filepaths to scope"); add_type_info_type(&c->builtin_ctx, t_invalid); @@ -5657,6 +5705,9 @@ gb_internal void check_parsed_files(Checker *c) { } } + TIME_SECTION("init worker data"); + check_init_worker_data(c); + TIME_SECTION("create file scopes"); check_create_file_scopes(c); @@ -5744,6 +5795,9 @@ gb_internal void check_parsed_files(Checker *c) { add_type_info_for_type_definitions(c); check_merge_queues_into_arrays(c); + TIME_SECTION("update dependency tree for procedures"); + check_update_dependency_tree_for_procedures(c); + TIME_SECTION("generate minimum dependency set"); generate_minimum_dependency_set(c, c->info.entry_point); diff --git a/src/checker.hpp b/src/checker.hpp index bb870e077..821d43922 100644 --- a/src/checker.hpp +++ b/src/checker.hpp @@ -159,6 +159,11 @@ char const *ProcCheckedState_strings[ProcCheckedState_COUNT] { // DeclInfo is used to store information of certain declarations to allow for "any order" usage struct DeclInfo { DeclInfo * parent; // NOTE(bill): only used for procedure literals at the moment + + BlockingMutex next_mutex; + DeclInfo * next_child; + DeclInfo * next_sibling; + Scope * scope; Entity *entity; From faa735d0c745ddc0b550e2a54f10588c873841b7 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Wed, 4 Jan 2023 15:15:12 +0000 Subject: [PATCH 58/78] Localize gen_types mutexes --- src/check_type.cpp | 117 ++++++++++++++++++++++++--------------------- src/checker.hpp | 9 +++- src/path.cpp | 12 ++--- 3 files changed, 74 insertions(+), 64 deletions(-) diff --git a/src/check_type.cpp b/src/check_type.cpp index 05fdbf4d3..fd4e965d4 100644 --- a/src/check_type.cpp +++ b/src/check_type.cpp @@ -257,63 +257,67 @@ gb_internal bool check_custom_align(CheckerContext *ctx, Ast *node, i64 *align_) gb_internal Entity *find_polymorphic_record_entity(CheckerContext *ctx, Type *original_type, isize param_count, Array const 
&ordered_operands, bool *failure) { - mutex_lock(&ctx->info->gen_types_mutex); - defer (mutex_unlock(&ctx->info->gen_types_mutex)); + rw_mutex_shared_lock(&ctx->info->gen_types_mutex); // @@global auto *found_gen_types = map_get(&ctx->info->gen_types, original_type); - if (found_gen_types != nullptr) { - // GB_ASSERT_MSG(ordered_operands.count >= param_count, "%td >= %td", ordered_operands.count, param_count); + if (found_gen_types == nullptr) { + rw_mutex_shared_unlock(&ctx->info->gen_types_mutex); // @@global + return nullptr; + } - for_array(i, *found_gen_types) { - Entity *e = (*found_gen_types)[i]; - Type *t = base_type(e->type); - TypeTuple *tuple = get_record_polymorphic_params(t); - GB_ASSERT(param_count == tuple->variables.count); + rw_mutex_shared_lock(&found_gen_types->mutex); // @@local + defer (rw_mutex_shared_unlock(&found_gen_types->mutex)); // @@local - bool skip = false; + rw_mutex_shared_unlock(&ctx->info->gen_types_mutex); // @@global - for (isize j = 0; j < param_count; j++) { - Entity *p = tuple->variables[j]; - Operand o = {}; - if (j < ordered_operands.count) { - o = ordered_operands[j]; - } - if (o.expr == nullptr) { - continue; - } - Entity *oe = entity_of_node(o.expr); - if (p == oe) { - // NOTE(bill): This is the same type, make sure that it will be be same thing and use that - // Saves on a lot of checking too below - continue; - } + for (Entity *e : found_gen_types->types) { + Type *t = base_type(e->type); + TypeTuple *tuple = get_record_polymorphic_params(t); + GB_ASSERT(param_count == tuple->variables.count); - if (p->kind == Entity_TypeName) { - if (is_type_polymorphic(o.type)) { - // NOTE(bill): Do not add polymorphic version to the gen_types - skip = true; - break; - } - if (!are_types_identical(o.type, p->type)) { - skip = true; - break; - } - } else if (p->kind == Entity_Constant) { - if (!compare_exact_values(Token_CmpEq, o.value, p->Constant.value)) { - skip = true; - break; - } - if (!are_types_identical(o.type, p->type)) { - 
skip = true; - break; - } - } else { - GB_PANIC("Unknown entity kind"); - } + bool skip = false; + + for (isize j = 0; j < param_count; j++) { + Entity *p = tuple->variables[j]; + Operand o = {}; + if (j < ordered_operands.count) { + o = ordered_operands[j]; } - if (!skip) { - return e; + if (o.expr == nullptr) { + continue; } + Entity *oe = entity_of_node(o.expr); + if (p == oe) { + // NOTE(bill): This is the same type, make sure that it will be be same thing and use that + // Saves on a lot of checking too below + continue; + } + + if (p->kind == Entity_TypeName) { + if (is_type_polymorphic(o.type)) { + // NOTE(bill): Do not add polymorphic version to the gen_types + skip = true; + break; + } + if (!are_types_identical(o.type, p->type)) { + skip = true; + break; + } + } else if (p->kind == Entity_Constant) { + if (!compare_exact_values(Token_CmpEq, o.value, p->Constant.value)) { + skip = true; + break; + } + if (!are_types_identical(o.type, p->type)) { + skip = true; + break; + } + } else { + GB_PANIC("Unknown entity kind"); + } + } + if (!skip) { + return e; } } return nullptr; @@ -346,16 +350,19 @@ gb_internal void add_polymorphic_record_entity(CheckerContext *ctx, Ast *node, T // TODO(bill): Is this even correct? Or should the metadata be copied? 
e->TypeName.objc_metadata = original_type->Named.type_name->TypeName.objc_metadata; - mutex_lock(&ctx->info->gen_types_mutex); + rw_mutex_lock(&ctx->info->gen_types_mutex); auto *found_gen_types = map_get(&ctx->info->gen_types, original_type); if (found_gen_types) { - array_add(found_gen_types, e); + rw_mutex_lock(&found_gen_types->mutex); + array_add(&found_gen_types->types, e); + rw_mutex_unlock(&found_gen_types->mutex); } else { - auto array = array_make(heap_allocator()); - array_add(&array, e); - map_set(&ctx->info->gen_types, original_type, array); + GenTypesData gen_types = {}; + gen_types.types = array_make(heap_allocator()); + array_add(&gen_types.types, e); + map_set(&ctx->info->gen_types, original_type, gen_types); } - mutex_unlock(&ctx->info->gen_types_mutex); + rw_mutex_unlock(&ctx->info->gen_types_mutex); } gb_internal Type *check_record_polymorphic_params(CheckerContext *ctx, Ast *polymorphic_params, diff --git a/src/checker.hpp b/src/checker.hpp index 821d43922..92926689e 100644 --- a/src/checker.hpp +++ b/src/checker.hpp @@ -327,6 +327,11 @@ struct GenProcsData { RwMutex mutex; }; +struct GenTypesData { + Array types; + RwMutex mutex; +}; + // CheckerInfo stores all the symbol information for a type-checked program struct CheckerInfo { Checker *checker; @@ -364,9 +369,9 @@ struct CheckerInfo { RecursiveMutex lazy_mutex; // Mutex required for lazy type checking of specific files BlockingMutex gen_procs_mutex; - RecursiveMutex gen_types_mutex; + RwMutex gen_types_mutex; PtrMap gen_procs; // Key: Ast * | Identifier -> Entity - PtrMap > gen_types; + PtrMap gen_types; BlockingMutex type_info_mutex; // NOT recursive Array type_info_types; diff --git a/src/path.cpp b/src/path.cpp index 500a40cc2..4b426fc87 100644 --- a/src/path.cpp +++ b/src/path.cpp @@ -222,7 +222,6 @@ gb_internal i64 get_file_size(String path) { gb_internal ReadDirectoryError read_directory(String path, Array *fi) { GB_ASSERT(fi != nullptr); - gbAllocator a = heap_allocator(); while 
(path.len > 0) { Rune end = path[path.len-1]; @@ -239,9 +238,7 @@ gb_internal ReadDirectoryError read_directory(String path, Array *fi) return ReadDirectory_InvalidPath; } { - char *c_str = alloc_cstring(a, path); - defer (gb_free(a, c_str)); - + char *c_str = alloc_cstring(temporary_allocator(), path); gbFile f = {}; gbFileError file_err = gb_file_open(&f, c_str); defer (gb_file_close(&f)); @@ -258,6 +255,7 @@ gb_internal ReadDirectoryError read_directory(String path, Array *fi) } + gbAllocator a = heap_allocator(); char *new_path = gb_alloc_array(a, char, path.len+3); defer (gb_free(a, new_path)); @@ -280,8 +278,8 @@ gb_internal ReadDirectoryError read_directory(String path, Array *fi) do { wchar_t *filename_w = file_data.cFileName; - i64 size = cast(i64)file_data.nFileSizeLow; - size |= (cast(i64)file_data.nFileSizeHigh) << 32; + u64 size = cast(u64)file_data.nFileSizeLow; + size |= (cast(u64)file_data.nFileSizeHigh) << 32; String name = string16_to_string(a, make_string16_c(filename_w)); if (name == "." 
|| name == "..") { gb_free(a, name.text); @@ -299,7 +297,7 @@ gb_internal ReadDirectoryError read_directory(String path, Array *fi) FileInfo info = {}; info.name = name; info.fullpath = path_to_full_path(a, filepath); - info.size = size; + info.size = cast(i64)size; info.is_dir = (file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0; array_add(fi, info); } while (FindNextFileW(find_file, &file_data)); From 12e42d92d30b3a9cf4d7bb7bb17a2e031285073b Mon Sep 17 00:00:00 2001 From: gingerBill Date: Wed, 4 Jan 2023 15:35:24 +0000 Subject: [PATCH 59/78] Localize `GenProcsData` to the entity itself --- src/check_expr.cpp | 29 ++++++++++++++--------------- src/checker.cpp | 4 ++-- src/checker.hpp | 2 -- src/entity.cpp | 5 ++++- src/llvm_backend_stmt.cpp | 6 ++---- 5 files changed, 22 insertions(+), 24 deletions(-) diff --git a/src/check_expr.cpp b/src/check_expr.cpp index 5f28504a2..bce16f304 100644 --- a/src/check_expr.cpp +++ b/src/check_expr.cpp @@ -366,8 +366,6 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E GB_ASSERT(dst == nullptr); } - // MUTEX_GUARD(&info->gen_procs_mutex); - if (!src->Proc.is_polymorphic || src->Proc.is_poly_specialized) { return false; } @@ -434,20 +432,21 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E GenProcsData *gen_procs = nullptr; - // @@GPM ////////////////////////// - mutex_lock(&info->gen_procs_mutex); - /////////////////////////////////// - auto *found = map_get(&info->gen_procs, base_entity->identifier.load()); - if (found) { - gen_procs = *found; + GB_ASSERT(base_entity->identifier.load()->kind == Ast_Ident); + GB_ASSERT(base_entity->kind == Entity_Procedure); + + + mutex_lock(&base_entity->Procedure.gen_procs_mutex); // @entity-mutex + + gen_procs = base_entity->Procedure.gen_procs; + if (gen_procs) { rw_mutex_shared_lock(&gen_procs->mutex); // @local-mutex + for (Entity *other : gen_procs->procs) { Type *pt = base_type(other->type); if 
(are_types_identical(pt, final_proc_type)) { rw_mutex_shared_unlock(&gen_procs->mutex); // @local-mutex - // @@GPM //////////////////////////// - mutex_unlock(&info->gen_procs_mutex); - ///////////////////////////////////// + mutex_unlock(&base_entity->Procedure.gen_procs_mutex); // @entity-mutex if (poly_proc_data) { poly_proc_data->gen_entity = other; @@ -455,15 +454,15 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E return true; } } + rw_mutex_shared_unlock(&gen_procs->mutex); // @local-mutex } else { gen_procs = gb_alloc_item(permanent_allocator(), GenProcsData); gen_procs->procs.allocator = heap_allocator(); - map_set(&info->gen_procs, base_entity->identifier.load(), gen_procs); + base_entity->Procedure.gen_procs = gen_procs; } - // @@GPM //////////////////////////// - mutex_unlock(&info->gen_procs_mutex); - ///////////////////////////////////// + + mutex_unlock(&base_entity->Procedure.gen_procs_mutex); // @entity-mutex { // LEAK TODO(bill): This is technically a memory leak as it has to generate the type twice diff --git a/src/checker.cpp b/src/checker.cpp index 5fc9a5551..0bebce232 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -1143,7 +1143,7 @@ gb_internal void init_checker_info(CheckerInfo *i) { array_init(&i->entities, a); map_init(&i->global_untyped); string_map_init(&i->foreigns); - map_init(&i->gen_procs); + // map_init(&i->gen_procs); map_init(&i->gen_types); array_init(&i->type_info_types, a); map_init(&i->type_info_map); @@ -1172,7 +1172,7 @@ gb_internal void destroy_checker_info(CheckerInfo *i) { array_free(&i->entities); map_destroy(&i->global_untyped); string_map_destroy(&i->foreigns); - map_destroy(&i->gen_procs); + // map_destroy(&i->gen_procs); map_destroy(&i->gen_types); array_free(&i->type_info_types); map_destroy(&i->type_info_map); diff --git a/src/checker.hpp b/src/checker.hpp index 92926689e..8b8819d97 100644 --- a/src/checker.hpp +++ b/src/checker.hpp @@ -368,9 +368,7 @@ struct CheckerInfo { 
RecursiveMutex lazy_mutex; // Mutex required for lazy type checking of specific files - BlockingMutex gen_procs_mutex; RwMutex gen_types_mutex; - PtrMap gen_procs; // Key: Ast * | Identifier -> Entity PtrMap gen_types; BlockingMutex type_info_mutex; // NOT recursive diff --git a/src/entity.cpp b/src/entity.cpp index f82a2fb05..b92ba825f 100644 --- a/src/entity.cpp +++ b/src/entity.cpp @@ -130,7 +130,7 @@ enum EntityConstantFlags : u32 { EntityConstantFlag_ImplicitEnumValue = 1<<0, }; -enum ProcedureOptimizationMode : u32 { +enum ProcedureOptimizationMode : u8 { ProcedureOptimizationMode_Default, ProcedureOptimizationMode_None, ProcedureOptimizationMode_Minimal, @@ -233,6 +233,9 @@ struct Entity { String link_name; String link_prefix; DeferredProcedure deferred_procedure; + + struct GenProcsData *gen_procs; + BlockingMutex gen_procs_mutex; ProcedureOptimizationMode optimization_mode; bool is_foreign : 1; bool is_export : 1; diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp index 1660d3487..0e6f75118 100644 --- a/src/llvm_backend_stmt.cpp +++ b/src/llvm_backend_stmt.cpp @@ -50,13 +50,11 @@ gb_internal void lb_build_constant_value_decl(lbProcedure *p, AstValueDecl *vd) continue; // It's an alias } - CheckerInfo *info = p->module->info; DeclInfo *decl = decl_info_of_entity(e); ast_node(pl, ProcLit, decl->proc_lit); if (pl->body != nullptr) { - auto *found = map_get(&info->gen_procs, ident); - if (found) { - GenProcsData *gpd = *found; + GenProcsData *gpd = e->Procedure.gen_procs; + if (gpd) { rw_mutex_shared_lock(&gpd->mutex); for (Entity *e : gpd->procs) { if (!ptr_set_exists(min_dep_set, e)) { From 8a99b8af3e875141156d359026b785fcfd284f57 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Wed, 4 Jan 2023 15:55:10 +0000 Subject: [PATCH 60/78] Narrow mutex usage --- src/check_expr.cpp | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/check_expr.cpp b/src/check_expr.cpp index bce16f304..e9e61486e 100644 --- a/src/check_expr.cpp 
+++ b/src/check_expr.cpp @@ -435,18 +435,17 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E GB_ASSERT(base_entity->identifier.load()->kind == Ast_Ident); GB_ASSERT(base_entity->kind == Entity_Procedure); - mutex_lock(&base_entity->Procedure.gen_procs_mutex); // @entity-mutex - gen_procs = base_entity->Procedure.gen_procs; if (gen_procs) { rw_mutex_shared_lock(&gen_procs->mutex); // @local-mutex + mutex_unlock(&base_entity->Procedure.gen_procs_mutex); // @entity-mutex + for (Entity *other : gen_procs->procs) { Type *pt = base_type(other->type); if (are_types_identical(pt, final_proc_type)) { rw_mutex_shared_unlock(&gen_procs->mutex); // @local-mutex - mutex_unlock(&base_entity->Procedure.gen_procs_mutex); // @entity-mutex if (poly_proc_data) { poly_proc_data->gen_entity = other; @@ -460,9 +459,9 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E gen_procs = gb_alloc_item(permanent_allocator(), GenProcsData); gen_procs->procs.allocator = heap_allocator(); base_entity->Procedure.gen_procs = gen_procs; + mutex_unlock(&base_entity->Procedure.gen_procs_mutex); // @entity-mutex } - mutex_unlock(&base_entity->Procedure.gen_procs_mutex); // @entity-mutex { // LEAK TODO(bill): This is technically a memory leak as it has to generate the type twice From 9455918eec18f3101d040adea09a628f684563f9 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Wed, 4 Jan 2023 22:20:18 +0000 Subject: [PATCH 61/78] Fix min dep type info problem caused by const ref of `map_set` --- src/check_decl.cpp | 29 +++++++++++++++++++++++++++++ src/checker.cpp | 5 ++++- src/llvm_backend_type.cpp | 2 +- 3 files changed, 34 insertions(+), 2 deletions(-) diff --git a/src/check_decl.cpp b/src/check_decl.cpp index 07b547feb..644771dcd 100644 --- a/src/check_decl.cpp +++ b/src/check_decl.cpp @@ -1576,5 +1576,34 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de check_scope_usage(ctx->checker, ctx->scope); + // if 
(decl->parent) { + // Scope *ps = decl->parent->scope; + // if (ps->flags & (ScopeFlag_File & ScopeFlag_Pkg & ScopeFlag_Global)) { + // return true; + // } else { + // // NOTE(bill): Add the dependencies from the procedure literal (lambda) + // // But only at the procedure level + // rw_mutex_shared_lock(&decl->deps_mutex); + // rw_mutex_lock(&decl->parent->deps_mutex); + + // for (Entity *e : decl->deps) { + // ptr_set_add(&decl->parent->deps, e); + // } + + // rw_mutex_unlock(&decl->parent->deps_mutex); + // rw_mutex_shared_unlock(&decl->deps_mutex); + + // rw_mutex_shared_lock(&decl->type_info_deps_mutex); + // rw_mutex_lock(&decl->parent->type_info_deps_mutex); + + // for (Type *t : decl->type_info_deps) { + // ptr_set_add(&decl->parent->type_info_deps, t); + // } + + // rw_mutex_unlock(&decl->parent->type_info_deps_mutex); + // rw_mutex_shared_unlock(&decl->type_info_deps_mutex); + // } + // } + return true; } diff --git a/src/checker.cpp b/src/checker.cpp index 0bebce232..8cb6ea99d 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -2030,7 +2030,10 @@ gb_internal void add_min_dep_type_info(Checker *c, Type *t) { // Type already exists; return; } - map_set(set, ti_index, set->entries.count); + // IMPORTANT NOTE(bill): this must be copied as `map_set` takes a const ref + // and effectively assigns the `+1` of the value + isize const count = set->entries.count; + map_set(set, ti_index, count); // Add nested types if (t->kind == Type_Named) { diff --git a/src/llvm_backend_type.cpp b/src/llvm_backend_type.cpp index b9b450404..e2b5c9dd0 100644 --- a/src/llvm_backend_type.cpp +++ b/src/llvm_backend_type.cpp @@ -186,7 +186,7 @@ gb_internal void lb_setup_type_info_data(lbProcedure *p) { // NOTE(bill): Setup if (entry_index <= 0) { continue; } - + if (entries_handled[entry_index]) { continue; } From 291ea33939cc62420e0a3e4ae767ff7996b25ddc Mon Sep 17 00:00:00 2001 From: gingerBill Date: Wed, 4 Jan 2023 22:34:59 +0000 Subject: [PATCH 62/78] Initialize `TypePath` 
constructor like to keep the `Futex` constructor happy --- src/types.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/types.cpp b/src/types.cpp index ec7adab5a..5ff6d7261 100644 --- a/src/types.cpp +++ b/src/types.cpp @@ -3363,7 +3363,7 @@ gb_internal i64 type_size_of(Type *t) { } else if (t->kind != Type_Basic && t->cached_size >= 0) { return t->cached_size; } - TypePath path = {0}; + TypePath path{}; type_path_init(&path); t->cached_size = type_size_of_internal(t, &path); type_path_free(&path); @@ -3381,7 +3381,7 @@ gb_internal i64 type_align_of(Type *t) { return t->cached_align; } - TypePath path = {0}; + TypePath path{}; type_path_init(&path); t->cached_align = type_align_of_internal(t, &path); type_path_free(&path); From be23d83fc8a940de98d372276d475372e61b4bf2 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Thu, 5 Jan 2023 00:47:09 +0000 Subject: [PATCH 63/78] Remove unnecessary check is `align_formula*` et al --- src/common_memory.cpp | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/src/common_memory.cpp b/src/common_memory.cpp index cdf2281fe..4c77521e1 100644 --- a/src/common_memory.cpp +++ b/src/common_memory.cpp @@ -14,25 +14,16 @@ gb_internal gb_inline U const &bit_cast(V const &v) { return reinterpret_cast 0) { - i64 result = size + align-1; - return result - result%align; - } - return size; + i64 result = size + align-1; + return result - (i64)((u64)result%(u64)align); } gb_internal gb_inline isize align_formula_isize(isize size, isize align) { - if (align > 0) { - isize result = size + align-1; - return result - result%align; - } - return size; + isize result = size + align-1; + return result - (isize)((usize)result%(usize)align); } gb_internal gb_inline void *align_formula_ptr(void *ptr, isize align) { - if (align > 0) { - uintptr result = (cast(uintptr)ptr) + align-1; - return (void *)(result - result%align); - } - return ptr; + uintptr result = (cast(uintptr)ptr) + align-1; + 
return (void *)(result - result%align); } From bbb2164e38e3930cb79c5e7bc61b421e38131361 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Thu, 5 Jan 2023 01:25:37 +0000 Subject: [PATCH 64/78] Inline map gets; cast explicitly on TOMBSTONE checking --- src/ptr_map.cpp | 27 +++++++++++++++++++-------- src/ptr_set.cpp | 10 +++++----- src/string_map.cpp | 15 ++++++++++++--- 3 files changed, 36 insertions(+), 16 deletions(-) diff --git a/src/ptr_map.cpp b/src/ptr_map.cpp index 8869bf3fe..c33bf9ffb 100644 --- a/src/ptr_map.cpp +++ b/src/ptr_map.cpp @@ -112,11 +112,12 @@ gb_internal MapFindResult map__find(PtrMap *h, K key) { fr.hash_index = cast(MapIndex)(hash & (h->hashes.count-1)); fr.entry_index = h->hashes.data[fr.hash_index]; while (fr.entry_index != MAP_SENTINEL) { - if (h->entries.data[fr.entry_index].key == key) { + auto *entry = &h->entries.data[fr.entry_index]; + if (entry->key == key) { return fr; } fr.entry_prev = fr.entry_index; - fr.entry_index = h->entries.data[fr.entry_index].next; + fr.entry_index = entry->next; } return fr; } @@ -190,18 +191,28 @@ gb_internal void map_rehash(PtrMap *h, isize new_count) { template gb_internal V *map_get(PtrMap *h, K key) { - MapIndex index = map__find(h, key).entry_index; - if (index != MAP_SENTINEL) { - return &h->entries.data[index].value; + MapFindResult fr = {MAP_SENTINEL, MAP_SENTINEL, MAP_SENTINEL}; + if (h->hashes.count != 0) { + u32 hash = ptr_map_hash_key(key); + fr.hash_index = cast(MapIndex)(hash & (h->hashes.count-1)); + fr.entry_index = h->hashes.data[fr.hash_index]; + while (fr.entry_index != MAP_SENTINEL) { + auto *entry = &h->entries.data[fr.entry_index]; + if (entry->key == key) { + return &entry->value; + } + fr.entry_prev = fr.entry_index; + fr.entry_index = entry->next; + } } return nullptr; } template gb_internal V &map_must_get(PtrMap *h, K key) { - MapIndex index = map__find(h, key).entry_index; - GB_ASSERT(index != MAP_SENTINEL); - return h->entries.data[index].value; + V *ptr = map_get(h, key); + 
GB_ASSERT(ptr != nullptr); + return *ptr; } template diff --git a/src/ptr_set.cpp b/src/ptr_set.cpp index 8be2b0524..019ede8a5 100644 --- a/src/ptr_set.cpp +++ b/src/ptr_set.cpp @@ -12,7 +12,7 @@ struct TypeIsPointer { template struct PtrSet { static_assert(TypeIsPointer::value, "PtrSet::T must be a pointer"); - static constexpr T TOMBSTONE = (T)(~uintptr(0)); + static constexpr uintptr TOMBSTONE = ~(uintptr)(0ull); T * keys; usize count; @@ -133,7 +133,7 @@ gb_internal bool ptr_set_update(PtrSet *s, T ptr) { // returns true if it pre for (usize i = 0; i < s->capacity; i++) { T *key = &s->keys[hash_index]; GB_ASSERT(*key != ptr); - if (*key == PtrSet::TOMBSTONE || *key == nullptr) { + if (*key == (T)PtrSet::TOMBSTONE || *key == nullptr) { *key = ptr; s->count++; return false; @@ -157,7 +157,7 @@ gb_internal void ptr_set_remove(PtrSet *s, T ptr) { isize index = ptr_set__find(s, ptr); if (index >= 0) { GB_ASSERT(s->count > 0); - s->keys[index] = PtrSet::TOMBSTONE; + s->keys[index] = (T)PtrSet::TOMBSTONE; s->count--; } } @@ -180,7 +180,7 @@ struct PtrSetIterator { return *this; } T key = set->keys[index]; - if (key != nullptr && key != PtrSet::TOMBSTONE) { + if (key != nullptr && key != (T)PtrSet::TOMBSTONE) { return *this; } } @@ -202,7 +202,7 @@ gb_internal PtrSetIterator begin(PtrSet &set) noexcept { usize index = 0; while (index < set.capacity) { T key = set.keys[index]; - if (key != nullptr && key != PtrSet::TOMBSTONE) { + if (key != nullptr && key != (T)PtrSet::TOMBSTONE) { break; } index++; diff --git a/src/string_map.cpp b/src/string_map.cpp index 74a16de73..facd00bb0 100644 --- a/src/string_map.cpp +++ b/src/string_map.cpp @@ -180,9 +180,18 @@ gb_internal void string_map_rehash(StringMap *h, isize new_count) { template gb_internal T *string_map_get(StringMap *h, StringHashKey const &key) { - isize index = string_map__find(h, key).entry_index; - if (index != MAP_SENTINEL) { - return &h->entries.data[index].value; + MapFindResult fr = {MAP_SENTINEL, 
MAP_SENTINEL, MAP_SENTINEL}; + if (h->hashes.count != 0) { + fr.hash_index = cast(MapIndex)(key.hash & (h->hashes.count-1)); + fr.entry_index = h->hashes.data[fr.hash_index]; + while (fr.entry_index != MAP_SENTINEL) { + auto *entry = &h->entries.data[fr.entry_index]; + if (string_hash_key_equal(entry->key, key)) { + return &entry->value; + } + fr.entry_prev = fr.entry_index; + fr.entry_index = entry->next; + } } return nullptr; } From 1517f1d7793c8985664600a820e3434dfdf83810 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Thu, 5 Jan 2023 11:54:21 +0000 Subject: [PATCH 65/78] Add uncomment `add_type_info_type` calls for type assertions --- src/check_decl.cpp | 60 +++++++++++++++++++++++--------------------- src/check_expr.cpp | 8 +++--- src/checker.cpp | 38 ++++------------------------ src/llvm_backend.cpp | 1 + src/ptr_map.cpp | 29 +++++++++++++++++++++ 5 files changed, 71 insertions(+), 65 deletions(-) diff --git a/src/check_decl.cpp b/src/check_decl.cpp index 644771dcd..d4ae9c59d 100644 --- a/src/check_decl.cpp +++ b/src/check_decl.cpp @@ -1411,6 +1411,37 @@ end:; } +gb_internal void add_deps_from_child_to_parent(DeclInfo *decl) { + if (decl && decl->parent) { + Scope *ps = decl->parent->scope; + if (ps->flags & (ScopeFlag_File & ScopeFlag_Pkg & ScopeFlag_Global)) { + return; + } else { + // NOTE(bill): Add the dependencies from the procedure literal (lambda) + // But only at the procedure level + rw_mutex_shared_lock(&decl->deps_mutex); + rw_mutex_lock(&decl->parent->deps_mutex); + + for (Entity *e : decl->deps) { + ptr_set_add(&decl->parent->deps, e); + } + + rw_mutex_unlock(&decl->parent->deps_mutex); + rw_mutex_shared_unlock(&decl->deps_mutex); + + rw_mutex_shared_lock(&decl->type_info_deps_mutex); + rw_mutex_lock(&decl->parent->type_info_deps_mutex); + + for (Type *t : decl->type_info_deps) { + ptr_set_add(&decl->parent->type_info_deps, t); + } + + rw_mutex_unlock(&decl->parent->type_info_deps_mutex); + rw_mutex_shared_unlock(&decl->type_info_deps_mutex); + 
} + } +} + struct ProcUsingVar { Entity *e; Entity *uvar; @@ -1576,34 +1607,7 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de check_scope_usage(ctx->checker, ctx->scope); - // if (decl->parent) { - // Scope *ps = decl->parent->scope; - // if (ps->flags & (ScopeFlag_File & ScopeFlag_Pkg & ScopeFlag_Global)) { - // return true; - // } else { - // // NOTE(bill): Add the dependencies from the procedure literal (lambda) - // // But only at the procedure level - // rw_mutex_shared_lock(&decl->deps_mutex); - // rw_mutex_lock(&decl->parent->deps_mutex); - - // for (Entity *e : decl->deps) { - // ptr_set_add(&decl->parent->deps, e); - // } - - // rw_mutex_unlock(&decl->parent->deps_mutex); - // rw_mutex_shared_unlock(&decl->deps_mutex); - - // rw_mutex_shared_lock(&decl->type_info_deps_mutex); - // rw_mutex_lock(&decl->parent->type_info_deps_mutex); - - // for (Type *t : decl->type_info_deps) { - // ptr_set_add(&decl->parent->type_info_deps, t); - // } - - // rw_mutex_unlock(&decl->parent->type_info_deps_mutex); - // rw_mutex_shared_unlock(&decl->type_info_deps_mutex); - // } - // } + add_deps_from_child_to_parent(decl); return true; } diff --git a/src/check_expr.cpp b/src/check_expr.cpp index e9e61486e..e0519d26b 100644 --- a/src/check_expr.cpp +++ b/src/check_expr.cpp @@ -8777,8 +8777,8 @@ gb_internal ExprKind check_type_assertion(CheckerContext *c, Operand *o, Ast *no return kind; } - // add_type_info_type(c, o->type); - // add_type_info_type(c, bsrc->Union.variants[0]); + add_type_info_type(c, o->type); + add_type_info_type(c, bsrc->Union.variants[0]); o->type = bsrc->Union.variants[0]; o->mode = Addressing_OptionalOk; @@ -8810,8 +8810,8 @@ gb_internal ExprKind check_type_assertion(CheckerContext *c, Operand *o, Ast *no return kind; } - // add_type_info_type(c, o->type); - // add_type_info_type(c, t); + add_type_info_type(c, o->type); + add_type_info_type(c, t); o->type = t; o->mode = Addressing_OptionalOk; diff --git 
a/src/checker.cpp b/src/checker.cpp index 8cb6ea99d..a2ed73119 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -2026,14 +2026,13 @@ gb_internal void add_min_dep_type_info(Checker *c, Type *t) { ti_index = type_info_index(&c->info, t, false); } GB_ASSERT(ti_index >= 0); - if (map_get(set, ti_index)) { - // Type already exists; - return; - } // IMPORTANT NOTE(bill): this must be copied as `map_set` takes a const ref // and effectively assigns the `+1` of the value isize const count = set->entries.count; - map_set(set, ti_index, count); + if (map_set_if_not_previously_exists(set, ti_index, count)) { + // Type already exists; + return; + } // Add nested types if (t->kind == Type_Named) { @@ -5650,34 +5649,7 @@ gb_internal void check_walk_all_dependencies(DeclInfo *decl) { for (DeclInfo *child = decl->next_child; child != nullptr; child = child->next_sibling) { check_walk_all_dependencies(child); } - if (decl->parent && decl->parent->entity && decl->parent->entity->kind == Entity_Procedure) { - Scope *ps = decl->parent->scope; - if (ps->flags & (ScopeFlag_File & ScopeFlag_Pkg & ScopeFlag_Global)) { - return; - } else { - // NOTE(bill): Add the dependencies from the procedure literal (lambda) - // But only at the procedure level - rw_mutex_shared_lock(&decl->deps_mutex); - rw_mutex_lock(&decl->parent->deps_mutex); - - for (Entity *e : decl->deps) { - ptr_set_add(&decl->parent->deps, e); - } - - rw_mutex_unlock(&decl->parent->deps_mutex); - rw_mutex_shared_unlock(&decl->deps_mutex); - - rw_mutex_shared_lock(&decl->type_info_deps_mutex); - rw_mutex_lock(&decl->parent->type_info_deps_mutex); - - for (Type *t : decl->type_info_deps) { - ptr_set_add(&decl->parent->type_info_deps, t); - } - - rw_mutex_unlock(&decl->parent->type_info_deps_mutex); - rw_mutex_shared_unlock(&decl->type_info_deps_mutex); - } - } + add_deps_from_child_to_parent(decl); } gb_internal void check_update_dependency_tree_for_procedures(Checker *c) { diff --git a/src/llvm_backend.cpp 
b/src/llvm_backend.cpp index 304e5ef36..192e5cc56 100644 --- a/src/llvm_backend.cpp +++ b/src/llvm_backend.cpp @@ -1920,6 +1920,7 @@ gb_internal void lb_generate_code(lbGenerator *gen) { if (!ptr_set_exists(min_dep_set, e)) { continue; } + DeclInfo *decl = decl_info_of_entity(e); if (decl == nullptr) { continue; diff --git a/src/ptr_map.cpp b/src/ptr_map.cpp index c33bf9ffb..89d2cbf9d 100644 --- a/src/ptr_map.cpp +++ b/src/ptr_map.cpp @@ -52,6 +52,7 @@ template gb_internal void map_init (PtrMap< template gb_internal void map_destroy (PtrMap *h); template gb_internal V * map_get (PtrMap *h, K key); template gb_internal void map_set (PtrMap *h, K key, V const &value); +template gb_internal bool map_set_if_not_previously_exists(PtrMap *h, K key, V const &value); // returns true if it previously existed template gb_internal void map_remove (PtrMap *h, K key); template gb_internal void map_clear (PtrMap *h); template gb_internal void map_grow (PtrMap *h); @@ -240,6 +241,34 @@ gb_internal void map_set(PtrMap *h, K key, V const &value) { } } +// returns true if it previously existed +template +gb_internal bool map_set_if_not_previously_exists(PtrMap *h, K key, V const &value) { + MapIndex index; + MapFindResult fr; + if (h->hashes.count == 0) { + map_grow(h); + } + fr = map__find(h, key); + if (fr.entry_index != MAP_SENTINEL) { + return true; + } else { + index = map__add_entry(h, key); + if (fr.entry_prev != MAP_SENTINEL) { + h->entries.data[fr.entry_prev].next = index; + } else { + h->hashes.data[fr.hash_index] = index; + } + } + h->entries.data[index].value = value; + + if (map__full(h)) { + map_grow(h); + } + return false; +} + + template gb_internal void map__erase(PtrMap *h, MapFindResult const &fr) { MapFindResult last; From 213a0499a1964e0bc5d2c48cd3b4450b45f59314 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Thu, 5 Jan 2023 12:29:16 +0000 Subject: [PATCH 66/78] Begin multithreading the llvm backend when `-use-separate-modules` is enabled --- 
src/llvm_backend.cpp | 46 ++++++++++++++++++++++++------------ src/llvm_backend.hpp | 3 +++ src/llvm_backend_general.cpp | 21 +++++++++++++--- src/llvm_backend_stmt.cpp | 2 +- 4 files changed, 53 insertions(+), 19 deletions(-) diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp index 192e5cc56..f3c4dd50d 100644 --- a/src/llvm_backend.cpp +++ b/src/llvm_backend.cpp @@ -730,6 +730,8 @@ gb_internal lbValue lb_map_set_proc_for_type(lbModule *m, Type *type) { gb_internal lbValue lb_generate_anonymous_proc_lit(lbModule *m, String const &prefix_name, Ast *expr, lbProcedure *parent) { + MUTEX_GUARD(&m->gen->anonymous_proc_lits_mutex); + lbProcedure **found = map_get(&m->gen->anonymous_proc_lits, expr); if (found) { return lb_find_procedure_value_from_entity(m, (*found)->entity); @@ -1526,6 +1528,10 @@ struct lbLLVMModulePassWorkerData { gb_internal WORKER_TASK_PROC(lb_llvm_module_pass_worker_proc) { auto wd = cast(lbLLVMModulePassWorkerData *)data; + + lb_run_remove_unused_function_pass(wd->m); + lb_run_remove_unused_globals_pass(wd->m); + LLVMPassManagerRef module_pass_manager = LLVMCreatePassManager(); lb_populate_module_pass_manager(wd->target_machine, module_pass_manager, build_context.optimization_level); LLVMRunPassManager(module_pass_manager, wd->m->mod); @@ -2155,29 +2161,47 @@ gb_internal void lb_generate_code(lbGenerator *gen) { } } + isize non_empty_module_count = 0; + for (auto const &entry : gen->modules) { + lbModule *m = entry.value; + if (!lb_is_module_empty(m)) { + non_empty_module_count += 1; + } + } + if (non_empty_module_count <= 1) { + do_threading = false; + } TIME_SECTION("LLVM Function Pass"); for (auto const &entry : gen->modules) { lbModule *m = entry.value; - lb_llvm_function_pass_worker_proc(m); + // if (do_threading) { + // thread_pool_add_task(lb_llvm_function_pass_worker_proc, m); + // } else { + lb_llvm_function_pass_worker_proc(m); + // } } + thread_pool_wait(); TIME_SECTION("LLVM Module Pass"); - for (auto const &entry : gen->modules) 
{ lbModule *m = entry.value; - lb_run_remove_unused_function_pass(m); - lb_run_remove_unused_globals_pass(m); - auto wd = gb_alloc_item(permanent_allocator(), lbLLVMModulePassWorkerData); wd->m = m; wd->target_machine = m->target_machine; - lb_llvm_module_pass_worker_proc(wd); + if (do_threading) { + thread_pool_add_task(lb_llvm_module_pass_worker_proc, wd); + } else { + lb_llvm_module_pass_worker_proc(wd); + } } + thread_pool_wait(); + TIME_SECTION("LLVM Module Verification"); + llvm_error = nullptr; defer (LLVMDisposeMessage(llvm_error)); @@ -2245,21 +2269,13 @@ gb_internal void lb_generate_code(lbGenerator *gen) { TIME_SECTION("LLVM Object Generation"); - isize non_empty_module_count = 0; - for (auto const &entry : gen->modules) { - lbModule *m = entry.value; - if (!lb_is_module_empty(m)) { - non_empty_module_count += 1; - } - } - if (build_context.ignore_llvm_build) { gb_printf_err("LLVM SUCCESS!\n"); gb_exit(1); return; } - if (do_threading && non_empty_module_count > 1) { + if (do_threading) { for (auto const &entry : gen->modules) { lbModule *m = entry.value; if (lb_is_module_empty(m)) { diff --git a/src/llvm_backend.hpp b/src/llvm_backend.hpp index 9f7caa3bb..d824b99cf 100644 --- a/src/llvm_backend.hpp +++ b/src/llvm_backend.hpp @@ -132,6 +132,8 @@ struct lbModule { PtrMap struct_field_remapping; // Key: LLVMTypeRef or Type * i32 internal_type_level; + RecursiveMutex values_mutex; + PtrMap values; PtrMap soa_values; StringMap members; @@ -178,6 +180,7 @@ struct lbGenerator { PtrMap modules_through_ctx; lbModule default_module; + BlockingMutex anonymous_proc_lits_mutex; PtrMap anonymous_proc_lits; BlockingMutex foreign_mutex; diff --git a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp index a849929f0..c09648825 100644 --- a/src/llvm_backend_general.cpp +++ b/src/llvm_backend_general.cpp @@ -316,6 +316,7 @@ gb_internal bool lb_is_instr_terminating(LLVMValueRef instr) { gb_internal lbModule *lb_pkg_module(lbGenerator *gen, AstPackage *pkg) { + // 
NOTE(bill): no need for a mutex since it's immutable auto *found = map_get(&gen->modules, pkg); if (found) { return *found; @@ -1354,7 +1355,7 @@ gb_internal String lb_mangle_name(lbModule *m, Entity *e) { return mangled_name; } -gb_internal String lb_set_nested_type_name_ir_mangled_name(Entity *e, lbProcedure *p) { +gb_internal String lb_set_nested_type_name_ir_mangled_name(Entity *e, lbProcedure *p, lbModule *module) { // NOTE(bill, 2020-03-08): A polymorphic procedure may take a nested type declaration // and as a result, the declaration does not have time to determine what it should be @@ -1421,7 +1422,7 @@ gb_internal String lb_get_entity_name(lbModule *m, Entity *e, String default_nam } if (e->kind == Entity_TypeName && (e->scope->flags & ScopeFlag_File) == 0) { - return lb_set_nested_type_name_ir_mangled_name(e, nullptr); + return lb_set_nested_type_name_ir_mangled_name(e, nullptr, m); } String name = {}; @@ -2164,19 +2165,25 @@ gb_internal void lb_ensure_abi_function_type(lbModule *m, lbProcedure *p) { gb_internal void lb_add_entity(lbModule *m, Entity *e, lbValue val) { if (e != nullptr) { + mutex_lock(&m->values_mutex); map_set(&m->values, e, val); + mutex_unlock(&m->values_mutex); } } gb_internal void lb_add_member(lbModule *m, String const &name, lbValue val) { if (name.len > 0) { + mutex_lock(&m->values_mutex); string_map_set(&m->members, name, val); + mutex_unlock(&m->values_mutex); } } gb_internal void lb_add_procedure_value(lbModule *m, lbProcedure *p) { + mutex_lock(&m->values_mutex); if (p->entity != nullptr) { map_set(&m->procedure_values, p->value, p->entity); } string_map_set(&m->procedures, p->name, p); + mutex_unlock(&m->values_mutex); } @@ -2519,6 +2526,8 @@ gb_internal lbValue lb_find_ident(lbProcedure *p, lbModule *m, Entity *e, Ast *e return *found; } } + mutex_lock(&m->values_mutex); + defer (mutex_unlock(&m->values_mutex)); auto *found = map_get(&m->values, e); if (found) { @@ -2538,7 +2547,6 @@ gb_internal lbValue 
lb_find_ident(lbProcedure *p, lbModule *m, Entity *e, Ast *e if (USE_SEPARATE_MODULES) { lbModule *other_module = lb_pkg_module(m->gen, e->pkg); if (other_module != m) { - String name = lb_get_entity_name(other_module, e); lb_set_entity_from_other_modules_linkage_correctly(other_module, e, name); @@ -2569,6 +2577,9 @@ gb_internal lbValue lb_find_procedure_value_from_entity(lbModule *m, Entity *e) e = strip_entity_wrapping(e); GB_ASSERT(e != nullptr); + mutex_lock(&m->values_mutex); + defer (mutex_unlock(&m->values_mutex)); + auto *found = map_get(&m->values, e); if (found) { return *found; @@ -2657,6 +2668,10 @@ gb_internal lbValue lb_find_value_from_entity(lbModule *m, Entity *e) { return lb_find_procedure_value_from_entity(m, e); } + mutex_lock(&m->values_mutex); + defer (mutex_unlock(&m->values_mutex)); + + auto *found = map_get(&m->values, e); if (found) { return *found; diff --git a/src/llvm_backend_stmt.cpp b/src/llvm_backend_stmt.cpp index 0e6f75118..73b4e251f 100644 --- a/src/llvm_backend_stmt.cpp +++ b/src/llvm_backend_stmt.cpp @@ -32,7 +32,7 @@ gb_internal void lb_build_constant_value_decl(lbProcedure *p, AstValueDecl *vd) continue; } - lb_set_nested_type_name_ir_mangled_name(e, p); + lb_set_nested_type_name_ir_mangled_name(e, p, p->module); } for_array(i, vd->names) { From 025e87d97411da2782dc548ef4a49fc9e2de21f8 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Thu, 5 Jan 2023 12:39:57 +0000 Subject: [PATCH 67/78] Multithread LLVM procedure generation --- src/llvm_backend.cpp | 39 +++++++++++++++++++++++++-------------- src/main.cpp | 22 +++++++++++----------- 2 files changed, 36 insertions(+), 25 deletions(-) diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp index f3c4dd50d..fd9d427ea 100644 --- a/src/llvm_backend.cpp +++ b/src/llvm_backend.cpp @@ -1579,7 +1579,16 @@ gb_internal void lb_generate_procedure(lbModule *m, lbProcedure *p) { } -gb_internal void lb_generate_code(lbGenerator *gen) { +gb_internal 
WORKER_TASK_PROC(lb_generate_procedures_worker_proc) { + lbModule *m = cast(lbModule *)data; + for (isize i = 0; i < m->procedures_to_generate.count; i++) { + lbProcedure *p = m->procedures_to_generate[i]; + lb_generate_procedure(p->module, p); + } + return 0; +} + +gb_internal bool lb_generate_code(lbGenerator *gen) { TIME_SECTION("LLVM Initializtion"); isize thread_count = gb_max(build_context.thread_count, 1); @@ -2126,13 +2135,15 @@ gb_internal void lb_generate_code(lbGenerator *gen) { TIME_SECTION("LLVM Procedure Generation"); for (auto const &entry : gen->modules) { lbModule *m = entry.value; - // NOTE(bill): procedures may be added during generation - for (isize i = 0; i < m->procedures_to_generate.count; i++) { - lbProcedure *p = m->procedures_to_generate[i]; - lb_generate_procedure(m, p); + if (do_threading) { + thread_pool_add_task(lb_generate_procedures_worker_proc, m); + } else { + lb_generate_procedures_worker_proc(m); } } + thread_pool_wait(); + if (build_context.command_kind == Command_test && !already_has_entry_point) { TIME_SECTION("LLVM main"); lb_create_main_procedure(default_module, startup_runtime); @@ -2221,11 +2232,11 @@ gb_internal void lb_generate_code(lbGenerator *gen) { if (LLVMPrintModuleToFile(m->mod, cast(char const *)filepath_ll.text, &llvm_error)) { gb_printf_err("LLVM Error: %s\n", llvm_error); gb_exit(1); - return; + return false; } } gb_exit(1); - return; + return false; } } llvm_error = nullptr; @@ -2243,14 +2254,13 @@ gb_internal void lb_generate_code(lbGenerator *gen) { if (LLVMPrintModuleToFile(m->mod, cast(char const *)filepath_ll.text, &llvm_error)) { gb_printf_err("LLVM Error: %s\n", llvm_error); gb_exit(1); - return; + return false; } array_add(&gen->output_temp_paths, filepath_ll); } if (build_context.build_mode == BuildMode_LLVM_IR) { - gb_exit(0); - return; + return true; } } @@ -2270,9 +2280,8 @@ gb_internal void lb_generate_code(lbGenerator *gen) { TIME_SECTION("LLVM Object Generation"); if 
(build_context.ignore_llvm_build) { - gb_printf_err("LLVM SUCCESS!\n"); - gb_exit(1); - return; + gb_printf_err("LLVM object generation has been ignored!\n"); + return false; } if (do_threading) { @@ -2315,10 +2324,12 @@ gb_internal void lb_generate_code(lbGenerator *gen) { if (LLVMTargetMachineEmitToFile(m->target_machine, m->mod, cast(char *)filepath_obj.text, code_gen_file_type, &llvm_error)) { gb_printf_err("LLVM Error: %s\n", llvm_error); gb_exit(1); - return; + return false; } } } gb_sort_array(gen->foreign_libraries.data, gen->foreign_libraries.count, foreign_library_cmp); + + return true; } diff --git a/src/main.cpp b/src/main.cpp index c07d2c400..a7e5677e9 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -2784,19 +2784,19 @@ int main(int arg_count, char const **arg_ptr) { if (!lb_init_generator(gen, checker)) { return 1; } - lb_generate_code(gen); - - switch (build_context.build_mode) { - case BuildMode_Executable: - case BuildMode_DynamicLibrary: - i32 result = linker_stage(gen); - if (result) { - if (build_context.show_timings) { - show_timings(checker, &global_timings); + if (lb_generate_code(gen)) { + switch (build_context.build_mode) { + case BuildMode_Executable: + case BuildMode_DynamicLibrary: + i32 result = linker_stage(gen); + if (result) { + if (build_context.show_timings) { + show_timings(checker, &global_timings); + } + return result; } - return result; + break; } - break; } remove_temp_files(gen); From 029cb6581b483f7db5b6bf35241b4b8a5cc544ff Mon Sep 17 00:00:00 2001 From: gingerBill Date: Thu, 5 Jan 2023 12:54:53 +0000 Subject: [PATCH 68/78] Unify function pass managers for auxiliary procedures (e.g. 
startup type info, runtime, objc names) --- src/llvm_backend.cpp | 57 ++++++++++++++++++---------------------- src/llvm_backend.hpp | 4 +++ src/llvm_backend_opt.cpp | 3 +++ 3 files changed, 32 insertions(+), 32 deletions(-) diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp index fd9d427ea..f4e97aad7 100644 --- a/src/llvm_backend.cpp +++ b/src/llvm_backend.cpp @@ -990,10 +990,6 @@ gb_internal lbProcedure *lb_create_startup_type_info(lbModule *m) { if (build_context.disallow_rtti) { return nullptr; } - LLVMPassManagerRef default_function_pass_manager = LLVMCreateFunctionPassManagerForModule(m->mod); - lb_populate_function_pass_manager(m, default_function_pass_manager, false, build_context.optimization_level); - LLVMFinalizeFunctionPassManager(default_function_pass_manager); - Type *proc_type = alloc_type_proc(nullptr, nullptr, 0, nullptr, 0, false, ProcCC_CDecl); lbProcedure *p = lb_create_dummy_procedure(m, str_lit(LB_STARTUP_TYPE_INFO_PROC_NAME), proc_type); @@ -1014,9 +1010,6 @@ gb_internal lbProcedure *lb_create_startup_type_info(lbModule *m) { gb_printf_err("\n\n\n\n"); LLVMVerifyFunction(p->value, LLVMAbortProcessAction); } - - lb_run_function_pass_manager(default_function_pass_manager, p); - return p; } @@ -1037,11 +1030,6 @@ gb_internal void lb_finalize_objc_names(lbProcedure *p) { } lbModule *m = p->module; - LLVMPassManagerRef default_function_pass_manager = LLVMCreateFunctionPassManagerForModule(m->mod); - lb_populate_function_pass_manager(m, default_function_pass_manager, false, build_context.optimization_level); - LLVMFinalizeFunctionPassManager(default_function_pass_manager); - - auto args = array_make(permanent_allocator(), 1); LLVMSetLinkage(p->value, LLVMInternalLinkage); @@ -1061,16 +1049,9 @@ gb_internal void lb_finalize_objc_names(lbProcedure *p) { } lb_end_procedure_body(p); - - lb_run_function_pass_manager(default_function_pass_manager, p); - } gb_internal lbProcedure *lb_create_startup_runtime(lbModule *main_module, lbProcedure 
*startup_type_info, lbProcedure *objc_names, Array &global_variables) { // Startup Runtime - LLVMPassManagerRef default_function_pass_manager = LLVMCreateFunctionPassManagerForModule(main_module->mod); - lb_populate_function_pass_manager(main_module, default_function_pass_manager, false, build_context.optimization_level); - LLVMFinalizeFunctionPassManager(default_function_pass_manager); - Type *proc_type = alloc_type_proc(nullptr, nullptr, 0, nullptr, 0, false, ProcCC_Odin); lbProcedure *p = lb_create_dummy_procedure(main_module, str_lit(LB_STARTUP_RUNTIME_PROC_NAME), proc_type); @@ -1175,8 +1156,6 @@ gb_internal lbProcedure *lb_create_startup_runtime(lbModule *main_module, lbProc LLVMVerifyFunction(p->value, LLVMAbortProcessAction); } - lb_run_function_pass_manager(default_function_pass_manager, p); - return p; } @@ -1466,6 +1445,12 @@ gb_internal WORKER_TASK_PROC(lb_llvm_function_pass_worker_proc) { LLVMFinalizeFunctionPassManager(function_pass_manager_size); LLVMFinalizeFunctionPassManager(function_pass_manager_speed); + if (m == &m->gen->default_module) { + lb_run_function_pass_manager(default_function_pass_manager, m->gen->startup_type_info); + lb_run_function_pass_manager(default_function_pass_manager, m->gen->startup_runtime); + lb_finalize_objc_names(m->gen->objc_names); + } + LLVMPassManagerRef default_function_pass_manager_without_memcpy = LLVMCreateFunctionPassManagerForModule(m->mod); LLVMInitializeFunctionPassManager(default_function_pass_manager_without_memcpy); @@ -1588,6 +1573,16 @@ gb_internal WORKER_TASK_PROC(lb_generate_procedures_worker_proc) { return 0; } +gb_internal WORKER_TASK_PROC(lb_generate_missing_procedures_to_check_worker_proc) { + lbModule *m = cast(lbModule *)data; + for (isize i = 0; i < m->missing_procedures_to_check.count; i++) { + lbProcedure *p = m->missing_procedures_to_check[i]; + debugf("Generate missing procedure: %.*s\n", LIT(p->name)); + lb_generate_procedure(m, p); + } + return 0; +} + gb_internal bool 
lb_generate_code(lbGenerator *gen) { TIME_SECTION("LLVM Initializtion"); @@ -2055,13 +2050,11 @@ gb_internal bool lb_generate_code(lbGenerator *gen) { } TIME_SECTION("LLVM Runtime Type Information Creation"); - lbProcedure *startup_type_info = lb_create_startup_type_info(default_module); - - lbProcedure *objc_names = lb_create_objc_names(default_module); + gen->startup_type_info = lb_create_startup_type_info(default_module); + gen->objc_names = lb_create_objc_names(default_module); TIME_SECTION("LLVM Runtime Startup Creation (Global Variables)"); - lbProcedure *startup_runtime = lb_create_startup_runtime(default_module, startup_type_info, objc_names, global_variables); - gb_unused(startup_runtime); + gen->startup_runtime = lb_create_startup_runtime(default_module, gen->startup_type_info, gen->objc_names, global_variables); if (build_context.ODIN_DEBUG) { for (auto const &entry : builtin_pkg->scope->elements) { @@ -2146,20 +2139,20 @@ gb_internal bool lb_generate_code(lbGenerator *gen) { if (build_context.command_kind == Command_test && !already_has_entry_point) { TIME_SECTION("LLVM main"); - lb_create_main_procedure(default_module, startup_runtime); + lb_create_main_procedure(default_module, gen->startup_runtime); } for (auto const &entry : gen->modules) { lbModule *m = entry.value; // NOTE(bill): procedures may be added during generation - for (isize i = 0; i < m->missing_procedures_to_check.count; i++) { - lbProcedure *p = m->missing_procedures_to_check[i]; - debugf("Generate missing procedure: %.*s\n", LIT(p->name)); - lb_generate_procedure(m, p); + if (do_threading) { + thread_pool_add_task(lb_generate_missing_procedures_to_check_worker_proc, m); + } else { + lb_generate_missing_procedures_to_check_worker_proc(m); } } - lb_finalize_objc_names(objc_names); + thread_pool_wait(); if (build_context.ODIN_DEBUG) { TIME_SECTION("LLVM Debug Info Complete Types and Finalize"); diff --git a/src/llvm_backend.hpp b/src/llvm_backend.hpp index d824b99cf..c4cc17782 100644 --- 
a/src/llvm_backend.hpp +++ b/src/llvm_backend.hpp @@ -189,6 +189,10 @@ struct lbGenerator { std::atomic global_array_index; std::atomic global_generated_index; + + lbProcedure *startup_type_info; + lbProcedure *startup_runtime; + lbProcedure *objc_names; }; diff --git a/src/llvm_backend_opt.cpp b/src/llvm_backend_opt.cpp index fd6d94361..d7a34d82a 100644 --- a/src/llvm_backend_opt.cpp +++ b/src/llvm_backend_opt.cpp @@ -359,6 +359,9 @@ gb_internal void lb_run_remove_dead_instruction_pass(lbProcedure *p) { gb_internal void lb_run_function_pass_manager(LLVMPassManagerRef fpm, lbProcedure *p) { + if (p == nullptr) { + return; + } LLVMRunFunctionPassManager(fpm, p->value); // NOTE(bill): LLVMAddDCEPass doesn't seem to be exported in the official DLL's for LLVM // which means we cannot rely upon it From 5eee8077ddea82610bf72a37c4032bb06b1c4ab0 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Thu, 5 Jan 2023 15:56:45 +0000 Subject: [PATCH 69/78] enum-ifiy function pass managers for `lbModule` --- src/llvm_backend.cpp | 102 ++++++++++++++++++++----------------- src/llvm_backend.hpp | 14 +++++ src/llvm_backend_debug.cpp | 7 +++ 3 files changed, 75 insertions(+), 48 deletions(-) diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp index f4e97aad7..54430246e 100644 --- a/src/llvm_backend.cpp +++ b/src/llvm_backend.cpp @@ -1420,89 +1420,89 @@ gb_internal WORKER_TASK_PROC(lb_llvm_emit_worker_proc) { return 0; } -gb_internal WORKER_TASK_PROC(lb_llvm_function_pass_worker_proc) { - GB_ASSERT(MULTITHREAD_OBJECT_GENERATION); - auto m = cast(lbModule *)data; +gb_internal void lb_llvm_function_pass_per_function_internal(lbModule *module, lbProcedure *p, lbFunctionPassManagerKind pass_manager_kind = lbFunctionPassManager_default) { + LLVMPassManagerRef pass_manager = module->function_pass_managers[pass_manager_kind]; + lb_run_function_pass_manager(pass_manager, p); +} - LLVMPassManagerRef default_function_pass_manager = LLVMCreateFunctionPassManagerForModule(m->mod); - 
LLVMPassManagerRef function_pass_manager_minimal = LLVMCreateFunctionPassManagerForModule(m->mod); - LLVMPassManagerRef function_pass_manager_size = LLVMCreateFunctionPassManagerForModule(m->mod); - LLVMPassManagerRef function_pass_manager_speed = LLVMCreateFunctionPassManagerForModule(m->mod); +gb_internal void lb_llvm_function_pass_per_module(lbModule *m) { + { + GB_ASSERT(m->function_pass_managers[lbFunctionPassManager_default] == nullptr); - LLVMInitializeFunctionPassManager(default_function_pass_manager); - LLVMInitializeFunctionPassManager(function_pass_manager_minimal); - LLVMInitializeFunctionPassManager(function_pass_manager_size); - LLVMInitializeFunctionPassManager(function_pass_manager_speed); + m->function_pass_managers[lbFunctionPassManager_default] = LLVMCreateFunctionPassManagerForModule(m->mod); + m->function_pass_managers[lbFunctionPassManager_default_without_memcpy] = LLVMCreateFunctionPassManagerForModule(m->mod); + m->function_pass_managers[lbFunctionPassManager_minimal] = LLVMCreateFunctionPassManagerForModule(m->mod); + m->function_pass_managers[lbFunctionPassManager_size] = LLVMCreateFunctionPassManagerForModule(m->mod); + m->function_pass_managers[lbFunctionPassManager_speed] = LLVMCreateFunctionPassManagerForModule(m->mod); - lb_populate_function_pass_manager(m, default_function_pass_manager, false, build_context.optimization_level); - lb_populate_function_pass_manager_specific(m, function_pass_manager_minimal, 0); - lb_populate_function_pass_manager_specific(m, function_pass_manager_size, 1); - lb_populate_function_pass_manager_specific(m, function_pass_manager_speed, 2); + LLVMInitializeFunctionPassManager(m->function_pass_managers[lbFunctionPassManager_default]); + LLVMInitializeFunctionPassManager(m->function_pass_managers[lbFunctionPassManager_default_without_memcpy]); + LLVMInitializeFunctionPassManager(m->function_pass_managers[lbFunctionPassManager_minimal]); + 
LLVMInitializeFunctionPassManager(m->function_pass_managers[lbFunctionPassManager_size]); + LLVMInitializeFunctionPassManager(m->function_pass_managers[lbFunctionPassManager_speed]); - LLVMFinalizeFunctionPassManager(default_function_pass_manager); - LLVMFinalizeFunctionPassManager(function_pass_manager_minimal); - LLVMFinalizeFunctionPassManager(function_pass_manager_size); - LLVMFinalizeFunctionPassManager(function_pass_manager_speed); + lb_populate_function_pass_manager(m, m->function_pass_managers[lbFunctionPassManager_default], false, build_context.optimization_level); + lb_populate_function_pass_manager(m, m->function_pass_managers[lbFunctionPassManager_default_without_memcpy], true, build_context.optimization_level); + lb_populate_function_pass_manager_specific(m, m->function_pass_managers[lbFunctionPassManager_minimal], 0); + lb_populate_function_pass_manager_specific(m, m->function_pass_managers[lbFunctionPassManager_size], 1); + lb_populate_function_pass_manager_specific(m, m->function_pass_managers[lbFunctionPassManager_speed], 2); - if (m == &m->gen->default_module) { - lb_run_function_pass_manager(default_function_pass_manager, m->gen->startup_type_info); - lb_run_function_pass_manager(default_function_pass_manager, m->gen->startup_runtime); - lb_finalize_objc_names(m->gen->objc_names); + LLVMFinalizeFunctionPassManager(m->function_pass_managers[lbFunctionPassManager_default]); + LLVMFinalizeFunctionPassManager(m->function_pass_managers[lbFunctionPassManager_default_without_memcpy]); + LLVMFinalizeFunctionPassManager(m->function_pass_managers[lbFunctionPassManager_minimal]); + LLVMFinalizeFunctionPassManager(m->function_pass_managers[lbFunctionPassManager_size]); + LLVMFinalizeFunctionPassManager(m->function_pass_managers[lbFunctionPassManager_speed]); } - - LLVMPassManagerRef default_function_pass_manager_without_memcpy = LLVMCreateFunctionPassManagerForModule(m->mod); - LLVMInitializeFunctionPassManager(default_function_pass_manager_without_memcpy); 
- lb_populate_function_pass_manager(m, default_function_pass_manager_without_memcpy, true, build_context.optimization_level); - LLVMFinalizeFunctionPassManager(default_function_pass_manager_without_memcpy); + if (m == &m->gen->default_module) { + lb_llvm_function_pass_per_function_internal(m, m->gen->startup_type_info); + lb_llvm_function_pass_per_function_internal(m, m->gen->startup_runtime); + lb_llvm_function_pass_per_function_internal(m, m->gen->objc_names); + } for (lbProcedure *p : m->procedures_to_generate) { if (p->body != nullptr) { // Build Procedure + lbFunctionPassManagerKind pass_manager_kind = lbFunctionPassManager_default; if (p->flags & lbProcedureFlag_WithoutMemcpyPass) { - lb_run_function_pass_manager(default_function_pass_manager_without_memcpy, p); + pass_manager_kind = lbFunctionPassManager_default_without_memcpy; } else { if (p->entity && p->entity->kind == Entity_Procedure) { switch (p->entity->Procedure.optimization_mode) { case ProcedureOptimizationMode_None: case ProcedureOptimizationMode_Minimal: - lb_run_function_pass_manager(function_pass_manager_minimal, p); + pass_manager_kind = lbFunctionPassManager_minimal; break; case ProcedureOptimizationMode_Size: - lb_run_function_pass_manager(function_pass_manager_size, p); + pass_manager_kind = lbFunctionPassManager_size; break; case ProcedureOptimizationMode_Speed: - lb_run_function_pass_manager(function_pass_manager_speed, p); - break; - default: - lb_run_function_pass_manager(default_function_pass_manager, p); + pass_manager_kind = lbFunctionPassManager_speed; break; } - } else { - lb_run_function_pass_manager(default_function_pass_manager, p); } } + + lb_llvm_function_pass_per_function_internal(m, p, pass_manager_kind); } } for (auto const &entry : m->equal_procs) { lbProcedure *p = entry.value; - lb_run_function_pass_manager(default_function_pass_manager, p); + lb_llvm_function_pass_per_function_internal(m, p); } for (auto const &entry : m->hasher_procs) { lbProcedure *p = entry.value; - 
lb_run_function_pass_manager(default_function_pass_manager, p); + lb_llvm_function_pass_per_function_internal(m, p); } for (auto const &entry : m->map_get_procs) { lbProcedure *p = entry.value; - lb_run_function_pass_manager(default_function_pass_manager, p); + lb_llvm_function_pass_per_function_internal(m, p); } for (auto const &entry : m->map_set_procs) { lbProcedure *p = entry.value; - lb_run_function_pass_manager(default_function_pass_manager, p); + lb_llvm_function_pass_per_function_internal(m, p); } - - return 0; } @@ -2125,6 +2125,10 @@ gb_internal bool lb_generate_code(lbGenerator *gen) { } } + if (gen->modules.entries.count <= 1) { + do_threading = false; + } + TIME_SECTION("LLVM Procedure Generation"); for (auto const &entry : gen->modules) { lbModule *m = entry.value; @@ -2142,6 +2146,7 @@ gb_internal bool lb_generate_code(lbGenerator *gen) { lb_create_main_procedure(default_module, gen->startup_runtime); } + TIME_SECTION("LLVM Procedure Generation (missing)"); for (auto const &entry : gen->modules) { lbModule *m = entry.value; // NOTE(bill): procedures may be added during generation @@ -2154,6 +2159,11 @@ gb_internal bool lb_generate_code(lbGenerator *gen) { thread_pool_wait(); + if (gen->objc_names) { + TIME_SECTION("Finalize objc names"); + lb_finalize_objc_names(gen->objc_names); + } + if (build_context.ODIN_DEBUG) { TIME_SECTION("LLVM Debug Info Complete Types and Finalize"); for (auto const &entry : gen->modules) { @@ -2180,11 +2190,7 @@ gb_internal bool lb_generate_code(lbGenerator *gen) { TIME_SECTION("LLVM Function Pass"); for (auto const &entry : gen->modules) { lbModule *m = entry.value; - // if (do_threading) { - // thread_pool_add_task(lb_llvm_function_pass_worker_proc, m); - // } else { - lb_llvm_function_pass_worker_proc(m); - // } + lb_llvm_function_pass_per_module(m); } thread_pool_wait(); diff --git a/src/llvm_backend.hpp b/src/llvm_backend.hpp index c4cc17782..0e3a38fd3 100644 --- a/src/llvm_backend.hpp +++ b/src/llvm_backend.hpp @@ 
-117,6 +117,16 @@ struct lbIncompleteDebugType { typedef Slice lbStructFieldRemapping; +enum lbFunctionPassManagerKind { + lbFunctionPassManager_default, + lbFunctionPassManager_default_without_memcpy, + lbFunctionPassManager_minimal, + lbFunctionPassManager_size, + lbFunctionPassManager_speed, + + lbFunctionPassManager_COUNT +}; + struct lbModule { LLVMModuleRef mod; LLVMContextRef ctx; @@ -158,6 +168,8 @@ struct lbModule { LLVMDIBuilderRef debug_builder; LLVMMetadataRef debug_compile_unit; + + RecursiveMutex debug_values_mutex; PtrMap debug_values; Array debug_incomplete_types; @@ -167,6 +179,8 @@ struct lbModule { PtrMap map_cell_info_map; // address of runtime.Map_Info PtrMap map_info_map; // address of runtime.Map_Cell_Info + + LLVMPassManagerRef function_pass_managers[lbFunctionPassManager_COUNT]; }; struct lbGenerator { diff --git a/src/llvm_backend_debug.cpp b/src/llvm_backend_debug.cpp index 55c4370a2..9bf4063d6 100644 --- a/src/llvm_backend_debug.cpp +++ b/src/llvm_backend_debug.cpp @@ -2,7 +2,9 @@ gb_internal LLVMMetadataRef lb_get_llvm_metadata(lbModule *m, void *key) { if (key == nullptr) { return nullptr; } + mutex_lock(&m->debug_values_mutex); auto found = map_get(&m->debug_values, key); + mutex_unlock(&m->debug_values_mutex); if (found) { return *found; } @@ -10,7 +12,9 @@ gb_internal LLVMMetadataRef lb_get_llvm_metadata(lbModule *m, void *key) { } gb_internal void lb_set_llvm_metadata(lbModule *m, void *key, LLVMMetadataRef value) { if (key != nullptr) { + mutex_lock(&m->debug_values_mutex); map_set(&m->debug_values, key, value); + mutex_unlock(&m->debug_values_mutex); } } @@ -491,6 +495,9 @@ gb_internal LLVMMetadataRef lb_get_base_scope_metadata(lbModule *m, Scope *scope } gb_internal LLVMMetadataRef lb_debug_type(lbModule *m, Type *type) { + mutex_lock(&m->debug_values_mutex); + defer (mutex_unlock(&m->debug_values_mutex)); + GB_ASSERT(type != nullptr); LLVMMetadataRef found = lb_get_llvm_metadata(m, type); if (found != nullptr) { From 
23d0c52bf466864c768129083d45a0f6ec59090e Mon Sep 17 00:00:00 2001 From: gingerBill Date: Thu, 5 Jan 2023 16:42:02 +0000 Subject: [PATCH 70/78] Refactor llvm backend code into separate procedures to make it simpler to profile --- src/llvm_backend.cpp | 871 +++++++++++++++++++++++-------------------- src/llvm_backend.hpp | 2 +- 2 files changed, 463 insertions(+), 410 deletions(-) diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp index 54430246e..6109ca247 100644 --- a/src/llvm_backend.cpp +++ b/src/llvm_backend.cpp @@ -1159,222 +1159,73 @@ gb_internal lbProcedure *lb_create_startup_runtime(lbModule *main_module, lbProc return p; } +gb_internal void lb_create_global_procedures_and_types(lbGenerator *gen, CheckerInfo *info) { + auto *min_dep_set = &info->minimum_dependency_set; -gb_internal lbProcedure *lb_create_main_procedure(lbModule *m, lbProcedure *startup_runtime) { - LLVMPassManagerRef default_function_pass_manager = LLVMCreateFunctionPassManagerForModule(m->mod); - lb_populate_function_pass_manager(m, default_function_pass_manager, false, build_context.optimization_level); - LLVMFinalizeFunctionPassManager(default_function_pass_manager); + for (Entity *e : info->entities) { + String name = e->token.string; + Scope * scope = e->scope; - Type *params = alloc_type_tuple(); - Type *results = alloc_type_tuple(); + if ((scope->flags & ScopeFlag_File) == 0) { + continue; + } - Type *t_ptr_cstring = alloc_type_pointer(t_cstring); - - bool call_cleanup = true; + Scope *package_scope = scope->parent; + GB_ASSERT(package_scope->flags & ScopeFlag_Pkg); - bool has_args = false; - bool is_dll_main = false; - String name = str_lit("main"); - if (build_context.metrics.os == TargetOs_windows && build_context.build_mode == BuildMode_DynamicLibrary) { - is_dll_main = true; - name = str_lit("DllMain"); - slice_init(¶ms->Tuple.variables, permanent_allocator(), 3); - params->Tuple.variables[0] = alloc_entity_param(nullptr, make_token_ident("hinstDLL"), t_rawptr, false, true); 
- params->Tuple.variables[1] = alloc_entity_param(nullptr, make_token_ident("fdwReason"), t_u32, false, true); - params->Tuple.variables[2] = alloc_entity_param(nullptr, make_token_ident("lpReserved"), t_rawptr, false, true); - call_cleanup = false; - } else if (build_context.metrics.os == TargetOs_windows && (build_context.metrics.arch == TargetArch_i386 || build_context.no_crt)) { - name = str_lit("mainCRTStartup"); - } else if (is_arch_wasm()) { - name = str_lit("_start"); - call_cleanup = false; - } else { - has_args = true; - slice_init(¶ms->Tuple.variables, permanent_allocator(), 2); - params->Tuple.variables[0] = alloc_entity_param(nullptr, make_token_ident("argc"), t_i32, false, true); - params->Tuple.variables[1] = alloc_entity_param(nullptr, make_token_ident("argv"), t_ptr_cstring, false, true); - } + switch (e->kind) { + case Entity_Variable: + // NOTE(bill): Handled above as it requires a specific load order + continue; + case Entity_ProcGroup: + continue; - slice_init(&results->Tuple.variables, permanent_allocator(), 1); - results->Tuple.variables[0] = alloc_entity_param(nullptr, blank_token, t_i32, false, true); - - Type *proc_type = alloc_type_proc(nullptr, - params, params->Tuple.variables.count, - results, results->Tuple.variables.count, false, ProcCC_CDecl); - - - lbProcedure *p = lb_create_dummy_procedure(m, name, proc_type); - p->is_startup = true; - - lb_begin_procedure_body(p); - - if (has_args) { // initialize `runtime.args__` - lbValue argc = {LLVMGetParam(p->value, 0), t_i32}; - lbValue argv = {LLVMGetParam(p->value, 1), t_ptr_cstring}; - LLVMSetValueName2(argc.value, "argc", 4); - LLVMSetValueName2(argv.value, "argv", 4); - argc = lb_emit_conv(p, argc, t_int); - lbAddr args = lb_addr(lb_find_runtime_value(p->module, str_lit("args__"))); - lb_fill_slice(p, args, argv, argc); - } - - lbValue startup_runtime_value = {startup_runtime->value, startup_runtime->type}; - lb_emit_call(p, startup_runtime_value, {}, ProcInlining_none); - - if 
(build_context.command_kind == Command_test) { - Type *t_Internal_Test = find_type_in_pkg(m->info, str_lit("testing"), str_lit("Internal_Test")); - Type *array_type = alloc_type_array(t_Internal_Test, m->info->testing_procedures.count); - Type *slice_type = alloc_type_slice(t_Internal_Test); - lbAddr all_tests_array_addr = lb_add_global_generated(p->module, array_type, {}); - lbValue all_tests_array = lb_addr_get_ptr(p, all_tests_array_addr); - - LLVMValueRef indices[2] = {}; - indices[0] = LLVMConstInt(lb_type(m, t_i32), 0, false); - - isize testing_proc_index = 0; - for (Entity *testing_proc : m->info->testing_procedures) { - String name = testing_proc->token.string; - - String pkg_name = {}; - if (testing_proc->pkg != nullptr) { - pkg_name = testing_proc->pkg->name; + case Entity_TypeName: + case Entity_Procedure: + break; + case Entity_Constant: + if (build_context.ODIN_DEBUG) { + add_debug_info_for_global_constant_from_entity(gen, e); } - lbValue v_pkg = lb_find_or_add_entity_string(m, pkg_name); - lbValue v_name = lb_find_or_add_entity_string(m, name); - lbValue v_proc = lb_find_procedure_value_from_entity(m, testing_proc); - - indices[1] = LLVMConstInt(lb_type(m, t_int), testing_proc_index++, false); - - LLVMValueRef vals[3] = {}; - vals[0] = v_pkg.value; - vals[1] = v_name.value; - vals[2] = v_proc.value; - GB_ASSERT(LLVMIsConstant(vals[0])); - GB_ASSERT(LLVMIsConstant(vals[1])); - GB_ASSERT(LLVMIsConstant(vals[2])); - - LLVMValueRef dst = LLVMConstInBoundsGEP2(llvm_addr_type(m, all_tests_array), all_tests_array.value, indices, gb_count_of(indices)); - LLVMValueRef src = llvm_const_named_struct(m, t_Internal_Test, vals, gb_count_of(vals)); - - LLVMBuildStore(p->builder, src, dst); + break; } - lbAddr all_tests_slice = lb_add_local_generated(p, slice_type, true); - lb_fill_slice(p, all_tests_slice, - lb_array_elem(p, all_tests_array), - lb_const_int(m, t_int, m->info->testing_procedures.count)); - - - lbValue runner = lb_find_package_value(m, 
str_lit("testing"), str_lit("runner")); - - auto args = array_make(heap_allocator(), 1); - args[0] = lb_addr_load(p, all_tests_slice); - lb_emit_call(p, runner, args); - } else { - if (m->info->entry_point != nullptr) { - lbValue entry_point = lb_find_procedure_value_from_entity(m, m->info->entry_point); - lb_emit_call(p, entry_point, {}, ProcInlining_no_inline); - } - } - - - if (call_cleanup) { - lbValue cleanup_runtime_value = lb_find_runtime_value(m, str_lit("_cleanup_runtime")); - lb_emit_call(p, cleanup_runtime_value, {}, ProcInlining_none); - } - - - if (is_dll_main) { - LLVMBuildRet(p->builder, LLVMConstInt(lb_type(m, t_i32), 1, false)); - } else { - LLVMBuildRet(p->builder, LLVMConstInt(lb_type(m, t_i32), 0, false)); - } - - lb_end_procedure_body(p); - - - LLVMSetLinkage(p->value, LLVMExternalLinkage); - if (is_arch_wasm()) { - lb_set_wasm_export_attributes(p->value, p->name); - } - - - if (!m->debug_builder && LLVMVerifyFunction(p->value, LLVMReturnStatusAction)) { - gb_printf_err("LLVM CODE GEN FAILED FOR PROCEDURE: %s\n", "main"); - LLVMDumpValue(p->value); - gb_printf_err("\n\n\n\n"); - LLVMVerifyFunction(p->value, LLVMAbortProcessAction); - } - - lb_run_function_pass_manager(default_function_pass_manager, p); - return p; -} - -gb_internal String lb_filepath_ll_for_module(lbModule *m) { - String path = concatenate3_strings(permanent_allocator(), - build_context.build_paths[BuildPath_Output].basename, - STR_LIT("/"), - build_context.build_paths[BuildPath_Output].name - ); - - if (m->pkg) { - path = concatenate3_strings(permanent_allocator(), path, STR_LIT("-"), m->pkg->name); - } else if (USE_SEPARATE_MODULES) { - path = concatenate_strings(permanent_allocator(), path, STR_LIT("-builtin")); - } - path = concatenate_strings(permanent_allocator(), path, STR_LIT(".ll")); - - return path; -} -gb_internal String lb_filepath_obj_for_module(lbModule *m) { - String path = concatenate3_strings(permanent_allocator(), - 
build_context.build_paths[BuildPath_Output].basename, - STR_LIT("/"), - build_context.build_paths[BuildPath_Output].name - ); - - if (m->pkg) { - path = concatenate3_strings(permanent_allocator(), path, STR_LIT("-"), m->pkg->name); - } - - String ext = {}; - - if (build_context.build_mode == BuildMode_Assembly) { - ext = STR_LIT(".S"); - } else { - if (is_arch_wasm()) { - ext = STR_LIT(".wasm.o"); - } else { - switch (build_context.metrics.os) { - case TargetOs_windows: - ext = STR_LIT(".obj"); - break; - default: - case TargetOs_darwin: - case TargetOs_linux: - case TargetOs_essence: - ext = STR_LIT(".o"); - break; - - case TargetOs_freestanding: - switch (build_context.metrics.abi) { - default: - case TargetABI_Default: - case TargetABI_SysV: - ext = STR_LIT(".o"); - break; - case TargetABI_Win64: - ext = STR_LIT(".obj"); - break; - } - break; + bool polymorphic_struct = false; + if (e->type != nullptr && e->kind == Entity_TypeName) { + Type *bt = base_type(e->type); + if (bt->kind == Type_Struct) { + polymorphic_struct = is_type_polymorphic(bt); } } - } - return concatenate_strings(permanent_allocator(), path, ext); + if (!polymorphic_struct && !ptr_set_exists(min_dep_set, e)) { + // NOTE(bill): Nothing depends upon it so doesn't need to be built + continue; + } + + lbModule *m = &gen->default_module; + if (USE_SEPARATE_MODULES) { + m = lb_pkg_module(gen, e->pkg); + } + + String mangled_name = lb_get_entity_name(m, e); + + switch (e->kind) { + case Entity_TypeName: + lb_type(m, e->type); + break; + case Entity_Procedure: + { + lbProcedure *p = lb_create_procedure(m, e); + array_add(&m->procedures_to_generate, p); + } + break; + } + } } +gb_internal void lb_generate_procedure(lbModule *m, lbProcedure *p); + gb_internal bool lb_is_module_empty(lbModule *m) { if (LLVMGetFirstFunction(m->mod) == nullptr && @@ -1524,6 +1375,392 @@ gb_internal WORKER_TASK_PROC(lb_llvm_module_pass_worker_proc) { } + +gb_internal WORKER_TASK_PROC(lb_generate_procedures_worker_proc) { + 
lbModule *m = cast(lbModule *)data; + for (isize i = 0; i < m->procedures_to_generate.count; i++) { + lbProcedure *p = m->procedures_to_generate[i]; + lb_generate_procedure(p->module, p); + } + return 0; +} + +gb_internal void lb_generate_procedures(lbGenerator *gen, bool do_threading) { + for (auto const &entry : gen->modules) { + lbModule *m = entry.value; + if (do_threading) { + thread_pool_add_task(lb_generate_procedures_worker_proc, m); + } else { + lb_generate_procedures_worker_proc(m); + } + } + + thread_pool_wait(); +} + +gb_internal WORKER_TASK_PROC(lb_generate_missing_procedures_to_check_worker_proc) { + lbModule *m = cast(lbModule *)data; + for (isize i = 0; i < m->missing_procedures_to_check.count; i++) { + lbProcedure *p = m->missing_procedures_to_check[i]; + debugf("Generate missing procedure: %.*s\n", LIT(p->name)); + lb_generate_procedure(m, p); + } + return 0; +} + +gb_internal void lb_generate_missing_procedures(lbGenerator *gen, bool do_threading) { + for (auto const &entry : gen->modules) { + lbModule *m = entry.value; + // NOTE(bill): procedures may be added during generation + if (do_threading) { + thread_pool_add_task(lb_generate_missing_procedures_to_check_worker_proc, m); + } else { + lb_generate_missing_procedures_to_check_worker_proc(m); + } + } + + thread_pool_wait(); +} + +gb_internal void lb_debug_info_complete_types_and_finalize(lbGenerator *gen) { + for (auto const &entry : gen->modules) { + lbModule *m = entry.value; + if (m->debug_builder != nullptr) { + lb_debug_complete_types(m); + LLVMDIBuilderFinalize(m->debug_builder); + } + } +} + +gb_internal void lb_llvm_function_passes(lbGenerator *gen) { + for (auto const &entry : gen->modules) { + lbModule *m = entry.value; + lb_llvm_function_pass_per_module(m); + } + thread_pool_wait(); +} + + +gb_internal void lb_llvm_module_passes(lbGenerator *gen, bool do_threading) { + for (auto const &entry : gen->modules) { + lbModule *m = entry.value; + auto wd = 
gb_alloc_item(permanent_allocator(), lbLLVMModulePassWorkerData); + wd->m = m; + wd->target_machine = m->target_machine; + + if (do_threading) { + thread_pool_add_task(lb_llvm_module_pass_worker_proc, wd); + } else { + lb_llvm_module_pass_worker_proc(wd); + } + } + thread_pool_wait(); +} + + +gb_internal String lb_filepath_ll_for_module(lbModule *m) { + String path = concatenate3_strings(permanent_allocator(), + build_context.build_paths[BuildPath_Output].basename, + STR_LIT("/"), + build_context.build_paths[BuildPath_Output].name + ); + + if (m->pkg) { + path = concatenate3_strings(permanent_allocator(), path, STR_LIT("-"), m->pkg->name); + } else if (USE_SEPARATE_MODULES) { + path = concatenate_strings(permanent_allocator(), path, STR_LIT("-builtin")); + } + path = concatenate_strings(permanent_allocator(), path, STR_LIT(".ll")); + + return path; +} +gb_internal String lb_filepath_obj_for_module(lbModule *m) { + String path = concatenate3_strings(permanent_allocator(), + build_context.build_paths[BuildPath_Output].basename, + STR_LIT("/"), + build_context.build_paths[BuildPath_Output].name + ); + + if (m->pkg) { + path = concatenate3_strings(permanent_allocator(), path, STR_LIT("-"), m->pkg->name); + } + + String ext = {}; + + if (build_context.build_mode == BuildMode_Assembly) { + ext = STR_LIT(".S"); + } else { + if (is_arch_wasm()) { + ext = STR_LIT(".wasm.o"); + } else { + switch (build_context.metrics.os) { + case TargetOs_windows: + ext = STR_LIT(".obj"); + break; + default: + case TargetOs_darwin: + case TargetOs_linux: + case TargetOs_essence: + ext = STR_LIT(".o"); + break; + + case TargetOs_freestanding: + switch (build_context.metrics.abi) { + default: + case TargetABI_Default: + case TargetABI_SysV: + ext = STR_LIT(".o"); + break; + case TargetABI_Win64: + ext = STR_LIT(".obj"); + break; + } + break; + } + } + } + + return concatenate_strings(permanent_allocator(), path, ext); +} + +gb_internal bool lb_llvm_module_verification(lbGenerator *gen, char 
**llvm_error_ptr) { + for (auto const &entry : gen->modules) { + lbModule *m = entry.value; + if (LLVMVerifyModule(m->mod, LLVMReturnStatusAction, llvm_error_ptr)) { + gb_printf_err("LLVM Error:\n%s\n", *llvm_error_ptr); + if (build_context.keep_temp_files) { + TIME_SECTION("LLVM Print Module to File"); + String filepath_ll = lb_filepath_ll_for_module(m); + if (LLVMPrintModuleToFile(m->mod, cast(char const *)filepath_ll.text, llvm_error_ptr)) { + gb_printf_err("LLVM Error: %s\n", *llvm_error_ptr); + gb_exit(1); + return false; + } + } + gb_exit(1); + return false; + } + } + *llvm_error_ptr = nullptr; + return true; +} + +gb_internal void lb_add_foreign_library_paths(lbGenerator *gen) { + for (auto const &entry : gen->modules) { + lbModule *m = entry.value; + for (Entity *e : m->info->required_foreign_imports_through_force) { + lb_add_foreign_library_path(m, e); + } + + if (lb_is_module_empty(m)) { + continue; + } + } +} + +gb_internal bool lb_llvm_object_generation(lbGenerator *gen, LLVMCodeGenFileType code_gen_file_type, bool do_threading) { + char *llvm_error = nullptr; + defer (LLVMDisposeMessage(llvm_error)); + + if (do_threading) { + for (auto const &entry : gen->modules) { + lbModule *m = entry.value; + if (lb_is_module_empty(m)) { + continue; + } + + String filepath_ll = lb_filepath_ll_for_module(m); + String filepath_obj = lb_filepath_obj_for_module(m); + array_add(&gen->output_object_paths, filepath_obj); + array_add(&gen->output_temp_paths, filepath_ll); + + auto *wd = gb_alloc_item(permanent_allocator(), lbLLVMEmitWorker); + wd->target_machine = m->target_machine; + wd->code_gen_file_type = code_gen_file_type; + wd->filepath_obj = filepath_obj; + wd->m = m; + thread_pool_add_task(lb_llvm_emit_worker_proc, wd); + } + + thread_pool_wait(&global_thread_pool); + } else { + for (auto const &entry : gen->modules) { + lbModule *m = entry.value; + if (lb_is_module_empty(m)) { + continue; + } + + String filepath_obj = lb_filepath_obj_for_module(m); + 
array_add(&gen->output_object_paths, filepath_obj); + + String short_name = remove_directory_from_path(filepath_obj); + gbString section_name = gb_string_make(heap_allocator(), "LLVM Generate Object: "); + section_name = gb_string_append_length(section_name, short_name.text, short_name.len); + + TIME_SECTION_WITH_LEN(section_name, gb_string_length(section_name)); + + if (LLVMTargetMachineEmitToFile(m->target_machine, m->mod, cast(char *)filepath_obj.text, code_gen_file_type, &llvm_error)) { + gb_printf_err("LLVM Error: %s\n", llvm_error); + gb_exit(1); + return false; + } + } + } + return true; +} + + + +gb_internal lbProcedure *lb_create_main_procedure(lbModule *m, lbProcedure *startup_runtime) { + LLVMPassManagerRef default_function_pass_manager = LLVMCreateFunctionPassManagerForModule(m->mod); + lb_populate_function_pass_manager(m, default_function_pass_manager, false, build_context.optimization_level); + LLVMFinalizeFunctionPassManager(default_function_pass_manager); + + Type *params = alloc_type_tuple(); + Type *results = alloc_type_tuple(); + + Type *t_ptr_cstring = alloc_type_pointer(t_cstring); + + bool call_cleanup = true; + + bool has_args = false; + bool is_dll_main = false; + String name = str_lit("main"); + if (build_context.metrics.os == TargetOs_windows && build_context.build_mode == BuildMode_DynamicLibrary) { + is_dll_main = true; + name = str_lit("DllMain"); + slice_init(¶ms->Tuple.variables, permanent_allocator(), 3); + params->Tuple.variables[0] = alloc_entity_param(nullptr, make_token_ident("hinstDLL"), t_rawptr, false, true); + params->Tuple.variables[1] = alloc_entity_param(nullptr, make_token_ident("fdwReason"), t_u32, false, true); + params->Tuple.variables[2] = alloc_entity_param(nullptr, make_token_ident("lpReserved"), t_rawptr, false, true); + call_cleanup = false; + } else if (build_context.metrics.os == TargetOs_windows && (build_context.metrics.arch == TargetArch_i386 || build_context.no_crt)) { + name = str_lit("mainCRTStartup"); + } 
else if (is_arch_wasm()) { + name = str_lit("_start"); + call_cleanup = false; + } else { + has_args = true; + slice_init(¶ms->Tuple.variables, permanent_allocator(), 2); + params->Tuple.variables[0] = alloc_entity_param(nullptr, make_token_ident("argc"), t_i32, false, true); + params->Tuple.variables[1] = alloc_entity_param(nullptr, make_token_ident("argv"), t_ptr_cstring, false, true); + } + + slice_init(&results->Tuple.variables, permanent_allocator(), 1); + results->Tuple.variables[0] = alloc_entity_param(nullptr, blank_token, t_i32, false, true); + + Type *proc_type = alloc_type_proc(nullptr, + params, params->Tuple.variables.count, + results, results->Tuple.variables.count, false, ProcCC_CDecl); + + + lbProcedure *p = lb_create_dummy_procedure(m, name, proc_type); + p->is_startup = true; + + lb_begin_procedure_body(p); + + if (has_args) { // initialize `runtime.args__` + lbValue argc = {LLVMGetParam(p->value, 0), t_i32}; + lbValue argv = {LLVMGetParam(p->value, 1), t_ptr_cstring}; + LLVMSetValueName2(argc.value, "argc", 4); + LLVMSetValueName2(argv.value, "argv", 4); + argc = lb_emit_conv(p, argc, t_int); + lbAddr args = lb_addr(lb_find_runtime_value(p->module, str_lit("args__"))); + lb_fill_slice(p, args, argv, argc); + } + + lbValue startup_runtime_value = {startup_runtime->value, startup_runtime->type}; + lb_emit_call(p, startup_runtime_value, {}, ProcInlining_none); + + if (build_context.command_kind == Command_test) { + Type *t_Internal_Test = find_type_in_pkg(m->info, str_lit("testing"), str_lit("Internal_Test")); + Type *array_type = alloc_type_array(t_Internal_Test, m->info->testing_procedures.count); + Type *slice_type = alloc_type_slice(t_Internal_Test); + lbAddr all_tests_array_addr = lb_add_global_generated(p->module, array_type, {}); + lbValue all_tests_array = lb_addr_get_ptr(p, all_tests_array_addr); + + LLVMValueRef indices[2] = {}; + indices[0] = LLVMConstInt(lb_type(m, t_i32), 0, false); + + isize testing_proc_index = 0; + for (Entity 
*testing_proc : m->info->testing_procedures) { + String name = testing_proc->token.string; + + String pkg_name = {}; + if (testing_proc->pkg != nullptr) { + pkg_name = testing_proc->pkg->name; + } + lbValue v_pkg = lb_find_or_add_entity_string(m, pkg_name); + lbValue v_name = lb_find_or_add_entity_string(m, name); + lbValue v_proc = lb_find_procedure_value_from_entity(m, testing_proc); + + indices[1] = LLVMConstInt(lb_type(m, t_int), testing_proc_index++, false); + + LLVMValueRef vals[3] = {}; + vals[0] = v_pkg.value; + vals[1] = v_name.value; + vals[2] = v_proc.value; + GB_ASSERT(LLVMIsConstant(vals[0])); + GB_ASSERT(LLVMIsConstant(vals[1])); + GB_ASSERT(LLVMIsConstant(vals[2])); + + LLVMValueRef dst = LLVMConstInBoundsGEP2(llvm_addr_type(m, all_tests_array), all_tests_array.value, indices, gb_count_of(indices)); + LLVMValueRef src = llvm_const_named_struct(m, t_Internal_Test, vals, gb_count_of(vals)); + + LLVMBuildStore(p->builder, src, dst); + } + + lbAddr all_tests_slice = lb_add_local_generated(p, slice_type, true); + lb_fill_slice(p, all_tests_slice, + lb_array_elem(p, all_tests_array), + lb_const_int(m, t_int, m->info->testing_procedures.count)); + + + lbValue runner = lb_find_package_value(m, str_lit("testing"), str_lit("runner")); + + auto args = array_make(heap_allocator(), 1); + args[0] = lb_addr_load(p, all_tests_slice); + lb_emit_call(p, runner, args); + } else { + if (m->info->entry_point != nullptr) { + lbValue entry_point = lb_find_procedure_value_from_entity(m, m->info->entry_point); + lb_emit_call(p, entry_point, {}, ProcInlining_no_inline); + } + } + + + if (call_cleanup) { + lbValue cleanup_runtime_value = lb_find_runtime_value(m, str_lit("_cleanup_runtime")); + lb_emit_call(p, cleanup_runtime_value, {}, ProcInlining_none); + } + + + if (is_dll_main) { + LLVMBuildRet(p->builder, LLVMConstInt(lb_type(m, t_i32), 1, false)); + } else { + LLVMBuildRet(p->builder, LLVMConstInt(lb_type(m, t_i32), 0, false)); + } + + lb_end_procedure_body(p); + + + 
LLVMSetLinkage(p->value, LLVMExternalLinkage); + if (is_arch_wasm()) { + lb_set_wasm_export_attributes(p->value, p->name); + } + + + if (!m->debug_builder && LLVMVerifyFunction(p->value, LLVMReturnStatusAction)) { + gb_printf_err("LLVM CODE GEN FAILED FOR PROCEDURE: %s\n", "main"); + LLVMDumpValue(p->value); + gb_printf_err("\n\n\n\n"); + LLVMVerifyFunction(p->value, LLVMAbortProcessAction); + } + + lb_run_function_pass_manager(default_function_pass_manager, p); + return p; +} + gb_internal void lb_generate_procedure(lbModule *m, lbProcedure *p) { if (p->is_done) { return; @@ -1564,25 +1801,6 @@ gb_internal void lb_generate_procedure(lbModule *m, lbProcedure *p) { } -gb_internal WORKER_TASK_PROC(lb_generate_procedures_worker_proc) { - lbModule *m = cast(lbModule *)data; - for (isize i = 0; i < m->procedures_to_generate.count; i++) { - lbProcedure *p = m->procedures_to_generate[i]; - lb_generate_procedure(p->module, p); - } - return 0; -} - -gb_internal WORKER_TASK_PROC(lb_generate_missing_procedures_to_check_worker_proc) { - lbModule *m = cast(lbModule *)data; - for (isize i = 0; i < m->missing_procedures_to_check.count; i++) { - lbProcedure *p = m->missing_procedures_to_check[i]; - debugf("Generate missing procedure: %.*s\n", LIT(p->name)); - lb_generate_procedure(m, p); - } - return 0; -} - gb_internal bool lb_generate_code(lbGenerator *gen) { TIME_SECTION("LLVM Initializtion"); @@ -2064,82 +2282,14 @@ gb_internal bool lb_generate_code(lbGenerator *gen) { } TIME_SECTION("LLVM Global Procedures and Types"); - for (Entity *e : info->entities) { - String name = e->token.string; - Scope * scope = e->scope; - - if ((scope->flags & ScopeFlag_File) == 0) { - continue; - } - - Scope *package_scope = scope->parent; - GB_ASSERT(package_scope->flags & ScopeFlag_Pkg); - - switch (e->kind) { - case Entity_Variable: - // NOTE(bill): Handled above as it requires a specific load order - continue; - case Entity_ProcGroup: - continue; - - case Entity_TypeName: - case 
Entity_Procedure: - break; - case Entity_Constant: - if (build_context.ODIN_DEBUG) { - add_debug_info_for_global_constant_from_entity(gen, e); - } - break; - } - - bool polymorphic_struct = false; - if (e->type != nullptr && e->kind == Entity_TypeName) { - Type *bt = base_type(e->type); - if (bt->kind == Type_Struct) { - polymorphic_struct = is_type_polymorphic(bt); - } - } - - if (!polymorphic_struct && !ptr_set_exists(min_dep_set, e)) { - // NOTE(bill): Nothing depends upon it so doesn't need to be built - continue; - } - - lbModule *m = &gen->default_module; - if (USE_SEPARATE_MODULES) { - m = lb_pkg_module(gen, e->pkg); - } - - String mangled_name = lb_get_entity_name(m, e); - - switch (e->kind) { - case Entity_TypeName: - lb_type(m, e->type); - break; - case Entity_Procedure: - { - lbProcedure *p = lb_create_procedure(m, e); - array_add(&m->procedures_to_generate, p); - } - break; - } - } + lb_create_global_procedures_and_types(gen, info); if (gen->modules.entries.count <= 1) { do_threading = false; } TIME_SECTION("LLVM Procedure Generation"); - for (auto const &entry : gen->modules) { - lbModule *m = entry.value; - if (do_threading) { - thread_pool_add_task(lb_generate_procedures_worker_proc, m); - } else { - lb_generate_procedures_worker_proc(m); - } - } - - thread_pool_wait(); + lb_generate_procedures(gen, do_threading); if (build_context.command_kind == Command_test && !already_has_entry_point) { TIME_SECTION("LLVM main"); @@ -2147,17 +2297,7 @@ gb_internal bool lb_generate_code(lbGenerator *gen) { } TIME_SECTION("LLVM Procedure Generation (missing)"); - for (auto const &entry : gen->modules) { - lbModule *m = entry.value; - // NOTE(bill): procedures may be added during generation - if (do_threading) { - thread_pool_add_task(lb_generate_missing_procedures_to_check_worker_proc, m); - } else { - lb_generate_missing_procedures_to_check_worker_proc(m); - } - } - - thread_pool_wait(); + lb_generate_missing_procedures(gen, do_threading); if (gen->objc_names) { 
TIME_SECTION("Finalize objc names"); @@ -2166,48 +2306,28 @@ gb_internal bool lb_generate_code(lbGenerator *gen) { if (build_context.ODIN_DEBUG) { TIME_SECTION("LLVM Debug Info Complete Types and Finalize"); - for (auto const &entry : gen->modules) { - lbModule *m = entry.value; - if (m->debug_builder != nullptr) { - lb_debug_complete_types(m); - LLVMDIBuilderFinalize(m->debug_builder); - } - } + lb_debug_info_complete_types_and_finalize(gen); } - isize non_empty_module_count = 0; - for (auto const &entry : gen->modules) { - lbModule *m = entry.value; - if (!lb_is_module_empty(m)) { - non_empty_module_count += 1; + if (do_threading) { + isize non_empty_module_count = 0; + for (auto const &entry : gen->modules) { + lbModule *m = entry.value; + if (!lb_is_module_empty(m)) { + non_empty_module_count += 1; + } + } + if (non_empty_module_count <= 1) { + do_threading = false; } - } - if (non_empty_module_count <= 1) { - do_threading = false; } TIME_SECTION("LLVM Function Pass"); - for (auto const &entry : gen->modules) { - lbModule *m = entry.value; - lb_llvm_function_pass_per_module(m); - } - thread_pool_wait(); + lb_llvm_function_passes(gen); TIME_SECTION("LLVM Module Pass"); - for (auto const &entry : gen->modules) { - lbModule *m = entry.value; - auto wd = gb_alloc_item(permanent_allocator(), lbLLVMModulePassWorkerData); - wd->m = m; - wd->target_machine = m->target_machine; - - if (do_threading) { - thread_pool_add_task(lb_llvm_module_pass_worker_proc, wd); - } else { - lb_llvm_module_pass_worker_proc(wd); - } - } - thread_pool_wait(); + lb_llvm_module_passes(gen, do_threading); TIME_SECTION("LLVM Module Verification"); @@ -2220,25 +2340,10 @@ gb_internal bool lb_generate_code(lbGenerator *gen) { code_gen_file_type = LLVMAssemblyFile; } - - for (auto const &entry : gen->modules) { - lbModule *m = entry.value; - if (LLVMVerifyModule(m->mod, LLVMReturnStatusAction, &llvm_error)) { - gb_printf_err("LLVM Error:\n%s\n", llvm_error); - if (build_context.keep_temp_files) { 
- TIME_SECTION("LLVM Print Module to File"); - String filepath_ll = lb_filepath_ll_for_module(m); - if (LLVMPrintModuleToFile(m->mod, cast(char const *)filepath_ll.text, &llvm_error)) { - gb_printf_err("LLVM Error: %s\n", llvm_error); - gb_exit(1); - return false; - } - } - gb_exit(1); - return false; - } + if (!lb_llvm_module_verification(gen, &llvm_error)) { + return false; } - llvm_error = nullptr; + if (build_context.keep_temp_files || build_context.build_mode == BuildMode_LLVM_IR) { TIME_SECTION("LLVM Print Module to File"); @@ -2264,17 +2369,7 @@ gb_internal bool lb_generate_code(lbGenerator *gen) { } TIME_SECTION("LLVM Add Foreign Library Paths"); - - for (auto const &entry : gen->modules) { - lbModule *m = entry.value; - for (Entity *e : m->info->required_foreign_imports_through_force) { - lb_add_foreign_library_path(m, e); - } - - if (lb_is_module_empty(m)) { - continue; - } - } + lb_add_foreign_library_paths(gen); TIME_SECTION("LLVM Object Generation"); @@ -2282,50 +2377,8 @@ gb_internal bool lb_generate_code(lbGenerator *gen) { gb_printf_err("LLVM object generation has been ignored!\n"); return false; } - - if (do_threading) { - for (auto const &entry : gen->modules) { - lbModule *m = entry.value; - if (lb_is_module_empty(m)) { - continue; - } - - String filepath_ll = lb_filepath_ll_for_module(m); - String filepath_obj = lb_filepath_obj_for_module(m); - array_add(&gen->output_object_paths, filepath_obj); - array_add(&gen->output_temp_paths, filepath_ll); - - auto *wd = gb_alloc_item(permanent_allocator(), lbLLVMEmitWorker); - wd->target_machine = m->target_machine; - wd->code_gen_file_type = code_gen_file_type; - wd->filepath_obj = filepath_obj; - wd->m = m; - thread_pool_add_task(lb_llvm_emit_worker_proc, wd); - } - - thread_pool_wait(&global_thread_pool); - } else { - for (auto const &entry : gen->modules) { - lbModule *m = entry.value; - if (lb_is_module_empty(m)) { - continue; - } - - String filepath_obj = lb_filepath_obj_for_module(m); - 
array_add(&gen->output_object_paths, filepath_obj); - - String short_name = remove_directory_from_path(filepath_obj); - gbString section_name = gb_string_make(heap_allocator(), "LLVM Generate Object: "); - section_name = gb_string_append_length(section_name, short_name.text, short_name.len); - - TIME_SECTION_WITH_LEN(section_name, gb_string_length(section_name)); - - if (LLVMTargetMachineEmitToFile(m->target_machine, m->mod, cast(char *)filepath_obj.text, code_gen_file_type, &llvm_error)) { - gb_printf_err("LLVM Error: %s\n", llvm_error); - gb_exit(1); - return false; - } - } + if (!lb_llvm_object_generation(gen, code_gen_file_type, do_threading)) { + return false; } gb_sort_array(gen->foreign_libraries.data, gen->foreign_libraries.count, foreign_library_cmp); diff --git a/src/llvm_backend.hpp b/src/llvm_backend.hpp index 0e3a38fd3..98b657256 100644 --- a/src/llvm_backend.hpp +++ b/src/llvm_backend.hpp @@ -37,7 +37,7 @@ #endif #if LLVM_VERSION_MAJOR > 12 || (LLVM_VERSION_MAJOR == 12 && LLVM_VERSION_MINOR >= 0 && LLVM_VERSION_PATCH > 0) -#define ODIN_LLVM_MINIMUM_VERSION_12 1 +#define ODIN_LLVM_MINIMUM_VERSION_12 0 #else #define ODIN_LLVM_MINIMUM_VERSION_12 0 #endif From 8ef406324bd500cfd9f3d857e3a5d51adce33374 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Thu, 5 Jan 2023 17:26:51 +0000 Subject: [PATCH 71/78] Multi thread more of the backend where possible --- src/llvm_backend.cpp | 126 +++++++++++++++++++++++------------ src/llvm_backend.hpp | 1 + src/llvm_backend_general.cpp | 1 + 3 files changed, 85 insertions(+), 43 deletions(-) diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp index 6109ca247..e959b4741 100644 --- a/src/llvm_backend.cpp +++ b/src/llvm_backend.cpp @@ -1159,7 +1159,24 @@ gb_internal lbProcedure *lb_create_startup_runtime(lbModule *main_module, lbProc return p; } -gb_internal void lb_create_global_procedures_and_types(lbGenerator *gen, CheckerInfo *info) { +gb_internal WORKER_TASK_PROC(lb_generate_procedures_and_types_per_module) { + 
lbModule *m = cast(lbModule *)data; + for (Entity *e : m->global_procedures_and_types_to_create) { + String mangled_name = lb_get_entity_name(m, e); + + switch (e->kind) { + case Entity_TypeName: + lb_type(m, e->type); + break; + case Entity_Procedure: + array_add(&m->procedures_to_generate, lb_create_procedure(m, e)); + break; + } + } + return 0; +} + +gb_internal void lb_create_global_procedures_and_types(lbGenerator *gen, CheckerInfo *info, bool do_threading) { auto *min_dep_set = &info->minimum_dependency_set; for (Entity *e : info->entities) { @@ -1208,20 +1225,19 @@ gb_internal void lb_create_global_procedures_and_types(lbGenerator *gen, Checker m = lb_pkg_module(gen, e->pkg); } - String mangled_name = lb_get_entity_name(m, e); + array_add(&m->global_procedures_and_types_to_create, e); + } - switch (e->kind) { - case Entity_TypeName: - lb_type(m, e->type); - break; - case Entity_Procedure: - { - lbProcedure *p = lb_create_procedure(m, e); - array_add(&m->procedures_to_generate, p); - } - break; + for (auto const &entry : gen->modules) { + lbModule *m = entry.value; + if (do_threading) { + thread_pool_add_task(lb_generate_procedures_and_types_per_module, m); + } else { + lb_generate_procedures_and_types_per_module(m); } } + + thread_pool_wait(); } gb_internal void lb_generate_procedure(lbModule *m, lbProcedure *p); @@ -1277,7 +1293,8 @@ gb_internal void lb_llvm_function_pass_per_function_internal(lbModule *module, l lb_run_function_pass_manager(pass_manager, p); } -gb_internal void lb_llvm_function_pass_per_module(lbModule *m) { +gb_internal WORKER_TASK_PROC(lb_llvm_function_pass_per_module) { + lbModule *m = cast(lbModule *)data; { GB_ASSERT(m->function_pass_managers[lbFunctionPassManager_default] == nullptr); @@ -1354,6 +1371,8 @@ gb_internal void lb_llvm_function_pass_per_module(lbModule *m) { lbProcedure *p = entry.value; lb_llvm_function_pass_per_function_internal(m, p); } + + return 0; } @@ -1432,10 +1451,14 @@ gb_internal void 
lb_debug_info_complete_types_and_finalize(lbGenerator *gen) { } } -gb_internal void lb_llvm_function_passes(lbGenerator *gen) { +gb_internal void lb_llvm_function_passes(lbGenerator *gen, bool do_threading) { for (auto const &entry : gen->modules) { lbModule *m = entry.value; - lb_llvm_function_pass_per_module(m); + if (do_threading) { + thread_pool_add_task(lb_llvm_function_pass_per_module, m); + } else { + lb_llvm_function_pass_per_module(m); + } } thread_pool_wait(); } @@ -1523,25 +1546,41 @@ gb_internal String lb_filepath_obj_for_module(lbModule *m) { return concatenate_strings(permanent_allocator(), path, ext); } -gb_internal bool lb_llvm_module_verification(lbGenerator *gen, char **llvm_error_ptr) { +gb_internal WORKER_TASK_PROC(lb_llvm_module_verification_worker_proc) { + char *llvm_error = nullptr; + defer (LLVMDisposeMessage(llvm_error)); + lbModule *m = cast(lbModule *)data; + if (LLVMVerifyModule(m->mod, LLVMReturnStatusAction, &llvm_error)) { + gb_printf_err("LLVM Error:\n%s\n", llvm_error); + if (build_context.keep_temp_files) { + TIME_SECTION("LLVM Print Module to File"); + String filepath_ll = lb_filepath_ll_for_module(m); + if (LLVMPrintModuleToFile(m->mod, cast(char const *)filepath_ll.text, &llvm_error)) { + gb_printf_err("LLVM Error: %s\n", llvm_error); + gb_exit(1); + return false; + } + } + gb_exit(1); + return 1; + } + return 0; +} + + +gb_internal bool lb_llvm_module_verification(lbGenerator *gen, bool do_threading) { for (auto const &entry : gen->modules) { lbModule *m = entry.value; - if (LLVMVerifyModule(m->mod, LLVMReturnStatusAction, llvm_error_ptr)) { - gb_printf_err("LLVM Error:\n%s\n", *llvm_error_ptr); - if (build_context.keep_temp_files) { - TIME_SECTION("LLVM Print Module to File"); - String filepath_ll = lb_filepath_ll_for_module(m); - if (LLVMPrintModuleToFile(m->mod, cast(char const *)filepath_ll.text, llvm_error_ptr)) { - gb_printf_err("LLVM Error: %s\n", *llvm_error_ptr); - gb_exit(1); - return false; - } + if (do_threading) { 
+ thread_pool_add_task(lb_llvm_module_verification_worker_proc, m); + } else { + if (lb_llvm_module_verification_worker_proc(m)) { + return false; } - gb_exit(1); - return false; } } - *llvm_error_ptr = nullptr; + thread_pool_wait(); + return true; } @@ -1558,7 +1597,12 @@ gb_internal void lb_add_foreign_library_paths(lbGenerator *gen) { } } -gb_internal bool lb_llvm_object_generation(lbGenerator *gen, LLVMCodeGenFileType code_gen_file_type, bool do_threading) { +gb_internal bool lb_llvm_object_generation(lbGenerator *gen, bool do_threading) { + LLVMCodeGenFileType code_gen_file_type = LLVMObjectFile; + if (build_context.build_mode == BuildMode_Assembly) { + code_gen_file_type = LLVMAssemblyFile; + } + char *llvm_error = nullptr; defer (LLVMDisposeMessage(llvm_error)); @@ -2281,13 +2325,13 @@ gb_internal bool lb_generate_code(lbGenerator *gen) { } } - TIME_SECTION("LLVM Global Procedures and Types"); - lb_create_global_procedures_and_types(gen, info); - if (gen->modules.entries.count <= 1) { do_threading = false; } + TIME_SECTION("LLVM Global Procedures and Types"); + lb_create_global_procedures_and_types(gen, info, do_threading); + TIME_SECTION("LLVM Procedure Generation"); lb_generate_procedures(gen, do_threading); @@ -2324,7 +2368,7 @@ gb_internal bool lb_generate_code(lbGenerator *gen) { TIME_SECTION("LLVM Function Pass"); - lb_llvm_function_passes(gen); + lb_llvm_function_passes(gen, do_threading); TIME_SECTION("LLVM Module Pass"); lb_llvm_module_passes(gen, do_threading); @@ -2332,18 +2376,14 @@ gb_internal bool lb_generate_code(lbGenerator *gen) { TIME_SECTION("LLVM Module Verification"); - llvm_error = nullptr; - defer (LLVMDisposeMessage(llvm_error)); - LLVMCodeGenFileType code_gen_file_type = LLVMObjectFile; - if (build_context.build_mode == BuildMode_Assembly) { - code_gen_file_type = LLVMAssemblyFile; - } - - if (!lb_llvm_module_verification(gen, &llvm_error)) { + if (!lb_llvm_module_verification(gen, do_threading)) { return false; } + llvm_error = 
nullptr; + defer (LLVMDisposeMessage(llvm_error)); + if (build_context.keep_temp_files || build_context.build_mode == BuildMode_LLVM_IR) { TIME_SECTION("LLVM Print Module to File"); @@ -2377,7 +2417,7 @@ gb_internal bool lb_generate_code(lbGenerator *gen) { gb_printf_err("LLVM object generation has been ignored!\n"); return false; } - if (!lb_llvm_object_generation(gen, code_gen_file_type, do_threading)) { + if (!lb_llvm_object_generation(gen, do_threading)) { return false; } diff --git a/src/llvm_backend.hpp b/src/llvm_backend.hpp index 98b657256..90dfbc311 100644 --- a/src/llvm_backend.hpp +++ b/src/llvm_backend.hpp @@ -163,6 +163,7 @@ struct lbModule { u32 nested_type_name_guid; Array procedures_to_generate; + Array global_procedures_and_types_to_create; lbProcedure *curr_procedure; diff --git a/src/llvm_backend_general.cpp b/src/llvm_backend_general.cpp index c09648825..dca8c829d 100644 --- a/src/llvm_backend_general.cpp +++ b/src/llvm_backend_general.cpp @@ -70,6 +70,7 @@ gb_internal void lb_init_module(lbModule *m, Checker *c) { map_init(&m->map_get_procs); map_init(&m->map_set_procs); array_init(&m->procedures_to_generate, a, 0, 1024); + array_init(&m->global_procedures_and_types_to_create, a, 0, 1024); array_init(&m->missing_procedures_to_check, a, 0, 16); map_init(&m->debug_values); array_init(&m->debug_incomplete_types, a, 0, 1024); From 3e8c63ad31791805ae6080a3e06e85d9e83e2f5d Mon Sep 17 00:00:00 2001 From: gingerBill Date: Tue, 10 Jan 2023 20:46:00 +0000 Subject: [PATCH 72/78] Add Odin check -threaded-checker test for windows --- .github/workflows/ci.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index dc2691d80..1307952ea 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -123,6 +123,11 @@ jobs: run: | call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat odin check examples/demo -vet + - name: Odin check -threaded-checker 
+ shell: cmd + run: | + call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat + odin check examples/demo -vet -threaded-checker timeout-minutes: 10 - name: Odin run shell: cmd From 9b47a5eddba71989c37935eaafbec4bc1cd125c1 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Wed, 11 Jan 2023 00:49:04 +0000 Subject: [PATCH 73/78] Fix macro issue --- src/llvm_backend.cpp | 2 +- src/llvm_backend.hpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp index e959b4741..fef222817 100644 --- a/src/llvm_backend.cpp +++ b/src/llvm_backend.cpp @@ -1851,7 +1851,7 @@ gb_internal bool lb_generate_code(lbGenerator *gen) { isize thread_count = gb_max(build_context.thread_count, 1); isize worker_count = thread_count-1; - LLVMBool do_threading = (LLVMIsMultithreaded() && USE_SEPARATE_MODULES && MULTITHREAD_OBJECT_GENERATION && worker_count > 0); + bool do_threading = !!(LLVMIsMultithreaded() && USE_SEPARATE_MODULES && MULTITHREAD_OBJECT_GENERATION && worker_count > 0); lbModule *default_module = &gen->default_module; CheckerInfo *info = gen->info; diff --git a/src/llvm_backend.hpp b/src/llvm_backend.hpp index 90dfbc311..de4deffd4 100644 --- a/src/llvm_backend.hpp +++ b/src/llvm_backend.hpp @@ -37,7 +37,7 @@ #endif #if LLVM_VERSION_MAJOR > 12 || (LLVM_VERSION_MAJOR == 12 && LLVM_VERSION_MINOR >= 0 && LLVM_VERSION_PATCH > 0) -#define ODIN_LLVM_MINIMUM_VERSION_12 0 +#define ODIN_LLVM_MINIMUM_VERSION_12 1 #else #define ODIN_LLVM_MINIMUM_VERSION_12 0 #endif From 6ec014e98066beeff6b95cac95bfda6c459a01a1 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Wed, 11 Jan 2023 17:27:06 +0000 Subject: [PATCH 74/78] Make `-threaded-checker` the default not (opt out with `-no-threaded-checker`) --- src/build_settings.cpp | 2 +- src/checker.cpp | 2 +- src/main.cpp | 16 +--------------- 3 files changed, 3 insertions(+), 17 deletions(-) diff --git a/src/build_settings.cpp b/src/build_settings.cpp index 
76a73b0e8..609a010de 100644 --- a/src/build_settings.cpp +++ b/src/build_settings.cpp @@ -298,7 +298,7 @@ struct BuildContext { bool linker_map_file; bool use_separate_modules; - bool threaded_checker; + bool no_threaded_checker; bool show_debug_messages; diff --git a/src/checker.cpp b/src/checker.cpp index a2ed73119..4e8d19016 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -5339,7 +5339,7 @@ gb_internal void check_procedure_bodies(Checker *c) { GB_ASSERT(c != nullptr); u32 thread_count = cast(u32)global_thread_pool.threads.count; - if (!build_context.threaded_checker) { + if (build_context.no_threaded_checker) { thread_count = 1; } diff --git a/src/main.cpp b/src/main.cpp index a7e5677e9..4e8dfaf75 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -617,7 +617,6 @@ enum BuildFlagKind { BuildFlag_NoEntryPoint, BuildFlag_UseLLD, BuildFlag_UseSeparateModules, - BuildFlag_ThreadedChecker, BuildFlag_NoThreadedChecker, BuildFlag_ShowDebugMessages, BuildFlag_Vet, @@ -793,7 +792,6 @@ gb_internal bool parse_build_flags(Array args) { add_flag(&build_flags, BuildFlag_NoEntryPoint, str_lit("no-entry-point"), BuildFlagParam_None, Command__does_check &~ Command_test); add_flag(&build_flags, BuildFlag_UseLLD, str_lit("lld"), BuildFlagParam_None, Command__does_build); add_flag(&build_flags, BuildFlag_UseSeparateModules, str_lit("use-separate-modules"), BuildFlagParam_None, Command__does_build); - add_flag(&build_flags, BuildFlag_ThreadedChecker, str_lit("threaded-checker"), BuildFlagParam_None, Command__does_check); add_flag(&build_flags, BuildFlag_NoThreadedChecker, str_lit("no-threaded-checker"), BuildFlagParam_None, Command__does_check); add_flag(&build_flags, BuildFlag_ShowDebugMessages, str_lit("show-debug-messages"), BuildFlagParam_None, Command_all); add_flag(&build_flags, BuildFlag_Vet, str_lit("vet"), BuildFlagParam_None, Command__does_check); @@ -1311,20 +1309,8 @@ gb_internal bool parse_build_flags(Array args) { case BuildFlag_UseSeparateModules: 
build_context.use_separate_modules = true; break; - case BuildFlag_ThreadedChecker: { - #if defined(DEFAULT_TO_THREADED_CHECKER) - gb_printf_err("-threaded-checker is the default on this platform\n"); - bad_flags = true; - #endif - build_context.threaded_checker = true; - break; - } case BuildFlag_NoThreadedChecker: { - #if !defined(DEFAULT_TO_THREADED_CHECKER) - gb_printf_err("-no-threaded-checker is the default on this platform\n"); - bad_flags = true; - #endif - build_context.threaded_checker = false; + build_context.no_threaded_checker = true; break; } case BuildFlag_ShowDebugMessages: From 3c7e45a46fc68426641047a540d4cb50b0fbd9c8 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Wed, 11 Jan 2023 17:45:18 +0000 Subject: [PATCH 75/78] Remove possible race condition in `type_size_of`/`type_align_of` --- src/types.cpp | 74 +++++++++++++++++++++++++-------------------------- 1 file changed, 36 insertions(+), 38 deletions(-) diff --git a/src/types.cpp b/src/types.cpp index 5ff6d7261..99f393cc5 100644 --- a/src/types.cpp +++ b/src/types.cpp @@ -3357,35 +3357,55 @@ gb_internal i64 type_size_of(Type *t) { if (t == nullptr) { return 0; } - // NOTE(bill): Always calculate the size when it is a Type_Basic - if (t->kind == Type_Named && t->cached_size >= 0) { + i64 size = -1; + if (t->kind == Type_Basic) { + GB_ASSERT_MSG(is_type_typed(t), "%s", type_to_string(t)); + switch (t->Basic.kind) { + case Basic_string: size = 2*build_context.word_size; break; + case Basic_cstring: size = build_context.word_size; break; + case Basic_any: size = 2*build_context.word_size; break; + case Basic_typeid: size = build_context.word_size; break; - } else if (t->kind != Type_Basic && t->cached_size >= 0) { - return t->cached_size; + case Basic_int: case Basic_uint: case Basic_uintptr: case Basic_rawptr: + size = build_context.word_size; + break; + default: + size = t->Basic.size; + break; + } + t->cached_size.store(size); + return size; + } else if (t->kind != Type_Named && t->cached_size >= 
0) { + return t->cached_size.load(); + } else { + TypePath path{}; + type_path_init(&path); + { + MUTEX_GUARD(&g_type_mutex); + size = type_size_of_internal(t, &path); + t->cached_size.store(size); + } + type_path_free(&path); + return size; } - TypePath path{}; - type_path_init(&path); - t->cached_size = type_size_of_internal(t, &path); - type_path_free(&path); - return t->cached_size; } gb_internal i64 type_align_of(Type *t) { if (t == nullptr) { return 1; } - // NOTE(bill): Always calculate the size when it is a Type_Basic - if (t->kind == Type_Named && t->cached_align >= 0) { - - } if (t->kind != Type_Basic && t->cached_align > 0) { - return t->cached_align; + if (t->kind != Type_Named && t->cached_align > 0) { + return t->cached_align.load(); } TypePath path{}; type_path_init(&path); - t->cached_align = type_align_of_internal(t, &path); + { + MUTEX_GUARD(&g_type_mutex); + t->cached_align.store(type_align_of_internal(t, &path)); + } type_path_free(&path); - return t->cached_align; + return t->cached_align.load(); } @@ -3417,8 +3437,6 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { } break; case Type_Array: { - // MUTEX_GUARD(&g_type_mutex); - Type *elem = t->Array.elem; bool pop = type_path_push(path, elem); if (path->failure) { @@ -3430,8 +3448,6 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { } case Type_EnumeratedArray: { - // MUTEX_GUARD(&g_type_mutex); - Type *elem = t->EnumeratedArray.elem; bool pop = type_path_push(path, elem); if (path->failure) { @@ -3451,8 +3467,6 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { case Type_Tuple: { - // MUTEX_GUARD(&g_type_mutex); - i64 max = 1; for_array(i, t->Tuple.variables) { i64 align = type_align_of_internal(t->Tuple.variables[i]->type, path); @@ -3476,8 +3490,6 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { return gb_max(t->Union.custom_align, 1); } - // MUTEX_GUARD(&g_type_mutex); - i64 max = 1; for_array(i, t->Union.variants) { 
Type *variant = t->Union.variants[i]; @@ -3503,8 +3515,6 @@ gb_internal i64 type_align_of_internal(Type *t, TypePath *path) { return 1; } - // MUTEX_GUARD(&g_type_mutex); - i64 max = 1; for_array(i, t->Struct.fields) { Type *field_type = t->Struct.fields[i]->type; @@ -3616,8 +3626,6 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { switch (t->kind) { case Type_Named: { - // MUTEX_GUARD(&g_type_mutex); - bool pop = type_path_push(path, t); if (path->failure) { return FAILURE_ALIGNMENT; @@ -3655,8 +3663,6 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { return build_context.word_size*2; case Type_Array: { - // MUTEX_GUARD(&g_type_mutex); - i64 count, align, size, alignment; count = t->Array.count; if (count == 0) { @@ -3672,8 +3678,6 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { } break; case Type_EnumeratedArray: { - // MUTEX_GUARD(&g_type_mutex); - i64 count, align, size, alignment; count = t->EnumeratedArray.count; if (count == 0) { @@ -3706,8 +3710,6 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { return (1 + 1 + 2)*build_context.word_size; case Type_Tuple: { - // MUTEX_GUARD(&g_type_mutex); - i64 count, align, size; count = t->Tuple.variables.count; if (count == 0) { @@ -3726,8 +3728,6 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { if (t->Union.variants.count == 0) { return 0; } - // MUTEX_GUARD(&g_type_mutex); - i64 align = type_align_of_internal(t, path); if (path->failure) { return FAILURE_SIZE; @@ -3765,8 +3765,6 @@ gb_internal i64 type_size_of_internal(Type *t, TypePath *path) { case Type_Struct: { - // MUTEX_GUARD(&g_type_mutex); - if (t->Struct.is_raw_union) { i64 count = t->Struct.fields.count; i64 align = type_align_of_internal(t, path); From 7124d541a132fc94b2c66c54bd73eb0d103ce3d3 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Wed, 11 Jan 2023 18:10:27 +0000 Subject: [PATCH 76/78] General optimizations --- src/check_type.cpp | 3 ++- src/checker.cpp | 
15 ++++++----- src/checker.hpp | 2 +- src/ptr_map.cpp | 36 +++++++++++++++++++++++++ src/types.cpp | 66 ++++++++++++++++++++++++++++++++++++++++------ 5 files changed, 105 insertions(+), 17 deletions(-) diff --git a/src/check_type.cpp b/src/check_type.cpp index fd4e965d4..0863af967 100644 --- a/src/check_type.cpp +++ b/src/check_type.cpp @@ -2405,7 +2405,8 @@ gb_internal Type *make_soa_struct_internal(CheckerContext *ctx, Ast *array_typ_e } soa_struct->Struct.soa_count = cast(i32)count; - scope = create_scope(ctx->info, ctx->scope, 8); + scope = create_scope(ctx->info, ctx->scope); + string_map_init(&scope->elements, 8); soa_struct->Struct.scope = scope; String params_xyzw[4] = { diff --git a/src/checker.cpp b/src/checker.cpp index 4e8d19016..473af7128 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -220,11 +220,9 @@ gb_internal DeclInfo *make_decl_info(Scope *scope, DeclInfo *parent) { -gb_internal Scope *create_scope(CheckerInfo *info, Scope *parent, isize init_elements_capacity=DEFAULT_SCOPE_CAPACITY) { +gb_internal Scope *create_scope(CheckerInfo *info, Scope *parent) { Scope *s = gb_alloc_item(permanent_allocator(), Scope); s->parent = parent; - string_map_init(&s->elements, init_elements_capacity); - ptr_set_init(&s->imported, 0); if (parent != nullptr && parent != builtin_pkg->scope) { Scope *prev_head_child = parent->head_child.exchange(s, std::memory_order_acq_rel); @@ -246,7 +244,8 @@ gb_internal Scope *create_scope_from_file(CheckerInfo *info, AstFile *f) { GB_ASSERT(f->pkg->scope != nullptr); isize init_elements_capacity = gb_max(DEFAULT_SCOPE_CAPACITY, 2*f->total_file_decl_count); - Scope *s = create_scope(info, f->pkg->scope, init_elements_capacity); + Scope *s = create_scope(info, f->pkg->scope); + string_map_init(&s->elements, init_elements_capacity); s->flags |= ScopeFlag_File; @@ -265,7 +264,8 @@ gb_internal Scope *create_scope_from_package(CheckerContext *c, AstPackage *pkg) } isize init_elements_capacity = gb_max(DEFAULT_SCOPE_CAPACITY, 
2*total_pkg_decl_count); - Scope *s = create_scope(c->info, builtin_pkg->scope, init_elements_capacity); + Scope *s = create_scope(c->info, builtin_pkg->scope); + string_map_init(&s->elements, init_elements_capacity); s->flags |= ScopeFlag_Pkg; s->pkg = pkg; @@ -1753,7 +1753,8 @@ gb_internal void add_type_info_type_internal(CheckerContext *c, Type *t) { add_type_info_dependency(c->info, c->decl, t); MUTEX_GUARD_BLOCK(&c->info->type_info_mutex) { - auto found = map_get(&c->info->type_info_map, t); + MapFindResult fr; + auto found = map_try_get(&c->info->type_info_map, t, &fr); if (found != nullptr) { // Types have already been added return; @@ -1777,7 +1778,7 @@ gb_internal void add_type_info_type_internal(CheckerContext *c, Type *t) { ti_index = c->info->type_info_types.count; array_add(&c->info->type_info_types, t); } - map_set(&c->checker->info.type_info_map, t, ti_index); + map_set_internal_from_try_get(&c->checker->info.type_info_map, t, ti_index, fr); if (prev) { // NOTE(bill): If a previous one exists already, no need to continue diff --git a/src/checker.hpp b/src/checker.hpp index 8b8819d97..806eb2e51 100644 --- a/src/checker.hpp +++ b/src/checker.hpp @@ -223,7 +223,7 @@ enum ScopeFlag : i32 { ScopeFlag_ContextDefined = 1<<16, }; -enum { DEFAULT_SCOPE_CAPACITY = 29 }; +enum { DEFAULT_SCOPE_CAPACITY = 32 }; struct Scope { Ast * node; diff --git a/src/ptr_map.cpp b/src/ptr_map.cpp index 89d2cbf9d..598904906 100644 --- a/src/ptr_map.cpp +++ b/src/ptr_map.cpp @@ -192,6 +192,26 @@ gb_internal void map_rehash(PtrMap *h, isize new_count) { template gb_internal V *map_get(PtrMap *h, K key) { + MapIndex hash_index = MAP_SENTINEL; + MapIndex entry_prev = MAP_SENTINEL; + MapIndex entry_index = MAP_SENTINEL; + if (h->hashes.count != 0) { + u32 hash = ptr_map_hash_key(key); + hash_index = cast(MapIndex)(hash & (h->hashes.count-1)); + entry_index = h->hashes.data[hash_index]; + while (entry_index != MAP_SENTINEL) { + auto *entry = &h->entries.data[entry_index]; + if 
(entry->key == key) { + return &entry->value; + } + entry_prev = entry_index; + entry_index = entry->next; + } + } + return nullptr; +} +template +gb_internal V *map_try_get(PtrMap *h, K key, MapFindResult *fr_) { MapFindResult fr = {MAP_SENTINEL, MAP_SENTINEL, MAP_SENTINEL}; if (h->hashes.count != 0) { u32 hash = ptr_map_hash_key(key); @@ -206,9 +226,25 @@ gb_internal V *map_get(PtrMap *h, K key) { fr.entry_index = entry->next; } } + if (h->hashes.count == 0 || map__full(h)) { + map_grow(h); + } + if (fr_) *fr_ = fr; return nullptr; } + +template +gb_internal void map_set_internal_from_try_get(PtrMap *h, K key, V const &value, MapFindResult const &fr) { + MapIndex index = map__add_entry(h, key); + if (fr.entry_prev != MAP_SENTINEL) { + h->entries.data[fr.entry_prev].next = index; + } else { + h->hashes.data[fr.hash_index] = index; + } + h->entries.data[index].value = value; +} + template gb_internal V &map_must_get(PtrMap *h, K key) { V *ptr = map_get(h, key); diff --git a/src/types.cpp b/src/types.cpp index 99f393cc5..69c1ebe68 100644 --- a/src/types.cpp +++ b/src/types.cpp @@ -2528,14 +2528,6 @@ gb_internal bool lookup_subtype_polymorphic_selection(Type *dst, Type *src, Sele gb_internal bool are_types_identical_internal(Type *x, Type *y, bool check_tuple_names); gb_internal bool are_types_identical(Type *x, Type *y) { - return are_types_identical_internal(x, y, false); -} -gb_internal bool are_types_identical_unique_tuples(Type *x, Type *y) { - return are_types_identical_internal(x, y, true); -} - - -gb_internal bool are_types_identical_internal(Type *x, Type *y, bool check_tuple_names) { if (x == y) { return true; } @@ -2561,6 +2553,64 @@ gb_internal bool are_types_identical_internal(Type *x, Type *y, bool check_tuple return false; } + return are_types_identical_internal(x, y, false); +} +gb_internal bool are_types_identical_unique_tuples(Type *x, Type *y) { + if (x == y) { + return true; + } + + if (!x | !y) { + return false; + } + + if (x->kind == Type_Named) 
{ + Entity *e = x->Named.type_name; + if (e->TypeName.is_type_alias) { + x = x->Named.base; + } + } + if (y->kind == Type_Named) { + Entity *e = y->Named.type_name; + if (e->TypeName.is_type_alias) { + y = y->Named.base; + } + } + if (x->kind != y->kind) { + return false; + } + + return are_types_identical_internal(x, y, true); +} + + +gb_internal bool are_types_identical_internal(Type *x, Type *y, bool check_tuple_names) { + if (x == y) { + return true; + } + + if (!x | !y) { + return false; + } + + #if 0 + if (x->kind == Type_Named) { + Entity *e = x->Named.type_name; + if (e->TypeName.is_type_alias) { + x = x->Named.base; + } + } + if (y->kind == Type_Named) { + Entity *e = y->Named.type_name; + if (e->TypeName.is_type_alias) { + y = y->Named.base; + } + } + if (x->kind != y->kind) { + return false; + } + #endif + switch (x->kind) { case Type_Generic: return are_types_identical(x->Generic.specialized, y->Generic.specialized); From 7f2ef2ac67ca226c365b19b53f2442a2d3002f0e Mon Sep 17 00:00:00 2001 From: gingerBill Date: Wed, 11 Jan 2023 21:52:04 +0000 Subject: [PATCH 77/78] Move check for type info above --- src/checker.cpp | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/checker.cpp b/src/checker.cpp index 473af7128..053bb0e17 100644 --- a/src/checker.cpp +++ b/src/checker.cpp @@ -1735,10 +1735,6 @@ gb_internal void add_type_info_type(CheckerContext *c, Type *t) { if (build_context.disallow_rtti) { return; } - add_type_info_type_internal(c, t); -} - -gb_internal void add_type_info_type_internal(CheckerContext *c, Type *t) { if (t == nullptr) { return; } @@ -1750,6 +1746,14 @@ gb_internal void add_type_info_type_internal(CheckerContext *c, Type *t) { return; } + add_type_info_type_internal(c, t); +} + +gb_internal void add_type_info_type_internal(CheckerContext *c, Type *t) { + if (t == nullptr) { + return; + } + add_type_info_dependency(c->info, c->decl, t); MUTEX_GUARD_BLOCK(&c->info->type_info_mutex) { From 
d7d6608142c8e169a7856c9e5965619809653903 Mon Sep 17 00:00:00 2001 From: gingerBill Date: Wed, 11 Jan 2023 22:08:25 +0000 Subject: [PATCH 78/78] Remove unneeded CI stage --- .github/workflows/ci.yml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1307952ea..dc2691d80 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -123,11 +123,6 @@ jobs: run: | call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat odin check examples/demo -vet - - name: Odin check -threaded-checker - shell: cmd - run: | - call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat - odin check examples/demo -vet -threaded-checker timeout-minutes: 10 - name: Odin run shell: cmd