Add mutex guards for signature scopes

This commit is contained in:
gingerBill
2022-01-10 14:50:28 +00:00
parent 6f3e450c50
commit 7cc265e14c
7 changed files with 68 additions and 14 deletions

View File

@@ -77,15 +77,19 @@ template <typename T> Slice<T> slice_from_array(Array<T> const &a);
template <typename T>
Slice<T> slice_make(gbAllocator const &allocator, isize count) {
	// Allocate a zero-initialized slice of `count` elements of T from `allocator`.
	// Returns the slice by value; ownership of the backing array passes to the caller.
	GB_ASSERT(count >= 0);
	Slice<T> s = {};
	s.data = gb_alloc_array(allocator, T, count);
	// A size-0 allocation may legitimately return nullptr (the heap allocator
	// returns NULL for size == 0), so only require a non-null pointer when
	// elements were actually requested.
	GB_ASSERT(count == 0 || s.data != nullptr);
	s.count = count;
	return s;
}
template <typename T>
void slice_init(Slice<T> *s, gbAllocator const &allocator, isize count) {
	// In-place counterpart of slice_make: (re)initializes *s with a freshly
	// allocated, zero-initialized array of `count` elements of T.
	GB_ASSERT(count >= 0);
	s->data = gb_alloc_array(allocator, T, count);
	// Mirror slice_make: a count of 0 may yield a nullptr from the allocator
	// (heap allocator returns NULL for size == 0); that is not a failure.
	GB_ASSERT(count == 0 || s->data != nullptr);
	s->count = count;
}

View File

@@ -1286,7 +1286,7 @@ void check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *decl, Type *ty
using_entities.allocator = heap_allocator();
defer (array_free(&using_entities));
{
MUTEX_GUARD_BLOCK(ctx->scope->mutex) {
if (type->Proc.param_count > 0) {
TypeTuple *params = &type->Proc.params->Tuple;
for_array(i, params->variables) {

View File

@@ -4021,10 +4021,12 @@ void check_did_you_mean_scope(String const &name, Scope *scope) {
DidYouMeanAnswers d = did_you_mean_make(heap_allocator(), scope->elements.entries.count, name);
defer (did_you_mean_destroy(&d));
mutex_lock(&scope->mutex);
for_array(i, scope->elements.entries) {
Entity *e = scope->elements.entries[i].value;
did_you_mean_append(&d, e->token.string);
}
mutex_unlock(&scope->mutex);
check_did_you_mean_print(&d);
}

View File

@@ -607,7 +607,7 @@ bool check_using_stmt_entity(CheckerContext *ctx, AstUsingStmt *us, Ast *expr, b
case Entity_ImportName: {
Scope *scope = e->ImportName.scope;
for_array(i, scope->elements.entries) {
MUTEX_GUARD_BLOCK(scope->mutex) for_array(i, scope->elements.entries) {
String name = scope->elements.entries[i].key.string;
Entity *decl = scope->elements.entries[i].value;
if (!is_entity_exported(decl)) continue;

View File

@@ -622,7 +622,7 @@ void check_scope_usage(Checker *c, Scope *scope) {
Array<VettedEntity> vetted_entities = {};
array_init(&vetted_entities, heap_allocator());
for_array(i, scope->elements.entries) {
MUTEX_GUARD_BLOCK(scope->mutex) for_array(i, scope->elements.entries) {
Entity *e = scope->elements.entries[i].value;
if (e == nullptr) continue;
VettedEntity ve = {};

View File

@@ -325,18 +325,32 @@ GB_ALLOCATOR_PROC(heap_allocator_proc) {
// TODO(bill): Thoroughly test!
switch (type) {
#if defined(GB_COMPILER_MSVC)
case gbAllocation_Alloc: {
isize aligned_size = align_formula_isize(size, alignment);
// TODO(bill): Make sure this is aligned correctly
ptr = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, aligned_size);
} break;
case gbAllocation_Free:
HeapFree(GetProcessHeap(), 0, old_memory);
case gbAllocation_Alloc:
if (size == 0) {
return NULL;
} else {
isize aligned_size = align_formula_isize(size, alignment);
// TODO(bill): Make sure this is aligned correctly
ptr = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, aligned_size);
}
break;
case gbAllocation_Free:
if (old_memory != nullptr) {
HeapFree(GetProcessHeap(), 0, old_memory);
}
break;
case gbAllocation_Resize:
if (old_memory != nullptr && size > 0) {
isize aligned_size = align_formula_isize(size, alignment);
ptr = HeapReAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, old_memory, aligned_size);
} else if (old_memory != nullptr) {
HeapFree(GetProcessHeap(), 0, old_memory);
} else if (size != 0) {
isize aligned_size = align_formula_isize(size, alignment);
// TODO(bill): Make sure this is aligned correctly
ptr = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, aligned_size);
}
break;
case gbAllocation_Resize: {
isize aligned_size = align_formula_isize(size, alignment);
ptr = HeapReAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, old_memory, aligned_size);
} break;
#elif defined(GB_SYSTEM_LINUX)
// TODO(bill): *nix version that's decent
case gbAllocation_Alloc: {

View File

@@ -68,6 +68,40 @@ void yield_thread(void);
void yield_process(void);
// RAII scope guard for either a BlockingMutex or a RecursiveMutex.
// Exactly one of `bm`/`rm` is set by the chosen constructor; the destructor
// unlocks whichever one is non-null. `operator bool` always yields true so the
// guard can live in an `if`-condition (see MUTEX_GUARD_BLOCK below).
struct MutexGuard {
	MutexGuard() = delete;
	MutexGuard(MutexGuard const &) = delete;
	MutexGuard(BlockingMutex *bm) : bm{bm} {
		mutex_lock(this->bm);
	}
	MutexGuard(RecursiveMutex *rm) : rm{rm} {
		mutex_lock(this->rm);
	}
	MutexGuard(BlockingMutex &bm) : bm{&bm} {
		mutex_lock(this->bm);
	}
	MutexGuard(RecursiveMutex &rm) : rm{&rm} {
		mutex_lock(this->rm);
	}
	~MutexGuard() {
		// Relies on the unused member being null; see the initializers below.
		if (this->bm) {
			mutex_unlock(this->bm);
		} else if (this->rm) {
			mutex_unlock(this->rm);
		}
	}
	operator bool() const { return true; }

	// Default member initializers are essential: each constructor sets only
	// one of these, and without `= nullptr` the other would be read
	// uninitialized in the destructor (undefined behaviour; a guard built
	// from a RecursiveMutex could "unlock" through a garbage bm pointer).
	BlockingMutex  *bm = nullptr;
	RecursiveMutex *rm = nullptr;
};
// Runs the following statement/block with `m` locked for its duration.
#define MUTEX_GUARD_BLOCK(m) if (MutexGuard GB_DEFER_3(_mutex_guard_) = m)
// Locks `m` for the remainder of the enclosing scope.
#define MUTEX_GUARD(m) MutexGuard GB_DEFER_3(_mutex_guard_) = m
#if defined(GB_SYSTEM_WINDOWS)
struct BlockingMutex {
SRWLOCK srwlock;