Merge pull request #6215 from odin-lang/bill/fix-data-races-2026-02

Fix numerous data races
This commit is contained in:
gingerBill
2026-02-02 11:37:19 +00:00
committed by GitHub
12 changed files with 92 additions and 57 deletions

View File

@@ -534,12 +534,12 @@ gb_internal bool check_builtin_objc_procedure(CheckerContext *c, Operand *operan
return false;
}
if (ident.entity->kind != Entity_Procedure) {
if (ident.entity.load()->kind != Entity_Procedure) {
gbString e = expr_to_string(handler_node);
ERROR_BLOCK();
error(handler.expr, "'%.*s' expected a direct reference to a procedure", LIT(builtin_name));
if(ident.entity->kind == Entity_Variable) {
if(ident.entity.load()->kind == Entity_Variable) {
error_line("\tSuggestion: Variables referencing a procedure are not allowed, they are not a direct procedure reference.");
} else {
error_line("\tSuggestion: Ensure '%s' is not a runtime-evaluated expression.", e); // NOTE(harold): Is this case possible to hit?

View File

@@ -2155,7 +2155,7 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de
rw_mutex_unlock(&ctx->scope->mutex);
bool where_clause_ok = evaluate_where_clauses(ctx, nullptr, decl->scope, &decl->proc_lit->ProcLit.where_clauses, !decl->where_clauses_evaluated);
bool where_clause_ok = evaluate_where_clauses(ctx, nullptr, decl->scope, &decl->proc_lit->ProcLit.where_clauses, !decl->where_clauses_evaluated.load(std::memory_order_relaxed));
if (!where_clause_ok) {
// NOTE(bill, 2019-08-31): Don't check the body as the where clauses failed
return false;
@@ -2173,15 +2173,15 @@ gb_internal bool check_proc_body(CheckerContext *ctx_, Token token, DeclInfo *de
}
GB_ASSERT(decl->proc_checked_state != ProcCheckedState_Checked);
if (decl->defer_use_checked) {
if (decl->defer_use_checked.load(std::memory_order_relaxed)) {
GB_ASSERT(is_type_polymorphic(type, true));
error(token, "Defer Use Checked: %.*s", LIT(decl->entity.load()->token.string));
GB_ASSERT(decl->defer_use_checked == false);
GB_ASSERT(decl->defer_use_checked.load(std::memory_order_relaxed) == false);
}
check_stmt_list(ctx, bs->stmts, Stmt_CheckScopeDecls);
decl->defer_use_checked = true;
decl->defer_use_checked.store(true, std::memory_order_relaxed);
for (Ast *stmt : bs->stmts) {
if (stmt->kind == Ast_ValueDecl) {

View File

@@ -1792,7 +1792,22 @@ gb_internal void add_untyped(CheckerContext *c, Ast *expr, AddressingMode mode,
check_set_expr_info(c, expr, mode, type, value);
}
gb_internal void add_type_and_value(CheckerContext *ctx, Ast *expr, AddressingMode mode, Type *type, ExactValue const &value, bool use_mutex) {
// Striped mutex pool used to serialize concurrent writes to an Ast node's
// type-and-value slot. Striping by node address bounds the number of mutexes
// while keeping contention low across checker threads.
//
// Each stripe is aligned to a cache line so two stripes never share a line
// (no false sharing between threads locking different stripes).
// NOTE: `alignas` alone already guarantees that sizeof(TypeAndValueMutexStripes)
// is rounded up to a multiple of GB_CACHE_LINE_SIZE, so no explicit padding
// member is needed; a `u8 padding[GB_CACHE_LINE_SIZE - gb_size_of(BlockingMutex)]`
// array would become ill-formed (zero- or negative-sized) if BlockingMutex
// ever grew to a full cache line or beyond.
struct alignas(GB_CACHE_LINE_SIZE) TypeAndValueMutexStripes {
	BlockingMutex mutex;
};
enum { TypeAndValueMutexStripes_COUNT = 128 }; // power of two: `% COUNT` reduces to a mask
gb_global TypeAndValueMutexStripes tav_mutex_stripes[TypeAndValueMutexStripes_COUNT];

// Returns the stripe mutex guarding `node`'s type-and-value data.
// The mapping is deterministic: the same node always yields the same mutex;
// distinct nodes may share one (which is safe, merely a contention cost).
gb_internal BlockingMutex *tav_mutex_for_node(Ast *node) {
	GB_ASSERT(node != nullptr);
	uintptr h = cast(uintptr)node;
	// Ast nodes are heap-allocated with alignment, so the low address bits
	// carry little entropy; fold higher bits in before reducing to a stripe.
	h ^= h >> 6;
	return &tav_mutex_stripes[h % TypeAndValueMutexStripes_COUNT].mutex;
}
gb_internal void add_type_and_value(CheckerContext *ctx, Ast *expr, AddressingMode mode, Type *type, ExactValue const &value) {
if (expr == nullptr) {
return;
}
@@ -1803,14 +1818,18 @@ gb_internal void add_type_and_value(CheckerContext *ctx, Ast *expr, AddressingMo
return;
}
BlockingMutex *mutex = &ctx->info->type_and_value_mutex;
if (ctx->decl) {
mutex = &ctx->decl->type_and_value_mutex;
} else if (ctx->pkg) {
mutex = &ctx->pkg->type_and_value_mutex;
}
BlockingMutex *mutex = tav_mutex_for_node(expr);
if (use_mutex) mutex_lock(mutex);
/* Previous logic:
BlockingMutex *mutex = &ctx->info->type_and_value_mutex;
if (ctx->decl) {
mutex = &ctx->decl->type_and_value_mutex;
} else if (ctx->pkg) {
mutex = &ctx->pkg->type_and_value_mutex;
}
*/
mutex_lock(mutex);
Ast *prev_expr = nullptr;
while (prev_expr != expr) {
prev_expr = expr;
@@ -1835,7 +1854,7 @@ gb_internal void add_type_and_value(CheckerContext *ctx, Ast *expr, AddressingMo
break;
};
}
if (use_mutex) mutex_unlock(mutex);
mutex_unlock(mutex);
}
gb_internal void add_entity_definition(CheckerInfo *i, Ast *identifier, Entity *entity) {

View File

@@ -221,14 +221,14 @@ struct DeclInfo {
Entity * para_poly_original;
bool is_using;
bool where_clauses_evaluated;
bool foreign_require_results;
bool is_using;
bool foreign_require_results;
std::atomic<bool> where_clauses_evaluated;
std::atomic<ProcCheckedState> proc_checked_state;
BlockingMutex proc_checked_mutex;
isize defer_used;
bool defer_use_checked;
BlockingMutex proc_checked_mutex;
isize defer_used;
std::atomic<bool> defer_use_checked;
CommentGroup *comment;
CommentGroup *docs;
@@ -631,7 +631,7 @@ gb_internal void scope_lookup_parent (Scope *s, String const &name, Scope **s
gb_internal Entity *scope_insert (Scope *s, Entity *entity);
gb_internal void add_type_and_value (CheckerContext *c, Ast *expression, AddressingMode mode, Type *type, ExactValue const &value, bool use_mutex=true);
gb_internal void add_type_and_value (CheckerContext *c, Ast *expression, AddressingMode mode, Type *type, ExactValue const &value);
gb_internal ExprInfo *check_get_expr_info (CheckerContext *c, Ast *expr);
gb_internal void add_untyped (CheckerContext *c, Ast *expression, AddressingMode mode, Type *basic_type, ExactValue const &value);
gb_internal void add_entity_use (CheckerContext *c, Ast *identifier, Entity *entity);

View File

@@ -170,7 +170,7 @@ struct Entity {
Type * type;
std::atomic<Ast *> identifier; // Can be nullptr
DeclInfo * decl_info;
DeclInfo * parent_proc_decl; // nullptr if in file/global scope
std::atomic<DeclInfo *> parent_proc_decl; // nullptr if in file/global scope
AstFile * file;
AstPackage *pkg;
@@ -181,11 +181,11 @@ struct Entity {
Entity * aliased_of;
union {
struct lbModule *code_gen_module;
std::atomic<struct lbModule *> code_gen_module;
struct cgModule *cg_module;
};
union {
struct lbProcedure *code_gen_procedure;
std::atomic<struct lbProcedure *> code_gen_procedure;
struct cgProcedure *cg_procedure;
};
@@ -370,7 +370,7 @@ gb_internal Entity *alloc_entity_using_variable(Entity *parent, Token token, Typ
token.pos = parent->token.pos;
Entity *entity = alloc_entity(Entity_Variable, parent->scope, token, type);
entity->using_parent = parent;
entity->parent_proc_decl = parent->parent_proc_decl;
entity->parent_proc_decl.store(parent->parent_proc_decl, std::memory_order_relaxed);
entity->using_expr = using_expr;
entity->flags |= EntityFlag_Using;
entity->flags |= EntityFlag_Used;

View File

@@ -676,7 +676,7 @@ gb_internal void lb_begin_procedure_body(lbProcedure *p) {
lbAddr res = {};
if (p->entity && p->entity->decl_info &&
p->entity->decl_info->defer_use_checked &&
p->entity->decl_info->defer_use_checked.load(std::memory_order_relaxed) &&
p->entity->decl_info->defer_used == 0) {
// NOTE(bill): this is a bodge to get around the issue of the problem BELOW
@@ -2217,8 +2217,10 @@ gb_internal lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValu
Entity *e = entity_of_node(ident);
GB_ASSERT(e != nullptr);
if (e->parent_proc_decl != nullptr && e->parent_proc_decl->entity != nullptr) {
procedure = e->parent_proc_decl->entity.load()->token.string;
DeclInfo *parent_proc_decl = e->parent_proc_decl.load(std::memory_order_relaxed);
if (parent_proc_decl != nullptr &&
parent_proc_decl->entity != nullptr) {
procedure = parent_proc_decl->entity.load()->token.string;
} else {
procedure = str_lit("");
}

View File

@@ -2419,7 +2419,7 @@ gb_internal lbValue lb_handle_objc_block(lbProcedure *p, Ast *expr) {
Ast *proc_lit = unparen_expr(ce->args[capture_arg_count]);
if (proc_lit->kind == Ast_Ident) {
proc_lit = proc_lit->Ident.entity->decl_info->proc_lit;
proc_lit = proc_lit->Ident.entity.load()->decl_info->proc_lit;
}
GB_ASSERT(proc_lit->kind == Ast_ProcLit);

View File

@@ -559,8 +559,8 @@ gb_internal void write_canonical_parent_prefix(TypeWriter *w, Entity *e) {
// no prefix
return;
}
if (e->parent_proc_decl) {
Entity *p = e->parent_proc_decl->entity;
if (e->parent_proc_decl.load(std::memory_order_relaxed)) {
Entity *p = e->parent_proc_decl.load(std::memory_order_relaxed)->entity;
write_canonical_parent_prefix(w, p);
type_writer_append(w, p->token.string.text, p->token.string.len);
if (is_type_polymorphic(p->type)) {

View File

@@ -425,7 +425,7 @@ struct AstSplitArgs {
#define AST_KINDS \
AST_KIND(Ident, "identifier", struct { \
Token token; \
Entity *entity; \
std::atomic<Entity *> entity; \
u32 hash; \
}) \
AST_KIND(Implicit, "implicit", Token) \
@@ -856,19 +856,19 @@ gb_global isize const ast_variant_sizes[] = {
};
struct AstCommonStuff {
AstKind kind; // u16
u8 state_flags;
u8 viral_state_flags;
i32 file_id;
TypeAndValue tav; // NOTE(bill): Making this a pointer is slower
AstKind kind; // u16
u8 state_flags;
std::atomic<u8> viral_state_flags;
i32 file_id;
TypeAndValue tav; // NOTE(bill): Making this a pointer is slower
};
struct Ast {
AstKind kind; // u16
u8 state_flags;
u8 viral_state_flags;
i32 file_id;
TypeAndValue tav; // NOTE(bill): Making this a pointer is slower
AstKind kind; // u16
u8 state_flags;
std::atomic<u8> viral_state_flags;
i32 file_id;
TypeAndValue tav; // NOTE(bill): Making this a pointer is slower
// IMPORTANT NOTE(bill): This must be at the end since the AST is allocated to be size of the variant
union {

View File

@@ -1,5 +1,15 @@
// thread_pool.cpp

// ThreadSanitizer annotation hooks for the work-stealing task queue below.
// The queue synchronizes with raw atomic fences whose happens-before edges
// TSan cannot infer on its own; the intent is to annotate the ring-buffer
// slots explicitly (release on publish, acquire on consume) when TSan is
// active. Currently both macros are unconditional no-ops: the detection
// block is left commented out until it can be made to work with MSVC's
// preprocessor, per the TODO below.
// TODO(bill): make work on MSVC
// #if defined(__SANITIZE_THREAD__) || (defined(__has_feature) && __has_feature(thread_sanitizer))
// #include <sanitizer/tsan_interface.h>
// #define TSAN_RELEASE(addr) __tsan_release(addr)
// #define TSAN_ACQUIRE(addr) __tsan_acquire(addr)
// #else
// NOTE(review): before enabling the block above, unify the call sites — some
// pass the buffer slot by value and some pass its address, while
// __tsan_release/__tsan_acquire take a void* — TODO confirm and normalize.
#define TSAN_RELEASE(addr)
#define TSAN_ACQUIRE(addr)
// #endif
struct WorkerTask;
struct ThreadPool;
@@ -88,6 +98,7 @@ void thread_pool_queue_push(Thread *thread, WorkerTask task) {
}
cur_ring->buffer[bot % cur_ring->size] = task;
TSAN_RELEASE(cur_ring->buffer[bot % cur_ring->size]);
std::atomic_thread_fence(std::memory_order_release);
thread->queue.bottom.store(bot + 1, std::memory_order_relaxed);
@@ -108,6 +119,7 @@ GrabState thread_pool_queue_take(Thread *thread, WorkerTask *task) {
if (top <= bot) {
// Queue is not empty
TSAN_ACQUIRE(cur_ring->buffer[bot % cur_ring->size]);
*task = cur_ring->buffer[bot % cur_ring->size];
if (top == bot) {
// Only one entry left in queue
@@ -139,6 +151,8 @@ GrabState thread_pool_queue_steal(Thread *thread, WorkerTask *task) {
if (top < bot) {
// Queue is not empty
TaskRingBuffer *cur_ring = thread->queue.ring.load(std::memory_order_consume);
TSAN_ACQUIRE(&cur_ring->buffer[top % cur_ring->size]);
*task = cur_ring->buffer[top % cur_ring->size];
if (!thread->queue.top.compare_exchange_strong(top, top + 1, std::memory_order_seq_cst, std::memory_order_relaxed)) {

View File

@@ -375,8 +375,8 @@ gb_internal void semaphore_wait(Semaphore *s) {
}
gb_internal bool mutex_try_lock(BlockingMutex *m) {
ANNOTATE_LOCK_PRE(m, 1);
i32 v = m->state().exchange(Internal_Mutex_State_Locked, std::memory_order_acquire);
if (v == Internal_Mutex_State_Unlocked) {
i32 expected = Internal_Mutex_State_Unlocked;
if (m->state().compare_exchange_strong(expected, Internal_Mutex_State_Locked, std::memory_order_acquire)) {
ANNOTATE_LOCK_POST(m);
return true;
}

View File

@@ -170,21 +170,21 @@ struct TypeStruct {
};
struct TypeUnion {
Slice<Type *> variants;
Slice<Type *> variants;
Ast * node;
Scope * scope;
Ast * node;
Scope * scope;
i64 variant_block_size;
i64 custom_align;
Type * polymorphic_params; // Type_Tuple
Type * polymorphic_parent;
Wait_Signal polymorphic_wait_signal;
std::atomic<i64> variant_block_size;
i64 custom_align;
Type * polymorphic_params; // Type_Tuple
Type * polymorphic_parent;
Wait_Signal polymorphic_wait_signal;
i16 tag_size;
bool is_polymorphic;
bool is_poly_specialized;
UnionTypeKind kind;
std::atomic<i16> tag_size;
bool is_polymorphic;
bool is_poly_specialized;
UnionTypeKind kind;
};
struct TypeProc {