Replace Scope.elements with a custom hash map, ScopeMap

This hash map is Robin Hood based, with an inline slot capacity for small scopes
This commit is contained in:
gingerBill
2026-03-16 17:41:58 +00:00
parent 1744f57d01
commit 36d5a19115
10 changed files with 374 additions and 81 deletions

View File

@@ -16,14 +16,25 @@ void MP_FREE(void *mem, size_t size) {
#else
void *MP_MALLOC(size_t size) {
return gb_alloc(permanent_allocator(), cast(isize)size);
Arena *arena = get_arena(ThreadArena_Permanent);
return arena_alloc(arena, cast(isize)size, 16);
}
void *MP_REALLOC(void *mem, size_t oldsize, size_t newsize) {
return gb_resize(permanent_allocator(), mem, cast(isize)oldsize, cast(isize)newsize);
if (newsize < oldsize) {
return mem;
}
if (newsize == 0) {
return mem;
}
Arena *arena = get_arena(ThreadArena_Permanent);
void *new_mem = arena_alloc(arena, cast(isize)newsize, 16);
gb_memcopy(new_mem, mem, oldsize);
return new_mem;
}
void *MP_CALLOC(size_t nmemb, size_t size) {
size_t total = nmemb*size;
return gb_alloc(permanent_allocator(), cast(isize)total);
Arena *arena = get_arena(ThreadArena_Permanent);
isize total_size = cast(isize)(nmemb * size);
return arena_alloc(arena, total_size, 16);
}
void MP_FREE(void *mem, size_t size) {
// DO NOTHING

View File

@@ -170,8 +170,9 @@ gb_internal void override_entity_in_scope(Entity *original_entity, Entity *new_e
// Therefore two things can be done: the type can be assigned to state that it
// has been "evaluated" and the variant data can be copied across
u32 hash = string_hash(original_name);
rw_mutex_lock(&found_scope->mutex);
string_map_set(&found_scope->elements, original_name, new_entity);
scope_map_insert(&found_scope->elements, original_name, hash, new_entity);
rw_mutex_unlock(&found_scope->mutex);
original_entity->flags |= EntityFlag_Overridden;

View File

@@ -514,7 +514,7 @@ gb_internal bool find_or_generate_polymorphic_procedure(CheckerContext *old_c, E
// NOTE(bill): Reset scope from the failed procedure type
scope->head_child.store(nullptr, std::memory_order_relaxed);
string_map_clear(&scope->elements);
scope_map_clear(&scope->elements);
ptr_set_clear(&scope->imported);
// LEAK NOTE(bill): Cloning this AST may be leaky but this is not really an issue due to arena-based allocation

View File

@@ -790,10 +790,11 @@ gb_internal bool check_using_stmt_entity(CheckerContext *ctx, AstUsingStmt *us,
for (auto const &entry : scope->elements) {
String name = entry.key;
u32 hash = entry.hash;
Entity *decl = entry.value;
if (!is_entity_exported(decl, true)) continue;
Entity *found = scope_insert_with_name(ctx->scope, name, decl);
Entity *found = scope_insert_with_name(ctx->scope, name, hash, decl);
if (found != nullptr) {
gbString expr_str = expr_to_string(expr);
error(us->token,

View File

@@ -117,6 +117,10 @@ gb_internal void check_struct_fields(CheckerContext *ctx, Ast *node, Slice<Entit
}
}
// Allocate all at once
Entity *entities_to_use = permanent_alloc_array<Entity>(variable_count);
isize entities_to_use_index = 0;
i32 field_src_index = 0;
i32 field_group_index = -1;
for_array(i, params) {
@@ -165,7 +169,14 @@ gb_internal void check_struct_fields(CheckerContext *ctx, Ast *node, Slice<Entit
}
Token name_token = name->Ident.token;
Entity *field = alloc_entity_field(ctx->scope, name_token, type, is_using, field_src_index);
// Entity *field = alloc_entity_field(ctx->scope, name_token, type, is_using, field_src_index);
Entity *field = &entities_to_use[entities_to_use_index++];
INTERNAL_ENTITY_INIT(field, Entity_Variable, ctx->scope, name_token, type);
field->state = EntityState_Unresolved;
field->flags |= EntityFlag_Field;
if (is_using) field->flags |= EntityFlag_Using;
field->Variable.field_index = field_src_index;
add_entity(ctx, ctx->scope, name, field);
field->Variable.field_group_index = field_group_index;
if (is_subtype) {
@@ -636,7 +647,7 @@ gb_internal void check_struct_type(CheckerContext *ctx, Type *struct_type, Ast *
isize min_field_count = 0;
for_array(field_index, st->fields) {
Ast *field = st->fields[field_index];
Ast *field = st->fields[field_index];
switch (field->kind) {
case_ast_node(f, ValueDecl, field);
min_field_count += f->names.count;
@@ -887,6 +898,10 @@ gb_internal void check_enum_type(CheckerContext *ctx, Type *enum_type, Type *nam
scope_reserve(ctx->scope, et->fields.count);
// Allocate all at once
Entity *entities_to_use = permanent_alloc_array<Entity>(et->fields.count);
isize entities_to_use_index = 0;
for_array(i, et->fields) {
Ast *field = et->fields[i];
Ast *ident = nullptr;
@@ -931,9 +946,6 @@ gb_internal void check_enum_type(CheckerContext *ctx, Type *enum_type, Type *nam
// NOTE(bill): Skip blank identifiers
if (is_blank_ident(name)) {
continue;
} else if (name == "names") {
error(field, "'names' is a reserved identifier for enumerations");
continue;
}
if (min_value_set) {
@@ -957,7 +969,11 @@ gb_internal void check_enum_type(CheckerContext *ctx, Type *enum_type, Type *nam
max_value_set = true;
}
Entity *e = alloc_entity_constant(ctx->scope, ident->Ident.token, constant_type, iota);
// Entity *e = alloc_entity_constant(ctx->scope, ident->Ident.token, constant_type, iota);
Entity *e = &entities_to_use[entities_to_use_index++];
Token token = ident->Ident.token;
INTERNAL_ENTITY_INIT(e, Entity_Constant, ctx->scope, token, constant_type);
e->Constant.value = iota;
e->identifier = ident;
e->flags |= EntityFlag_Visited;
e->state = EntityState_Resolved;
@@ -3205,7 +3221,7 @@ gb_internal Type *make_soa_struct_internal(CheckerContext *ctx, Ast *array_typ_e
soa_struct->Struct.fields = permanent_slice_make<Entity *>(field_count+extra_field_count);
soa_struct->Struct.tags = gb_alloc_array(permanent_allocator(), String, field_count+extra_field_count);
string_map_init(&scope->elements, 8);
scope_map_init(&scope->elements);
String params_xyzw[4] = {
str_lit("x"),

View File

@@ -55,7 +55,8 @@ gb_internal bool check_rtti_type_disallowed(Ast *expr, Type *type, char const *f
gb_internal void scope_reserve(Scope *scope, isize count) {
string_map_reserve(&scope->elements, 2*count);
scope_map_reserve(&scope->elements, 2*count);
// string_map_reserve(&scope->elements, 2*count);
}
gb_internal void entity_graph_node_set_destroy(EntityGraphNodeSet *s) {
@@ -214,6 +215,7 @@ gb_internal DeclInfo *make_decl_info(Scope *scope, DeclInfo *parent) {
gb_internal Scope *create_scope(CheckerInfo *info, Scope *parent) {
Scope *s = permanent_alloc_item<Scope>();
scope_map_init(&s->elements);
s->parent = parent;
if (parent != nullptr && parent != builtin_pkg->scope) {
@@ -254,9 +256,10 @@ gb_internal Scope *create_scope_from_package(CheckerContext *c, AstPackage *pkg)
total_pkg_decl_count += file->total_file_decl_count;
}
isize init_elements_capacity = gb_max(DEFAULT_SCOPE_CAPACITY, 2*total_pkg_decl_count);
// isize init_elements_capacity = gb_max(DEFAULT_SCOPE_CAPACITY, 2*total_pkg_decl_count);
Scope *s = create_scope(c->info, builtin_pkg->scope);
string_map_init(&s->elements, init_elements_capacity);
scope_map_reserve(&s->elements, 2*total_pkg_decl_count);
// string_map_init(&s->elements, init_elements_capacity);
s->flags |= ScopeFlag_Pkg;
s->pkg = pkg;
@@ -283,7 +286,7 @@ gb_internal void destroy_scope(Scope *scope) {
destroy_scope(child);
}
string_map_destroy(&scope->elements);
// string_map_destroy(&scope->elements);
ptr_set_destroy(&scope->imported);
// NOTE(bill): No need to free scope as it "should" be allocated in an arena (except for the global scope)
@@ -369,10 +372,14 @@ gb_internal void check_close_scope(CheckerContext *c) {
}
gb_internal Entity *scope_lookup_current(Scope *s, String const &name) {
Entity **found = string_map_get(&s->elements, name);
gb_internal Entity *scope_lookup_current(Scope *s, String const &name, u32 hash) {
// Entity **found = string_map_get(&s->elements, name);
if (hash == 0) {
hash = string_hash(name);
}
Entity *found = scope_map_get(&s->elements, name, hash);
if (found) {
return *found;
return found;
}
return nullptr;
}
@@ -385,20 +392,16 @@ gb_internal void scope_lookup_parent(Scope *scope, String const &name, Scope **s
if (scope != nullptr) {
bool gone_thru_proc = false;
bool gone_thru_package = false;
StringHashKey key = {};
if (hash) {
key.hash = hash;
key.string = name;
} else {
key = string_hash_string(name);
if (!hash) {
hash = string_hash(name);
}
for (Scope *s = scope; s != nullptr; s = s->parent) {
Entity **found = nullptr;
Entity *found = nullptr;
if (!is_single_threaded) rw_mutex_shared_lock(&s->mutex);
found = string_map_get(&s->elements, key);
found = scope_map_get(&s->elements, name, hash);
if (!is_single_threaded) rw_mutex_shared_unlock(&s->mutex);
if (found) {
Entity *e = *found;
Entity *e = found;
if (gone_thru_proc) {
if (e->kind == Entity_Label) {
continue;
@@ -437,35 +440,34 @@ gb_internal Entity *scope_lookup(Scope *s, String const &name, u32 hash) {
return entity;
}
gb_internal Entity *scope_insert_with_name_no_mutex(Scope *s, String const &name, Entity *entity) {
gb_internal Entity *scope_insert_with_name_no_mutex(Scope *s, String const &name, u32 hash, Entity *entity) {
if (name == "") {
return nullptr;
}
StringHashKey key = string_hash_string(name);
Entity **found = nullptr;
Entity *found = nullptr;
Entity *result = nullptr;
found = string_map_get(&s->elements, key);
found = scope_map_get(&s->elements, name, hash);
if (found) {
if (entity != *found) {
result = *found;
if (entity != found) {
result = found;
}
goto end;
}
if (s->parent != nullptr && (s->parent->flags & ScopeFlag_Proc) != 0) {
found = string_map_get(&s->parent->elements, key);
found = scope_map_get(&s->parent->elements, name, hash);
if (found) {
if ((*found)->flags & EntityFlag_Result) {
if (entity != *found) {
result = *found;
if (found->flags & EntityFlag_Result) {
if (entity != found) {
result = found;
}
goto end;
}
}
}
string_map_set(&s->elements, key, entity);
scope_map_insert(&s->elements, name, hash, entity);
if (entity->scope == nullptr) {
entity->scope = s;
}
@@ -474,31 +476,30 @@ end:;
}
gb_internal Entity *scope_insert_with_name(Scope *s, String const &name, Entity *entity) {
gb_internal Entity *scope_insert_with_name(Scope *s, String const &name, u32 hash, Entity *entity) {
if (name == "") {
return nullptr;
}
StringHashKey key = string_hash_string(name);
Entity **found = nullptr;
Entity *found = nullptr;
Entity *result = nullptr;
rw_mutex_lock(&s->mutex);
found = string_map_get(&s->elements, key);
found = scope_map_get(&s->elements, name, hash);
if (found) {
if (entity != *found) {
result = *found;
if (entity != found) {
result = found;
}
goto end;
}
if (s->parent != nullptr && (s->parent->flags & ScopeFlag_Proc) != 0) {
rw_mutex_shared_lock(&s->parent->mutex);
found = string_map_get(&s->parent->elements, key);
found = scope_map_get(&s->parent->elements, name, hash);
if (found) {
if ((*found)->flags & EntityFlag_Result) {
if (entity != *found) {
result = *found;
if (found->flags & EntityFlag_Result) {
if (entity != found) {
result = found;
}
rw_mutex_shared_unlock(&s->parent->mutex);
goto end;
@@ -507,7 +508,7 @@ gb_internal Entity *scope_insert_with_name(Scope *s, String const &name, Entity
rw_mutex_shared_unlock(&s->parent->mutex);
}
string_map_set(&s->elements, key, entity);
scope_map_insert(&s->elements, name, hash, entity);
if (entity->scope == nullptr) {
entity->scope = s;
}
@@ -519,16 +520,32 @@ end:;
gb_internal Entity *scope_insert(Scope *s, Entity *entity) {
String name = entity->token.string;
u32 hash = 0;
Ast *ident = entity->identifier.load(std::memory_order_relaxed);
if (ident != nullptr) {
hash = ident->Ident.hash;
}
if (hash == 0) {
hash = string_hash(name);
}
if (in_single_threaded_checker_stage.load(std::memory_order_relaxed)) {
return scope_insert_with_name_no_mutex(s, name, entity);
return scope_insert_with_name_no_mutex(s, name, hash, entity);
} else {
return scope_insert_with_name(s, name, entity);
return scope_insert_with_name(s, name, hash, entity);
}
}
gb_internal Entity *scope_insert_no_mutex(Scope *s, Entity *entity) {
String name = entity->token.string;
return scope_insert_with_name_no_mutex(s, name, entity);
u32 hash = 0;
Ast *ident = entity->identifier.load(std::memory_order_relaxed);
if (ident != nullptr) {
hash = ident->Ident.hash;
}
if (hash == 0) {
hash = string_hash(name);
}
return scope_insert_with_name_no_mutex(s, name, hash, entity);
}
@@ -3258,6 +3275,7 @@ gb_internal Type *find_type_in_pkg(CheckerInfo *info, String const &pkg, String
}
gb_internal CheckerTypePath *new_checker_type_path(gbAllocator allocator) {
// TODO(bill): Cache to reuse `CheckerTypePath`
auto *tp = gb_alloc_item(heap_allocator(), CheckerTypePath);
array_init(tp, allocator, 0, 16);
return tp;
@@ -4882,9 +4900,9 @@ gb_internal bool correct_single_type_alias(CheckerContext *c, Entity *e) {
gb_internal bool correct_type_alias_in_scope_backwards(CheckerContext *c, Scope *s) {
bool correction = false;
for (u32 n = s->elements.count, i = n-1; i < n; i--) {
auto const &entry = s->elements.entries[i];
Entity *e = entry.value;
if (entry.hash && e != nullptr) {
auto const &slot = s->elements.slots[i];
Entity *e = slot.value;
if (slot.hash && e != nullptr) {
correction |= correct_single_type_alias(c, e);
}
}

View File

@@ -266,6 +266,246 @@ struct ProcInfo {
};
enum { DEFAULT_SCOPE_CAPACITY = 32 };
// One slot of the scope hash table.
// hash == 0 marks an empty slot, so stored hashes must be non-zero.
// NOTE(review): assumes string_hash never returns 0 for a real key — confirm.
struct ScopeMapSlot {
	u32 hash;
	u32 _pad; // explicit padding before the pointer-aligned members
	String key;
	Entity *value;
};

// Number of slots stored directly inside the ScopeMap itself, so small
// scopes need no separate slot allocation.
enum { SCOPE_MAP_INLINE_CAP = 16 };

// Robin-hood, open-addressing hash map from String -> Entity *,
// specialized for Scope.elements.
struct ScopeMap {
	ScopeMapSlot inline_slots[SCOPE_MAP_INLINE_CAP]; // backing store while cap == SCOPE_MAP_INLINE_CAP
	ScopeMapSlot *slots; // points at inline_slots until the map grows
	u32 count;           // number of occupied slots
	u32 cap;             // total slots; always a power of two
};
// Maximum number of occupied slots before the table must grow (75% load factor).
gb_internal gb_inline u32 scope_map_max_load(u32 cap) {
	u32 quarter = cap >> 2;
	return cap - quarter; // 75% of cap (cap is a power of two)
}
// Point a fresh ScopeMap at its inline storage.
// NOTE(review): assumes the ScopeMap value is already zero-initialized
// (count == 0, inline_slots all zero) — Scopes come from the permanent
// arena, which presumably zeroes allocations; confirm.
gb_internal gb_inline void scope_map_init(ScopeMap *m) {
	m->cap = SCOPE_MAP_INLINE_CAP;
	m->slots = m->inline_slots;
}
// Raw robin-hood insertion used while rehashing into a fresh slot array.
// Assumes the destination can never be full (the caller keeps load < 100%)
// and that `key` is not already present, so no equality check is performed.
// `mask` must be cap-1 for a power-of-two cap; `hash` must be non-zero
// because hash == 0 marks an empty slot. Always returns nullptr.
gb_internal Entity *scope_map_insert_for_rehash(
	ScopeMapSlot *slots, u32 mask,
	String key, u32 hash, Entity *value) {
	u32 pos = hash & mask;
	u32 dist = 0; // probe distance of the element currently being placed
	for (;;) {
		ScopeMapSlot *s = &slots[pos];
		if (s->hash == 0) {
			// Empty slot: place the carried element and finish.
			s->key = key;
			s->hash = hash;
			s->value = value;
			return nullptr;
		}
		// Probe distance of the resident element from its ideal position.
		u32 existing_dist = (pos - s->hash) & mask;
		if (dist > existing_dist) {
			// Robin hood: evict the "richer" resident (smaller probe
			// distance) and continue inserting the displaced element.
			String tmp_key = s->key;
			u32 tmp_hash = s->hash;
			Entity *tmp_value = s->value;
			s->key = key;
			s->hash = hash;
			s->value = value;
			hash = tmp_hash;
			value = tmp_value;
			key = tmp_key;
			dist = existing_dist;
		}
		dist += 1;
		pos = (pos+1) & mask;
	}
}
// Double the capacity and rehash every occupied slot into a fresh array.
// The previous array is abandoned rather than freed — allocations come from
// the permanent arena, so this is deliberate.
// NOTE(review): assumes permanent_alloc_array returns zeroed memory, since
// hash == 0 marks an empty slot — confirm.
gb_internal void scope_map_grow(ScopeMap *m) {
	u32 new_cap = m->cap << 1;
	u32 new_mask = new_cap - 1;
	ScopeMapSlot *new_slots = permanent_alloc_array<ScopeMapSlot>(new_cap);
	if (m->count > 0) {
		for (u32 i = 0; i < m->cap; i++) {
			if (m->slots[i].hash) {
				scope_map_insert_for_rehash(new_slots, new_mask,
					m->slots[i].key, m->slots[i].hash, m->slots[i].value);
			}
		}
	}
	m->slots = new_slots;
	m->cap = new_cap;
}
// Ensure the map has capacity for `capacity` slots before it must grow again.
// Callers pass 2*expected_count so the 75% max load is respected.
// BUG FIX: the previous version replaced the slot array without rehashing,
// silently dropping any entries already present when reserve was called on a
// non-empty map; existing entries are now carried over.
// NOTE(review): assumes permanent_alloc_array returns zeroed memory
// (hash == 0 marks an empty slot) — confirm.
gb_internal void scope_map_reserve(ScopeMap *m, isize capacity) {
	if (m->slots == nullptr) {
		scope_map_init(m);
	}
	u32 new_cap = next_pow2_u32(cast(u32)capacity);
	if (m->cap < new_cap && new_cap > SCOPE_MAP_INLINE_CAP) {
		ScopeMapSlot *old_slots = m->slots;
		u32 old_cap = m->cap;
		m->slots = permanent_alloc_array<ScopeMapSlot>(new_cap);
		m->cap = new_cap;
		if (m->count > 0) {
			u32 new_mask = new_cap - 1;
			for (u32 i = 0; i < old_cap; i++) {
				if (old_slots[i].hash) {
					scope_map_insert_for_rehash(m->slots, new_mask,
						old_slots[i].key, old_slots[i].hash, old_slots[i].value);
				}
			}
		}
	}
}
// Insert (or replace) the mapping `key` -> `value`.
// Returns the previous value when the key was already present, else nullptr.
// `hash` must be non-zero (hash == 0 marks an empty slot); callers derive it
// from string_hash(key).
gb_internal Entity *scope_map_insert(ScopeMap *m, String key, u32 hash, Entity *value) {
	if (m->slots == nullptr) {
		// Lazily fall back to the inline storage for maps that were never
		// explicitly initialized.
		scope_map_init(m);
	}
	if (m->count >= scope_map_max_load(m->cap)) {
		scope_map_grow(m);
	}
	u32 mask = m->cap-1;
	u32 pos = hash & mask;
	u32 dist = 0; // probe distance of the element currently being placed
	for (;;) {
		ScopeMapSlot *s = &m->slots[pos];
		if (s->hash == 0) {
			// Empty slot: brand new entry.
			s->key = key;
			s->hash = hash;
			s->value = value;
			m->count += 1;
			return nullptr;
		}
		if (s->hash == hash && s->key == key) {
			// Key already present: replace and hand back the old value.
			Entity *old = s->value;
			s->value = value;
			return old;
		}
		u32 existing_dist = (pos - s->hash) & mask;
		if (dist > existing_dist) {
			// Robin hood: evict the "richer" resident and continue
			// inserting the displaced element instead.
			String tmp_key = s->key;
			u32 tmp_hash = s->hash;
			Entity *tmp_value = s->value;
			s->key = key;
			s->hash = hash;
			s->value = value;
			key = tmp_key;
			hash = tmp_hash;
			value = tmp_value;
			dist = existing_dist;
		}
		dist += 1;
		pos = (pos+1) & mask;
	}
}
// Look up `key` by its precomputed non-zero `hash`.
// Returns the stored Entity* or nullptr when absent.
// Precondition: the map has been initialized (cap is a non-zero power of
// two) — with cap == 0 the mask computation below would be meaningless.
gb_internal Entity *scope_map_get(ScopeMap *m, String key, u32 hash) {
	u32 mask = m->cap-1;
	u32 pos = hash & mask;
	u32 dist = 0;
	for (;;) {
		ScopeMapSlot *s = &m->slots[pos];
		if (s->hash == 0) {
			// Hit an empty slot: the key cannot lie further along the chain.
			return nullptr;
		}
		u32 existing_dist = (pos - s->hash) & mask;
		if (dist > existing_dist) {
			// Robin hood invariant: a present key would have displaced this
			// "richer" resident, so the key is absent.
			return nullptr;
		}
		if (s->hash == hash && s->key == key) {
			return s->value;
		}
		dist += 1;
		pos = (pos + 1) & mask;
	}
}
// Remove every entry while keeping the current slot array and capacity.
gb_internal void scope_map_clear(ScopeMap *m) {
	isize total_size = gb_size_of(*m->slots) * m->cap;
	gb_memset(m->slots, 0, total_size);
	m->count = 0;
}
// Forward iterator over the occupied slots of a ScopeMap, enabling
// range-for over Scope.elements.
struct ScopeMapIterator {
	ScopeMap const *map;
	u32 index; // current slot index; == map->cap for the end iterator
	// Advance to the next occupied slot (or stop at map->cap).
	// NOTE(review): must not be called on an end iterator — the bounds check
	// happens after the increment, so ++end() would scan past the slot array.
	ScopeMapIterator &operator++() noexcept {
		for (;;) {
			++index;
			if (map->cap == index) {
				return *this;
			}
			ScopeMapSlot *s = map->slots+index;
			if (s->hash) {
				return *this;
			}
		}
	}
	bool operator==(ScopeMapIterator const &other) const noexcept {
		return this->map == other.map && this->index == other.index;
	}
	// Implicit conversion to a pointer at the current slot; range-for
	// dereference (and !=) work through this conversion.
	operator ScopeMapSlot *() const {
		return map->slots+index;
	}
};
// End iterator: index == cap, one past the last slot.
gb_internal ScopeMapIterator end(ScopeMap &m) noexcept {
	return ScopeMapIterator{&m, m.cap};
}
// Const overload of end(); same one-past-the-end sentinel.
gb_internal ScopeMapIterator const end(ScopeMap const &m) noexcept {
	return ScopeMapIterator{&m, m.cap};
}
// Iterator at the first occupied slot, or end(m) when the map is empty.
gb_internal ScopeMapIterator begin(ScopeMap &m) noexcept {
	if (m.count != 0) {
		for (u32 i = 0; i < m.cap; i++) {
			if (m.slots[i].hash) {
				return ScopeMapIterator{&m, i};
			}
		}
	}
	return end(m);
}
// Const overload: iterator at the first occupied slot, or end(m) when empty.
gb_internal ScopeMapIterator const begin(ScopeMap const &m) noexcept {
	if (m.count != 0) {
		for (u32 i = 0; i < m.cap; i++) {
			if (m.slots[i].hash) {
				return ScopeMapIterator{&m, i};
			}
		}
	}
	return end(m);
}
enum ScopeFlag : i32 {
ScopeFlag_Pkg = 1<<1,
@@ -281,7 +521,6 @@ enum ScopeFlag : i32 {
ScopeFlag_ContextDefined = 1<<16,
};
enum { DEFAULT_SCOPE_CAPACITY = 32 };
struct Scope {
Ast * node;
@@ -292,7 +531,7 @@ struct Scope {
i32 index; // within a procedure
RwMutex mutex;
StringMap<Entity *> elements;
ScopeMap elements;
PtrSet<Scope *> imported;
DeclInfo *decl_info;
@@ -625,7 +864,7 @@ gb_internal isize type_info_index (CheckerInfo *info, TypeInfoPair
gb_internal Entity *entity_of_node(Ast *expr);
gb_internal Entity *scope_lookup_current(Scope *s, String const &name);
// gb_internal Entity *scope_lookup_current(Scope *s, String const &name, u32 hash=0);
gb_internal Entity *scope_lookup (Scope *s, String const &name, u32 hash=0);
gb_internal void scope_lookup_parent (Scope *s, String const &name, Scope **scope_, Entity **entity_, u32 hash=0);
gb_internal Entity *scope_insert (Scope *s, Entity *entity);

View File

@@ -344,17 +344,23 @@ gb_internal bool entity_has_deferred_procedure(Entity *e) {
gb_global std::atomic<u64> global_entity_id;
// NOTE(bill): This exists to allow for bulk allocations of entities all at once to improve performance for type generation
// Initialize a pre-allocated Entity in place — the bulk-allocation
// counterpart of alloc_entity, used when many entities are allocated at once.
// BUG FIX (macro hygiene): `entity` is now parenthesized in the file_id
// branch too, matching every other use, so expression arguments expand
// correctly. NOTE: `token_` is expanded more than once — pass only simple,
// side-effect-free expressions.
#define INTERNAL_ENTITY_INIT(entity, kind_, scope_, token_, type_) do { \
	(entity)->kind = (kind_); \
	(entity)->state = EntityState_Unresolved; \
	(entity)->scope = (scope_); \
	(entity)->token = (token_); \
	(entity)->type = (type_); \
	(entity)->id = 1 + global_entity_id.fetch_add(1); \
	if ((token_).pos.file_id) { \
		(entity)->file = thread_unsafe_get_ast_file_from_id((token_).pos.file_id); \
	} \
} while (0)
gb_internal Entity *alloc_entity(EntityKind kind, Scope *scope, Token token, Type *type) {
Entity *entity = permanent_alloc_item<Entity>();
entity->kind = kind;
entity->state = EntityState_Unresolved;
entity->scope = scope;
entity->token = token;
entity->type = type;
entity->id = 1 + global_entity_id.fetch_add(1);
if (token.pos.file_id) {
entity->file = thread_unsafe_get_ast_file_from_id(token.pos.file_id);
}
INTERNAL_ENTITY_INIT(entity, kind, scope, token, type);
return entity;
}
@@ -411,11 +417,10 @@ gb_internal Entity *alloc_entity_const_param(Scope *scope, Token token, Type *ty
gb_internal Entity *alloc_entity_field(Scope *scope, Token token, Type *type, bool is_using, i32 field_index, EntityState state = EntityState_Unresolved) {
Entity *entity = alloc_entity_variable(scope, token, type);
Entity *entity = alloc_entity_variable(scope, token, type, state);
entity->Variable.field_index = field_index;
if (is_using) entity->flags |= EntityFlag_Using;
entity->flags |= EntityFlag_Field;
entity->state = state;
return entity;
}

View File

@@ -123,10 +123,11 @@ gb_internal bool ptr_set_update(PtrSet<T> *s, T ptr) { // returns true if it pre
usize hash_index = (cast(usize)hash) & mask;
GB_ASSERT(hash_index < s->capacity);
for (usize i = 0; i < s->capacity; i++) {
T *key = &s->keys[hash_index];
GB_ASSERT(*key != ptr);
if (*key == (T)PtrSet<T>::TOMBSTONE || *key == 0) {
*key = ptr;
T *key_ptr = &s->keys[hash_index];
T key = *key_ptr;
GB_ASSERT(key != ptr);
if (key == (T)PtrSet<T>::TOMBSTONE || key == 0) {
*key_ptr = ptr;
s->count++;
return false;
}
@@ -161,10 +162,11 @@ gb_internal bool ptr_set_update_with_mutex(PtrSet<T> *s, T ptr, RWSpinLock *m) {
usize hash_index = (cast(usize)hash) & mask;
GB_ASSERT(hash_index < s->capacity);
for (usize i = 0; i < s->capacity; i++) {
T *key = &s->keys[hash_index];
GB_ASSERT(*key != ptr);
if (*key == (T)PtrSet<T>::TOMBSTONE || *key == 0) {
*key = ptr;
T *key_ptr = &s->keys[hash_index];
T key = *key_ptr;
GB_ASSERT(key != ptr);
if (key == (T)PtrSet<T>::TOMBSTONE || key == 0) {
*key_ptr = ptr;
s->count++;
return false;
}

View File

@@ -3617,7 +3617,7 @@ gb_internal Selection lookup_field_from_index(Type *type, i64 index) {
return empty_selection;
}
gb_internal Entity *scope_lookup_current(Scope *s, String const &name);
gb_internal Entity *scope_lookup_current(Scope *s, String const &name, u32 hash=0);
gb_internal bool has_type_got_objc_class_attribute(Type *t);
gb_internal Selection lookup_field_with_selection(Type *type_, String field_name, bool is_type, Selection sel, bool allow_blank_ident) {