Mirror of https://github.com/odin-lang/Odin.git, synced 2026-04-18 20:40:28 +00:00
Correct issue with the generated map type internals; Simplify map rehash logic to utilize resize
@@ -204,67 +204,45 @@ __slice_resize :: proc(array_: ^$T/[]$E, new_count: int, allocator: Allocator, l
	return true
}

__dynamic_map_reserve :: proc(using header: Map_Header, cap: int, loc := #caller_location) {
	__dynamic_array_reserve(&m.entries, entry_size, entry_align, cap, loc)

	old_len := len(m.hashes)
	__slice_resize(&m.hashes, cap, m.entries.allocator, loc)
	for i in old_len..<len(m.hashes) {
__dynamic_map_reset_entries :: proc(using header: Map_Header, loc := #caller_location) {
	for i in 0..<len(m.hashes) {
		m.hashes[i] = -1
	}

	for i in 0 ..< m.entries.len {
		entry_header := __dynamic_map_get_entry(header, i)
		entry_hash := __get_map_hash_from_entry(header, entry_header)
		entry_header.next = -1

		fr := __dynamic_map_find(header, entry_hash)
		if fr.entry_prev < 0 {
			m.hashes[fr.hash_index] = i
		} else {
			e := __dynamic_map_get_entry(header, fr.entry_prev)
			e.next = i
		}
	}
}
__dynamic_map_rehash :: proc(using header: Map_Header, new_count: int, loc := #caller_location) #no_bounds_check {

__dynamic_map_reserve :: proc(using header: Map_Header, cap: int, loc := #caller_location) {
	c := context
	if m.entries.allocator.procedure != nil {
		c.allocator = m.entries.allocator
	}
	context = c

	nm := Raw_Map{}
	nm.entries.allocator = m.entries.allocator
	nm.hashes = m.hashes

	new_header: Map_Header = header
	new_header.m = &nm

	__dynamic_array_reserve(&m.entries, entry_size, entry_align, cap, loc)

	new_count := new_count
	new_count = max(new_count, 2*m.entries.len)

	__slice_resize(&nm.hashes, new_count, m.entries.allocator, loc)
	for _, i in nm.hashes {
		nm.hashes[i] = -1
	if m.entries.len*2 < len(m.hashes) {
		return
	}

	__dynamic_array_reserve(&nm.entries, entry_size, entry_align, m.entries.len, loc)
	for i in 0 ..< m.entries.len {
		if len(nm.hashes) == 0 {
			__dynamic_map_grow(new_header, loc)
		}

		entry_header := __dynamic_map_get_entry(header, i)
		entry_hash := __get_map_hash_from_entry(header, entry_header)

		fr := __dynamic_map_find(new_header, entry_hash)
		j := __dynamic_map_add_entry(new_header, entry_hash, loc)
		if fr.entry_prev < 0 {
			nm.hashes[fr.hash_index] = j
		} else {
			e := __dynamic_map_get_entry(new_header, fr.entry_prev)
			e.next = j
		}

		e := __dynamic_map_get_entry(new_header, j)
		__dynamic_map_copy_entry(header, e, entry_header)
		e.next = fr.entry_index

		if __dynamic_map_full(new_header) {
			__dynamic_map_grow(new_header, loc)
		}
	if __slice_resize(&m.hashes, cap*2, m.entries.allocator, loc) {
		__dynamic_map_reset_entries(header, loc)
	}
	}

	free(m.entries.data, m.entries.allocator, loc)
	header.m^ = nm
__dynamic_map_rehash :: proc(using header: Map_Header, new_count: int, loc := #caller_location) {
	#force_inline __dynamic_map_reserve(header, new_count, loc)
}

__dynamic_map_get :: proc(h: Map_Header, hash: Map_Hash) -> rawptr {
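
The runtime map in this file is a separate-chaining table: m.hashes stores the head entry index for each bucket and each entry's next field links the rest of the chain, so after the hash slice is resized every live entry only needs to be re-linked rather than re-inserted. Below is a minimal standalone sketch of that relink step; the Entry record, the relink procedure, and the sample values are hypothetical stand-ins for the header-driven, offset-based entry handling the runtime actually uses.

package map_relink_sketch

import "core:fmt"

// Hypothetical, simplified entry record; the real runtime works through
// Map_Header and raw byte offsets rather than a concrete struct type.
Entry :: struct {
	hash: uintptr,
	next: int, // index of the next entry in the same bucket; -1 terminates the chain
	key:  string,
	val:  int,
}

// Rebuild the bucket heads and chains after the hashes slice has been
// resized, in the same spirit as __dynamic_map_reset_entries above.
relink :: proc(hashes: []int, entries: []Entry) {
	if len(hashes) == 0 {
		return
	}
	for i in 0..<len(hashes) {
		hashes[i] = -1
	}
	for i in 0..<len(entries) {
		entries[i].next = -1
		b := int(entries[i].hash % uintptr(len(hashes)))
		if hashes[b] < 0 {
			hashes[b] = i // first entry seen for this bucket
		} else {
			j := hashes[b]
			for entries[j].next >= 0 { // walk to the end of the chain
				j = entries[j].next
			}
			entries[j].next = i
		}
	}
}

main :: proc() {
	backing := [3]Entry{
		{hash = 3,  key = "a", val = 1},
		{hash = 9,  key = "b", val = 2},
		{hash = 13, key = "c", val = 3}, // collides with "a" once there are 5 buckets
	}
	entries := backing[:]
	hashes := make([]int, 5)
	defer delete(hashes)

	relink(hashes, entries)
	fmt.println(hashes)                      // bucket heads, -1 means empty
	for e in entries { fmt.println(e.next) } // per-entry chain links
}

Running it leaves hashes holding the bucket heads and each entry's next pointing at the next colliding entry, which is the invariant __dynamic_map_reset_entries restores above.
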
@@ -331,15 +309,18 @@ __dynamic_map_full :: #force_inline proc "contextless" (using h: Map_Header) ->

__dynamic_map_hash_equal :: proc "contextless" (h: Map_Header, a, b: Map_Hash) -> bool {
	if a.hash == b.hash {
		return h.equal(a.key_ptr, b.key_ptr)
	}
	return false
	return a.hash == b.hash && h.equal(a.key_ptr, b.key_ptr)
}

__dynamic_map_find :: proc(using h: Map_Header, hash: Map_Hash) -> Map_Find_Result #no_bounds_check {
	fr := Map_Find_Result{-1, -1, -1}
	if n := uintptr(len(m.hashes)); n > 0 {
		for i in 0..<m.entries.len {
			entry := __dynamic_map_get_entry(h, i)
			assert(entry.next < m.entries.len)
		}

		fr.hash_index = int(hash.hash % n)
		fr.entry_index = m.hashes[fr.hash_index]
		for fr.entry_index >= 0 {
@@ -348,6 +329,8 @@ __dynamic_map_find :: proc(using h: Map_Header, hash: Map_Hash) -> Map_Find_Resu
			if __dynamic_map_hash_equal(h, entry_hash, hash) {
				return fr
			}
			assert(entry.next < m.entries.len)

			fr.entry_prev = fr.entry_index
			fr.entry_index = entry.next
		}
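
Lookup follows the same shape: __dynamic_map_find picks a bucket with hash.hash % len(m.hashes) and walks that bucket's chain, comparing stored hashes and then keys via h.equal. A sketch under the same hypothetical Entry layout as the relink example above, not the runtime's real Map_Header machinery:

// Sketch of the chain walk in __dynamic_map_find, reusing the hypothetical
// Entry record from the relink sketch; the real runtime compares keys
// through h.equal once the stored hashes match.
find :: proc(hashes: []int, entries: []Entry, hash: uintptr, key: string) -> int {
	if len(hashes) == 0 {
		return -1
	}
	i := hashes[int(hash % uintptr(len(hashes)))] // bucket head
	for i >= 0 {
		if entries[i].hash == hash && entries[i].key == key {
			return i
		}
		i = entries[i].next // follow the chain
	}
	return -1 // not present
}
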
@@ -379,11 +362,11 @@ __dynamic_map_get_entry :: proc(using h: Map_Header, index: int) -> ^Map_Entry_H
	return (^Map_Entry_Header)(uintptr(m.entries.data) + uintptr(index*entry_size))
}

__dynamic_map_copy_entry :: proc "contextless" (h: Map_Header, new, old: ^Map_Entry_Header) {
__dynamic_map_copy_entry :: proc(h: Map_Header, new, old: ^Map_Entry_Header) {
	mem_copy(new, old, h.entry_size)
}

__dynamic_map_erase :: proc(using h: Map_Header, fr: Map_Find_Result) #no_bounds_check {
__dynamic_map_erase :: proc(using h: Map_Header, fr: Map_Find_Result) #no_bounds_check {
	if fr.entry_prev < 0 {
		m.hashes[fr.hash_index] = __dynamic_map_get_entry(h, fr.entry_index).next
	} else {
@@ -2058,7 +2058,6 @@ void init_map_entry_type(Type *type) {

	// NOTE(bill): The preload types may have not been set yet
	GB_ASSERT(t_map_hash != nullptr);
	Type *entry_type = alloc_type_struct();

	/*
	struct {
@@ -2076,7 +2075,7 @@ void init_map_entry_type(Type *type) {
	fields[2] = alloc_entity_field(s, make_token_ident(str_lit("key")), type->Map.key, false, 2, EntityState_Resolved);
	fields[3] = alloc_entity_field(s, make_token_ident(str_lit("value")), type->Map.value, false, 3, EntityState_Resolved);

	Type *entry_type = alloc_type_struct();
	entry_type->Struct.fields = fields;
	entry_type->Struct.tags = gb_alloc_array(permanent_allocator(), String, fields.count);
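
In Odin terms, the entry struct assembled here for a map[Key]Value corresponds roughly to the sketch below; only the "key" and "value" fields are spelled out in this hunk, so the names and types of the first two fields are assumptions.

// Hypothetical Odin-level picture of the generated entry struct; "hash" and
// "next" (and their types) are assumed, while "key" and "value" come from the
// alloc_entity_field calls above.
Map_Entry_Sketch :: struct($Key: typeid, $Value: typeid) {
	hash:  uintptr, // assumed: precomputed hash of the key (t_map_hash)
	next:  int,     // assumed: chain link consumed by the []int hashes slice
	key:   Key,
	value: Value,
}
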
@@ -495,9 +495,13 @@ lbValue lb_gen_map_header(lbProcedure *p, lbValue map_val_ptr, Type *map_type) {
	Type *val_type = map_type->Map.value;
	gb_unused(val_type);

	GB_ASSERT(map_type->Map.entry_type->kind == Type_Struct);
	map_type->Map.entry_type->cached_size = -1;
	map_type->Map.entry_type->Struct.are_offsets_set = false;

	i64 entry_size = type_size_of (map_type->Map.entry_type);
	i64 entry_align = type_align_of (map_type->Map.entry_type);

	i64 key_offset = type_offset_of(map_type->Map.entry_type, 2);
	i64 key_size = type_size_of (map_type->Map.key);
@@ -507,9 +511,9 @@ lbValue lb_gen_map_header(lbProcedure *p, lbValue map_val_ptr, Type *map_type) {

	Type *map_header_base = base_type(t_map_header);
	GB_ASSERT(map_header_base->Struct.fields.count == 8);
	Type *m_type = map_header_base->Struct.fields[0]->type;
	Type *raw_map_ptr_type = map_header_base->Struct.fields[0]->type;
	LLVMValueRef const_values[8] = {};
	const_values[0] = LLVMConstNull(lb_type(p->module, m_type));
	const_values[0] = LLVMConstNull(lb_type(p->module, raw_map_ptr_type));
	const_values[1] = lb_get_equal_proc_for_type(p->module, key_type) .value;
	const_values[2] = lb_const_int(p->module, t_int, entry_size) .value;
	const_values[3] = lb_const_int(p->module, t_int, entry_align) .value;
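
The eight constants built here populate the runtime-side map header type (t_map_header). A hedged sketch of the shape being filled in: the first four slots are visible above (null pointer to the raw map, equality procedure, entry size, entry alignment), while the remaining field names and the equality signature are assumptions inferred from the key_offset/key_size computations earlier in the function.

// Assumed Odin-side shape of the 8-field header these constants target;
// everything past entry_align, and the exact proc signature, is a guess.
Map_Header_Sketch :: struct {
	m:            rawptr,                                     // const_values[0]: null here, patched per map value
	equal:        proc "contextless" (a, b: rawptr) -> bool,  // const_values[1]: key equality procedure (assumed signature)
	entry_size:   int,                                        // const_values[2]
	entry_align:  int,                                        // const_values[3]
	key_offset:   uintptr,                                    // assumed: type_offset_of(entry_type, 2)
	key_size:     int,                                        // assumed
	value_offset: uintptr,                                    // assumed
	value_size:   int,                                        // assumed
}
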
@@ -239,8 +239,11 @@ lbProcedure *lb_create_procedure(lbModule *m, Entity *entity, bool ignore_body)
	}

	if (p->body != nullptr) {
		// String debug_name = entity->token.string.text;
		String debug_name = p->name;

		p->debug_info = LLVMDIBuilderCreateFunction(m->debug_builder, scope,
			cast(char const *)entity->token.string.text, entity->token.string.len,
			cast(char const *)debug_name.text, debug_name.len,
			cast(char const *)p->name.text, p->name.len,
			file, line, type,
			is_local_to_unit, is_definition,
@@ -3018,8 +3018,7 @@ i64 type_align_of_internal(Type *t, TypePath *path) {
	} break;

	case Type_Map:
		init_map_internal_types(t);
		return type_align_of_internal(t->Map.internal_type, path);
		return build_context.word_size;
	case Type_Enum:
		return type_align_of_internal(t->Enum.base_type, path);
@@ -3248,11 +3247,16 @@ i64 type_size_of_internal(Type *t, TypePath *path) {

	case Type_DynamicArray:
		// data + len + cap + allocator(procedure+data)
		return 3*build_context.word_size + 2*build_context.word_size;
		return (3 + 2)*build_context.word_size;

	case Type_Map:
		init_map_internal_types(t);
		return type_size_of_internal(t->Map.internal_type, path);
		/*
			struct {
				hashes:  []int,               // 2 words
				entries: [dynamic]Entry_Type, // 5 words
			}
		*/
		return (2 + (3 + 2))*build_context.word_size;

	case Type_Tuple: {
		i64 count, align, size;
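
The word counts in the comments make the concrete sizes easy to check: with a 64-bit word size of 8 bytes, a dynamic array is (3 + 2) * 8 = 40 bytes and a map is (2 + (3 + 2)) * 8 = 56 bytes, while the earlier change in type_align_of_internal pins a map's alignment to one word. A small check under those assumptions; the numbers describe the map layout as of this commit, not necessarily later runtime rewrites.

package size_check

import "core:fmt"

main :: proc() {
	// hashes: []int is 2 words; entries: [dynamic]Entry is 5 words
	// (data + len + cap + allocator.procedure + allocator.data).
	fmt.println(size_of(map[string]int), align_of(map[string]int)) // expected 56 and 8 on a 64-bit target, per this commit
	fmt.println(size_of([dynamic]int), align_of([dynamic]int))     // expected 40 and 8
}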