Fix map assignment bug due to growth

Ginger Bill
2017-04-11 21:13:21 +01:00
parent 5916e71d4f
commit f5819eafa9
5 changed files with 78 additions and 26 deletions

View File

@@ -284,6 +284,9 @@ __string_eq :: proc(a, b: string) -> bool {
 	if len(a) != len(b) {
 		return false;
 	}
+	if len(a) == 0 {
+		return true;
+	}
 	if ^a[0] == ^b[0] {
 		return true;
 	}
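
The added length-zero early-out keeps ^a[0] from being evaluated on an empty string. A minimal C sketch of the same comparison shape, assuming a (pointer, length) string representation with illustrative names rather than the runtime's actual layout:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

// Sketch only: Str stands in for a length-delimited string.
typedef struct { const char *data; size_t len; } Str;

static bool str_eq(Str a, Str b) {
	if (a.len != b.len)   return false;
	if (a.len == 0)       return true;   // nothing to compare; data may be null
	if (a.data == b.data) return true;   // same backing memory
	return memcmp(a.data, b.data, a.len) == 0;
}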
@@ -570,10 +573,9 @@ __Map_Header :: struct #ordered {
 	value_offset: int,
 }
-__dynamic_map_reserve :: proc(using header: __Map_Header, cap: int) -> bool {
-	h := __dynamic_array_reserve(^m.hashes, size_of(int), align_of(int), cap);
-	e := __dynamic_array_reserve(^m.entries, entry_size, entry_align, cap);
-	return h && e;
+__dynamic_map_reserve :: proc(using header: __Map_Header, cap: int) {
+	__dynamic_array_reserve(^m.hashes, size_of(int), align_of(int), cap);
+	__dynamic_array_reserve(^m.entries, entry_size, entry_align, cap);
 }
 __dynamic_map_rehash :: proc(using header: __Map_Header, new_count: int) {
@@ -587,7 +589,7 @@ __dynamic_map_rehash :: proc(using header: __Map_Header, new_count: int) {
 	reserve(nm.hashes, new_count);
 	nm_hashes.len = nm_hashes.cap;
-	__dynamic_array_reserve(^nm.entries, entry_size, entry_align, m.entries.len);
+	__dynamic_array_reserve(^nm.entries, entry_size, entry_align, m.entries.cap);
 	for _, i in nm.hashes {
 		nm.hashes[i] = -1;
 	}
@@ -634,6 +636,8 @@ __dynamic_map_get :: proc(h: __Map_Header, key: __Map_Key) -> rawptr {
 __dynamic_map_set :: proc(using h: __Map_Header, key: __Map_Key, value: rawptr) {
 	index: int;
+	assert(value != nil);
 	if len(m.hashes) == 0 {
 		__dynamic_map_grow(h);
@@ -659,11 +663,12 @@ __dynamic_map_set :: proc(using h: __Map_Header, key: __Map_Key, value: rawptr)
 	if __dynamic_map_full(h) {
 		__dynamic_map_grow(h);
 	}
+	fmt.println("entries:", h.m.entries.len);
 }
 __dynamic_map_grow :: proc(using h: __Map_Header) {
-	new_count := 2*m.entries.len + 8;
+	new_count := max(2*m.entries.cap + 8, 8);
 	__dynamic_map_rehash(h, new_count);
 }
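
With this change the next table size is derived from the entry array's capacity rather than its length, with a floor of eight slots. A rough C sketch of that policy, using a plain realloc-backed array as a stand-in for the map's entry storage (names and types here are illustrative, not the Odin runtime's):

#include <stdlib.h>

typedef struct {
	void  *data;
	size_t len;
	size_t cap;
} Entries;

// Mirrors max(2*cap + 8, 8): the target is computed from what is already
// allocated, so each grow strictly enlarges the reservation even when the
// map holds few live entries.
static size_t next_capacity(size_t cap) {
	size_t next = 2*cap + 8;
	return next < 8 ? 8 : next;
}

static int grow(Entries *e, size_t elem_size) {
	size_t new_cap = next_capacity(e->cap);
	void *p = realloc(e->data, new_cap * elem_size);
	if (p == NULL) return 0;
	e->data = p;
	e->cap  = new_cap;
	return 1;
}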

View File

@@ -31,12 +31,16 @@ TexImage2D :: proc(target, level, internal_format,
 	format, type: i32, pixels: rawptr) #foreign lib "glTexImage2D";
-string_data :: proc(s: string) -> ^u8 #inline { return ^s[0]; }
+_string_data :: proc(s: string) -> ^u8 #inline { return ^s[0]; }
-_libgl := win32.LoadLibraryA(string_data("opengl32.dll\x00"));
+_libgl := win32.LoadLibraryA(_string_data("opengl32.dll\x00"));
 GetProcAddress :: proc(name: string) -> proc() #cc_c {
-	assert(name[len(name)-1] == 0);
+	if name[len(name)-1] == 0 {
+		name = name[..len(name)-1];
+	}
+	// NOTE(bill): null terminated
+	assert((^name[0] + len(name))^ == 0);
 	res := wgl.GetProcAddress(^name[0]);
 	if res == nil {
 		res = win32.GetProcAddress(_libgl, ^name[0]);
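
The wrapper now accepts both forms of input: if the counted length includes a trailing NUL, that byte is trimmed from the length, and the assert then checks that the byte just past the string still terminates it before the pointer is handed to the C API. A small C sketch of the same check, with a hypothetical helper name:

#include <assert.h>
#include <stddef.h>

// as_cstr is illustrative: a (data, len) string is only safe to pass as a
// char* to C if the byte immediately after its contents is '\0'.
static const char *as_cstr(const char *data, size_t *len) {
	if (*len > 0 && data[*len - 1] == '\0') {
		*len -= 1;              // drop the counted terminator from the length
	}
	assert(data[*len] == '\0'); // the terminator must still be present in memory
	return data;
}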

View File

@@ -3649,6 +3649,9 @@ gb_inline void gb_zero_size(void *ptr, isize size) { gb_memset(ptr, 0, size); }
 gb_inline void *gb_memcopy(void *dest, void const *source, isize n) {
 #if defined(_MSC_VER)
+	if (dest == NULL) {
+		return NULL;
+	}
 	// TODO(bill): Is this good enough?
 	__movsb(cast(u8 *)dest, cast(u8 *)source, n);
 // #elif defined(GB_SYSTEM_OSX) || defined(GB_SYSTEM_UNIX)
@@ -3659,12 +3662,20 @@ gb_inline void *gb_memcopy(void *dest, void const *source, isize n) {
 	// since this is probably not the way the author intended this to work.
 	// memcpy(dest, source, n);
 #elif defined(GB_CPU_X86)
+	if (dest == NULL) {
+		return NULL;
+	}
 	__asm__ __volatile__("rep movsb" : "+D"(dest), "+S"(source), "+c"(n) : : "memory");
 #else
 	u8 *d = cast(u8 *)dest;
 	u8 const *s = cast(u8 const *)source;
 	u32 w, x;
+	if (dest == NULL) {
+		return NULL;
+	}
 	for (; cast(uintptr)s % 4 && n; n--) {
 		*d++ = *s++;
 	}
@@ -3796,10 +3807,16 @@ gb_inline void *gb_memmove(void *dest, void const *source, isize n) {
 	u8 *d = cast(u8 *)dest;
 	u8 const *s = cast(u8 const *)source;
-	if (d == s)
+	if (dest == NULL) {
+		return NULL;
+	}
+	if (d == s) {
 		return d;
-	if (s+n <= d || d+n <= s) // NOTE(bill): Non-overlapping
+	}
+	if (s+n <= d || d+n <= s) { // NOTE(bill): Non-overlapping
 		return gb_memcopy(d, s, n);
+	}
 	if (d < s) {
 		if (cast(uintptr)s % gb_size_of(isize) == cast(uintptr)d % gb_size_of(isize)) {
@@ -3838,6 +3855,10 @@ gb_inline void *gb_memset(void *dest, u8 c, isize n) {
 	isize k;
 	u32 c32 = ((u32)-1)/255 * c;
+	if (dest == NULL) {
+		return NULL;
+	}
 	if (n == 0)
 		return dest;
 	s[0] = s[n-1] = c;
@@ -3858,14 +3879,16 @@ gb_inline void *gb_memset(void *dest, u8 c, isize n) {
 	*cast(u32 *)(s+0) = c32;
 	*cast(u32 *)(s+n-4) = c32;
-	if (n < 9)
+	if (n < 9) {
 		return dest;
+	}
 	*cast(u32 *)(s + 4) = c32;
 	*cast(u32 *)(s + 8) = c32;
 	*cast(u32 *)(s+n-12) = c32;
 	*cast(u32 *)(s+n- 8) = c32;
-	if (n < 25)
+	if (n < 25) {
 		return dest;
+	}
 	*cast(u32 *)(s + 12) = c32;
 	*cast(u32 *)(s + 16) = c32;
 	*cast(u32 *)(s + 20) = c32;
@@ -3898,12 +3921,17 @@ gb_inline void *gb_memset(void *dest, u8 c, isize n) {
 gb_inline i32 gb_memcompare(void const *s1, void const *s2, isize size) {
 	// TODO(bill): Heavily optimize
 	u8 const *s1p8 = cast(u8 const *)s1;
 	u8 const *s2p8 = cast(u8 const *)s2;
+	if (s1 == NULL || s2 == NULL) {
+		return 0;
+	}
 	while (size--) {
-		if (*s1p8 != *s2p8)
+		if (*s1p8 != *s2p8) {
 			return (*s1p8 - *s2p8);
+		}
 		s1p8++, s2p8++;
 	}
 	return 0;
@@ -3991,8 +4019,12 @@ gb_inline void gb_free_all (gbAllocator a)
 gb_inline void *gb_resize (gbAllocator a, void *ptr, isize old_size, isize new_size) { return gb_resize_align(a, ptr, old_size, new_size, GB_DEFAULT_MEMORY_ALIGNMENT); }
 gb_inline void *gb_resize_align(gbAllocator a, void *ptr, isize old_size, isize new_size, isize alignment) { return a.proc(a.data, gbAllocation_Resize, new_size, alignment, ptr, old_size, GB_DEFAULT_ALLOCATOR_FLAGS); }
-gb_inline void *gb_alloc_copy (gbAllocator a, void const *src, isize size) { return gb_memcopy(gb_alloc(a, size), src, size); }
-gb_inline void *gb_alloc_copy_align(gbAllocator a, void const *src, isize size, isize alignment) { return gb_memcopy(gb_alloc_align(a, size, alignment), src, size); }
+gb_inline void *gb_alloc_copy (gbAllocator a, void const *src, isize size) {
+	return gb_memcopy(gb_alloc(a, size), src, size);
+}
+gb_inline void *gb_alloc_copy_align(gbAllocator a, void const *src, isize size, isize alignment) {
+	return gb_memcopy(gb_alloc_align(a, size, alignment), src, size);
+}
 gb_inline char *gb_alloc_str(gbAllocator a, char const *str) {
 	return gb_alloc_str_len(a, str, gb_strlen(str));
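
gb_alloc_copy and gb_alloc_copy_align feed the allocator's return value straight into gb_memcopy, so the NULL guards added to the memory routines above are what turn a failed allocation into a NULL result instead of a write through a null pointer. A hedged sketch of the same pattern using standard C pieces (the helper name is made up):

#include <stddef.h>
#include <string.h>

// alloc_copy is illustrative: if the allocator returns NULL, the copy step
// must either be skipped or, as in the guarded gb_memcopy above, return
// NULL itself rather than dereference the destination.
static void *alloc_copy(void *(*alloc)(size_t), const void *src, size_t size) {
	void *dest = alloc(size);
	if (dest == NULL) {
		return NULL;
	}
	return memcpy(dest, src, size);
}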
@@ -6037,6 +6069,9 @@ gb_inline void gb_str_to_upper(char *str) {
 gb_inline isize gb_strlen(char const *str) {
 	char const *begin = str;
 	isize const *w;
+	if (str == NULL) {
+		return 0;
+	}
 	while (cast(uintptr)str % sizeof(usize)) {
 		if (!*str)
 			return str - begin;
@@ -7590,10 +7625,14 @@ u64 gb_murmur64_seed(void const *data_, isize len, u64 seed) {
 gbFileError gb_file_new(gbFile *f, gbFileDescriptor fd, gbFileOperations ops, char const *filename) {
 	gbFileError err = gbFileError_None;
+	isize len = gb_strlen(filename);
+	// gb_printf_err("gb_file_new: %s\n", filename);
 	f->ops = ops;
 	f->fd = fd;
-	f->filename = gb_alloc_str(gb_heap_allocator(), filename);
+	f->filename = gb_alloc_array(gb_heap_allocator(), char, len+1);
+	gb_memcopy(cast(char *)f->filename, cast(char *)filename, len+1);
 	f->last_write_time = gb_file_last_write_time(f->filename);
 	return err;

View File

@@ -1592,13 +1592,14 @@ irValue *ir_insert_dynamic_map_key_and_value(irProcedure *proc, irValue *addr, T
 	irValue *h = ir_gen_map_header(proc, addr, map_type);
 	irValue *key = ir_gen_map_key(proc, map_key, map_type->Map.key);
 	irValue *v = ir_emit_conv(proc, map_value, map_type->Map.value);
-	irValue *ptr = ir_address_from_load_or_generate_local(proc, v);
-	ptr = ir_emit_conv(proc, ptr, t_rawptr);
+	irValue *ptr = ir_add_local_generated(proc, ir_type(v));
+	ir_emit_store(proc, ptr, v);
 	irValue **args = gb_alloc_array(proc->module->allocator, irValue *, 3);
 	args[0] = h;
 	args[1] = key;
-	args[2] = ptr;
+	args[2] = ir_emit_conv(proc, ptr, t_rawptr);
 	return ir_emit_global_call(proc, "__dynamic_map_set", args, 3);
 }
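
This hunk is where the assignment-during-growth bug named in the commit title is addressed. Previously the rawptr argument to __dynamic_map_set could be the address of the loaded value, which may alias the map's own entry storage; if the set triggers a grow and rehash, that storage moves and the pointer is read after it is stale. Emitting a fresh local (ir_add_local_generated), storing the value into it, and passing that local's address keeps the source stable across growth. A small C sketch of the same hazard, using a realloc-backed array as a stand-in for the map's entries (names are illustrative):

#include <stdlib.h>

typedef struct {
	int   *data;
	size_t len, cap;
} Vec;

// push copies *value into a local BEFORE any reallocation, so the call
// stays correct even when value points into v->data itself, for example
// vec_push(&v, &v.data[0]);
static void vec_push(Vec *v, const int *value) {
	int tmp = *value;                        // copy first: growth may move data
	if (v->len == v->cap) {
		size_t new_cap = v->cap ? 2*v->cap : 8;
		int *p = realloc(v->data, new_cap * sizeof *p);
		if (p == NULL) abort();              // keep the sketch simple
		v->data = p;
		v->cap  = new_cap;
	}
	v->data[v->len++] = tmp;                 // using *value here instead would risk a stale pointer
}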
@@ -3334,11 +3335,11 @@ bool is_double_pointer(Type *t) {
 	if (!is_type_pointer(t)) {
 		return false;
 	}
-	t = type_deref(t);
-	if (t == NULL) {
+	Type *td = type_deref(t);
+	if (td == NULL || td == t) {
 		return false;
 	}
-	return is_type_pointer(t);
+	return is_type_pointer(td);
 }
 irValue *ir_build_expr(irProcedure *proc, AstNode *expr) {
@@ -3634,7 +3635,8 @@ irValue *ir_build_expr(irProcedure *proc, AstNode *expr) {
 			return ir_slice_count(proc, v);
 		} else if (is_type_dynamic_array(t)) {
 			return ir_dynamic_array_count(proc, v);
-		} else if (is_type_map(t)) {
+		} else if (is_type_dynamic_map(t)) {
+			ir_emit_comment(proc, str_lit("len: map"));
 			irValue *entries = ir_emit_struct_ev(proc, v, 1);
 			return ir_dynamic_array_count(proc, entries);
 		}
@@ -3763,7 +3765,7 @@ irValue *ir_build_expr(irProcedure *proc, AstNode *expr) {
 			return ir_emit_load(proc, slice);
 		} else if (is_type_dynamic_map(type)) {
 			irValue *int_16 = ir_const_int(a, 16);
-			irValue *cap = v_zero;
+			irValue *cap = int_16;
 			if (ce->args.count == 2) {
 				cap = ir_emit_conv(proc, ir_build_expr(proc, ce->args.e[1]), t_int);
 			}

View File

@@ -113,6 +113,8 @@ TOKEN_KIND(Token__KeywordBegin, "_KeywordBegin"), \
 	TOKEN_KIND(Token_push_context, "push_context"), \
 	TOKEN_KIND(Token_push_allocator, "push_allocator"), \
 	TOKEN_KIND(Token_asm, "asm"), \
+	TOKEN_KIND(Token_yield, "yield"), \
+	TOKEN_KIND(Token_await, "await"), \
 TOKEN_KIND(Token__KeywordEnd, "_KeywordEnd"), \
 TOKEN_KIND(Token_Count, "")