diff --git a/src/array.cpp b/src/array.cpp
index a2c802168..8ac025f39 100644
--- a/src/array.cpp
+++ b/src/array.cpp
@@ -333,6 +333,7 @@ void array_set_capacity(Array<T> *array, isize capacity) {
 	if (new_data == nullptr) {
 		if (capacity > 0) {
 			new_data = gb_alloc_array(array->allocator, T, capacity);
+			GB_ASSERT(new_data != nullptr);
 			gb_memmove(new_data, array->data, gb_size_of(T) * array->capacity);
 		}
 		gb_free(array->allocator, array->data);
diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp
index 1adf620e8..2267907a1 100644
--- a/src/llvm_backend.cpp
+++ b/src/llvm_backend.cpp
@@ -523,7 +523,7 @@ lbValue lb_const_hash(lbModule *m, lbValue key, Type *key_type) {

 	lbValue hashed_key = {};

-
+#if 0
 	if (lb_is_const(key)) {
 		u64 hash = 0xcbf29ce484222325;
 		if (is_type_cstring(key_type)) {
@@ -545,7 +545,7 @@ lbValue lb_const_hash(lbModule *m, lbValue key, Type *key_type) {

 				size_t ulength = 0;
 				text = LLVMGetAsString(data, &ulength);
-				gb_printf_err("%td %td %s\n", length, ulength, text);
+				gb_printf_err("%lld %llu %s\n", length, ulength, text);
 				length = gb_min(length, cast(i64)ulength);
 			}
 			hash = fnv64a(text, cast(isize)length);
@@ -559,7 +559,7 @@ lbValue lb_const_hash(lbModule *m, lbValue key, Type *key_type) {
 		}
 		hashed_key = lb_const_int(m, t_uintptr, hash);
 	}
-
+#endif
 	return hashed_key;
 }

diff --git a/src/queue.cpp b/src/queue.cpp
index a9bbbeee7..d69a2845c 100644
--- a/src/queue.cpp
+++ b/src/queue.cpp
@@ -1,27 +1,29 @@
 #define MPMC_CACHE_LINE_SIZE 64

+typedef std::atomic<i32> MPMCQueueAtomicIdx;
+
 // Multiple Producer Multiple Consumer Queue
 template <typename T>
 struct MPMCQueue {
-	static size_t const PAD0_OFFSET = (sizeof(T *) + sizeof(std::atomic<i32> *) + sizeof(gbAllocator) + sizeof(BlockingMutex) + sizeof(i32) + sizeof(i32));
+	static size_t const PAD0_OFFSET = (sizeof(T *) + sizeof(MPMCQueueAtomicIdx *) + sizeof(gbAllocator) + sizeof(BlockingMutex) + sizeof(i32) + sizeof(i32));

-	T *               nodes;
-	std::atomic<i32> *indices;
-	gbAllocator       allocator;
-	BlockingMutex     mutex;
-	std::atomic<i32>  count;
-	i32               mask;
+	T *                 nodes;
+	MPMCQueueAtomicIdx *indices;
+	gbAllocator         allocator;
+	BlockingMutex       mutex;
+	MPMCQueueAtomicIdx  count;
+	i32                 mask; // capacity-1, because capacity must be a power of 2

 	char pad0[(MPMC_CACHE_LINE_SIZE*2 - PAD0_OFFSET) % MPMC_CACHE_LINE_SIZE];
-	std::atomic<i32> head_idx;
+	MPMCQueueAtomicIdx head_idx;

 	char pad1[MPMC_CACHE_LINE_SIZE - sizeof(i32)];
-	std::atomic<i32> tail_idx;
+	MPMCQueueAtomicIdx tail_idx;
 };


-void mpmc_internal_init_indices(std::atomic<i32> *indices, i32 offset, i32 size) {
+void mpmc_internal_init_indices(MPMCQueueAtomicIdx *indices, i32 offset, i32 size) {
 	GB_ASSERT(offset % 8 == 0);
 	GB_ASSERT(size % 8 == 0);

@@ -54,7 +56,7 @@ void mpmc_init(MPMCQueue<T> *q, gbAllocator a, isize size_i) {
 	q->mask = size-1;
 	q->allocator = a;
 	q->nodes = gb_alloc_array(a, T, size);
-	q->indices = cast(std::atomic<i32> *)gb_alloc_array(a, i32, size);
+	q->indices = gb_alloc_array(a, MPMCQueueAtomicIdx, size);
 	mpmc_internal_init_indices(q->indices, 0, q->mask+1);
 }