Begin writing dynamic map procs and fix `using` bug in IR

This commit is contained in:
Ginger Bill
2017-02-05 23:52:01 +00:00
parent b1562edccf
commit 00c7489157
11 changed files with 630 additions and 320 deletions

View File

@@ -12,9 +12,14 @@
main :: proc() {
Value :: type f32;
m0: map[int]Value;
m1: map[string]Value;
m2: map[f32]Value;
m: map[int]Value;
m[123] = 345.0;
x, ok := m[123];
if ok {
fmt.println(x);
}
// fm: map[128, int]f32;
/*

View File

@@ -352,9 +352,6 @@ Raw_Dynamic_Map :: struct #ordered {
entries: Raw_Dynamic_Array,
};
__default_hash :: proc(data: rawptr, len: int) -> u64 {
return hash.murmur64(data, len);
}
__dynamic_array_reserve :: proc(array_: rawptr, elem_size, elem_align: int, capacity: int) -> bool {
@@ -410,3 +407,134 @@ __dynamic_array_append :: proc(array_: rawptr, elem_size, elem_align: int,
return array.count;
}
// Default hash for dynamic maps: hashes the raw key bytes with 64-bit
// MurmurHash from the core `hash` module.
__default_hash :: proc(data: []byte) -> u64 {
return hash.murmur64(data);
}
// Result of a map lookup. All three indices are -1 when not applicable:
// hash_index  -- slot in `hashes` the key maps to,
// entry_prev  -- previous entry in that slot's collision chain (-1 = head),
// entry_index -- matching entry in `entries` (-1 = key not found).
Map_Find_Result :: struct {
hash_index: int,
entry_prev: int,
entry_index: int,
}
// Header that starts every map entry. The concrete key and value follow it
// in memory (at Map_Header.key_offset / value_offset); they cannot be
// spelled here because this header is shared by all map instantiations.
Map_Entry_Header :: struct {
hash: u64,
// Index of the next entry in this slot's collision chain (-1 terminates).
next: int,
/*
key: Key_Type,
value: Value_Type,
*/
}
// Type-erased description of a concrete map: a pointer to the raw map plus
// the memory layout of its entry type, so the generic __dynamic_map_* procs
// can operate on any key/value pair.
Map_Header :: struct {
m: ^Raw_Dynamic_Map,
is_string: bool, // presumably set when the key type is a string -- TODO confirm at call sites
entry_size: int,
entry_align: int,
key_size: int,
key_align: int,
key_offset: int,
value_offset: int,
}
// Pre-allocates room for `capacity` elements in both backing arrays of the
// map (the hash-slot array and the entry array). Both reservations are
// always attempted, even if the first fails; reports whether both succeeded.
__dynamic_map_reserve :: proc(using header: Map_Header, capacity: int) -> bool {
	hashes_ok := __dynamic_array_reserve(^m.hashes, size_of(int), align_of(int), capacity);
	entries_ok := __dynamic_array_reserve(^m.entries, entry_size, entry_align, capacity);
	return hashes_ok && entries_ok;
}
// Rebuilds the map with `new_count` hash slots and re-inserts every entry
// from the old map, then installs the rebuilt map in place of the old one.
__dynamic_map_rehash :: proc(using header: Map_Header, new_count: int) {
	// Build the replacement map in a local, driven through a copy of the
	// header so the generic helpers (find/add_entry/grow) see the new storage.
	new_header := header;
	nm: Raw_Dynamic_Map;
	new_header.m = ^nm;
	reserve(^nm.hashes, new_count);
	nm.hashes.count = nm.hashes.capacity;
	__dynamic_array_reserve(^nm.entries, entry_size, entry_align, m.entries.count);
	// -1 marks an empty hash slot.
	for _, i in nm.hashes {
		nm.hashes[i] = -1;
	}
	// Re-insert every entry of the OLD map (`m`), not the new one: after the
	// reserve above `nm.entries.count` is still 0, so the original loop over
	// `nm.entries` never executed and every entry was dropped on rehash.
	for i := 0; i < m.entries.count; i += 1 {
		data := cast(^byte)m.entries.data + i*entry_size;
		entry_header := cast(^Map_Entry_Header)data;
		if nm.hashes.count == 0 {
			__dynamic_map_grow(new_header);
		}
		fr := __dynamic_map_find(new_header, entry_header);
		j := __dynamic_map_add_entry(new_header, entry_header);
		if fr.entry_prev < 0 {
			// First entry for this slot: point the slot directly at it.
			nm.hashes[fr.hash_index] = j;
		} else {
			// Otherwise append to the slot's existing collision chain.
			e := cast(^byte)nm.entries.data + fr.entry_prev*entry_size;
			eh := cast(^Map_Entry_Header)e;
			eh.next = j;
		}
		// Link the entry and copy its value payload across.
		// NOTE(review): only [value_offset, entry_size) is copied here; this
		// assumes __dynamic_map_add_entry (a stub in this commit) copies the
		// header and key -- verify once it is implemented.
		ndata := cast(^byte)nm.entries.data + j*entry_size;
		e := cast(^Map_Entry_Header)ndata;
		e.next = fr.entry_index;
		mem.copy(ndata+value_offset, data+value_offset, entry_size-value_offset);
		if __dynamic_map_full(new_header) {
			__dynamic_map_grow(new_header);
		}
	}
	// NOTE(review): free(header.m) followed by header.m^ = nm writes through
	// a just-freed pointer unless `free` here only releases the map's internal
	// buffers rather than the Raw_Dynamic_Map itself -- confirm.
	free(header.m);
	header.m^ = nm;
}
// Returns a pointer to the value stored for the key described by
// `entry_header`, or nil when the key is not present in the map.
__dynamic_map_get :: proc(h: Map_Header, entry_header: ^Map_Entry_Header) -> rawptr {
	fr := __dynamic_map_find(h, entry_header);
	if fr.entry_index < 0 {
		return nil;
	}
	entry := cast(^byte)h.m.entries.data + fr.entry_index*h.entry_size;
	return entry + h.value_offset;
}
// Inserts or updates the entry described by `entry_header`, copying the
// value payload from `value`. Grows the table when it is empty, and again
// afterwards when __dynamic_map_full reports it needs to grow.
__dynamic_map_set :: proc(using h: Map_Header, entry_header: ^Map_Entry_Header, value: rawptr) {
if m.hashes.count == 0 {
__dynamic_map_grow(h);
}
index: int;
fr := __dynamic_map_find(h, entry_header);
if fr.entry_index >= 0 {
// Key already present: overwrite its value in place.
index = fr.entry_index;
} else {
// Key absent: append a new entry and link it into the slot's chain
// (either as the chain head or after the last chain entry found).
index = __dynamic_map_add_entry(h, entry_header);
if fr.entry_prev >= 0 {
entry := cast(^Map_Entry_Header)(cast(^byte)m.entries.data + fr.entry_prev*entry_size);
entry.next = index;
} else {
m.hashes[fr.hash_index] = index;
}
}
{
// Copy everything from value_offset onwards into the entry.
// NOTE(review): assumes the key bytes were written by
// __dynamic_map_add_entry (still a stub in this commit) -- verify.
data := cast(^byte)m.entries.data + index*entry_size;
mem.copy(data+value_offset, value, entry_size-value_offset);
}
if __dynamic_map_full(h) {
__dynamic_map_grow(h);
}
}
// TODO(review): the four procs below are unimplemented placeholders in this
// commit ("Begin writing dynamic map procs"); the procs above already depend
// on their eventual contracts.

// Placeholder: will enlarge the map's backing storage. Currently a no-op.
__dynamic_map_grow :: proc(using header: Map_Header) {
}
// Placeholder: will report whether the map should grow. Always false for now.
__dynamic_map_full :: proc(using header: Map_Header) -> bool {
return false;
}
// Placeholder: will locate a key. Always reports "not found" ({-1, -1, -1}).
__dynamic_map_find :: proc(using header: Map_Header, entry_header: ^Map_Entry_Header) -> Map_Find_Result {
return Map_Find_Result{-1, -1, -1};
}
// Placeholder: will append an entry and return its index. Always 0 for now.
__dynamic_map_add_entry :: proc(using header: Map_Header, entry_header: ^Map_Entry_Header) -> int {
return 0;
}

View File

@@ -1,130 +1,112 @@
crc32 :: proc(data: rawptr, len: int) -> u32 {
crc32 :: proc(data: []byte) -> u32 {
result := ~cast(u32)0;
s := slice_ptr(cast(^u8)data, len);
for i in 0..<len {
b := cast(u32)s[i];
result = result>>8 ~ __CRC32_TABLE[(result ~ b) & 0xff];
for b in data {
result = result>>8 ~ __CRC32_TABLE[(result ~ cast(u32)b) & 0xff];
}
return ~result;
}
crc64 :: proc(data: rawptr, len: int) -> u64 {
crc64 :: proc(data: []byte) -> u64 {
result := ~cast(u64)0;
s := slice_ptr(cast(^u8)data, len);
for i in 0..<len {
b := cast(u64)s[i];
result = result>>8 ~ __CRC64_TABLE[(result ~ b) & 0xff];
for b in data {
result = result>>8 ~ __CRC64_TABLE[(result ~ cast(u64)b) & 0xff];
}
return ~result;
}
fnv32 :: proc(data: rawptr, len: int) -> u32 {
s := slice_ptr(cast(^u8)data, len);
fnv32 :: proc(data: []byte) -> u32 {
h: u32 = 0x811c9dc5;
for i in 0..<len {
h = (h * 0x01000193) ~ cast(u32)s[i];
for b in data {
h = (h * 0x01000193) ~ cast(u32)b;
}
return h;
}
fnv64 :: proc(data: rawptr, len: int) -> u64 {
s := slice_ptr(cast(^u8)data, len);
fnv64 :: proc(data: []byte) -> u64 {
h: u64 = 0xcbf29ce484222325;
for i in 0..<len {
h = (h * 0x100000001b3) ~ cast(u64)s[i];
for b in data {
h = (h * 0x100000001b3) ~ cast(u64)b;
}
return h;
}
fnv32a :: proc(data: rawptr, len: int) -> u32 {
s := slice_ptr(cast(^u8)data, len);
fnv32a :: proc(data: []byte) -> u32 {
h: u32 = 0x811c9dc5;
for i in 0..<len {
h = (h ~ cast(u32)s[i]) * 0x01000193;
for b in data {
h = (h ~ cast(u32)b) * 0x01000193;
}
return h;
}
fnv64a :: proc(data: rawptr, len: int) -> u64 {
s := slice_ptr(cast(^u8)data, len);
fnv64a :: proc(data: []byte) -> u64 {
h: u64 = 0xcbf29ce484222325;
for i in 0..<len {
h = (h ~ cast(u64)s[i]) * 0x100000001b3;
for b in data {
h = (h ~ cast(u64)b) * 0x100000001b3;
}
return h;
}
murmur32 :: proc(data: rawptr, len: int) -> u32 {
compile_assert(ODIN_ENDIAN == "little");
// 32-bit MurmurHash3 (x86 variant, seed 0) over `data`.
// Loads each 4-byte block directly as a u32, so it assumes a little-endian
// target (matching the compile_assert the previous rawptr version carried).
murmur32 :: proc(data: []byte) -> u32 {
	c1_32: u32 : 0xcc9e2d51;
	c2_32: u32 : 0x1b873593;
	h1: u32 = 0;
	nblocks := data.count/4;
	// Body: mix every whole 4-byte block into h1.
	p := data.data;
	p1 := p + 4*nblocks;
	for ; p < p1; p += 4 {
		k1 := (cast(^u32)p)^;
		k1 *= c1_32;
		k1 = (k1 << 15) | (k1 >> 17);
		k1 *= c2_32;
		h1 ~= k1;
		h1 = (h1 << 13) | (h1 >> 19);
		h1 = h1*5 + 0xe6546b64;
	}
	// Tail: the 0-3 bytes left over after the whole blocks.
	tail := data[nblocks*4:];
	k1: u32;
	match tail.count&3 {
	case 3:
		k1 ~= cast(u32)tail[2] << 16;
		fallthrough;
	case 2:
		// BUG FIX: this read used tail[2] -- out of bounds for a 2-byte tail
		// and the wrong byte after falling through from case 3. MurmurHash3
		// mixes tail[1] at this shift.
		k1 ~= cast(u32)tail[1] << 8;
		fallthrough;
	case 1:
		k1 ~= cast(u32)tail[0];
		k1 *= c1_32;
		k1 = (k1 << 15) | (k1 >> 17);
		k1 *= c2_32;
		h1 ~= k1;
	}
	// Finalization: incorporate the length and avalanche the bits.
	h1 ~= cast(u32)data.count;
	h1 ~= h1 >> 16;
	h1 *= 0x85ebca6b;
	h1 ~= h1 >> 13;
	h1 *= 0xc2b2ae35;
	h1 ~= h1 >> 16;
	return h1;
}
murmur64 :: proc(data: []byte) -> u64 {
SEED :: 0x9747b28c;
key := cast(^u8)data;
h: u32 = SEED;
if len > 3 {
key_x4 := cast(^u32)key;
i := len>>2;
for {
k := key_x4^; key_x4 += 1;
k *= 0xcc9e2d51;
k = (k << 15) | (k >> 17);
k *= 0x1b873593;
h ~= k;
h = (h << 13) | (h >> 19);
h += (h << 2) + 0xe6546b64;
i -= 1;
if i == 0 {
break;
}
}
key = cast(^u8)key_x4;
}
if len&3 != 0 {
i := len&3;
k: u32 = 0;
key += i-1;
for {
k <<= 8;
k |= cast(u32)key^;
key -= 1;
i -= 1;
if i == 0 {
break;
}
}
k *= 0xcc9e2d51;
k = (k << 15) | (k >> 17);
k *= 0x1b873593;
h ~= k;
}
h ~= cast(u32)len;
h ~= h >> 16;
h *= 0x85ebca6b;
h ~= h >> 13;
h *= 0xc2b2ae35;
h ~= h >> 16;
return h;
}
murmur64 :: proc(data_: rawptr, len: int) -> u64 {
SEED :: 0x9747b28c;
when size_of(int) == 8 {
when false && size_of(int) == 8 {
m :: 0xc6a4a7935bd1e995;
r :: 47;
h: u64 = SEED ~ (cast(u64)len * m);
h: u64 = SEED ~ (cast(u64)data.count * m);
data64 := slice_ptr(cast(^u64)^data[0], data.count/size_of(u64));
data := slice_ptr(cast(^u64)data_, len/size_of(u64));
data2 := slice_ptr(cast(^u8)data_, len);
for i in 0 ..< data.count {
k := data[i];
for _, i in data64 {
k := data64[i];
k *= m;
k ~= k>>r;
@@ -134,15 +116,15 @@ murmur64 :: proc(data_: rawptr, len: int) -> u64 {
h *= m;
}
match len & 7 {
case 7: h ~= cast(u64)data2[6] << 48; fallthrough;
case 6: h ~= cast(u64)data2[5] << 40; fallthrough;
case 5: h ~= cast(u64)data2[4] << 32; fallthrough;
case 4: h ~= cast(u64)data2[3] << 24; fallthrough;
case 3: h ~= cast(u64)data2[2] << 16; fallthrough;
case 2: h ~= cast(u64)data2[1] << 8; fallthrough;
match data.count&7 {
case 7: h ~= cast(u64)data[6] << 48; fallthrough;
case 6: h ~= cast(u64)data[5] << 40; fallthrough;
case 5: h ~= cast(u64)data[4] << 32; fallthrough;
case 4: h ~= cast(u64)data[3] << 24; fallthrough;
case 3: h ~= cast(u64)data[2] << 16; fallthrough;
case 2: h ~= cast(u64)data[1] << 8; fallthrough;
case 1:
h ~= cast(u64)data2[0];
h ~= cast(u64)data[0];
h *= m;
}
@@ -155,15 +137,16 @@ murmur64 :: proc(data_: rawptr, len: int) -> u64 {
m :: 0x5bd1e995;
r :: 24;
h1: u32 = cast(u32)SEED ~ cast(u32)len;
h1: u32 = cast(u32)SEED ~ cast(u32)data.count;
h2: u32 = SEED >> 32;
data := slice_ptr(cast(^u32)data_, len/size_of(u32));
data32 := slice_ptr(cast(^u32)^data[0], data.count/size_of(u32));
len := data.count;
i := 0;
for len >= 8 {
k1, k2: u32;
k1 = data[i]; i += 1;
k1 = data32[i]; i += 1;
k1 *= m;
k1 ~= k1>>r;
k1 *= m;
@@ -171,7 +154,7 @@ murmur64 :: proc(data_: rawptr, len: int) -> u64 {
h1 ~= k1;
len -= 4;
k2 = data[i]; i += 1;
k2 = data32[i]; i += 1;
k2 *= m;
k2 ~= k2>>r;
k2 *= m;
@@ -182,7 +165,7 @@ murmur64 :: proc(data_: rawptr, len: int) -> u64 {
if len >= 4 {
k1: u32;
k1 = data[i]; i += 1;
k1 = data32[i]; i += 1;
k1 *= m;
k1 ~= k1>>r;
k1 *= m;
@@ -191,11 +174,14 @@ murmur64 :: proc(data_: rawptr, len: int) -> u64 {
len -= 4;
}
data8 := slice_ptr(cast(^u8)(data.data+i), 3); // NOTE(bill): This is unsafe
data8 := slice_to_bytes(data32[i:])[:3];
match len {
case 3: h2 ~= cast(u32)data8[2] << 16; fallthrough;
case 2: h2 ~= cast(u32)data8[1] << 8; fallthrough;
case 3:
h2 ~= cast(u32)data8[2] << 16;
fallthrough;
case 2:
h2 ~= cast(u32)data8[1] << 8;
fallthrough;
case 1:
h2 ~= cast(u32)data8[0];
h2 *= m;
@@ -216,7 +202,6 @@ murmur64 :: proc(data_: rawptr, len: int) -> u64 {
}
__CRC32_TABLE := [256]u32{
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,

View File

@@ -61,24 +61,9 @@ void check_init_variables(Checker *c, Entity **lhs, isize lhs_count, AstNodeArra
// NOTE(bill): If there is a bad syntax error, rhs > lhs which would mean there would need to be
// an extra allocation
Array(Operand) operands;
ArrayOperand operands = {0};
array_init_reserve(&operands, c->tmp_allocator, 2*lhs_count);
// TODO(bill): Allow for type hints from the entities
for_array(i, inits) {
AstNode *rhs = inits.e[i];
Operand o = {0};
check_multi_expr(c, &o, rhs);
if (o.type == NULL || o.type->kind != Type_Tuple) {
array_add(&operands, o);
} else {
TypeTuple *tuple = &o.type->Tuple;
for (isize j = 0; j < tuple->variable_count; j++) {
o.type = tuple->variables[j]->type;
array_add(&operands, o);
}
}
}
check_unpack_arguments(c, &operands, inits);
isize rhs_count = operands.count;
for_array(i, operands) {

View File

@@ -252,7 +252,6 @@ void check_assignment(Checker *c, Operand *operand, Type *type, String context_n
return;
}
if (is_type_untyped(operand->type)) {
Type *target_type = type;
if (type == NULL || is_type_any(type)) {
@@ -1094,6 +1093,15 @@ i64 check_array_or_map_count(Checker *c, AstNode *e, bool is_map) {
return 0;
}
// Builds the (value, bool) 2-tuple type produced by a comma-ok map index,
// e.g. `v, ok := m[key]`. Both tuple elements are anonymous parameters.
Type *make_map_tuple_type(gbAllocator a, Type *value) {
	Type *tuple = make_type_tuple(a);
	tuple->Tuple.variable_count = 2;
	tuple->Tuple.variables = gb_alloc_array(a, Entity *, 2);
	tuple->Tuple.variables[0] = make_entity_param(a, NULL, blank_token, value, false, false);
	tuple->Tuple.variables[1] = make_entity_param(a, NULL, blank_token, t_bool, false, false);
	return tuple;
}
void check_map_type(Checker *c, Type *type, AstNode *node) {
GB_ASSERT(type->kind == Type_Map);
ast_node(mt, MapType, node);
@@ -1121,6 +1129,75 @@ void check_map_type(Checker *c, Type *type, AstNode *node) {
type->Map.key = key;
type->Map.value = value;
gbAllocator a = c->allocator;
{
Type *entry_type = make_type_struct(a);
/*
struct {
hash: u64,
next: int,
key: Key_Type,
value: Value_Type,
}
*/
AstNode *dummy_node = gb_alloc_item(a, AstNode);
dummy_node->kind = AstNode_Invalid;
check_open_scope(c, dummy_node);
isize field_count = 4;
Entity **fields = gb_alloc_array(a, Entity *, field_count);
fields[0] = make_entity_field(a, c->context.scope, make_token_ident(str_lit("hash")), t_u64, false, false);
fields[1] = make_entity_field(a, c->context.scope, make_token_ident(str_lit("next")), t_int, false, false);
fields[2] = make_entity_field(a, c->context.scope, make_token_ident(str_lit("key")), key, false, false);
fields[3] = make_entity_field(a, c->context.scope, make_token_ident(str_lit("value")), value, false, false);
check_close_scope(c);
entry_type->Record.fields = fields;
entry_type->Record.fields_in_src_order = fields;
entry_type->Record.field_count = field_count;
type_set_offsets(c->sizes, a, entry_type);
type->Map.entry_type = entry_type;
}
{
Type *generated_struct_type = make_type_struct(a);
/*
struct {
hashes: [dynamic]int,
entries: [dynamic]Entry_Type,
}
*/
AstNode *dummy_node = gb_alloc_item(a, AstNode);
dummy_node->kind = AstNode_Invalid;
check_open_scope(c, dummy_node);
Type *hashes_type = make_type_dynamic_array(a, t_int);
Type *entries_type = make_type_dynamic_array(a, type->Map.entry_type);
isize field_count = 2;
Entity **fields = gb_alloc_array(a, Entity *, field_count);
fields[0] = make_entity_field(a, c->context.scope, make_token_ident(str_lit("hashes")), hashes_type, false, false);
fields[1] = make_entity_field(a, c->context.scope, make_token_ident(str_lit("entries")), entries_type, false, false);
check_close_scope(c);
generated_struct_type->Record.fields = fields;
generated_struct_type->Record.fields_in_src_order = fields;
generated_struct_type->Record.field_count = field_count;
type_set_offsets(c->sizes, a, generated_struct_type);
type->Map.generated_struct_type = generated_struct_type;
}
type->Map.lookup_result_type = make_map_tuple_type(a, value);
// error_node(node, "`map` types are not yet implemented");
}
@@ -3335,14 +3412,26 @@ bool check_builtin_procedure(Checker *c, Operand *operand, AstNode *call, i32 id
operand->mode = Addressing_Value;
} break;
case BuiltinProc_slice_to_bytes: {
// slice_to_bytes :: proc(a: []T) -> []byte
Type *slice_type = base_type(operand->type);
if (!is_type_slice(slice_type)) {
gbString type_str = type_to_string(operand->type);
error_node(call, "Expected a slice type, got `%s`", type_str);
gb_string_free(type_str);
return false;
}
operand->type = t_byte_slice;
operand->mode = Addressing_Value;
} break;
case BuiltinProc_min: {
// min :: proc(a, b: comparable) -> comparable
Type *type = base_type(operand->type);
if (!is_type_comparable(type) || !(is_type_numeric(type) || is_type_string(type))) {
gbString type_str = type_to_string(operand->type);
error_node(call,
"Expected a comparable numeric type to `min`, got `%s`",
type_str);
error_node(call, "Expected a comparable numeric type to `min`, got `%s`", type_str);
gb_string_free(type_str);
return false;
}
@@ -3730,28 +3819,39 @@ int valid_proc_and_score_cmp(void const *a, void const *b) {
return sj < si ? -1 : sj > si;
}
typedef Array(Operand) ArrayOperand;
// Type-checks each argument expression and flattens the results into
// `operands`: a map-index operand is widened to its (value, bool) tuple,
// and any tuple-typed result contributes one operand per tuple element.
void check_unpack_arguments(Checker *c, ArrayOperand *operands, AstNodeArray args) {
	for_array(i, args) {
		Operand operand = {0};
		check_multi_expr(c, &operand, args.e[i]);

		// A map index may be consumed as `value, ok`; record the tuple type
		// on the expression so later stages see the same type.
		if (operand.mode == Addressing_MapIndex) {
			Type *tuple_type = make_map_tuple_type(c->allocator, operand.type);
			add_type_and_value(&c->info, operand.expr, operand.mode, tuple_type, (ExactValue){0});
			operand.type = tuple_type;
		}

		if (operand.type != NULL && operand.type->kind == Type_Tuple) {
			// Spread a tuple result into one operand per element.
			TypeTuple *tuple = &operand.type->Tuple;
			for (isize j = 0; j < tuple->variable_count; j++) {
				operand.type = tuple->variables[j]->type;
				array_add(operands, operand);
			}
		} else {
			array_add(operands, operand);
		}
	}
}
Type *check_call_arguments(Checker *c, Operand *operand, Type *proc_type, AstNode *call) {
GB_ASSERT(call->kind == AstNode_CallExpr);
ast_node(ce, CallExpr, call);
Array(Operand) operands;
ArrayOperand operands;
array_init_reserve(&operands, heap_allocator(), 2*ce->args.count);
for_array(i, ce->args) {
Operand o = {0};
check_multi_expr(c, &o, ce->args.e[i]);
if (o.type == NULL || o.type->kind != Type_Tuple) {
array_add(&operands, o);
} else {
TypeTuple *tuple = &o.type->Tuple;
for (isize j = 0; j < tuple->variable_count; j++) {
o.type = tuple->variables[j]->type;
array_add(&operands, o);
}
}
}
check_unpack_arguments(c, &operands, ce->args);
if (operand->mode == Addressing_Overload) {
GB_ASSERT(operand->overload_entities != NULL &&
@@ -4886,6 +4986,19 @@ ExprKind check__expr_base(Checker *c, Operand *o, AstNode *node, Type *type_hint
Type *t = base_type(type_deref(o->type));
bool is_const = o->mode == Addressing_Constant;
if (is_type_map(t)) {
Operand key = {0};
check_expr(c, &key, ie->index);
check_assignment(c, &key, t->Map.key, str_lit("map index"));
if (key.mode == Addressing_Invalid) {
goto error;
}
o->mode = Addressing_MapIndex;
o->type = t->Map.value;
o->expr = node;
return Expr_Expr;
}
i64 max_count = -1;
bool valid = check_set_index_data(o, t, &max_count);

View File

@@ -261,12 +261,20 @@ Type *check_assignment_variable(Checker *c, Operand *op_a, AstNode *lhs) {
return NULL;
case Addressing_Variable:
break;
case Addressing_MapIndex:
break;
default: {
if (op_b.expr->kind == AstNode_SelectorExpr) {
// NOTE(bill): Extra error checks
Operand op_c = {Addressing_Invalid};
ast_node(se, SelectorExpr, op_b.expr);
check_expr(c, &op_c, se->expr);
if (op_c.mode == Addressing_MapIndex) {
gbString str = expr_to_string(op_b.expr);
error_node(op_b.expr, "Cannot assign to record field `%s` in map", str);
gb_string_free(str);
return NULL;
}
}
gbString str = expr_to_string(op_b.expr);

View File

@@ -54,6 +54,7 @@ typedef enum BuiltinProcId {
// BuiltinProc_ptr_offset,
// BuiltinProc_ptr_sub,
BuiltinProc_slice_ptr,
BuiltinProc_slice_to_bytes,
BuiltinProc_min,
BuiltinProc_max,
@@ -96,6 +97,7 @@ gb_global BuiltinProc builtin_procs[BuiltinProc_Count] = {
// {STR_LIT("ptr_offset"), 2, false, Expr_Expr},
// {STR_LIT("ptr_sub"), 2, false, Expr_Expr},
{STR_LIT("slice_ptr"), 2, false, Expr_Expr},
{STR_LIT("slice_to_bytes"), 1, false, Expr_Stmt},
{STR_LIT("min"), 2, false, Expr_Expr},
{STR_LIT("max"), 2, false, Expr_Expr},
@@ -115,6 +117,7 @@ typedef enum AddressingMode {
Addressing_Type,
Addressing_Builtin,
Addressing_Overload,
Addressing_MapIndex,
Addressing_Count,
} AddressingMode;
@@ -631,6 +634,7 @@ void init_universal_scope(BuildContext *bc) {
t_int_ptr = make_type_pointer(a, t_int);
t_i64_ptr = make_type_pointer(a, t_i64);
t_f64_ptr = make_type_pointer(a, t_f64);
t_byte_slice = make_type_slice(a, t_byte);
}

399
src/ir.c

File diff suppressed because it is too large Load Diff

View File

@@ -258,7 +258,7 @@ void ir_opt_blocks(irProcedure *proc) {
if (b == NULL) {
continue;
}
GB_ASSERT(b->index == i);
GB_ASSERT_MSG(b->index == i, "%d, %td", b->index, i);
if (ir_opt_block_fusion(proc, b)) {
changed = true;
@@ -467,7 +467,7 @@ void ir_opt_tree(irGen *s) {
}
ir_opt_blocks(proc);
#if 1
#if 0
ir_opt_build_referrers(proc);
ir_opt_build_dom_tree(proc);

View File

@@ -294,15 +294,8 @@ void ir_print_type(irFileBuffer *f, irModule *m, Type *t) {
} return;
case Type_Map: {
if (t->Map.count > 0) {
// ir_fprintf(f, "void");
} else {
ir_fprintf(f, "{");
ir_print_type(f, m, t_raw_dynamic_array);
ir_fprintf(f, ", ");
ir_print_type(f, m, t_raw_dynamic_array);
ir_fprintf(f, "}");
}
GB_ASSERT(t->Map.generated_struct_type != NULL);
ir_print_type(f, m, t->Map.generated_struct_type);
} break;
}
}

View File

@@ -128,6 +128,9 @@ typedef struct TypeRecord {
i64 count; /* 0 if dynamic */ \
Type *key; \
Type *value; \
Type *entry_type; \
Type *generated_struct_type; \
Type *lookup_result_type; \
}) \
@@ -269,6 +272,7 @@ gb_global Type *t_u8_ptr = NULL;
gb_global Type *t_int_ptr = NULL;
gb_global Type *t_i64_ptr = NULL;
gb_global Type *t_f64_ptr = NULL;
gb_global Type *t_byte_slice = NULL;
gb_global Type *t_type_info = NULL;