Copy lvalues on multi-valued assignments to allow for a, b = b, a on large types (minor bodge)

gingerBill
2023-07-21 13:05:39 +01:00
parent 47b924990f
commit ec0a9a5f8a
3 changed files with 41 additions and 15 deletions
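
To make the problem behind this change concrete, here is a minimal C sketch (not taken from the commit; the struct and the two functions are illustrative) of the hazard with a, b = b, a on a type that is handled by address: if each right-hand side is still a pointer into the original variables when the stores happen, the first store clobbers the operand of the second. Copying every such operand into a fresh local before any store is performed, which is roughly what the change below does in the TB-based backend, restores the expected swap behaviour.

#include <string.h>

typedef struct { char data[64]; } Big;   // large enough to be passed around by address

// Naive lowering: each right-hand side is still a pointer into the originals.
void swap_broken(Big *a, Big *b) {
    memcpy(a, b, sizeof(Big));   // a = b
    memcpy(b, a, sizeof(Big));   // b = a, but *a already holds b, so b is unchanged
}

// Two-pass lowering: snapshot every right-hand side first, then store from the copies.
void swap_fixed(Big *a, Big *b) {
    Big rhs0, rhs1;
    memcpy(&rhs0, b, sizeof(Big));   // pass 1: copy each RHS into a scratch local
    memcpy(&rhs1, a, sizeof(Big));
    memcpy(a, &rhs0, sizeof(Big));   // pass 2: the stores only read the copies
    memcpy(b, &rhs1, sizeof(Big));
}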


@@ -196,6 +196,11 @@ typedef union TB_DataType {
 typedef enum TB_NodeTypeEnum {
     TB_NULL = 0,
 
+    // Immediates
+    TB_INTEGER_CONST,
+    TB_FLOAT32_CONST,
+    TB_FLOAT64_CONST,
+
     // only one per function
     TB_START, // fn()
@@ -253,11 +258,6 @@ typedef enum TB_NodeTypeEnum {
     TB_MEMBER_ACCESS,
     TB_ARRAY_ACCESS,
 
-    // Immediates
-    TB_INTEGER_CONST,
-    TB_FLOAT32_CONST,
-    TB_FLOAT64_CONST,
-
     // Conversions
     TB_TRUNCATE,
     TB_FLOAT_EXT,
@@ -295,6 +295,8 @@ typedef enum TB_NodeTypeEnum {
     TB_SHL,
     TB_SHR,
     TB_SAR,
+    TB_ROL,
+    TB_ROR,
     TB_UDIV,
     TB_SDIV,
     TB_UMOD,
@@ -336,9 +338,6 @@ typedef enum TB_NodeTypeEnum {
 } TB_NodeTypeEnum;
 typedef uint8_t TB_NodeType;
 
-#define TB_IS_NODE_SIDE_EFFECT(type) ((type) >= TB_LINE_INFO && (type) <= TB_DEBUGBREAK)
-#define TB_IS_NODE_TERMINATOR(type) ((type) >= TB_BRANCH && (type) <= TB_TRAP)
-
 typedef int TB_Label;
 
 // just represents some region of bytes, usually in file parsing crap
@@ -980,6 +979,8 @@ TB_API TB_Node* tb_inst_xor(TB_Function* f, TB_Node* a, TB_Node* b);
 TB_API TB_Node* tb_inst_sar(TB_Function* f, TB_Node* a, TB_Node* b);
 TB_API TB_Node* tb_inst_shl(TB_Function* f, TB_Node* a, TB_Node* b, TB_ArithmeticBehavior arith_behavior);
 TB_API TB_Node* tb_inst_shr(TB_Function* f, TB_Node* a, TB_Node* b);
+TB_API TB_Node* tb_inst_rol(TB_Function* f, TB_Node* a, TB_Node* b);
+TB_API TB_Node* tb_inst_ror(TB_Function* f, TB_Node* a, TB_Node* b);
 
 // Atomics
 // By default you can use TB_MEM_ORDER_SEQ_CST for the memory order to get
@@ -1052,10 +1053,7 @@ TB_API void tb_inst_ret(TB_Function* f, size_t count, TB_Node** values);
 ////////////////////////////////
 // Optimizer
 ////////////////////////////////
-// Function-level optimizations are managed via TB_FuncOpt, it's tied
-// to a single TB_Function and it'll can be used to run peepholes incrementally
-// between whatever passes TB may have.
+// Function analysis, optimizations, and codegen are all part of this
 typedef struct TB_FuncOpt TB_FuncOpt;
 
 // the arena is used to allocate the nodes
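
The header changes above also expose rotate node types (TB_ROL, TB_ROR) and their builders. A minimal usage sketch, assuming the header above is included, an existing TB_Function* f, and an integer-typed TB_Node* value; only builders that appear in this diff are used, and the helper name is made up:

// Hypothetical helper: rotate `value` left by 13 bits, then back again.
TB_Node* rotate_demo(TB_Function* f, TB_Node* value) {
    TB_Node* amount  = tb_inst_uint(f, TB_TYPE_INT, 13);  // constant rotate amount
    TB_Node* rotated = tb_inst_rol(f, value, amount);     // rotate left
    return tb_inst_ror(f, rotated, amount);               // rotating right by the same amount recovers `value`
}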

Binary file not shown.


@@ -850,11 +850,39 @@ gb_internal void cg_build_assignment(cgProcedure *p, Array<cgAddr> const &lvals,
     p->in_multi_assignment = lval_count > 1;
 
     GB_ASSERT(lvals.count == inits.count);
-    for_array(i, inits) {
+    if (inits.count > 1) for_array(i, inits) {
         cgAddr lval = lvals[i];
-        cgValue init = inits[i];
+        cgValue init = cg_flatten_value(p, inits[i]);
+        GB_ASSERT(init.kind != cgValue_Multi);
+        if (init.type == nullptr) {
+            continue;
+        }
+
+        Type *type = cg_addr_type(lval);
+        GB_ASSERT(are_types_identical(type, init.type));
+
+        if (init.kind == cgValue_Addr &&
+            !cg_addr_is_empty(lval)) {
+            // NOTE(bill): This is needed for certain constructs such as this:
+            //     a, b = b, a
+            // NOTE(bill): This is a bodge and not necessarily a good way of doing things whatsoever
+            TB_CharUnits size = cast(TB_CharUnits)type_size_of(type);
+            TB_CharUnits align = cast(TB_CharUnits)type_align_of(type);
+
+            TB_Node *copy = tb_inst_local(p->func, size, align);
+            tb_inst_memcpy(p->func, copy, init.node, tb_inst_uint(p->func, TB_TYPE_INT, size), align, false);
+            // use the copy instead
+            init.node = copy;
+        }
+        inits[i] = init;
+    }
+
+    for_array(i, inits) {
+        cgAddr lval = lvals[i];
+        cgValue init = inits[i];
         GB_ASSERT(init.kind != cgValue_Multi);
         if (init.type == nullptr) {
             // TODO(bill): figure out how to do this
             continue;
         }
         cg_addr_store(p, lval, init);
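
For a concrete picture of what the pre-pass above arranges, the sketch below shows the rough shape of what gets built for a, b = b, a on a 64-byte, 8-byte-aligned type. It is illustrative rather than actual compiler output: addr_a and addr_b stand for the two lvalue addresses, the function name is made up, and the final stores are written as memcpys on the assumption that storing a type this large goes through memory.

// Illustrative only, using the same TB builders that appear in the diff above.
void lowered_swap_sketch(TB_Function* f, TB_Node* addr_a, TB_Node* addr_b) {
    TB_Node* size = tb_inst_uint(f, TB_TYPE_INT, 64);

    // pass 1: snapshot every address-backed RHS into a fresh stack slot
    TB_Node* tmp_b = tb_inst_local(f, 64, 8);
    TB_Node* tmp_a = tb_inst_local(f, 64, 8);
    tb_inst_memcpy(f, tmp_b, addr_b, size, 8, false);
    tb_inst_memcpy(f, tmp_a, addr_a, size, 8, false);

    // pass 2: the stores read only the snapshots, so neither observes the other's write
    tb_inst_memcpy(f, addr_a, tmp_b, size, 8, false);
    tb_inst_memcpy(f, addr_b, tmp_a, size, 8, false);
}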