Work on improving bounds checking with constant known indices and lb_add_local_generated

This commit is contained in:
gingerBill
2026-03-16 14:06:23 +00:00
parent 93852df29e
commit 0bf2d01a04
2 changed files with 70 additions and 1 deletions

View File

@@ -2898,10 +2898,13 @@ gb_internal lbValue lb_emit_c_vararg(lbProcedure *p, lbValue arg, Type *type) {
gb_internal lbValue lb_compare_records(lbProcedure *p, TokenKind op_kind, lbValue left, lbValue right, Type *type) {
GB_ASSERT((is_type_struct(type) || is_type_soa_pointer(type) || is_type_union(type)) && is_type_comparable(type));
lbValue left_ptr = lb_address_from_load_or_generate_local(p, left);
lbValue right_ptr = lb_address_from_load_or_generate_local(p, right);
i64 size = type_size_of(type);
lbValue res = {};
if (type_size_of(type) == 0) {
if (size == 0) {
switch (op_kind) {
case Token_CmpEq:
return lb_const_bool(p->module, t_bool, true);
@@ -2910,8 +2913,23 @@ gb_internal lbValue lb_compare_records(lbProcedure *p, TokenKind op_kind, lbValu
}
GB_PANIC("invalid operator");
}
TEMPORARY_ALLOCATOR_GUARD();
if (is_type_simple_compare(type)) {
// if (size <= 8) {
// LLVMTypeRef int_type = LLVMIntTypeInContext(p->module->ctx, cast(unsigned)(size*8));
// LLVMValueRef l = OdinLLVMBuildLoad(p, int_type, LLVMBuildPointerCast(p->builder, left_ptr.value, LLVMPointerType(int_type, 0), ""));
// LLVMValueRef r = OdinLLVMBuildLoad(p, int_type, LLVMBuildPointerCast(p->builder, right_ptr.value, LLVMPointerType(int_type, 0), ""));
// LLVMIntPredicate pred = (op_kind == Token_CmpEq) ? LLVMIntEQ : LLVMIntNE;
// LLVMValueRef cmp = LLVMBuildICmp(p->builder, pred, l, r, "");
// res.value = cmp;
// res.type = t_bool;
// return res;
// }
// TODO(bill): Test to see if this is actually faster!!!!
auto args = array_make<lbValue>(temporary_allocator(), 3);
args[0] = lb_emit_conv(p, left_ptr, t_rawptr);
@@ -2925,6 +2943,7 @@ gb_internal lbValue lb_compare_records(lbProcedure *p, TokenKind op_kind, lbValu
args[1] = lb_emit_conv(p, right_ptr, t_rawptr);
res = lb_emit_call(p, value, args);
}
if (op_kind == Token_NotEq) {
res = lb_emit_unary_arith(p, Token_Not, res, res.type);
}

View File

@@ -709,6 +709,33 @@ gb_internal void lb_emit_bounds_check(lbProcedure *p, Token token, lbValue index
return;
}
if (LLVMIsConstant(index.value) && LLVMIsConstant(len.value)) {
i64 i = LLVMConstIntGetSExtValue(index.value);
i64 n = LLVMConstIntGetSExtValue(len.value);
if (0<= i && i < n) {
return;
}
}
if (LLVMIsAInstruction(index.value)) {
LLVMOpcode op = LLVMGetInstructionOpcode(index.value);
if (op == LLVMURem) {
LLVMValueRef divisor = LLVMGetOperand(index.value, 1);
if (divisor == len.value) {
return;
}
} else if (op == LLVMAnd) {
LLVMValueRef mask = LLVMGetOperand(index.value, 1);
if (LLVMIsConstant(mask) && LLVMIsConstant(len.value)) {
i64 m = LLVMConstIntGetSExtValue(mask);
i64 l = LLVMConstIntGetSExtValue(len.value);
if (l > 0 && (l & (l-1)) == 0 && m == l-1) {
return;
}
}
}
}
TEMPORARY_ALLOCATOR_GUARD();
index = lb_emit_conv(p, index, t_int);
@@ -3567,7 +3594,30 @@ gb_internal lbAddr lb_add_local(lbProcedure *p, Type *type, Entity *e, bool zero
}
gb_internal lbAddr lb_add_local_generated(lbProcedure *p, Type *type, bool zero_init) {
#if 0
	return lb_add_local(p, type, nullptr, zero_init);
#else
	// Allocate a compiler-generated (anonymous) stack local of `type` and
	// return its address. The alloca itself is emitted via llvm_alloca,
	// which positions in the entry block, emits the alloca, then restores
	// the builder position — so the slot exists for the whole procedure.
	LLVMTypeRef backing_type = lb_type(p->module, type);

	unsigned alignment = cast(unsigned)gb_max(type_align_of(type), lb_alignof(backing_type));
	if (is_type_matrix(type)) {
		// NOTE(bill): Just in case
		alignment *= 2;
	}

	LLVMValueRef slot = llvm_alloca(p, backing_type, alignment, "");
	if (zero_init) {
		// The zero-init store is emitted at the *current* insertion point
		// (not the entry block) so it respects control flow.
		lb_mem_zero_ptr(p, slot, type, alignment);
	}

	lbValue ptr_val = {};
	ptr_val.value = slot;
	ptr_val.type  = alloc_type_pointer(type);
	return lb_addr(ptr_val);
#endif
}
gb_internal lbAddr lb_add_local_generated_temp(lbProcedure *p, Type *type, i64 min_alignment) {