diff --git a/core/container/priority_queue/priority_queue.odin b/core/container/priority_queue/priority_queue.odin
index e324287f3..0c5c4931d 100644
--- a/core/container/priority_queue/priority_queue.odin
+++ b/core/container/priority_queue/priority_queue.odin
@@ -85,7 +85,6 @@ _shift_down :: proc(pq: ^$Q/Priority_Queue($T), i0, n: int) -> bool {
 _shift_up :: proc(pq: ^$Q/Priority_Queue($T), j: int) {
 	j := j
 	queue := pq.queue[:]
-	n := builtin.len(queue)
 	for 0 <= j {
 		i := (j-1)/2
 		if i == j || !pq.less(queue[j], queue[i]) {
diff --git a/core/intrinsics/intrinsics.odin b/core/intrinsics/intrinsics.odin
index 22b5d953d..0b682fbc3 100644
--- a/core/intrinsics/intrinsics.odin
+++ b/core/intrinsics/intrinsics.odin
@@ -295,6 +295,9 @@ objc_register_selector :: proc($name: string) -> objc_SEL ---
 objc_find_class :: proc($name: string) -> objc_Class ---
 objc_register_class :: proc($name: string) -> objc_Class ---
 
+
+valgrind_client_request :: proc(default: uintptr, request: uintptr, a0, a1, a2, a3, a4: uintptr) -> uintptr ---
+
 // Internal compiler use only
 __entry_point :: proc() ---
\ No newline at end of file
diff --git a/core/reflect/types.odin b/core/reflect/types.odin
index edd4f7a26..f53b18e0d 100644
--- a/core/reflect/types.odin
+++ b/core/reflect/types.odin
@@ -302,6 +302,11 @@ is_dynamic_map :: proc(info: ^Type_Info) -> bool {
 	_, ok := type_info_base(info).variant.(Type_Info_Map)
 	return ok
 }
+is_bit_set :: proc(info: ^Type_Info) -> bool {
+	if info == nil { return false }
+	_, ok := type_info_base(info).variant.(Type_Info_Bit_Set)
+	return ok
+}
 is_slice :: proc(info: ^Type_Info) -> bool {
 	if info == nil { return false }
 	_, ok := type_info_base(info).variant.(Type_Info_Slice)
diff --git a/core/sys/valgrind/callgrind.odin b/core/sys/valgrind/callgrind.odin
new file mode 100644
index 000000000..1396f82ad
--- /dev/null
+++ b/core/sys/valgrind/callgrind.odin
@@ -0,0 +1,63 @@
+//+build amd64
+package sys_valgrind
+
+import "core:intrinsics"
+
+Callgrind_Client_Request :: enum uintptr {
+	Dump_Stats = 'C'<<24 | 'T'<<16,
+	Zero_Stats,
+	Toggle_Collect,
+	Dump_Stats_At,
+	Start_Instrumentation,
+	Stop_Instrumentation,
+}
+
+@(require_results)
+callgrind_client_request_expr :: proc "c" (default: uintptr, request: Callgrind_Client_Request, a0, a1, a2, a3, a4: uintptr) -> uintptr {
+	return intrinsics.valgrind_client_request(default, uintptr(request), a0, a1, a2, a3, a4)
+}
+callgrind_client_request_stmt :: proc "c" (request: Callgrind_Client_Request, a0, a1, a2, a3, a4: uintptr) {
+	_ = intrinsics.valgrind_client_request(0, uintptr(request), a0, a1, a2, a3, a4)
+}
+
+// Dump current state of cost centres, and zero them afterwards.
+dump_stats :: proc "c" () {
+	callgrind_client_request_stmt(.Dump_Stats, 0, 0, 0, 0, 0)
+}
+
+// Zero cost centres
+zero_stats :: proc "c" () {
+	callgrind_client_request_stmt(.Zero_Stats, 0, 0, 0, 0, 0)
+}
+
+// Toggles collection state.
+// The collection state specifies whether the happening of events should be noted or
+// if they are to be ignored. Events are noted by increment of counters in a cost centre.
+toggle_collect :: proc "c" () {
+	callgrind_client_request_stmt(.Toggle_Collect, 0, 0, 0, 0, 0)
+}
+
+// Dump current state of cost centres, and zero them afterwards.
+// The argument is appended to a string stating the reason which triggered
+// the dump. This string is written as a description field into the
+// profile data dump.
+dump_stats_at :: proc "c" (pos_str: rawptr) {
+	callgrind_client_request_stmt(.Dump_Stats_At, uintptr(pos_str), 0, 0, 0, 0)
+}
+
+// Start full Callgrind instrumentation if not already switched on.
+// When cache simulation is done, it will flush the simulated cache;
+// this will lead to an artificial cache warmup phase afterwards with
+// cache misses which would not have happened in reality.
+start_instrumentation :: proc "c" () {
+	callgrind_client_request_stmt(.Start_Instrumentation, 0, 0, 0, 0, 0)
+}
+
+// Stop full Callgrind instrumentation if not already switched off.
+// This flushes Valgrind's translation cache, and does no additional instrumentation
+// afterwards, which effectively will run at the same speed as the "none" tool (i.e. at minimal slowdown).
+// Use this to bypass Callgrind aggregation for uninteresting code parts.
+// To start Callgrind in this mode to ignore the setup phase, use the option "--instr-atstart=no".
+stop_instrumentation :: proc "c" () {
+	callgrind_client_request_stmt(.Stop_Instrumentation, 0, 0, 0, 0, 0)
+}
\ No newline at end of file
diff --git a/core/sys/valgrind/memcheck.odin b/core/sys/valgrind/memcheck.odin
new file mode 100644
index 000000000..99c65272a
--- /dev/null
+++ b/core/sys/valgrind/memcheck.odin
@@ -0,0 +1,169 @@
+//+build amd64
+package sys_valgrind
+
+import "core:intrinsics"
+
+Mem_Check_Client_Request :: enum uintptr {
+	Make_Mem_No_Access = 'M'<<24 | 'C'<<16,
+	Make_Mem_Undefined,
+	Make_Mem_Defined,
+	Discard,
+	Check_Mem_Is_Addressable,
+	Check_Mem_Is_Defined,
+	Do_Leak_Check,
+	Count_Leaks,
+	Get_Vbits,
+	Set_Vbits,
+	Create_Block,
+	Make_Mem_Defined_If_Addressable,
+	Count_Leak_Blocks,
+	Enable_Addr_Error_Reporting_In_Range,
+	Disable_Addr_Error_Reporting_In_Range,
+}
+
+@(require_results)
+mem_check_client_request_expr :: proc "c" (default: uintptr, request: Mem_Check_Client_Request, a0, a1, a2, a3, a4: uintptr) -> uintptr {
+	return intrinsics.valgrind_client_request(default, uintptr(request), a0, a1, a2, a3, a4)
+}
+mem_check_client_request_stmt :: proc "c" (request: Mem_Check_Client_Request, a0, a1, a2, a3, a4: uintptr) {
+	_ = intrinsics.valgrind_client_request(0, uintptr(request), a0, a1, a2, a3, a4)
+}
+
+// Mark memory at `raw_data(qzz)` as unaddressable for `len(qzz)` bytes.
+// Returns true when run on Valgrind and false otherwise.
+make_mem_no_access :: proc "c" (qzz: []byte) -> bool {
+	return 0 != mem_check_client_request_expr(0, .Make_Mem_No_Access, uintptr(raw_data(qzz)), uintptr(len(qzz)), 0, 0, 0)
+}
+// Mark memory at `raw_data(qzz)` as addressable but undefined for `len(qzz)` bytes.
+// Returns true when run on Valgrind and false otherwise.
+make_mem_undefined :: proc "c" (qzz: []byte) -> bool {
+	return 0 != mem_check_client_request_expr(0, .Make_Mem_Undefined, uintptr(raw_data(qzz)), uintptr(len(qzz)), 0, 0, 0)
+}
+// Mark memory at `raw_data(qzz)` as addressable and defined for `len(qzz)` bytes.
+// Returns true when run on Valgrind and false otherwise.
+make_mem_defined :: proc "c" (qzz: []byte) -> bool {
+	return 0 != mem_check_client_request_expr(0, .Make_Mem_Defined, uintptr(raw_data(qzz)), uintptr(len(qzz)), 0, 0, 0)
+}
+
+// Check that memory at `raw_data(qzz)` is addressable for `len(qzz)` bytes.
+// If suitable addressability is not established, Valgrind prints an error
+// message and returns the address of the first offending byte.
+// Otherwise it returns zero.
+check_mem_is_addressable :: proc "c" (qzz: []byte) -> uintptr {
+	return mem_check_client_request_expr(0, .Check_Mem_Is_Addressable, uintptr(raw_data(qzz)), uintptr(len(qzz)), 0, 0, 0)
+}
+// Check that memory at `raw_data(qzz)` is addressable and defined for `len(qzz)` bytes.
+// If suitable addressability and definedness are not established,
+// Valgrind prints an error message and returns the address of the first
+// offending byte. Otherwise it returns zero.
+check_mem_is_defined :: proc "c" (qzz: []byte) -> uintptr {
+	return mem_check_client_request_expr(0, .Check_Mem_Is_Defined, uintptr(raw_data(qzz)), uintptr(len(qzz)), 0, 0, 0)
+}
+
+// Similar to `make_mem_defined(qzz)` except that addressability is not altered:
+// bytes which are addressable are marked as defined, but those which
+// are not addressable are left unchanged.
+// Returns true when run on Valgrind and false otherwise.
+make_mem_defined_if_addressable :: proc "c" (qzz: []byte) -> bool {
+	return 0 != mem_check_client_request_expr(0, .Make_Mem_Defined_If_Addressable, uintptr(raw_data(qzz)), uintptr(len(qzz)), 0, 0, 0)
+}
+
+// Create a block-description handle.
+// The description is an ASCII string which is included in any messages
+// pertaining to addresses within the specified memory range.
+// Has no other effect on the properties of the memory range.
+create_block :: proc "c" (qzz: []u8, desc: cstring) -> bool {
+	return 0 != mem_check_client_request_expr(0, .Create_Block, uintptr(raw_data(qzz)), uintptr(len(qzz)), uintptr(rawptr(desc)), 0, 0)
+}
+// Discard a block-description handle. Returns true for an invalid handle, false for a valid handle.
+discard :: proc "c" (blk_index: uintptr) -> bool {
+	return 0 != mem_check_client_request_expr(0, .Discard, 0, blk_index, 0, 0, 0)
+}
+
+
+// Do a full memory leak check (like `--leak-check=full`) mid-execution.
+leak_check :: proc "c" () {
+	mem_check_client_request_stmt(.Do_Leak_Check, 0, 0, 0, 0, 0)
+}
+// Same as `leak_check()` but only showing the entries for which there was an increase
+// in leaked bytes or number of leaked blocks since the previous leak search.
+added_leak_check :: proc "c" () {
+	mem_check_client_request_stmt(.Do_Leak_Check, 0, 1, 0, 0, 0)
+}
+// Same as `added_leak_check()` but showing entries with increased or decreased
+// leaked bytes/blocks since the previous leak search.
+changed_leak_check :: proc "c" () {
+	mem_check_client_request_stmt(.Do_Leak_Check, 0, 2, 0, 0, 0)
+}
+// Do a summary memory leak check (like `--leak-check=summary`) mid-execution.
+quick_leak_check :: proc "c" () {
+	mem_check_client_request_stmt(.Do_Leak_Check, 1, 0, 0, 0, 0)
+}
+
+Count_Result :: struct {
+	leaked:     uint,
+	dubious:    uint,
+	reachable:  uint,
+	suppressed: uint,
+}
+
+count_leaks :: proc "c" () -> (res: Count_Result) {
+	mem_check_client_request_stmt(
+		.Count_Leaks,
+		uintptr(&res.leaked),
+		uintptr(&res.dubious),
+		uintptr(&res.reachable),
+		uintptr(&res.suppressed),
+		0,
+	)
+	return
+}
+
+count_leak_blocks :: proc "c" () -> (res: Count_Result) {
+	mem_check_client_request_stmt(
+		.Count_Leak_Blocks,
+		uintptr(&res.leaked),
+		uintptr(&res.dubious),
+		uintptr(&res.reachable),
+		uintptr(&res.suppressed),
+		0,
+	)
+	return
+}
+
+// Get the validity data for addresses `zza` and copy it
+// into the provided `zzvbits` array. Return values:
+//   0 - if not running on Valgrind
+//   1 - success
+//   2 - [previously indicated unaligned arrays; these are now allowed]
+//   3 - if any parts of `zza`/`zzvbits` are not addressable.
+// The metadata is not copied in cases 0, 2 or 3 so it should be
+// impossible to segfault your system by using this call.
+get_vbits :: proc(zza, zzvbits: []byte) -> u8 {
+	// assert requires a `context`, thus these procedures cannot be `proc "c"`
+	assert(len(zzvbits) >= len(zza)/8)
+	return u8(mem_check_client_request_expr(0, .Get_Vbits, uintptr(raw_data(zza)), uintptr(raw_data(zzvbits)), uintptr(len(zza)), 0, 0))
+}
+
+// Set the validity data for addresses `zza`, copying it
+// from the provided `zzvbits` array. Return values:
+//   0 - if not running on Valgrind
+//   1 - success
+//   2 - [previously indicated unaligned arrays; these are now allowed]
+//   3 - if any parts of `zza`/`zzvbits` are not addressable.
+// The metadata is not copied in cases 0, 2 or 3 so it should be
+// impossible to segfault your system by using this call.
+set_vbits :: proc(zzvbits, zza: []byte) -> u8 {
+	// assert requires a `context`, thus these procedures cannot be `proc "c"`
+	assert(len(zzvbits) >= len(zza)/8)
+	return u8(mem_check_client_request_expr(0, .Set_Vbits, uintptr(raw_data(zza)), uintptr(raw_data(zzvbits)), uintptr(len(zza)), 0, 0))
+}
+
+// (Re-)enable reporting of addressing errors in the specified address range.
+enable_addr_error_reporting_in_range :: proc "c" (qzz: []byte) -> uintptr {
+	return mem_check_client_request_expr(0, .Enable_Addr_Error_Reporting_In_Range, uintptr(raw_data(qzz)), uintptr(len(qzz)), 0, 0, 0)
+}
+// Disable reporting of addressing errors in the specified address range.
+disable_addr_error_reporting_in_range :: proc "c" (qzz: []byte) -> uintptr {
+	return mem_check_client_request_expr(0, .Disable_Addr_Error_Reporting_In_Range, uintptr(raw_data(qzz)), uintptr(len(qzz)), 0, 0, 0)
+}
\ No newline at end of file
diff --git a/core/sys/valgrind/valgrind.odin b/core/sys/valgrind/valgrind.odin
new file mode 100644
index 000000000..2fb95588f
--- /dev/null
+++ b/core/sys/valgrind/valgrind.odin
@@ -0,0 +1,182 @@
+//+build amd64
+package sys_valgrind
+
+import "core:intrinsics"
+
+Client_Request :: enum uintptr {
+	Running_On_Valgrind = 4097,
+	Discard_Translations = 4098,
+	Client_Call0 = 4353,
+	Client_Call1 = 4354,
+	Client_Call2 = 4355,
+	Client_Call3 = 4356,
+	Count_Errors = 4609,
+	Gdb_Monitor_Command = 4610,
+	Malloc_Like_Block = 4865,
+	Resize_Inplace_Block = 4875,
+	Free_Like_Block = 4866,
+	Create_Mem_Pool = 4867,
+	Destroy_Mem_Pool = 4868,
+	Mem_Pool_Alloc = 4869,
+	Mem_Pool_Free = 4870,
+	Mem_Pool_Trim = 4871,
+	Move_Mem_Pool = 4872,
+	Mem_Pool_Change = 4873,
+	Mem_Pool_Exists = 4874,
+	Printf = 5121,
+	Printf_Backtrace = 5122,
+	Printf_Valist_By_Ref = 5123,
+	Printf_Backtrace_Valist_By_Ref = 5124,
+	Stack_Register = 5377,
+	Stack_Deregister = 5378,
+	Stack_Change = 5379,
+	Load_Pdb_Debug_Info = 5633,
+	Map_Ip_To_Src_Loc = 5889,
+	Change_Err_Disablement = 6145,
+	Vex_Init_For_Iri = 6401,
+	Inner_Threads = 6402,
+}
+
+@(require_results)
+client_request_expr :: proc "c" (default: uintptr, request: Client_Request, a0, a1, a2, a3, a4: uintptr) -> uintptr {
+	return intrinsics.valgrind_client_request(default, uintptr(request), a0, a1, a2, a3, a4)
+}
+client_request_stmt :: proc "c" (request: Client_Request, a0, a1, a2, a3, a4: uintptr) {
+	_ = intrinsics.valgrind_client_request(0, uintptr(request), a0, a1, a2, a3, a4)
+}
+
+// Returns the number of Valgrinds this code is running under:
+//   0 - running natively
+//   1 - running under Valgrind
+//   2 - running under Valgrind which is running under another Valgrind
+running_on_valgrind :: proc "c" () -> uintptr {
+	return client_request_expr(0, .Running_On_Valgrind, 0, 0, 0, 0, 0)
+}
+
+// Discard translation of code in the slice `qzz`. Useful if you are debugging a JIT-er or some such,
+// since it provides a way to make sure Valgrind will retranslate the invalidated area.
discard_translations :: proc "c" (qzz: []byte) {
+	client_request_stmt(.Discard_Translations, uintptr(raw_data(qzz)), uintptr(len(qzz)), 0, 0, 0)
+}
+
+non_simd_call0 :: proc "c" (p: proc "c" (uintptr) -> uintptr) -> uintptr {
+	return client_request_expr(0, .Client_Call0, uintptr(rawptr(p)), 0, 0, 0, 0)
+}
+non_simd_call1 :: proc "c" (p: proc "c" (uintptr, uintptr) -> uintptr, a0: uintptr) -> uintptr {
+	return client_request_expr(0, .Client_Call1, uintptr(rawptr(p)), a0, 0, 0, 0)
+}
+non_simd_call2 :: proc "c" (p: proc "c" (uintptr, uintptr, uintptr) -> uintptr, a0, a1: uintptr) -> uintptr {
+	return client_request_expr(0, .Client_Call2, uintptr(rawptr(p)), a0, a1, 0, 0)
+}
+non_simd_call3 :: proc "c" (p: proc "c" (uintptr, uintptr, uintptr, uintptr) -> uintptr, a0, a1, a2: uintptr) -> uintptr {
+	return client_request_expr(0, .Client_Call3, uintptr(rawptr(p)), a0, a1, a2, 0)
+}
+
+// Counts the number of errors that have been recorded by a tool.
+count_errors :: proc "c" () -> uint {
+	return uint(client_request_expr(0, .Count_Errors, 0, 0, 0, 0, 0))
+}
+
+monitor_command :: proc "c" (command: cstring) -> bool {
+	return 0 != client_request_expr(0, .Gdb_Monitor_Command, uintptr(rawptr(command)), 0, 0, 0, 0)
+}
+
+
+malloc_like_block :: proc "c" (mem: []byte, rz_b: uintptr, is_zeroed: bool) {
+	client_request_stmt(.Malloc_Like_Block, uintptr(raw_data(mem)), uintptr(len(mem)), rz_b, uintptr(is_zeroed), 0)
+}
+resize_inplace_block :: proc "c" (old_mem: []byte, new_size: uint, rz_b: uintptr) {
+	client_request_stmt(.Resize_Inplace_Block, uintptr(raw_data(old_mem)), uintptr(len(old_mem)), uintptr(new_size), rz_b, 0)
+}
+free_like_block :: proc "c" (addr: rawptr, rz_b: uintptr) {
+	client_request_stmt(.Free_Like_Block, uintptr(addr), rz_b, 0, 0, 0)
+}
+
+Mem_Pool_Flags :: distinct bit_set[Mem_Pool_Flag; uintptr]
+Mem_Pool_Flag :: enum uintptr {
+	Auto_Free = 0,
+	Meta_Pool = 1,
+}
+
+// Create a memory pool.
+create_mem_pool :: proc "c" (pool: rawptr, rz_b: uintptr, is_zeroed: bool, flags: Mem_Pool_Flags) {
+	client_request_stmt(.Create_Mem_Pool, uintptr(pool), rz_b, uintptr(is_zeroed), transmute(uintptr)flags, 0)
+}
+// Destroy a memory pool.
+destroy_mem_pool :: proc "c" (pool: rawptr) {
+	client_request_stmt(.Destroy_Mem_Pool, uintptr(pool), 0, 0, 0, 0)
+}
+// Associate a section of memory with a memory pool.
+mem_pool_alloc :: proc "c" (pool: rawptr, mem: []byte) {
+	client_request_stmt(.Mem_Pool_Alloc, uintptr(pool), uintptr(raw_data(mem)), uintptr(len(mem)), 0, 0)
+}
+// Disassociate a section of memory from a memory pool.
+mem_pool_free :: proc "c" (pool: rawptr, addr: rawptr) {
+	client_request_stmt(.Mem_Pool_Free, uintptr(pool), uintptr(addr), 0, 0, 0)
+}
+// Disassociate parts of a section of memory outside a particular range.
+mem_pool_trim :: proc "c" (pool: rawptr, mem: []byte) {
+	client_request_stmt(.Mem_Pool_Trim, uintptr(pool), uintptr(raw_data(mem)), uintptr(len(mem)), 0, 0)
+}
+// Move a memory pool from `pool_a` to `pool_b`.
+move_mem_pool :: proc "c" (pool_a, pool_b: rawptr) {
+	client_request_stmt(.Move_Mem_Pool, uintptr(pool_a), uintptr(pool_b), 0, 0, 0)
+}
+// Resize and/or move a section of memory associated with a memory pool.
+mem_pool_change :: proc "c" (pool: rawptr, addr_a: rawptr, mem: []byte) {
+	client_request_stmt(.Mem_Pool_Change, uintptr(pool), uintptr(addr_a), uintptr(raw_data(mem)), uintptr(len(mem)), 0)
+}
+// Return true if a memory pool exists.
+mem_pool_exists :: proc "c" (pool: rawptr) -> bool {
+	return 0 != client_request_expr(0, .Mem_Pool_Exists, uintptr(pool), 0, 0, 0, 0)
+}
+
+
+// Mark a section of memory as being a stack. Returns a stack id.
+stack_register :: proc "c" (stack: []byte) -> (stack_id: uintptr) {
+	ptr := uintptr(raw_data(stack))
+	return client_request_expr(0, .Stack_Register, ptr, ptr+uintptr(len(stack)), 0, 0, 0)
+}
+
+// Unmark a section of memory associated with a stack id as being a stack.
+stack_deregister :: proc "c" (id: uintptr) {
+	client_request_stmt(.Stack_Deregister, id, 0, 0, 0, 0)
+}
+
+// Change the start and end address of the stack `id` to those of the `new_stack` slice.
+stack_change :: proc "c" (id: uint, new_stack: []byte) {
+	ptr := uintptr(raw_data(new_stack))
+	client_request_stmt(.Stack_Change, uintptr(id), ptr, ptr + uintptr(len(new_stack)), 0, 0)
+}
+
+
+// Disable error reporting for the current thread.
+// It behaves in a stack-like way, meaning you can safely call this multiple times
+// given that `enable_error_reporting()` is called the same number of times to
+// re-enable the error reporting.
+// The first call of this procedure disables reporting.
+// Subsequent calls have no effect except to increase the number of `enable_error_reporting()`
+// calls needed to re-enable reporting.
+// Child threads do not inherit this setting from their parents;
+// they are always created with reporting enabled.
+disable_error_reporting :: proc "c" () {
+	client_request_stmt(.Change_Err_Disablement, 1, 0, 0, 0, 0)
+}
+// Re-enable error reporting.
+enable_error_reporting :: proc "c" () {
+	client_request_stmt(.Change_Err_Disablement, ~uintptr(0), 0, 0, 0, 0)
+}
+
+
+inner_threads :: proc "c" (qzz: rawptr) {
+	client_request_stmt(.Inner_Threads, uintptr(qzz), 0, 0, 0, 0)
+}
+
+
+// Map a code address to a source file name and line number.
+// `buf64` must point to a 64-byte buffer in the caller's address space.
+// The result will be dumped in there and is guaranteed to be zero terminated.
+// If no info is found, the first byte is set to zero.
+map_ip_to_src_loc :: proc "c" (addr: rawptr, buf64: ^[64]byte) -> uintptr {
+	return client_request_expr(0, .Map_Ip_To_Src_Loc, uintptr(addr), uintptr(buf64), 0, 0, 0)
+}
\ No newline at end of file
diff --git a/src/build_settings.cpp b/src/build_settings.cpp
index d49f1cecf..9d5c3e556 100644
--- a/src/build_settings.cpp
+++ b/src/build_settings.cpp
@@ -228,6 +228,7 @@ struct BuildContext {
 	bool ODIN_DISABLE_ASSERT; // Whether the default 'assert' et al is disabled in code or not
 	bool ODIN_DEFAULT_TO_NIL_ALLOCATOR; // Whether the default allocator is a "nil" allocator or not (i.e. it does nothing)
 	bool ODIN_FOREIGN_ERROR_PROCEDURES;
+	bool ODIN_VALGRIND_SUPPORT;
 
 	ErrorPosStyle ODIN_ERROR_POS_STYLE;
@@ -1190,6 +1191,8 @@ void init_build_context(TargetMetrics *cross_target) {
 
 	bc->optimization_level = gb_clamp(bc->optimization_level, 0, 3);
 
+	bc->ODIN_VALGRIND_SUPPORT = is_arch_x86() && build_context.metrics.os != TargetOs_windows;
+
 	#undef LINK_FLAG_X64
 	#undef LINK_FLAG_386
 }
diff --git a/src/check_builtin.cpp b/src/check_builtin.cpp
index 687f1694b..83136d576 100644
--- a/src/check_builtin.cpp
+++ b/src/check_builtin.cpp
@@ -5388,6 +5388,41 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
 		}
 		break;
 
+	case BuiltinProc_valgrind_client_request:
+		{
+			if (!is_arch_x86()) {
+				error(call, "'%.*s' is only allowed on x86 targets (i386, amd64)", LIT(builtin_name));
+				return false;
+			}
+
+			enum {ARG_COUNT = 7};
+			GB_ASSERT(builtin_procs[BuiltinProc_valgrind_client_request].arg_count == ARG_COUNT);
+
+			Operand operands[ARG_COUNT] = {};
+			for (isize i = 0; i < ARG_COUNT; i++) {
+				Operand *op = &operands[i];
+				check_expr_with_type_hint(c, op, ce->args[i], t_uintptr);
+				if (op->mode == Addressing_Invalid) {
+					return false;
+				}
+				convert_to_typed(c, op, t_uintptr);
+				if (op->mode == Addressing_Invalid) {
+					return false;
+				}
+				if (!are_types_identical(op->type, t_uintptr)) {
+					gbString str = type_to_string(op->type);
+					error(op->expr, "'%.*s' expected a uintptr, got %s", LIT(builtin_name), str);
+					gb_string_free(str);
+					return false;
+				}
+			}
+
+			operand->type = t_uintptr;
+			operand->mode = Addressing_Value;
+			operand->value = {};
+			return true;
+		}
+
 	}
 
 	return true;
diff --git a/src/checker.cpp b/src/checker.cpp
index d01dc5323..a7470a4c9 100644
--- a/src/checker.cpp
+++ b/src/checker.cpp
@@ -1037,6 +1037,9 @@ void init_universal(void) {
 	add_global_bool_constant("ODIN_FOREIGN_ERROR_PROCEDURES", bc->ODIN_FOREIGN_ERROR_PROCEDURES);
 	add_global_bool_constant("ODIN_DISALLOW_RTTI", bc->disallow_rtti);
 
+	add_global_bool_constant("ODIN_VALGRIND_SUPPORT", bc->ODIN_VALGRIND_SUPPORT);
+
+
 	// Builtin Procedures
 	for (isize i = 0; i < gb_count_of(builtin_procs); i++) {
diff --git a/src/checker_builtin_procs.hpp b/src/checker_builtin_procs.hpp
index 8dd021255..717422df1 100644
--- a/src/checker_builtin_procs.hpp
+++ b/src/checker_builtin_procs.hpp
@@ -291,6 +291,8 @@ BuiltinProc__type_end,
 	BuiltinProc_wasm_memory_atomic_wait32,
 	BuiltinProc_wasm_memory_atomic_notify32,
+	BuiltinProc_valgrind_client_request,
+
 	BuiltinProc_COUNT,
 };
 
 gb_global BuiltinProc builtin_procs[BuiltinProc_COUNT] = {
@@ -582,4 +584,6 @@ gb_global BuiltinProc builtin_procs[BuiltinProc_COUNT] = {
 	{STR_LIT("wasm_memory_size"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics},
 	{STR_LIT("wasm_memory_atomic_wait32"), 3, false, Expr_Expr, BuiltinProcPkg_intrinsics},
 	{STR_LIT("wasm_memory_atomic_notify32"), 2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
+
+	{STR_LIT("valgrind_client_request"), 7, false, Expr_Expr, BuiltinProcPkg_intrinsics},
 };
diff --git a/src/llvm_backend_expr.cpp b/src/llvm_backend_expr.cpp
index 5fd2fbe6f..3b2e6c29f 100644
--- a/src/llvm_backend_expr.cpp
+++ b/src/llvm_backend_expr.cpp
@@ -3577,7 +3577,7 @@ void lb_build_addr_compound_lit_populate(lbProcedure *p, Slice<Ast *> const &ele
 			}
 
 		} else {
-			if (lb_is_elem_const(elem, et)) {
+			if (bt->kind != Type_DynamicArray && lb_is_elem_const(elem, et)) {
 				continue;
 			}
 
diff --git a/src/llvm_backend_proc.cpp b/src/llvm_backend_proc.cpp
index d7055ea31..f85d8397c 100644
--- a/src/llvm_backend_proc.cpp
+++ b/src/llvm_backend_proc.cpp
@@ -2745,6 +2745,55 @@ lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValue const &tv,
 		res.value = LLVMBuildCall2(p->builder, func_type, the_asm, args, gb_count_of(args), "");
 		return res;
 	}
+
+	case BuiltinProc_valgrind_client_request:
+	{
+		lbValue args[7] = {};
+		for (isize i = 0; i < 7; i++) {
+			args[i] = lb_emit_conv(p, lb_build_expr(p, ce->args[i]), t_uintptr);
+		}
+		if (!build_context.ODIN_VALGRIND_SUPPORT) {
+			return args[0];
+		}
+		lbValue array = lb_generate_local_array(p, t_uintptr, 6, false);
+		for (isize i = 0; i < 6; i++) {
+			lbValue gep = lb_emit_array_epi(p, array, i);
+			lb_emit_store(p, gep, args[i+1]);
+		}
+
+		switch (build_context.metrics.arch) {
+		case TargetArch_amd64:
+			{
+				Type *param_types[2] = {};
+				param_types[0] = t_uintptr;
+				param_types[1] = array.type;
+
+				Type *type = alloc_type_proc_from_types(param_types, gb_count_of(param_types), t_uintptr, false, ProcCC_None);
+				LLVMTypeRef func_type = lb_get_procedure_raw_type(p->module, type);
+				LLVMValueRef the_asm = llvm_get_inline_asm(
+					func_type,
+					str_lit("rolq $3, %rdi; rolq $13, %rdi\n rolq $61, %rdi; rolq $51, %rdi\n xchgq %rbx, %rbx"),
+					str_lit("={rdx},{rdx},{rax},cc,memory"),
+					true
+				);
+
+				LLVMValueRef asm_args[2] = {};
+				asm_args[0] = args[0].value;
+				asm_args[1] = array.value;
+
+				lbValue res = {};
+				res.type = t_uintptr;
+				res.value = LLVMBuildCall2(p->builder, func_type, the_asm, asm_args, gb_count_of(asm_args), "");
+				return res;
+			}
+			break;
+		default:
+			GB_PANIC("Unsupported architecture: %.*s", LIT(target_arch_names[build_context.metrics.arch]));
+			break;
+		}
+
+	}
+
 	}
 
 	GB_PANIC("Unhandled built-in procedure %.*s", LIT(builtin_procs[id].name));
diff --git a/src/llvm_backend_utility.cpp b/src/llvm_backend_utility.cpp
index 8be339ca7..7163f1d9e 100644
--- a/src/llvm_backend_utility.cpp
+++ b/src/llvm_backend_utility.cpp
@@ -42,6 +42,7 @@ bool lb_is_type_aggregate(Type *t) {
 void lb_emit_unreachable(lbProcedure *p) {
 	LLVMValueRef instr = LLVMGetLastInstruction(p->curr_block->block);
 	if (instr == nullptr || !lb_is_instr_terminating(instr)) {
+		lb_call_intrinsic(p, "llvm.trap", nullptr, 0, nullptr, 0);
 		LLVMBuildUnreachable(p->builder);
 	}
 }
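
The sketch below is not part of the patch; it illustrates how the new core:sys/valgrind package might be exercised from client code. The import path is assumed from the file layout introduced above, and the surrounding program is hypothetical.

// Illustrative usage sketch only; assumes `core:sys/valgrind` resolves to the
// package added by this patch (amd64 targets only).
package valgrind_example

import "core:fmt"
import vg "core:sys/valgrind"

main :: proc() {
	// Nesting depth: 0 = native, 1 = under Valgrind, 2 = under Valgrind which
	// is itself running under Valgrind, and so on.
	if vg.running_on_valgrind() == 0 {
		fmt.println("not running under Valgrind; client requests are no-ops")
	}

	buf: [64]byte

	// Tell Memcheck the buffer contents are undefined, e.g. after recycling it
	// from a custom allocator's free list.
	vg.make_mem_undefined(buf[:])

	// Ask Memcheck to verify definedness; a non-zero result is the address of
	// the first offending byte (and Valgrind prints an error).
	if addr := vg.check_mem_is_defined(buf[:]); addr != 0 {
		fmt.printf("first undefined byte at %v\n", addr)
	}

	// Run a full leak check mid-execution, as with --leak-check=full.
	vg.leak_check()
}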