From fa97aa4dc9745af233ea96997a1c8e4094993b7c Mon Sep 17 00:00:00 2001
From: Laytan Laats
Date: Sun, 11 Jan 2026 20:10:30 +0100
Subject: [PATCH] thread: fix aligned loading of unaligned addresses

---
 core/thread/thread.odin | 45 ++++++++++++++++++++++-------------------
 1 file changed, 24 insertions(+), 21 deletions(-)

diff --git a/core/thread/thread.odin b/core/thread/thread.odin
index 26c1a3e27..a07801b98 100644
--- a/core/thread/thread.odin
+++ b/core/thread/thread.odin
@@ -5,7 +5,10 @@ import "base:runtime"
 import "core:mem"
 import "base:intrinsics"
 
-_ :: intrinsics
+@(private)
+unall :: intrinsics.unaligned_load
+@(private)
+unals :: intrinsics.unaligned_store
 
 /*
 Value, specifying whether `core:thread` functionality is available on the
@@ -347,7 +350,9 @@ create_and_start_with_poly_data :: proc(data: $T, fn: proc(data: T), init_contex
 	thread_proc :: proc(t: ^Thread) {
 		fn := cast(proc(T))t.data
 		assert(t.user_index >= 1)
-		data := (^T)(&t.user_args[0])^
+
+		data := unall((^T)(&t.user_args))
+
 		fn(data)
 	}
 	if t = create(thread_proc, priority); t == nil {
@@ -356,9 +361,7 @@ create_and_start_with_poly_data :: proc(data: $T, fn: proc(data: T), init_contex
 	t.data = rawptr(fn)
 	t.user_index = 1
 
-	data := data
-
-	mem.copy(&t.user_args[0], &data, size_of(T))
+	unals((^T)(&t.user_args), data)
 
 	if self_cleanup {
 		intrinsics.atomic_or(&t.flags, {.Self_Cleanup})
@@ -393,9 +396,10 @@ create_and_start_with_poly_data2 :: proc(arg1: $T1, arg2: $T2, fn: proc(T1, T2),
 		fn := cast(proc(T1, T2))t.data
 		assert(t.user_index >= 2)
 
-		user_args := mem.slice_to_bytes(t.user_args[:])
-		arg1 := (^T1)(raw_data(user_args))^
-		arg2 := (^T2)(raw_data(user_args[size_of(T1):]))^
+		ptr := uintptr(&t.user_args)
+
+		arg1 := unall((^T1)(rawptr(ptr)))
+		arg2 := unall((^T2)(rawptr(ptr + size_of(T1))))
 
 		fn(arg1, arg2)
 	}
@@ -405,11 +409,10 @@ create_and_start_with_poly_data2 :: proc(arg1: $T1, arg2: $T2, fn: proc(T1, T2),
 	t.data = rawptr(fn)
 	t.user_index = 2
 
-	arg1, arg2 := arg1, arg2
-	user_args := mem.slice_to_bytes(t.user_args[:])
+	ptr := uintptr(&t.user_args)
 
-	n := copy(user_args, mem.ptr_to_bytes(&arg1))
-	_ = copy(user_args[n:], mem.ptr_to_bytes(&arg2))
+	unals((^T1)(rawptr(ptr)), arg1)
+	unals((^T2)(rawptr(ptr + size_of(T1))), arg2)
 
 	if self_cleanup {
 		intrinsics.atomic_or(&t.flags, {.Self_Cleanup})
@@ -444,10 +447,11 @@ create_and_start_with_poly_data3 :: proc(arg1: $T1, arg2: $T2, arg3: $T3, fn: pr
 		fn := cast(proc(T1, T2, T3))t.data
 		assert(t.user_index >= 3)
 
-		user_args := mem.slice_to_bytes(t.user_args[:])
-		arg1 := (^T1)(raw_data(user_args))^
-		arg2 := (^T2)(raw_data(user_args[size_of(T1):]))^
-		arg3 := (^T3)(raw_data(user_args[size_of(T1) + size_of(T2):]))^
+		ptr := uintptr(&t.user_args)
+
+		arg1 := unall((^T1)(rawptr(ptr)))
+		arg2 := unall((^T2)(rawptr(ptr + size_of(T1))))
+		arg3 := unall((^T3)(rawptr(ptr + size_of(T1) + size_of(T2))))
 
 		fn(arg1, arg2, arg3)
 	}
@@ -457,12 +461,11 @@ create_and_start_with_poly_data3 :: proc(arg1: $T1, arg2: $T2, arg3: $T3, fn: pr
 	t.data = rawptr(fn)
 	t.user_index = 3
 
-	arg1, arg2, arg3 := arg1, arg2, arg3
-	user_args := mem.slice_to_bytes(t.user_args[:])
+	ptr := uintptr(&t.user_args)
 
-	n := copy(user_args, mem.ptr_to_bytes(&arg1))
-	n += copy(user_args[n:], mem.ptr_to_bytes(&arg2))
-	_ = copy(user_args[n:], mem.ptr_to_bytes(&arg3))
+	unals((^T1)(rawptr(ptr)), arg1)
+	unals((^T2)(rawptr(ptr + size_of(T1))), arg2)
+	unals((^T3)(rawptr(ptr + size_of(T1) + size_of(T2))), arg3)
 
 	if self_cleanup {
 		intrinsics.atomic_or(&t.flags, {.Self_Cleanup})
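
Not part of the patch: a minimal usage sketch, assuming the current core:thread API, of the case the change addresses. Packing a u8 followed by a u64 with create_and_start_with_poly_data2 places the u64 at offset size_of(u8) = 1 inside t.user_args, so reading it back with a plain (^u64)(...)^ dereference would perform an aligned load from an unaligned address; the unaligned_load/unaligned_store intrinsics make the round trip well-defined regardless of the target's alignment rules.

package main

import "core:fmt"
import "core:thread"

main :: proc() {
	// The u64 argument is stored at offset 1 inside the thread's user_args
	// buffer, i.e. at an address that is not 8-byte aligned.
	t := thread.create_and_start_with_poly_data2(u8(1), u64(0xDEAD_BEEF), proc(a: u8, b: u64) {
		fmt.println(a, b)
	})
	thread.destroy(t) // joins the thread, then frees it
}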