mirror of https://github.com/odin-lang/Odin.git
synced 2025-12-29 01:14:40 +00:00

Update sync2 to just use atomic intrinsics rather than the parapoly wrappers

This commit is contained in:
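In short: the commit deletes sync2's Ordering enum and the parametric-polymorphism ("parapoly") wrapper procedures that dispatched on it, re-exports the named intrinsics directly (e.g. atomic_store_rel), and rewrites every call site accordingly. A minimal before/after sketch of the call-site style; the variable `counter` and the values are hypothetical, not taken from the diff:

package main

import "intrinsics" // import path as it appears in this era of the codebase; newer Odin spells it "core:intrinsics"

counter: int;

main :: proc() {
    // Old style (removed by this commit): the ordering was a compile-time
    // parameter to a parapoly wrapper, e.g.
    //     atomic_add(&counter, 1, Ordering.Acquire);
    // New style: the ordering is part of the intrinsic's name, which sync2
    // now simply aliases:
    _ = intrinsics.atomic_add_acq(&counter, 1); // fetch-and-add, acquire ordering
    intrinsics.atomic_store_rel(&counter, 0);   // store, release ordering
}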
@@ -2,7 +2,6 @@
 package os

 import "core:time"
-import "core:path"

 /*
     For reference
@@ -71,6 +70,36 @@ _fill_file_info_from_stat :: proc(fi: ^File_Info, s: OS_Stat) {
     fi.access_time = _make_time_from_unix_file_time(s.last_access);
 }

+@private
+path_base :: proc(path: string) -> string {
+    is_separator :: proc(c: byte) -> bool {
+        return c == '/';
+    }
+
+    if path == "" {
+        return ".";
+    }
+
+    path := path;
+    for len(path) > 0 && is_separator(path[len(path)-1]) {
+        path = path[:len(path)-1];
+    }
+
+    i := len(path)-1;
+    for i >= 0 && !is_separator(path[i]) {
+        i -= 1;
+    }
+    if i >= 0 {
+        path = path[i+1:];
+    }
+    if path == "" {
+        return "/";
+    }
+    return path;
+}
+
 lstat :: proc(name: string, allocator := context.allocator) -> (fi: File_Info, err: Errno) {
     context.allocator = allocator;
@@ -85,7 +114,7 @@ lstat :: proc(name: string, allocator := context.allocator) -> (fi: File_Info, e
     if err != ERROR_NONE {
         return;
     }
-    fi.name = path.base(fi.fullpath);
+    fi.name = path_base(fi.fullpath);
     return fi, ERROR_NONE;
 }
@@ -103,7 +132,7 @@ stat :: proc(name: string, allocator := context.allocator) -> (fi: File_Info, er
     if err != ERROR_NONE {
         return;
     }
-    fi.name = path.base(fi.fullpath);
+    fi.name = path_base(fi.fullpath);
     return fi, ERROR_NONE;
 }
@@ -121,6 +150,6 @@ fstat :: proc(fd: Handle, allocator := context.allocator) -> (fi: File_Info, err
     if err != ERROR_NONE {
         return;
     }
-    fi.name = path.base(fi.fullpath);
+    fi.name = path_base(fi.fullpath);
     return fi, ERROR_NONE;
 }
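For reference, a few worked inputs for the new @private path_base helper, following the code above (example paths chosen for illustration, not taken from the commit):

    path_base("")                 -> "."        // empty path
    path_base("/usr/lib/libc.so") -> "libc.so"  // last path element
    path_base("/usr/lib/")        -> "lib"      // trailing separators stripped first
    path_base("///")              -> "/"        // path consists only of separators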
@@ -4,167 +4,76 @@ import "intrinsics"

-// TODO(bill): Is this even a good design? The intrinsics seem to be more than good enough and just as clean
-
-Ordering :: enum {
-    Relaxed, // Monotonic
-    Release,
-    Acquire,
-    Acquire_Release,
-    Sequentially_Consistent,
-}
+cpu_relax :: intrinsics.cpu_relax;

-strongest_failure_ordering_table := [Ordering]Ordering{
-    .Relaxed                 = .Relaxed,
-    .Release                 = .Relaxed,
-    .Acquire                 = .Acquire,
-    .Acquire_Release         = .Acquire,
-    .Sequentially_Consistent = .Sequentially_Consistent,
-};
+atomic_fence        :: intrinsics.atomic_fence;
+atomic_fence_acq    :: intrinsics.atomic_fence_acq;
+atomic_fence_rel    :: intrinsics.atomic_fence_rel;
+atomic_fence_acqrel :: intrinsics.atomic_fence_acqrel;

-strongest_failure_ordering :: #force_inline proc(order: Ordering) -> Ordering {
-    return strongest_failure_ordering_table[order];
-}
+atomic_store           :: intrinsics.atomic_store;
+atomic_store_rel       :: intrinsics.atomic_store_rel;
+atomic_store_relaxed   :: intrinsics.atomic_store_relaxed;
+atomic_store_unordered :: intrinsics.atomic_store_unordered;

-fence :: #force_inline proc($order: Ordering) {
-    when order == .Relaxed { #panic("there is no such thing as a relaxed fence"); }
-    else when order == .Release { intrinsics.atomic_fence_rel(); }
-    else when order == .Acquire { intrinsics.atomic_fence_acq(); }
-    else when order == .Acquire_Release { intrinsics.atomic_fence_acqrel(); }
-    else when order == .Sequentially_Consistent { intrinsics.atomic_fence(); }
-    else { #panic("unknown order"); }
-}
+atomic_load           :: intrinsics.atomic_load;
+atomic_load_acq       :: intrinsics.atomic_load_acq;
+atomic_load_relaxed   :: intrinsics.atomic_load_relaxed;
+atomic_load_unordered :: intrinsics.atomic_load_unordered;

+atomic_add          :: intrinsics.atomic_add;
+atomic_add_acq      :: intrinsics.atomic_add_acq;
+atomic_add_rel      :: intrinsics.atomic_add_rel;
+atomic_add_acqrel   :: intrinsics.atomic_add_acqrel;
+atomic_add_relaxed  :: intrinsics.atomic_add_relaxed;
+atomic_sub          :: intrinsics.atomic_sub;
+atomic_sub_acq      :: intrinsics.atomic_sub_acq;
+atomic_sub_rel      :: intrinsics.atomic_sub_rel;
+atomic_sub_acqrel   :: intrinsics.atomic_sub_acqrel;
+atomic_sub_relaxed  :: intrinsics.atomic_sub_relaxed;
+atomic_and          :: intrinsics.atomic_and;
+atomic_and_acq      :: intrinsics.atomic_and_acq;
+atomic_and_rel      :: intrinsics.atomic_and_rel;
+atomic_and_acqrel   :: intrinsics.atomic_and_acqrel;
+atomic_and_relaxed  :: intrinsics.atomic_and_relaxed;
+atomic_nand         :: intrinsics.atomic_nand;
+atomic_nand_acq     :: intrinsics.atomic_nand_acq;
+atomic_nand_rel     :: intrinsics.atomic_nand_rel;
+atomic_nand_acqrel  :: intrinsics.atomic_nand_acqrel;
+atomic_nand_relaxed :: intrinsics.atomic_nand_relaxed;
+atomic_or           :: intrinsics.atomic_or;
+atomic_or_acq       :: intrinsics.atomic_or_acq;
+atomic_or_rel       :: intrinsics.atomic_or_rel;
+atomic_or_acqrel    :: intrinsics.atomic_or_acqrel;
+atomic_or_relaxed   :: intrinsics.atomic_or_relaxed;
+atomic_xor          :: intrinsics.atomic_xor;
+atomic_xor_acq      :: intrinsics.atomic_xor_acq;
+atomic_xor_rel      :: intrinsics.atomic_xor_rel;
+atomic_xor_acqrel   :: intrinsics.atomic_xor_acqrel;
+atomic_xor_relaxed  :: intrinsics.atomic_xor_relaxed;

-atomic_store :: #force_inline proc(dst: ^$T, val: T, $order: Ordering) {
-    when order == .Relaxed { intrinsics.atomic_store_relaxed(dst, val); }
-    else when order == .Release { intrinsics.atomic_store_rel(dst, val); }
-    else when order == .Sequentially_Consistent { intrinsics.atomic_store(dst, val); }
-    else when order == .Acquire { #panic("there is not such thing as an acquire store"); }
-    else when order == .Acquire_Release { #panic("there is not such thing as an acquire/release store"); }
-    else { #panic("unknown order"); }
-}
+atomic_xchg         :: intrinsics.atomic_xchg;
+atomic_xchg_acq     :: intrinsics.atomic_xchg_acq;
+atomic_xchg_rel     :: intrinsics.atomic_xchg_rel;
+atomic_xchg_acqrel  :: intrinsics.atomic_xchg_acqrel;
+atomic_xchg_relaxed :: intrinsics.atomic_xchg_relaxed;

-atomic_load :: #force_inline proc(dst: ^$T, $order: Ordering) -> T {
-    when order == .Relaxed { return intrinsics.atomic_load_relaxed(dst); }
-    else when order == .Acquire { return intrinsics.atomic_load_acq(dst); }
-    else when order == .Sequentially_Consistent { return intrinsics.atomic_load(dst); }
-    else when order == .Release { #panic("there is no such thing as a release load"); }
-    else when order == .Acquire_Release { #panic("there is no such thing as an acquire/release load"); }
-    else { #panic("unknown order"); }
-}
-
-atomic_exchange :: #force_inline proc(dst: ^$T, val: T, $order: Ordering) -> T {
-    when order == .Relaxed { return intrinsics.atomic_xchg_relaxed(dst, val); }
-    else when order == .Release { return intrinsics.atomic_xchg_rel(dst, val); }
-    else when order == .Acquire { return intrinsics.atomic_xchg_acq(dst, val); }
-    else when order == .Acquire_Release { return intrinsics.atomic_xchg_acqrel(dst, val); }
-    else when order == .Sequentially_Consistent { return intrinsics.atomic_xchg(dst, val); }
-    else { #panic("unknown order"); }
-}
-
-atomic_compare_exchange :: #force_inline proc(dst: ^$T, old, new: T, $success, $failure: Ordering) -> (val: T, ok: bool) {
-    when failure == .Relaxed {
-        when success == .Relaxed { return intrinsics.atomic_cxchg_relaxed(dst, old, new); }
-        else when success == .Acquire { return intrinsics.atomic_cxchg_acq_failrelaxed(dst, old, new); }
-        else when success == .Acquire_Release { return intrinsics.atomic_cxchg_acqrel_failrelaxed(dst, old, new); }
-        else when success == .Sequentially_Consistent { return intrinsics.atomic_cxchg_failrelaxed(dst, old, new); }
-        else when success == .Release { return intrinsics.atomic_cxchg_rel(dst, old, new); }
-        else { #panic("an unknown ordering combination"); }
-    } else when failure == .Acquire {
-        when success == .Release { return intrinsics.atomic_cxchg_acqrel(dst, old, new); }
-        else when success == .Acquire { return intrinsics.atomic_cxchg_acq(dst, old, new); }
-        else { #panic("an unknown ordering combination"); }
-    } else when failure == .Sequentially_Consistent {
-        when success == .Sequentially_Consistent { return intrinsics.atomic_cxchg(dst, old, new); }
-        else { #panic("an unknown ordering combination"); }
-    } else when failure == .Acquire_Release {
-        #panic("there is not such thing as an acquire/release failure ordering");
-    } else when failure == .Release {
-        when success == .Acquire { return intrinsics.atomic_cxchg_failacq(dst, old, new); }
-        else { #panic("an unknown ordering combination"); }
-    } else {
-        return T{}, false;
-    }
-
-}
-
-atomic_compare_exchange_weak :: #force_inline proc(dst: ^$T, old, new: T, $success, $failure: Ordering) -> (val: T, ok: bool) {
-    when failure == .Relaxed {
-        when success == .Relaxed { return intrinsics.atomic_cxchgweak_relaxed(dst, old, new); }
-        else when success == .Acquire { return intrinsics.atomic_cxchgweak_acq_failrelaxed(dst, old, new); }
-        else when success == .Acquire_Release { return intrinsics.atomic_cxchgweak_acqrel_failrelaxed(dst, old, new); }
-        else when success == .Sequentially_Consistent { return intrinsics.atomic_cxchgweak_failrelaxed(dst, old, new); }
-        else when success == .Release { return intrinsics.atomic_cxchgweak_rel(dst, old, new); }
-        else { #panic("an unknown ordering combination"); }
-    } else when failure == .Acquire {
-        when success == .Release { return intrinsics.atomic_cxchgweak_acqrel(dst, old, new); }
-        else when success == .Acquire { return intrinsics.atomic_cxchgweak_acq(dst, old, new); }
-        else { #panic("an unknown ordering combination"); }
-    } else when failure == .Sequentially_Consistent {
-        when success == .Sequentially_Consistent { return intrinsics.atomic_cxchgweak(dst, old, new); }
-        else { #panic("an unknown ordering combination"); }
-    } else when failure == .Acquire_Release {
-        #panic("there is not such thing as an acquire/release failure ordering");
-    } else when failure == .Release {
-        when success == .Acquire { return intrinsics.atomic_cxchgweak_failacq(dst, old, new); }
-        else { #panic("an unknown ordering combination"); }
-    } else {
-        return T{}, false;
-    }
-
-}
-
-atomic_add :: #force_inline proc(dst: ^$T, val: T, $order: Ordering) -> T {
-    when order == .Relaxed { return intrinsics.atomic_add_relaxed(dst, val); }
-    else when order == .Release { return intrinsics.atomic_add_rel(dst, val); }
-    else when order == .Acquire { return intrinsics.atomic_add_acq(dst, val); }
-    else when order == .Acquire_Release { return intrinsics.atomic_add_acqrel(dst, val); }
-    else when order == .Sequentially_Consistent { return intrinsics.atomic_add(dst, val); }
-    else { #panic("unknown order"); }
-}
-
-atomic_sub :: #force_inline proc(dst: ^$T, val: T, $order: Ordering) -> T {
-    when order == .Relaxed { return intrinsics.atomic_sub_relaxed(dst, val); }
-    else when order == .Release { return intrinsics.atomic_sub_rel(dst, val); }
-    else when order == .Acquire { return intrinsics.atomic_sub_acq(dst, val); }
-    else when order == .Acquire_Release { return intrinsics.atomic_sub_acqrel(dst, val); }
-    else when order == .Sequentially_Consistent { return intrinsics.atomic_sub(dst, val); }
-    else { #panic("unknown order"); }
-}
-
-atomic_and :: #force_inline proc(dst: ^$T, val: T, $order: Ordering) -> T {
-    when order == .Relaxed { return intrinsics.atomic_and_relaxed(dst, val); }
-    else when order == .Release { return intrinsics.atomic_and_rel(dst, val); }
-    else when order == .Acquire { return intrinsics.atomic_and_acq(dst, val); }
-    else when order == .Acquire_Release { return intrinsics.atomic_and_acqrel(dst, val); }
-    else when order == .Sequentially_Consistent { return intrinsics.atomic_and(dst, val); }
-    else { #panic("unknown order"); }
-}
-
-atomic_nand :: #force_inline proc(dst: ^$T, val: T, $order: Ordering) -> T {
-    when order == .Relaxed { return intrinsics.atomic_nand_relaxed(dst, val); }
-    else when order == .Release { return intrinsics.atomic_nand_rel(dst, val); }
-    else when order == .Acquire { return intrinsics.atomic_nand_acq(dst, val); }
-    else when order == .Acquire_Release { return intrinsics.atomic_nand_acqrel(dst, val); }
-    else when order == .Sequentially_Consistent { return intrinsics.atomic_nand(dst, val); }
-    else { #panic("unknown order"); }
-}
-
-atomic_or :: #force_inline proc(dst: ^$T, val: T, $order: Ordering) -> T {
-    when order == .Relaxed { return intrinsics.atomic_or_relaxed(dst, val); }
-    else when order == .Release { return intrinsics.atomic_or_rel(dst, val); }
-    else when order == .Acquire { return intrinsics.atomic_or_acq(dst, val); }
-    else when order == .Acquire_Release { return intrinsics.atomic_or_acqrel(dst, val); }
-    else when order == .Sequentially_Consistent { return intrinsics.atomic_or(dst, val); }
-    else { #panic("unknown order"); }
-}
-
-atomic_xor :: #force_inline proc(dst: ^$T, val: T, $order: Ordering) -> T {
-    when order == .Relaxed { return intrinsics.atomic_xor_relaxed(dst, val); }
-    else when order == .Release { return intrinsics.atomic_xor_rel(dst, val); }
-    else when order == .Acquire { return intrinsics.atomic_xor_acq(dst, val); }
-    else when order == .Acquire_Release { return intrinsics.atomic_xor_acqrel(dst, val); }
-    else when order == .Sequentially_Consistent { return intrinsics.atomic_xor(dst, val); }
-    else { #panic("unknown order"); }
-}
+atomic_cxchg                    :: intrinsics.atomic_cxchg;
+atomic_cxchg_acq                :: intrinsics.atomic_cxchg_acq;
+atomic_cxchg_rel                :: intrinsics.atomic_cxchg_rel;
+atomic_cxchg_acqrel             :: intrinsics.atomic_cxchg_acqrel;
+atomic_cxchg_relaxed            :: intrinsics.atomic_cxchg_relaxed;
+atomic_cxchg_failrelaxed        :: intrinsics.atomic_cxchg_failrelaxed;
+atomic_cxchg_failacq            :: intrinsics.atomic_cxchg_failacq;
+atomic_cxchg_acq_failrelaxed    :: intrinsics.atomic_cxchg_acq_failrelaxed;
+atomic_cxchg_acqrel_failrelaxed :: intrinsics.atomic_cxchg_acqrel_failrelaxed;

+atomic_cxchgweak                    :: intrinsics.atomic_cxchgweak;
+atomic_cxchgweak_acq                :: intrinsics.atomic_cxchgweak_acq;
+atomic_cxchgweak_rel                :: intrinsics.atomic_cxchgweak_rel;
+atomic_cxchgweak_acqrel             :: intrinsics.atomic_cxchgweak_acqrel;
+atomic_cxchgweak_relaxed            :: intrinsics.atomic_cxchgweak_relaxed;
+atomic_cxchgweak_failrelaxed        :: intrinsics.atomic_cxchgweak_failrelaxed;
+atomic_cxchgweak_failacq            :: intrinsics.atomic_cxchgweak_failacq;
+atomic_cxchgweak_acq_failrelaxed    :: intrinsics.atomic_cxchgweak_acq_failrelaxed;
+atomic_cxchgweak_acqrel_failrelaxed :: intrinsics.atomic_cxchgweak_acqrel_failrelaxed;
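To illustrate how the re-exported compare-exchange aliases read at a call site, here is a minimal sketch of a lock-free increment; the procedure and the shared `value` pointer are hypothetical and not part of this commit:

// Lock-free increment built on the aliases above. atomic_cxchg takes
// (dst, expected old value, new value) and returns (previous value, ok).
atomic_increment :: proc(value: ^int) -> int {
    for {
        old := atomic_load(value); // sequentially consistent load
        if _, ok := atomic_cxchg(value, old, old+1); ok {
            return old+1; // CAS succeeded; we performed the increment
        }
        cpu_relax(); // lost the race; back off briefly and retry
    }
}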
@@ -5,7 +5,6 @@ package sync2

 import "core:mem"
 import "core:time"
-import "intrinsics"
 import "core:math/rand"

 _, _ :: time, rand;
@@ -136,10 +135,10 @@ channel_peek :: proc(ch: $C/Channel($T, $D)) -> int {
     if c == nil {
         return -1;
     }
-    if intrinsics.atomic_load(&c.closed) {
+    if atomic_load(&c.closed) {
         return -1;
     }
-    return intrinsics.atomic_load(&c.len);
+    return atomic_load(&c.len);
 }
@@ -238,7 +237,7 @@ raw_channel_destroy :: proc(c: ^Raw_Channel) {
         return;
     }
     context.allocator = c.allocator;
-    intrinsics.atomic_store(&c.closed, true);
+    atomic_store(&c.closed, true);
     free(c);
 }
@@ -248,7 +247,7 @@ raw_channel_close :: proc(c: ^Raw_Channel, loc := #caller_location) {
     }
     mutex_lock(&c.mutex);
     defer mutex_unlock(&c.mutex);
-    intrinsics.atomic_store(&c.closed, true);
+    atomic_store(&c.closed, true);

     // Release readers and writers
     raw_channel_wait_queue_broadcast(c.recvq);
@@ -317,12 +316,12 @@ raw_channel_recv_impl :: proc(c: ^Raw_Channel, res: rawptr, loc := #caller_locat
     if c == nil {
         panic(message="cannot recv message; channel is nil", loc=loc);
     }
-    intrinsics.atomic_store(&c.ready, true);
+    atomic_store(&c.ready, true);
     for c.len < 1 {
         raw_channel_wait_queue_signal(c.sendq);
         cond_wait(&c.cond, &c.mutex);
     }
-    intrinsics.atomic_store(&c.ready, false);
+    atomic_store(&c.ready, false);
     recv(c, res, loc);
     if c.cap > 0 {
         if c.len == c.cap - 1 {
@@ -2,7 +2,6 @@
 //+private
 package sync2

-import "intrinsics"
 import win32 "core:sys/windows"
 import "core:time"

@@ -12,24 +11,24 @@ raw_channel_wait_queue_wait_on :: proc(state: ^uintptr, timeout: time.Duration)
         ms = win32.DWORD((max(time.duration_nanoseconds(timeout), 0) + 999999)/1000000);
     }

-    v := intrinsics.atomic_load(state);
+    v := atomic_load(state);
     for v == 0 {
         win32.WaitOnAddress(state, &v, size_of(state^), ms);
-        v = intrinsics.atomic_load(state);
+        v = atomic_load(state);
     }
-    intrinsics.atomic_store(state, 0);
+    atomic_store(state, 0);
 }

 raw_channel_wait_queue_signal :: proc(q: ^Raw_Channel_Wait_Queue) {
     for x := q; x != nil; x = x.next {
-        intrinsics.atomic_add(x.state, 1);
+        atomic_add(x.state, 1);
         win32.WakeByAddressSingle(x.state);
     }
 }

 raw_channel_wait_queue_broadcast :: proc(q: ^Raw_Channel_Wait_Queue) {
     for x := q; x != nil; x = x.next {
-        intrinsics.atomic_add(x.state, 1);
+        atomic_add(x.state, 1);
         win32.WakeByAddressAll(x.state);
     }
 }
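For orientation, the wait/wake protocol above in one illustrative trace (a reading of the code, not text from the commit):

// waiter:                           signaler:
// v := atomic_load(state)  // 0
// WaitOnAddress(state, &v, ...)     atomic_add(x.state, 1)  // state: 0 -> 1
//                                   win32.WakeByAddressSingle(x.state)
// v = atomic_load(state)   // 1, loop exits
// atomic_store(state, 0)   // reset for the next wait
// broadcast differs only in waking every waiter via WakeByAddressAll.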
@@ -1,7 +1,6 @@
 package sync2

 import "core:runtime"
-import "intrinsics"

 // A Wait_Group waits for a collection of threads to finish
 //
@@ -20,7 +19,7 @@ wait_group_add :: proc(wg: ^Wait_Group, delta: int) {
     mutex_lock(&wg.mutex);
     defer mutex_unlock(&wg.mutex);

-    intrinsics.atomic_add(&wg.counter, delta);
+    atomic_add(&wg.counter, delta);
     if wg.counter < 0 {
         panic("sync.Wait_Group negative counter");
     }
@@ -130,14 +129,14 @@ Ticket_Mutex :: struct {
 }

 ticket_mutex_lock :: #force_inline proc(m: ^Ticket_Mutex) {
-    ticket := intrinsics.atomic_add_relaxed(&m.ticket, 1);
-    for ticket != intrinsics.atomic_load_acq(&m.serving) {
-        intrinsics.cpu_relax();
+    ticket := atomic_add_relaxed(&m.ticket, 1);
+    for ticket != atomic_load_acq(&m.serving) {
+        cpu_relax();
     }
 }

 ticket_mutex_unlock :: #force_inline proc(m: ^Ticket_Mutex) {
-    intrinsics.atomic_add_relaxed(&m.serving, 1);
+    atomic_add_relaxed(&m.serving, 1);
 }
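The ticket lock above is a FIFO spinlock; a short illustrative trace (thread names hypothetical, not from the commit):

// Initially ticket == serving == 0; atomic_add_relaxed returns the previous value.
// Thread A: ticket := atomic_add_relaxed(&m.ticket, 1) -> A holds ticket 0; serving == 0, A enters.
// Thread B: ticket := ...                              -> B holds ticket 1; spins in cpu_relax().
// Thread A: ticket_mutex_unlock -> serving becomes 1; B's acquire load observes it, B enters.
// FIFO fairness falls out of the monotonically increasing ticket counter.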
@@ -148,18 +147,18 @@ Benaphore :: struct {
 }

 benaphore_lock :: proc(b: ^Benaphore) {
-    if intrinsics.atomic_add_acq(&b.counter, 1) > 1 {
+    if atomic_add_acq(&b.counter, 1) > 1 {
         sema_wait(&b.sema);
     }
 }

 benaphore_try_lock :: proc(b: ^Benaphore) -> bool {
-    v, _ := intrinsics.atomic_cxchg_acq(&b.counter, 1, 0);
+    v, _ := atomic_cxchg_acq(&b.counter, 1, 0);
     return v == 0;
 }

 benaphore_unlock :: proc(b: ^Benaphore) {
-    if intrinsics.atomic_sub_rel(&b.counter, 1) > 0 {
+    if atomic_sub_rel(&b.counter, 1) > 0 {
         sema_post(&b.sema);
     }
 }
@@ -173,7 +172,7 @@ Recursive_Benaphore :: struct {

 recursive_benaphore_lock :: proc(b: ^Recursive_Benaphore) {
     tid := runtime.current_thread_id();
-    if intrinsics.atomic_add_acq(&b.counter, 1) > 1 {
+    if atomic_add_acq(&b.counter, 1) > 1 {
         if tid != b.owner {
             sema_wait(&b.sema);
         }
@@ -186,10 +185,10 @@ recursive_benaphore_lock :: proc(b: ^Recursive_Benaphore) {
 recursive_benaphore_try_lock :: proc(b: ^Recursive_Benaphore) -> bool {
     tid := runtime.current_thread_id();
     if b.owner == tid {
-        intrinsics.atomic_add_acq(&b.counter, 1);
+        atomic_add_acq(&b.counter, 1);
     }

-    if v, _ := intrinsics.atomic_cxchg_acq(&b.counter, 1, 0); v != 0 {
+    if v, _ := atomic_cxchg_acq(&b.counter, 1, 0); v != 0 {
         return false;
     }
     // inside the lock
@@ -206,7 +205,7 @@ recursive_benaphore_unlock :: proc(b: ^Recursive_Benaphore) {
     if recursion == 0 {
         b.owner = 0;
     }
-    if intrinsics.atomic_sub_rel(&b.counter, 1) > 0 {
+    if atomic_sub_rel(&b.counter, 1) > 0 {
         if recursion == 0 {
             sema_post(&b.sema);
         }
@@ -224,7 +223,7 @@ Once :: struct {
 }

 once_do :: proc(o: ^Once, fn: proc()) {
-    if intrinsics.atomic_load_acq(&o.done) == false {
+    if atomic_load_acq(&o.done) == false {
         _once_do_slow(o, fn);
     }
 }
@@ -234,6 +233,6 @@ _once_do_slow :: proc(o: ^Once, fn: proc()) {
     defer mutex_unlock(&o.m);
     if !o.done {
         fn();
-        intrinsics.atomic_store_rel(&o.done, true);
+        atomic_store_rel(&o.done, true);
     }
 }
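once_do implements the classic double-checked pattern: an acquire load of o.done on the fast path, and a mutex-protected re-check plus release store on the slow path. A minimal usage sketch (global_once, setup, and init_everything are hypothetical names):

global_once: Once;

setup :: proc() {
    // initialization that must run exactly once,
    // even when many threads race into init_everything.
}

init_everything :: proc() {
    once_do(&global_once, setup);
}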
@@ -4,7 +4,6 @@ package sync2

 when !#config(ODIN_SYNC_USE_PTHREADS, false) {

-import "intrinsics"
 import "core:time"

 _Mutex_State :: enum i32 {
@@ -17,13 +16,13 @@ _Mutex :: struct {
 }

 _mutex_lock :: proc(m: ^Mutex) {
-    if intrinsics.atomic_xchg_rel(&m.impl.state, .Unlocked) != .Unlocked {
+    if atomic_xchg_rel(&m.impl.state, .Unlocked) != .Unlocked {
         _mutex_unlock_slow(m);
     }
 }

 _mutex_unlock :: proc(m: ^Mutex) {
-    switch intrinsics.atomic_xchg_rel(&m.impl.state, .Unlocked) {
+    switch atomic_xchg_rel(&m.impl.state, .Unlocked) {
     case .Unlocked:
         unreachable();
     case .Locked:
@@ -34,7 +33,7 @@ _mutex_unlock :: proc(m: ^Mutex) {
 }

 _mutex_try_lock :: proc(m: ^Mutex) -> bool {
-    _, ok := intrinsics.atomic_cxchg_acq(&m.impl.state, .Unlocked, .Locked);
+    _, ok := atomic_cxchg_acq(&m.impl.state, .Unlocked, .Locked);
     return ok;
 }

@@ -44,7 +43,7 @@ _mutex_lock_slow :: proc(m: ^Mutex, curr_state: _Mutex_State) {
     new_state := curr_state; // Make a copy of it

     spin_lock: for spin in 0..<i32(100) {
-        state, ok := intrinsics.atomic_cxchgweak_acq(&m.impl.state, .Unlocked, new_state);
+        state, ok := atomic_cxchgweak_acq(&m.impl.state, .Unlocked, new_state);
         if ok {
             return;
         }
@@ -54,17 +53,17 @@ _mutex_lock_slow :: proc(m: ^Mutex, curr_state: _Mutex_State) {
         }

         for i := min(spin+1, 32); i > 0; i -= 1 {
-            intrinsics.cpu_relax();
+            cpu_relax();
         }
     }

     for {
-        if intrinsics.atomic_xchg_acq(&m.impl.state, .Waiting) == .Unlocked {
+        if atomic_xchg_acq(&m.impl.state, .Waiting) == .Unlocked {
             return;
         }

         // TODO(bill): Use a Futex here for Linux to improve performance and error handling
-        intrinsics.cpu_relax();
+        cpu_relax();
     }
 }
@@ -91,25 +90,25 @@ _RW_Mutex :: struct {
 }

 _rw_mutex_lock :: proc(rw: ^RW_Mutex) {
-    _ = intrinsics.atomic_add(&rw.impl.state, RW_Mutex_State_Writer);
+    _ = atomic_add(&rw.impl.state, RW_Mutex_State_Writer);
     mutex_lock(&rw.impl.mutex);

-    state := intrinsics.atomic_or(&rw.impl.state, RW_Mutex_State_Writer);
+    state := atomic_or(&rw.impl.state, RW_Mutex_State_Writer);
     if state & RW_Mutex_State_Reader_Mask != 0 {
         sema_wait(&rw.impl.sema);
     }
 }

 _rw_mutex_unlock :: proc(rw: ^RW_Mutex) {
-    _ = intrinsics.atomic_and(&rw.impl.state, ~RW_Mutex_State_Is_Writing);
+    _ = atomic_and(&rw.impl.state, ~RW_Mutex_State_Is_Writing);
     mutex_unlock(&rw.impl.mutex);
 }

 _rw_mutex_try_lock :: proc(rw: ^RW_Mutex) -> bool {
     if mutex_try_lock(&rw.impl.mutex) {
-        state := intrinsics.atomic_load(&rw.impl.state);
+        state := atomic_load(&rw.impl.state);
         if state & RW_Mutex_State_Reader_Mask == 0 {
-            _ = intrinsics.atomic_or(&rw.impl.state, RW_Mutex_State_Is_Writing);
+            _ = atomic_or(&rw.impl.state, RW_Mutex_State_Is_Writing);
             return true;
         }

@@ -119,22 +118,22 @@ _rw_mutex_try_lock :: proc(rw: ^RW_Mutex) -> bool {
 }

 _rw_mutex_shared_lock :: proc(rw: ^RW_Mutex) {
-    state := intrinsics.atomic_load(&rw.impl.state);
+    state := atomic_load(&rw.impl.state);
     for state & (RW_Mutex_State_Is_Writing|RW_Mutex_State_Writer_Mask) == 0 {
         ok: bool;
-        state, ok = intrinsics.atomic_cxchgweak(&rw.impl.state, state, state + RW_Mutex_State_Reader);
+        state, ok = atomic_cxchgweak(&rw.impl.state, state, state + RW_Mutex_State_Reader);
         if ok {
             return;
         }
     }

     mutex_lock(&rw.impl.mutex);
-    _ = intrinsics.atomic_add(&rw.impl.state, RW_Mutex_State_Reader);
+    _ = atomic_add(&rw.impl.state, RW_Mutex_State_Reader);
     mutex_unlock(&rw.impl.mutex);
 }

 _rw_mutex_shared_unlock :: proc(rw: ^RW_Mutex) {
-    state := intrinsics.atomic_sub(&rw.impl.state, RW_Mutex_State_Reader);
+    state := atomic_sub(&rw.impl.state, RW_Mutex_State_Reader);

     if (state & RW_Mutex_State_Reader_Mask == RW_Mutex_State_Reader) &&
        (state & RW_Mutex_State_Is_Writing != 0) {
@@ -143,15 +142,15 @@ _rw_mutex_shared_unlock :: proc(rw: ^RW_Mutex) {
 }

 _rw_mutex_try_shared_lock :: proc(rw: ^RW_Mutex) -> bool {
-    state := intrinsics.atomic_load(&rw.impl.state);
+    state := atomic_load(&rw.impl.state);
     if state & (RW_Mutex_State_Is_Writing|RW_Mutex_State_Writer_Mask) == 0 {
-        _, ok := intrinsics.atomic_cxchg(&rw.impl.state, state, state + RW_Mutex_State_Reader);
+        _, ok := atomic_cxchg(&rw.impl.state, state, state + RW_Mutex_State_Reader);
         if ok {
             return true;
         }
     }
     if mutex_try_lock(&rw.impl.mutex) {
-        _ = intrinsics.atomic_add(&rw.impl.state, RW_Mutex_State_Reader);
+        _ = atomic_add(&rw.impl.state, RW_Mutex_State_Reader);
         mutex_unlock(&rw.impl.mutex);
         return true;
     }
@@ -167,13 +166,13 @@ Queue_Item :: struct {
 }

 queue_item_wait :: proc(item: ^Queue_Item) {
-    for intrinsics.atomic_load_acq(&item.futex) == 0 {
+    for atomic_load_acq(&item.futex) == 0 {
         // TODO(bill): Use a Futex here for Linux to improve performance and error handling
-        intrinsics.cpu_relax();
+        cpu_relax();
     }
 }
 queue_item_signal :: proc(item: ^Queue_Item) {
-    intrinsics.atomic_store_rel(&item.futex, 1);
+    atomic_store_rel(&item.futex, 1);
     // TODO(bill): Use a Futex here for Linux to improve performance and error handling
 }

@@ -191,7 +190,7 @@ _cond_wait :: proc(c: ^Cond, m: ^Mutex) {
     waiter.next = c.impl.queue_head;
     c.impl.queue_head = waiter;

-    intrinsics.atomic_store(&c.impl.pending, true);
+    atomic_store(&c.impl.pending, true);
     mutex_unlock(&c.impl.queue_mutex);

     mutex_unlock(m);
@@ -205,7 +204,7 @@ _cond_wait_with_timeout :: proc(c: ^Cond, m: ^Mutex, timeout: time.Duration) ->
 }

 _cond_signal :: proc(c: ^Cond) {
-    if !intrinsics.atomic_load(&c.impl.pending) {
+    if !atomic_load(&c.impl.pending) {
         return;
     }

@@ -214,7 +213,7 @@ _cond_signal :: proc(c: ^Cond) {
     if c.impl.queue_head != nil {
         c.impl.queue_head = c.impl.queue_head.next;
     }
-    intrinsics.atomic_store(&c.impl.pending, c.impl.queue_head != nil);
+    atomic_store(&c.impl.pending, c.impl.queue_head != nil);
     mutex_unlock(&c.impl.queue_mutex);

     if waiter != nil {
@@ -223,11 +222,11 @@ _cond_signal :: proc(c: ^Cond) {
 }

 _cond_broadcast :: proc(c: ^Cond) {
-    if !intrinsics.atomic_load(&c.impl.pending) {
+    if !atomic_load(&c.impl.pending) {
         return;
     }

-    intrinsics.atomic_store(&c.impl.pending, false);
+    atomic_store(&c.impl.pending, false);

     mutex_lock(&c.impl.queue_mutex);
     waiters := c.impl.queue_head;
@@ -4,7 +4,6 @@ package sync2

 when #config(ODIN_SYNC_USE_PTHREADS, false) {

-import "intrinsics"
 import "core:time"
 import "core:sys/unix"

@@ -53,25 +52,25 @@ _RW_Mutex :: struct {
 }

 _rw_mutex_lock :: proc(rw: ^RW_Mutex) {
-    _ = intrinsics.atomic_add(&rw.impl.state, RW_Mutex_State_Writer);
+    _ = atomic_add(&rw.impl.state, RW_Mutex_State_Writer);
     mutex_lock(&rw.impl.mutex);

-    state := intrinsics.atomic_or(&rw.impl.state, RW_Mutex_State_Writer);
+    state := atomic_or(&rw.impl.state, RW_Mutex_State_Writer);
     if state & RW_Mutex_State_Reader_Mask != 0 {
         sema_wait(&rw.impl.sema);
     }
 }

 _rw_mutex_unlock :: proc(rw: ^RW_Mutex) {
-    _ = intrinsics.atomic_and(&rw.impl.state, ~RW_Mutex_State_Is_Writing);
+    _ = atomic_and(&rw.impl.state, ~RW_Mutex_State_Is_Writing);
     mutex_unlock(&rw.impl.mutex);
 }

 _rw_mutex_try_lock :: proc(rw: ^RW_Mutex) -> bool {
     if mutex_try_lock(&rw.impl.mutex) {
-        state := intrinsics.atomic_load(&rw.impl.state);
+        state := atomic_load(&rw.impl.state);
         if state & RW_Mutex_State_Reader_Mask == 0 {
-            _ = intrinsics.atomic_or(&rw.impl.state, RW_Mutex_State_Is_Writing);
+            _ = atomic_or(&rw.impl.state, RW_Mutex_State_Is_Writing);
             return true;
         }

@@ -81,22 +80,22 @@ _rw_mutex_try_lock :: proc(rw: ^RW_Mutex) -> bool {
 }

 _rw_mutex_shared_lock :: proc(rw: ^RW_Mutex) {
-    state := intrinsics.atomic_load(&rw.impl.state);
+    state := atomic_load(&rw.impl.state);
     for state & (RW_Mutex_State_Is_Writing|RW_Mutex_State_Writer_Mask) == 0 {
         ok: bool;
-        state, ok = intrinsics.atomic_cxchgweak(&rw.impl.state, state, state + RW_Mutex_State_Reader);
+        state, ok = atomic_cxchgweak(&rw.impl.state, state, state + RW_Mutex_State_Reader);
         if ok {
             return;
         }
     }

     mutex_lock(&rw.impl.mutex);
-    _ = intrinsics.atomic_add(&rw.impl.state, RW_Mutex_State_Reader);
+    _ = atomic_add(&rw.impl.state, RW_Mutex_State_Reader);
     mutex_unlock(&rw.impl.mutex);
 }

 _rw_mutex_shared_unlock :: proc(rw: ^RW_Mutex) {
-    state := intrinsics.atomic_sub(&rw.impl.state, RW_Mutex_State_Reader);
+    state := atomic_sub(&rw.impl.state, RW_Mutex_State_Reader);

     if (state & RW_Mutex_State_Reader_Mask == RW_Mutex_State_Reader) &&
        (state & RW_Mutex_State_Is_Writing != 0) {
@@ -105,15 +104,15 @@ _rw_mutex_shared_unlock :: proc(rw: ^RW_Mutex) {
 }

 _rw_mutex_try_shared_lock :: proc(rw: ^RW_Mutex) -> bool {
-    state := intrinsics.atomic_load(&rw.impl.state);
+    state := atomic_load(&rw.impl.state);
     if state & (RW_Mutex_State_Is_Writing|RW_Mutex_State_Writer_Mask) == 0 {
-        _, ok := intrinsics.atomic_cxchg(&rw.impl.state, state, state + RW_Mutex_State_Reader);
+        _, ok := atomic_cxchg(&rw.impl.state, state, state + RW_Mutex_State_Reader);
         if ok {
             return true;
         }
     }
     if mutex_try_lock(&rw.impl.mutex) {
-        _ = intrinsics.atomic_add(&rw.impl.state, RW_Mutex_State_Reader);
+        _ = atomic_add(&rw.impl.state, RW_Mutex_State_Reader);
         mutex_unlock(&rw.impl.mutex);
         return true;
     }