Merge pull request #1673 from odin-lang/new-sync
Brand New `package sync` and Atomics Intrinsics
@@ -47,24 +47,25 @@ kill_dependency :: #force_inline proc(value: $T) -> T {
 // 7.17.4 Fences
 atomic_thread_fence :: #force_inline proc(order: memory_order) {
-	switch (order) {
-	case .relaxed:
-		return
-	case .consume:
-		intrinsics.atomic_fence_acq()
-	case .acquire:
-		intrinsics.atomic_fence_acq()
-	case .release:
-		intrinsics.atomic_fence_rel()
-	case .acq_rel:
-		intrinsics.atomic_fence_acqrel()
-	case .seq_cst:
-		intrinsics.atomic_fence_acqrel()
+	assert(order != .relaxed)
+	assert(order != .consume)
+	#partial switch order {
+	case .acquire: intrinsics.atomic_thread_fence(.Acquire)
+	case .release: intrinsics.atomic_thread_fence(.Release)
+	case .acq_rel: intrinsics.atomic_thread_fence(.Acq_Rel)
+	case .seq_cst: intrinsics.atomic_thread_fence(.Seq_Cst)
 	}
 }
 
 atomic_signal_fence :: #force_inline proc(order: memory_order) {
-	atomic_thread_fence(order)
+	assert(order != .relaxed)
+	assert(order != .consume)
+	#partial switch order {
+	case .acquire: intrinsics.atomic_signal_fence(.Acquire)
+	case .release: intrinsics.atomic_signal_fence(.Release)
+	case .acq_rel: intrinsics.atomic_signal_fence(.Acq_Rel)
+	case .seq_cst: intrinsics.atomic_signal_fence(.Seq_Cst)
 	}
 }
 
 // 7.17.5 Lock-free property
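The new intrinsic takes the memory ordering as an enum argument instead of encoding it in the procedure name. As a hedged illustration of the classic release/acquire fence pairing under the new names (the publish/observe pair below is hypothetical, not part of this commit):

package example

import "core:intrinsics"

data: int
flag: bool

publish :: proc(value: int) {
	data = value
	intrinsics.atomic_thread_fence(.Release)                // earlier writes may not sink below this fence
	intrinsics.atomic_store_explicit(&flag, true, .Relaxed)
}

observe :: proc() -> (value: int, ok: bool) {
	if intrinsics.atomic_load_explicit(&flag, .Relaxed) {
		intrinsics.atomic_thread_fence(.Acquire)            // pairs with the release fence above
		return data, true
	}
	return
}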
@@ -121,13 +122,10 @@ atomic_store_explicit :: #force_inline proc(object: ^$T, desired: T, order: memo
 	assert(order != .acquire)
 	assert(order != .acq_rel)
 
-	#partial switch (order) {
-	case .relaxed:
-		intrinsics.atomic_store_relaxed(object, desired)
-	case .release:
-		intrinsics.atomic_store_rel(object, desired)
-	case .seq_cst:
-		intrinsics.atomic_store(object, desired)
+	#partial switch order {
+	case .relaxed: intrinsics.atomic_store_explicit(object, desired, .Relaxed)
+	case .release: intrinsics.atomic_store_explicit(object, desired, .Release)
+	case .seq_cst: intrinsics.atomic_store_explicit(object, desired, .Seq_Cst)
 	}
 }
 
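To make the rename concrete, a minimal before/after sketch of the same store (the variable is illustrative):

package example

import "core:intrinsics"

x: int

store_release :: proc() {
	// before this commit: intrinsics.atomic_store_rel(&x, 1)
	intrinsics.atomic_store_explicit(&x, 1, .Release)
}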
@@ -139,36 +137,26 @@ atomic_load_explicit :: #force_inline proc(object: ^$T, order: memory_order) {
 	assert(order != .release)
 	assert(order != .acq_rel)
 
-	#partial switch (order) {
-	case .relaxed:
-		return intrinsics.atomic_load_relaxed(object)
-	case .consume:
-		return intrinsics.atomic_load_acq(object)
-	case .acquire:
-		return intrinsics.atomic_load_acq(object)
-	case .seq_cst:
-		return intrinsics.atomic_load(object)
+	#partial switch order {
+	case .relaxed: return intrinsics.atomic_load_explicit(object, .Relaxed)
+	case .consume: return intrinsics.atomic_load_explicit(object, .Consume)
+	case .acquire: return intrinsics.atomic_load_explicit(object, .Acquire)
+	case .seq_cst: return intrinsics.atomic_load_explicit(object, .Seq_Cst)
 	}
 }
 
 atomic_exchange :: #force_inline proc(object: ^$T, desired: T) -> T {
-	return intrinsics.atomic_xchg(object, desired)
+	return intrinsics.atomic_exchange(object, desired)
 }
 
 atomic_exchange_explicit :: #force_inline proc(object: ^$T, desired: T, order: memory_order) -> T {
-	switch (order) {
-	case .relaxed:
-		return intrinsics.atomic_xchg_relaxed(object, desired)
-	case .consume:
-		return intrinsics.atomic_xchg_acq(object, desired)
-	case .acquire:
-		return intrinsics.atomic_xchg_acq(object, desired)
-	case .release:
-		return intrinsics.atomic_xchg_rel(object, desired)
-	case .acq_rel:
-		return intrinsics.atomic_xchg_acqrel(object, desired)
-	case .seq_cst:
-		return intrinsics.atomic_xchg(object, desired)
+	switch order {
+	case .relaxed: return intrinsics.atomic_exchange_explicit(object, desired, .Relaxed)
+	case .consume: return intrinsics.atomic_exchange_explicit(object, desired, .Consume)
+	case .acquire: return intrinsics.atomic_exchange_explicit(object, desired, .Acquire)
+	case .release: return intrinsics.atomic_exchange_explicit(object, desired, .Release)
+	case .acq_rel: return intrinsics.atomic_exchange_explicit(object, desired, .Acq_Rel)
+	case .seq_cst: return intrinsics.atomic_exchange_explicit(object, desired, .Seq_Cst)
 	}
 	return false
 }
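atomic_exchange_explicit is the natural primitive for a test-and-set lock. A minimal sketch under the new names (the Spinlock type is illustrative, not from this commit):

package example

import "core:intrinsics"

Spinlock :: struct {
	held: b32,
}

lock :: proc(l: ^Spinlock) {
	// exchange with acquire ordering; keep spinning while the previous value was 'held'
	for intrinsics.atomic_exchange_explicit(&l.held, true, .Acquire) {
		intrinsics.cpu_relax()
	}
}

unlock :: proc(l: ^Spinlock) {
	intrinsics.atomic_store_explicit(&l.held, false, .Release)
}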
@@ -189,102 +177,104 @@ atomic_exchange_explicit :: #force_inline proc(object: ^$T, desired: T, order: m
 // [success = seq_cst, failure = acquire] => failacq
 // [success = acquire, failure = relaxed] => acq_failrelaxed
 // [success = acq_rel, failure = relaxed] => acqrel_failrelaxed
-atomic_compare_exchange_strong :: #force_inline proc(object, expected: ^$T, desired: T) {
-	value, ok := intrinsics.atomic_cxchg(object, expected^, desired)
+atomic_compare_exchange_strong :: #force_inline proc(object, expected: ^$T, desired: T) -> bool {
+	value, ok := intrinsics.atomic_compare_exchange_strong(object, expected^, desired)
 	if !ok { expected^ = value }
+	return ok
 }
 
-atomic_compare_exchange_strong_explicit :: #force_inline proc(object, expected: ^$T, desired: T, success, failure: memory_order) {
+atomic_compare_exchange_strong_explicit :: #force_inline proc(object, expected: ^$T, desired: T, success, failure: memory_order) -> bool {
 	assert(failure != .release)
 	assert(failure != .acq_rel)
 
-	#partial switch (failure) {
+	value: T; ok: bool
+	#partial switch failure {
 	case .seq_cst:
 		assert(success != .relaxed)
-		#partial switch (success) {
+		#partial switch success {
 		case .seq_cst:
-			value, ok := intrinsics.atomic_cxchg(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Seq_Cst, .Seq_Cst)
 		case .acquire:
-			value, ok := intrinsics.atomic_cxchg_acq(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Acquire, .Seq_Cst)
 		case .consume:
-			value, ok := intrinsics.atomic_cxchg_acq(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Consume, .Seq_Cst)
 		case .release:
-			value, ok := intrinsics.atomic_cxchg_rel(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Release, .Seq_Cst)
 		case .acq_rel:
-			value, ok := intrinsics.atomic_cxchg_acqrel(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Acq_Rel, .Seq_Cst)
 		}
 	case .relaxed:
 		assert(success != .release)
-		#partial switch (success) {
+		#partial switch success {
 		case .relaxed:
-			value, ok := intrinsics.atomic_cxchg_relaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Relaxed, .Relaxed)
 		case .seq_cst:
-			value, ok := intrinsics.atomic_cxchg_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Seq_Cst, .Relaxed)
 		case .acquire:
-			value, ok := intrinsics.atomic_cxchg_acq_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Acquire, .Relaxed)
 		case .consume:
-			value, ok := intrinsics.atomic_cxchg_acq_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Consume, .Relaxed)
 		case .acq_rel:
-			value, ok := intrinsics.atomic_cxchg_acqrel_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Acq_Rel, .Relaxed)
 		}
 	case .consume:
-		fallthrough
+		assert(success == .seq_cst)
+		value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Seq_Cst, .Consume)
 	case .acquire:
 		assert(success == .seq_cst)
-		value, ok := intrinsics.atomic_cxchg_failacq(object, expected^, desired)
+		value, ok = intrinsics.atomic_compare_exchange_strong_explicit(object, expected^, desired, .Seq_Cst, .Acquire)
 
 	}
 	if !ok { expected^ = value }
+	return ok
 }
 
-atomic_compare_exchange_weak :: #force_inline proc(object, expected: ^$T, desired: T) {
-	value, ok := intrinsics.atomic_cxchgweak(object, expected^, desired)
+atomic_compare_exchange_weak :: #force_inline proc(object, expected: ^$T, desired: T) -> bool {
+	value, ok := intrinsics.atomic_compare_exchange_weak(object, expected^, desired)
 	if !ok { expected^ = value }
+	return ok
 }
 
-atomic_compare_exchange_weak_explicit :: #force_inline proc(object, expected: ^$T, desired: T, success, failure: memory_order) {
+atomic_compare_exchange_weak_explicit :: #force_inline proc(object, expected: ^$T, desired: T, success, failure: memory_order) -> bool {
 	assert(failure != .release)
 	assert(failure != .acq_rel)
 
-	#partial switch (failure) {
+	value: T; ok: bool
+	#partial switch failure {
 	case .seq_cst:
 		assert(success != .relaxed)
-		#partial switch (success) {
+		#partial switch success {
 		case .seq_cst:
-			value, ok := intrinsics.atomic_cxchgweak(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Seq_Cst, .Seq_Cst)
 		case .acquire:
-			value, ok := intrinsics.atomic_cxchgweak_acq(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Acquire, .Seq_Cst)
 		case .consume:
-			value, ok := intrinsics.atomic_cxchgweak_acq(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Consume, .Seq_Cst)
 		case .release:
-			value, ok := intrinsics.atomic_cxchgweak_rel(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Release, .Seq_Cst)
 		case .acq_rel:
-			value, ok := intrinsics.atomic_cxchgweak_acqrel(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Acq_Rel, .Seq_Cst)
 		}
 	case .relaxed:
 		assert(success != .release)
-		#partial switch (success) {
+		#partial switch success {
 		case .relaxed:
-			value, ok := intrinsics.atomic_cxchgweak_relaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Relaxed, .Relaxed)
 		case .seq_cst:
-			value, ok := intrinsics.atomic_cxchgweak_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Seq_Cst, .Relaxed)
 		case .acquire:
-			value, ok := intrinsics.atomic_cxchgweak_acq_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Acquire, .Relaxed)
 		case .consume:
-			value, ok := intrinsics.atomic_cxchgweak_acq_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Consume, .Relaxed)
 		case .acq_rel:
-			value, ok := intrinsics.atomic_cxchgweak_acqrel_failrelaxed(object, expected^, desired)
+			value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Acq_Rel, .Relaxed)
 		}
 	case .consume:
-		fallthrough
+		assert(success == .seq_cst)
+		value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Seq_Cst, .Consume)
 	case .acquire:
 		assert(success == .seq_cst)
-		value, ok := intrinsics.atomic_cxchgweak_failacq(object, expected^, desired)
+		value, ok = intrinsics.atomic_compare_exchange_weak_explicit(object, expected^, desired, .Seq_Cst, .Acquire)
 
 	}
 	if !ok { expected^ = value }
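The wrappers above return the witnessed value and write it back through `expected` on failure, which is exactly what a retry loop needs. A sketch using the weak intrinsic directly (the max-update helper is hypothetical):

package example

import "core:intrinsics"

// Lock-free "store the maximum" via compare-and-swap.
atomic_update_max :: proc(dst: ^int, candidate: int) {
	old := intrinsics.atomic_load_explicit(dst, .Relaxed)
	for old < candidate {
		value, ok := intrinsics.atomic_compare_exchange_weak_explicit(dst, old, candidate, .Relaxed, .Relaxed)
		if ok {
			return
		}
		old = value // another thread won the race; retry against the value it wrote
	}
}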
@@ -297,19 +287,14 @@ atomic_fetch_add :: #force_inline proc(object: ^$T, operand: T) -> T {
 }
 
 atomic_fetch_add_explicit :: #force_inline proc(object: ^$T, operand: T, order: memory_order) -> T {
-	switch (order) {
-	case .relaxed:
-		return intrinsics.atomic_add_relaxed(object, operand)
-	case .consume:
-		return intrinsics.atomic_add_acq(object, operand)
-	case .acquire:
-		return intrinsics.atomic_add_acq(object, operand)
-	case .release:
-		return intrinsics.atomic_add_rel(object, operand)
-	case .acq_rel:
-		return intrinsics.atomic_add_acqrel(object, operand)
-	case .seq_cst:
-		return intrinsics.atomic_add(object, operand)
+	switch order {
+	case .relaxed: return intrinsics.atomic_add_explicit(object, operand, .Relaxed)
+	case .consume: return intrinsics.atomic_add_explicit(object, operand, .Consume)
+	case .acquire: return intrinsics.atomic_add_explicit(object, operand, .Acquire)
+	case .release: return intrinsics.atomic_add_explicit(object, operand, .Release)
+	case .acq_rel: return intrinsics.atomic_add_explicit(object, operand, .Acq_Rel)
+	case:          fallthrough
+	case .seq_cst: return intrinsics.atomic_add_explicit(object, operand, .Seq_Cst)
 	}
 }
 
@@ -318,19 +303,14 @@ atomic_fetch_sub :: #force_inline proc(object: ^$T, operand: T) -> T {
 }
 
 atomic_fetch_sub_explicit :: #force_inline proc(object: ^$T, operand: T, order: memory_order) -> T {
-	switch (order) {
-	case .relaxed:
-		return intrinsics.atomic_sub_relaxed(object, operand)
-	case .consume:
-		return intrinsics.atomic_sub_acq(object, operand)
-	case .acquire:
-		return intrinsics.atomic_sub_acq(object, operand)
-	case .release:
-		return intrinsics.atomic_sub_rel(object, operand)
-	case .acq_rel:
-		return intrinsics.atomic_sub_acqrel(object, operand)
-	case .seq_cst:
-		return intrinsics.atomic_sub(object, operand)
+	switch order {
+	case .relaxed: return intrinsics.atomic_sub_explicit(object, operand, .Relaxed)
+	case .consume: return intrinsics.atomic_sub_explicit(object, operand, .Consume)
+	case .acquire: return intrinsics.atomic_sub_explicit(object, operand, .Acquire)
+	case .release: return intrinsics.atomic_sub_explicit(object, operand, .Release)
+	case .acq_rel: return intrinsics.atomic_sub_explicit(object, operand, .Acq_Rel)
+	case:          fallthrough
+	case .seq_cst: return intrinsics.atomic_sub_explicit(object, operand, .Seq_Cst)
 	}
 }
 
@@ -339,19 +319,14 @@ atomic_fetch_or :: #force_inline proc(object: ^$T, operand: T) -> T {
 }
 
 atomic_fetch_or_explicit :: #force_inline proc(object: ^$T, operand: T, order: memory_order) -> T {
-	switch (order) {
-	case .relaxed:
-		return intrinsics.atomic_or_relaxed(object, operand)
-	case .consume:
-		return intrinsics.atomic_or_acq(object, operand)
-	case .acquire:
-		return intrinsics.atomic_or_acq(object, operand)
-	case .release:
-		return intrinsics.atomic_or_rel(object, operand)
-	case .acq_rel:
-		return intrinsics.atomic_or_acqrel(object, operand)
-	case .seq_cst:
-		return intrinsics.atomic_or(object, operand)
+	switch order {
+	case .relaxed: return intrinsics.atomic_or_explicit(object, operand, .Relaxed)
+	case .consume: return intrinsics.atomic_or_explicit(object, operand, .Consume)
+	case .acquire: return intrinsics.atomic_or_explicit(object, operand, .Acquire)
+	case .release: return intrinsics.atomic_or_explicit(object, operand, .Release)
+	case .acq_rel: return intrinsics.atomic_or_explicit(object, operand, .Acq_Rel)
+	case:          fallthrough
+	case .seq_cst: return intrinsics.atomic_or_explicit(object, operand, .Seq_Cst)
 	}
 }
 
@@ -360,19 +335,14 @@ atomic_fetch_xor :: #force_inline proc(object: ^$T, operand: T) -> T {
 }
 
 atomic_fetch_xor_explicit :: #force_inline proc(object: ^$T, operand: T, order: memory_order) -> T {
-	switch (order) {
-	case .relaxed:
-		return intrinsics.atomic_xor_relaxed(object, operand)
-	case .consume:
-		return intrinsics.atomic_xor_acq(object, operand)
-	case .acquire:
-		return intrinsics.atomic_xor_acq(object, operand)
-	case .release:
-		return intrinsics.atomic_xor_rel(object, operand)
-	case .acq_rel:
-		return intrinsics.atomic_xor_acqrel(object, operand)
-	case .seq_cst:
-		return intrinsics.atomic_xor(object, operand)
+	switch order {
+	case .relaxed: return intrinsics.atomic_xor_explicit(object, operand, .Relaxed)
+	case .consume: return intrinsics.atomic_xor_explicit(object, operand, .Consume)
+	case .acquire: return intrinsics.atomic_xor_explicit(object, operand, .Acquire)
+	case .release: return intrinsics.atomic_xor_explicit(object, operand, .Release)
+	case .acq_rel: return intrinsics.atomic_xor_explicit(object, operand, .Acq_Rel)
+	case:          fallthrough
+	case .seq_cst: return intrinsics.atomic_xor_explicit(object, operand, .Seq_Cst)
 	}
 }
 
@@ -380,19 +350,14 @@ atomic_fetch_and :: #force_inline proc(object: ^$T, operand: T) -> T {
 	return intrinsics.atomic_and(object, operand)
 }
 atomic_fetch_and_explicit :: #force_inline proc(object: ^$T, operand: T, order: memory_order) -> T {
-	switch (order) {
-	case .relaxed:
-		return intrinsics.atomic_and_relaxed(object, operand)
-	case .consume:
-		return intrinsics.atomic_and_acq(object, operand)
-	case .acquire:
-		return intrinsics.atomic_and_acq(object, operand)
-	case .release:
-		return intrinsics.atomic_and_rel(object, operand)
-	case .acq_rel:
-		return intrinsics.atomic_and_acqrel(object, operand)
-	case .seq_cst:
-		return intrinsics.atomic_and(object, operand)
+	switch order {
+	case .relaxed: return intrinsics.atomic_and_explicit(object, operand, .Relaxed)
+	case .consume: return intrinsics.atomic_and_explicit(object, operand, .Consume)
+	case .acquire: return intrinsics.atomic_and_explicit(object, operand, .Acquire)
+	case .release: return intrinsics.atomic_and_explicit(object, operand, .Release)
+	case .acq_rel: return intrinsics.atomic_and_explicit(object, operand, .Acq_Rel)
+	case:          fallthrough
+	case .seq_cst: return intrinsics.atomic_and_explicit(object, operand, .Seq_Cst)
 	}
 }
 
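As a usage sketch against these C11-style wrappers (the package path and counter are assumptions, not from this commit), a relaxed event counter:

package example

import "core:c/libc"

events: int

record_event :: proc() {
	// the ordering of surrounding memory does not matter for a pure statistic
	libc.atomic_fetch_add_explicit(&events, 1, .relaxed)
}

read_events :: proc() -> int {
	return libc.atomic_load_explicit(&events, .seq_cst)
}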
@@ -62,77 +62,44 @@ syscall :: proc(id: uintptr, args: ..uintptr) -> uintptr ---
 
 
 // Atomics
-atomic_fence        :: proc() ---
-atomic_fence_acq    :: proc() ---
-atomic_fence_rel    :: proc() ---
-atomic_fence_acqrel :: proc() ---
+Atomic_Memory_Order :: enum {
+	Relaxed = 0, // Unordered
+	Consume = 1, // Monotonic
+	Acquire = 2,
+	Release = 3,
+	Acq_Rel = 4,
+	Seq_Cst = 5,
+}
 
-atomic_store           :: proc(dst: ^$T, val: T) ---
-atomic_store_rel       :: proc(dst: ^$T, val: T) ---
-atomic_store_relaxed   :: proc(dst: ^$T, val: T) ---
-atomic_store_unordered :: proc(dst: ^$T, val: T) ---
+atomic_thread_fence :: proc(order: Atomic_Memory_Order) ---
+atomic_signal_fence :: proc(order: Atomic_Memory_Order) ---
+
+atomic_store          :: proc(dst: ^$T, val: T) ---
+atomic_store_explicit :: proc(dst: ^$T, val: T, order: Atomic_Memory_Order) ---
 
 atomic_load           :: proc(dst: ^$T) -> T ---
-atomic_load_acq       :: proc(dst: ^$T) -> T ---
-atomic_load_relaxed   :: proc(dst: ^$T) -> T ---
-atomic_load_unordered :: proc(dst: ^$T) -> T ---
+atomic_load_explicit  :: proc(dst: ^$T, order: Atomic_Memory_Order) -> T ---
 
-atomic_add          :: proc(dst: ^$T, val: T) -> T ---
-atomic_add_acq      :: proc(dst: ^$T, val: T) -> T ---
-atomic_add_rel      :: proc(dst: ^$T, val: T) -> T ---
-atomic_add_acqrel   :: proc(dst: ^$T, val: T) -> T ---
-atomic_add_relaxed  :: proc(dst: ^$T, val: T) -> T ---
-atomic_sub          :: proc(dst: ^$T, val: T) -> T ---
-atomic_sub_acq      :: proc(dst: ^$T, val: T) -> T ---
-atomic_sub_rel      :: proc(dst: ^$T, val: T) -> T ---
-atomic_sub_acqrel   :: proc(dst: ^$T, val: T) -> T ---
-atomic_sub_relaxed  :: proc(dst: ^$T, val: T) -> T ---
-atomic_and          :: proc(dst: ^$T, val: T) -> T ---
-atomic_and_acq      :: proc(dst: ^$T, val: T) -> T ---
-atomic_and_rel      :: proc(dst: ^$T, val: T) -> T ---
-atomic_and_acqrel   :: proc(dst: ^$T, val: T) -> T ---
-atomic_and_relaxed  :: proc(dst: ^$T, val: T) -> T ---
-atomic_nand         :: proc(dst: ^$T, val: T) -> T ---
-atomic_nand_acq     :: proc(dst: ^$T, val: T) -> T ---
-atomic_nand_rel     :: proc(dst: ^$T, val: T) -> T ---
-atomic_nand_acqrel  :: proc(dst: ^$T, val: T) -> T ---
-atomic_nand_relaxed :: proc(dst: ^$T, val: T) -> T ---
-atomic_or           :: proc(dst: ^$T, val: T) -> T ---
-atomic_or_acq       :: proc(dst: ^$T, val: T) -> T ---
-atomic_or_rel       :: proc(dst: ^$T, val: T) -> T ---
-atomic_or_acqrel    :: proc(dst: ^$T, val: T) -> T ---
-atomic_or_relaxed   :: proc(dst: ^$T, val: T) -> T ---
-atomic_xor          :: proc(dst: ^$T, val: T) -> T ---
-atomic_xor_acq      :: proc(dst: ^$T, val: T) -> T ---
-atomic_xor_rel      :: proc(dst: ^$T, val: T) -> T ---
-atomic_xor_acqrel   :: proc(dst: ^$T, val: T) -> T ---
-atomic_xor_relaxed  :: proc(dst: ^$T, val: T) -> T ---
+atomic_add               :: proc(dst: ^$T, val: T) -> T ---
+atomic_add_explicit      :: proc(dst: ^$T, val: T, order: Atomic_Memory_Order) -> T ---
+atomic_sub               :: proc(dst: ^$T, val: T) -> T ---
+atomic_sub_explicit      :: proc(dst: ^$T, val: T, order: Atomic_Memory_Order) -> T ---
+atomic_and               :: proc(dst: ^$T, val: T) -> T ---
+atomic_and_explicit      :: proc(dst: ^$T, val: T, order: Atomic_Memory_Order) -> T ---
+atomic_nand              :: proc(dst: ^$T, val: T) -> T ---
+atomic_nand_explicit     :: proc(dst: ^$T, val: T, order: Atomic_Memory_Order) -> T ---
+atomic_or                :: proc(dst: ^$T, val: T) -> T ---
+atomic_or_explicit       :: proc(dst: ^$T, val: T, order: Atomic_Memory_Order) -> T ---
+atomic_xor               :: proc(dst: ^$T, val: T) -> T ---
+atomic_xor_explicit      :: proc(dst: ^$T, val: T, order: Atomic_Memory_Order) -> T ---
+atomic_exchange          :: proc(dst: ^$T, val: T) -> T ---
+atomic_exchange_explicit :: proc(dst: ^$T, val: T, order: Atomic_Memory_Order) -> T ---
 
-atomic_xchg         :: proc(dst: ^$T, val: T) -> T ---
-atomic_xchg_acq     :: proc(dst: ^$T, val: T) -> T ---
-atomic_xchg_rel     :: proc(dst: ^$T, val: T) -> T ---
-atomic_xchg_acqrel  :: proc(dst: ^$T, val: T) -> T ---
-atomic_xchg_relaxed :: proc(dst: ^$T, val: T) -> T ---
+atomic_compare_exchange_strong          :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
+atomic_compare_exchange_strong_explicit :: proc(dst: ^$T, old, new: T, success, failure: Atomic_Memory_Order) -> (T, bool) #optional_ok ---
+atomic_compare_exchange_weak            :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
+atomic_compare_exchange_weak_explicit   :: proc(dst: ^$T, old, new: T, success, failure: Atomic_Memory_Order) -> (T, bool) #optional_ok ---
 
-atomic_cxchg                    :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchg_acq                :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchg_rel                :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchg_acqrel             :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchg_relaxed            :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchg_failrelaxed        :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchg_failacq            :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchg_acq_failrelaxed    :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchg_acqrel_failrelaxed :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-
-atomic_cxchgweak                    :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchgweak_acq                :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchgweak_rel                :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchgweak_acqrel             :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchgweak_relaxed            :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchgweak_failrelaxed        :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchgweak_failacq            :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchgweak_acq_failrelaxed    :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
-atomic_cxchgweak_acqrel_failrelaxed :: proc(dst: ^$T, old, new: T) -> (T, bool) #optional_ok ---
 
 // Constant type tests
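A small sketch of the new declarations in use (the reference-count helper is hypothetical):

package example

import "core:intrinsics"

// The final decrement must synchronize with every earlier release,
// hence acquire/release ordering on the subtraction.
release_ref :: proc(rc: ^int) -> (was_last: bool) {
	return intrinsics.atomic_sub_explicit(rc, 1, .Acq_Rel) == 1
}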
@@ -16,7 +16,7 @@ zero_explicit :: proc "contextless" (data: rawptr, len: int) -> rawptr {
 	// equivalent semantics to those provided by the C11 Annex K 3.7.4.1
 	// memset_s call.
 	intrinsics.mem_zero_volatile(data, len) // Use the volatile mem_zero
-	intrinsics.atomic_fence()                // Prevent reordering
+	intrinsics.atomic_thread_fence(.Seq_Cst) // Prevent reordering
 	return data
 }
 zero_item :: proc "contextless" (item: $P/^$T) {
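Typical use of zero_explicit is wiping key material so the optimizer cannot elide the store; a minimal sketch (the scrubbing helper is illustrative):

package example

import "core:mem"

scrub :: proc(secret: []byte) {
	// volatile zero plus a seq_cst fence keeps the wipe from being optimized away
	mem.zero_explicit(raw_data(secret), len(secret))
}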
@@ -1,7 +1,7 @@
 //+private
 package mem_virtual
 
-import sync "core:sync/sync2"
+import "core:sync"
 
 Platform_Memory_Block :: struct {
 	block: Memory_Block,
@@ -1,6 +1,6 @@
 package os2
 
-import sync "core:sync/sync2"
+import "core:sync"
 import "core:time"
 import "core:runtime"
 
@@ -2,167 +2,44 @@ package sync
 
 import "core:intrinsics"
 
-Ordering :: enum {
-	Relaxed, // Monotonic
-	Release,
-	Acquire,
-	Acquire_Release,
-	Sequentially_Consistent,
-}
-
-strongest_failure_ordering_table := [Ordering]Ordering{
-	.Relaxed                 = .Relaxed,
-	.Release                 = .Relaxed,
-	.Acquire                 = .Acquire,
-	.Acquire_Release         = .Acquire,
-	.Sequentially_Consistent = .Sequentially_Consistent,
-}
-
-strongest_failure_ordering :: #force_inline proc(order: Ordering) -> Ordering {
-	return strongest_failure_ordering_table[order]
-}
-
-fence :: #force_inline proc($order: Ordering) {
-	when order == .Relaxed { #panic("there is no such thing as a relaxed fence") }
-	else when order == .Release { intrinsics.atomic_fence_rel() }
-	else when order == .Acquire { intrinsics.atomic_fence_acq() }
-	else when order == .Acquire_Release { intrinsics.atomic_fence_acqrel() }
-	else when order == .Sequentially_Consistent { intrinsics.atomic_fence() }
-	else { #panic("unknown order") }
-}
+cpu_relax :: intrinsics.cpu_relax
+
+/*
+Atomic_Memory_Order :: enum {
+	Relaxed = 0,
+	Consume = 1,
+	Acquire = 2,
+	Release = 3,
+	Acq_Rel = 4,
+	Seq_Cst = 5,
+}
+*/
+Atomic_Memory_Order :: intrinsics.Atomic_Memory_Order
 
-atomic_store :: #force_inline proc(dst: ^$T, val: T, $order: Ordering) {
-	when order == .Relaxed { intrinsics.atomic_store_relaxed(dst, val) }
-	else when order == .Release { intrinsics.atomic_store_rel(dst, val) }
-	else when order == .Sequentially_Consistent { intrinsics.atomic_store(dst, val) }
-	else when order == .Acquire { #panic("there is not such thing as an acquire store") }
-	else when order == .Acquire_Release { #panic("there is not such thing as an acquire/release store") }
-	else { #panic("unknown order") }
-}
-
-atomic_load :: #force_inline proc(dst: ^$T, $order: Ordering) -> T {
-	when order == .Relaxed { return intrinsics.atomic_load_relaxed(dst) }
-	else when order == .Acquire { return intrinsics.atomic_load_acq(dst) }
-	else when order == .Sequentially_Consistent { return intrinsics.atomic_load(dst) }
-	else when order == .Release { #panic("there is no such thing as a release load") }
-	else when order == .Acquire_Release { #panic("there is no such thing as an acquire/release load") }
-	else { #panic("unknown order") }
-}
-
-atomic_swap :: #force_inline proc(dst: ^$T, val: T, $order: Ordering) -> T {
-	when order == .Relaxed { return intrinsics.atomic_xchg_relaxed(dst, val) }
-	else when order == .Release { return intrinsics.atomic_xchg_rel(dst, val) }
-	else when order == .Acquire { return intrinsics.atomic_xchg_acq(dst, val) }
-	else when order == .Acquire_Release { return intrinsics.atomic_xchg_acqrel(dst, val) }
-	else when order == .Sequentially_Consistent { return intrinsics.atomic_xchg(dst, val) }
-	else { #panic("unknown order") }
-}
-
-atomic_compare_exchange :: #force_inline proc(dst: ^$T, old, new: T, $success, $failure: Ordering) -> (val: T, ok: bool) {
-	when failure == .Relaxed {
-		when success == .Relaxed { return intrinsics.atomic_cxchg_relaxed(dst, old, new) }
-		else when success == .Acquire { return intrinsics.atomic_cxchg_acq_failrelaxed(dst, old, new) }
-		else when success == .Acquire_Release { return intrinsics.atomic_cxchg_acqrel_failrelaxed(dst, old, new) }
-		else when success == .Sequentially_Consistent { return intrinsics.atomic_cxchg_failrelaxed(dst, old, new) }
-		else when success == .Release { return intrinsics.atomic_cxchg_rel(dst, old, new) }
-		else { #panic("an unknown ordering combination") }
-	} else when failure == .Acquire {
-		when success == .Release { return intrinsics.atomic_cxchg_acqrel(dst, old, new) }
-		else when success == .Acquire { return intrinsics.atomic_cxchg_acq(dst, old, new) }
-		else { #panic("an unknown ordering combination") }
-	} else when failure == .Sequentially_Consistent {
-		when success == .Sequentially_Consistent { return intrinsics.atomic_cxchg(dst, old, new) }
-		else { #panic("an unknown ordering combination") }
-	} else when failure == .Acquire_Release {
-		#panic("there is not such thing as an acquire/release failure ordering")
-	} else when failure == .Release {
-		when success == .Acquire { return instrinsics.atomic_cxchg_failacq(dst, old, new) }
-		else { #panic("an unknown ordering combination") }
-	} else {
-		return T{}, false
-	}
-
-}
-
-atomic_compare_exchange_weak :: #force_inline proc(dst: ^$T, old, new: T, $success, $failure: Ordering) -> (val: T, ok: bool) {
-	when failure == .Relaxed {
-		when success == .Relaxed { return intrinsics.atomic_cxchgweak_relaxed(dst, old, new) }
-		else when success == .Acquire { return intrinsics.atomic_cxchgweak_acq_failrelaxed(dst, old, new) }
-		else when success == .Acquire_Release { return intrinsics.atomic_cxchgweak_acqrel_failrelaxed(dst, old, new) }
-		else when success == .Sequentially_Consistent { return intrinsics.atomic_cxchgweak_failrelaxed(dst, old, new) }
-		else when success == .Release { return intrinsics.atomic_cxchgweak_rel(dst, old, new) }
-		else { #panic("an unknown ordering combination") }
-	} else when failure == .Acquire {
-		when success == .Release { return intrinsics.atomic_cxchgweak_acqrel(dst, old, new) }
-		else when success == .Acquire { return intrinsics.atomic_cxchgweak_acq(dst, old, new) }
-		else { #panic("an unknown ordering combination") }
-	} else when failure == .Sequentially_Consistent {
-		when success == .Sequentially_Consistent { return intrinsics.atomic_cxchgweak(dst, old, new) }
-		else { #panic("an unknown ordering combination") }
-	} else when failure == .Acquire_Release {
-		#panic("there is not such thing as an acquire/release failure ordering")
-	} else when failure == .Release {
-		when success == .Acquire { return intrinsics.atomic_cxchgweak_failacq(dst, old, new) }
-		else { #panic("an unknown ordering combination") }
-	} else {
-		return T{}, false
-	}
-
-}
-
-atomic_add :: #force_inline proc(dst: ^$T, val: T, $order: Ordering) -> T {
-	when order == .Relaxed { return intrinsics.atomic_add_relaxed(dst, val) }
-	else when order == .Release { return intrinsics.atomic_add_rel(dst, val) }
-	else when order == .Acquire { return intrinsics.atomic_add_acq(dst, val) }
-	else when order == .Acquire_Release { return intrinsics.atomic_add_acqrel(dst, val) }
-	else when order == .Sequentially_Consistent { return intrinsics.atomic_add(dst, val) }
-	else { #panic("unknown order") }
-}
-
-atomic_sub :: #force_inline proc(dst: ^$T, val: T, $order: Ordering) -> T {
-	when order == .Relaxed { return intrinsics.atomic_sub_relaxed(dst, val) }
-	else when order == .Release { return intrinsics.atomic_sub_rel(dst, val) }
-	else when order == .Acquire { return intrinsics.atomic_sub_acq(dst, val) }
-	else when order == .Acquire_Release { return intrinsics.atomic_sub_acqrel(dst, val) }
-	else when order == .Sequentially_Consistent { return intrinsics.atomic_sub(dst, val) }
-	else { #panic("unknown order") }
-}
-
-atomic_and :: #force_inline proc(dst: ^$T, val: T, $order: Ordering) -> T {
-	when order == .Relaxed { return intrinsics.atomic_and_relaxed(dst, val) }
-	else when order == .Release { return intrinsics.atomic_and_rel(dst, val) }
-	else when order == .Acquire { return intrinsics.atomic_and_acq(dst, val) }
-	else when order == .Acquire_Release { return intrinsics.atomic_and_acqrel(dst, val) }
-	else when order == .Sequentially_Consistent { return intrinsics.atomic_and(dst, val) }
-	else { #panic("unknown order") }
-}
-
-atomic_nand :: #force_inline proc(dst: ^$T, val: T, $order: Ordering) -> T {
-	when order == .Relaxed { return intrinsics.atomic_nand_relaxed(dst, val) }
-	else when order == .Release { return intrinsics.atomic_nand_rel(dst, val) }
-	else when order == .Acquire { return intrinsics.atomic_nand_acq(dst, val) }
-	else when order == .Acquire_Release { return intrinsics.atomic_nand_acqrel(dst, val) }
-	else when order == .Sequentially_Consistent { return intrinsics.atomic_nand(dst, val) }
-	else { #panic("unknown order") }
-}
-
-atomic_or :: #force_inline proc(dst: ^$T, val: T, $order: Ordering) -> T {
-	when order == .Relaxed { return intrinsics.atomic_or_relaxed(dst, val) }
-	else when order == .Release { return intrinsics.atomic_or_rel(dst, val) }
-	else when order == .Acquire { return intrinsics.atomic_or_acq(dst, val) }
-	else when order == .Acquire_Release { return intrinsics.atomic_or_acqrel(dst, val) }
-	else when order == .Sequentially_Consistent { return intrinsics.atomic_or(dst, val) }
-	else { #panic("unknown order") }
-}
-
-atomic_xor :: #force_inline proc(dst: ^$T, val: T, $order: Ordering) -> T {
-	when order == .Relaxed { return intrinsics.atomic_xor_relaxed(dst, val) }
-	else when order == .Release { return intrinsics.atomic_xor_rel(dst, val) }
-	else when order == .Acquire { return intrinsics.atomic_xor_acq(dst, val) }
-	else when order == .Acquire_Release { return intrinsics.atomic_xor_acqrel(dst, val) }
-	else when order == .Sequentially_Consistent { return intrinsics.atomic_xor(dst, val) }
-	else { #panic("unknown order") }
-}
+atomic_thread_fence :: intrinsics.atomic_thread_fence
+atomic_signal_fence :: intrinsics.atomic_signal_fence
+
+atomic_store          :: intrinsics.atomic_store
+atomic_store_explicit :: intrinsics.atomic_store_explicit
+
+atomic_load          :: intrinsics.atomic_load
+atomic_load_explicit :: intrinsics.atomic_load_explicit
+
+atomic_add           :: intrinsics.atomic_add
+atomic_add_explicit  :: intrinsics.atomic_add_explicit
+atomic_sub           :: intrinsics.atomic_sub
+atomic_sub_explicit  :: intrinsics.atomic_sub_explicit
+atomic_and           :: intrinsics.atomic_and
+atomic_and_explicit  :: intrinsics.atomic_and_explicit
+atomic_nand          :: intrinsics.atomic_nand
+atomic_nand_explicit :: intrinsics.atomic_nand_explicit
+atomic_or            :: intrinsics.atomic_or
+atomic_or_explicit   :: intrinsics.atomic_or_explicit
+atomic_xor           :: intrinsics.atomic_xor
+atomic_xor_explicit  :: intrinsics.atomic_xor_explicit
+
+atomic_exchange          :: intrinsics.atomic_exchange
+atomic_exchange_explicit :: intrinsics.atomic_exchange_explicit
+
+// Returns value and optional ok boolean
+atomic_compare_exchange_strong          :: intrinsics.atomic_compare_exchange_strong
+atomic_compare_exchange_strong_explicit :: intrinsics.atomic_compare_exchange_strong_explicit
+atomic_compare_exchange_weak            :: intrinsics.atomic_compare_exchange_weak
+atomic_compare_exchange_weak_explicit   :: intrinsics.atomic_compare_exchange_weak_explicit
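With the aliases above, call sites simply go through core:sync; a small usage sketch (the ticket counter is illustrative):

package example

import "core:sync"

ticket: u64

next_ticket :: proc() -> u64 {
	// sync.atomic_add defaults to seq_cst; relaxed is enough for a ticket number
	return sync.atomic_add_explicit(&ticket, 1, .Relaxed)
}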
@@ -1,80 +0,0 @@
package sync


/*
A barrier enabling multiple threads to synchronize the beginning of some computation

Example:

	package example

	import "core:fmt"
	import "core:sync"
	import "core:thread"

	barrier := &sync.Barrier{};

	main :: proc() {
		fmt.println("Start");

		THREAD_COUNT :: 4;
		threads: [THREAD_COUNT]^thread.Thread;

		sync.barrier_init(barrier, THREAD_COUNT);
		defer sync.barrier_destroy(barrier);


		for _, i in threads {
			threads[i] = thread.create_and_start(proc(t: ^thread.Thread) {
				// Same messages will be printed together but without any interleaving
				fmt.println("Getting ready!");
				sync.barrier_wait(barrier);
				fmt.println("Off their marks they go!");
			});
		}

		for t in threads {
			thread.destroy(t); // join and free thread
		}
		fmt.println("Finished");
	}
*/
Barrier :: struct {
	mutex: Blocking_Mutex,
	cond:  Condition,

	index:         int,
	generation_id: int,
	thread_count:  int,
}

barrier_init :: proc(b: ^Barrier, thread_count: int) {
	blocking_mutex_init(&b.mutex)
	condition_init(&b.cond, &b.mutex)
	b.index = 0
	b.generation_id = 0
	b.thread_count = thread_count
}

barrier_destroy :: proc(b: ^Barrier) {
	blocking_mutex_destroy(&b.mutex)
	condition_destroy(&b.cond)
}

// Block the current thread until all threads have rendezvoused
// Barrier can be reused after all threads rendezvoused once, and can be used continuously
barrier_wait :: proc(b: ^Barrier) -> (is_leader: bool) {
	blocking_mutex_lock(&b.mutex)
	defer blocking_mutex_unlock(&b.mutex)
	local_gen := b.generation_id
	b.index += 1
	if b.index < b.thread_count {
		for local_gen == b.generation_id && b.index < b.thread_count {
			condition_wait_for(&b.cond)
		}
		return false
	}

	b.index = 0
	b.generation_id += 1
	condition_broadcast(&b.cond)
	return true
}
@@ -1,889 +0,0 @@
package sync

import "core:mem"
import "core:time"
import "core:intrinsics"
import "core:math/rand"

_, _ :: time, rand

Channel_Direction :: enum i8 {
	Both =  0,
	Send = +1,
	Recv = -1,
}

Channel :: struct($T: typeid, $Direction := Channel_Direction.Both) {
	using _internal: ^Raw_Channel,
}

channel_init :: proc(ch: ^$C/Channel($T, $D), cap := 0, allocator := context.allocator) {
	context.allocator = allocator
	ch._internal = raw_channel_create(size_of(T), align_of(T), cap)
	return
}

channel_make :: proc($T: typeid, cap := 0, allocator := context.allocator) -> (ch: Channel(T, .Both)) {
	context.allocator = allocator
	ch._internal = raw_channel_create(size_of(T), align_of(T), cap)
	return
}

channel_make_send :: proc($T: typeid, cap := 0, allocator := context.allocator) -> (ch: Channel(T, .Send)) {
	context.allocator = allocator
	ch._internal = raw_channel_create(size_of(T), align_of(T), cap)
	return
}
channel_make_recv :: proc($T: typeid, cap := 0, allocator := context.allocator) -> (ch: Channel(T, .Recv)) {
	context.allocator = allocator
	ch._internal = raw_channel_create(size_of(T), align_of(T), cap)
	return
}

channel_destroy :: proc(ch: $C/Channel($T, $D)) {
	raw_channel_destroy(ch._internal)
}

channel_as_send :: proc(ch: $C/Channel($T, .Both)) -> (res: Channel(T, .Send)) {
	res._internal = ch._internal
	return
}

channel_as_recv :: proc(ch: $C/Channel($T, .Both)) -> (res: Channel(T, .Recv)) {
	res._internal = ch._internal
	return
}


channel_len :: proc(ch: $C/Channel($T, $D)) -> int {
	return ch._internal.len if ch._internal != nil else 0
}
channel_cap :: proc(ch: $C/Channel($T, $D)) -> int {
	return ch._internal.cap if ch._internal != nil else 0
}


channel_send :: proc(ch: $C/Channel($T, $D), msg: T, loc := #caller_location) where D >= .Both {
	msg := msg
	_ = raw_channel_send_impl(ch._internal, &msg, /*block*/true, loc)
}
channel_try_send :: proc(ch: $C/Channel($T, $D), msg: T, loc := #caller_location) -> bool where D >= .Both {
	msg := msg
	return raw_channel_send_impl(ch._internal, &msg, /*block*/false, loc)
}

channel_recv :: proc(ch: $C/Channel($T, $D), loc := #caller_location) -> (msg: T) where D <= .Both {
	c := ch._internal
	if c == nil {
		panic(message="cannot recv message; channel is nil", loc=loc)
	}
	mutex_lock(&c.mutex)
	raw_channel_recv_impl(c, &msg, loc)
	mutex_unlock(&c.mutex)
	return
}
channel_try_recv :: proc(ch: $C/Channel($T, $D), loc := #caller_location) -> (msg: T, ok: bool) where D <= .Both {
	c := ch._internal
	if c != nil && mutex_try_lock(&c.mutex) {
		if c.len > 0 {
			raw_channel_recv_impl(c, &msg, loc)
			ok = true
		}
		mutex_unlock(&c.mutex)
	}
	return
}
channel_try_recv_ptr :: proc(ch: $C/Channel($T, $D), msg: ^T, loc := #caller_location) -> (ok: bool) where D <= .Both {
	res: T
	res, ok = channel_try_recv(ch, loc)
	if ok && msg != nil {
		msg^ = res
	}
	return
}


channel_is_nil :: proc(ch: $C/Channel($T, $D)) -> bool {
	return ch._internal == nil
}
channel_is_open :: proc(ch: $C/Channel($T, $D)) -> bool {
	c := ch._internal
	return c != nil && !c.closed
}


channel_eq :: proc(a, b: $C/Channel($T, $D)) -> bool {
	return a._internal == b._internal
}
channel_ne :: proc(a, b: $C/Channel($T, $D)) -> bool {
	return a._internal != b._internal
}


channel_can_send :: proc(ch: $C/Channel($T, $D)) -> (ok: bool) where D >= .Both {
	return raw_channel_can_send(ch._internal)
}
channel_can_recv :: proc(ch: $C/Channel($T, $D)) -> (ok: bool) where D <= .Both {
	return raw_channel_can_recv(ch._internal)
}


channel_peek :: proc(ch: $C/Channel($T, $D)) -> int {
	c := ch._internal
	if c == nil {
		return -1
	}
	if intrinsics.atomic_load(&c.closed) {
		return -1
	}
	return intrinsics.atomic_load(&c.len)
}


channel_close :: proc(ch: $C/Channel($T, $D), loc := #caller_location) {
	raw_channel_close(ch._internal, loc)
}


channel_iterator :: proc(ch: $C/Channel($T, $D)) -> (msg: T, ok: bool) where D <= .Both {
	c := ch._internal
	if c == nil {
		return
	}

	if !c.closed || c.len > 0 {
		msg, ok = channel_recv(ch), true
	}
	return
}
channel_drain :: proc(ch: $C/Channel($T, $D)) where D >= .Both {
	raw_channel_drain(ch._internal)
}


channel_move :: proc(dst: $C1/Channel($T, $D1), src: $C2/Channel(T, $D2)) where D1 <= .Both, D2 >= .Both {
	for msg in channel_iterator(src) {
		channel_send(dst, msg)
	}
}
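For reference, a minimal single-threaded usage sketch of the channel API this commit deletes (names as defined in the removed file; the channel is buffered so the sends cannot block):

package example

import "core:fmt"
import "core:sync" // the pre-commit core:sync that contained Channel

main :: proc() {
	ch := sync.channel_make(int, 4)
	defer sync.channel_destroy(ch)

	for i in 0..<3 {
		sync.channel_send(ch, i)
	}
	for msg, ok := sync.channel_try_recv(ch); ok; msg, ok = sync.channel_try_recv(ch) {
		fmt.println(msg)
	}
}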
|
||||
|
||||
Raw_Channel_Wait_Queue :: struct {
|
||||
next: ^Raw_Channel_Wait_Queue,
|
||||
state: ^uintptr,
|
||||
}
|
||||
|
||||
|
||||
Raw_Channel :: struct {
|
||||
closed: bool,
|
||||
ready: bool, // ready to recv
|
||||
data_offset: u16, // data is stored at the end of this data structure
|
||||
elem_size: u32,
|
||||
len, cap: int,
|
||||
read, write: int,
|
||||
mutex: Mutex,
|
||||
cond: Condition,
|
||||
allocator: mem.Allocator,
|
||||
|
||||
sendq: ^Raw_Channel_Wait_Queue,
|
||||
recvq: ^Raw_Channel_Wait_Queue,
|
||||
}
|
||||
|
||||
raw_channel_wait_queue_insert :: proc(head: ^^Raw_Channel_Wait_Queue, val: ^Raw_Channel_Wait_Queue) {
|
||||
val.next = head^
|
||||
head^ = val
|
||||
}
|
||||
raw_channel_wait_queue_remove :: proc(head: ^^Raw_Channel_Wait_Queue, val: ^Raw_Channel_Wait_Queue) {
|
||||
p := head
|
||||
for p^ != nil && p^ != val {
|
||||
p = &p^.next
|
||||
}
|
||||
if p != nil {
|
||||
p^ = p^.next
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
raw_channel_create :: proc(elem_size, elem_align: int, cap := 0) -> ^Raw_Channel {
|
||||
assert(int(u32(elem_size)) == elem_size)
|
||||
|
||||
s := size_of(Raw_Channel)
|
||||
s = mem.align_forward_int(s, elem_align)
|
||||
data_offset := uintptr(s)
|
||||
s += elem_size * max(cap, 1)
|
||||
|
||||
a := max(elem_align, align_of(Raw_Channel))
|
||||
|
||||
c := (^Raw_Channel)(mem.alloc(s, a))
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
c.data_offset = u16(data_offset)
|
||||
c.elem_size = u32(elem_size)
|
||||
c.len, c.cap = 0, max(cap, 0)
|
||||
c.read, c.write = 0, 0
|
||||
mutex_init(&c.mutex)
|
||||
condition_init(&c.cond, &c.mutex)
|
||||
c.allocator = context.allocator
|
||||
c.closed = false
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
|
||||
raw_channel_destroy :: proc(c: ^Raw_Channel) {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
context.allocator = c.allocator
|
||||
intrinsics.atomic_store(&c.closed, true)
|
||||
|
||||
condition_destroy(&c.cond)
|
||||
mutex_destroy(&c.mutex)
|
||||
free(c)
|
||||
}
|
||||
|
||||
raw_channel_close :: proc(c: ^Raw_Channel, loc := #caller_location) {
|
||||
if c == nil {
|
||||
panic(message="cannot close nil channel", loc=loc)
|
||||
}
|
||||
mutex_lock(&c.mutex)
|
||||
defer mutex_unlock(&c.mutex)
|
||||
intrinsics.atomic_store(&c.closed, true)
|
||||
|
||||
// Release readers and writers
|
||||
raw_channel_wait_queue_broadcast(c.recvq)
|
||||
raw_channel_wait_queue_broadcast(c.sendq)
|
||||
condition_broadcast(&c.cond)
|
||||
}
|
||||
|
||||
|
||||
|
||||
raw_channel_send_impl :: proc(c: ^Raw_Channel, msg: rawptr, block: bool, loc := #caller_location) -> bool {
|
||||
send :: proc(c: ^Raw_Channel, src: rawptr) {
|
||||
data := uintptr(c) + uintptr(c.data_offset)
|
||||
dst := data + uintptr(c.write * int(c.elem_size))
|
||||
mem.copy(rawptr(dst), src, int(c.elem_size))
|
||||
c.len += 1
|
||||
c.write = (c.write + 1) % max(c.cap, 1)
|
||||
}
|
||||
|
||||
switch {
|
||||
case c == nil:
|
||||
panic(message="cannot send message; channel is nil", loc=loc)
|
||||
case c.closed:
|
||||
panic(message="cannot send message; channel is closed", loc=loc)
|
||||
}
|
||||
|
||||
mutex_lock(&c.mutex)
|
||||
defer mutex_unlock(&c.mutex)
|
||||
|
||||
if c.cap > 0 {
|
||||
if !block && c.len >= c.cap {
|
||||
return false
|
||||
}
|
||||
|
||||
for c.len >= c.cap {
|
||||
condition_wait_for(&c.cond)
|
||||
}
|
||||
} else if c.len > 0 { // TODO(bill): determine correct behaviour
|
||||
if !block {
|
||||
return false
|
||||
}
|
||||
condition_wait_for(&c.cond)
|
||||
} else if c.len == 0 && !block {
|
||||
return false
|
||||
}
|
||||
|
||||
send(c, msg)
|
||||
condition_signal(&c.cond)
|
||||
raw_channel_wait_queue_signal(c.recvq)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
raw_channel_recv_impl :: proc(c: ^Raw_Channel, res: rawptr, loc := #caller_location) {
|
||||
recv :: proc(c: ^Raw_Channel, dst: rawptr, loc := #caller_location) {
|
||||
if c.len < 1 {
|
||||
panic(message="cannot recv message; channel is empty", loc=loc)
|
||||
}
|
||||
c.len -= 1
|
||||
|
||||
data := uintptr(c) + uintptr(c.data_offset)
|
||||
src := data + uintptr(c.read * int(c.elem_size))
|
||||
mem.copy(dst, rawptr(src), int(c.elem_size))
|
||||
c.read = (c.read + 1) % max(c.cap, 1)
|
||||
}
|
||||
|
||||
if c == nil {
|
||||
panic(message="cannot recv message; channel is nil", loc=loc)
|
||||
}
|
||||
intrinsics.atomic_store(&c.ready, true)
|
||||
for c.len < 1 {
|
||||
raw_channel_wait_queue_signal(c.sendq)
|
||||
condition_wait_for(&c.cond)
|
||||
}
|
||||
intrinsics.atomic_store(&c.ready, false)
|
||||
recv(c, res, loc)
|
||||
if c.cap > 0 {
|
||||
if c.len == c.cap - 1 {
|
||||
// NOTE(bill): Only signal on the last one
|
||||
condition_signal(&c.cond)
|
||||
}
|
||||
} else {
|
||||
condition_signal(&c.cond)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
raw_channel_can_send :: proc(c: ^Raw_Channel) -> (ok: bool) {
|
||||
if c == nil {
|
||||
return false
|
||||
}
|
||||
mutex_lock(&c.mutex)
|
||||
switch {
|
||||
case c.closed:
|
||||
ok = false
|
||||
case c.cap > 0:
|
||||
ok = c.ready && c.len < c.cap
|
||||
case:
|
||||
ok = c.ready && c.len == 0
|
||||
}
|
||||
mutex_unlock(&c.mutex)
|
||||
return
|
||||
}
|
||||
raw_channel_can_recv :: proc(c: ^Raw_Channel) -> (ok: bool) {
|
||||
if c == nil {
|
||||
return false
|
||||
}
|
||||
mutex_lock(&c.mutex)
|
||||
ok = c.len > 0
|
||||
mutex_unlock(&c.mutex)
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
raw_channel_drain :: proc(c: ^Raw_Channel) {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
mutex_lock(&c.mutex)
|
||||
c.len = 0
|
||||
c.read = 0
|
||||
c.write = 0
|
||||
mutex_unlock(&c.mutex)
|
||||
}
|
||||
|
||||
|
||||
|
||||
MAX_SELECT_CHANNELS :: 64
|
||||
SELECT_MAX_TIMEOUT :: max(time.Duration)
|
||||
|
||||
Select_Command :: enum {
|
||||
Recv,
|
||||
Send,
|
||||
}
|
||||
|
||||
Select_Channel :: struct {
|
||||
channel: ^Raw_Channel,
|
||||
command: Select_Command,
|
||||
}
|
||||
|
||||
|
||||
|
||||
select :: proc(channels: ..Select_Channel) -> (index: int) {
|
||||
return select_timeout(SELECT_MAX_TIMEOUT, ..channels)
|
||||
}
|
||||
select_timeout :: proc(timeout: time.Duration, channels: ..Select_Channel) -> (index: int) {
|
||||
switch len(channels) {
|
||||
case 0:
|
||||
panic("sync: select with no channels")
|
||||
}
|
||||
|
||||
assert(len(channels) <= MAX_SELECT_CHANNELS)
|
||||
|
||||
backing: [MAX_SELECT_CHANNELS]int
|
||||
queues: [MAX_SELECT_CHANNELS]Raw_Channel_Wait_Queue
|
||||
candidates := backing[:]
|
||||
cap := len(channels)
|
||||
candidates = candidates[:cap]
|
||||
|
||||
count := u32(0)
|
||||
for c, i in channels {
|
||||
if c.channel == nil {
|
||||
continue
|
||||
}
|
||||
switch c.command {
|
||||
case .Recv:
|
||||
if raw_channel_can_recv(c.channel) {
|
||||
candidates[count] = i
|
||||
count += 1
|
||||
}
|
||||
case .Send:
|
||||
if raw_channel_can_send(c.channel) {
|
||||
candidates[count] = i
|
||||
count += 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if count == 0 {
|
||||
wait_state: uintptr = 0
|
||||
for _, i in channels {
|
||||
q := &queues[i]
|
||||
q.state = &wait_state
|
||||
}
|
||||
|
||||
for c, i in channels {
|
||||
if c.channel == nil {
|
||||
continue
|
||||
}
|
||||
q := &queues[i]
|
||||
switch c.command {
|
||||
case .Recv: raw_channel_wait_queue_insert(&c.channel.recvq, q)
|
||||
case .Send: raw_channel_wait_queue_insert(&c.channel.sendq, q)
|
||||
}
|
||||
}
|
||||
raw_channel_wait_queue_wait_on(&wait_state, timeout)
|
||||
for c, i in channels {
|
||||
if c.channel == nil {
|
||||
continue
|
||||
}
|
||||
q := &queues[i]
|
||||
switch c.command {
|
||||
case .Recv: raw_channel_wait_queue_remove(&c.channel.recvq, q)
|
||||
case .Send: raw_channel_wait_queue_remove(&c.channel.sendq, q)
|
||||
}
|
||||
}
|
||||
|
||||
for c, i in channels {
|
||||
switch c.command {
|
||||
case .Recv:
|
||||
if raw_channel_can_recv(c.channel) {
|
||||
candidates[count] = i
|
||||
count += 1
|
||||
}
|
||||
case .Send:
|
||||
if raw_channel_can_send(c.channel) {
|
||||
candidates[count] = i
|
||||
count += 1
|
||||
}
|
||||
}
|
||||
}
|
||||
if count == 0 && timeout == SELECT_MAX_TIMEOUT {
|
||||
index = -1
|
||||
return
|
||||
}
|
||||
|
||||
assert(count != 0)
|
||||
}
|
||||
|
||||
t := time.now()
|
||||
r := rand.create(transmute(u64)t)
|
||||
i := rand.uint32(&r)
|
||||
|
||||
index = candidates[i % count]
|
||||
return
|
||||
}
|
||||
|
||||
select_recv :: proc(channels: ..^Raw_Channel) -> (index: int) {
|
||||
switch len(channels) {
|
||||
case 0:
|
||||
panic("sync: select with no channels")
|
||||
}
|
||||
|
||||
assert(len(channels) <= MAX_SELECT_CHANNELS)
|
||||
|
||||
backing: [MAX_SELECT_CHANNELS]int
|
||||
queues: [MAX_SELECT_CHANNELS]Raw_Channel_Wait_Queue
|
||||
candidates := backing[:]
|
||||
cap := len(channels)
|
||||
candidates = candidates[:cap]
|
||||
|
||||
count := u32(0)
|
||||
for c, i in channels {
|
||||
if raw_channel_can_recv(c) {
|
||||
candidates[count] = i
|
||||
count += 1
|
||||
}
|
||||
}
|
||||
|
||||
if count == 0 {
|
||||
state: uintptr
|
||||
for c, i in channels {
|
||||
q := &queues[i]
|
||||
q.state = &state
|
||||
raw_channel_wait_queue_insert(&c.recvq, q)
|
||||
}
|
||||
raw_channel_wait_queue_wait_on(&state, SELECT_MAX_TIMEOUT)
|
||||
for c, i in channels {
|
||||
q := &queues[i]
|
||||
raw_channel_wait_queue_remove(&c.recvq, q)
|
||||
}
|
||||
|
||||
for c, i in channels {
|
||||
if raw_channel_can_recv(c) {
|
||||
candidates[count] = i
|
||||
count += 1
|
||||
}
|
||||
}
|
||||
assert(count != 0)
|
||||
}
|
||||
|
||||
t := time.now()
|
||||
r := rand.create(transmute(u64)t)
|
||||
i := rand.uint32(&r)
|
||||
|
||||
index = candidates[i % count]
|
||||
return
|
||||
}
|
||||
|
||||
select_recv_msg :: proc(channels: ..$C/Channel($T, $D)) -> (msg: T, index: int) {
|
||||
switch len(channels) {
|
||||
case 0:
|
||||
panic("sync: select with no channels")
|
||||
}
|
||||
|
||||
assert(len(channels) <= MAX_SELECT_CHANNELS)
|
||||
|
||||
queues: [MAX_SELECT_CHANNELS]Raw_Channel_Wait_Queue
|
||||
candidates: [MAX_SELECT_CHANNELS]int
|
||||
|
||||
count := u32(0)
|
||||
for c, i in channels {
|
||||
if raw_channel_can_recv(c) {
|
||||
candidates[count] = i
|
||||
count += 1
|
||||
}
|
||||
}
|
||||
|
||||
if count == 0 {
|
||||
state: uintptr
|
||||
for c, i in channels {
|
||||
q := &queues[i]
|
||||
q.state = &state
|
||||
raw_channel_wait_queue_insert(&c.recvq, q)
|
||||
}
|
||||
raw_channel_wait_queue_wait_on(&state, SELECT_MAX_TIMEOUT)
|
||||
for c, i in channels {
|
||||
q := &queues[i]
|
||||
raw_channel_wait_queue_remove(&c.recvq, q)
|
||||
}
|
||||
|
||||
for c, i in channels {
|
||||
if raw_channel_can_recv(c) {
|
||||
candidates[count] = i
|
||||
count += 1
|
||||
}
|
||||
}
|
||||
assert(count != 0)
|
||||
}
|
||||
|
||||
t := time.now()
|
||||
r := rand.create(transmute(u64)t)
|
||||
i := rand.uint32(&r)
|
||||
|
||||
index = candidates[i % count]
|
||||
msg = channel_recv(channels[index])
|
||||
|
||||
return
|
||||
}

select_send_msg :: proc(msg: $T, channels: ..$C/Channel(T, $D)) -> (index: int) {
	switch len(channels) {
	case 0:
		panic("sync: select with no channels")
	}

	assert(len(channels) <= MAX_SELECT_CHANNELS)

	backing: [MAX_SELECT_CHANNELS]int
	queues: [MAX_SELECT_CHANNELS]Raw_Channel_Wait_Queue
	candidates := backing[:]
	cap := len(channels)
	candidates = candidates[:cap]

	count := u32(0)
	for c, i in channels {
		if raw_channel_can_send(c) {
			candidates[count] = i
			count += 1
		}
	}

	if count == 0 {
		state: uintptr
		for c, i in channels {
			q := &queues[i]
			q.state = &state
			raw_channel_wait_queue_insert(&c.sendq, q)
		}
		raw_channel_wait_queue_wait_on(&state, SELECT_MAX_TIMEOUT)
		for c, i in channels {
			q := &queues[i]
			raw_channel_wait_queue_remove(&c.sendq, q)
		}

		for c, i in channels {
			if raw_channel_can_send(c) {
				candidates[count] = i
				count += 1
			}
		}
		assert(count != 0)
	}

	t := time.now()
	r := rand.create(transmute(u64)t)
	i := rand.uint32(&r)

	index = candidates[i % count]

	if msg != nil {
		channel_send(channels[index], msg)
	}

	return
}

select_send :: proc(channels: ..^Raw_Channel) -> (index: int) {
	switch len(channels) {
	case 0:
		panic("sync: select with no channels")
	}

	assert(len(channels) <= MAX_SELECT_CHANNELS)
	candidates: [MAX_SELECT_CHANNELS]int
	queues: [MAX_SELECT_CHANNELS]Raw_Channel_Wait_Queue

	count := u32(0)
	for c, i in channels {
		if raw_channel_can_send(c) {
			candidates[count] = i
			count += 1
		}
	}

	if count == 0 {
		state: uintptr
		for c, i in channels {
			q := &queues[i]
			q.state = &state
			raw_channel_wait_queue_insert(&c.sendq, q)
		}
		raw_channel_wait_queue_wait_on(&state, SELECT_MAX_TIMEOUT)
		for c, i in channels {
			q := &queues[i]
			raw_channel_wait_queue_remove(&c.sendq, q)
		}

		for c, i in channels {
			if raw_channel_can_send(c) {
				candidates[count] = i
				count += 1
			}
		}
		assert(count != 0)
	}

	t := time.now()
	r := rand.create(transmute(u64)t)
	i := rand.uint32(&r)

	index = candidates[i % count]
	return
}

select_try :: proc(channels: ..Select_Channel) -> (index: int) {
	switch len(channels) {
	case 0:
		panic("sync: select with no channels")
	}

	assert(len(channels) <= MAX_SELECT_CHANNELS)

	backing: [MAX_SELECT_CHANNELS]int
	candidates := backing[:]
	cap := len(channels)
	candidates = candidates[:cap]

	count := u32(0)
	for c, i in channels {
		switch c.command {
		case .Recv:
			if raw_channel_can_recv(c.channel) {
				candidates[count] = i
				count += 1
			}
		case .Send:
			if raw_channel_can_send(c.channel) {
				candidates[count] = i
				count += 1
			}
		}
	}

	if count == 0 {
		index = -1
		return
	}

	t := time.now()
	r := rand.create(transmute(u64)t)
	i := rand.uint32(&r)

	index = candidates[i % count]
	return
}
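
A usage sketch, not part of the diff: select_try polls a mixed set of receive and send operations without blocking. The Select_Channel field names `channel` and `command` are taken from the loop above.

	package example

	import "core:sync"

	// Returns the index of whichever operation is ready right now, or -1.
	poll_pair :: proc(rx, tx: ^sync.Raw_Channel) -> int {
		return sync.select_try(
			sync.Select_Channel{channel = rx, command = .Recv},
			sync.Select_Channel{channel = tx, command = .Send},
		)
	}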

select_try_recv :: proc(channels: ..^Raw_Channel) -> (index: int) {
	switch len(channels) {
	case 0:
		index = -1
		return
	case 1:
		index = -1
		if raw_channel_can_recv(channels[0]) {
			index = 0
		}
		return
	}

	assert(len(channels) <= MAX_SELECT_CHANNELS)
	candidates: [MAX_SELECT_CHANNELS]int

	count := u32(0)
	for c, i in channels {
		if raw_channel_can_recv(c) {
			candidates[count] = i
			count += 1
		}
	}

	if count == 0 {
		index = -1
		return
	}

	t := time.now()
	r := rand.create(transmute(u64)t)
	i := rand.uint32(&r)

	index = candidates[i % count]
	return
}

select_try_send :: proc(channels: ..^Raw_Channel) -> (index: int) #no_bounds_check {
	switch len(channels) {
	case 0:
		return -1
	case 1:
		if raw_channel_can_send(channels[0]) {
			return 0
		}
		return -1
	}

	assert(len(channels) <= MAX_SELECT_CHANNELS)
	candidates: [MAX_SELECT_CHANNELS]int

	count := u32(0)
	for c, i in channels {
		if raw_channel_can_send(c) {
			candidates[count] = i
			count += 1
		}
	}

	if count == 0 {
		index = -1
		return
	}

	t := time.now()
	r := rand.create(transmute(u64)t)
	i := rand.uint32(&r)

	index = candidates[i % count]
	return
}

select_try_recv_msg :: proc(channels: ..$C/Channel($T, $D)) -> (msg: T, index: int) {
	switch len(channels) {
	case 0:
		index = -1
		return
	case 1:
		index = -1
		ok: bool
		if msg, ok = channel_try_recv(channels[0]); ok {
			index = 0
		}
		return
	}

	assert(len(channels) <= MAX_SELECT_CHANNELS)
	candidates: [MAX_SELECT_CHANNELS]int

	count := u32(0)
	for c, i in channels {
		if channel_can_recv(c) {
			candidates[count] = i
			count += 1
		}
	}

	if count == 0 {
		index = -1
		return
	}

	t := time.now()
	r := rand.create(transmute(u64)t)
	i := rand.uint32(&r)

	index = candidates[i % count]
	msg = channel_recv(channels[index])
	return
}

select_try_send_msg :: proc(msg: $T, channels: ..$C/Channel(T, $D)) -> (index: int) {
	index = -1
	switch len(channels) {
	case 0:
		return
	case 1:
		if channel_try_send(channels[0], msg) {
			index = 0
		}
		return
	}

	assert(len(channels) <= MAX_SELECT_CHANNELS)
	candidates: [MAX_SELECT_CHANNELS]int

	count := u32(0)
	for c, i in channels {
		if raw_channel_can_send(c) {
			candidates[count] = i
			count += 1
		}
	}

	if count == 0 {
		index = -1
		return
	}

	t := time.now()
	r := rand.create(transmute(u64)t)
	i := rand.uint32(&r)

	index = candidates[i % count]
	channel_send(channels[index], msg)
	return
}
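
A usage sketch, not part of the diff: the try-variants return -1 instead of blocking, which makes a non-blocking dispatch-with-fallback pattern straightforward. As above, the `Channel(int)` instantiation assumes a defaulted direction parameter.

	package example

	import "core:sync"

	// Hand `job` to any worker that can take it right now;
	// fall back to a local backlog instead of blocking.
	dispatch :: proc(job: int, workers: []sync.Channel(int), backlog: ^[dynamic]int) {
		if idx := sync.select_try_send_msg(job, ..workers); idx < 0 {
			append(backlog, job)
		}
	}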

@@ -1,16 +0,0 @@
// +build linux, darwin, freebsd, openbsd
package sync

import "core:time"

raw_channel_wait_queue_wait_on :: proc(state: ^uintptr, timeout: time.Duration) {
	// stub
}

raw_channel_wait_queue_signal :: proc(q: ^Raw_Channel_Wait_Queue) {
	// stub
}

raw_channel_wait_queue_broadcast :: proc(q: ^Raw_Channel_Wait_Queue) {
	// stub
}

@@ -1,33 +0,0 @@
package sync

import "core:intrinsics"
import win32 "core:sys/windows"
import "core:time"

raw_channel_wait_queue_wait_on :: proc(state: ^uintptr, timeout: time.Duration) {
	ms: win32.DWORD = win32.INFINITE
	if max(time.Duration) != SELECT_MAX_TIMEOUT {
		ms = win32.DWORD((max(time.duration_nanoseconds(timeout), 0) + 999999)/1000000)
	}

	v := intrinsics.atomic_load(state)
	for v == 0 {
		win32.WaitOnAddress(state, &v, size_of(state^), ms)
		v = intrinsics.atomic_load(state)
	}
	intrinsics.atomic_store(state, 0)
}

raw_channel_wait_queue_signal :: proc(q: ^Raw_Channel_Wait_Queue) {
	for x := q; x != nil; x = x.next {
		intrinsics.atomic_add(x.state, 1)
		win32.WakeByAddressSingle(x.state)
	}
}

raw_channel_wait_queue_broadcast :: proc(q: ^Raw_Channel_Wait_Queue) {
	for x := q; x != nil; x = x.next {
		intrinsics.atomic_add(x.state, 1)
		win32.WakeByAddressAll(x.state)
	}
}

@@ -1,4 +1,4 @@
package sync2
package sync

import "core:time"

@@ -146,10 +146,10 @@ Auto_Reset_Event :: struct {
}

auto_reset_event_signal :: proc(e: ^Auto_Reset_Event) {
	old_status := atomic_load_relaxed(&e.status)
	old_status := atomic_load_explicit(&e.status, .Relaxed)
	for {
		new_status := old_status + 1 if old_status < 1 else 1
		if _, ok := atomic_compare_exchange_weak_release(&e.status, old_status, new_status); ok {
		if _, ok := atomic_compare_exchange_weak_explicit(&e.status, old_status, new_status, .Release, .Relaxed); ok {
			break
		}

@@ -160,7 +160,7 @@ auto_reset_event_signal :: proc(e: ^Auto_Reset_Event) {
}

auto_reset_event_wait :: proc(e: ^Auto_Reset_Event) {
	old_status := atomic_sub_acquire(&e.status, 1)
	old_status := atomic_sub_explicit(&e.status, 1, .Acquire)
	if old_status < 1 {
		sema_wait(&e.sema)
	}
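
A usage sketch, not part of the diff: an Auto_Reset_Event admits exactly one waiter per signal, and a signal sent before anyone waits lets the next waiter pass straight through. The zero value is assumed ready to use.

	package example

	import "core:sync"

	tick_event: sync.Auto_Reset_Event

	// Producer thread: wakes exactly one waiter (or pre-arms the event).
	tick :: proc() {
		sync.auto_reset_event_signal(&tick_event)
	}

	// Worker thread: blocks until one signal is available, then consumes it.
	wait_for_tick :: proc() {
		sync.auto_reset_event_wait(&tick_event)
	}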

@@ -174,14 +174,14 @@ Ticket_Mutex :: struct {
}

ticket_mutex_lock :: #force_inline proc(m: ^Ticket_Mutex) {
	ticket := atomic_add_relaxed(&m.ticket, 1)
	for ticket != atomic_load_acquire(&m.serving) {
	ticket := atomic_add_explicit(&m.ticket, 1, .Relaxed)
	for ticket != atomic_load_explicit(&m.serving, .Acquire) {
		cpu_relax()
	}
}

ticket_mutex_unlock :: #force_inline proc(m: ^Ticket_Mutex) {
	atomic_add_relaxed(&m.serving, 1)
	atomic_add_explicit(&m.serving, 1, .Relaxed)
}
@(deferred_in=ticket_mutex_unlock)
ticket_mutex_guard :: proc(m: ^Ticket_Mutex) -> bool {
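
A usage sketch, not part of the diff: because of @(deferred_in=ticket_mutex_unlock), ticket_mutex_guard locks immediately and schedules the unlock for the end of the enclosing block, giving a scoped-lock idiom:

	package example

	import "core:sync"

	counter: int
	mutex:   sync.Ticket_Mutex

	increment :: proc() {
		if sync.ticket_mutex_guard(&mutex) { // unlocked automatically at end of this block
			counter += 1
		}
	}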

@@ -196,18 +196,18 @@ Benaphore :: struct {
}

benaphore_lock :: proc(b: ^Benaphore) {
	if atomic_add_acquire(&b.counter, 1) > 1 {
	if atomic_add_explicit(&b.counter, 1, .Acquire) > 1 {
		sema_wait(&b.sema)
	}
}

benaphore_try_lock :: proc(b: ^Benaphore) -> bool {
	v, _ := atomic_compare_exchange_strong_acquire(&b.counter, 1, 0)
	v, _ := atomic_compare_exchange_strong_explicit(&b.counter, 1, 0, .Acquire, .Acquire)
	return v == 0
}

benaphore_unlock :: proc(b: ^Benaphore) {
	if atomic_sub_release(&b.counter, 1) > 0 {
	if atomic_sub_explicit(&b.counter, 1, .Release) > 0 {
		sema_post(&b.sema)
	}
}
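
A usage sketch, not part of the diff: a benaphore is a mutex built from a semaphore plus an atomic counter, so the uncontended path costs one atomic add/sub and sema_wait/sema_post are reached only under contention.

	package example

	import "core:sync"

	total: int
	lock:  sync.Benaphore

	add :: proc(x: int) {
		sync.benaphore_lock(&lock)
		defer sync.benaphore_unlock(&lock)
		total += x
	}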

@@ -227,7 +227,7 @@ Recursive_Benaphore :: struct {

recursive_benaphore_lock :: proc(b: ^Recursive_Benaphore) {
	tid := current_thread_id()
	if atomic_add_acquire(&b.counter, 1) > 1 {
	if atomic_add_explicit(&b.counter, 1, .Acquire) > 1 {
		if tid != b.owner {
			sema_wait(&b.sema)
		}

@@ -240,10 +240,10 @@ recursive_benaphore_lock :: proc(b: ^Recursive_Benaphore) {
recursive_benaphore_try_lock :: proc(b: ^Recursive_Benaphore) -> bool {
	tid := current_thread_id()
	if b.owner == tid {
		atomic_add_acquire(&b.counter, 1)
		atomic_add_explicit(&b.counter, 1, .Acquire)
	}

	if v, _ := atomic_compare_exchange_strong_acquire(&b.counter, 1, 0); v != 0 {
	if v, _ := atomic_compare_exchange_strong_explicit(&b.counter, 1, 0, .Acquire, .Acquire); v != 0 {
		return false
	}
	// inside the lock

@@ -260,7 +260,7 @@ recursive_benaphore_unlock :: proc(b: ^Recursive_Benaphore) {
	if recursion == 0 {
		b.owner = 0
	}
	if atomic_sub_release(&b.counter, 1) > 0 {
	if atomic_sub_explicit(&b.counter, 1, .Release) > 0 {
		if recursion == 0 {
			sema_post(&b.sema)
		}

@@ -293,12 +293,12 @@ once_do :: proc(o: ^Once, fn: proc()) {
	defer mutex_unlock(&o.m)
	if !o.done {
		fn()
		atomic_store_release(&o.done, true)
		atomic_store_explicit(&o.done, true, .Release)
	}
}

	if atomic_load_acquire(&o.done) == false {
	if atomic_load_explicit(&o.done, .Acquire) == false {
		do_slow(o, fn)
	}
}
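
A usage sketch, not part of the diff: once_do runs the procedure exactly once across all threads; the release store paired with the acquire load on `done` keeps the fast path to a single atomic read.

	package example

	import "core:sync"

	table:     map[string]int
	init_once: sync.Once

	init_table :: proc() {
		table = make(map[string]int)
		table["answer"] = 42
	}

	// Safe to call concurrently; init_table runs exactly once.
	get :: proc(key: string) -> int {
		sync.once_do(&init_once, init_table)
		return table[key]
	}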

@@ -1,6 +1,6 @@
//+private
//+build darwin
package sync2
package sync

import "core:c"
import "core:time"

@@ -1,6 +1,6 @@
//+private
//+build freebsd
package sync2
package sync

import "core:c"
import "core:os"

@@ -1,6 +1,6 @@
//+private
//+build linux
package sync2
package sync

import "core:c"
import "core:time"

@@ -14,12 +14,6 @@ FUTEX_PRIVATE_FLAG :: 128
FUTEX_WAIT_PRIVATE :: (FUTEX_WAIT | FUTEX_PRIVATE_FLAG)
FUTEX_WAKE_PRIVATE :: (FUTEX_WAKE | FUTEX_PRIVATE_FLAG)

foreign import libc "system:c"

foreign libc {
	__errno_location :: proc "c" () -> ^c.int ---
}

ESUCCESS :: 0
EINTR    :: -4
EAGAIN   :: -11

@@ -1,6 +1,6 @@
//+private
//+build openbsd
package sync2
package sync

import "core:c"
import "core:os"

@@ -1,6 +1,6 @@
//+private
//+build windows
package sync2
package sync

import "core:time"

@@ -1,4 +1,4 @@
package sync2
package sync

import "core:time"

@@ -1,4 +1,4 @@
package sync2
package sync

import "core:time"
@@ -24,7 +24,7 @@ atomic_mutex_lock :: proc(m: ^Atomic_Mutex) {
	new_state := curr_state // Make a copy of it

	spin_lock: for spin in 0..<i32(100) {
		state, ok := atomic_compare_exchange_weak_acquire(&m.state, .Unlocked, new_state)
		state, ok := atomic_compare_exchange_weak_explicit(&m.state, .Unlocked, new_state, .Acquire, .Consume)
		if ok {
			return
		}

@@ -42,7 +42,7 @@ atomic_mutex_lock :: proc(m: ^Atomic_Mutex) {
	new_state = .Waiting

	for {
		if atomic_exchange_acquire(&m.state, .Waiting) == .Unlocked {
		if atomic_exchange_explicit(&m.state, .Waiting, .Acquire) == .Unlocked {
			return
		}

@@ -52,7 +52,7 @@ atomic_mutex_lock :: proc(m: ^Atomic_Mutex) {
	}

	if v := atomic_exchange_acquire(&m.state, .Locked); v != .Unlocked {
	if v := atomic_exchange_explicit(&m.state, .Locked, .Acquire); v != .Unlocked {
		lock_slow(m, v)
	}
}

@@ -65,7 +65,7 @@ atomic_mutex_unlock :: proc(m: ^Atomic_Mutex) {
}

	switch atomic_exchange_release(&m.state, .Unlocked) {
	switch atomic_exchange_explicit(&m.state, .Unlocked, .Release) {
	case .Unlocked:
		unreachable()
	case .Locked:

@@ -77,7 +77,7 @@ atomic_mutex_unlock :: proc(m: ^Atomic_Mutex) {

// atomic_mutex_try_lock tries to lock m, will return true on success, and false on failure
atomic_mutex_try_lock :: proc(m: ^Atomic_Mutex) -> bool {
	_, ok := atomic_compare_exchange_strong_acquire(&m.state, .Unlocked, .Locked)
	_, ok := atomic_compare_exchange_strong_explicit(&m.state, .Unlocked, .Locked, .Acquire, .Consume)
	return ok
}
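
A usage sketch, not part of the diff: try-lock never blocks, which makes it the building block for do-it-now-or-skip patterns:

	package example

	import "core:sync"

	cache_lock:  sync.Atomic_Mutex
	cache_dirty: bool

	// Flush only if nobody else holds the lock; returns false to mean "skipped".
	maybe_flush :: proc() -> bool {
		if !sync.atomic_mutex_try_lock(&cache_lock) {
			return false
		}
		defer sync.atomic_mutex_unlock(&cache_lock)
		cache_dirty = false
		return true
	}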

@@ -290,7 +290,7 @@ Queue_Item :: struct {

@(private="file")
queue_item_wait :: proc(item: ^Queue_Item) {
	for atomic_load_acquire(&item.futex) == 0 {
	for atomic_load_explicit(&item.futex, .Acquire) == 0 {
		futex_wait(&item.futex, 0)
		cpu_relax()
	}

@@ -298,7 +298,7 @@ queue_item_wait :: proc(item: ^Queue_Item) {
@(private="file")
queue_item_wait_with_timeout :: proc(item: ^Queue_Item, duration: time.Duration) -> bool {
	start := time.tick_now()
	for atomic_load_acquire(&item.futex) == 0 {
	for atomic_load_explicit(&item.futex, .Acquire) == 0 {
		remaining := duration - time.tick_since(start)
		if remaining < 0 {
			return false

@@ -312,7 +312,7 @@ queue_item_wait_with_timeout :: proc(item: ^Queue_Item, duration: time.Duration)
}
@(private="file")
queue_item_signal :: proc(item: ^Queue_Item) {
	atomic_store_release(&item.futex, 1)
	atomic_store_explicit(&item.futex, 1, .Release)
	futex_signal(&item.futex)
}

@@ -1,6 +1,6 @@
//+build darwin
//+private
package sync2
package sync

import "core:c"
import "core:time"
core/sync/primitives_freebsd.odin (new file, 46 lines)
@@ -0,0 +1,46 @@
//+build freebsd
//+private
package sync

import "core:os"
import "core:time"

_current_thread_id :: proc "contextless" () -> int {
	return os.current_thread_id()
}

_Mutex :: struct {
	mutex: Atomic_Mutex,
}

_mutex_lock :: proc(m: ^Mutex) {
	atomic_mutex_lock(&m.impl.mutex)
}

_mutex_unlock :: proc(m: ^Mutex) {
	atomic_mutex_unlock(&m.impl.mutex)
}

_mutex_try_lock :: proc(m: ^Mutex) -> bool {
	return atomic_mutex_try_lock(&m.impl.mutex)
}

_Cond :: struct {
	cond: Atomic_Cond,
}

_cond_wait :: proc(c: ^Cond, m: ^Mutex) {
	atomic_cond_wait(&c.impl.cond, &m.impl.mutex)
}

_cond_wait_with_timeout :: proc(c: ^Cond, m: ^Mutex, duration: time.Duration) -> bool {
	return atomic_cond_wait_with_timeout(&c.impl.cond, &m.impl.mutex, duration)
}

_cond_signal :: proc(c: ^Cond) {
	atomic_cond_signal(&c.impl.cond)
}

_cond_broadcast :: proc(c: ^Cond) {
	atomic_cond_broadcast(&c.impl.cond)
}
core/sync/primitives_internal.odin (new file, 125 lines)
@@ -0,0 +1,125 @@
//+private
package sync

when #config(ODIN_SYNC_RECURSIVE_MUTEX_USE_FUTEX, true) {
	_Recursive_Mutex :: struct {
		owner:     Futex,
		recursion: i32,
	}

	_recursive_mutex_lock :: proc(m: ^Recursive_Mutex) {
		tid := Futex(current_thread_id())
		for {
			prev_owner := atomic_compare_exchange_strong_explicit(&m.impl.owner, tid, 0, .Acquire, .Acquire)
			switch prev_owner {
			case 0, tid:
				m.impl.recursion += 1
				// inside the lock
				return
			}

			futex_wait(&m.impl.owner, u32(prev_owner))
		}
	}

	_recursive_mutex_unlock :: proc(m: ^Recursive_Mutex) {
		m.impl.recursion -= 1
		if m.impl.recursion != 0 {
			return
		}
		atomic_exchange_explicit(&m.impl.owner, 0, .Release)

		futex_signal(&m.impl.owner)
		// outside the lock
	}

	_recursive_mutex_try_lock :: proc(m: ^Recursive_Mutex) -> bool {
		tid := Futex(current_thread_id())
		prev_owner := atomic_compare_exchange_strong_explicit(&m.impl.owner, tid, 0, .Acquire, .Acquire)
		switch prev_owner {
		case 0, tid:
			m.impl.recursion += 1
			// inside the lock
			return true
		}
		return false
	}
} else {
	_Recursive_Mutex :: struct {
		owner:     int,
		recursion: int,
		mutex:     Mutex,
	}

	_recursive_mutex_lock :: proc(m: ^Recursive_Mutex) {
		tid := current_thread_id()
		if tid != m.impl.owner {
			mutex_lock(&m.impl.mutex)
		}
		// inside the lock
		m.impl.owner = tid
		m.impl.recursion += 1
	}

	_recursive_mutex_unlock :: proc(m: ^Recursive_Mutex) {
		tid := current_thread_id()
		assert(tid == m.impl.owner)
		m.impl.recursion -= 1
		recursion := m.impl.recursion
		if recursion == 0 {
			m.impl.owner = 0
		}
		if recursion == 0 {
			mutex_unlock(&m.impl.mutex)
		}
		// outside the lock
	}

	_recursive_mutex_try_lock :: proc(m: ^Recursive_Mutex) -> bool {
		tid := current_thread_id()
		if m.impl.owner == tid {
			return mutex_try_lock(&m.impl.mutex)
		}
		if !mutex_try_lock(&m.impl.mutex) {
			return false
		}
		// inside the lock
		m.impl.owner = tid
		m.impl.recursion += 1
		return true
	}
}
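
A usage sketch, not part of the diff, assuming public recursive_mutex_* wrappers over the private procs above: the owning thread may re-lock without deadlocking, so one locked public API can call another.

	package example

	import "core:sync"

	state_lock: sync.Recursive_Mutex
	items:      [dynamic]int

	add_item :: proc(x: int) {
		sync.recursive_mutex_lock(&state_lock)
		defer sync.recursive_mutex_unlock(&state_lock)
		append(&items, x)
	}

	add_many :: proc(xs: []int) {
		sync.recursive_mutex_lock(&state_lock)
		defer sync.recursive_mutex_unlock(&state_lock)
		for x in xs {
			add_item(x) // re-entry by the owning thread is fine
		}
	}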

when ODIN_OS != .Windows {
	_RW_Mutex :: struct {
		mutex: Atomic_RW_Mutex,
	}

	_rw_mutex_lock :: proc(rw: ^RW_Mutex) {
		atomic_rw_mutex_lock(&rw.impl.mutex)
	}

	_rw_mutex_unlock :: proc(rw: ^RW_Mutex) {
		atomic_rw_mutex_unlock(&rw.impl.mutex)
	}

	_rw_mutex_try_lock :: proc(rw: ^RW_Mutex) -> bool {
		return atomic_rw_mutex_try_lock(&rw.impl.mutex)
	}

	_rw_mutex_shared_lock :: proc(rw: ^RW_Mutex) {
		atomic_rw_mutex_shared_lock(&rw.impl.mutex)
	}

	_rw_mutex_shared_unlock :: proc(rw: ^RW_Mutex) {
		atomic_rw_mutex_shared_unlock(&rw.impl.mutex)
	}

	_rw_mutex_try_shared_lock :: proc(rw: ^RW_Mutex) -> bool {
		return atomic_rw_mutex_try_shared_lock(&rw.impl.mutex)
	}

}
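
A usage sketch, not part of the diff, assuming public rw_mutex_* wrappers: many readers may hold the shared lock at once, while a writer takes it exclusively.

	package example

	import "core:sync"

	config:      map[string]string // assumed initialized with make(...) at startup
	config_lock: sync.RW_Mutex

	lookup :: proc(key: string) -> (value: string, ok: bool) {
		sync.rw_mutex_shared_lock(&config_lock)
		defer sync.rw_mutex_shared_unlock(&config_lock)
		value, ok = config[key]
		return
	}

	store :: proc(key, value: string) {
		sync.rw_mutex_lock(&config_lock)
		defer sync.rw_mutex_unlock(&config_lock)
		config[key] = value
	}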

core/sync/primitives_linux.odin (new file, 47 lines)
@@ -0,0 +1,47 @@
//+build linux
//+private
package sync

import "core:sys/unix"
import "core:time"

_current_thread_id :: proc "contextless" () -> int {
	return unix.sys_gettid()
}

_Mutex :: struct {
	mutex: Atomic_Mutex,
}

_mutex_lock :: proc(m: ^Mutex) {
	atomic_mutex_lock(&m.impl.mutex)
}

_mutex_unlock :: proc(m: ^Mutex) {
	atomic_mutex_unlock(&m.impl.mutex)
}

_mutex_try_lock :: proc(m: ^Mutex) -> bool {
	return atomic_mutex_try_lock(&m.impl.mutex)
}

_Cond :: struct {
	cond: Atomic_Cond,
}

_cond_wait :: proc(c: ^Cond, m: ^Mutex) {
	atomic_cond_wait(&c.impl.cond, &m.impl.mutex)
}

_cond_wait_with_timeout :: proc(c: ^Cond, m: ^Mutex, duration: time.Duration) -> bool {
	return atomic_cond_wait_with_timeout(&c.impl.cond, &m.impl.mutex, duration)
}

_cond_signal :: proc(c: ^Cond) {
	atomic_cond_signal(&c.impl.cond)
}

_cond_broadcast :: proc(c: ^Cond) {
	atomic_cond_broadcast(&c.impl.cond)
}
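
A usage sketch, not part of the diff, of the public Cond/Mutex pair these wrappers back. The predicate is re-checked in a loop because waits may wake spuriously:

	package example

	import "core:sync"

	queue_lock:  sync.Mutex
	queue_cond:  sync.Cond
	queue_items: [dynamic]int

	push :: proc(x: int) {
		sync.mutex_lock(&queue_lock)
		append(&queue_items, x)
		sync.mutex_unlock(&queue_lock)
		sync.cond_signal(&queue_cond) // wake one waiting consumer
	}

	pop_blocking :: proc() -> int {
		sync.mutex_lock(&queue_lock)
		defer sync.mutex_unlock(&queue_lock)
		for len(queue_items) == 0 {
			sync.cond_wait(&queue_cond, &queue_lock) // releases the lock while sleeping
		}
		x := queue_items[0]
		ordered_remove(&queue_items, 0)
		return x
	}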

core/sync/primitives_openbsd.odin (new file, 46 lines)
@@ -0,0 +1,46 @@
//+build openbsd
//+private
package sync

import "core:os"
import "core:time"

_current_thread_id :: proc "contextless" () -> int {
	return os.current_thread_id()
}

_Mutex :: struct {
	mutex: Atomic_Mutex,
}

_mutex_lock :: proc(m: ^Mutex) {
	atomic_mutex_lock(&m.impl.mutex)
}

_mutex_unlock :: proc(m: ^Mutex) {
	atomic_mutex_unlock(&m.impl.mutex)
}

_mutex_try_lock :: proc(m: ^Mutex) -> bool {
	return atomic_mutex_try_lock(&m.impl.mutex)
}

_Cond :: struct {
	cond: Atomic_Cond,
}

_cond_wait :: proc(c: ^Cond, m: ^Mutex) {
	atomic_cond_wait(&c.impl.cond, &m.impl.mutex)
}

_cond_wait_with_timeout :: proc(c: ^Cond, m: ^Mutex, duration: time.Duration) -> bool {
	return atomic_cond_wait_with_timeout(&c.impl.cond, &m.impl.mutex, duration)
}

_cond_signal :: proc(c: ^Cond) {
	atomic_cond_signal(&c.impl.cond)
}

_cond_broadcast :: proc(c: ^Cond) {
	atomic_cond_broadcast(&c.impl.cond)
}

@@ -1,6 +1,6 @@
//+build windows
//+private
package sync2
package sync

import "core:time"
import win32 "core:sys/windows"

@@ -1,5 +1,5 @@
//+private
package sync2
package sync

import "core:time"
@@ -1,123 +0,0 @@
package sync

import "core:intrinsics"

cpu_relax :: #force_inline proc "contextless" () {
	intrinsics.cpu_relax()
}

Condition_Mutex_Ptr :: union{^Mutex, ^Blocking_Mutex}


Ticket_Mutex :: struct {
	ticket:  u64,
	serving: u64,
}

ticket_mutex_init :: proc(m: ^Ticket_Mutex) {
	atomic_store(&m.ticket, 0, .Relaxed)
	atomic_store(&m.serving, 0, .Relaxed)
}

ticket_mutex_lock :: #force_inline proc(m: ^Ticket_Mutex) {
	ticket := atomic_add(&m.ticket, 1, .Relaxed)
	for ticket != atomic_load(&m.serving, .Acquire) {
		intrinsics.cpu_relax()
	}
}

ticket_mutex_unlock :: #force_inline proc(m: ^Ticket_Mutex) {
	atomic_add(&m.serving, 1, .Relaxed)
}


Benaphore :: struct {
	counter: int,
	sema:    Semaphore,
}

benaphore_init :: proc(b: ^Benaphore) {
	intrinsics.atomic_store(&b.counter, 0)
	semaphore_init(&b.sema)
}

benaphore_destroy :: proc(b: ^Benaphore) {
	semaphore_destroy(&b.sema)
}

benaphore_lock :: proc(b: ^Benaphore) {
	if intrinsics.atomic_add_acq(&b.counter, 1) > 1 {
		semaphore_wait_for(&b.sema)
	}
}

benaphore_try_lock :: proc(b: ^Benaphore) -> bool {
	v, _ := intrinsics.atomic_cxchg_acq(&b.counter, 1, 0)
	return v == 0
}

benaphore_unlock :: proc(b: ^Benaphore) {
	if intrinsics.atomic_sub_rel(&b.counter, 1) > 0 {
		semaphore_post(&b.sema)
	}
}

Recursive_Benaphore :: struct {
	counter:   int,
	owner:     int,
	recursion: int,
	sema:      Semaphore,
}

recursive_benaphore_init :: proc(b: ^Recursive_Benaphore) {
	intrinsics.atomic_store(&b.counter, 0)
	semaphore_init(&b.sema)
}

recursive_benaphore_destroy :: proc(b: ^Recursive_Benaphore) {
	semaphore_destroy(&b.sema)
}

recursive_benaphore_lock :: proc(b: ^Recursive_Benaphore) {
	tid := current_thread_id()
	if intrinsics.atomic_add_acq(&b.counter, 1) > 1 {
		if tid != b.owner {
			semaphore_wait_for(&b.sema)
		}
	}
	// inside the lock
	b.owner = tid
	b.recursion += 1
}

recursive_benaphore_try_lock :: proc(b: ^Recursive_Benaphore) -> bool {
	tid := current_thread_id()
	if b.owner == tid {
		intrinsics.atomic_add_acq(&b.counter, 1)
	} else {
		v, _ := intrinsics.atomic_cxchg_acq(&b.counter, 1, 0)
		if v != 0 {
			return false
		}
		// inside the lock
		b.owner = tid
	}
	b.recursion += 1
	return true
}

recursive_benaphore_unlock :: proc(b: ^Recursive_Benaphore) {
	tid := current_thread_id()
	assert(tid == b.owner)
	b.recursion -= 1
	recursion := b.recursion
	if recursion == 0 {
		b.owner = 0
	}
	if intrinsics.atomic_sub_rel(&b.counter, 1) > 0 {
		if recursion == 0 {
			semaphore_post(&b.sema)
		}
	}
	// outside the lock
}
@@ -1,79 +0,0 @@
package sync2

import "core:intrinsics"

cpu_relax :: intrinsics.cpu_relax

atomic_fence         :: intrinsics.atomic_fence
atomic_fence_acquire :: intrinsics.atomic_fence_acq
atomic_fence_release :: intrinsics.atomic_fence_rel
atomic_fence_acqrel  :: intrinsics.atomic_fence_acqrel

atomic_store           :: intrinsics.atomic_store
atomic_store_release   :: intrinsics.atomic_store_rel
atomic_store_relaxed   :: intrinsics.atomic_store_relaxed
atomic_store_unordered :: intrinsics.atomic_store_unordered

atomic_load           :: intrinsics.atomic_load
atomic_load_acquire   :: intrinsics.atomic_load_acq
atomic_load_relaxed   :: intrinsics.atomic_load_relaxed
atomic_load_unordered :: intrinsics.atomic_load_unordered

atomic_add         :: intrinsics.atomic_add
atomic_add_acquire :: intrinsics.atomic_add_acq
atomic_add_release :: intrinsics.atomic_add_rel
atomic_add_acqrel  :: intrinsics.atomic_add_acqrel
atomic_add_relaxed :: intrinsics.atomic_add_relaxed
atomic_sub         :: intrinsics.atomic_sub
atomic_sub_acquire :: intrinsics.atomic_sub_acq
atomic_sub_release :: intrinsics.atomic_sub_rel
atomic_sub_acqrel  :: intrinsics.atomic_sub_acqrel
atomic_sub_relaxed :: intrinsics.atomic_sub_relaxed
atomic_and         :: intrinsics.atomic_and
atomic_and_acquire :: intrinsics.atomic_and_acq
atomic_and_release :: intrinsics.atomic_and_rel
atomic_and_acqrel  :: intrinsics.atomic_and_acqrel
atomic_and_relaxed :: intrinsics.atomic_and_relaxed
atomic_nand         :: intrinsics.atomic_nand
atomic_nand_acquire :: intrinsics.atomic_nand_acq
atomic_nand_release :: intrinsics.atomic_nand_rel
atomic_nand_acqrel  :: intrinsics.atomic_nand_acqrel
atomic_nand_relaxed :: intrinsics.atomic_nand_relaxed
atomic_or         :: intrinsics.atomic_or
atomic_or_acquire :: intrinsics.atomic_or_acq
atomic_or_release :: intrinsics.atomic_or_rel
atomic_or_acqrel  :: intrinsics.atomic_or_acqrel
atomic_or_relaxed :: intrinsics.atomic_or_relaxed
atomic_xor         :: intrinsics.atomic_xor
atomic_xor_acquire :: intrinsics.atomic_xor_acq
atomic_xor_release :: intrinsics.atomic_xor_rel
atomic_xor_acqrel  :: intrinsics.atomic_xor_acqrel
atomic_xor_relaxed :: intrinsics.atomic_xor_relaxed

atomic_exchange         :: intrinsics.atomic_xchg
atomic_exchange_acquire :: intrinsics.atomic_xchg_acq
atomic_exchange_release :: intrinsics.atomic_xchg_rel
atomic_exchange_acqrel  :: intrinsics.atomic_xchg_acqrel
atomic_exchange_relaxed :: intrinsics.atomic_xchg_relaxed

// Returns value and optional ok boolean
atomic_compare_exchange_strong                     :: intrinsics.atomic_cxchg
atomic_compare_exchange_strong_acquire             :: intrinsics.atomic_cxchg_acq
atomic_compare_exchange_strong_release             :: intrinsics.atomic_cxchg_rel
atomic_compare_exchange_strong_acqrel              :: intrinsics.atomic_cxchg_acqrel
atomic_compare_exchange_strong_relaxed             :: intrinsics.atomic_cxchg_relaxed
atomic_compare_exchange_strong_failrelaxed         :: intrinsics.atomic_cxchg_failrelaxed
atomic_compare_exchange_strong_failacquire         :: intrinsics.atomic_cxchg_failacq
atomic_compare_exchange_strong_acquire_failrelaxed :: intrinsics.atomic_cxchg_acq_failrelaxed
atomic_compare_exchange_strong_acqrel_failrelaxed  :: intrinsics.atomic_cxchg_acqrel_failrelaxed

// Returns value and optional ok boolean
atomic_compare_exchange_weak                     :: intrinsics.atomic_cxchgweak
atomic_compare_exchange_weak_acquire             :: intrinsics.atomic_cxchgweak_acq
atomic_compare_exchange_weak_release             :: intrinsics.atomic_cxchgweak_rel
atomic_compare_exchange_weak_acqrel              :: intrinsics.atomic_cxchgweak_acqrel
atomic_compare_exchange_weak_relaxed             :: intrinsics.atomic_cxchgweak_relaxed
atomic_compare_exchange_weak_failrelaxed         :: intrinsics.atomic_cxchgweak_failrelaxed
atomic_compare_exchange_weak_failacquire         :: intrinsics.atomic_cxchgweak_failacq
atomic_compare_exchange_weak_acquire_failrelaxed :: intrinsics.atomic_cxchgweak_acq_failrelaxed
atomic_compare_exchange_weak_acqrel_failrelaxed  :: intrinsics.atomic_cxchgweak_acqrel_failrelaxed
@@ -1,9 +0,0 @@
//+build freebsd
//+private
package sync2

import "core:os"

_current_thread_id :: proc "contextless" () -> int {
	return os.current_thread_id()
}
@@ -1,184 +0,0 @@
//+private
package sync2

when #config(ODIN_SYNC_RECURSIVE_MUTEX_USE_FUTEX, true) {
	_Recursive_Mutex :: struct {
		owner:     Futex,
		recursion: i32,
	}

	_recursive_mutex_lock :: proc(m: ^Recursive_Mutex) {
		tid := Futex(current_thread_id())
		for {
			prev_owner := atomic_compare_exchange_strong_acquire(&m.impl.owner, tid, 0)
			switch prev_owner {
			case 0, tid:
				m.impl.recursion += 1
				// inside the lock
				return
			}

			futex_wait(&m.impl.owner, u32(prev_owner))
		}
	}

	_recursive_mutex_unlock :: proc(m: ^Recursive_Mutex) {
		m.impl.recursion -= 1
		if m.impl.recursion != 0 {
			return
		}
		atomic_exchange_release(&m.impl.owner, 0)

		futex_signal(&m.impl.owner)
		// outside the lock
	}

	_recursive_mutex_try_lock :: proc(m: ^Recursive_Mutex) -> bool {
		tid := Futex(current_thread_id())
		prev_owner := atomic_compare_exchange_strong_acquire(&m.impl.owner, tid, 0)
		switch prev_owner {
		case 0, tid:
			m.impl.recursion += 1
			// inside the lock
			return true
		}
		return false
	}
} else {
	_Recursive_Mutex :: struct {
		owner:     int,
		recursion: int,
		mutex:     Mutex,
	}

	_recursive_mutex_lock :: proc(m: ^Recursive_Mutex) {
		tid := current_thread_id()
		if tid != m.impl.owner {
			mutex_lock(&m.impl.mutex)
		}
		// inside the lock
		m.impl.owner = tid
		m.impl.recursion += 1
	}

	_recursive_mutex_unlock :: proc(m: ^Recursive_Mutex) {
		tid := current_thread_id()
		assert(tid == m.impl.owner)
		m.impl.recursion -= 1
		recursion := m.impl.recursion
		if recursion == 0 {
			m.impl.owner = 0
		}
		if recursion == 0 {
			mutex_unlock(&m.impl.mutex)
		}
		// outside the lock
	}

	_recursive_mutex_try_lock :: proc(m: ^Recursive_Mutex) -> bool {
		tid := current_thread_id()
		if m.impl.owner == tid {
			return mutex_try_lock(&m.impl.mutex)
		}
		if !mutex_try_lock(&m.impl.mutex) {
			return false
		}
		// inside the lock
		m.impl.owner = tid
		m.impl.recursion += 1
		return true
	}
}


when ODIN_OS != .Windows {
	RW_Mutex_State :: distinct uint
	RW_Mutex_State_Half_Width :: size_of(RW_Mutex_State)*8/2
	RW_Mutex_State_Is_Writing :: RW_Mutex_State(1)
	RW_Mutex_State_Writer     :: RW_Mutex_State(1)<<1
	RW_Mutex_State_Reader     :: RW_Mutex_State(1)<<RW_Mutex_State_Half_Width

	RW_Mutex_State_Writer_Mask :: RW_Mutex_State(1<<(RW_Mutex_State_Half_Width-1) - 1) << 1
	RW_Mutex_State_Reader_Mask :: RW_Mutex_State(1<<(RW_Mutex_State_Half_Width-1) - 1) << RW_Mutex_State_Half_Width


	_RW_Mutex :: struct {
		// NOTE(bill): pthread_rwlock_t cannot be used since pthread_rwlock_destroy is required on some platforms
		// TODO(bill): Can we determine which platforms exactly?
		state: RW_Mutex_State,
		mutex: Mutex,
		sema:  Sema,
	}

	_rw_mutex_lock :: proc(rw: ^RW_Mutex) {
		_ = atomic_add(&rw.impl.state, RW_Mutex_State_Writer)
		mutex_lock(&rw.impl.mutex)

		state := atomic_or(&rw.impl.state, RW_Mutex_State_Writer)
		if state & RW_Mutex_State_Reader_Mask != 0 {
			sema_wait(&rw.impl.sema)
		}
	}

	_rw_mutex_unlock :: proc(rw: ^RW_Mutex) {
		_ = atomic_and(&rw.impl.state, ~RW_Mutex_State_Is_Writing)
		mutex_unlock(&rw.impl.mutex)
	}

	_rw_mutex_try_lock :: proc(rw: ^RW_Mutex) -> bool {
		if mutex_try_lock(&rw.impl.mutex) {
			state := atomic_load(&rw.impl.state)
			if state & RW_Mutex_State_Reader_Mask == 0 {
				_ = atomic_or(&rw.impl.state, RW_Mutex_State_Is_Writing)
				return true
			}

			mutex_unlock(&rw.impl.mutex)
		}
		return false
	}

	_rw_mutex_shared_lock :: proc(rw: ^RW_Mutex) {
		state := atomic_load(&rw.impl.state)
		for state & (RW_Mutex_State_Is_Writing|RW_Mutex_State_Writer_Mask) == 0 {
			ok: bool
			state, ok = atomic_compare_exchange_weak(&rw.impl.state, state, state + RW_Mutex_State_Reader)
			if ok {
				return
			}
		}

		mutex_lock(&rw.impl.mutex)
		_ = atomic_add(&rw.impl.state, RW_Mutex_State_Reader)
		mutex_unlock(&rw.impl.mutex)
	}

	_rw_mutex_shared_unlock :: proc(rw: ^RW_Mutex) {
		state := atomic_sub(&rw.impl.state, RW_Mutex_State_Reader)

		if (state & RW_Mutex_State_Reader_Mask == RW_Mutex_State_Reader) &&
		   (state & RW_Mutex_State_Is_Writing != 0) {
			sema_post(&rw.impl.sema)
		}
	}

	_rw_mutex_try_shared_lock :: proc(rw: ^RW_Mutex) -> bool {
		state := atomic_load(&rw.impl.state)
		if state & (RW_Mutex_State_Is_Writing|RW_Mutex_State_Writer_Mask) == 0 {
			_, ok := atomic_compare_exchange_strong(&rw.impl.state, state, state + RW_Mutex_State_Reader)
			if ok {
				return true
			}
		}
		if mutex_try_lock(&rw.impl.mutex) {
			_ = atomic_add(&rw.impl.state, RW_Mutex_State_Reader)
			mutex_unlock(&rw.impl.mutex)
			return true
		}

		return false
	}

}
@@ -1,9 +0,0 @@
//+build linux
//+private
package sync2

import "core:sys/unix"

_current_thread_id :: proc "contextless" () -> int {
	return unix.sys_gettid()
}

@@ -1,9 +0,0 @@
//+build openbsd
//+private
package sync2

import "core:os"

_current_thread_id :: proc "contextless" () -> int {
	return os.current_thread_id()
}
@@ -1,58 +0,0 @@
//+build linux, freebsd, openbsd
//+private
package sync2

import "core:time"
import "core:sys/unix"

_Mutex_State :: enum i32 {
	Unlocked = 0,
	Locked   = 1,
	Waiting  = 2,
}
_Mutex :: struct {
	pthread_mutex: unix.pthread_mutex_t,
}

_mutex_lock :: proc(m: ^Mutex) {
	err := unix.pthread_mutex_lock(&m.impl.pthread_mutex)
	assert(err == 0)
}

_mutex_unlock :: proc(m: ^Mutex) {
	err := unix.pthread_mutex_unlock(&m.impl.pthread_mutex)
	assert(err == 0)
}

_mutex_try_lock :: proc(m: ^Mutex) -> bool {
	err := unix.pthread_mutex_trylock(&m.impl.pthread_mutex)
	return err == 0
}

_Cond :: struct {
	pthread_cond: unix.pthread_cond_t,
}

_cond_wait :: proc(c: ^Cond, m: ^Mutex) {
	err := unix.pthread_cond_wait(&c.impl.pthread_cond, &m.impl.pthread_mutex)
	assert(err == 0)
}


_cond_wait_with_timeout :: proc(c: ^Cond, m: ^Mutex, duration: time.Duration) -> bool {
	tv_sec  := i64(duration/1e9)
	tv_nsec := i64(duration%1e9)
	err := unix.pthread_cond_timedwait(&c.impl.pthread_cond, &m.impl.pthread_mutex, &{tv_sec, tv_nsec})
	return err == 0
}


_cond_signal :: proc(c: ^Cond) {
	err := unix.pthread_cond_signal(&c.impl.pthread_cond)
	assert(err == 0)
}

_cond_broadcast :: proc(c: ^Cond) {
	err := unix.pthread_cond_broadcast(&c.impl.pthread_cond)
	assert(err == 0)
}
@@ -1,54 +0,0 @@
package sync

import "core:sys/darwin"

import "core:c"

foreign import pthread "System.framework"

current_thread_id :: proc "contextless" () -> int {
	tid: u64
	// NOTE(Oskar): available from OSX 10.6 and iOS 3.2.
	// For older versions there is `syscall(SYS_thread_selfid)`, but not really
	// the same thing apparently.
	foreign pthread { pthread_threadid_np :: proc "c" (rawptr, ^u64) -> c.int --- }
	pthread_threadid_np(nil, &tid)
	return int(tid)
}


// The Darwin docs say it best:
// A semaphore is much like a lock, except that a finite number of threads can hold it simultaneously.
// Semaphores can be thought of as being much like piles of tokens; multiple threads can take these tokens,
// but when there are none left, a thread must wait until another thread returns one.
Semaphore :: struct #align 16 {
	handle: darwin.semaphore_t,
}
// TODO(tetra): Only marked with alignment because we cannot mark distinct integers with alignments.
// See core/sys/unix/pthread_linux.odin/pthread_t.

semaphore_init :: proc(s: ^Semaphore, initial_count := 0) {
	ct := darwin.mach_task_self()
	res := darwin.semaphore_create(ct, &s.handle, 0, c.int(initial_count))
	assert(res == 0)
}

semaphore_destroy :: proc(s: ^Semaphore) {
	ct := darwin.mach_task_self()
	res := darwin.semaphore_destroy(ct, s.handle)
	assert(res == 0)
	s.handle = {}
}

semaphore_post :: proc(s: ^Semaphore, count := 1) {
	// NOTE: SPEED: If there's one syscall to do this, we should use it instead of the loop.
	for in 0..<count {
		res := darwin.semaphore_signal(s.handle)
		assert(res == 0)
	}
}

semaphore_wait_for :: proc(s: ^Semaphore) {
	res := darwin.semaphore_wait(s.handle)
	assert(res == 0)
}
@@ -1,40 +0,0 @@
package sync

import "core:sys/unix"
import "core:intrinsics"


current_thread_id :: proc "contextless" () -> int {
	SYS_GETTID :: 186
	return int(intrinsics.syscall(SYS_GETTID))
}


// The Darwin docs say it best:
// A semaphore is much like a lock, except that a finite number of threads can hold it simultaneously.
// Semaphores can be thought of as being much like piles of tokens; multiple threads can take these tokens,
// but when there are none left, a thread must wait until another thread returns one.
Semaphore :: struct #align 16 {
	handle: unix.sem_t,
}

semaphore_init :: proc(s: ^Semaphore, initial_count := 0) {
	assert(unix.sem_init(&s.handle, 0, u32(initial_count)) == 0)
}

semaphore_destroy :: proc(s: ^Semaphore) {
	assert(unix.sem_destroy(&s.handle) == 0)
	s.handle = {}
}

semaphore_post :: proc(s: ^Semaphore, count := 1) {
	// NOTE: SPEED: If there's one syscall to do this, we should use it instead of the loop.
	for in 0..<count {
		assert(unix.sem_post(&s.handle) == 0)
	}
}

semaphore_wait_for :: proc(s: ^Semaphore) {
	assert(unix.sem_wait(&s.handle) == 0)
}
@@ -1,36 +0,0 @@
package sync

import "core:sys/unix"

current_thread_id :: proc "contextless" () -> int {
	return unix.sys_gettid()
}


// The Darwin docs say it best:
// A semaphore is much like a lock, except that a finite number of threads can hold it simultaneously.
// Semaphores can be thought of as being much like piles of tokens; multiple threads can take these tokens,
// but when there are none left, a thread must wait until another thread returns one.
Semaphore :: struct #align 16 {
	handle: unix.sem_t,
}

semaphore_init :: proc(s: ^Semaphore, initial_count := 0) {
	assert(unix.sem_init(&s.handle, 0, u32(initial_count)) == 0)
}

semaphore_destroy :: proc(s: ^Semaphore) {
	assert(unix.sem_destroy(&s.handle) == 0)
	s.handle = {}
}

semaphore_post :: proc(s: ^Semaphore, count := 1) {
	// NOTE: SPEED: If there's one syscall to do this, we should use it instead of the loop.
	for in 0..<count {
		assert(unix.sem_post(&s.handle) == 0)
	}
}

semaphore_wait_for :: proc(s: ^Semaphore) {
	assert(unix.sem_wait(&s.handle) == 0)
}
@@ -1,36 +0,0 @@
package sync

import "core:sys/unix"
import "core:os"

current_thread_id :: proc "contextless" () -> int {
	return os.current_thread_id()
}

// The Darwin docs say it best:
// A semaphore is much like a lock, except that a finite number of threads can hold it simultaneously.
// Semaphores can be thought of as being much like piles of tokens; multiple threads can take these tokens,
// but when there are none left, a thread must wait until another thread returns one.
Semaphore :: struct #align 16 {
	handle: unix.sem_t,
}

semaphore_init :: proc(s: ^Semaphore, initial_count := 0) {
	assert(unix.sem_init(&s.handle, 0, u32(initial_count)) == 0)
}

semaphore_destroy :: proc(s: ^Semaphore) {
	assert(unix.sem_destroy(&s.handle) == 0)
	s.handle = {}
}

semaphore_post :: proc(s: ^Semaphore, count := 1) {
	// NOTE: SPEED: If there's one syscall to do this, we should use it instead of the loop.
	for in 0..<count {
		assert(unix.sem_post(&s.handle) == 0)
	}
}

semaphore_wait_for :: proc(s: ^Semaphore) {
	assert(unix.sem_wait(&s.handle) == 0)
}
@@ -1,248 +0,0 @@
// +build linux, darwin, freebsd, openbsd
package sync

import "core:sys/unix"
import "core:time"

// A recursive lock that can only be held by one thread at once
Mutex :: struct {
	handle: unix.pthread_mutex_t,
}


mutex_init :: proc(m: ^Mutex) {
	// NOTE(tetra, 2019-11-01): POSIX OOM if we cannot init the attrs or the mutex.
	attrs: unix.pthread_mutexattr_t
	assert(unix.pthread_mutexattr_init(&attrs) == 0)
	defer unix.pthread_mutexattr_destroy(&attrs) // ignores destruction error
	unix.pthread_mutexattr_settype(&attrs, unix.PTHREAD_MUTEX_RECURSIVE)

	assert(unix.pthread_mutex_init(&m.handle, &attrs) == 0)
}

mutex_destroy :: proc(m: ^Mutex) {
	assert(unix.pthread_mutex_destroy(&m.handle) == 0)
	m.handle = {}
}

mutex_lock :: proc(m: ^Mutex) {
	assert(unix.pthread_mutex_lock(&m.handle) == 0)
}

// Returns false if someone else holds the lock.
mutex_try_lock :: proc(m: ^Mutex) -> bool {
	return unix.pthread_mutex_trylock(&m.handle) == 0
}

mutex_unlock :: proc(m: ^Mutex) {
	assert(unix.pthread_mutex_unlock(&m.handle) == 0)
}


Blocking_Mutex :: struct {
	handle: unix.pthread_mutex_t,
}


blocking_mutex_init :: proc(m: ^Blocking_Mutex) {
	// NOTE(tetra, 2019-11-01): POSIX OOM if we cannot init the attrs or the mutex.
	attrs: unix.pthread_mutexattr_t
	assert(unix.pthread_mutexattr_init(&attrs) == 0)
	defer unix.pthread_mutexattr_destroy(&attrs) // ignores destruction error

	assert(unix.pthread_mutex_init(&m.handle, &attrs) == 0)
}

blocking_mutex_destroy :: proc(m: ^Blocking_Mutex) {
	assert(unix.pthread_mutex_destroy(&m.handle) == 0)
	m.handle = {}
}

blocking_mutex_lock :: proc(m: ^Blocking_Mutex) {
	assert(unix.pthread_mutex_lock(&m.handle) == 0)
}

// Returns false if someone else holds the lock.
blocking_mutex_try_lock :: proc(m: ^Blocking_Mutex) -> bool {
	return unix.pthread_mutex_trylock(&m.handle) == 0
}

blocking_mutex_unlock :: proc(m: ^Blocking_Mutex) {
	assert(unix.pthread_mutex_unlock(&m.handle) == 0)
}



// Blocks until signalled, and then lets past exactly
// one thread.
Condition :: struct {
	handle: unix.pthread_cond_t,
	mutex:  Condition_Mutex_Ptr,

	// NOTE(tetra, 2019-11-11): Used to mimic the more sane behavior of Windows' AutoResetEvent.
	// This means that you may signal the condition before anyone is waiting to cause the
	// next thread that tries to wait to just pass by uninterrupted, without sleeping.
	// Without this, signalling a condition will only wake up a thread which is already waiting,
	// but not one that is about to wait, which can cause your program to become out of sync in
	// ways that are hard to debug or fix.
	flag: bool, // atomically mutated
}

condition_init :: proc(c: ^Condition, mutex: Condition_Mutex_Ptr) -> bool {
	// NOTE(tetra, 2019-11-01): POSIX OOM if we cannot init the attrs or the condition.
	attrs: unix.pthread_condattr_t
	if unix.pthread_condattr_init(&attrs) != 0 {
		return false
	}
	defer unix.pthread_condattr_destroy(&attrs) // ignores destruction error

	c.flag = false
	c.mutex = mutex
	return unix.pthread_cond_init(&c.handle, &attrs) == 0
}

condition_destroy :: proc(c: ^Condition) {
	assert(unix.pthread_cond_destroy(&c.handle) == 0)
	c.handle = {}
}

// Awaken exactly one thread who is waiting on the condition
condition_signal :: proc(c: ^Condition) -> bool {
	switch m in c.mutex {
	case ^Mutex:
		mutex_lock(m)
		defer mutex_unlock(m)
		atomic_swap(&c.flag, true, .Sequentially_Consistent)
		return unix.pthread_cond_signal(&c.handle) == 0
	case ^Blocking_Mutex:
		blocking_mutex_lock(m)
		defer blocking_mutex_unlock(m)
		atomic_swap(&c.flag, true, .Sequentially_Consistent)
		return unix.pthread_cond_signal(&c.handle) == 0
	}
	return false
}

// Awaken all threads who are waiting on the condition
condition_broadcast :: proc(c: ^Condition) -> bool {
	return unix.pthread_cond_broadcast(&c.handle) == 0
}

// Wait for the condition to be signalled.
// Does not block if the condition has been signalled and no one
// has waited on it yet.
condition_wait_for :: proc(c: ^Condition) -> bool {
	switch m in c.mutex {
	case ^Mutex:
		mutex_lock(m)
		defer mutex_unlock(m)
		// NOTE(tetra): If a thread comes by and steals the flag immediately after the signal occurs,
		// the thread that gets signalled and wakes up, discovers that the flag was taken and goes
		// back to sleep.
		// Though this overall behavior is the most sane, there may be a better way to do this that means that
		// the first thread to wait, gets the flag first.
		if atomic_swap(&c.flag, false, .Sequentially_Consistent) {
			return true
		}
		for {
			if unix.pthread_cond_wait(&c.handle, &m.handle) != 0 {
				return false
			}
			if atomic_swap(&c.flag, false, .Sequentially_Consistent) {
				return true
			}
		}
		return false

	case ^Blocking_Mutex:
		blocking_mutex_lock(m)
		defer blocking_mutex_unlock(m)
		// NOTE(tetra): If a thread comes by and steals the flag immediately after the signal occurs,
		// the thread that gets signalled and wakes up, discovers that the flag was taken and goes
		// back to sleep.
		// Though this overall behavior is the most sane, there may be a better way to do this that means that
		// the first thread to wait, gets the flag first.
		if atomic_swap(&c.flag, false, .Sequentially_Consistent) {
			return true
		}
		for {
			if unix.pthread_cond_wait(&c.handle, &m.handle) != 0 {
				return false
			}
			if atomic_swap(&c.flag, false, .Sequentially_Consistent) {
				return true
			}
		}
		return false
	}
	return false
}

// Wait for the condition to be signalled.
// Does not block if the condition has been signalled and no one
// has waited on it yet.
condition_wait_for_timeout :: proc(c: ^Condition, duration: time.Duration) -> bool {
	switch m in c.mutex {
	case ^Mutex:
		mutex_lock(m)
		defer mutex_unlock(m)
		// NOTE(tetra): If a thread comes by and steals the flag immediately after the signal occurs,
		// the thread that gets signalled and wakes up, discovers that the flag was taken and goes
		// back to sleep.
		// Though this overall behavior is the most sane, there may be a better way to do this that means that
		// the first thread to wait, gets the flag first.
		if atomic_swap(&c.flag, false, .Sequentially_Consistent) {
			return true
		}

		ns := time.duration_nanoseconds(duration)
		timeout: time.TimeSpec
		timeout.tv_sec  = ns / 1e9
		timeout.tv_nsec = ns % 1e9

		for {
			if unix.pthread_cond_timedwait(&c.handle, &m.handle, &timeout) != 0 {
				return false
			}
			if atomic_swap(&c.flag, false, .Sequentially_Consistent) {
				return true
			}
		}
		return false

	case ^Blocking_Mutex:
		blocking_mutex_lock(m)
		defer blocking_mutex_unlock(m)
		// NOTE(tetra): If a thread comes by and steals the flag immediately after the signal occurs,
		// the thread that gets signalled and wakes up, discovers that the flag was taken and goes
		// back to sleep.
		// Though this overall behavior is the most sane, there may be a better way to do this that means that
		// the first thread to wait, gets the flag first.
		if atomic_swap(&c.flag, false, .Sequentially_Consistent) {
			return true
		}

		ns := time.duration_nanoseconds(duration)

		timeout: time.TimeSpec
		timeout.tv_sec  = ns / 1e9
		timeout.tv_nsec = ns % 1e9

		for {
			if unix.pthread_cond_timedwait(&c.handle, &m.handle, &timeout) != 0 {
				return false
			}
			if atomic_swap(&c.flag, false, .Sequentially_Consistent) {
				return true
			}
		}
		return false
	}
	return false
}



thread_yield :: proc() {
	unix.sched_yield()
}
@@ -1,4 +1,4 @@
package sync2
package sync

/*
Example:
@@ -1,180 +0,0 @@
|
||||
// +build windows
|
||||
package sync
|
||||
|
||||
import win32 "core:sys/windows"
|
||||
import "core:time"
|
||||
|
||||
current_thread_id :: proc "contextless" () -> int {
|
||||
return int(win32.GetCurrentThreadId())
|
||||
}
|
||||
|
||||
|
||||
// When waited upon, blocks until the internal count is greater than zero, then subtracts one.
|
||||
// Posting to the semaphore increases the count by one, or the provided amount.
|
||||
Semaphore :: struct {
|
||||
_handle: win32.HANDLE,
|
||||
}
|
||||
|
||||
semaphore_init :: proc(s: ^Semaphore, initial_count := 0) {
|
||||
s._handle = win32.CreateSemaphoreW(nil, i32(initial_count), 1<<31-1, nil)
|
||||
}
|
||||
|
||||
semaphore_destroy :: proc(s: ^Semaphore) {
|
||||
win32.CloseHandle(s._handle)
|
||||
}
|
||||
|
||||
semaphore_post :: proc(s: ^Semaphore, count := 1) {
|
||||
win32.ReleaseSemaphore(s._handle, i32(count), nil)
|
||||
}
|
||||
|
||||
semaphore_wait_for :: proc(s: ^Semaphore) {
|
||||
// NOTE(tetra, 2019-10-30): wait_for_single_object decrements the count before it returns.
|
||||
result := win32.WaitForSingleObject(s._handle, win32.INFINITE)
|
||||
assert(result != win32.WAIT_FAILED)
|
||||
}


Mutex :: struct {
	_critical_section: win32.CRITICAL_SECTION,
}

mutex_init :: proc(m: ^Mutex, spin_count := 0) {
	win32.InitializeCriticalSectionAndSpinCount(&m._critical_section, u32(spin_count))
}

mutex_destroy :: proc(m: ^Mutex) {
	win32.DeleteCriticalSection(&m._critical_section)
}

mutex_lock :: proc(m: ^Mutex) {
	win32.EnterCriticalSection(&m._critical_section)
}

mutex_try_lock :: proc(m: ^Mutex) -> bool {
	return bool(win32.TryEnterCriticalSection(&m._critical_section))
}

mutex_unlock :: proc(m: ^Mutex) {
	win32.LeaveCriticalSection(&m._critical_section)
}

Blocking_Mutex :: struct {
	_handle: win32.SRWLOCK,
}

blocking_mutex_init :: proc(m: ^Blocking_Mutex) {
	win32.InitializeSRWLock(&m._handle)
}

blocking_mutex_destroy :: proc(m: ^Blocking_Mutex) {
	//
}

blocking_mutex_lock :: proc(m: ^Blocking_Mutex) {
	win32.AcquireSRWLockExclusive(&m._handle)
}

blocking_mutex_try_lock :: proc(m: ^Blocking_Mutex) -> bool {
	return bool(win32.TryAcquireSRWLockExclusive(&m._handle))
}

blocking_mutex_unlock :: proc(m: ^Blocking_Mutex) {
	win32.ReleaseSRWLockExclusive(&m._handle)
}


// Blocks until signalled.
// When signalled, awakens exactly one waiting thread.
Condition :: struct {
	_handle: win32.CONDITION_VARIABLE,

	mutex: Condition_Mutex_Ptr,
}

condition_init :: proc(c: ^Condition, mutex: Condition_Mutex_Ptr) -> bool {
	assert(mutex != nil)
	win32.InitializeConditionVariable(&c._handle)
	c.mutex = mutex
	return true
}

condition_destroy :: proc(c: ^Condition) {
	//
}

condition_signal :: proc(c: ^Condition) -> bool {
	if c._handle.ptr == nil {
		return false
	}
	win32.WakeConditionVariable(&c._handle)
	return true
}

condition_broadcast :: proc(c: ^Condition) -> bool {
	if c._handle.ptr == nil {
		return false
	}
	win32.WakeAllConditionVariable(&c._handle)
	return true
}

condition_wait_for :: proc(c: ^Condition) -> bool {
	switch m in &c.mutex {
	case ^Mutex:
		return cast(bool)win32.SleepConditionVariableCS(&c._handle, &m._critical_section, win32.INFINITE)
	case ^Blocking_Mutex:
		return cast(bool)win32.SleepConditionVariableSRW(&c._handle, &m._handle, win32.INFINITE, 0)
	}
	return false
}

condition_wait_for_timeout :: proc(c: ^Condition, duration: time.Duration) -> bool {
	ms := win32.DWORD((max(time.duration_nanoseconds(duration), 0) + 999999)/1000000)
	switch m in &c.mutex {
	case ^Mutex:
		return cast(bool)win32.SleepConditionVariableCS(&c._handle, &m._critical_section, ms)
	case ^Blocking_Mutex:
		return cast(bool)win32.SleepConditionVariableSRW(&c._handle, &m._handle, ms, 0)
	}
	return false
}


RW_Lock :: struct {
	_handle: win32.SRWLOCK,
}

rw_lock_init :: proc(l: ^RW_Lock) {
	l._handle = win32.SRWLOCK_INIT
}

rw_lock_destroy :: proc(l: ^RW_Lock) {
	//
}

rw_lock_read :: proc(l: ^RW_Lock) {
	win32.AcquireSRWLockShared(&l._handle)
}

rw_lock_try_read :: proc(l: ^RW_Lock) -> bool {
	return bool(win32.TryAcquireSRWLockShared(&l._handle))
}

rw_lock_write :: proc(l: ^RW_Lock) {
	win32.AcquireSRWLockExclusive(&l._handle)
}

rw_lock_try_write :: proc(l: ^RW_Lock) -> bool {
	return bool(win32.TryAcquireSRWLockExclusive(&l._handle))
}

rw_lock_read_unlock :: proc(l: ^RW_Lock) {
	win32.ReleaseSRWLockShared(&l._handle)
}

rw_lock_write_unlock :: proc(l: ^RW_Lock) {
	win32.ReleaseSRWLockExclusive(&l._handle)
}


thread_yield :: proc() {
	win32.SwitchToThread()
}

@@ -1,58 +0,0 @@
package sync

import "core:intrinsics"

Wait_Group :: struct {
	counter: int,
	mutex:   Blocking_Mutex,
	cond:    Condition,
}

wait_group_init :: proc(wg: ^Wait_Group) {
	wg.counter = 0
	blocking_mutex_init(&wg.mutex)
	condition_init(&wg.cond, &wg.mutex)
}

wait_group_destroy :: proc(wg: ^Wait_Group) {
	condition_destroy(&wg.cond)
	blocking_mutex_destroy(&wg.mutex)
}

wait_group_add :: proc(wg: ^Wait_Group, delta: int) {
	if delta == 0 {
		return
	}

	blocking_mutex_lock(&wg.mutex)
	defer blocking_mutex_unlock(&wg.mutex)

	intrinsics.atomic_add(&wg.counter, delta)
	if wg.counter < 0 {
		panic("sync.Wait_Group negative counter")
	}
	if wg.counter == 0 {
		condition_broadcast(&wg.cond)
		if wg.counter != 0 {
			panic("sync.Wait_Group misuse: sync.wait_group_add called concurrently with sync.wait_group_wait")
		}
	}
}

wait_group_done :: proc(wg: ^Wait_Group) {
	wait_group_add(wg, -1)
}

wait_group_wait :: proc(wg: ^Wait_Group) {
	blocking_mutex_lock(&wg.mutex)
	defer blocking_mutex_unlock(&wg.mutex)

	if wg.counter != 0 {
		condition_wait_for(&wg.cond)
		if wg.counter != 0 {
			panic("sync.Wait_Group misuse: sync.wait_group_add called concurrently with sync.wait_group_wait")
		}
	}
}
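The intended protocol mirrors Go's sync.WaitGroup; a hypothetical sketch of how callers were expected to drive this removed API:

wg: Wait_Group
wait_group_init(&wg)
defer wait_group_destroy(&wg)

wait_group_add(&wg, 2)  // announce two units of work before starting them
// each worker thread calls wait_group_done(&wg) when its unit completes
wait_group_wait(&wg)    // blocks until the counter drops back to zero

The panics above enforce the same rule as Go: wait_group_add must not race with wait_group_wait.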

@@ -21,7 +21,7 @@ sema_wait :: proc "contextless" (s: ^Sema) {
		win32.WaitOnAddress(&s.count, &original_count, size_of(original_count), win32.INFINITE)
		original_count = s.count
	}
	if original_count == intrinsics.atomic_cxchg(&s.count, original_count-1, original_count) {
	if original_count == intrinsics.atomic_compare_exchange_strong(&s.count, original_count-1, original_count) {
		return
	}
}
@@ -46,7 +46,7 @@ sema_wait_with_timeout :: proc "contextless" (s: ^Sema, duration: time.Duration)
	}
	original_count = s.count
}
	if original_count == intrinsics.atomic_cxchg(&s.count, original_count-1, original_count) {
	if original_count == intrinsics.atomic_compare_exchange_strong(&s.count, original_count-1, original_count) {
		return true
	}
}
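Both hunks keep the same lock-free decrement: read the count, try to install count-1, and treat "the returned previous value equals what we read" as success. A sketch of that retry shape, inferring from the calls above that the intrinsic returns the previous value and mirroring their argument order:

for {
	original := intrinsics.atomic_load(&s.count)
	if original == 0 {
		continue // nothing to take yet; the real code waits on the address instead of spinning
	}
	if original == intrinsics.atomic_compare_exchange_strong(&s.count, original-1, original) {
		break // we won the race; the count is now original-1
	}
	// another thread modified s.count first; retry with a fresh value
}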

@@ -1,67 +1,75 @@
package thread

/*
	thread.Pool
	Copyright 2022 eisbehr
	Made available under Odin's BSD-3 license.
*/

import "core:intrinsics"
import "core:sync"
import "core:mem"

Task_Status :: enum i32 {
	Ready,
	Busy,
	Waiting,
	Term,
}

Task_Proc :: #type proc(task: ^Task)
Task_Proc :: #type proc(task: Task)

Task :: struct {
	procedure: Task_Proc,
	data: rawptr,
	procedure:  Task_Proc,
	data:       rawptr,
	user_index: int,
	allocator:  mem.Allocator,
}

Task_Id :: distinct i32
INVALID_TASK_ID :: Task_Id(-1)


// Do not access the pool's members directly while the pool threads are running,
// since they use different kinds of locking and mutual exclusion devices.
// Careless access can and will lead to nasty bugs. Once initialized, the
// pool's memory address is not allowed to change until it is destroyed.
Pool :: struct {
	allocator: mem.Allocator,
	mutex: sync.Mutex,
	sem_available: sync.Semaphore,
	processing_task_count: int, // atomic
	is_running: bool,
	allocator:     mem.Allocator,
	mutex:         sync.Mutex,
	sem_available: sync.Sema,

	// the following values are atomic
	num_waiting:       int,
	num_in_processing: int,
	num_outstanding:   int, // num_waiting + num_in_processing
	num_done:          int,
	// end of atomics

	is_running: bool,

	threads: []^Thread,

	tasks: [dynamic]Task,
	tasks:      [dynamic]Task,
	tasks_done: [dynamic]Task,
}

pool_init :: proc(pool: ^Pool, thread_count: int, allocator := context.allocator) {
	worker_thread_internal :: proc(t: ^Thread) {
		pool := (^Pool)(t.data)

		for pool.is_running {
			sync.semaphore_wait_for(&pool.sem_available)

			if task, ok := pool_try_and_pop_task(pool); ok {
				pool_do_work(pool, &task)
			}
		}

		sync.semaphore_post(&pool.sem_available, 1)
	}

// Once initialized, the pool's memory address is not allowed to change until
// it is destroyed. If thread_count < 1, a thread count of 1 will be used.
//
// The thread pool requires an allocator which it either owns, or which is thread safe.
pool_init :: proc(pool: ^Pool, thread_count: int, allocator: mem.Allocator) {
	context.allocator = allocator
	pool.allocator = allocator
	pool.tasks = make([dynamic]Task)
	pool.threads = make([]^Thread, thread_count)
	pool.tasks      = make([dynamic]Task)
	pool.tasks_done = make([dynamic]Task)
	pool.threads    = make([]^Thread, max(thread_count, 1))

	sync.mutex_init(&pool.mutex)
	sync.semaphore_init(&pool.sem_available)
	pool.is_running = true

	for _, i in pool.threads {
		t := create(worker_thread_internal)
		t := create(proc(t: ^Thread) {
			pool := (^Pool)(t.data)

			for intrinsics.atomic_load(&pool.is_running) {
				sync.wait(&pool.sem_available)

				if task, ok := pool_pop_waiting(pool); ok {
					pool_do_work(pool, task)
				}
			}

			sync.post(&pool.sem_available, 1)
		})
		t.user_index = i
		t.data = pool
		pool.threads[i] = t
@@ -70,15 +78,13 @@ pool_init :: proc(pool: ^Pool, thread_count: int, allocator := context.allocator

pool_destroy :: proc(pool: ^Pool) {
	delete(pool.tasks)
	delete(pool.tasks_done)

	for thread in &pool.threads {
		destroy(thread)
	for t in &pool.threads {
		destroy(t)
	}

	delete(pool.threads, pool.allocator)

	sync.mutex_destroy(&pool.mutex)
	sync.semaphore_destroy(&pool.sem_available)
}

pool_start :: proc(pool: ^Pool) {
@@ -87,10 +93,12 @@ pool_start :: proc(pool: ^Pool) {
	}
}

// Finish tasks that have already started processing, then shut down all pool
// threads. Might leave waiting tasks behind; any memory allocated for the
// user data of those tasks will not be freed.
pool_join :: proc(pool: ^Pool) {
	pool.is_running = false

	sync.semaphore_post(&pool.sem_available, len(pool.threads))
	intrinsics.atomic_store(&pool.is_running, false)
	sync.post(&pool.sem_available, len(pool.threads))

	yield()

@@ -99,53 +107,112 @@ pool_join :: proc(pool: ^Pool) {
	}
}

pool_add_task :: proc(pool: ^Pool, procedure: Task_Proc, data: rawptr, user_index: int = 0) {
	sync.mutex_lock(&pool.mutex)
	defer sync.mutex_unlock(&pool.mutex)
// Add a task to the thread pool.
//
// Tasks can be added from any thread, not just the thread that created
// the thread pool. You can even add tasks from inside other tasks.
//
// Each task also needs an allocator which it either owns, or which is thread
// safe. By default, allocations in the task are disabled by use of the
// nil_allocator.
pool_add_task :: proc(pool: ^Pool, procedure: Task_Proc, data: rawptr, user_index: int = 0, allocator := context.allocator) {
	sync.guard(&pool.mutex)

	task: Task
	task.procedure = procedure
	task.data = data
	task.user_index = user_index

	append(&pool.tasks, task)
	sync.semaphore_post(&pool.sem_available, 1)
	append(&pool.tasks, Task{
		procedure  = procedure,
		data       = data,
		user_index = user_index,
		allocator  = allocator,
	})
	intrinsics.atomic_add(&pool.num_waiting, 1)
	intrinsics.atomic_add(&pool.num_outstanding, 1)
	sync.post(&pool.sem_available, 1)
}
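A hypothetical call site for the new signature (my_pool and my_thread_safe_allocator are stand-ins, not names from the diff):

print_task :: proc(t: thread.Task) {
	// Runs on a pool thread; context.allocator is set to t.allocator by pool_do_work.
	fmt.println("task", t.user_index)
}

// Default allocator for the task's own allocations:
thread.pool_add_task(&my_pool, print_task, nil, user_index = 1)
// Or pass one explicitly; it must be thread safe or owned by this task alone:
thread.pool_add_task(&my_pool, print_task, nil, 2, my_thread_safe_allocator)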

pool_try_and_pop_task :: proc(pool: ^Pool) -> (task: Task, got_task: bool = false) {
	if sync.mutex_try_lock(&pool.mutex) {
		if len(pool.tasks) != 0 {
			intrinsics.atomic_add(&pool.processing_task_count, 1)
			task = pop_front(&pool.tasks)
			got_task = true
		}
		sync.mutex_unlock(&pool.mutex)
// Number of tasks waiting to be processed. Only informational, mostly for
// debugging. Don't rely on this value being consistent with other num_*
// values.
pool_num_waiting :: #force_inline proc(pool: ^Pool) -> int {
	return intrinsics.atomic_load(&pool.num_waiting)
}

// Number of tasks currently being processed. Only informational, mostly for
// debugging. Don't rely on this value being consistent with other num_*
// values.
pool_num_in_processing :: #force_inline proc(pool: ^Pool) -> int {
	return intrinsics.atomic_load(&pool.num_in_processing)
}

// Outstanding tasks are all tasks that are not done, that is, tasks that are
// waiting, as well as tasks that are currently being processed. Only
// informational, mostly for debugging. Don't rely on this value being
// consistent with other num_* values.
pool_num_outstanding :: #force_inline proc(pool: ^Pool) -> int {
	return intrinsics.atomic_load(&pool.num_outstanding)
}

// Number of tasks which are done processing. Only informational, mostly for
// debugging. Don't rely on this value being consistent with other num_*
// values.
pool_num_done :: #force_inline proc(pool: ^Pool) -> int {
	return intrinsics.atomic_load(&pool.num_done)
}

// If tasks are only being added from one thread, and this procedure is being
// called from that same thread, it will reliably tell if the thread pool is
// empty or not. Empty in this case means there are no tasks waiting, being
// processed, or _done_.
pool_is_empty :: #force_inline proc(pool: ^Pool) -> bool {
	return pool_num_outstanding(pool) == 0 && pool_num_done(pool) == 0
}

// Mostly for internal use.
pool_pop_waiting :: proc(pool: ^Pool) -> (task: Task, got_task: bool) {
	sync.guard(&pool.mutex)

	if len(pool.tasks) != 0 {
		intrinsics.atomic_sub(&pool.num_waiting, 1)
		intrinsics.atomic_add(&pool.num_in_processing, 1)
		task = pop_front(&pool.tasks)
		got_task = true
	}

	return
}

// Use this to take out finished tasks.
pool_pop_done :: proc(pool: ^Pool) -> (task: Task, got_task: bool) {
	sync.guard(&pool.mutex)

pool_do_work :: proc(pool: ^Pool, task: ^Task) {
	task.procedure(task)
	intrinsics.atomic_sub(&pool.processing_task_count, 1)
}

pool_wait_and_process :: proc(pool: ^Pool) {
	for len(pool.tasks) != 0 || intrinsics.atomic_load(&pool.processing_task_count) != 0 {
		if task, ok := pool_try_and_pop_task(pool); ok {
			pool_do_work(pool, &task)
		}

		// Safety kick
		if len(pool.tasks) != 0 && intrinsics.atomic_load(&pool.processing_task_count) == 0 {
			sync.mutex_lock(&pool.mutex)
			sync.semaphore_post(&pool.sem_available, len(pool.tasks))
			sync.mutex_unlock(&pool.mutex)
		}

		yield()
	if len(pool.tasks_done) != 0 {
		task = pop_front(&pool.tasks_done)
		got_task = true
		intrinsics.atomic_sub(&pool.num_done, 1)
	}

	return
}

// Mostly for internal use.
pool_do_work :: proc(pool: ^Pool, task: Task) {
	{
		context.allocator = task.allocator
		task.procedure(task)
	}

	sync.guard(&pool.mutex)

	append(&pool.tasks_done, task)
	intrinsics.atomic_add(&pool.num_done, 1)
	intrinsics.atomic_sub(&pool.num_outstanding, 1)
	intrinsics.atomic_sub(&pool.num_in_processing, 1)
}

// Process the rest of the tasks, also using this thread for processing, then join
// all the pool threads.
pool_finish :: proc(pool: ^Pool) {
	for task in pool_pop_waiting(pool) {
		pool_do_work(pool, task)
	}
	pool_join(pool)
}
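Putting the pieces together, a hypothetical end-to-end run (print_task as above; error handling elided). Draining the done queue is the only way results leave the pool:

pool: thread.Pool
thread.pool_init(&pool, 4, context.allocator)
defer thread.pool_destroy(&pool)

thread.pool_start(&pool)
for i in 0..<10 {
	thread.pool_add_task(&pool, print_task, nil, i)
}
thread.pool_finish(&pool) // helps process leftover tasks on this thread, then joins the workers

// pool_pop_done hands each finished Task back exactly once.
for task in thread.pool_pop_done(&pool) {
	fmt.println("done:", task.user_index)
}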

@@ -4,7 +4,7 @@ package thread

import "core:runtime"
import "core:intrinsics"
import sync "core:sync/sync2"
import "core:sync"
import "core:sys/unix"

Thread_State :: enum u8 {

@@ -3,13 +3,21 @@
package thread

import "core:runtime"
import sync "core:sync/sync2"
import "core:intrinsics"
import "core:sync"
import win32 "core:sys/windows"

Thread_State :: enum u8 {
	Started,
	Joined,
	Done,
}

Thread_Os_Specific :: struct {
	win32_thread:    win32.HANDLE,
	win32_thread_id: win32.DWORD,
	done: bool, // see note in `is_done`
	mutex: sync.Mutex,
	flags: bit_set[Thread_State; u8],
}

_thread_priority_map := [Thread_Priority]i32{

@@ -26,15 +34,16 @@ _create :: proc(procedure: Thread_Proc, priority := Thread_Priority.Normal) -> ^
	context = t.init_context.? or_else runtime.default_context()

	t.id = sync.current_thread_id()

	t.procedure(t)

	intrinsics.atomic_store(&t.flags, t.flags + {.Done})

	if t.init_context == nil {
		if context.temp_allocator.data == &runtime.global_default_temp_allocator_data {
			runtime.default_temp_allocator_destroy(auto_cast context.temp_allocator.data)
		}
	}

	sync.atomic_store(&t.done, true)
	return 0
}

@@ -61,23 +70,31 @@ _create :: proc(procedure: Thread_Proc, priority := Thread_Priority.Normal) -> ^
	return thread
}

_start :: proc(thread: ^Thread) {
	win32.ResumeThread(thread.win32_thread)
_start :: proc(t: ^Thread) {
	sync.guard(&t.mutex)
	t.flags += {.Started}
	win32.ResumeThread(t.win32_thread)
}

_is_done :: proc(using thread: ^Thread) -> bool {
_is_done :: proc(t: ^Thread) -> bool {
	// NOTE(tetra, 2019-10-31): Apparently using WaitForSingleObject and
	// checking that it didn't time out immediately is not good enough,
	// so we do it this way instead.
	return sync.atomic_load(&done)
	return .Done in sync.atomic_load(&t.flags)
}

_join :: proc(using thread: ^Thread) {
	if win32_thread != win32.INVALID_HANDLE {
		win32.WaitForSingleObject(win32_thread, win32.INFINITE)
		win32.CloseHandle(win32_thread)
		win32_thread = win32.INVALID_HANDLE
_join :: proc(t: ^Thread) {
	sync.guard(&t.mutex)

	if .Joined in t.flags || t.win32_thread == win32.INVALID_HANDLE {
		return
	}

	win32.WaitForSingleObject(t.win32_thread, win32.INFINITE)
	win32.CloseHandle(t.win32_thread)
	t.win32_thread = win32.INVALID_HANDLE

	t.flags += {.Joined}
}

_join_multiple :: proc(threads: ..^Thread) {

@@ -96,7 +96,6 @@ import sort "core:sort"
import strconv "core:strconv"
import strings "core:strings"
import sync "core:sync"
import sync2 "core:sync/sync2"
import testing "core:testing"
import scanner "core:text/scanner"
import thread "core:thread"
@@ -187,7 +186,6 @@ _ :: sort
_ :: strconv
_ :: strings
_ :: sync
_ :: sync2
_ :: testing
_ :: scanner
_ :: thread

@@ -1147,7 +1147,7 @@ threading_example :: proc() {

	{ // Thread Pool
		fmt.println("\n## Thread Pool")
		task_proc :: proc(t: ^thread.Task) {
		task_proc :: proc(t: thread.Task) {
			index := t.user_index % len(prefix_table)
			for iteration in 1..=5 {
				fmt.printf("Worker Task %d is on iteration %d\n", t.user_index, iteration)
@@ -1157,7 +1157,7 @@ threading_example :: proc() {
		}

		pool: thread.Pool
		thread.pool_init(pool=&pool, thread_count=3)
		thread.pool_init(pool=&pool, thread_count=3, allocator=context.allocator)
		defer thread.pool_destroy(&pool)

@@ -1166,7 +1166,7 @@ threading_example :: proc() {
		}

		thread.pool_start(&pool)
		thread.pool_wait_and_process(&pool)
		thread.pool_finish(&pool)
	}
}

@@ -379,6 +379,35 @@ bool check_builtin_objc_procedure(CheckerContext *c, Operand *operand, Ast *call
	}
}

bool check_atomic_memory_order_argument(CheckerContext *c, Ast *expr, String const &builtin_name, OdinAtomicMemoryOrder *memory_order_, char const *extra_message = nullptr) {
	Operand x = {};
	check_expr_with_type_hint(c, &x, expr, t_atomic_memory_order);
	if (x.mode == Addressing_Invalid) {
		return false;
	}
	if (!are_types_identical(x.type, t_atomic_memory_order) || x.mode != Addressing_Constant) {
		gbString str = type_to_string(x.type);
		if (extra_message) {
			error(x.expr, "Expected a constant Atomic_Memory_Order value for the %s of '%.*s', got %s", extra_message, LIT(builtin_name), str);
		} else {
			error(x.expr, "Expected a constant Atomic_Memory_Order value for '%.*s', got %s", LIT(builtin_name), str);
		}
		gb_string_free(str);
		return false;
	}
	i64 value = exact_value_to_i64(x.value);
	if (value < 0 || value >= OdinAtomicMemoryOrder_COUNT) {
		error(x.expr, "Illegal Atomic_Memory_Order value, got %lld", cast(long long)value);
		return false;
	}
	if (memory_order_) {
		*memory_order_ = cast(OdinAtomicMemoryOrder)value;
	}

	return true;
}
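In Odin terms, the check above means the ordering argument must be a compile-time Atomic_Memory_Order constant; roughly (a sketch, with the commented call illustrating what gets rejected):

import "core:intrinsics"

demo :: proc() {
	x: int
	intrinsics.atomic_store_explicit(&x, 1, .Release) // constant order: accepted

	// order := pick_order_at_runtime() // hypothetical
	// intrinsics.atomic_store_explicit(&x, 2, order) // rejected: not a constant
}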

bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32 id, Type *type_hint) {
	ast_node(ce, CallExpr, call);
	if (ce->inlining != ProcInlining_none) {
@@ -423,6 +452,11 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
		// NOTE(bill): The first arg may be a Type, this will be checked case by case
		break;

	case BuiltinProc_atomic_thread_fence:
	case BuiltinProc_atomic_signal_fence:
		// NOTE(bill): first type will require a type hint
		break;

	case BuiltinProc_DIRECTIVE: {
		ast_node(bd, BasicDirective, ce->proc);
		String name = bd->name.string;
@@ -3198,11 +3232,27 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
		break;


	case BuiltinProc_atomic_fence:
	case BuiltinProc_atomic_fence_acq:
	case BuiltinProc_atomic_fence_rel:
	case BuiltinProc_atomic_fence_acqrel:
		operand->mode = Addressing_NoValue;

	case BuiltinProc_atomic_thread_fence:
	case BuiltinProc_atomic_signal_fence:
		{
			OdinAtomicMemoryOrder memory_order = {};
			if (!check_atomic_memory_order_argument(c, ce->args[0], builtin_name, &memory_order)) {
				return false;
			}
			switch (memory_order) {
			case OdinAtomicMemoryOrder_acquire:
			case OdinAtomicMemoryOrder_release:
			case OdinAtomicMemoryOrder_acq_rel:
			case OdinAtomicMemoryOrder_seq_cst:
				break;
			default:
				error(ce->args[0], "Illegal memory ordering for '%.*s', got .%s", LIT(builtin_name), OdinAtomicMemoryOrder_strings[memory_order]);
				break;
			}

			operand->mode = Addressing_NoValue;
		}
		break;

	case BuiltinProc_volatile_store:
@@ -3210,9 +3260,6 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
	case BuiltinProc_unaligned_store:
		/*fallthrough*/
	case BuiltinProc_atomic_store:
	case BuiltinProc_atomic_store_rel:
	case BuiltinProc_atomic_store_relaxed:
	case BuiltinProc_atomic_store_unordered:
		{
			Type *elem = nullptr;
			if (!is_type_normal_pointer(operand->type, &elem)) {
@@ -3228,14 +3275,40 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
			break;
		}

	case BuiltinProc_atomic_store_explicit:
		{
			Type *elem = nullptr;
			if (!is_type_normal_pointer(operand->type, &elem)) {
				error(operand->expr, "Expected a pointer for '%.*s'", LIT(builtin_name));
				return false;
			}
			Operand x = {};
			check_expr_with_type_hint(c, &x, ce->args[1], elem);
			check_assignment(c, &x, elem, builtin_name);

			OdinAtomicMemoryOrder memory_order = {};
			if (!check_atomic_memory_order_argument(c, ce->args[2], builtin_name, &memory_order)) {
				return false;
			}
			switch (memory_order) {
			case OdinAtomicMemoryOrder_consume:
			case OdinAtomicMemoryOrder_acquire:
			case OdinAtomicMemoryOrder_acq_rel:
				error(ce->args[2], "Illegal memory order .%s for '%.*s'", OdinAtomicMemoryOrder_strings[memory_order], LIT(builtin_name));
				break;
			}

			operand->type = nullptr;
			operand->mode = Addressing_NoValue;
			break;
		}


	case BuiltinProc_volatile_load:
		/*fallthrough*/
	case BuiltinProc_unaligned_load:
		/*fallthrough*/
	case BuiltinProc_atomic_load:
	case BuiltinProc_atomic_load_acq:
	case BuiltinProc_atomic_load_relaxed:
	case BuiltinProc_atomic_load_unordered:
		{
			Type *elem = nullptr;
			if (!is_type_normal_pointer(operand->type, &elem)) {
@@ -3247,41 +3320,38 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
			break;
		}

	case BuiltinProc_atomic_load_explicit:
		{
			Type *elem = nullptr;
			if (!is_type_normal_pointer(operand->type, &elem)) {
				error(operand->expr, "Expected a pointer for '%.*s'", LIT(builtin_name));
				return false;
			}

			OdinAtomicMemoryOrder memory_order = {};
			if (!check_atomic_memory_order_argument(c, ce->args[1], builtin_name, &memory_order)) {
				return false;
			}

			switch (memory_order) {
			case OdinAtomicMemoryOrder_release:
			case OdinAtomicMemoryOrder_acq_rel:
				error(ce->args[1], "Illegal memory order .%s for '%.*s'", OdinAtomicMemoryOrder_strings[memory_order], LIT(builtin_name));
				break;
			}

			operand->type = elem;
			operand->mode = Addressing_Value;
			break;
		}

	case BuiltinProc_atomic_add:
	case BuiltinProc_atomic_add_acq:
	case BuiltinProc_atomic_add_rel:
	case BuiltinProc_atomic_add_acqrel:
	case BuiltinProc_atomic_add_relaxed:
	case BuiltinProc_atomic_sub:
	case BuiltinProc_atomic_sub_acq:
	case BuiltinProc_atomic_sub_rel:
	case BuiltinProc_atomic_sub_acqrel:
	case BuiltinProc_atomic_sub_relaxed:
	case BuiltinProc_atomic_and:
	case BuiltinProc_atomic_and_acq:
	case BuiltinProc_atomic_and_rel:
	case BuiltinProc_atomic_and_acqrel:
	case BuiltinProc_atomic_and_relaxed:
	case BuiltinProc_atomic_nand:
	case BuiltinProc_atomic_nand_acq:
	case BuiltinProc_atomic_nand_rel:
	case BuiltinProc_atomic_nand_acqrel:
	case BuiltinProc_atomic_nand_relaxed:
	case BuiltinProc_atomic_or:
	case BuiltinProc_atomic_or_acq:
	case BuiltinProc_atomic_or_rel:
	case BuiltinProc_atomic_or_acqrel:
	case BuiltinProc_atomic_or_relaxed:
	case BuiltinProc_atomic_xor:
	case BuiltinProc_atomic_xor_acq:
	case BuiltinProc_atomic_xor_rel:
	case BuiltinProc_atomic_xor_acqrel:
	case BuiltinProc_atomic_xor_relaxed:
	case BuiltinProc_atomic_xchg:
	case BuiltinProc_atomic_xchg_acq:
	case BuiltinProc_atomic_xchg_rel:
	case BuiltinProc_atomic_xchg_acqrel:
	case BuiltinProc_atomic_xchg_relaxed:
	case BuiltinProc_atomic_exchange:
		{
			Type *elem = nullptr;
			if (!is_type_normal_pointer(operand->type, &elem)) {
@@ -3297,25 +3367,35 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
			break;
		}

	case BuiltinProc_atomic_cxchg:
	case BuiltinProc_atomic_cxchg_acq:
	case BuiltinProc_atomic_cxchg_rel:
	case BuiltinProc_atomic_cxchg_acqrel:
	case BuiltinProc_atomic_cxchg_relaxed:
	case BuiltinProc_atomic_cxchg_failrelaxed:
	case BuiltinProc_atomic_cxchg_failacq:
	case BuiltinProc_atomic_cxchg_acq_failrelaxed:
	case BuiltinProc_atomic_cxchg_acqrel_failrelaxed:
	case BuiltinProc_atomic_add_explicit:
	case BuiltinProc_atomic_sub_explicit:
	case BuiltinProc_atomic_and_explicit:
	case BuiltinProc_atomic_nand_explicit:
	case BuiltinProc_atomic_or_explicit:
	case BuiltinProc_atomic_xor_explicit:
	case BuiltinProc_atomic_exchange_explicit:
		{
			Type *elem = nullptr;
			if (!is_type_normal_pointer(operand->type, &elem)) {
				error(operand->expr, "Expected a pointer for '%.*s'", LIT(builtin_name));
				return false;
			}
			Operand x = {};
			check_expr_with_type_hint(c, &x, ce->args[1], elem);
			check_assignment(c, &x, elem, builtin_name);

	case BuiltinProc_atomic_cxchgweak:
	case BuiltinProc_atomic_cxchgweak_acq:
	case BuiltinProc_atomic_cxchgweak_rel:
	case BuiltinProc_atomic_cxchgweak_acqrel:
	case BuiltinProc_atomic_cxchgweak_relaxed:
	case BuiltinProc_atomic_cxchgweak_failrelaxed:
	case BuiltinProc_atomic_cxchgweak_failacq:
	case BuiltinProc_atomic_cxchgweak_acq_failrelaxed:
	case BuiltinProc_atomic_cxchgweak_acqrel_failrelaxed:

			if (!check_atomic_memory_order_argument(c, ce->args[2], builtin_name, nullptr)) {
				return false;
			}

			operand->type = elem;
			operand->mode = Addressing_Value;
			break;
		}

	case BuiltinProc_atomic_compare_exchange_strong:
	case BuiltinProc_atomic_compare_exchange_weak:
		{
			Type *elem = nullptr;
			if (!is_type_normal_pointer(operand->type, &elem)) {
@@ -3333,7 +3413,92 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
			operand->type = elem;
			break;
		}
		break;

	case BuiltinProc_atomic_compare_exchange_strong_explicit:
	case BuiltinProc_atomic_compare_exchange_weak_explicit:
		{
			Type *elem = nullptr;
			if (!is_type_normal_pointer(operand->type, &elem)) {
				error(operand->expr, "Expected a pointer for '%.*s'", LIT(builtin_name));
				return false;
			}
			Operand x = {};
			Operand y = {};
			check_expr_with_type_hint(c, &x, ce->args[1], elem);
			check_expr_with_type_hint(c, &y, ce->args[2], elem);
			check_assignment(c, &x, elem, builtin_name);
			check_assignment(c, &y, elem, builtin_name);

			OdinAtomicMemoryOrder success_memory_order = {};
			OdinAtomicMemoryOrder failure_memory_order = {};
			if (!check_atomic_memory_order_argument(c, ce->args[3], builtin_name, &success_memory_order, "success ordering")) {
				return false;
			}
			if (!check_atomic_memory_order_argument(c, ce->args[4], builtin_name, &failure_memory_order, "failure ordering")) {
				return false;
			}

			bool invalid_combination = false;

			switch (success_memory_order) {
			case OdinAtomicMemoryOrder_relaxed:
			case OdinAtomicMemoryOrder_release:
				if (failure_memory_order != OdinAtomicMemoryOrder_relaxed) {
					invalid_combination = true;
				}
				break;
			case OdinAtomicMemoryOrder_consume:
				switch (failure_memory_order) {
				case OdinAtomicMemoryOrder_relaxed:
				case OdinAtomicMemoryOrder_consume:
					break;
				default:
					invalid_combination = true;
					break;
				}
				break;
			case OdinAtomicMemoryOrder_acquire:
			case OdinAtomicMemoryOrder_acq_rel:
				switch (failure_memory_order) {
				case OdinAtomicMemoryOrder_relaxed:
				case OdinAtomicMemoryOrder_consume:
				case OdinAtomicMemoryOrder_acquire:
					break;
				default:
					invalid_combination = true;
					break;
				}
				break;
			case OdinAtomicMemoryOrder_seq_cst:
				switch (failure_memory_order) {
				case OdinAtomicMemoryOrder_relaxed:
				case OdinAtomicMemoryOrder_consume:
				case OdinAtomicMemoryOrder_acquire:
				case OdinAtomicMemoryOrder_seq_cst:
					break;
				default:
					invalid_combination = true;
					break;
				}
				break;
			default:
				invalid_combination = true;
				break;
			}

			if (invalid_combination) {
				error(ce->args[3], "Illegal memory order pairing for '%.*s', success = .%s, failure = .%s",
					LIT(builtin_name),
					OdinAtomicMemoryOrder_strings[success_memory_order],
					OdinAtomicMemoryOrder_strings[failure_memory_order]
				);
			}

			operand->mode = Addressing_OptionalOk;
			operand->type = elem;
			break;
		}

	case BuiltinProc_fixed_point_mul:
	case BuiltinProc_fixed_point_div:
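The nested switches above encode the usual C11/LLVM rule: the failure ordering may not be stronger than the success ordering and may not be a release ordering. Roughly, at the Odin call site (argument layout as checked above: pointer, two values, then success and failure orders; a sketch, not the library's documentation):

pairing_demo :: proc() {
	x: int
	// success .Acq_Rel may pair with failure .Acquire: accepted
	_, _ = intrinsics.atomic_compare_exchange_strong_explicit(&x, 0, 1, .Acq_Rel, .Acquire)
	// success .Release only pairs with failure .Relaxed; this would be reported:
	// _, _ = intrinsics.atomic_compare_exchange_strong_explicit(&x, 0, 1, .Release, .Acquire)
}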

@@ -829,15 +829,16 @@ struct GlobalEnumValue {
	i64 value;
};

Slice<Entity *> add_global_enum_type(String const &type_name, GlobalEnumValue *values, isize value_count) {
Slice<Entity *> add_global_enum_type(String const &type_name, GlobalEnumValue *values, isize value_count, Type **enum_type_ = nullptr) {
	Scope *scope = create_scope(nullptr, builtin_pkg->scope);
	Entity *e = alloc_entity_type_name(scope, make_token_ident(type_name), nullptr, EntityState_Resolved);
	Entity *entity = alloc_entity_type_name(scope, make_token_ident(type_name), nullptr, EntityState_Resolved);

	Type *enum_type = alloc_type_enum();
	Type *named_type = alloc_type_named(type_name, enum_type, e);
	Type *named_type = alloc_type_named(type_name, enum_type, entity);
	set_base_type(named_type, enum_type);
	enum_type->Enum.base_type = t_int;
	enum_type->Enum.scope = scope;
	entity->type = named_type;

	auto fields = array_make<Entity *>(permanent_allocator(), value_count);
	for (isize i = 0; i < value_count; i++) {
@@ -858,6 +859,9 @@ Slice<Entity *> add_global_enum_type(String const &type_name, GlobalEnumValue *v
	enum_type->Enum.min_value = &enum_type->Enum.fields[enum_type->Enum.min_value_index]->Constant.value;
	enum_type->Enum.max_value = &enum_type->Enum.fields[enum_type->Enum.max_value_index]->Constant.value;

	if (enum_type_) *enum_type_ = named_type;

	return slice_from_array(fields);
}
void add_global_enum_constant(Slice<Entity *> const &fields, char const *name, i64 value) {
@@ -986,6 +990,21 @@ void init_universal(void) {
		add_global_enum_constant(fields, "ODIN_ERROR_POS_STYLE", build_context.ODIN_ERROR_POS_STYLE);
	}

	{
		GlobalEnumValue values[OdinAtomicMemoryOrder_COUNT] = {
			{OdinAtomicMemoryOrder_strings[OdinAtomicMemoryOrder_relaxed], OdinAtomicMemoryOrder_relaxed},
			{OdinAtomicMemoryOrder_strings[OdinAtomicMemoryOrder_consume], OdinAtomicMemoryOrder_consume},
			{OdinAtomicMemoryOrder_strings[OdinAtomicMemoryOrder_acquire], OdinAtomicMemoryOrder_acquire},
			{OdinAtomicMemoryOrder_strings[OdinAtomicMemoryOrder_release], OdinAtomicMemoryOrder_release},
			{OdinAtomicMemoryOrder_strings[OdinAtomicMemoryOrder_acq_rel], OdinAtomicMemoryOrder_acq_rel},
			{OdinAtomicMemoryOrder_strings[OdinAtomicMemoryOrder_seq_cst], OdinAtomicMemoryOrder_seq_cst},
		};

		add_global_enum_type(str_lit("Atomic_Memory_Order"), values, gb_count_of(values), &t_atomic_memory_order);
		GB_ASSERT(t_atomic_memory_order->kind == Type_Named);
		scope_insert(intrinsics_pkg->scope, t_atomic_memory_order->Named.type_name);
	}
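Because the type name is inserted into the intrinsics package scope, Odin code can refer to it as intrinsics.Atomic_Memory_Order, and the short .name form works wherever the type is expected. A small sketch (the alias name Order is illustrative):

import "core:intrinsics"

Order :: intrinsics.Atomic_Memory_Order

fence_demo :: proc() {
	intrinsics.atomic_thread_fence(Order.Seq_Cst) // same as passing .Seq_Cst
}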


	add_global_bool_constant("ODIN_DEBUG", bc->ODIN_DEBUG);
	add_global_bool_constant("ODIN_DISABLE_ASSERT", bc->ODIN_DISABLE_ASSERT);

@@ -86,77 +86,30 @@ enum BuiltinProcId {
	BuiltinProc_prefetch_write_instruction,
	BuiltinProc_prefetch_write_data,

	BuiltinProc_atomic_fence,
	BuiltinProc_atomic_fence_acq,
	BuiltinProc_atomic_fence_rel,
	BuiltinProc_atomic_fence_acqrel,

	BuiltinProc_atomic_thread_fence,
	BuiltinProc_atomic_signal_fence,
	BuiltinProc_atomic_store,
	BuiltinProc_atomic_store_rel,
	BuiltinProc_atomic_store_relaxed,
	BuiltinProc_atomic_store_unordered,

	BuiltinProc_atomic_store_explicit,
	BuiltinProc_atomic_load,
	BuiltinProc_atomic_load_acq,
	BuiltinProc_atomic_load_relaxed,
	BuiltinProc_atomic_load_unordered,

	BuiltinProc_atomic_load_explicit,
	BuiltinProc_atomic_add,
	BuiltinProc_atomic_add_acq,
	BuiltinProc_atomic_add_rel,
	BuiltinProc_atomic_add_acqrel,
	BuiltinProc_atomic_add_relaxed,
	BuiltinProc_atomic_add_explicit,
	BuiltinProc_atomic_sub,
	BuiltinProc_atomic_sub_acq,
	BuiltinProc_atomic_sub_rel,
	BuiltinProc_atomic_sub_acqrel,
	BuiltinProc_atomic_sub_relaxed,
	BuiltinProc_atomic_sub_explicit,
	BuiltinProc_atomic_and,
	BuiltinProc_atomic_and_acq,
	BuiltinProc_atomic_and_rel,
	BuiltinProc_atomic_and_acqrel,
	BuiltinProc_atomic_and_relaxed,
	BuiltinProc_atomic_and_explicit,
	BuiltinProc_atomic_nand,
	BuiltinProc_atomic_nand_acq,
	BuiltinProc_atomic_nand_rel,
	BuiltinProc_atomic_nand_acqrel,
	BuiltinProc_atomic_nand_relaxed,
	BuiltinProc_atomic_nand_explicit,
	BuiltinProc_atomic_or,
	BuiltinProc_atomic_or_acq,
	BuiltinProc_atomic_or_rel,
	BuiltinProc_atomic_or_acqrel,
	BuiltinProc_atomic_or_relaxed,
	BuiltinProc_atomic_or_explicit,
	BuiltinProc_atomic_xor,
	BuiltinProc_atomic_xor_acq,
	BuiltinProc_atomic_xor_rel,
	BuiltinProc_atomic_xor_acqrel,
	BuiltinProc_atomic_xor_relaxed,

	BuiltinProc_atomic_xchg,
	BuiltinProc_atomic_xchg_acq,
	BuiltinProc_atomic_xchg_rel,
	BuiltinProc_atomic_xchg_acqrel,
	BuiltinProc_atomic_xchg_relaxed,

	BuiltinProc_atomic_cxchg,
	BuiltinProc_atomic_cxchg_acq,
	BuiltinProc_atomic_cxchg_rel,
	BuiltinProc_atomic_cxchg_acqrel,
	BuiltinProc_atomic_cxchg_relaxed,
	BuiltinProc_atomic_cxchg_failrelaxed,
	BuiltinProc_atomic_cxchg_failacq,
	BuiltinProc_atomic_cxchg_acq_failrelaxed,
	BuiltinProc_atomic_cxchg_acqrel_failrelaxed,

	BuiltinProc_atomic_cxchgweak,
	BuiltinProc_atomic_cxchgweak_acq,
	BuiltinProc_atomic_cxchgweak_rel,
	BuiltinProc_atomic_cxchgweak_acqrel,
	BuiltinProc_atomic_cxchgweak_relaxed,
	BuiltinProc_atomic_cxchgweak_failrelaxed,
	BuiltinProc_atomic_cxchgweak_failacq,
	BuiltinProc_atomic_cxchgweak_acq_failrelaxed,
	BuiltinProc_atomic_cxchgweak_acqrel_failrelaxed,
	BuiltinProc_atomic_xor_explicit,
	BuiltinProc_atomic_exchange,
	BuiltinProc_atomic_exchange_explicit,
	BuiltinProc_atomic_compare_exchange_strong,
	BuiltinProc_atomic_compare_exchange_strong_explicit,
	BuiltinProc_atomic_compare_exchange_weak,
	BuiltinProc_atomic_compare_exchange_weak_explicit,

	BuiltinProc_fixed_point_mul,
	BuiltinProc_fixed_point_div,

@@ -352,78 +305,30 @@ gb_global BuiltinProc builtin_procs[BuiltinProc_COUNT] = {
	{STR_LIT("prefetch_write_instruction"), 2, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
	{STR_LIT("prefetch_write_data"),        2, false, Expr_Stmt, BuiltinProcPkg_intrinsics},

	{STR_LIT("atomic_fence"),        0, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_fence_acq"),    0, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_fence_rel"),    0, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_fence_acqrel"), 0, false, Expr_Stmt, BuiltinProcPkg_intrinsics},

	{STR_LIT("atomic_store"),           2, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_store_rel"),       2, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_store_relaxed"),   2, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_store_unordered"), 2, false, Expr_Stmt, BuiltinProcPkg_intrinsics},

	{STR_LIT("atomic_load"),           1, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_load_acq"),       1, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_load_relaxed"),   1, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_load_unordered"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics},

	{STR_LIT("atomic_add"),          2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_add_acq"),      2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_add_rel"),      2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_add_acqrel"),   2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_add_relaxed"),  2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_sub"),          2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_sub_acq"),      2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_sub_rel"),      2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_sub_acqrel"),   2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_sub_relaxed"),  2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_and"),          2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_and_acq"),      2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_and_rel"),      2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_and_acqrel"),   2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_and_relaxed"),  2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_nand"),         2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_nand_acq"),     2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_nand_rel"),     2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_nand_acqrel"),  2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_nand_relaxed"), 2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_or"),           2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_or_acq"),       2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_or_rel"),       2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_or_acqrel"),    2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_or_relaxed"),   2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_xor"),          2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_xor_acq"),      2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_xor_rel"),      2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_xor_acqrel"),   2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_xor_relaxed"),  2, false, Expr_Expr, BuiltinProcPkg_intrinsics},

	{STR_LIT("atomic_xchg"),         2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_xchg_acq"),     2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_xchg_rel"),     2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_xchg_acqrel"),  2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_xchg_relaxed"), 2, false, Expr_Expr, BuiltinProcPkg_intrinsics},

	{STR_LIT("atomic_cxchg"),                   3, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_cxchg_acq"),               3, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_cxchg_rel"),               3, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_cxchg_acqrel"),            3, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_cxchg_relaxed"),           3, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_cxchg_failrelaxed"),       3, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_cxchg_failacq"),           3, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_cxchg_acq_failrelaxed"),   3, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_cxchg_acqrel_failrelaxed"), 3, false, Expr_Expr, BuiltinProcPkg_intrinsics},

	{STR_LIT("atomic_cxchgweak"),                   3, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_cxchgweak_acq"),               3, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_cxchgweak_rel"),               3, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_cxchgweak_acqrel"),            3, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_cxchgweak_relaxed"),           3, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_cxchgweak_failrelaxed"),       3, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_cxchgweak_failacq"),           3, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_cxchgweak_acq_failrelaxed"),   3, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_cxchgweak_acqrel_failrelaxed"), 3, false, Expr_Expr, BuiltinProcPkg_intrinsics},

	{STR_LIT("atomic_thread_fence"), 1, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_signal_fence"), 1, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_store"),          2, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_store_explicit"), 3, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_load"),           1, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_load_explicit"),  2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_add"),            2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_add_explicit"),   3, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_sub"),            2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_sub_explicit"),   3, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_and"),            2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_and_explicit"),   3, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_nand"),           2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_nand_explicit"),  3, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_or"),             2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_or_explicit"),    3, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_xor"),            2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_xor_explicit"),   3, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_exchange"),          2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_exchange_explicit"), 3, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_compare_exchange_strong"),          3, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_compare_exchange_strong_explicit"), 5, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_compare_exchange_weak"),            3, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("atomic_compare_exchange_weak_explicit"),   5, false, Expr_Expr, BuiltinProcPkg_intrinsics},

	{STR_LIT("fixed_point_mul"), 3, false, Expr_Expr, BuiltinProcPkg_intrinsics},
	{STR_LIT("fixed_point_div"), 3, false, Expr_Expr, BuiltinProcPkg_intrinsics},

@@ -1606,36 +1606,26 @@ lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValue const &tv,
	}


	case BuiltinProc_atomic_fence:
		LLVMBuildFence(p->builder, LLVMAtomicOrderingSequentiallyConsistent, false, "");
		// TODO(bill): Which is correct?
	case BuiltinProc_atomic_thread_fence:
		LLVMBuildFence(p->builder, llvm_atomic_ordering_from_odin(ce->args[0]), false, "");
		return {};
	case BuiltinProc_atomic_fence_acq:
		LLVMBuildFence(p->builder, LLVMAtomicOrderingAcquire, false, "");
		return {};
	case BuiltinProc_atomic_fence_rel:
		LLVMBuildFence(p->builder, LLVMAtomicOrderingRelease, false, "");
		return {};
	case BuiltinProc_atomic_fence_acqrel:
		LLVMBuildFence(p->builder, LLVMAtomicOrderingAcquireRelease, false, "");
	case BuiltinProc_atomic_signal_fence:
		LLVMBuildFence(p->builder, llvm_atomic_ordering_from_odin(ce->args[0]), true, "");
		return {};

	case BuiltinProc_volatile_store:
	case BuiltinProc_atomic_store:
	case BuiltinProc_atomic_store_rel:
	case BuiltinProc_atomic_store_relaxed:
	case BuiltinProc_atomic_store_unordered: {
	case BuiltinProc_atomic_store_explicit: {
		lbValue dst = lb_build_expr(p, ce->args[0]);
		lbValue val = lb_build_expr(p, ce->args[1]);
		val = lb_emit_conv(p, val, type_deref(dst.type));

		LLVMValueRef instr = LLVMBuildStore(p->builder, val.value, dst.value);
		switch (id) {
		case BuiltinProc_volatile_store:         LLVMSetVolatile(instr, true);                                      break;
		case BuiltinProc_atomic_store:           LLVMSetOrdering(instr, LLVMAtomicOrderingSequentiallyConsistent);  break;
		case BuiltinProc_atomic_store_rel:       LLVMSetOrdering(instr, LLVMAtomicOrderingRelease);                 break;
		case BuiltinProc_atomic_store_relaxed:   LLVMSetOrdering(instr, LLVMAtomicOrderingMonotonic);               break;
		case BuiltinProc_atomic_store_unordered: LLVMSetOrdering(instr, LLVMAtomicOrderingUnordered);               break;
		case BuiltinProc_volatile_store:        LLVMSetVolatile(instr, true);                                        break;
		case BuiltinProc_atomic_store:          LLVMSetOrdering(instr, LLVMAtomicOrderingSequentiallyConsistent);    break;
		case BuiltinProc_atomic_store_explicit: LLVMSetOrdering(instr, llvm_atomic_ordering_from_odin(ce->args[2])); break;
		}

		LLVMSetAlignment(instr, cast(unsigned)type_align_of(type_deref(dst.type)));
|
||||
@@ -1645,18 +1635,14 @@ lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValue const &tv,
|
||||
|
||||
case BuiltinProc_volatile_load:
|
||||
case BuiltinProc_atomic_load:
|
||||
case BuiltinProc_atomic_load_acq:
|
||||
case BuiltinProc_atomic_load_relaxed:
|
||||
case BuiltinProc_atomic_load_unordered: {
|
||||
case BuiltinProc_atomic_load_explicit: {
|
||||
lbValue dst = lb_build_expr(p, ce->args[0]);
|
||||
|
||||
LLVMValueRef instr = LLVMBuildLoad(p->builder, dst.value, "");
|
||||
switch (id) {
|
||||
case BuiltinProc_volatile_load: LLVMSetVolatile(instr, true); break;
|
||||
case BuiltinProc_atomic_load: LLVMSetOrdering(instr, LLVMAtomicOrderingSequentiallyConsistent); break;
|
||||
case BuiltinProc_atomic_load_acq: LLVMSetOrdering(instr, LLVMAtomicOrderingAcquire); break;
|
||||
case BuiltinProc_atomic_load_relaxed: LLVMSetOrdering(instr, LLVMAtomicOrderingMonotonic); break;
|
||||
case BuiltinProc_atomic_load_unordered: LLVMSetOrdering(instr, LLVMAtomicOrderingUnordered); break;
|
||||
case BuiltinProc_volatile_load: LLVMSetVolatile(instr, true); break;
|
||||
case BuiltinProc_atomic_load: LLVMSetOrdering(instr, LLVMAtomicOrderingSequentiallyConsistent); break;
|
||||
case BuiltinProc_atomic_load_explicit: LLVMSetOrdering(instr, llvm_atomic_ordering_from_odin(ce->args[1])); break;
|
||||
}
|
||||
LLVMSetAlignment(instr, cast(unsigned)type_align_of(type_deref(dst.type)));
|
||||
|
||||
@@ -1686,40 +1672,19 @@ lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValue const &tv,
|
||||
}
|
||||
|
||||
case BuiltinProc_atomic_add:
|
||||
case BuiltinProc_atomic_add_acq:
|
||||
case BuiltinProc_atomic_add_rel:
|
||||
case BuiltinProc_atomic_add_acqrel:
|
||||
case BuiltinProc_atomic_add_relaxed:
|
||||
case BuiltinProc_atomic_sub:
|
||||
case BuiltinProc_atomic_sub_acq:
|
||||
case BuiltinProc_atomic_sub_rel:
|
||||
case BuiltinProc_atomic_sub_acqrel:
|
||||
case BuiltinProc_atomic_sub_relaxed:
|
||||
case BuiltinProc_atomic_and:
|
||||
case BuiltinProc_atomic_and_acq:
|
||||
case BuiltinProc_atomic_and_rel:
|
||||
case BuiltinProc_atomic_and_acqrel:
|
||||
case BuiltinProc_atomic_and_relaxed:
|
||||
case BuiltinProc_atomic_nand:
|
||||
case BuiltinProc_atomic_nand_acq:
|
||||
case BuiltinProc_atomic_nand_rel:
|
||||
case BuiltinProc_atomic_nand_acqrel:
|
||||
case BuiltinProc_atomic_nand_relaxed:
|
||||
case BuiltinProc_atomic_or:
|
||||
case BuiltinProc_atomic_or_acq:
|
||||
case BuiltinProc_atomic_or_rel:
|
||||
case BuiltinProc_atomic_or_acqrel:
|
||||
case BuiltinProc_atomic_or_relaxed:
|
||||
case BuiltinProc_atomic_xor:
|
||||
case BuiltinProc_atomic_xor_acq:
|
||||
case BuiltinProc_atomic_xor_rel:
|
||||
case BuiltinProc_atomic_xor_acqrel:
|
||||
case BuiltinProc_atomic_xor_relaxed:
|
||||
case BuiltinProc_atomic_xchg:
|
||||
case BuiltinProc_atomic_xchg_acq:
|
||||
case BuiltinProc_atomic_xchg_rel:
|
||||
case BuiltinProc_atomic_xchg_acqrel:
|
||||
case BuiltinProc_atomic_xchg_relaxed: {
|
||||
case BuiltinProc_atomic_exchange:
|
||||
case BuiltinProc_atomic_add_explicit:
|
||||
case BuiltinProc_atomic_sub_explicit:
|
||||
case BuiltinProc_atomic_and_explicit:
|
||||
case BuiltinProc_atomic_nand_explicit:
|
||||
case BuiltinProc_atomic_or_explicit:
|
||||
case BuiltinProc_atomic_xor_explicit:
|
||||
case BuiltinProc_atomic_exchange_explicit: {
|
||||
lbValue dst = lb_build_expr(p, ce->args[0]);
|
||||
lbValue val = lb_build_expr(p, ce->args[1]);
|
||||
val = lb_emit_conv(p, val, type_deref(dst.type));
|
||||
@@ -1728,41 +1693,20 @@ lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValue const &tv,
LLVMAtomicOrdering ordering = {};

switch (id) {
case BuiltinProc_atomic_add: op = LLVMAtomicRMWBinOpAdd; ordering = LLVMAtomicOrderingSequentiallyConsistent; break;
case BuiltinProc_atomic_add_acq: op = LLVMAtomicRMWBinOpAdd; ordering = LLVMAtomicOrderingAcquire; break;
case BuiltinProc_atomic_add_rel: op = LLVMAtomicRMWBinOpAdd; ordering = LLVMAtomicOrderingRelease; break;
case BuiltinProc_atomic_add_acqrel: op = LLVMAtomicRMWBinOpAdd; ordering = LLVMAtomicOrderingAcquireRelease; break;
case BuiltinProc_atomic_add_relaxed: op = LLVMAtomicRMWBinOpAdd; ordering = LLVMAtomicOrderingMonotonic; break;
case BuiltinProc_atomic_sub: op = LLVMAtomicRMWBinOpSub; ordering = LLVMAtomicOrderingSequentiallyConsistent; break;
case BuiltinProc_atomic_sub_acq: op = LLVMAtomicRMWBinOpSub; ordering = LLVMAtomicOrderingAcquire; break;
case BuiltinProc_atomic_sub_rel: op = LLVMAtomicRMWBinOpSub; ordering = LLVMAtomicOrderingRelease; break;
case BuiltinProc_atomic_sub_acqrel: op = LLVMAtomicRMWBinOpSub; ordering = LLVMAtomicOrderingAcquireRelease; break;
case BuiltinProc_atomic_sub_relaxed: op = LLVMAtomicRMWBinOpSub; ordering = LLVMAtomicOrderingMonotonic; break;
case BuiltinProc_atomic_and: op = LLVMAtomicRMWBinOpAnd; ordering = LLVMAtomicOrderingSequentiallyConsistent; break;
case BuiltinProc_atomic_and_acq: op = LLVMAtomicRMWBinOpAnd; ordering = LLVMAtomicOrderingAcquire; break;
case BuiltinProc_atomic_and_rel: op = LLVMAtomicRMWBinOpAnd; ordering = LLVMAtomicOrderingRelease; break;
case BuiltinProc_atomic_and_acqrel: op = LLVMAtomicRMWBinOpAnd; ordering = LLVMAtomicOrderingAcquireRelease; break;
case BuiltinProc_atomic_and_relaxed: op = LLVMAtomicRMWBinOpAnd; ordering = LLVMAtomicOrderingMonotonic; break;
case BuiltinProc_atomic_nand: op = LLVMAtomicRMWBinOpNand; ordering = LLVMAtomicOrderingSequentiallyConsistent; break;
case BuiltinProc_atomic_nand_acq: op = LLVMAtomicRMWBinOpNand; ordering = LLVMAtomicOrderingAcquire; break;
case BuiltinProc_atomic_nand_rel: op = LLVMAtomicRMWBinOpNand; ordering = LLVMAtomicOrderingRelease; break;
case BuiltinProc_atomic_nand_acqrel: op = LLVMAtomicRMWBinOpNand; ordering = LLVMAtomicOrderingAcquireRelease; break;
case BuiltinProc_atomic_nand_relaxed: op = LLVMAtomicRMWBinOpNand; ordering = LLVMAtomicOrderingMonotonic; break;
case BuiltinProc_atomic_or: op = LLVMAtomicRMWBinOpOr; ordering = LLVMAtomicOrderingSequentiallyConsistent; break;
case BuiltinProc_atomic_or_acq: op = LLVMAtomicRMWBinOpOr; ordering = LLVMAtomicOrderingAcquire; break;
case BuiltinProc_atomic_or_rel: op = LLVMAtomicRMWBinOpOr; ordering = LLVMAtomicOrderingRelease; break;
case BuiltinProc_atomic_or_acqrel: op = LLVMAtomicRMWBinOpOr; ordering = LLVMAtomicOrderingAcquireRelease; break;
case BuiltinProc_atomic_or_relaxed: op = LLVMAtomicRMWBinOpOr; ordering = LLVMAtomicOrderingMonotonic; break;
case BuiltinProc_atomic_xor: op = LLVMAtomicRMWBinOpXor; ordering = LLVMAtomicOrderingSequentiallyConsistent; break;
case BuiltinProc_atomic_xor_acq: op = LLVMAtomicRMWBinOpXor; ordering = LLVMAtomicOrderingAcquire; break;
case BuiltinProc_atomic_xor_rel: op = LLVMAtomicRMWBinOpXor; ordering = LLVMAtomicOrderingRelease; break;
case BuiltinProc_atomic_xor_acqrel: op = LLVMAtomicRMWBinOpXor; ordering = LLVMAtomicOrderingAcquireRelease; break;
case BuiltinProc_atomic_xor_relaxed: op = LLVMAtomicRMWBinOpXor; ordering = LLVMAtomicOrderingMonotonic; break;
case BuiltinProc_atomic_xchg: op = LLVMAtomicRMWBinOpXchg; ordering = LLVMAtomicOrderingSequentiallyConsistent; break;
case BuiltinProc_atomic_xchg_acq: op = LLVMAtomicRMWBinOpXchg; ordering = LLVMAtomicOrderingAcquire; break;
case BuiltinProc_atomic_xchg_rel: op = LLVMAtomicRMWBinOpXchg; ordering = LLVMAtomicOrderingRelease; break;
case BuiltinProc_atomic_xchg_acqrel: op = LLVMAtomicRMWBinOpXchg; ordering = LLVMAtomicOrderingAcquireRelease; break;
case BuiltinProc_atomic_xchg_relaxed: op = LLVMAtomicRMWBinOpXchg; ordering = LLVMAtomicOrderingMonotonic; break;
case BuiltinProc_atomic_add: op = LLVMAtomicRMWBinOpAdd; ordering = LLVMAtomicOrderingSequentiallyConsistent; break;
case BuiltinProc_atomic_sub: op = LLVMAtomicRMWBinOpSub; ordering = LLVMAtomicOrderingSequentiallyConsistent; break;
case BuiltinProc_atomic_and: op = LLVMAtomicRMWBinOpAnd; ordering = LLVMAtomicOrderingSequentiallyConsistent; break;
case BuiltinProc_atomic_nand: op = LLVMAtomicRMWBinOpNand; ordering = LLVMAtomicOrderingSequentiallyConsistent; break;
case BuiltinProc_atomic_or: op = LLVMAtomicRMWBinOpOr; ordering = LLVMAtomicOrderingSequentiallyConsistent; break;
case BuiltinProc_atomic_xor: op = LLVMAtomicRMWBinOpXor; ordering = LLVMAtomicOrderingSequentiallyConsistent; break;
case BuiltinProc_atomic_exchange: op = LLVMAtomicRMWBinOpXchg; ordering = LLVMAtomicOrderingSequentiallyConsistent; break;
case BuiltinProc_atomic_add_explicit: op = LLVMAtomicRMWBinOpAdd; ordering = llvm_atomic_ordering_from_odin(ce->args[2]); break;
case BuiltinProc_atomic_sub_explicit: op = LLVMAtomicRMWBinOpSub; ordering = llvm_atomic_ordering_from_odin(ce->args[2]); break;
case BuiltinProc_atomic_and_explicit: op = LLVMAtomicRMWBinOpAnd; ordering = llvm_atomic_ordering_from_odin(ce->args[2]); break;
case BuiltinProc_atomic_nand_explicit: op = LLVMAtomicRMWBinOpNand; ordering = llvm_atomic_ordering_from_odin(ce->args[2]); break;
case BuiltinProc_atomic_or_explicit: op = LLVMAtomicRMWBinOpOr; ordering = llvm_atomic_ordering_from_odin(ce->args[2]); break;
case BuiltinProc_atomic_xor_explicit: op = LLVMAtomicRMWBinOpXor; ordering = llvm_atomic_ordering_from_odin(ce->args[2]); break;
case BuiltinProc_atomic_exchange_explicit: op = LLVMAtomicRMWBinOpXchg; ordering = llvm_atomic_ordering_from_odin(ce->args[2]); break;
}

lbValue res = {};
@@ -1771,24 +1715,10 @@ lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValue const &tv,
return res;
}
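
Every case above funnels into one LLVMBuildAtomicRMW call using the op/ordering pair it selects. A minimal standalone sketch of that LLVM-C call, with invented names ("atomic_demo", "fetch_add") purely for illustration:

#include <llvm-c/Core.h>

int main(void) {
	LLVMContextRef ctx = LLVMContextCreate();
	LLVMModuleRef  mod = LLVMModuleCreateWithNameInContext("atomic_demo", ctx);

	// i64 fetch_add(i64 *p, i64 v) -- the shape of a lowered atomic_add
	LLVMTypeRef i64 = LLVMInt64TypeInContext(ctx);
	LLVMTypeRef params[2] = { LLVMPointerType(i64, 0), i64 };
	LLVMValueRef fn = LLVMAddFunction(mod, "fetch_add", LLVMFunctionType(i64, params, 2, 0));

	LLVMBuilderRef b = LLVMCreateBuilderInContext(ctx);
	LLVMPositionBuilderAtEnd(b, LLVMAppendBasicBlockInContext(ctx, fn, "entry"));

	// The same builder call the backend makes; op and ordering are the
	// values chosen by the switch above. atomicrmw yields the old value.
	LLVMValueRef old = LLVMBuildAtomicRMW(b, LLVMAtomicRMWBinOpAdd,
	                                      LLVMGetParam(fn, 0), LLVMGetParam(fn, 1),
	                                      LLVMAtomicOrderingSequentiallyConsistent,
	                                      /*singleThread=*/0);
	LLVMBuildRet(b, old);
	LLVMDumpModule(mod);

	LLVMDisposeBuilder(b);
	LLVMDisposeModule(mod);
	LLVMContextDispose(ctx);
	return 0;
}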

case BuiltinProc_atomic_cxchg:
case BuiltinProc_atomic_cxchg_acq:
case BuiltinProc_atomic_cxchg_rel:
case BuiltinProc_atomic_cxchg_acqrel:
case BuiltinProc_atomic_cxchg_relaxed:
case BuiltinProc_atomic_cxchg_failrelaxed:
case BuiltinProc_atomic_cxchg_failacq:
case BuiltinProc_atomic_cxchg_acq_failrelaxed:
case BuiltinProc_atomic_cxchg_acqrel_failrelaxed:
case BuiltinProc_atomic_cxchgweak:
case BuiltinProc_atomic_cxchgweak_acq:
case BuiltinProc_atomic_cxchgweak_rel:
case BuiltinProc_atomic_cxchgweak_acqrel:
case BuiltinProc_atomic_cxchgweak_relaxed:
case BuiltinProc_atomic_cxchgweak_failrelaxed:
case BuiltinProc_atomic_cxchgweak_failacq:
case BuiltinProc_atomic_cxchgweak_acq_failrelaxed:
case BuiltinProc_atomic_cxchgweak_acqrel_failrelaxed: {
case BuiltinProc_atomic_compare_exchange_strong:
case BuiltinProc_atomic_compare_exchange_weak:
case BuiltinProc_atomic_compare_exchange_strong_explicit:
case BuiltinProc_atomic_compare_exchange_weak_explicit: {
lbValue address = lb_build_expr(p, ce->args[0]);
Type *elem = type_deref(address.type);
lbValue old_value = lb_build_expr(p, ce->args[1]);
@@ -1801,28 +1731,14 @@ lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValue const &tv,
LLVMBool weak = false;

switch (id) {
case BuiltinProc_atomic_cxchg: success_ordering = LLVMAtomicOrderingSequentiallyConsistent; failure_ordering = LLVMAtomicOrderingSequentiallyConsistent; weak = false; break;
case BuiltinProc_atomic_cxchg_acq: success_ordering = LLVMAtomicOrderingAcquire; failure_ordering = LLVMAtomicOrderingMonotonic; weak = false; break;
case BuiltinProc_atomic_cxchg_rel: success_ordering = LLVMAtomicOrderingRelease; failure_ordering = LLVMAtomicOrderingMonotonic; weak = false; break;
case BuiltinProc_atomic_cxchg_acqrel: success_ordering = LLVMAtomicOrderingAcquireRelease; failure_ordering = LLVMAtomicOrderingMonotonic; weak = false; break;
case BuiltinProc_atomic_cxchg_relaxed: success_ordering = LLVMAtomicOrderingMonotonic; failure_ordering = LLVMAtomicOrderingMonotonic; weak = false; break;
case BuiltinProc_atomic_cxchg_failrelaxed: success_ordering = LLVMAtomicOrderingSequentiallyConsistent; failure_ordering = LLVMAtomicOrderingMonotonic; weak = false; break;
case BuiltinProc_atomic_cxchg_failacq: success_ordering = LLVMAtomicOrderingSequentiallyConsistent; failure_ordering = LLVMAtomicOrderingAcquire; weak = false; break;
case BuiltinProc_atomic_cxchg_acq_failrelaxed: success_ordering = LLVMAtomicOrderingAcquire; failure_ordering = LLVMAtomicOrderingMonotonic; weak = false; break;
case BuiltinProc_atomic_cxchg_acqrel_failrelaxed: success_ordering = LLVMAtomicOrderingAcquireRelease; failure_ordering = LLVMAtomicOrderingMonotonic; weak = false; break;
case BuiltinProc_atomic_cxchgweak: success_ordering = LLVMAtomicOrderingSequentiallyConsistent; failure_ordering = LLVMAtomicOrderingSequentiallyConsistent; weak = false; break;
case BuiltinProc_atomic_cxchgweak_acq: success_ordering = LLVMAtomicOrderingAcquire; failure_ordering = LLVMAtomicOrderingMonotonic; weak = true; break;
case BuiltinProc_atomic_cxchgweak_rel: success_ordering = LLVMAtomicOrderingRelease; failure_ordering = LLVMAtomicOrderingMonotonic; weak = true; break;
case BuiltinProc_atomic_cxchgweak_acqrel: success_ordering = LLVMAtomicOrderingAcquireRelease; failure_ordering = LLVMAtomicOrderingMonotonic; weak = true; break;
case BuiltinProc_atomic_cxchgweak_relaxed: success_ordering = LLVMAtomicOrderingMonotonic; failure_ordering = LLVMAtomicOrderingMonotonic; weak = true; break;
case BuiltinProc_atomic_cxchgweak_failrelaxed: success_ordering = LLVMAtomicOrderingSequentiallyConsistent; failure_ordering = LLVMAtomicOrderingMonotonic; weak = true; break;
case BuiltinProc_atomic_cxchgweak_failacq: success_ordering = LLVMAtomicOrderingSequentiallyConsistent; failure_ordering = LLVMAtomicOrderingAcquire; weak = true; break;
case BuiltinProc_atomic_cxchgweak_acq_failrelaxed: success_ordering = LLVMAtomicOrderingAcquire; failure_ordering = LLVMAtomicOrderingMonotonic; weak = true; break;
case BuiltinProc_atomic_cxchgweak_acqrel_failrelaxed: success_ordering = LLVMAtomicOrderingAcquireRelease; failure_ordering = LLVMAtomicOrderingMonotonic; weak = true; break;
case BuiltinProc_atomic_compare_exchange_strong: success_ordering = LLVMAtomicOrderingSequentiallyConsistent; failure_ordering = LLVMAtomicOrderingSequentiallyConsistent; weak = false; break;
case BuiltinProc_atomic_compare_exchange_weak: success_ordering = LLVMAtomicOrderingSequentiallyConsistent; failure_ordering = LLVMAtomicOrderingSequentiallyConsistent; weak = true; break;
case BuiltinProc_atomic_compare_exchange_strong_explicit: success_ordering = llvm_atomic_ordering_from_odin(ce->args[3]); failure_ordering = llvm_atomic_ordering_from_odin(ce->args[4]); weak = false; break;
case BuiltinProc_atomic_compare_exchange_weak_explicit: success_ordering = llvm_atomic_ordering_from_odin(ce->args[3]); failure_ordering = llvm_atomic_ordering_from_odin(ce->args[4]); weak = true; break;
}

// TODO(bill): Figure out how to make it weak
LLVMBool single_threaded = weak;
LLVMBool single_threaded = false;

LLVMValueRef value = LLVMBuildAtomicCmpXchg(
p->builder, address.value,
@@ -1831,6 +1747,7 @@ lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValue const &tv,
failure_ordering,
single_threaded
);
LLVMSetWeak(value, weak);

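The rewritten tail fixes a subtle bug: the old code passed weak as LLVMBuildAtomicCmpXchg's single-threaded flag, while in LLVM-C weakness is a property set on the instruction afterwards via LLVMSetWeak. At the language level, a weak compare-exchange may fail spuriously, so callers retry in a loop; a strong one fails only on a genuine value mismatch. A small illustrative C++ sketch of that contract (the function fetch_double is invented, not taken from this commit):

#include <atomic>

// Atomically doubles 'a'. compare_exchange_weak may fail spuriously,
// which is fine here because the loop simply retries.
int fetch_double(std::atomic<int> &a) {
	int expected = a.load(std::memory_order_relaxed);
	while (!a.compare_exchange_weak(expected, expected * 2,
	                                std::memory_order_acq_rel,    // success ordering
	                                std::memory_order_relaxed)) { // failure ordering
		// on failure, 'expected' is refreshed with the current value
	}
	return expected; // value observed before the successful exchange
}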
if (tv.type->kind == Type_Tuple) {
Type *fix_typed = alloc_type_tuple();

@@ -2005,3 +2005,25 @@ lbValue lb_handle_objc_send(lbProcedure *p, Ast *expr) {
}

LLVMAtomicOrdering llvm_atomic_ordering_from_odin(ExactValue const &value) {
GB_ASSERT(value.kind == ExactValue_Integer);
i64 v = exact_value_to_i64(value);
switch (v) {
case OdinAtomicMemoryOrder_relaxed: return LLVMAtomicOrderingUnordered;
case OdinAtomicMemoryOrder_consume: return LLVMAtomicOrderingMonotonic;
case OdinAtomicMemoryOrder_acquire: return LLVMAtomicOrderingAcquire;
case OdinAtomicMemoryOrder_release: return LLVMAtomicOrderingRelease;
case OdinAtomicMemoryOrder_acq_rel: return LLVMAtomicOrderingAcquireRelease;
case OdinAtomicMemoryOrder_seq_cst: return LLVMAtomicOrderingSequentiallyConsistent;
}
GB_PANIC("Unknown atomic ordering");
return LLVMAtomicOrderingSequentiallyConsistent;
}

LLVMAtomicOrdering llvm_atomic_ordering_from_odin(Ast *expr) {
ExactValue value = type_and_value_of_expr(expr).value;
return llvm_atomic_ordering_from_odin(value);
}

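Both overloads require the ordering operand to be a compile-time constant (hence the ExactValue_Integer assert), with values drawn from the OdinAtomicMemoryOrder enum declared below. The same table can be exercised standalone as a quick sanity check; this sketch is illustrative only, with ordering_from_odin an invented mirror of the helper and the raw case values taken from that enum:

#include <llvm-c/Core.h>
#include <cassert>

// Mirror of the mapping above; case values follow OdinAtomicMemoryOrder.
static LLVMAtomicOrdering ordering_from_odin(int v) {
	switch (v) {
	case 0: return LLVMAtomicOrderingUnordered;              // relaxed
	case 1: return LLVMAtomicOrderingMonotonic;              // consume
	case 2: return LLVMAtomicOrderingAcquire;                // acquire
	case 3: return LLVMAtomicOrderingRelease;                // release
	case 4: return LLVMAtomicOrderingAcquireRelease;         // acq_rel
	case 5: return LLVMAtomicOrderingSequentiallyConsistent; // seq_cst
	}
	assert(!"unknown atomic ordering");
	return LLVMAtomicOrderingSequentiallyConsistent;
}

int main(void) {
	assert(ordering_from_odin(0) == LLVMAtomicOrderingUnordered);
	assert(ordering_from_odin(5) == LLVMAtomicOrderingSequentiallyConsistent);
	return 0;
}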
@@ -692,6 +692,28 @@ gb_global Type *t_objc_id = nullptr;
gb_global Type *t_objc_SEL = nullptr;
gb_global Type *t_objc_Class = nullptr;

enum OdinAtomicMemoryOrder : i32 {
OdinAtomicMemoryOrder_relaxed = 0, // unordered
OdinAtomicMemoryOrder_consume = 1, // monotonic
OdinAtomicMemoryOrder_acquire = 2,
OdinAtomicMemoryOrder_release = 3,
OdinAtomicMemoryOrder_acq_rel = 4,
OdinAtomicMemoryOrder_seq_cst = 5,
OdinAtomicMemoryOrder_COUNT,
};

char const *OdinAtomicMemoryOrder_strings[OdinAtomicMemoryOrder_COUNT] = {
"Relaxed",
"Consume",
"Acquire",
"Release",
"Acq_Rel",
"Seq_Cst",
};
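
The strings match the enum variants spelled in Odin source (.Relaxed through .Seq_Cst), so the table is indexed directly by the enum value. One hypothetical use (not part of this commit) is rendering an order for a diagnostic; odin_memory_order_name below is an invented helper:

// Hypothetical diagnostic helper; bounds-checks before indexing the table.
char const *odin_memory_order_name(OdinAtomicMemoryOrder order) {
	if (order < 0 || order >= OdinAtomicMemoryOrder_COUNT) {
		return "<invalid memory_order>";
	}
	return OdinAtomicMemoryOrder_strings[order];
}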

gb_global Type *t_atomic_memory_order = nullptr;


gb_global RecursiveMutex g_type_mutex;