From 3ff7bded642ef399fda68ac1078d6a091474ab11 Mon Sep 17 00:00:00 2001
From: gingerBill
Date: Tue, 9 Mar 2021 11:40:36 +0000
Subject: [PATCH] Add `intrinsics.volatile_store` and `intrinsics.volatile_load`

---
 core/intrinsics/intrinsics.odin | 81 +++++++++++++++++----------------
 src/check_expr.cpp              |  8 ++++
 src/checker_builtin_procs.hpp   |  6 +++
 src/ir.cpp                      | 19 ++++++--
 src/ir_print.cpp                |  3 ++
 src/llvm_backend.cpp            |  4 ++
 6 files changed, 79 insertions(+), 42 deletions(-)

diff --git a/core/intrinsics/intrinsics.odin b/core/intrinsics/intrinsics.odin
index 7c53773f5..0979241af 100644
--- a/core/intrinsics/intrinsics.odin
+++ b/core/intrinsics/intrinsics.odin
@@ -9,6 +9,9 @@ x86_mmx :: x86_mmx;
 // Specialized SIMD Vector type
 simd_vector :: proc($N: int, $T: typeid) -> type/#simd[N]T
 soa_struct :: proc($N: int, $T: typeid) -> type/#soa[N]T
 
+// Volatile
+volatile_load :: proc(dst: ^$T) -> T ---
+volatile_store :: proc(dst: ^$T, val: T) ---
 
 // Atomics
@@ -17,52 +20,52 @@ atomic_fence_acq :: proc() ---
 atomic_fence_rel :: proc() ---
 atomic_fence_acqrel :: proc() ---
 
-atomic_store :: proc(dst: ^$T, val: $T) ---
-atomic_store_rel :: proc(dst: ^$T, val: $T) ---
-atomic_store_relaxed :: proc(dst: ^$T, val: $T) ---
-atomic_store_unordered :: proc(dst: ^$T, val: $T) ---
+atomic_store :: proc(dst: ^$T, val: T) ---
+atomic_store_rel :: proc(dst: ^$T, val: T) ---
+atomic_store_relaxed :: proc(dst: ^$T, val: T) ---
+atomic_store_unordered :: proc(dst: ^$T, val: T) ---
 
 atomic_load :: proc(dst: ^$T) -> T ---
 atomic_load_acq :: proc(dst: ^$T) -> T ---
 atomic_load_relaxed :: proc(dst: ^$T) -> T ---
 atomic_load_unordered :: proc(dst: ^$T) -> T ---
 
-atomic_add :: proc(dst; ^$T, val: $T) -> T ---
-atomic_add_acq :: proc(dst; ^$T, val: $T) -> T ---
-atomic_add_rel :: proc(dst; ^$T, val: $T) -> T ---
-atomic_add_acqrel :: proc(dst; ^$T, val: $T) -> T ---
-atomic_add_relaxed :: proc(dst; ^$T, val: $T) -> T ---
-atomic_sub :: proc(dst; ^$T, val: $T) -> T ---
-atomic_sub_acq :: proc(dst; ^$T, val: $T) -> T ---
-atomic_sub_rel :: proc(dst; ^$T, val: $T) -> T ---
-atomic_sub_acqrel :: proc(dst; ^$T, val: $T) -> T ---
-atomic_sub_relaxed :: proc(dst; ^$T, val: $T) -> T ---
-atomic_and :: proc(dst; ^$T, val: $T) -> T ---
-atomic_and_acq :: proc(dst; ^$T, val: $T) -> T ---
-atomic_and_rel :: proc(dst; ^$T, val: $T) -> T ---
-atomic_and_acqrel :: proc(dst; ^$T, val: $T) -> T ---
-atomic_and_relaxed :: proc(dst; ^$T, val: $T) -> T ---
-atomic_nand :: proc(dst; ^$T, val: $T) -> T ---
-atomic_nand_acq :: proc(dst; ^$T, val: $T) -> T ---
-atomic_nand_rel :: proc(dst; ^$T, val: $T) -> T ---
-atomic_nand_acqrel :: proc(dst; ^$T, val: $T) -> T ---
-atomic_nand_relaxed :: proc(dst; ^$T, val: $T) -> T ---
-atomic_or :: proc(dst; ^$T, val: $T) -> T ---
-atomic_or_acq :: proc(dst; ^$T, val: $T) -> T ---
-atomic_or_rel :: proc(dst; ^$T, val: $T) -> T ---
-atomic_or_acqrel :: proc(dst; ^$T, val: $T) -> T ---
-atomic_or_relaxed :: proc(dst; ^$T, val: $T) -> T ---
-atomic_xor :: proc(dst; ^$T, val: $T) -> T ---
-atomic_xor_acq :: proc(dst; ^$T, val: $T) -> T ---
-atomic_xor_rel :: proc(dst; ^$T, val: $T) -> T ---
-atomic_xor_acqrel :: proc(dst; ^$T, val: $T) -> T ---
-atomic_xor_relaxed :: proc(dst; ^$T, val: $T) -> T ---
+atomic_add :: proc(dst: ^$T, val: T) -> T ---
+atomic_add_acq :: proc(dst: ^$T, val: T) -> T ---
+atomic_add_rel :: proc(dst: ^$T, val: T) -> T ---
+atomic_add_acqrel :: proc(dst: ^$T, val: T) -> T ---
+atomic_add_relaxed :: proc(dst: ^$T, val: T) -> T ---
+atomic_sub :: proc(dst: ^$T, val: T) -> T ---
+atomic_sub_acq :: proc(dst: ^$T, val: T) -> T ---
+atomic_sub_rel :: proc(dst: ^$T, val: T) -> T ---
+atomic_sub_acqrel :: proc(dst: ^$T, val: T) -> T ---
+atomic_sub_relaxed :: proc(dst: ^$T, val: T) -> T ---
+atomic_and :: proc(dst: ^$T, val: T) -> T ---
+atomic_and_acq :: proc(dst: ^$T, val: T) -> T ---
+atomic_and_rel :: proc(dst: ^$T, val: T) -> T ---
+atomic_and_acqrel :: proc(dst: ^$T, val: T) -> T ---
+atomic_and_relaxed :: proc(dst: ^$T, val: T) -> T ---
+atomic_nand :: proc(dst: ^$T, val: T) -> T ---
+atomic_nand_acq :: proc(dst: ^$T, val: T) -> T ---
+atomic_nand_rel :: proc(dst: ^$T, val: T) -> T ---
+atomic_nand_acqrel :: proc(dst: ^$T, val: T) -> T ---
+atomic_nand_relaxed :: proc(dst: ^$T, val: T) -> T ---
+atomic_or :: proc(dst: ^$T, val: T) -> T ---
+atomic_or_acq :: proc(dst: ^$T, val: T) -> T ---
+atomic_or_rel :: proc(dst: ^$T, val: T) -> T ---
+atomic_or_acqrel :: proc(dst: ^$T, val: T) -> T ---
+atomic_or_relaxed :: proc(dst: ^$T, val: T) -> T ---
+atomic_xor :: proc(dst: ^$T, val: T) -> T ---
+atomic_xor_acq :: proc(dst: ^$T, val: T) -> T ---
+atomic_xor_rel :: proc(dst: ^$T, val: T) -> T ---
+atomic_xor_acqrel :: proc(dst: ^$T, val: T) -> T ---
+atomic_xor_relaxed :: proc(dst: ^$T, val: T) -> T ---
 
-atomic_xchg :: proc(dst; ^$T, val: $T) -> T ---
-atomic_xchg_acq :: proc(dst; ^$T, val: $T) -> T ---
-atomic_xchg_rel :: proc(dst; ^$T, val: $T) -> T ---
-atomic_xchg_acqrel :: proc(dst; ^$T, val: $T) -> T ---
-atomic_xchg_relaxed :: proc(dst; ^$T, val: $T) -> T ---
+atomic_xchg :: proc(dst: ^$T, val: T) -> T ---
+atomic_xchg_acq :: proc(dst: ^$T, val: T) -> T ---
+atomic_xchg_rel :: proc(dst: ^$T, val: T) -> T ---
+atomic_xchg_acqrel :: proc(dst: ^$T, val: T) -> T ---
+atomic_xchg_relaxed :: proc(dst: ^$T, val: T) -> T ---
 
 atomic_cxchg :: proc(dst: ^$T, old, new: T) -> (T, /*option*/bool) ---
 atomic_cxchg_acq :: proc(dst: ^$T, old, new: T) -> (T, /*option*/bool) ---
diff --git a/src/check_expr.cpp b/src/check_expr.cpp
index caa2194aa..6762a4d34 100644
--- a/src/check_expr.cpp
+++ b/src/check_expr.cpp
@@ -5502,6 +5502,9 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
 		operand->mode = Addressing_NoValue;
 		break;
 
+
+
+
 	case BuiltinProc_atomic_fence:
 	case BuiltinProc_atomic_fence_acq:
 	case BuiltinProc_atomic_fence_rel:
@@ -5509,6 +5512,8 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
 		operand->mode = Addressing_NoValue;
 		break;
 
+	case BuiltinProc_volatile_store:
+		/*fallthrough*/
 	case BuiltinProc_atomic_store:
 	case BuiltinProc_atomic_store_rel:
 	case BuiltinProc_atomic_store_relaxed:
@@ -5527,6 +5532,9 @@ bool check_builtin_procedure(CheckerContext *c, Operand *operand, Ast *call, i32
 		operand->mode = Addressing_NoValue;
 		break;
 	}
+
+	case BuiltinProc_volatile_load:
+		/*fallthrough*/
 	case BuiltinProc_atomic_load:
 	case BuiltinProc_atomic_load_acq:
 	case BuiltinProc_atomic_load_relaxed:
diff --git a/src/checker_builtin_procs.hpp b/src/checker_builtin_procs.hpp
index f648c8027..a997ee9ff 100644
--- a/src/checker_builtin_procs.hpp
+++ b/src/checker_builtin_procs.hpp
@@ -39,6 +39,9 @@ enum BuiltinProcId {
 	BuiltinProc_alloca,
 	BuiltinProc_cpu_relax,
 
+	BuiltinProc_volatile_store,
+	BuiltinProc_volatile_load,
+
 	BuiltinProc_atomic_fence,
 	BuiltinProc_atomic_fence_acq,
 	BuiltinProc_atomic_fence_rel,
@@ -232,6 +235,9 @@ gb_global BuiltinProc builtin_procs[BuiltinProc_COUNT] = {
 	{STR_LIT("alloca"), 2, false, Expr_Expr, BuiltinProcPkg_intrinsics},
 	{STR_LIT("cpu_relax"), 0, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
 
+	{STR_LIT("volatile_store"), 2, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
+	{STR_LIT("volatile_load"), 1, false, Expr_Expr, BuiltinProcPkg_intrinsics},
+
 	{STR_LIT("atomic_fence"), 0, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
 	{STR_LIT("atomic_fence_acq"), 0, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
 	{STR_LIT("atomic_fence_rel"), 0, false, Expr_Stmt, BuiltinProcPkg_intrinsics},
diff --git a/src/ir.cpp b/src/ir.cpp
index 300b5b3af..0f235b914 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -202,7 +202,7 @@ gbAllocator ir_allocator(void) {
 	}) \
 	IR_INSTR_KIND(ZeroInit, struct { irValue *address; }) \
 	IR_INSTR_KIND(Store, struct { irValue *address, *value; bool is_volatile; }) \
-	IR_INSTR_KIND(Load, struct { Type *type; irValue *address; i64 custom_align; }) \
+	IR_INSTR_KIND(Load, struct { Type *type; irValue *address; i64 custom_align; bool is_volatile; }) \
 	IR_INSTR_KIND(InlineCode, struct { BuiltinProcId id; Array<irValue *> operands; Type *type; }) \
 	IR_INSTR_KIND(AtomicFence, struct { BuiltinProcId id; }) \
 	IR_INSTR_KIND(AtomicStore, struct { \
@@ -1084,11 +1084,12 @@
 	return v;
 }
 
-irValue *ir_instr_load(irProcedure *p, irValue *address) {
+irValue *ir_instr_load(irProcedure *p, irValue *address, bool is_volatile) {
 	irValue *v = ir_alloc_instr(p, irInstr_Load);
 	irInstr *i = &v->Instr;
 	i->Load.address = address;
 	i->Load.type = type_deref(ir_type(address));
+	i->Load.is_volatile = is_volatile;
 
 	if (address) address->uses += 1;
 
@@ -3166,7 +3167,7 @@ irValue *ir_emit_load(irProcedure *p, irValue *address, i64 custom_align) {
 	// 	return ir_emit(p, ir_instr_load_bool(p, address));
 	// }
 	if (address) address->uses += 1;
-	auto instr = ir_instr_load(p, address);
+	auto instr = ir_instr_load(p, address, false);
 	instr->Instr.Load.custom_align = custom_align;
 	return ir_emit(p, instr);
 }
@@ -7527,6 +7528,18 @@ irValue *ir_build_builtin_proc(irProcedure *proc, Ast *expr, TypeAndValue tv, Bu
 	case BuiltinProc_cpu_relax:
 		return ir_emit(proc, ir_instr_inline_code(proc, id, {}, nullptr));
 
+	case BuiltinProc_volatile_store: {
+		irValue *dst = ir_build_expr(proc, ce->args[0]);
+		irValue *val = ir_build_expr(proc, ce->args[1]);
+		val = ir_emit_conv(proc, val, type_deref(ir_type(dst)));
+		return ir_emit(proc, ir_instr_store(proc, dst, val, true));
+	}
+
+	case BuiltinProc_volatile_load: {
+		irValue *dst = ir_build_expr(proc, ce->args[0]);
+		return ir_emit(proc, ir_instr_load(proc, dst, true));
+	}
+
 	case BuiltinProc_atomic_fence:
 	case BuiltinProc_atomic_fence_acq:
 	case BuiltinProc_atomic_fence_rel:
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index e7af016d5..d6f46ce4d 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -1534,6 +1534,9 @@ void ir_print_instr(irFileBuffer *f, irModule *m, irValue *value) {
 	case irInstr_Load: {
 		Type *type = instr->Load.type;
 		ir_fprintf(f, "%%%d = load ", value->index);
+		if (instr->Load.is_volatile) {
+			ir_write_str_lit(f, "volatile ");
+		}
 		ir_print_type(f, m, type);
 		ir_write_str_lit(f, ", ");
 		ir_print_type(f, m, type);
diff --git a/src/llvm_backend.cpp b/src/llvm_backend.cpp
index 7685d8143..09c4ef2fd 100644
--- a/src/llvm_backend.cpp
+++ b/src/llvm_backend.cpp
@@ -8012,6 +8012,7 @@ lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValue const &tv,
 		LLVMBuildFence(p->builder, LLVMAtomicOrderingAcquireRelease, false, "");
 		return {};
 
+	case BuiltinProc_volatile_store:
 	case BuiltinProc_atomic_store:
 	case BuiltinProc_atomic_store_rel:
 	case BuiltinProc_atomic_store_relaxed:
@@ -8022,6 +8023,7 @@ lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValue const &tv,
 		LLVMValueRef instr = LLVMBuildStore(p->builder, val.value, dst.value);
 
 		switch (id) {
+		case BuiltinProc_volatile_store: LLVMSetVolatile(instr, true); break;
 		case BuiltinProc_atomic_store: LLVMSetOrdering(instr, LLVMAtomicOrderingSequentiallyConsistent); break;
 		case BuiltinProc_atomic_store_rel: LLVMSetOrdering(instr, LLVMAtomicOrderingRelease); break;
 		case BuiltinProc_atomic_store_relaxed: LLVMSetOrdering(instr, LLVMAtomicOrderingMonotonic); break;
@@ -8033,6 +8035,7 @@ lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValue const &tv,
 		return {};
 	}
 
+	case BuiltinProc_volatile_load:
 	case BuiltinProc_atomic_load:
 	case BuiltinProc_atomic_load_acq:
 	case BuiltinProc_atomic_load_relaxed:
@@ -8041,6 +8044,7 @@ lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValue const &tv,
 		LLVMValueRef instr = LLVMBuildLoad(p->builder, dst.value, "");
 
 		switch (id) {
+		case BuiltinProc_volatile_load: LLVMSetVolatile(instr, true); break;
 		case BuiltinProc_atomic_load: LLVMSetOrdering(instr, LLVMAtomicOrderingSequentiallyConsistent); break;
 		case BuiltinProc_atomic_load_acq: LLVMSetOrdering(instr, LLVMAtomicOrderingAcquire); break;
 		case BuiltinProc_atomic_load_relaxed: LLVMSetOrdering(instr, LLVMAtomicOrderingMonotonic); break;