Prepare for M1 Mac

gingerBill
2020-11-24 12:20:48 +00:00
parent a55568b0c4
commit 776c3f4e90
3 changed files with 111 additions and 4 deletions

build-m1.sh (new executable file, +31 lines)

@@ -0,0 +1,31 @@
#!/usr/bin/env bash
release_mode=$1
warnings_to_disable="-std=c++11 -Wno-switch -Wno-pointer-sign -Wno-tautological-constant-out-of-range-compare -Wno-tautological-compare -Wno-macro-redefined"
libraries="-pthread -ldl -lm -lstdc++"
other_args="-DLLVM_BACKEND_SUPPORT -DUSE_NEW_LLVM_ABI_SYSTEM"
compiler="clang"
if [ -z "$release_mode" ]; then release_mode="0"; fi
if [ "$release_mode" -eq "0" ]; then
other_args="${other_args} -g"
fi
if [ "$release_mode" -eq "1" ]; then
other_args="${other_args} -O3 -march=native"
fi
if [[ "$(uname)" == "Darwin" ]]; then
# Set compiler to clang on MacOS
# MacOS provides a symlink to clang called gcc, but it's nice to be explicit here.
compiler="clang"
other_args="${other_args} -liconv"
elif [[ "$(uname)" == "FreeBSD" ]]; then
compiler="clang"
fi
${compiler} src/main.cpp ${warnings_to_disable} ${libraries} ${other_args} -o odin \
&& ./odin run examples/demo/demo.odin -llvm-api
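Usage note: running ./build-m1.sh with no argument produces a debug build (adds -g); ./build-m1.sh 1 produces a release build (adds -O3 -march=native). The final command compiles the compiler and, only if that succeeds, immediately smoke-tests it by running the demo through the new LLVM backend via -llvm-api.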

src/gb/gb.h

@@ -157,7 +157,7 @@ extern "C" {
#endif
#endif
-#if defined(_WIN64) || defined(__x86_64__) || defined(_M_X64) || defined(__64BIT__) || defined(__powerpc64__) || defined(__ppc64__)
+#if defined(_WIN64) || defined(__x86_64__) || defined(_M_X64) || defined(__64BIT__) || defined(__powerpc64__) || defined(__ppc64__) || defined(__aarch64__)
#ifndef GB_ARCH_64_BIT
#define GB_ARCH_64_BIT 1
#endif
@@ -230,7 +230,7 @@ extern "C" {
#define GB_CACHE_LINE_SIZE 128
#endif
-#elif defined(__arm__)
+#elif defined(__arm__) || defined(__aarch64__) || defined(_M_ARM) || defined(_M_ARM64)
#ifndef GB_CPU_ARM
#define GB_CPU_ARM 1
#endif
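Both guards previously matched only x86-64, PowerPC, and 32-bit ARM. Adding __aarch64__ (plus the MSVC _M_ARM and _M_ARM64 spellings) means that on an M1 Mac both GB_ARCH_64_BIT and GB_CPU_ARM are now defined, which is what selects the new ARM code paths in the hunks below.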
@@ -3702,6 +3702,12 @@ gb_inline void *gb_memcopy(void *dest, void const *source, isize n) {
	void *dest_copy = dest;
	__asm__ __volatile__("rep movsb" : "+D"(dest_copy), "+S"(source), "+c"(n) : : "memory");
#elif defined(GB_CPU_ARM)
	u8 *s = cast(u8 *)source;
	u8 *d = cast(u8 *)dest;
	for (isize i = 0; i < n; i++) {
		*d++ = *s++;
	}
#else
	u8 *d = cast(u8 *)dest;
	u8 const *s = cast(u8 const *)source;
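rep movsb is an x86 string-move instruction, so the existing inline-assembly path cannot be compiled on ARM; the new branch falls back to a plain byte-at-a-time copy loop. That is portable if not optimal, and compilers are generally free to recognize and vectorize such loops anyway.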
@@ -4438,6 +4444,76 @@ gb_inline i64 gb_atomic64_fetch_or(gbAtomic64 volatile *a, i64 operand) {
#endif
}
#elif defined(GB_CPU_ARM)
gb_inline i32 gb_atomic32_load (gbAtomic32 const volatile *a) {
	return __atomic_load_n(&a->value, __ATOMIC_SEQ_CST);
}
gb_inline void gb_atomic32_store(gbAtomic32 volatile *a, i32 value) {
	__atomic_store_n(&a->value, value, __ATOMIC_SEQ_CST);
}
gb_inline i32 gb_atomic32_compare_exchange(gbAtomic32 volatile *a, i32 expected, i32 desired) {
	i32 expected_copy = expected;
	auto result = __atomic_compare_exchange_n(&a->value, &expected_copy, desired, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	if (result) {
		return expected;
	} else {
		return expected_copy;
	}
}
gb_inline i32 gb_atomic32_exchanged(gbAtomic32 volatile *a, i32 desired) {
	return __atomic_exchange_n(&a->value, desired, __ATOMIC_SEQ_CST);
}
gb_inline i32 gb_atomic32_fetch_add(gbAtomic32 volatile *a, i32 operand) {
	return __atomic_fetch_add(&a->value, operand, __ATOMIC_SEQ_CST);
}
gb_inline i32 gb_atomic32_fetch_and(gbAtomic32 volatile *a, i32 operand) {
	return __atomic_fetch_and(&a->value, operand, __ATOMIC_SEQ_CST);
}
gb_inline i32 gb_atomic32_fetch_or(gbAtomic32 volatile *a, i32 operand) {
	return __atomic_fetch_or(&a->value, operand, __ATOMIC_SEQ_CST);
}

gb_inline i64 gb_atomic64_load(gbAtomic64 const volatile *a) {
	return __atomic_load_n(&a->value, __ATOMIC_SEQ_CST);
}
gb_inline void gb_atomic64_store(gbAtomic64 volatile *a, i64 value) {
	__atomic_store_n(&a->value, value, __ATOMIC_SEQ_CST);
}
gb_inline i64 gb_atomic64_compare_exchange(gbAtomic64 volatile *a, i64 expected, i64 desired) {
	i64 expected_copy = expected;
	auto result = __atomic_compare_exchange_n(&a->value, &expected_copy, desired, true, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	if (result) {
		return expected;
	} else {
		return expected_copy;
	}
}
gb_inline i64 gb_atomic64_exchanged(gbAtomic64 volatile *a, i64 desired) {
	return __atomic_exchange_n(&a->value, desired, __ATOMIC_SEQ_CST);
}
gb_inline i64 gb_atomic64_fetch_add(gbAtomic64 volatile *a, i64 operand) {
	return __atomic_fetch_add(&a->value, operand, __ATOMIC_SEQ_CST);
}
gb_inline i64 gb_atomic64_fetch_and(gbAtomic64 volatile *a, i64 operand) {
	return __atomic_fetch_and(&a->value, operand, __ATOMIC_SEQ_CST);
}
gb_inline i64 gb_atomic64_fetch_or(gbAtomic64 volatile *a, i64 operand) {
	return __atomic_fetch_or(&a->value, operand, __ATOMIC_SEQ_CST);
}
#else
#error TODO(bill): Implement Atomics for this CPU
#endif
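These ARM implementations delegate to GCC/Clang's __atomic builtins with sequentially consistent ordering. Note the compare-exchange convention: on success it returns expected, on failure it returns the value actually observed. As a minimal sketch (not part of this commit or of gb.h), a spin lock built only on the functions above could look like this, assuming a zero-initialized gbAtomic32 where 0 means unlocked:

// Hypothetical usage sketch; relies only on the functions defined above.
void example_spin_lock(gbAtomic32 volatile *lock) {
	// Try to swap 0 -> 1; compare_exchange returns 0 only when we won.
	while (gb_atomic32_compare_exchange(lock, 0, 1) != 0) {
		// Contended: spin until the holder stores 0 again.
	}
}

void example_spin_unlock(gbAtomic32 volatile *lock) {
	gb_atomic32_store(lock, 0); // SEQ_CST store acts as the release
}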

src/llvm_backend.cpp

@@ -8312,8 +8312,8 @@ lbValue lb_build_builtin_proc(lbProcedure *p, Ast *expr, TypeAndValue const &tv,
{
	LLVMTypeRef func_type = LLVMFunctionType(LLVMVoidTypeInContext(p->module->ctx), nullptr, 0, false);
	LLVMValueRef the_asm = LLVMGetInlineAsm(func_type,
-		"pause", 5,
-		"", 0,
+		cast(char *)"pause", 5,
+		cast(char *)"", 0,
		/*HasSideEffects*/true, /*IsAlignStack*/false,
		LLVMInlineAsmDialectATT
	);
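The added casts (cast(x) is gb.h's C-style cast macro) are a const-correctness fix: the LLVM-C headers this code targets declare LLVMGetInlineAsm's string parameters as plain char *, while C++ string literals are const char arrays. The prototype below is what the call site appears to assume; treat it as an assumption, since later LLVM releases changed these parameters to const char *.

/* Assumed LLVM-C signature of the era (non-const string parameters): */
LLVMValueRef LLVMGetInlineAsm(LLVMTypeRef Ty,
                              char *AsmString, size_t AsmStringSize,
                              char *Constraints, size_t ConstraintsSize,
                              LLVMBool HasSideEffects, LLVMBool IsAlignStack,
                              LLVMInlineAsmDialect Dialect);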