SDL_malloc.c: replace FORCEINLINE usage with SDL_FORCE_INLINE

Ozkan Sezer
2024-12-29 21:50:28 +03:00
committed by Anonymous Maarten
parent 3842384a38
commit 54752f8d1c

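For context: SDL_FORCE_INLINE comes from SDL's public headers (begin_code.h) and, unlike dlmalloc's FORCEINLINE, its GCC/Clang expansion already includes static, which is why the static keyword disappears from the changed function definitions below. A sketch of the definition, approximated from SDL's headers (the exact compiler-version guards may differ between SDL releases, so this is not a verbatim copy):

/* Approximate sketch of SDL's begin_code.h; guards are from memory. */
#ifndef SDL_FORCE_INLINE
#ifdef _MSC_VER
#define SDL_FORCE_INLINE __forceinline
#elif ((defined(__GNUC__) && (__GNUC__ >= 4)) || defined(__clang__))
#define SDL_FORCE_INLINE __attribute__((always_inline)) static __inline__
#else
#define SDL_FORCE_INLINE static SDL_INLINE
#endif
#endif /* SDL_FORCE_INLINE not defined */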

@@ -816,6 +816,7 @@ struct mallinfo {
   inlining are defined as macros, so these aren't used for them.
 */
+#if 0 /* SDL */
 #ifndef FORCEINLINE
 #if defined(__GNUC__)
 #define FORCEINLINE __inline __attribute__ ((always_inline))
@@ -823,6 +824,7 @@ struct mallinfo {
 #define FORCEINLINE __forceinline
 #endif
 #endif
+#endif /* SDL */
 #ifndef NOINLINE
 #if defined(__GNUC__)
 #define NOINLINE __attribute__ ((noinline))
@@ -835,13 +837,17 @@ struct mallinfo {
 #ifdef __cplusplus
 extern "C" {
+#if 0 /* SDL */
 #ifndef FORCEINLINE
 #define FORCEINLINE inline
 #endif
+#endif /* SDL */
 #endif /* __cplusplus */
+#if 0 /* SDL */
 #ifndef FORCEINLINE
 #define FORCEINLINE
 #endif
+#endif /* SDL_FORCE_INLINE */
 #if !ONLY_MSPACES
@@ -1697,20 +1703,20 @@ static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
 #else /* WIN32 */

 /* Win32 MMAP via VirtualAlloc */
-static FORCEINLINE void* win32mmap(size_t size) {
+SDL_FORCE_INLINE void* win32mmap(size_t size) {
   void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
   return (ptr != 0)? ptr: MFAIL;
 }

 /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
-static FORCEINLINE void* win32direct_mmap(size_t size) {
+SDL_FORCE_INLINE void* win32direct_mmap(size_t size) {
   void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
                            PAGE_READWRITE);
   return (ptr != 0)? ptr: MFAIL;
 }

 /* This function supports releasing coalesed segments */
-static FORCEINLINE int win32munmap(void* ptr, size_t size) {
+SDL_FORCE_INLINE int win32munmap(void* ptr, size_t size) {
   MEMORY_BASIC_INFORMATION minfo;
   char* cptr = (char*)ptr;
   while (size) {
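For reference, these wrappers are consumed just below this hunk through dlmalloc's porting macros; the macro names are from upstream dlmalloc and are unchanged by this commit (shown here from memory, not part of the diff):

/* Approximate: dlmalloc's default mmap hooks on Win32, defined
   shortly after the functions above in SDL_malloc.c. */
#define MMAP_DEFAULT(s)        win32mmap(s)
#define MUNMAP_DEFAULT(p, s)   win32munmap((p), (s))
#define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s)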
@@ -1863,7 +1869,7 @@ static FORCEINLINE int win32munmap(void* ptr, size_t size) {

 #elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
 /* Custom spin locks for older gcc on x86 */
-static FORCEINLINE int x86_cas_lock(int *sl) {
+SDL_FORCE_INLINE int x86_cas_lock(int *sl) {
   int ret;
   int val = 1;
   int cmp = 0;
@@ -1874,7 +1880,7 @@ static FORCEINLINE int x86_cas_lock(int *sl) {
   return ret;
 }

-static FORCEINLINE void x86_clear_lock(int* sl) {
+SDL_FORCE_INLINE void x86_clear_lock(int* sl) {
   assert(*sl != 0);
   int prev = 0;
   int ret;
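The x86 spin-lock helpers above feed dlmalloc's generic lock macros on this code path; roughly (from upstream dlmalloc, unchanged by this commit):

/* Approximate: dlmalloc's lock primitives on the gcc/x86 path. */
#define CAS_LOCK(sl)   x86_cas_lock(sl)
#define CLEAR_LOCK(sl) x86_clear_lock(sl)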
@@ -1952,14 +1958,14 @@ struct malloc_recursive_lock {
 #define MLOCK_T struct malloc_recursive_lock
 static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0};

-static FORCEINLINE void recursive_release_lock(MLOCK_T *lk) {
+SDL_FORCE_INLINE void recursive_release_lock(MLOCK_T *lk) {
   assert(lk->sl != 0);
   if (--lk->c == 0) {
     CLEAR_LOCK(&lk->sl);
   }
 }

-static FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) {
+SDL_FORCE_INLINE int recursive_acquire_lock(MLOCK_T *lk) {
   THREAD_ID_T mythreadid = CURRENT_THREAD;
   int spins = 0;
   for (;;) {
@@ -1980,7 +1986,7 @@ static FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) {
   }
 }

-static FORCEINLINE int recursive_try_lock(MLOCK_T *lk) {
+SDL_FORCE_INLINE int recursive_try_lock(MLOCK_T *lk) {
   THREAD_ID_T mythreadid = CURRENT_THREAD;
   if (*((volatile int *)(&lk->sl)) == 0) {
     if (!CAS_LOCK(&lk->sl)) {
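When USE_RECURSIVE_LOCKS is enabled, dlmalloc routes its generic lock operations through the recursive variants changed above; roughly (macro names from upstream dlmalloc, unchanged by this commit):

/* Approximate: recursive-lock wiring in dlmalloc. */
#define RELEASE_LOCK(lk) recursive_release_lock(lk)
#define TRY_LOCK(lk)     recursive_try_lock(lk)
#define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk)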