Renamed atomic functions to match SDL 3.0 naming convention

This will also allow us to cleanly add atomic operations for other types in the future.
Sam Lantinga
2024-09-16 23:21:31 -07:00
parent f3e419596b
commit 8d223b3037
64 changed files with 496 additions and 472 deletions
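
The new names follow the verb-first convention used across the SDL3 API: SDL_AtomicGet, SDL_AtomicSet, SDL_AtomicAdd and SDL_AtomicCompareAndSwap become SDL_GetAtomicInt, SDL_SetAtomicInt, SDL_AddAtomicInt and SDL_CompareAndSwapAtomicInt. A minimal sketch of application code using the renamed calls (illustrative only, not part of this commit; it assumes the SDL3 atomic header and the signatures visible in the hunks below):

#include <SDL3/SDL_atomic.h>

static SDL_AtomicInt counter;

static void update_counter(void)
{
    SDL_SetAtomicInt(&counter, 0);              // was SDL_AtomicSet()
    SDL_AddAtomicInt(&counter, 1);              // was SDL_AtomicAdd()
    int value = SDL_GetAtomicInt(&counter);     // was SDL_AtomicGet()

    // was SDL_AtomicCompareAndSwap(): store value + 1 only if nothing changed the counter since the read
    if (SDL_CompareAndSwapAtomicInt(&counter, value, value + 1)) {
        // the swap happened atomically
    }
}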


@@ -46,7 +46,7 @@ void *SDL_GetTLS(SDL_TLSID *id)
return NULL;
}
- storage_index = SDL_AtomicGet(id) - 1;
+ storage_index = SDL_GetAtomicInt(id) - 1;
storage = SDL_SYS_GetTLSData();
if (!storage || storage_index < 0 || storage_index >= storage->limit) {
return NULL;
@@ -70,16 +70,16 @@ SDL_bool SDL_SetTLS(SDL_TLSID *id, const void *value, SDL_TLSDestructorCallback
SDL_InitTLSData();
// Get the storage index associated with the ID in a thread-safe way
- storage_index = SDL_AtomicGet(id) - 1;
+ storage_index = SDL_GetAtomicInt(id) - 1;
if (storage_index < 0) {
int new_id = (SDL_AtomicIncRef(&SDL_tls_id) + 1);
- SDL_AtomicCompareAndSwap(id, 0, new_id);
+ SDL_CompareAndSwapAtomicInt(id, 0, new_id);
/* If there was a race condition we'll have wasted an ID, but every thread
* will have the same storage index for this id.
*/
- storage_index = SDL_AtomicGet(id) - 1;
+ storage_index = SDL_GetAtomicInt(id) - 1;
}
// Get the storage for the current thread
@@ -135,7 +135,7 @@ void SDL_QuitTLSData(void)
{
SDL_CleanupTLS();
- if (SDL_AtomicGet(&SDL_tls_allocated) == 0) {
+ if (SDL_GetAtomicInt(&SDL_tls_allocated) == 0) {
SDL_SYS_QuitTLSData();
} else {
// Some thread hasn't called SDL_CleanupTLS()
@@ -326,9 +326,9 @@ void SDL_RunThread(SDL_Thread *thread)
SDL_CleanupTLS();
// Mark us as ready to be joined (or detached)
- if (!SDL_AtomicCompareAndSwap(&thread->state, SDL_THREAD_STATE_ALIVE, SDL_THREAD_STATE_ZOMBIE)) {
+ if (!SDL_CompareAndSwapAtomicInt(&thread->state, SDL_THREAD_STATE_ALIVE, SDL_THREAD_STATE_ZOMBIE)) {
// Clean up if something already detached us.
- if (SDL_AtomicCompareAndSwap(&thread->state, SDL_THREAD_STATE_DETACHED, SDL_THREAD_STATE_CLEANED)) {
+ if (SDL_CompareAndSwapAtomicInt(&thread->state, SDL_THREAD_STATE_DETACHED, SDL_THREAD_STATE_CLEANED)) {
SDL_free(thread->name); // Can't free later, we've already cleaned up TLS
SDL_free(thread);
}
@@ -364,7 +364,7 @@ SDL_Thread *SDL_CreateThreadWithPropertiesRuntime(SDL_PropertiesID props,
return NULL;
}
thread->status = -1;
- SDL_AtomicSet(&thread->state, SDL_THREAD_STATE_ALIVE);
+ SDL_SetAtomicInt(&thread->state, SDL_THREAD_STATE_ALIVE);
// Set up the arguments for the thread
if (name) {
@@ -463,11 +463,11 @@ void SDL_DetachThread(SDL_Thread *thread)
}
// Grab dibs if the state is alive+joinable.
- if (SDL_AtomicCompareAndSwap(&thread->state, SDL_THREAD_STATE_ALIVE, SDL_THREAD_STATE_DETACHED)) {
+ if (SDL_CompareAndSwapAtomicInt(&thread->state, SDL_THREAD_STATE_ALIVE, SDL_THREAD_STATE_DETACHED)) {
SDL_SYS_DetachThread(thread);
} else {
// all other states are pretty final, see where we landed.
- const int thread_state = SDL_AtomicGet(&thread->state);
+ const int thread_state = SDL_GetAtomicInt(&thread->state);
if ((thread_state == SDL_THREAD_STATE_DETACHED) || (thread_state == SDL_THREAD_STATE_CLEANED)) {
return; // already detached (you shouldn't call this twice!)
} else if (thread_state == SDL_THREAD_STATE_ZOMBIE) {

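The thread-state hunks above all rely on the same claim-by-compare-and-swap pattern: try to move the state from one known value to another, and fall back to reading the current value if the swap loses the race. A standalone sketch of that pattern with the renamed calls (the state values here are placeholders for illustration, not SDL's internal SDL_THREAD_STATE_* constants):

#include <SDL3/SDL_atomic.h>

enum { STATE_ALIVE, STATE_DETACHED, STATE_ZOMBIE };  // placeholder states, for illustration only

static SDL_AtomicInt state;

static void detach(void)
{
    // Grab dibs only if the ALIVE -> DETACHED transition is still possible.
    if (SDL_CompareAndSwapAtomicInt(&state, STATE_ALIVE, STATE_DETACHED)) {
        // we won the race and own the detach
    } else {
        // someone else changed the state first; see where we landed
        const int current = SDL_GetAtomicInt(&state);
        (void)current;
    }
}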

@@ -74,8 +74,8 @@ SDL_RWLock *SDL_CreateRWLock_generic(void)
return NULL;
}
- SDL_AtomicSet(&rwlock->reader_count, 0);
- SDL_AtomicSet(&rwlock->writer_count, 0);
+ SDL_SetAtomicInt(&rwlock->reader_count, 0);
+ SDL_SetAtomicInt(&rwlock->writer_count, 0);
#endif
return rwlock;
@@ -98,8 +98,8 @@ void SDL_LockRWLockForReading_generic(SDL_RWLock *rwlock) SDL_NO_THREAD_SAFETY_A
if (rwlock) {
// !!! FIXME: these don't have to be atomic, we always gate them behind a mutex.
SDL_LockMutex(rwlock->lock);
- SDL_assert(SDL_AtomicGet(&rwlock->writer_count) == 0); // shouldn't be able to grab lock if there's a writer!
- SDL_AtomicAdd(&rwlock->reader_count, 1);
+ SDL_assert(SDL_GetAtomicInt(&rwlock->writer_count) == 0); // shouldn't be able to grab lock if there's a writer!
+ SDL_AddAtomicInt(&rwlock->reader_count, 1);
SDL_UnlockMutex(rwlock->lock); // other readers can attempt to share the lock.
}
#endif
@@ -110,12 +110,12 @@ void SDL_LockRWLockForWriting_generic(SDL_RWLock *rwlock) SDL_NO_THREAD_SAFETY_A
#ifndef SDL_THREADS_DISABLED
if (rwlock) {
SDL_LockMutex(rwlock->lock);
- while (SDL_AtomicGet(&rwlock->reader_count) > 0) { // while something is holding the shared lock, keep waiting.
+ while (SDL_GetAtomicInt(&rwlock->reader_count) > 0) { // while something is holding the shared lock, keep waiting.
SDL_WaitCondition(rwlock->condition, rwlock->lock); // release the lock and wait for readers holding the shared lock to release it, regrab the lock.
}
// we hold the lock!
- SDL_AtomicAdd(&rwlock->writer_count, 1); // we let these be recursive, but the API doesn't require this. It _does_ trust you unlock correctly!
+ SDL_AddAtomicInt(&rwlock->writer_count, 1); // we let these be recursive, but the API doesn't require this. It _does_ trust you unlock correctly!
}
#endif
}
@@ -129,8 +129,8 @@ bool SDL_TryLockRWLockForReading_generic(SDL_RWLock *rwlock)
return false;
}
- SDL_assert(SDL_AtomicGet(&rwlock->writer_count) == 0); // shouldn't be able to grab lock if there's a writer!
- SDL_AtomicAdd(&rwlock->reader_count, 1);
+ SDL_assert(SDL_GetAtomicInt(&rwlock->writer_count) == 0); // shouldn't be able to grab lock if there's a writer!
+ SDL_AddAtomicInt(&rwlock->reader_count, 1);
SDL_UnlockMutex(rwlock->lock); // other readers can attempt to share the lock.
}
#endif
@@ -153,13 +153,13 @@ bool SDL_TryLockRWLockForWriting_generic(SDL_RWLock *rwlock)
return false;
}
- if (SDL_AtomicGet(&rwlock->reader_count) > 0) { // a reader is using the shared lock, treat it as unavailable.
+ if (SDL_GetAtomicInt(&rwlock->reader_count) > 0) { // a reader is using the shared lock, treat it as unavailable.
SDL_UnlockMutex(rwlock->lock);
return false;
}
// we hold the lock!
- SDL_AtomicAdd(&rwlock->writer_count, 1); // we let these be recursive, but the API doesn't require this. It _does_ trust you unlock correctly!
+ SDL_AddAtomicInt(&rwlock->writer_count, 1); // we let these be recursive, but the API doesn't require this. It _does_ trust you unlock correctly!
}
#endif
@@ -179,11 +179,11 @@ void SDL_UnlockRWLock_generic(SDL_RWLock *rwlock) SDL_NO_THREAD_SAFETY_ANALYSIS
if (rwlock) {
SDL_LockMutex(rwlock->lock); // recursive lock for writers, readers grab lock to make sure things are sane.
- if (SDL_AtomicGet(&rwlock->reader_count) > 0) { // we're a reader
- SDL_AtomicAdd(&rwlock->reader_count, -1);
+ if (SDL_GetAtomicInt(&rwlock->reader_count) > 0) { // we're a reader
+ SDL_AddAtomicInt(&rwlock->reader_count, -1);
SDL_BroadcastCondition(rwlock->condition); // alert any pending writers to attempt to try to grab the lock again.
- } else if (SDL_AtomicGet(&rwlock->writer_count) > 0) { // we're a writer
- SDL_AtomicAdd(&rwlock->writer_count, -1);
+ } else if (SDL_GetAtomicInt(&rwlock->writer_count) > 0) { // we're a writer
+ SDL_AddAtomicInt(&rwlock->writer_count, -1);
SDL_UnlockMutex(rwlock->lock); // recursive unlock.
}

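In the generic rwlock above, the atomic ints are plain reader/writer counters that are always manipulated behind the mutex; the bookkeeping itself reduces to the renamed add/get calls, roughly as in this simplified sketch (no mutex or condition variable shown, illustration only):

#include <stdbool.h>
#include <SDL3/SDL_atomic.h>

static SDL_AtomicInt reader_count;

static void reader_enter(void)
{
    SDL_AddAtomicInt(&reader_count, 1);           // was SDL_AtomicAdd(&reader_count, 1)
}

static void reader_leave(void)
{
    SDL_AddAtomicInt(&reader_count, -1);          // the delta is signed, so -1 decrements
}

static bool readers_active(void)
{
    return SDL_GetAtomicInt(&reader_count) > 0;   // was SDL_AtomicGet(&reader_count) > 0
}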

@@ -126,7 +126,7 @@ void SDL_SYS_WaitThread(SDL_Thread *thread)
Detached threads can be waited on, but should NOT be cleaned manually
as it would result in a fatal error.
*/
- if (R_SUCCEEDED(res) && SDL_AtomicGet(&thread->state) != SDL_THREAD_STATE_DETACHED) {
+ if (R_SUCCEEDED(res) && SDL_GetAtomicInt(&thread->state) != SDL_THREAD_STATE_DETACHED) {
threadFree(thread->handle);
}
}