Merge pull request #1096 from nakst/master

thread_pool.cpp: fix with 1 thread; gb.h: remove buggy /proc/cpuinfo code
This commit is contained in:
gingerBill
2021-08-23 09:32:41 +01:00
committed by GitHub
2 changed files with 19 additions and 85 deletions

View File

@@ -3785,74 +3785,14 @@ isize gb_affinity_thread_count_for_core(gbAffinity *a, isize core) {
}
#elif defined(GB_SYSTEM_LINUX)
// IMPORTANT TODO(bill): This gbAffinity stuff for linux needs be improved a lot!
// NOTE(zangent): I have to read /proc/cpuinfo to get the number of threads per core.
#include <stdio.h>
// Initialize CPU affinity info on Linux from sysconf alone.
// The previous implementation parsed /proc/cpuinfo to estimate threads per
// core, but the parser was buggy (e.g. the digit test `c < '0' || '9' > c`
// rejects the digits 0-8), so we report one thread per core and trust only
// _SC_NPROCESSORS_ONLN.
void gb_affinity_init(gbAffinity *a) {
	a->core_count       = sysconf(_SC_NPROCESSORS_ONLN);
	a->threads_per_core = 1;
	// sysconf returns a non-positive value on failure; fall back to one core
	// and flag the result as inaccurate.
	a->is_accurate      = a->core_count > 0;
	a->core_count       = a->is_accurate ? a->core_count : 1;
	a->thread_count     = a->core_count;
}
void gb_affinity_destroy(gbAffinity *a) {

View File

@@ -11,30 +11,27 @@ struct WorkerTask {
// Shared state for the worker pool.
struct ThreadPool {
	std::atomic<isize> outstanding_task_count; // tasks added but not yet completed
	// Head of the singly-linked task queue (tasks chain via task->next_task).
	// volatile: workers peek at this outside the mutex before bothering to lock.
	WorkerTask *volatile next_task;
	BlockingMutex task_list_mutex;             // guards mutation of next_task
};
// Create a pool with `thread_count` workers; `worker_prefix` is an optional
// name prefix for the worker threads (presumably — confirm in thread_pool_init).
void thread_pool_init(ThreadPool *pool, gbAllocator const &a, isize thread_count, char const *worker_prefix = nullptr);
void thread_pool_destroy(ThreadPool *pool);
// Block until every queued task has completed; the waiting thread also runs
// tasks itself (see thread_pool_wait calling thread_pool_thread_entry).
void thread_pool_wait(ThreadPool *pool);
// Queue `proc(data)` for execution on a worker thread.
void thread_pool_add_task(ThreadPool *pool, WorkerTaskProc *proc, void *data);
void worker_thread_internal();
// Worker loop: run queued tasks until the pool's outstanding count reaches
// zero. Also invoked directly by the waiting thread so that a pool with a
// single thread still makes progress.
void thread_pool_thread_entry(ThreadPool *pool) {
	while (pool->outstanding_task_count) {
		if (!pool->next_task) {
			yield(); // No need to grab the mutex.
		} else {
			mutex_lock(&pool->task_list_mutex);
			// Re-check under the lock: another worker may have taken the
			// task between the unlocked peek and here.
			if (pool->next_task) {
				WorkerTask *task = pool->next_task;
				pool->next_task = task->next_task;
				mutex_unlock(&pool->task_list_mutex);
				task->do_work(task->data);
				pool->outstanding_task_count.fetch_sub(1);
				gb_free(heap_allocator(), task);
			} else {
				mutex_unlock(&pool->task_list_mutex);
			}
		}
	}
}
@@ -77,10 +74,7 @@ void thread_pool_destroy(ThreadPool *pool) {
// Wait for all queued tasks to finish. Rather than busy-waiting on the
// counter, the calling thread enters the worker loop and drains tasks
// itself — this is what makes a pool with only one thread work.
void thread_pool_wait(ThreadPool *pool) {
	// NOTE(review): presumably releases a sentinel count taken at init so the
	// worker loop can terminate — confirm against thread_pool_init.
	pool->outstanding_task_count.fetch_sub(1);
	thread_pool_thread_entry(pool);
}
void thread_pool_add_task(ThreadPool *pool, WorkerTaskProc *proc, void *data) {