Mesa (main): util/c11: Update function u_thread_create to be C11 conformant
GitLab Mirror
gitlab-mirror at kemper.freedesktop.org
Wed Jun 15 18:05:01 UTC 2022
Module: Mesa
Branch: main
Commit: a9e2c699aa3677e24a58de10c5e68ecb950f4a94
URL: http://cgit.freedesktop.org/mesa/mesa/commit/?id=a9e2c699aa3677e24a58de10c5e68ecb950f4a94
Author: Yonggang Luo <luoyonggang at gmail.com>
Date: Tue Jun 14 14:00:57 2022 +0800
util/c11: Update function u_thread_create to be C11 conformant
Do not assume thrd_t to be a pointer or integer, as the C11 standard tells us:
thrd_t: implementation-defined complete object type identifying a thread
At https://en.cppreference.com/w/c/thread
So we always return the thread creation return code instead of the thrd_t value, and check the return
code properly.
Signed-off-by: Yonggang Luo <luoyonggang at gmail.com>
Reviewed-by: Jesse Natalie <jenatali at microsoft.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/15087>
---
src/gallium/auxiliary/driver_ddebug/dd_context.c | 3 +--
src/gallium/auxiliary/driver_rbug/rbug_core.c | 5 ++++-
src/gallium/drivers/llvmpipe/lp_cs_tpool.c | 8 ++++++--
src/gallium/drivers/llvmpipe/lp_rast.c | 5 ++---
src/gallium/drivers/r600/r600_gpu_load.c | 15 +++++++++------
src/gallium/drivers/r600/r600_pipe_common.h | 1 +
src/gallium/drivers/radeonsi/si_gpu_load.c | 13 ++++++++-----
src/gallium/drivers/radeonsi/si_pipe.h | 1 +
src/gallium/frontends/nine/nine_state.c | 3 +--
src/gallium/tests/unit/pipe_barrier_test.c | 2 +-
src/util/u_queue.c | 4 +---
src/util/u_thread.h | 14 +++++---------
12 files changed, 40 insertions(+), 34 deletions(-)
diff --git a/src/gallium/auxiliary/driver_ddebug/dd_context.c b/src/gallium/auxiliary/driver_ddebug/dd_context.c
index 53b68a3529c..cddea36367d 100644
--- a/src/gallium/auxiliary/driver_ddebug/dd_context.c
+++ b/src/gallium/auxiliary/driver_ddebug/dd_context.c
@@ -959,8 +959,7 @@ dd_context_create(struct dd_screen *dscreen, struct pipe_context *pipe)
list_inithead(&dctx->records);
(void) mtx_init(&dctx->mutex, mtx_plain);
(void) cnd_init(&dctx->cond);
- dctx->thread = u_thread_create(dd_thread_main, dctx);
- if (!dctx->thread) {
+ if (thrd_success != u_thread_create(&dctx->thread,dd_thread_main, dctx)) {
mtx_destroy(&dctx->mutex);
goto fail;
}
diff --git a/src/gallium/auxiliary/driver_rbug/rbug_core.c b/src/gallium/auxiliary/driver_rbug/rbug_core.c
index ac5262ced84..a74d20a987b 100644
--- a/src/gallium/auxiliary/driver_rbug/rbug_core.c
+++ b/src/gallium/auxiliary/driver_rbug/rbug_core.c
@@ -841,7 +841,10 @@ rbug_start(struct rbug_screen *rb_screen)
tr_rbug->rb_screen = rb_screen;
tr_rbug->running = true;
- tr_rbug->thread = u_thread_create(rbug_thread, tr_rbug);
+ if (thrd_success != u_thread_create(&tr_rbug->thread, rbug_thread, tr_rbug)) {
+ FREE(tr_rbug);
+ return NULL;
+ }
return tr_rbug;
}
diff --git a/src/gallium/drivers/llvmpipe/lp_cs_tpool.c b/src/gallium/drivers/llvmpipe/lp_cs_tpool.c
index 4bf76a16bc3..4ce0b6941d5 100644
--- a/src/gallium/drivers/llvmpipe/lp_cs_tpool.c
+++ b/src/gallium/drivers/llvmpipe/lp_cs_tpool.c
@@ -96,9 +96,13 @@ lp_cs_tpool_create(unsigned num_threads)
list_inithead(&pool->workqueue);
assert (num_threads <= LP_MAX_THREADS);
+ for (unsigned i = 0; i < num_threads; i++) {
+ if (thrd_success != u_thread_create(pool->threads + i, lp_cs_tpool_worker, pool)) {
+ num_threads = i; /* previous thread is max */
+ break;
+ }
+ }
pool->num_threads = num_threads;
- for (unsigned i = 0; i < num_threads; i++)
- pool->threads[i] = u_thread_create(lp_cs_tpool_worker, pool);
return pool;
}
diff --git a/src/gallium/drivers/llvmpipe/lp_rast.c b/src/gallium/drivers/llvmpipe/lp_rast.c
index 252053174ce..0e498aaf4a6 100644
--- a/src/gallium/drivers/llvmpipe/lp_rast.c
+++ b/src/gallium/drivers/llvmpipe/lp_rast.c
@@ -1238,9 +1238,8 @@ create_rast_threads(struct lp_rasterizer *rast)
for (unsigned i = 0; i < rast->num_threads; i++) {
pipe_semaphore_init(&rast->tasks[i].work_ready, 0);
pipe_semaphore_init(&rast->tasks[i].work_done, 0);
- rast->threads[i] = u_thread_create(thread_function,
- (void *) &rast->tasks[i]);
- if (!rast->threads[i]) {
+ if (thrd_success != u_thread_create(rast->threads + i, thread_function,
+ (void *) &rast->tasks[i])) {
rast->num_threads = i; /* previous thread is max */
break;
}
diff --git a/src/gallium/drivers/r600/r600_gpu_load.c b/src/gallium/drivers/r600/r600_gpu_load.c
index a2de8cc71a4..6ad76c28ee6 100644
--- a/src/gallium/drivers/r600/r600_gpu_load.c
+++ b/src/gallium/drivers/r600/r600_gpu_load.c
@@ -144,24 +144,27 @@ r600_gpu_load_thread(void *param)
void r600_gpu_load_kill_thread(struct r600_common_screen *rscreen)
{
- if (!rscreen->gpu_load_thread)
+ if (!rscreen->gpu_load_thread_created)
return;
p_atomic_inc(&rscreen->gpu_load_stop_thread);
thrd_join(rscreen->gpu_load_thread, NULL);
- rscreen->gpu_load_thread = 0;
+ rscreen->gpu_load_thread_created = false;
}
static uint64_t r600_read_mmio_counter(struct r600_common_screen *rscreen,
unsigned busy_index)
{
/* Start the thread if needed. */
- if (!rscreen->gpu_load_thread) {
+ if (!rscreen->gpu_load_thread_created) {
mtx_lock(&rscreen->gpu_load_mutex);
/* Check again inside the mutex. */
- if (!rscreen->gpu_load_thread)
- rscreen->gpu_load_thread =
- u_thread_create(r600_gpu_load_thread, rscreen);
+ if (!rscreen->gpu_load_thread_created) {
+ int ret = u_thread_create(&rscreen->gpu_load_thread, r600_gpu_load_thread, rscreen);
+ if (ret == thrd_success) {
+ rscreen->gpu_load_thread_created = true;
+ }
+ }
mtx_unlock(&rscreen->gpu_load_mutex);
}
diff --git a/src/gallium/drivers/r600/r600_pipe_common.h b/src/gallium/drivers/r600/r600_pipe_common.h
index 60560de69e4..b834cfb948c 100644
--- a/src/gallium/drivers/r600/r600_pipe_common.h
+++ b/src/gallium/drivers/r600/r600_pipe_common.h
@@ -360,6 +360,7 @@ struct r600_common_screen {
/* GPU load thread. */
mtx_t gpu_load_mutex;
thrd_t gpu_load_thread;
+ bool gpu_load_thread_created;
union r600_mmio_counters mmio_counters;
volatile unsigned gpu_load_stop_thread; /* bool */
diff --git a/src/gallium/drivers/radeonsi/si_gpu_load.c b/src/gallium/drivers/radeonsi/si_gpu_load.c
index b6cd976a404..615acd36e79 100644
--- a/src/gallium/drivers/radeonsi/si_gpu_load.c
+++ b/src/gallium/drivers/radeonsi/si_gpu_load.c
@@ -159,22 +159,25 @@ static int si_gpu_load_thread(void *param)
void si_gpu_load_kill_thread(struct si_screen *sscreen)
{
- if (!sscreen->gpu_load_thread)
+ if (!sscreen->gpu_load_thread_created)
return;
p_atomic_inc(&sscreen->gpu_load_stop_thread);
thrd_join(sscreen->gpu_load_thread, NULL);
- sscreen->gpu_load_thread = 0;
+ sscreen->gpu_load_thread_created = false;
}
static uint64_t si_read_mmio_counter(struct si_screen *sscreen, unsigned busy_index)
{
/* Start the thread if needed. */
- if (!sscreen->gpu_load_thread) {
+ if (!sscreen->gpu_load_thread_created) {
simple_mtx_lock(&sscreen->gpu_load_mutex);
/* Check again inside the mutex. */
- if (!sscreen->gpu_load_thread)
- sscreen->gpu_load_thread = u_thread_create(si_gpu_load_thread, sscreen);
+ if (!sscreen->gpu_load_thread_created) {
+ if (thrd_success == u_thread_create(&sscreen->gpu_load_thread, si_gpu_load_thread, sscreen)) {
+ sscreen->gpu_load_thread_created = true;
+ }
+ }
simple_mtx_unlock(&sscreen->gpu_load_mutex);
}
diff --git a/src/gallium/drivers/radeonsi/si_pipe.h b/src/gallium/drivers/radeonsi/si_pipe.h
index 6cd7466baa5..2b18772f79b 100644
--- a/src/gallium/drivers/radeonsi/si_pipe.h
+++ b/src/gallium/drivers/radeonsi/si_pipe.h
@@ -630,6 +630,7 @@ struct si_screen {
/* GPU load thread. */
simple_mtx_t gpu_load_mutex;
thrd_t gpu_load_thread;
+ bool gpu_load_thread_created;
union si_mmio_counters mmio_counters;
volatile unsigned gpu_load_stop_thread; /* bool */
diff --git a/src/gallium/frontends/nine/nine_state.c b/src/gallium/frontends/nine/nine_state.c
index 1365f1c937d..c991aa312a5 100644
--- a/src/gallium/frontends/nine/nine_state.c
+++ b/src/gallium/frontends/nine/nine_state.c
@@ -163,8 +163,7 @@ nine_csmt_create( struct NineDevice9 *This )
ctx->device = This;
- ctx->worker = u_thread_create(nine_csmt_worker, ctx);
- if (!ctx->worker) {
+ if (thrd_success != u_thread_create(&ctx->worker, nine_csmt_worker, ctx)) {
nine_queue_delete(ctx->pool);
FREE(ctx);
return NULL;
diff --git a/src/gallium/tests/unit/pipe_barrier_test.c b/src/gallium/tests/unit/pipe_barrier_test.c
index 838c9bfd96c..e987b957bd0 100644
--- a/src/gallium/tests/unit/pipe_barrier_test.c
+++ b/src/gallium/tests/unit/pipe_barrier_test.c
@@ -114,7 +114,7 @@ int main(int argc, char *argv[])
for (i = 0; i < NUM_THREADS; i++) {
thread_ids[i] = i;
- threads[i] = u_thread_create(thread_function, (void *) &thread_ids[i]);
+ u_thread_create(threads + i, thread_function, (void *) &thread_ids[i]);
}
for (i = 0; i < NUM_THREADS; i++ ) {
diff --git a/src/util/u_queue.c b/src/util/u_queue.c
index 1a73ffdbd5b..df2f2da4a95 100644
--- a/src/util/u_queue.c
+++ b/src/util/u_queue.c
@@ -337,9 +337,7 @@ util_queue_create_thread(struct util_queue *queue, unsigned index)
input->queue = queue;
input->thread_index = index;
- queue->threads[index] = u_thread_create(util_queue_thread_func, input);
-
- if (!queue->threads[index]) {
+ if (thrd_success != u_thread_create(queue->threads + index, util_queue_thread_func, input)) {
free(input);
return false;
}
diff --git a/src/util/u_thread.h b/src/util/u_thread.h
index 15fadfae19a..6221720123d 100644
--- a/src/util/u_thread.h
+++ b/src/util/u_thread.h
@@ -101,26 +101,22 @@ util_get_current_cpu(void)
#endif
}
-static inline thrd_t u_thread_create(int (*routine)(void *), void *param)
+static inline int u_thread_create(thrd_t *thrd, int (*routine)(void *), void *param)
{
- thrd_t thread;
+ int ret = thrd_error;
#ifdef HAVE_PTHREAD
sigset_t saved_set, new_set;
- int ret;
sigfillset(&new_set);
sigdelset(&new_set, SIGSYS);
pthread_sigmask(SIG_BLOCK, &new_set, &saved_set);
- ret = thrd_create( &thread, routine, param );
+ ret = thrd_create(thrd, routine, param);
pthread_sigmask(SIG_SETMASK, &saved_set, NULL);
#else
- int ret;
- ret = thrd_create( &thread, routine, param );
+ ret = thrd_create(thrd, routine, param);
#endif
- if (ret)
- return 0;
- return thread;
+ return ret;
}
static inline void u_thread_setname( const char *name )
More information about the mesa-commit
mailing list