[Mesa-dev] [PATCH 2/4] gallium/util: replace pipe_thread_wait() with thrd_join()

Timothy Arceri tarceri at itsqueeze.com
Mon Mar 6 00:58:27 UTC 2017


The replacement was done using:
find ./src -type f -exec sed -i -- \
's:pipe_thread_wait(\([^)]*\)):thrd_join(\1, NULL):g' {} \;
---
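For reference, the removed wrapper was a thin shim over the C11 threads
API, so the mechanical rewrite is behavior-preserving: pipe_thread_wait()
simply returned thrd_join(thread, NULL), and the NULL second argument
discards the thread's exit code in both spellings. A minimal standalone
sketch of the call pattern, assuming a C11 <threads.h> (or Mesa's c11
compatibility shim) is available; this is hypothetical example code, not
taken from the tree:

#include <threads.h>
#include <stdio.h>

static int worker(void *arg)
{
   printf("worker got %d\n", *(int *)arg);
   return 42;   /* exit code; discarded by thrd_join(t, NULL) below */
}

int main(void)
{
   thrd_t t;
   int arg = 7;

   if (thrd_create(&t, worker, &arg) != thrd_success)
      return 1;

   /* Previously pipe_thread_wait(t); now the direct C11 call. */
   thrd_join(t, NULL);
   return 0;
}
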
 src/gallium/auxiliary/os/os_thread.h         | 5 -----
 src/gallium/auxiliary/util/u_queue.c         | 2 +-
 src/gallium/drivers/ddebug/dd_context.c      | 2 +-
 src/gallium/drivers/llvmpipe/lp_rast.c       | 2 +-
 src/gallium/drivers/radeon/r600_gpu_load.c   | 2 +-
 src/gallium/drivers/rbug/rbug_core.c         | 2 +-
 src/gallium/state_trackers/nine/nine_state.c | 2 +-
 src/gallium/tests/unit/pipe_barrier_test.c   | 2 +-
 8 files changed, 7 insertions(+), 12 deletions(-)

diff --git a/src/gallium/auxiliary/os/os_thread.h b/src/gallium/auxiliary/os/os_thread.h
index bb767fa..2aad3e2 100644
--- a/src/gallium/auxiliary/os/os_thread.h
+++ b/src/gallium/auxiliary/os/os_thread.h
@@ -61,25 +61,20 @@ static inline thrd_t pipe_thread_create(int (*routine)(void *), void *param)
 #else
    int ret;
    ret = thrd_create( &thread, routine, param );
 #endif
    if (ret)
       return 0;
 
    return thread;
 }
 
-static inline int pipe_thread_wait( thrd_t thread )
-{
-   return thrd_join( thread, NULL );
-}
-
 static inline int pipe_thread_destroy( thrd_t thread )
 {
    return thrd_detach( thread );
 }
 
 static inline void pipe_thread_setname( const char *name )
 {
 #if defined(HAVE_PTHREAD)
 #  if defined(__GNU_LIBRARY__) && defined(__GLIBC__) && defined(__GLIBC_MINOR__) && \
       (__GLIBC__ >= 3 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 12))
diff --git a/src/gallium/auxiliary/util/u_queue.c b/src/gallium/auxiliary/util/u_queue.c
index a3aed29..9565c53 100644
--- a/src/gallium/auxiliary/util/u_queue.c
+++ b/src/gallium/auxiliary/util/u_queue.c
@@ -265,21 +265,21 @@ util_queue_killall_and_wait(struct util_queue *queue)
 {
    unsigned i;
 
    /* Signal all threads to terminate. */
    mtx_lock(&queue->lock);
    queue->kill_threads = 1;
    cnd_broadcast(&queue->has_queued_cond);
    mtx_unlock(&queue->lock);
 
    for (i = 0; i < queue->num_threads; i++)
-      pipe_thread_wait(queue->threads[i]);
+      thrd_join(queue->threads[i], NULL);
    queue->num_threads = 0;
 }
 
 void
 util_queue_destroy(struct util_queue *queue)
 {
    util_queue_killall_and_wait(queue);
    remove_from_atexit_list(queue);
 
    cnd_destroy(&queue->has_space_cond);
diff --git a/src/gallium/drivers/ddebug/dd_context.c b/src/gallium/drivers/ddebug/dd_context.c
index a52975d..eae128a 100644
--- a/src/gallium/drivers/ddebug/dd_context.c
+++ b/src/gallium/drivers/ddebug/dd_context.c
@@ -590,21 +590,21 @@ dd_context_set_stream_output_targets(struct pipe_context *_pipe,
 static void
 dd_context_destroy(struct pipe_context *_pipe)
 {
    struct dd_context *dctx = dd_context(_pipe);
    struct pipe_context *pipe = dctx->pipe;
 
    if (dctx->thread) {
       mtx_lock(&dctx->mutex);
       dctx->kill_thread = 1;
       mtx_unlock(&dctx->mutex);
-      pipe_thread_wait(dctx->thread);
+      thrd_join(dctx->thread, NULL);
       mtx_destroy(&dctx->mutex);
       assert(!dctx->records);
    }
 
    if (dctx->fence) {
       pipe->transfer_unmap(pipe, dctx->fence_transfer);
       pipe_resource_reference(&dctx->fence, NULL);
    }
    pipe->destroy(pipe);
    FREE(dctx);
diff --git a/src/gallium/drivers/llvmpipe/lp_rast.c b/src/gallium/drivers/llvmpipe/lp_rast.c
index 2f222d0..678ef0b 100644
--- a/src/gallium/drivers/llvmpipe/lp_rast.c
+++ b/src/gallium/drivers/llvmpipe/lp_rast.c
@@ -949,21 +949,21 @@ void lp_rast_destroy( struct lp_rasterizer *rast )
       pipe_semaphore_signal(&rast->tasks[i].work_ready);
    }
 
    /* Wait for threads to terminate before cleaning up per-thread data.
    * We don't actually call pipe_thread_wait to avoid deadlock on Windows
     * per https://bugs.freedesktop.org/show_bug.cgi?id=76252 */
    for (i = 0; i < rast->num_threads; i++) {
 #ifdef _WIN32
       pipe_semaphore_wait(&rast->tasks[i].work_done);
 #else
-      pipe_thread_wait(rast->threads[i]);
+      thrd_join(rast->threads[i], NULL);
 #endif
    }
 
    /* Clean up per-thread data */
    for (i = 0; i < rast->num_threads; i++) {
       pipe_semaphore_destroy(&rast->tasks[i].work_ready);
       pipe_semaphore_destroy(&rast->tasks[i].work_done);
    }
    for (i = 0; i < MAX2(1, rast->num_threads); i++) {
       align_free(rast->tasks[i].thread_data.cache);
diff --git a/src/gallium/drivers/radeon/r600_gpu_load.c b/src/gallium/drivers/radeon/r600_gpu_load.c
index 67a2f35..00d49aa 100644
--- a/src/gallium/drivers/radeon/r600_gpu_load.c
+++ b/src/gallium/drivers/radeon/r600_gpu_load.c
@@ -163,21 +163,21 @@ r600_gpu_load_thread(void *param)
 	p_atomic_dec(&rscreen->gpu_load_stop_thread);
 	return 0;
 }
 
 void r600_gpu_load_kill_thread(struct r600_common_screen *rscreen)
 {
 	if (!rscreen->gpu_load_thread)
 		return;
 
 	p_atomic_inc(&rscreen->gpu_load_stop_thread);
-	pipe_thread_wait(rscreen->gpu_load_thread);
+	thrd_join(rscreen->gpu_load_thread, NULL);
 	rscreen->gpu_load_thread = 0;
 }
 
 static uint64_t r600_read_mmio_counter(struct r600_common_screen *rscreen,
 				       unsigned busy_index)
 {
 	/* Start the thread if needed. */
 	if (!rscreen->gpu_load_thread) {
 		mtx_lock(&rscreen->gpu_load_mutex);
 		/* Check again inside the mutex. */
diff --git a/src/gallium/drivers/rbug/rbug_core.c b/src/gallium/drivers/rbug/rbug_core.c
index 5752e5e..d09c4b5 100644
--- a/src/gallium/drivers/rbug/rbug_core.c
+++ b/src/gallium/drivers/rbug/rbug_core.c
@@ -862,21 +862,21 @@ rbug_start(struct rbug_screen *rb_screen)
    return tr_rbug;
 }
 
 void
 rbug_stop(struct rbug_rbug *tr_rbug)
 {
    if (!tr_rbug)
       return;
 
    tr_rbug->running = false;
-   pipe_thread_wait(tr_rbug->thread);
+   thrd_join(tr_rbug->thread, NULL);
 
    FREE(tr_rbug);
 
    return;
 }
 
 void
 rbug_notify_draw_blocked(struct rbug_context *rb_context)
 {
    struct rbug_screen *rb_screen = rbug_screen(rb_context->base.screen);
diff --git a/src/gallium/state_trackers/nine/nine_state.c b/src/gallium/state_trackers/nine/nine_state.c
index 5ec3d34..47a715d 100644
--- a/src/gallium/state_trackers/nine/nine_state.c
+++ b/src/gallium/state_trackers/nine/nine_state.c
@@ -231,21 +231,21 @@ nine_csmt_destroy( struct NineDevice9 *device, struct csmt_context *ctx )
     /* Signal worker to terminate. */
     p_atomic_set(&ctx->terminate, TRUE);
     nine_queue_flush(ctx->pool);
 
     nine_csmt_wait_processed(ctx);
     nine_queue_delete(ctx->pool);
     mtx_destroy(&ctx->mutex_processed);
 
     FREE(ctx);
 
-    pipe_thread_wait(render_thread);
+    thrd_join(render_thread, NULL);
 }
 
 static void
 nine_csmt_pause( struct NineDevice9 *device )
 {
     struct csmt_context *ctx = device->csmt_ctx;
 
     if (!device->csmt_active)
         return;
 
diff --git a/src/gallium/tests/unit/pipe_barrier_test.c b/src/gallium/tests/unit/pipe_barrier_test.c
index 34a77b8..f77f1e1 100644
--- a/src/gallium/tests/unit/pipe_barrier_test.c
+++ b/src/gallium/tests/unit/pipe_barrier_test.c
@@ -110,21 +110,21 @@ int main(int argc, char *argv[])
    LOG("pipe_barrier_test starting\n");
 
    pipe_barrier_init(&barrier, NUM_THREADS);
 
    for (i = 0; i < NUM_THREADS; i++) {
       thread_ids[i] = i;
       threads[i] = pipe_thread_create(thread_function, (void *) &thread_ids[i]);
    }
 
    for (i = 0; i < NUM_THREADS; i++ ) {
-      pipe_thread_wait(threads[i]);
+      thrd_join(threads[i], NULL);
    }
 
    CHECK(p_atomic_read(&proceeded) == NUM_THREADS);
 
    pipe_barrier_destroy(&barrier);
 
    LOG("pipe_barrier_test exiting\n");
 
    return 0;
 }
-- 
2.9.3


