[Mesa-dev] [PATCH 3/5] gallium/util: replace pipe_condvar_wait() with cnd_wait()
Timothy Arceri
tarceri at itsqueeze.com
Sat Mar 4 23:41:31 UTC 2017
pipe_condvar_wait() was made unnecessary with commit fd33a6bcd7f12: pipe_condvar is now just a typedef of the C11 cnd_t, so callers can use cnd_wait() directly.
---
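Note for reviewers (kept below the --- so it stays out of the commit message):
every converted call site follows the same predicate/wait-loop idiom. Below is
a minimal, standalone sketch of that idiom using only the C11 <threads.h> API;
the names flag, flag_mutex, flag_cond, wait_for_flag() and set_flag() are
illustrative only, not Mesa code.

#include <stdbool.h>
#include <threads.h>

static mtx_t flag_mutex;
static cnd_t flag_cond;
static bool flag;

static void wait_for_flag(void)
{
   mtx_lock(&flag_mutex);
   /* Re-check the predicate after every wakeup: cnd_wait() may return
    * spuriously, and another waiter may have consumed the state. */
   while (!flag)
      cnd_wait(&flag_cond, &flag_mutex); /* was pipe_condvar_wait(flag_cond, flag_mutex) */
   mtx_unlock(&flag_mutex);
}

static void set_flag(void)
{
   mtx_lock(&flag_mutex);
   flag = true;
   cnd_broadcast(&flag_cond); /* wake all waiters */
   mtx_unlock(&flag_mutex);
}

int main(void)
{
   mtx_init(&flag_mutex, mtx_plain);
   cnd_init(&flag_cond);
   set_flag();
   wait_for_flag(); /* returns immediately since the flag is already set */
   return 0;
}

Since pipe_condvar_wait(cond, mutex) expanded to exactly cnd_wait(&(cond),
&(mutex)), the replacement is mechanical and the surrounding wait loops are
left unchanged.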
src/gallium/auxiliary/os/os_thread.h | 7 ++-----
src/gallium/auxiliary/util/u_queue.c | 6 +++---
src/gallium/auxiliary/util/u_ringbuffer.c | 4 ++--
src/gallium/drivers/llvmpipe/lp_fence.c | 2 +-
src/gallium/drivers/rbug/rbug_context.c | 2 +-
src/gallium/state_trackers/nine/nine_queue.c | 4 ++--
src/gallium/state_trackers/nine/nine_state.c | 2 +-
7 files changed, 12 insertions(+), 15 deletions(-)
diff --git a/src/gallium/auxiliary/os/os_thread.h b/src/gallium/auxiliary/os/os_thread.h
index e230d06..6895f4e 100644
--- a/src/gallium/auxiliary/os/os_thread.h
+++ b/src/gallium/auxiliary/os/os_thread.h
@@ -141,23 +141,20 @@ __pipe_mutex_assert_locked(pipe_mutex *mutex)
assert(ret == thrd_busy);
if (ret == thrd_success)
mtx_unlock(mutex);
#endif
}
/* pipe_condvar
*/
typedef cnd_t pipe_condvar;
-#define pipe_condvar_wait(cond, mutex) \
- cnd_wait(&(cond), &(mutex))
-
#define pipe_condvar_signal(cond) \
cnd_signal(&(cond))
#define pipe_condvar_broadcast(cond) \
cnd_broadcast(&(cond))
/*
* pipe_barrier
*/
@@ -212,21 +209,21 @@ static inline void pipe_barrier_wait(pipe_barrier *barrier)
{
pipe_mutex_lock(barrier->mutex);
assert(barrier->waiters < barrier->count);
barrier->waiters++;
if (barrier->waiters < barrier->count) {
uint64_t sequence = barrier->sequence;
do {
- pipe_condvar_wait(barrier->condvar, barrier->mutex);
+ cnd_wait(&barrier->condvar, &barrier->mutex);
} while (sequence == barrier->sequence);
} else {
barrier->waiters = 0;
barrier->sequence++;
pipe_condvar_broadcast(barrier->condvar);
}
pipe_mutex_unlock(barrier->mutex);
}
@@ -270,21 +267,21 @@ pipe_semaphore_signal(pipe_semaphore *sema)
pipe_condvar_signal(sema->cond);
pipe_mutex_unlock(sema->mutex);
}
/** Wait for semaphore counter to be greater than zero */
static inline void
pipe_semaphore_wait(pipe_semaphore *sema)
{
pipe_mutex_lock(sema->mutex);
while (sema->counter <= 0) {
- pipe_condvar_wait(sema->cond, sema->mutex);
+ cnd_wait(&sema->cond, &sema->mutex);
}
sema->counter--;
pipe_mutex_unlock(sema->mutex);
}
/*
* Thread-specific data.
*/
diff --git a/src/gallium/auxiliary/util/u_queue.c b/src/gallium/auxiliary/util/u_queue.c
index 87f0120..8fc2f3b 100644
--- a/src/gallium/auxiliary/util/u_queue.c
+++ b/src/gallium/auxiliary/util/u_queue.c
@@ -98,21 +98,21 @@ util_queue_fence_signal(struct util_queue_fence *fence)
fence->signalled = true;
pipe_condvar_broadcast(fence->cond);
pipe_mutex_unlock(fence->mutex);
}
void
util_queue_fence_wait(struct util_queue_fence *fence)
{
pipe_mutex_lock(fence->mutex);
while (!fence->signalled)
- pipe_condvar_wait(fence->cond, fence->mutex);
+ cnd_wait(&fence->cond, &fence->mutex);
pipe_mutex_unlock(fence->mutex);
}
void
util_queue_fence_init(struct util_queue_fence *fence)
{
memset(fence, 0, sizeof(*fence));
pipe_mutex_init(fence->mutex);
cnd_init(&fence->cond);
fence->signalled = true;
@@ -149,21 +149,21 @@ static PIPE_THREAD_ROUTINE(util_queue_thread_func, input)
}
while (1) {
struct util_queue_job job;
pipe_mutex_lock(queue->lock);
assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);
/* wait if the queue is empty */
while (!queue->kill_threads && queue->num_queued == 0)
- pipe_condvar_wait(queue->has_queued_cond, queue->lock);
+ cnd_wait(&queue->has_queued_cond, &queue->lock);
if (queue->kill_threads) {
pipe_mutex_unlock(queue->lock);
break;
}
job = queue->jobs[queue->read_idx];
memset(&queue->jobs[queue->read_idx], 0, sizeof(struct util_queue_job));
queue->read_idx = (queue->read_idx + 1) % queue->max_jobs;
@@ -298,21 +298,21 @@ util_queue_add_job(struct util_queue *queue,
struct util_queue_job *ptr;
assert(fence->signalled);
fence->signalled = false;
pipe_mutex_lock(queue->lock);
assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);
/* if the queue is full, wait until there is space */
while (queue->num_queued == queue->max_jobs)
- pipe_condvar_wait(queue->has_space_cond, queue->lock);
+ cnd_wait(&queue->has_space_cond, &queue->lock);
ptr = &queue->jobs[queue->write_idx];
assert(ptr->job == NULL);
ptr->job = job;
ptr->fence = fence;
ptr->execute = execute;
ptr->cleanup = cleanup;
queue->write_idx = (queue->write_idx + 1) % queue->max_jobs;
queue->num_queued++;
diff --git a/src/gallium/auxiliary/util/u_ringbuffer.c b/src/gallium/auxiliary/util/u_ringbuffer.c
index 334be6a..adba9ea 100644
--- a/src/gallium/auxiliary/util/u_ringbuffer.c
+++ b/src/gallium/auxiliary/util/u_ringbuffer.c
@@ -78,21 +78,21 @@ void util_ringbuffer_enqueue( struct util_ringbuffer *ring,
*/
pipe_mutex_lock(ring->mutex);
/* make sure we don't request an impossible amount of space
*/
assert(packet->dwords <= ring->mask);
/* Wait for free space:
*/
while (util_ringbuffer_space(ring) < packet->dwords)
- pipe_condvar_wait(ring->change, ring->mutex);
+ cnd_wait(&ring->change, &ring->mutex);
/* Copy data to ring:
*/
for (i = 0; i < packet->dwords; i++) {
/* Copy all dwords of the packet. Note we're abusing the
* typesystem a little - we're being passed a pointer to
* something, but probably not an array of packet structs:
*/
ring->buf[ring->head] = packet[i];
@@ -116,21 +116,21 @@ enum pipe_error util_ringbuffer_dequeue( struct util_ringbuffer *ring,
int ret = PIPE_OK;
/* XXX: over-reliance on mutexes, etc:
*/
pipe_mutex_lock(ring->mutex);
/* Get next ring entry:
*/
if (wait) {
while (util_ringbuffer_empty(ring))
- pipe_condvar_wait(ring->change, ring->mutex);
+ cnd_wait(&ring->change, &ring->mutex);
}
else {
if (util_ringbuffer_empty(ring)) {
ret = PIPE_ERROR_OUT_OF_MEMORY;
goto out;
}
}
ring_packet = &ring->buf[ring->tail];
diff --git a/src/gallium/drivers/llvmpipe/lp_fence.c b/src/gallium/drivers/llvmpipe/lp_fence.c
index 3b35eb2..1a8e365 100644
--- a/src/gallium/drivers/llvmpipe/lp_fence.c
+++ b/src/gallium/drivers/llvmpipe/lp_fence.c
@@ -112,16 +112,16 @@ lp_fence_signalled(struct lp_fence *f)
void
lp_fence_wait(struct lp_fence *f)
{
if (LP_DEBUG & DEBUG_FENCE)
debug_printf("%s %d\n", __FUNCTION__, f->id);
pipe_mutex_lock(f->mutex);
assert(f->issued);
while (f->count < f->rank) {
- pipe_condvar_wait(f->signalled, f->mutex);
+ cnd_wait(&f->signalled, &f->mutex);
}
pipe_mutex_unlock(f->mutex);
}
diff --git a/src/gallium/drivers/rbug/rbug_context.c b/src/gallium/drivers/rbug/rbug_context.c
index 9634f88..e34278e 100644
--- a/src/gallium/drivers/rbug/rbug_context.c
+++ b/src/gallium/drivers/rbug/rbug_context.c
@@ -101,21 +101,21 @@ rbug_draw_block_locked(struct rbug_context *rb_pipe, int flag)
if (block)
rb_pipe->draw_blocked |= (flag | RBUG_BLOCK_RULE);
}
if (rb_pipe->draw_blocked)
rbug_notify_draw_blocked(rb_pipe);
/* wait for rbug to clear the blocked flag */
while (rb_pipe->draw_blocked & flag) {
rb_pipe->draw_blocked |= flag;
- pipe_condvar_wait(rb_pipe->draw_cond, rb_pipe->draw_mutex);
+ cnd_wait(&rb_pipe->draw_cond, &rb_pipe->draw_mutex);
}
}
static void
rbug_draw_vbo(struct pipe_context *_pipe, const struct pipe_draw_info *info)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
struct pipe_context *pipe = rb_pipe->pipe;
diff --git a/src/gallium/state_trackers/nine/nine_queue.c b/src/gallium/state_trackers/nine/nine_queue.c
index 004d581..fdfbdbb 100644
--- a/src/gallium/state_trackers/nine/nine_queue.c
+++ b/src/gallium/state_trackers/nine/nine_queue.c
@@ -82,21 +82,21 @@ struct nine_queue_pool {
void
nine_queue_wait_flush(struct nine_queue_pool* ctx)
{
struct nine_cmdbuf *cmdbuf = &ctx->pool[ctx->tail];
/* wait for cmdbuf full */
pipe_mutex_lock(ctx->mutex_push);
while (!cmdbuf->full)
{
DBG("waiting for full cmdbuf\n");
- pipe_condvar_wait(ctx->event_push, ctx->mutex_push);
+ cnd_wait(&ctx->event_push, &ctx->mutex_push);
}
DBG("got cmdbuf=%p\n", cmdbuf);
pipe_mutex_unlock(ctx->mutex_push);
cmdbuf->offset = 0;
ctx->cur_instr = 0;
}
/* Gets a pointer to the next memory slice.
* Does not block.
@@ -155,21 +155,21 @@ nine_queue_flush(struct nine_queue_pool* ctx)
ctx->head = (ctx->head + 1) & NINE_CMD_BUFS_MASK;
cmdbuf = &ctx->pool[ctx->head];
/* wait for queue empty */
pipe_mutex_lock(ctx->mutex_pop);
while (cmdbuf->full)
{
DBG("waiting for empty cmdbuf\n");
- pipe_condvar_wait(ctx->event_pop, ctx->mutex_pop);
+ cnd_wait(&ctx->event_pop, &ctx->mutex_pop);
}
DBG("got empty cmdbuf=%p\n", cmdbuf);
pipe_mutex_unlock(ctx->mutex_pop);
cmdbuf->offset = 0;
cmdbuf->num_instr = 0;
}
/* Gets a pointer to a slice of memory with size @space.
* Does block if queue is full.
* Returns NULL on @space > NINE_QUEUE_SIZE. */
diff --git a/src/gallium/state_trackers/nine/nine_state.c b/src/gallium/state_trackers/nine/nine_state.c
index e1a2ff0..0a2a0b9 100644
--- a/src/gallium/state_trackers/nine/nine_state.c
+++ b/src/gallium/state_trackers/nine/nine_state.c
@@ -74,21 +74,21 @@ struct csmt_context {
};
/* Wait for instruction to be processed.
* Caller has to ensure that only one thread waits at time.
*/
static void
nine_csmt_wait_processed(struct csmt_context *ctx)
{
pipe_mutex_lock(ctx->mutex_processed);
while (!p_atomic_read(&ctx->processed)) {
- pipe_condvar_wait(ctx->event_processed, ctx->mutex_processed);
+ cnd_wait(&ctx->event_processed, &ctx->mutex_processed);
}
pipe_mutex_unlock(ctx->mutex_processed);
}
/* CSMT worker thread */
static
PIPE_THREAD_ROUTINE(nine_csmt_worker, arg)
{
struct csmt_context *ctx = arg;
struct csmt_instruction *instr;
--
2.9.3