[Mesa-dev] [PATCH 1/6] gallium/u_queue: use a ring instead of a stack

Marek Olšák maraeo at gmail.com
Tue Jun 21 14:44:47 UTC 2016


On Tue, Jun 21, 2016 at 4:31 PM, Nicolai Hähnle <nhaehnle at gmail.com> wrote:
> On 21.06.2016 14:17, Marek Olšák wrote:
>>
>> From: Marek Olšák <marek.olsak at amd.com>
>>
>> and allow specifying its size in util_queue_init.
>> ---
>>   src/gallium/auxiliary/util/u_queue.c              | 55 ++++++++++++++++-------
>>   src/gallium/auxiliary/util/u_queue.h              |  8 ++--
>>   src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c     |  2 +-
>>   src/gallium/winsys/radeon/drm/radeon_drm_winsys.c |  2 +-
>>   4 files changed, 47 insertions(+), 20 deletions(-)
>>
>> diff --git a/src/gallium/auxiliary/util/u_queue.c b/src/gallium/auxiliary/util/u_queue.c
>> index 8e58414..2372c07 100644
>> --- a/src/gallium/auxiliary/util/u_queue.c
>> +++ b/src/gallium/auxiliary/util/u_queue.c
>> @@ -29,7 +29,6 @@
>>   static PIPE_THREAD_ROUTINE(util_queue_thread_func, param)
>>   {
>>      struct util_queue *queue = (struct util_queue*)param;
>> -   unsigned i;
>>
>>      while (1) {
>>         struct util_queue_job job;
>> @@ -39,10 +38,9 @@ static PIPE_THREAD_ROUTINE(util_queue_thread_func, param)
>>            break;
>>
>>         pipe_mutex_lock(queue->lock);
>> -      job = queue->jobs[0];
>> -      for (i = 1; i < queue->num_jobs; i++)
>> -         queue->jobs[i - 1] = queue->jobs[i];
>> -      queue->jobs[--queue->num_jobs].job = NULL;
>> +      job = queue->jobs[queue->read_idx];
>> +      queue->jobs[queue->read_idx].job = NULL;
>> +      queue->read_idx = (queue->read_idx + 1) % queue->max_jobs;
>>         pipe_mutex_unlock(queue->lock);
>>
>>         pipe_semaphore_signal(&queue->has_space);
>> @@ -55,25 +53,49 @@ static PIPE_THREAD_ROUTINE(util_queue_thread_func, param)
>>
>>      /* signal remaining jobs before terminating */
>>      pipe_mutex_lock(queue->lock);
>> -   for (i = 0; i < queue->num_jobs; i++) {
>> -      pipe_semaphore_signal(&queue->jobs[i].fence->done);
>> -      queue->jobs[i].job = NULL;
>> +   while (queue->jobs[queue->read_idx].job) {
>> +      pipe_semaphore_signal(&queue->jobs[queue->read_idx].fence->done);
>> +      queue->jobs[queue->read_idx].job = NULL;
>> +      queue->read_idx = (queue->read_idx + 1) % queue->max_jobs;
>>      }
>> -   queue->num_jobs = 0;
>>      pipe_mutex_unlock(queue->lock);
>>      return 0;
>>   }
>>
>> -void
>> +bool
>>   util_queue_init(struct util_queue *queue,
>> +                unsigned max_jobs,
>>                   void (*execute_job)(void *))
>>   {
>>      memset(queue, 0, sizeof(*queue));
>> +   queue->max_jobs = max_jobs;
>> +
>> +   queue->jobs = (struct util_queue_job*)
>> +                 calloc(max_jobs, sizeof(struct util_queue_job));
>
>
> Maybe CALLOC (and FREE twice below)? Not sure how strict we have to be about
> that...
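
First, a side note on the queue change itself: the quoted hunks only
show the read side of the ring. util_queue_add_job isn't quoted here,
but with this scheme it should simply mirror the reader with a
write_idx counter. A rough sketch (write_idx and the queued semaphore
name are assumptions, not copied from the patch):

   /* producer side, sketched: block until the ring has a free slot,
    * then append the job at write_idx and wake up the worker thread */
   pipe_semaphore_wait(&queue->has_space);

   pipe_mutex_lock(queue->lock);
   assert(!queue->jobs[queue->write_idx].job);
   queue->jobs[queue->write_idx].job = job;
   queue->jobs[queue->write_idx].fence = fence;
   queue->write_idx = (queue->write_idx + 1) % queue->max_jobs;
   pipe_mutex_unlock(queue->lock);

   pipe_semaphore_signal(&queue->queued);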

As for CALLOC: it only differs from plain calloc in two cases (rough
sketch of the selection below):
- PIPE_SUBSYSTEM_EMBEDDED is set. That configuration disables
multithreading in llvmpipe, which suggests it doesn't support
multithreaded environments at all, and that rules out any use of
u_queue.
- PIPE_OS_WINDOWS && DEBUG are set. That enables some memory-leak
debugging, which isn't very useful for us since we have valgrind.
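
Everywhere else CALLOC/FREE boil down to plain calloc/free. Roughly
(a simplified sketch from memory; the real selection logic lives in
util/u_memory.h and os/os_memory.h, so the debug_* names may be off):

/* which allocator the CALLOC/FREE wrappers end up using */
#if defined(PIPE_SUBSYSTEM_EMBEDDED)
/* the embedding environment provides its own allocator (and has no
 * thread support, so u_queue never runs in this configuration) */
void *os_calloc(size_t count, size_t size);
void  os_free(void *ptr);
#elif defined(PIPE_OS_WINDOWS) && defined(DEBUG)
/* allocations are routed through the memory-leak debugging helpers */
#define os_calloc(_count, _size) \
   debug_calloc(__FILE__, __LINE__, __FUNCTION__, _count, _size)
#define os_free(_ptr) \
   debug_free(__FILE__, __LINE__, __FUNCTION__, _ptr)
#else
/* default build: plain libc */
#define os_calloc(_count, _size)  calloc(_count, _size)
#define os_free(_ptr)             free(_ptr)
#endif

#define CALLOC(_count, _size)  os_calloc(_count, _size)
#define FREE(_ptr)             os_free(_ptr)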

Marek

