[Mesa-dev] [PATCH 4/4] Shorten u_queue names

Timothy Arceri tarceri at itsqueeze.com
Wed Jul 4 00:05:54 UTC 2018


For the series:

Reviewed-by: Timothy Arceri <tarceri at itsqueeze.com>

On 04/07/18 09:16, Marek Olšák wrote:
> From: Marek Olšák <marek.olsak at amd.com>
> 
> There is a 15-character limit for thread names, which are shared between
> the queue name and the process name. Shorten the queue names to make
> space for the process name.
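
As a side note for anyone wondering where the 15-character budget comes
from: on Linux, pthread_setname_np() only accepts names of up to 15
characters plus the terminating NUL, so whatever string gets built from
the process name, queue name and thread index has to fit in 16 bytes.
A minimal stand-alone sketch (not Mesa's util_queue code; the
set_thread_name() helper and the "<process>:<queue><index>" layout are
just illustrative assumptions):

    #define _GNU_SOURCE /* for pthread_setname_np() on glibc */
    #include <pthread.h>
    #include <stdio.h>

    /* Hypothetical helper: compose "<process>:<queue><index>" and install
     * it as the calling thread's name. Linux limits thread names to 15
     * characters plus the terminating NUL; longer strings make
     * pthread_setname_np() fail with ERANGE, so the buffer is capped at
     * 16 bytes and snprintf() truncates anything longer.
     */
    static void set_thread_name(const char *process, const char *queue,
                                int index)
    {
       char name[16];

       snprintf(name, sizeof(name), "%s:%s%d", process, queue, index);

       if (pthread_setname_np(pthread_self(), name) != 0)
          fprintf(stderr, "failed to set thread name '%s'\n", name);
    }

    int main(void)
    {
       /* "gallium_drv0" alone would already use 12 of the 15 characters,
        * leaving almost nothing for the process name; "gdrv0" leaves
        * roughly ten. */
       set_thread_name("glxgears", "gdrv", 0);
       return 0;
    }
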
> ---
>   src/gallium/auxiliary/util/u_threaded_context.c   | 2 +-
>   src/gallium/drivers/radeonsi/si_pipe.c            | 4 ++--
>   src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c     | 2 +-
>   src/gallium/winsys/radeon/drm/radeon_drm_winsys.c | 2 +-
>   src/mesa/main/glthread.c                          | 2 +-
>   src/util/disk_cache.c                             | 2 +-
>   6 files changed, 7 insertions(+), 7 deletions(-)
> 
> diff --git a/src/gallium/auxiliary/util/u_threaded_context.c b/src/gallium/auxiliary/util/u_threaded_context.c
> index 1c647a3efd0..28d0f77ebaa 100644
> --- a/src/gallium/auxiliary/util/u_threaded_context.c
> +++ b/src/gallium/auxiliary/util/u_threaded_context.c
> @@ -2559,21 +2559,21 @@ threaded_context_create(struct pipe_context *pipe,
>      else
>         tc->base.const_uploader = u_upload_clone(&tc->base, pipe->const_uploader);
>   
>      if (!tc->base.stream_uploader || !tc->base.const_uploader)
>         goto fail;
>   
>      /* The queue size is the number of batches "waiting". Batches are removed
>       * from the queue before being executed, so keep one tc_batch slot for that
>       * execution. Also, keep one unused slot for an unflushed batch.
>       */
> -   if (!util_queue_init(&tc->queue, "gallium_drv", TC_MAX_BATCHES - 2, 1, 0))
> +   if (!util_queue_init(&tc->queue, "gdrv", TC_MAX_BATCHES - 2, 1, 0))
>         goto fail;
>   
>      for (unsigned i = 0; i < TC_MAX_BATCHES; i++) {
>         tc->batch_slots[i].sentinel = TC_SENTINEL;
>         tc->batch_slots[i].pipe = pipe;
>         util_queue_fence_init(&tc->batch_slots[i].fence);
>      }
>   
>      LIST_INITHEAD(&tc->unflushed_queries);
>   
> diff --git a/src/gallium/drivers/radeonsi/si_pipe.c b/src/gallium/drivers/radeonsi/si_pipe.c
> index 5da8a4b9873..4376b9223a5 100644
> --- a/src/gallium/drivers/radeonsi/si_pipe.c
> +++ b/src/gallium/drivers/radeonsi/si_pipe.c
> @@ -893,30 +893,30 @@ struct pipe_screen *radeonsi_screen_create(struct radeon_winsys *ws,
>   	} else {
>   		num_comp_hi_threads = 1;
>   		num_comp_lo_threads = 1;
>   	}
>   
>   	num_comp_hi_threads = MIN2(num_comp_hi_threads,
>   				   ARRAY_SIZE(sscreen->compiler));
>   	num_comp_lo_threads = MIN2(num_comp_lo_threads,
>   				   ARRAY_SIZE(sscreen->compiler_lowp));
>   
> -	if (!util_queue_init(&sscreen->shader_compiler_queue, "si_shader",
> +	if (!util_queue_init(&sscreen->shader_compiler_queue, "sh",
>   			     64, num_comp_hi_threads,
>   			     UTIL_QUEUE_INIT_RESIZE_IF_FULL)) {
>   		si_destroy_shader_cache(sscreen);
>   		FREE(sscreen);
>   		return NULL;
>   	}
>   
>   	if (!util_queue_init(&sscreen->shader_compiler_queue_low_priority,
> -			     "si_shader_low",
> +			     "shlo",
>   			     64, num_comp_lo_threads,
>   			     UTIL_QUEUE_INIT_RESIZE_IF_FULL |
>   			     UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY)) {
>   	       si_destroy_shader_cache(sscreen);
>   	       FREE(sscreen);
>   	       return NULL;
>   	}
>   
>   	si_handle_env_var_force_family(sscreen);
>   
> diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c b/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
> index d60b3640f61..cca6a3cc25b 100644
> --- a/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
> +++ b/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
> @@ -313,21 +313,21 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
>      ws->base.get_chip_name = amdgpu_get_chip_name;
>   
>      amdgpu_bo_init_functions(ws);
>      amdgpu_cs_init_functions(ws);
>      amdgpu_surface_init_functions(ws);
>   
>      LIST_INITHEAD(&ws->global_bo_list);
>      (void) simple_mtx_init(&ws->global_bo_list_lock, mtx_plain);
>      (void) simple_mtx_init(&ws->bo_fence_lock, mtx_plain);
>   
> -   if (!util_queue_init(&ws->cs_queue, "amdgpu_cs", 8, 1,
> +   if (!util_queue_init(&ws->cs_queue, "cs", 8, 1,
>                           UTIL_QUEUE_INIT_RESIZE_IF_FULL)) {
>         amdgpu_winsys_destroy(&ws->base);
>         simple_mtx_unlock(&dev_tab_mutex);
>         return NULL;
>      }
>   
>      /* Create the screen at the end. The winsys must be initialized
>       * completely.
>       *
>       * Alternatively, we could create the screen based on "ws->gen"
> diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
> index c02f596f637..491e8e159f4 100644
> --- a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
> +++ b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
> @@ -900,21 +900,21 @@ radeon_drm_winsys_create(int fd, const struct pipe_screen_config *config,
>        */
>       if (ws->info.drm_minor >= 41) {
>           ws->vm64.start = 1ull << 32;
>           ws->vm64.end = 1ull << 33;
>       }
>   
>       /* TTM aligns the BO size to the CPU page size */
>       ws->info.gart_page_size = sysconf(_SC_PAGESIZE);
>   
>       if (ws->num_cpus > 1 && debug_get_option_thread())
> -        util_queue_init(&ws->cs_queue, "radeon_cs", 8, 1, 0);
> +        util_queue_init(&ws->cs_queue, "rcs", 8, 1, 0);
>   
>       /* Create the screen at the end. The winsys must be initialized
>        * completely.
>        *
>        * Alternatively, we could create the screen based on "ws->gen"
>        * and link all drivers into one binary blob. */
>       ws->base.screen = screen_create(&ws->base, config);
>       if (!ws->base.screen) {
>           radeon_winsys_destroy(&ws->base);
>           mtx_unlock(&fd_tab_mutex);
> diff --git a/src/mesa/main/glthread.c b/src/mesa/main/glthread.c
> index c71c03778aa..18a83bb9be4 100644
> --- a/src/mesa/main/glthread.c
> +++ b/src/mesa/main/glthread.c
> @@ -66,21 +66,21 @@ glthread_thread_initialization(void *job, int thread_index)
>   }
>   
>   void
>   _mesa_glthread_init(struct gl_context *ctx)
>   {
>      struct glthread_state *glthread = calloc(1, sizeof(*glthread));
>   
>      if (!glthread)
>         return;
>   
> -   if (!util_queue_init(&glthread->queue, "glthread", MARSHAL_MAX_BATCHES - 2,
> +   if (!util_queue_init(&glthread->queue, "gl", MARSHAL_MAX_BATCHES - 2,
>                           1, 0)) {
>         free(glthread);
>         return;
>      }
>   
>      ctx->MarshalExec = _mesa_create_marshal_table(ctx);
>      if (!ctx->MarshalExec) {
>         util_queue_destroy(&glthread->queue);
>         free(glthread);
>         return;
> diff --git a/src/util/disk_cache.c b/src/util/disk_cache.c
> index 4a762eff20e..87ddfb86b27 100644
> --- a/src/util/disk_cache.c
> +++ b/src/util/disk_cache.c
> @@ -369,21 +369,21 @@ disk_cache_create(const char *gpu_name, const char *timestamp,
>      }
>   
>      cache->max_size = max_size;
>   
>      /* 1 thread was chosen because we don't really care about getting things
>       * to disk quickly just that it's not blocking other tasks.
>       *
>       * The queue will resize automatically when it's full, so adding new jobs
>       * doesn't stall.
>       */
> -   util_queue_init(&cache->cache_queue, "disk_cache", 32, 1,
> +   util_queue_init(&cache->cache_queue, "disk$", 32, 1,
>                      UTIL_QUEUE_INIT_RESIZE_IF_FULL |
>                      UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY);
>   
>      cache->path_init_failed = false;
>   
>    path_fail:
>   
>      cache->driver_keys_blob_size = cv_size;
>   
>      /* Create driver id keys */
> 

