[Mesa-dev] [PATCH v2 5/7] disk cache: initialize cache path and index only when used

Jordan Justen jordan.l.justen at intel.com
Wed Jan 31 09:07:20 UTC 2018


On 2018-01-30 23:17:04, Tapani Pälli wrote:
> This patch makes disk_cache initialize its path and index lazily, so
> that we can utilize disk_cache without a path via the callback
> functionality introduced by the next patch.
> 
> v2: unmap mmap and destroy queue only if index_mmap exists
> 
> Signed-off-by: Tapani Pälli <tapani.palli at intel.com>
> ---
>  src/util/disk_cache.c | 127 +++++++++++++++++++++++++++++++-------------------
>  1 file changed, 78 insertions(+), 49 deletions(-)
> 
> diff --git a/src/util/disk_cache.c b/src/util/disk_cache.c
> index 2884d3c9c1..9fa264440b 100644
> --- a/src/util/disk_cache.c
> +++ b/src/util/disk_cache.c
> @@ -77,6 +77,7 @@
>  struct disk_cache {
>     /* The path to the cache directory. */
>     char *path;
> +   bool path_init_failed;
>  
>     /* Thread queue for compressing and writing cache entries to disk */
>     struct util_queue cache_queue;
> @@ -178,37 +179,20 @@ concatenate_and_mkdir(void *ctx, const char *path, const char *name)
>        return NULL;
>  }
>  
> -#define DRV_KEY_CPY(_dst, _src, _src_size) \
> -do {                                       \
> -   memcpy(_dst, _src, _src_size);          \
> -   _dst += _src_size;                      \
> -} while (0);
> -
> -struct disk_cache *
> -disk_cache_create(const char *gpu_name, const char *timestamp,
> -                  uint64_t driver_flags)
> +static bool
> +disk_cache_path_init(struct disk_cache *cache)
>  {
> -   void *local;
> -   struct disk_cache *cache = NULL;
> -   char *path, *max_size_str;
> -   uint64_t max_size;
> +   void *local = NULL;
> +   char *path;
>     int fd = -1;
>     struct stat sb;
>     size_t size;
>  
> -   /* If running as a users other than the real user disable cache */
> -   if (geteuid() != getuid())
> -      return NULL;
> -
>     /* A ralloc context for transient data during this invocation. */
>     local = ralloc_context(NULL);
>     if (local == NULL)
>        goto fail;
>  
> -   /* At user request, disable shader cache entirely. */
> -   if (env_var_as_boolean("MESA_GLSL_CACHE_DISABLE", false))
> -      goto fail;
> -
>     /* Determine path for cache based on the first defined name as follows:
>      *
>      *   $MESA_GLSL_CACHE_DIR
> @@ -273,10 +257,6 @@ disk_cache_create(const char *gpu_name, const char *timestamp,
>           goto fail;
>     }
>  
> -   cache = ralloc(NULL, struct disk_cache);
> -   if (cache == NULL)
> -      goto fail;
> -
>     cache->path = ralloc_strdup(cache, path);
>     if (cache->path == NULL)
>        goto fail;
> @@ -325,6 +305,58 @@ disk_cache_create(const char *gpu_name, const char *timestamp,
>     cache->size = (uint64_t *) cache->index_mmap;
>     cache->stored_keys = cache->index_mmap + sizeof(uint64_t);
>  
> +   /* 1 thread was chosen because we don't really care about getting things
> +    * to disk quickly, just that it's not blocking other tasks.
> +    *
> +    * The queue will resize automatically when it's full, so adding new jobs
> +    * doesn't stall.
> +    */
> +   util_queue_init(&cache->cache_queue, "disk_cache", 32, 1,
> +                   UTIL_QUEUE_INIT_RESIZE_IF_FULL |
> +                   UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY);
> +
> +   ralloc_free(local);
> +
> +   return true;
> +
> + fail:
> +   if (fd != -1)
> +      close(fd);
> +
> +   if (local)
> +      ralloc_free(local);
> +
> +   cache->path_init_failed = true;
> +
> +   return false;
> +}
> +
> +#define DRV_KEY_CPY(_dst, _src, _src_size) \
> +do {                                       \
> +   memcpy(_dst, _src, _src_size);          \
> +   _dst += _src_size;                      \
> +} while (0);
> +
> +struct disk_cache *
> +disk_cache_create(const char *gpu_name, const char *timestamp,
> +                  uint64_t driver_flags)
> +{
> +   struct disk_cache *cache = NULL;
> +   char *max_size_str;
> +   uint64_t max_size;
> +
> +   /* If running as a user other than the real user, disable the cache */
> +   if (geteuid() != getuid())
> +      return NULL;
> +
> +   /* At user request, disable shader cache entirely. */
> +   if (env_var_as_boolean("MESA_GLSL_CACHE_DISABLE", false))
> +      return NULL;
> +
> +   cache = rzalloc(NULL, struct disk_cache);
> +   if (cache == NULL)
> +      return NULL;
> +
>     max_size = 0;
>  
>     max_size_str = getenv("MESA_GLSL_CACHE_MAX_SIZE");
> @@ -360,16 +392,6 @@ disk_cache_create(const char *gpu_name, const char *timestamp,
>  
>     cache->max_size = max_size;
>  
> -   /* 1 thread was chosen because we don't really care about getting things
> -    * to disk quickly just that it's not blocking other tasks.
> -    *
> -    * The queue will resize automatically when it's full, so adding new jobs
> -    * doesn't stall.
> -    */
> -   util_queue_init(&cache->cache_queue, "disk_cache", 32, 1,
> -                   UTIL_QUEUE_INIT_RESIZE_IF_FULL |
> -                   UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY);
> -
>     uint8_t cache_version = CACHE_VERSION;
>     size_t cv_size = sizeof(cache_version);
>     cache->driver_keys_blob_size = cv_size;
> @@ -392,8 +414,10 @@ disk_cache_create(const char *gpu_name, const char *timestamp,
>  
>     cache->driver_keys_blob =
>        ralloc_size(cache, cache->driver_keys_blob_size);
> -   if (!cache->driver_keys_blob)
> -      goto fail;
> +   if (!cache->driver_keys_blob) {
> +      ralloc_free(cache);
> +      return NULL;
> +   }
>  
>     uint8_t *drv_key_blob = cache->driver_keys_blob;
>     DRV_KEY_CPY(drv_key_blob, &cache_version, cv_size)
> @@ -405,24 +429,13 @@ disk_cache_create(const char *gpu_name, const char *timestamp,
>     /* Seed our rand function */
>     s_rand_xorshift128plus(cache->seed_xorshift128plus, true);
>  
> -   ralloc_free(local);
> -
>     return cache;
> -
> - fail:
> -   if (fd != -1)
> -      close(fd);
> -   if (cache)
> -      ralloc_free(cache);
> -   ralloc_free(local);
> -
> -   return NULL;
>  }
>  
>  void
>  disk_cache_destroy(struct disk_cache *cache)
>  {
> -   if (cache) {
> +   if (cache && cache->index_mmap) {
>        util_queue_destroy(&cache->cache_queue);
>        munmap(cache->index_mmap, cache->index_mmap_size);
>     }
> @@ -441,6 +454,9 @@ get_cache_file(struct disk_cache *cache, const cache_key key)
>     char buf[41];
>     char *filename;
>  
> +   if (!cache->path)
> +      return NULL;
> +
>     _mesa_sha1_format(buf, key);
>     if (asprintf(&filename, "%s/%c%c/%s", cache->path, buf[0],
>                  buf[1], buf + 2) == -1)
> @@ -999,6 +1015,11 @@ disk_cache_put(struct disk_cache *cache, const cache_key key,
>     struct disk_cache_put_job *dc_job =
>        create_put_job(cache, key, data, size, cache_item_metadata);
>  
> +   /* Initialize path if not initialized yet. */
> +   if (cache->path_init_failed ||
> +       (!cache->path && !disk_cache_path_init(cache)))
> +      return;

We could move a bit of this logic into a helper function and have something like:

   if (!ensure_disk_cache_path(cache))
      return;

It would remove a little duplication, but not much.
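
For illustration, a rough sketch of what such a helper could look like,
built only from the checks already in this patch (the
ensure_disk_cache_path name is just the one used in the snippet above,
not something from the posted series):

   static bool
   ensure_disk_cache_path(struct disk_cache *cache)
   {
      /* An earlier attempt to set up the path already failed; don't retry. */
      if (cache->path_init_failed)
         return false;

      /* Lazily create the cache path and index on first use. */
      if (!cache->path && !disk_cache_path_init(cache))
         return false;

      return true;
   }

disk_cache_put() and disk_cache_has_key() could then share that single
check instead of repeating it.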

Patches 1 - 5:
Reviewed-by: Jordan Justen <jordan.l.justen at intel.com>

> +
>     if (dc_job) {
>        util_queue_fence_init(&dc_job->fence);
>        util_queue_add_job(&cache->cache_queue, dc_job, &dc_job->fence,
> @@ -1173,6 +1194,9 @@ disk_cache_put_key(struct disk_cache *cache, const cache_key key)
>     int i = CPU_TO_LE32(*key_chunk) & CACHE_INDEX_KEY_MASK;
>     unsigned char *entry;
>  
> +   if (!cache->path)
> +      return;
> +
>     entry = &cache->stored_keys[i * CACHE_KEY_SIZE];
>  
>     memcpy(entry, key, CACHE_KEY_SIZE);
> @@ -1192,6 +1216,11 @@ disk_cache_has_key(struct disk_cache *cache, const cache_key key)
>     int i = CPU_TO_LE32(*key_chunk) & CACHE_INDEX_KEY_MASK;
>     unsigned char *entry;
>  
> +   /* Initialize path if not initialized yet. */
> +   if (cache->path_init_failed ||
> +       (!cache->path && !disk_cache_path_init(cache)))
> +      return false;
> +
>     entry = &cache->stored_keys[i * CACHE_KEY_SIZE];
>  
>     return memcmp(entry, key, CACHE_KEY_SIZE) == 0;
> -- 
> 2.13.6
> 

