[Mesa-dev] [PATCH V2 3/3] util/disk_cache: write cache entry keys to file header

Nicolai Hähnle nhaehnle at gmail.com
Wed Mar 22 07:21:22 UTC 2017


On 22.03.2017 04:45, Timothy Arceri wrote:
> This can be used to deal with key hash collisions from different
> versions (should we find that to actually happen) and to find
> which mesa version produced the cache entry.
>
> V2: use blob created add cache creation.

Typo: "add" should be "at" — i.e. "use blob created at cache creation".


> ---
>  src/util/disk_cache.c | 53 +++++++++++++++++++++++++++++++++++++++++++++------
>  1 file changed, 47 insertions(+), 6 deletions(-)
>
> diff --git a/src/util/disk_cache.c b/src/util/disk_cache.c
> index 8fb59be..01a9220 100644
> --- a/src/util/disk_cache.c
> +++ b/src/util/disk_cache.c
> @@ -755,20 +755,21 @@ struct cache_entry_file_data {
>  };
>
>  static void
>  cache_put(void *job, int thread_index)
>  {
>     assert(job);
>
>     int fd = -1, fd_final = -1, err, ret;
>     unsigned i = 0;
>     char *filename = NULL, *filename_tmp = NULL;
> +   uint8_t *key_blob = NULL;

This appears to be a left-over from earlier work.

Cheers,
Nicolai


>     struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *) job;
>
>     filename = get_cache_file(dc_job->cache, dc_job->key);
>     if (filename == NULL)
>        goto done;
>
>     /* If the cache is too large, evict something else first. */
>     while (*dc_job->cache->size + dc_job->size > dc_job->cache->max_size &&
>            i < 8) {
>        evict_lru_item(dc_job->cache);
> @@ -813,23 +814,35 @@ cache_put(void *job, int thread_index)
>      */
>     fd_final = open(filename, O_RDONLY | O_CLOEXEC);
>     if (fd_final != -1) {
>        unlink(filename_tmp);
>        goto done;
>     }
>
>     /* OK, we're now on the hook to write out a file that we know is
>      * not in the cache, and is also not being written out to the cache
>      * by some other process.
> -    *
> -    * Create CRC of the data and store at the start of the file. We will
> -    * read this when restoring the cache and use it to check for corruption.
> +    */
> +
> +   /* Write the key_blob; this can be used to find information about the
> +    * mesa version that produced the entry or deal with hash collisions,
> +    * should that ever become a real problem.
> +    */
> +   ret = write_all(fd, dc_job->cache->driver_keys_blob,
> +                   dc_job->cache->driver_keys_blob_size);
> +   if (ret == -1) {
> +      unlink(filename_tmp);
> +      goto done;
> +   }
> +
> +   /* Create CRC of the data. We will read this when restoring the cache and
> +    * use it to check for corruption.
>      */
>     struct cache_entry_file_data cf_data;
>     cf_data.crc32 = util_hash_crc32(dc_job->data, dc_job->size);
>     cf_data.uncompressed_size = dc_job->size;
>
>     size_t cf_data_size = sizeof(cf_data);
>     ret = write_all(fd, &cf_data, cf_data_size);
>     if (ret == -1) {
>        unlink(filename_tmp);
>        goto done;
> @@ -844,35 +857,37 @@ cache_put(void *job, int thread_index)
>     if (file_size == 0) {
>        unlink(filename_tmp);
>        goto done;
>     }
>     ret = rename(filename_tmp, filename);
>     if (ret == -1) {
>        unlink(filename_tmp);
>        goto done;
>     }
>
> -   file_size += cf_data_size;
> +   file_size += cf_data_size + dc_job->cache->driver_keys_blob_size;
>     p_atomic_add(dc_job->cache->size, file_size);
>
>   done:
>     if (fd_final != -1)
>        close(fd_final);
>     /* This close finally releases the flock, (now that the final file
>      * has been renamed into place and the size has been added).
>      */
>     if (fd != -1)
>        close(fd);
>     if (filename_tmp)
>        free(filename_tmp);
>     if (filename)
>        free(filename);
> +   if (key_blob)
> +      free(key_blob);
>  }
>
>  void
>  disk_cache_put(struct disk_cache *cache, const cache_key key,
>                 const void *data, size_t size)
>  {
>     struct disk_cache_put_job *dc_job =
>        create_put_job(cache, key, data, size);
>
>     if (dc_job) {
> @@ -941,32 +956,58 @@ disk_cache_get(struct disk_cache *cache, const cache_key key, size_t *size)
>     if (fd == -1)
>        goto fail;
>
>     if (fstat(fd, &sb) == -1)
>        goto fail;
>
>     data = malloc(sb.st_size);
>     if (data == NULL)
>        goto fail;
>
> +   size_t ck_size = cache->driver_keys_blob_size;
> +#ifndef NDEBUG
> +   uint8_t *file_header = malloc(ck_size);
> +   if (!file_header)
> +      goto fail;
> +
> +   assert(sb.st_size > ck_size);
> +   for (len = 0; len < ck_size; len += ret) {
> +      ret = read(fd, ((uint8_t *) file_header) + len, ck_size - len);
> +      if (ret == -1) {
> +         free(file_header);
> +         goto fail;
> +      }
> +   }
> +
> +   assert(memcmp(cache->driver_keys_blob, file_header, ck_size) == 0);
> +
> +   free(file_header);
> +#else
> +   /* The cache keys are currently just used for distributing precompiled
> +    * shaders; they are not used by Mesa, so just skip them for now.
> +    */
> +   ret = lseek(fd, ck_size, SEEK_CUR);
> +   if (ret == -1)
> +      goto fail;
> +#endif
> +
>     /* Load the CRC that was created when the file was written. */
>     struct cache_entry_file_data cf_data;
>     size_t cf_data_size = sizeof(cf_data);
> -   assert(sb.st_size > cf_data_size);
>     for (len = 0; len < cf_data_size; len += ret) {
>        ret = read(fd, ((uint8_t *) &cf_data) + len, cf_data_size - len);
>        if (ret == -1)
>           goto fail;
>     }
>
>     /* Load the actual cache data. */
> -   size_t cache_data_size = sb.st_size - cf_data_size;
> +   size_t cache_data_size = sb.st_size - cf_data_size - ck_size;
>     for (len = 0; len < cache_data_size; len += ret) {
>        ret = read(fd, data + len, cache_data_size - len);
>        if (ret == -1)
>           goto fail;
>     }
>
>     /* Uncompress the cache data */
>     uncompressed_data = malloc(cf_data.uncompressed_size);
>     if (!inflate_cache_data(data, cache_data_size, uncompressed_data,
>                             cf_data.uncompressed_size))
>



More information about the mesa-dev mailing list