[Mesa-dev] [PATCH V2 3/3] util/disk_cache: use rand_xorshift128plus() to get our random int
Nicolai Hähnle
nhaehnle at gmail.com
Wed Mar 22 07:10:30 UTC 2017
On 22.03.2017 00:59, Timothy Arceri wrote:
> Otherwise, for apps that don't seed the regular rand(), we will always
> remove old cache entries from the same dirs.
>
> V2: assume bits returned by rand are independent uniformly distributed
> bits and grab our hex value without taking the modulus of the whole
> value, this also fixes a bug where 'f' was always missing.
For the series:
Reviewed-by: Nicolai Hähnle <nicolai.haehnle at amd.com>
> ---
> src/util/disk_cache.c | 18 ++++++++++--------
> 1 file changed, 10 insertions(+), 8 deletions(-)
>
> diff --git a/src/util/disk_cache.c b/src/util/disk_cache.c
> index f2d67c9..f8677e3 100644
> --- a/src/util/disk_cache.c
> +++ b/src/util/disk_cache.c
> @@ -34,20 +34,21 @@
> #include <sys/statvfs.h>
> #include <sys/mman.h>
> #include <unistd.h>
> #include <fcntl.h>
> #include <pwd.h>
> #include <errno.h>
> #include <dirent.h>
> #include "zlib.h"
>
> #include "util/crc32.h"
> +#include "util/rand_xor.h"
> #include "util/u_atomic.h"
> #include "util/u_queue.h"
> #include "util/mesa-sha1.h"
> #include "util/ralloc.h"
> #include "main/errors.h"
> #include "util/macros.h"
>
> #include "disk_cache.h"
>
> /* Number of bits to mask off from a cache key to get an index. */
> @@ -59,20 +60,23 @@
> /* The number of keys that can be stored in the index. */
> #define CACHE_INDEX_MAX_KEYS (1 << CACHE_INDEX_KEY_BITS)
>
> struct disk_cache {
> /* The path to the cache directory. */
> char *path;
>
> /* Thread queue for compressing and writing cache entries to disk */
> struct util_queue cache_queue;
>
> + /* Seed for rand, which is used to pick a random directory */
> + uint64_t seed_xorshift128plus[2];
> +
> /* A pointer to the mmapped index file within the cache directory. */
> uint8_t *index_mmap;
> size_t index_mmap_size;
>
> /* Pointer to total size of all objects in cache (within index_mmap) */
> uint64_t *size;
>
> /* Pointer to stored keys, (within index_mmap). */
> uint8_t *stored_keys;
>
> @@ -354,20 +358,23 @@ disk_cache_create(const char *gpu_name, const char *timestamp)
> cache->max_size = max_size;
>
> /* A limit of 32 jobs was chosen as observations of Deus Ex start-up times
> * showed that we reached at most 11 jobs on an Intel i5-6400 CPU at 2.70GHz
> * (a fairly modest desktop CPU). 1 thread was chosen because we don't
> * really care about getting things to disk quickly just that it's not
> * blocking other tasks.
> */
> util_queue_init(&cache->cache_queue, "disk_cache", 32, 1);
>
> + /* Seed our rand function */
> + s_rand_xorshift128plus(cache->seed_xorshift128plus, true);
> +
> ralloc_free(local);
>
> return cache;
>
> fail:
> if (fd != -1)
> close(fd);
> if (cache)
> ralloc_free(cache);
> ralloc_free(local);
> @@ -559,37 +566,32 @@ is_two_character_sub_directory(const char *path, const struct stat *sb,
> /* If dir only contains '.' and '..' it must be empty */
> if (subdir_entries <= 2)
> return false;
>
> return true;
> }
>
> static void
> evict_lru_item(struct disk_cache *cache)
> {
> - const char hex[] = "0123456789abcde";
> char *dir_path;
> - int a, b;
> - size_t size;
>
> /* With a reasonably-sized, full cache, (and with keys generated
> * from a cryptographic hash), we can choose two random hex digits
> * and reasonably expect the directory to exist with a file in it.
> * Provides pseudo-LRU eviction to reduce checking all cache files.
> */
> - a = rand() % 16;
> - b = rand() % 16;
> -
> - if (asprintf(&dir_path, "%s/%c%c", cache->path, hex[a], hex[b]) < 0)
> + uint64_t rand64 = rand_xorshift128plus(cache->seed_xorshift128plus);
> + if (asprintf(&dir_path, "%s/%02" PRIx64 , cache->path, rand64 & 0xff) < 0)
> return;
>
> - size = unlink_lru_file_from_directory(dir_path);
> + size_t size = unlink_lru_file_from_directory(dir_path);
>
> free(dir_path);
>
> if (size) {
> p_atomic_add(cache->size, - (uint64_t)size);
> return;
> }
>
> /* In the case where the random choice of directory didn't find
> * something, we choose the least recently accessed from the
>
More information about the mesa-dev
mailing list