[Mesa-dev] [PATCH 14/18] anv/allocator: Add the capability to allocate blocks of different sizes
Jason Ekstrand
jason at jlekstrand.net
Wed Apr 26 14:35:13 UTC 2017
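Instead of hard-coding a single block size per pool, let callers request a
block size with every allocation.  anv_block_pool_grow() no longer takes a
block size: the first allocation is simply 32 pages, and the center_bo_offset
only has to be page-aligned rather than block-and-page-aligned.  Since a
single grow is no longer guaranteed to make room for an arbitrarily sized
block, anv_block_pool_alloc_new() now loops on anv_block_pool_grow() until
the requested block fits, and the power-of-two restriction on block sizes is
dropped.  The block_pool_no_free test is updated so that each thread
allocates with its own block size.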
---
src/intel/vulkan/anv_allocator.c | 23 ++++++++++-------------
src/intel/vulkan/tests/block_pool_no_free.c | 7 ++++---
2 files changed, 14 insertions(+), 16 deletions(-)
diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c
index fa7101b..e58734b 100644
--- a/src/intel/vulkan/anv_allocator.c
+++ b/src/intel/vulkan/anv_allocator.c
@@ -430,8 +430,7 @@ anv_block_pool_expand_range(struct anv_block_pool *pool,
* the pool and a 4K CPU page.
*/
static uint32_t
-anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state,
- uint32_t block_size)
+anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state)
{
uint32_t size;
VkResult result = VK_SUCCESS;
@@ -470,7 +469,7 @@ anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state,
if (old_size == 0) {
/* This is the first allocation */
- size = MAX2(32 * block_size, PAGE_SIZE);
+ size = 32 * PAGE_SIZE;
} else {
size = old_size * 2;
}
@@ -498,10 +497,8 @@ anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state,
*/
center_bo_offset = ((uint64_t)size * back_used) / total_used;
- /* Align down to a multiple of both the block size and page size */
- uint32_t granularity = MAX2(block_size, PAGE_SIZE);
- assert(util_is_power_of_two(granularity));
- center_bo_offset &= ~(granularity - 1);
+ /* Align down to a multiple of the page size */
+ center_bo_offset &= ~(PAGE_SIZE - 1);
assert(center_bo_offset >= back_used);
@@ -514,7 +511,6 @@ anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state,
center_bo_offset = size - pool->state.end;
}
- assert(center_bo_offset % block_size == 0);
assert(center_bo_offset % PAGE_SIZE == 0);
result = anv_block_pool_expand_range(pool, center_bo_offset, size);
@@ -545,21 +541,22 @@ anv_block_pool_alloc_new(struct anv_block_pool *pool,
{
struct anv_block_state state, old, new;
- assert(util_is_power_of_two(block_size));
-
while (1) {
state.u64 = __sync_fetch_and_add(&pool_state->u64, block_size);
- if (state.next < state.end) {
+ if (state.next + block_size <= state.end) {
assert(pool->map);
return state.next;
- } else if (state.next == state.end) {
+ } else if (state.next <= state.end) {
/* We allocated the first block outside the pool so we have to grow
* the pool. pool_state->next acts a mutex: threads who try to
* allocate now will get block indexes above the current limit and
* hit futex_wait below.
*/
new.next = state.next + block_size;
- new.end = anv_block_pool_grow(pool, pool_state, block_size);
+ do {
+ new.end = anv_block_pool_grow(pool, pool_state);
+ } while (new.end < new.next);
+
old.u64 = __sync_lock_test_and_set(&pool_state->u64, new.u64);
if (old.next != state.next)
futex_wake(&pool_state->end, INT_MAX);
diff --git a/src/intel/vulkan/tests/block_pool_no_free.c b/src/intel/vulkan/tests/block_pool_no_free.c
index 6e6fc68..f5ace7f 100644
--- a/src/intel/vulkan/tests/block_pool_no_free.c
+++ b/src/intel/vulkan/tests/block_pool_no_free.c
@@ -25,7 +25,6 @@
#include "anv_private.h"
-#define BLOCK_SIZE 16
#define NUM_THREADS 16
#define BLOCKS_PER_THREAD 1024
#define NUM_RUNS 64
@@ -42,16 +41,18 @@ struct job {
static void *alloc_blocks(void *_job)
{
struct job *job = _job;
+ uint32_t job_id = job - jobs;
+ uint32_t block_size = 16 * ((job_id % 4) + 1);
int32_t block, *data;
for (unsigned i = 0; i < BLOCKS_PER_THREAD; i++) {
- block = anv_block_pool_alloc(job->pool, BLOCK_SIZE);
+ block = anv_block_pool_alloc(job->pool, block_size);
data = job->pool->map + block;
*data = block;
assert(block >= 0);
job->blocks[i] = block;
- block = anv_block_pool_alloc_back(job->pool, BLOCK_SIZE);
+ block = anv_block_pool_alloc_back(job->pool, block_size);
data = job->pool->map + block;
*data = block;
assert(block < 0);
--
2.5.0.400.gff86faf
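
Not part of the patch: a minimal usage sketch of what this change allows,
assuming an already-initialized block pool and only the
anv_block_pool_alloc()/anv_block_pool_alloc_back() entry points exercised by
the test above.

/* Minimal sketch, not part of the patch: mixed-size allocations from a
 * single anv_block_pool.  Assumes the pool has been set up elsewhere.
 */
#include "anv_private.h"

static void
mixed_size_alloc_sketch(struct anv_block_pool *pool)
{
   /* Sizes no longer have to be powers of two and can differ between
    * calls on the same pool. */
   int32_t a = anv_block_pool_alloc(pool, 48);
   int32_t b = anv_block_pool_alloc(pool, 4096);

   /* Front allocations return non-negative byte offsets into pool->map;
    * back allocations return negative ones, as the test asserts. */
   assert(a >= 0 && b >= 0);

   int32_t c = anv_block_pool_alloc_back(pool, 64);
   assert(c < 0);

   /* Data is reached through pool->map plus the returned offset, the
    * same way the test writes its marker values. */
   *(int32_t *)(pool->map + a) = a;
   *(int32_t *)(pool->map + c) = c;
   (void)b;
}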