[Mesa-dev] [PATCH 6/9] anv: soft pin state pools
Jason Ekstrand
jason at jlekstrand.net
Thu May 3 22:14:25 UTC 2018
I commented on this in the office, but I think this whole thing would be
cleaner if we just clearly documented address ranges in anv_private.h with
a good comment. Something like
#define LOW_HEAP_BASE_ADDRESS 4096
#define LOW_HEAP_SIZE ((3ull << 30) - 4096)
#define DYNAMIC_STATE_POOL_ADDRESS (3ull << 30)
#define BINDING_TABLE_POOL_ADDRESS (4ull << 30)
#define SURFACE_STATE_POOL_ADDRESS (5ull << 30)
Maybe we want it in hex? I'm not sure. In any case, I think having the
layout explicit is better.
On Wed, May 2, 2018 at 9:01 AM, Scott D Phillips <scott.d.phillips at intel.com> wrote:
> The state_pools reserve virtual address space of the full
> BLOCK_POOL_MEMFD_SIZE, but maintain the current behavior of
> growing from the middle.
> ---
> src/intel/vulkan/anv_allocator.c | 25 +++++++++++++++++++++++++
> src/intel/vulkan/anv_device.c | 13 +++++++++----
> src/intel/vulkan/anv_private.h | 2 ++
> 3 files changed, 36 insertions(+), 4 deletions(-)
>
> diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c
> index 642e1618c10..fa4e7d74ac7 100644
> --- a/src/intel/vulkan/anv_allocator.c
> +++ b/src/intel/vulkan/anv_allocator.c
> @@ -250,6 +250,27 @@ anv_block_pool_init(struct anv_block_pool *pool,
>
> pool->device = device;
> pool->bo_flags = bo_flags;
> +
> + if (bo_flags & EXEC_OBJECT_PINNED) {
> + pool->offset = 0;
> +
> + pthread_mutex_lock(&device->vma_mutex);
> +
> + if (bo_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS)
> + pool->offset = util_vma_heap_alloc(&device->vma_hi,
> + BLOCK_POOL_MEMFD_SIZE, 4096);
> +
> + if (!pool->offset)
> + pool->offset = util_vma_heap_alloc(&device->vma_lo,
> + BLOCK_POOL_MEMFD_SIZE, 4096);
> +
> + pthread_mutex_unlock(&device->vma_mutex);
> +
> + if (!pool->offset)
> + return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
> +
> + pool->offset = canonical_address(pool->offset);
> + }
> anv_bo_init(&pool->bo, 0, 0);
>
> pool->fd = memfd_create("block pool", MFD_CLOEXEC);
> @@ -402,6 +423,10 @@ anv_block_pool_expand_range(struct anv_block_pool *pool,
> * hard work for us.
> */
> anv_bo_init(&pool->bo, gem_handle, size);
> + if (pool->bo_flags & EXEC_OBJECT_PINNED) {
> + pool->bo.offset = pool->offset + BLOCK_POOL_MEMFD_CENTER -
> + center_bo_offset;
> + }
> pool->bo.flags = pool->bo_flags;
> pool->bo.map = map;
>
> diff --git a/src/intel/vulkan/anv_device.c b/src/intel/vulkan/anv_device.c
> index d3d9c779d62..2837d2f83ca 100644
> --- a/src/intel/vulkan/anv_device.c
> +++ b/src/intel/vulkan/anv_device.c
> @@ -1613,12 +1613,17 @@ VkResult anv_CreateDevice(
> if (result != VK_SUCCESS)
> goto fail_batch_bo_pool;
>
> - /* For the state pools we explicitly disable 48bit. */
> - bo_flags = (physical_device->has_exec_async ? EXEC_OBJECT_ASYNC : 0) |
> (physical_device->has_exec_capture ? EXEC_OBJECT_CAPTURE : 0);
> + if (physical_device->has_exec_softpin)
> + bo_flags |= EXEC_OBJECT_PINNED;
> + else
> + bo_flags &= ~EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
>
> + /* dynamic_state_pool needs to stay in the same 4GiB as index and
> + * vertex buffers. For rationale, see the comment in
> + * anv_physical_device_init_heaps.
> + */
> result = anv_state_pool_init(&device->dynamic_state_pool, device,
> 16384,
> - bo_flags);
> bo_flags & ~EXEC_OBJECT_SUPPORTS_48B_ADDRESS);
> if (result != VK_SUCCESS)
> goto fail_bo_cache;
>
> diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
> index 708c3a540d3..23527eebaab 100644
> --- a/src/intel/vulkan/anv_private.h
> +++ b/src/intel/vulkan/anv_private.h
> @@ -582,6 +582,8 @@ struct anv_block_pool {
>
> struct anv_bo bo;
>
> + uint64_t offset;
>
This might be better named "start_address". Also, we should have a comment
saying what it means. :-)
> +
> /* The offset from the start of the bo to the "center" of the block
> * pool. Pointers to allocated blocks are given by
> * bo.map + center_bo_offset + offsets.
> --
> 2.14.3
>
>
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <https://lists.freedesktop.org/archives/mesa-dev/attachments/20180503/f2c321f2/attachment.html>
More information about the mesa-dev
mailing list