[Mesa-dev] [PATCH 5/9] anv: Add vma_heap allocators in anv_device
Jason Ekstrand
jason at jlekstrand.net
Thu May 3 21:22:56 UTC 2018
On Wed, May 2, 2018 at 9:01 AM, Scott D Phillips <scott.d.phillips at intel.com
> wrote:
> These will be used to assign virtual addresses to soft pinned
> buffers in a later patch.
> ---
> src/intel/vulkan/anv_device.c | 75 ++++++++++++++++++++++++++++++
> ++++++++++++
> src/intel/vulkan/anv_private.h | 11 +++++++
> 2 files changed, 86 insertions(+)
>
> diff --git a/src/intel/vulkan/anv_device.c b/src/intel/vulkan/anv_device.c
> index c0cec175826..d3d9c779d62 100644
> --- a/src/intel/vulkan/anv_device.c
> +++ b/src/intel/vulkan/anv_device.c
> @@ -369,6 +369,8 @@ anv_physical_device_init(struct anv_physical_device
> *device,
> device->has_exec_async = anv_gem_get_param(fd,
> I915_PARAM_HAS_EXEC_ASYNC);
> device->has_exec_capture = anv_gem_get_param(fd,
> I915_PARAM_HAS_EXEC_CAPTURE);
> device->has_exec_fence = anv_gem_get_param(fd,
> I915_PARAM_HAS_EXEC_FENCE);
> + device->has_exec_softpin = anv_gem_get_param(fd,
> I915_PARAM_HAS_EXEC_SOFTPIN)
> + && device->supports_48bit_addresses;
>
I'd rather we call this something like use_softpin since it isn't just a
"does the kernel have this feature" flag.
> device->has_syncobj = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_FENCE_
> ARRAY);
> device->has_syncobj_wait = device->has_syncobj &&
> anv_gem_supports_syncobj_wait(fd);
> @@ -1527,6 +1529,26 @@ VkResult anv_CreateDevice(
> goto fail_fd;
> }
>
> + if (physical_device->has_exec_softpin) {
> + if (pthread_mutex_init(&device->vma_mutex, NULL) != 0) {
> + result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
> + goto fail_fd;
> + }
> +
> + /* keep the page with address zero out of the allocator */
> + util_vma_heap_init(&device->vma_lo, 4096, (1ull << 32) - 2 * 4096);
>
Why are you subtracting 2 * 4096?
> + device->vma_lo_available =
> + physical_device->memory.heaps[physical_device->memory.heap_count
> - 1].size;
> +
> + /* Leave the last 4GiB out of the high vma range, so that no state
> base
> + * address + size can overflow 48 bits.
>
Might be good to have a more detailed comment here or at least reference
the comment in anv_allocator.c that deals with the workaround.
> + */
> + util_vma_heap_init(&device->vma_hi, (1ull << 32) + 4096,
> + (1ull << 48) - 2 * (1ull << 32) - 2 * 4096);
>
Why are you not starting at (1ull << 32)?
If neither of those has a good reason, then we should probably only drop
the bottom and top pages in the entire 48-bit range.
> + device->vma_hi_available = physical_device->memory.heap_count == 1
> ? 0 :
> + physical_device->memory.heaps[0].size;
> + }
> +
> /* As per spec, the driver implementation may deny requests to acquire
> * a priority above the default priority (MEDIUM) if the caller does
> not
> * have sufficient privileges. In this scenario
> VK_ERROR_NOT_PERMITTED_EXT
> @@ -1887,6 +1909,59 @@ VkResult anv_DeviceWaitIdle(
> return anv_device_submit_simple_batch(device, &batch);
> }
>
> +bool
> +anv_vma_alloc(struct anv_device *device, struct anv_bo *bo)
> +{
> + if (!(bo->flags & EXEC_OBJECT_PINNED))
> + return true;
>
When are things not pinned?
> +
> + pthread_mutex_lock(&device->vma_mutex);
> +
> + bo->offset = 0;
> +
> + if (bo->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS &&
> + device->vma_hi_available >= bo->size) {
> + uint64_t addr = util_vma_heap_alloc(&device->vma_hi, bo->size,
> 4096);
> + if (addr) {
> + bo->offset = canonical_address(addr);
> + device->vma_hi_available -= bo->size;
> + }
> + }
> +
> + if (bo->offset == 0 && device->vma_lo_available >= bo->size) {
> + uint64_t addr = util_vma_heap_alloc(&device->vma_lo, bo->size,
> 4096);
> + if (addr) {
> + bo->offset = canonical_address(addr);
> + device->vma_lo_available -= bo->size;
> + }
> + }
>
I'm not sure how I feel about using EXEC_OBJECT_SUPPORTS_48B_ADDRESS for
this. I think it certainly works but it's not what I had pictured.
> +
> + pthread_mutex_unlock(&device->vma_mutex);
> +
> + return bo->offset != 0;
> +}
> +
> +void
> +anv_vma_free(struct anv_device *device, struct anv_bo *bo)
> +{
> + if (!(bo->flags & EXEC_OBJECT_PINNED))
> + return;
> +
> + pthread_mutex_lock(&device->vma_mutex);
> +
> + if (bo->offset >= 1ull << 32) {
> + util_vma_heap_free(&device->vma_hi, bo->offset, bo->size);
> + device->vma_hi_available += bo->size;
> + } else {
> + util_vma_heap_free(&device->vma_lo, bo->offset, bo->size);
> + device->vma_lo_available += bo->size;
> + }
> +
> + pthread_mutex_unlock(&device->vma_mutex);
> +
> + bo->offset = 0;
> +}
> +
> VkResult
> anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t
> size)
> {
> diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_
> private.h
> index 761601d1e37..708c3a540d3 100644
> --- a/src/intel/vulkan/anv_private.h
> +++ b/src/intel/vulkan/anv_private.h
> @@ -49,6 +49,7 @@
> #include "util/list.h"
> #include "util/u_atomic.h"
> #include "util/u_vector.h"
> +#include "util/vma.h"
> #include "vk_alloc.h"
> #include "vk_debug_report.h"
>
> @@ -802,6 +803,7 @@ struct anv_physical_device {
> bool has_exec_async;
> bool has_exec_capture;
> bool has_exec_fence;
> + bool has_exec_softpin;
> bool has_syncobj;
> bool has_syncobj_wait;
> bool has_context_priority;
> @@ -898,6 +900,12 @@ struct anv_device {
> struct anv_device_extension_table enabled_extensions;
> struct anv_dispatch_table dispatch;
>
> + pthread_mutex_t vma_mutex;
> + struct util_vma_heap vma_lo;
> + struct util_vma_heap vma_hi;
> + uint64_t vma_lo_available;
> + uint64_t vma_hi_available;
> +
> struct anv_bo_pool batch_bo_pool;
>
> struct anv_bo_cache bo_cache;
> @@ -991,6 +999,9 @@ int anv_gem_syncobj_wait(struct anv_device *device,
> uint32_t *handles, uint32_t num_handles,
> int64_t abs_timeout_ns, bool wait_all);
>
> +bool anv_vma_alloc(struct anv_device *device, struct anv_bo *bo);
> +void anv_vma_free(struct anv_device *device, struct anv_bo *bo);
> +
> VkResult anv_bo_init_new(struct anv_bo *bo, struct anv_device *device,
> uint64_t size);
>
> struct anv_reloc_list {
> --
> 2.14.3
>
>
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <https://lists.freedesktop.org/archives/mesa-dev/attachments/20180503/158f6c1b/attachment-0001.html>
More information about the mesa-dev
mailing list