[Mesa-dev] [PATCH 09/21] anv: Use the BO cache for DeviceMemory allocations

Jason Ekstrand <jason@jlekstrand.net>
Fri Apr 14 17:37:56 UTC 2017


Reviewed-by: Chad Versace <chadversary@chromium.org>
---
 src/intel/vulkan/anv_device.c  | 27 ++++++++++++++++-----------
 src/intel/vulkan/anv_image.c   |  2 +-
 src/intel/vulkan/anv_intel.c   | 15 ++++++---------
 src/intel/vulkan/anv_private.h |  4 +++-
 src/intel/vulkan/anv_wsi.c     |  8 ++++----
 5 files changed, 30 insertions(+), 26 deletions(-)
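
For reference while reading the hunks below, this is the anv_bo_cache
interface as it can be inferred from the call sites in this patch. It is
only a sketch: the real declarations live elsewhere in the tree (presumably
anv_private.h / anv_allocator.c), parameter names are illustrative, and the
return types of the finish/release entry points are assumed since the patch
never checks them.

   /* Sketch of the BO cache entry points used by this patch (inferred from
    * the call sites; not the authoritative declarations).
    */
   VkResult anv_bo_cache_init(struct anv_bo_cache *cache);
   void     anv_bo_cache_finish(struct anv_bo_cache *cache);

   /* Allocate a cached BO of at least `size` bytes and return a pointer to
    * it; anv_device_memory now stores `struct anv_bo *` rather than an
    * embedded `struct anv_bo`.
    */
   VkResult anv_bo_cache_alloc(struct anv_device *device,
                               struct anv_bo_cache *cache,
                               uint64_t size, struct anv_bo **bo_out);

   /* Wrap an existing prime/dma-buf fd in a cached BO; replaces the manual
    * anv_gem_fd_to_handle() + anv_bo_init() in anv_CreateDmaBufImageINTEL.
    */
   VkResult anv_bo_cache_import(struct anv_device *device,
                                struct anv_bo_cache *cache,
                                int fd, uint64_t size,
                                struct anv_bo **bo_out);

   /* Return a BO to the cache; replaces the explicit anv_gem_munmap() +
    * anv_gem_close() in anv_FreeMemory.
    */
   void     anv_bo_cache_release(struct anv_device *device,
                                 struct anv_bo_cache *cache,
                                 struct anv_bo *bo);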

diff --git a/src/intel/vulkan/anv_device.c b/src/intel/vulkan/anv_device.c
index a7ae6ce..eaf93b5 100644
--- a/src/intel/vulkan/anv_device.c
+++ b/src/intel/vulkan/anv_device.c
@@ -1124,10 +1124,14 @@ VkResult anv_CreateDevice(
 
    anv_bo_pool_init(&device->batch_bo_pool, device);
 
+   result = anv_bo_cache_init(&device->bo_cache);
+   if (result != VK_SUCCESS)
+      goto fail_batch_bo_pool;
+
    result = anv_block_pool_init(&device->dynamic_state_block_pool, device,
                                 16384);
    if (result != VK_SUCCESS)
-      goto fail_batch_bo_pool;
+      goto fail_bo_cache;
 
    anv_state_pool_init(&device->dynamic_state_pool,
                        &device->dynamic_state_block_pool);
@@ -1199,6 +1203,8 @@ VkResult anv_CreateDevice(
  fail_dynamic_state_pool:
    anv_state_pool_finish(&device->dynamic_state_pool);
    anv_block_pool_finish(&device->dynamic_state_block_pool);
+ fail_bo_cache:
+   anv_bo_cache_finish(&device->bo_cache);
  fail_batch_bo_pool:
    anv_bo_pool_finish(&device->batch_bo_pool);
    pthread_cond_destroy(&device->queue_submit);
@@ -1246,6 +1252,8 @@ void anv_DestroyDevice(
    anv_state_pool_finish(&device->dynamic_state_pool);
    anv_block_pool_finish(&device->dynamic_state_block_pool);
 
+   anv_bo_cache_finish(&device->bo_cache);
+
    anv_bo_pool_finish(&device->batch_bo_pool);
 
    pthread_cond_destroy(&device->queue_submit);
@@ -1613,7 +1621,8 @@ VkResult anv_AllocateMemory(
    /* The kernel is going to give us whole pages anyway */
    uint64_t alloc_size = align_u64(pAllocateInfo->allocationSize, 4096);
 
-   result = anv_bo_init_new(&mem->bo, device, alloc_size);
+   result = anv_bo_cache_alloc(device, &device->bo_cache,
+                               alloc_size, &mem->bo);
    if (result != VK_SUCCESS)
       goto fail;
 
@@ -1646,11 +1655,7 @@ void anv_FreeMemory(
    if (mem->map)
       anv_UnmapMemory(_device, _mem);
 
-   if (mem->bo.map)
-      anv_gem_munmap(mem->bo.map, mem->bo.size);
-
-   if (mem->bo.gem_handle != 0)
-      anv_gem_close(device, mem->bo.gem_handle);
+   anv_bo_cache_release(device, &device->bo_cache, mem->bo);
 
    vk_free2(&device->alloc, pAllocator, mem);
 }
@@ -1672,7 +1677,7 @@ VkResult anv_MapMemory(
    }
 
    if (size == VK_WHOLE_SIZE)
-      size = mem->bo.size - offset;
+      size = mem->bo->size - offset;
 
    /* From the Vulkan spec version 1.0.32 docs for MapMemory:
     *
@@ -1682,7 +1687,7 @@ VkResult anv_MapMemory(
     *    equal to the size of the memory minus offset
     */
    assert(size > 0);
-   assert(offset + size <= mem->bo.size);
+   assert(offset + size <= mem->bo->size);
 
    /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
     * takes a VkDeviceMemory pointer, it seems like only one map of the memory
@@ -1702,7 +1707,7 @@ VkResult anv_MapMemory(
    /* Let's map whole pages */
    map_size = align_u64(map_size, 4096);
 
-   void *map = anv_gem_mmap(device, mem->bo.gem_handle,
+   void *map = anv_gem_mmap(device, mem->bo->gem_handle,
                             map_offset, map_size, gem_flags);
    if (map == MAP_FAILED)
       return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
@@ -1854,7 +1859,7 @@ VkResult anv_BindBufferMemory(
    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
 
    if (mem) {
-      buffer->bo = &mem->bo;
+      buffer->bo = mem->bo;
       buffer->offset = memoryOffset;
    } else {
       buffer->bo = NULL;
diff --git a/src/intel/vulkan/anv_image.c b/src/intel/vulkan/anv_image.c
index cf34dbe..4874f2f 100644
--- a/src/intel/vulkan/anv_image.c
+++ b/src/intel/vulkan/anv_image.c
@@ -341,7 +341,7 @@ VkResult anv_BindImageMemory(
       return VK_SUCCESS;
    }
 
-   image->bo = &mem->bo;
+   image->bo = mem->bo;
    image->offset = memoryOffset;
 
    if (image->aux_surface.isl.size > 0) {
diff --git a/src/intel/vulkan/anv_intel.c b/src/intel/vulkan/anv_intel.c
index eda474e..991a935 100644
--- a/src/intel/vulkan/anv_intel.c
+++ b/src/intel/vulkan/anv_intel.c
@@ -49,18 +49,15 @@ VkResult anv_CreateDmaBufImageINTEL(
    if (mem == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
-   uint32_t gem_handle = anv_gem_fd_to_handle(device, pCreateInfo->fd);
-   if (!gem_handle) {
-      result = vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
-      goto fail;
-   }
-
    uint64_t size = (uint64_t)pCreateInfo->strideInBytes * pCreateInfo->extent.height;
 
-   anv_bo_init(&mem->bo, gem_handle, size);
+   result = anv_bo_cache_import(device, &device->bo_cache,
+                                pCreateInfo->fd, size, &mem->bo);
+   if (result != VK_SUCCESS)
+      goto fail;
 
    if (device->instance->physicalDevice.supports_48bit_addresses)
-      mem->bo.flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+      mem->bo->flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
 
    anv_image_create(_device,
       &(struct anv_image_create_info) {
@@ -83,7 +80,7 @@ VkResult anv_CreateDmaBufImageINTEL(
       pAllocator, &image_h);
 
    image = anv_image_from_handle(image_h);
-   image->bo = &mem->bo;
+   image->bo = mem->bo;
    image->offset = 0;
 
    assert(image->extent.width > 0);
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index 127f3c2..898f0cf 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -721,6 +721,8 @@ struct anv_device {
 
     struct anv_bo_pool                          batch_bo_pool;
 
+    struct anv_bo_cache                         bo_cache;
+
     struct anv_block_pool                       dynamic_state_block_pool;
     struct anv_state_pool                       dynamic_state_pool;
 
@@ -983,7 +985,7 @@ _anv_combine_address(struct anv_batch *batch, void *location,
    }
 
 struct anv_device_memory {
-   struct anv_bo                                bo;
+   struct anv_bo *                              bo;
    uint32_t                                     type_index;
    VkDeviceSize                                 map_size;
    void *                                       map;
diff --git a/src/intel/vulkan/anv_wsi.c b/src/intel/vulkan/anv_wsi.c
index a024561..17c43f8 100644
--- a/src/intel/vulkan/anv_wsi.c
+++ b/src/intel/vulkan/anv_wsi.c
@@ -208,8 +208,8 @@ x11_anv_wsi_image_create(VkDevice device_h,
     * know we're writing to them and synchronize uses on other rings (eg if
     * the display server uses the blitter ring).
     */
-   memory->bo.flags &= ~EXEC_OBJECT_ASYNC;
-   memory->bo.flags |= EXEC_OBJECT_WRITE;
+   memory->bo->flags &= ~EXEC_OBJECT_ASYNC;
+   memory->bo->flags |= EXEC_OBJECT_WRITE;
 
    anv_BindImageMemory(device_h, image_h, memory_h, 0);
 
@@ -217,7 +217,7 @@ x11_anv_wsi_image_create(VkDevice device_h,
    assert(surface->isl.tiling == ISL_TILING_X);
 
    *row_pitch = surface->isl.row_pitch;
-   int ret = anv_gem_set_tiling(device, memory->bo.gem_handle,
+   int ret = anv_gem_set_tiling(device, memory->bo->gem_handle,
                                 surface->isl.row_pitch, I915_TILING_X);
    if (ret) {
       /* FINISHME: Choose a better error. */
@@ -226,7 +226,7 @@ x11_anv_wsi_image_create(VkDevice device_h,
       goto fail_alloc_memory;
    }
 
-   int fd = anv_gem_handle_to_fd(device, memory->bo.gem_handle);
+   int fd = anv_gem_handle_to_fd(device, memory->bo->gem_handle);
    if (fd == -1) {
       /* FINISHME: Choose a better error. */
       result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
-- 
2.5.0.400.gff86faf


