[Mesa-dev] [RFC PATCH 08/14] anv/allocator: Add support for a list of BOs in block pool.

Rafael Antognolli rafael.antognolli at intel.com
Sat Dec 8 00:05:47 UTC 2018


So far we use only a single BO (the last one created) in the block pool.
Once we stop using the userptr API we will need multiple BOs, so add the
code to store a list of BOs in the block pool now.

This has several implications, the main one being that we can't use
pool->map as before. For that reason, update the getter to find which
BO a given offset belongs to, and return that BO's map.
---
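Reviewer note (not part of the commit message): the list handling added
in anv_block_pool_bo_append() is a plain singly linked list with a head
(pool->bos) and a tail (pool->last) pointer for O(1) append. Below is a
minimal, self-contained sketch of that scheme under hypothetical names
(node, list, list_append); it models the append-and-teardown pattern,
not the driver code itself.

   #include <assert.h>
   #include <stdio.h>
   #include <stdlib.h>

   struct node {
      int payload;            /* stands in for the embedded struct anv_bo */
      struct node *next;
   };

   struct list {
      struct node *head;      /* like pool->bos: first element added */
      struct node *tail;      /* like pool->last: O(1) append point  */
   };

   static struct node *
   list_append(struct list *l, struct node *elem)
   {
      elem->next = NULL;
      if (l->tail)
         l->tail->next = elem;
      l->tail = elem;
      if (l->head == NULL)    /* the first element also becomes the head */
         l->head = elem;
      return elem;
   }

   int
   main(void)
   {
      struct list l = { NULL, NULL };
      for (int i = 0; i < 3; i++) {
         struct node *n = malloc(sizeof(*n));
         assert(n);
         n->payload = i;
         list_append(&l, n);
      }
      /* Walk and free, mirroring anv_block_pool_bo_finish(). */
      for (struct node *it = l.head, *next; it != NULL; it = next) {
         next = it->next;
         printf("node %d\n", it->payload);
         free(it);
      }
      return 0;
   }
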
 src/intel/vulkan/anv_allocator.c | 132 +++++++++++++++++++++++++------
 src/intel/vulkan/anv_private.h   |  17 ++++
 2 files changed, 125 insertions(+), 24 deletions(-)
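
Reviewer note (not part of the commit message): the lookup in
anv_block_pool_get_bo() is a linear prefix-sum search over the BO sizes
that also rebases the incoming offset to be relative to the BO it lands
in. A standalone model of just that logic, under assumed names (bo_size,
find_bo):

   #include <assert.h>
   #include <stdint.h>
   #include <stdio.h>

   /* Sizes of the BOs in list order; offsets are relative to the start
    * of the first BO.
    */
   static const int32_t bo_size[] = { 4096, 8192, 16384 };

   static int
   find_bo(int32_t *offset)
   {
      int32_t cur_offset = 0;
      for (size_t i = 0; i < sizeof(bo_size) / sizeof(bo_size[0]); i++) {
         if (*offset < cur_offset + bo_size[i]) {
            *offset -= cur_offset;   /* rebase into the found BO */
            return (int)i;
         }
         cur_offset += bo_size[i];
      }
      return -1;                     /* offset past the end of the pool */
   }

   int
   main(void)
   {
      int32_t off = 5000;                 /* 4096 + 904 */
      int idx = find_bo(&off);
      assert(idx == 1 && off == 904);     /* 904 bytes into the second BO */
      printf("BO %d, offset %d\n", idx, off);
      return 0;
   }

With BOs of 4096, 8192 and 16384 bytes, offset 5000 falls 904 bytes into
the second BO, which is what anv_block_pool_map() needs in the softpin
case: the map of that BO plus the rebased offset.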

diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c
index 2eb191e98dc..31258e38635 100644
--- a/src/intel/vulkan/anv_allocator.c
+++ b/src/intel/vulkan/anv_allocator.c
@@ -428,6 +428,34 @@ static VkResult
 anv_block_pool_expand_range(struct anv_block_pool *pool,
                             uint32_t center_bo_offset, uint32_t size);
 
+static struct anv_bo *
+anv_block_pool_bo_append(struct anv_block_pool *pool, struct anv_bo_list *elem)
+{
+   /* The caller allocates elem; this function only links it into the list. */
+   elem->next = NULL;
+
+   if (pool->last)
+      pool->last->next = elem;
+   pool->last = elem;
+
+   /* The first BO added also becomes the head of the list (pool->bos). */
+   if (pool->bos == NULL)
+      pool->bos = elem;
+
+   return &elem->bo;
+}
+
+static void
+anv_block_pool_bo_finish(struct anv_block_pool *pool)
+{
+   struct anv_bo_list *iter, *next;
+
+   for (iter = pool->bos; iter != NULL; iter = next) {
+      next = iter->next;
+      free(iter);
+   }
+}
+
 VkResult
 anv_block_pool_init(struct anv_block_pool *pool,
                     struct anv_device *device,
@@ -439,19 +467,15 @@ anv_block_pool_init(struct anv_block_pool *pool,
 
    pool->device = device;
    pool->bo_flags = bo_flags;
+   pool->bo = NULL;
+   pool->bos = NULL;
+   pool->last = NULL;
+   pool->size = 0;
    pool->start_address = gen_canonical_address(start_address);
 
-   pool->bo = malloc(sizeof(*pool->bo));
-   if (!pool->bo)
-      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
-
-   anv_bo_init(pool->bo, 0, 0);
-
    pool->fd = memfd_create("block pool", MFD_CLOEXEC);
-   if (pool->fd == -1) {
-      result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
-      goto fail_bo;
-   }
+   if (pool->fd == -1)
+      return vk_error(VK_ERROR_INITIALIZATION_FAILED);
 
    /* Just make it 2GB up-front.  The Linux kernel won't actually back it
     * with pages until we either map and fault on one of them or we use
@@ -484,8 +508,6 @@ anv_block_pool_init(struct anv_block_pool *pool,
    u_vector_finish(&pool->mmap_cleanups);
  fail_fd:
    close(pool->fd);
- fail_bo:
-   free(pool->bo);
 
    return result;
 }
@@ -495,7 +517,6 @@ anv_block_pool_finish(struct anv_block_pool *pool)
 {
    struct anv_mmap_cleanup *cleanup;
 
-   free(pool->bo);
    u_vector_foreach(cleanup, &pool->mmap_cleanups) {
       if (cleanup->map)
          munmap(cleanup->map, cleanup->size);
@@ -506,6 +527,7 @@ anv_block_pool_finish(struct anv_block_pool *pool)
    u_vector_finish(&pool->mmap_cleanups);
 
    close(pool->fd);
+   anv_block_pool_bo_finish(pool);
 }
 
 static VkResult
@@ -599,24 +621,86 @@ anv_block_pool_expand_range(struct anv_block_pool *pool,
     * the EXEC_OBJECT_SUPPORTS_48B_ADDRESS flag and the kernel does all of the
     * hard work for us.
     */
-   anv_bo_init(pool->bo, gem_handle, size);
+   struct anv_bo *bo;
+   struct anv_bo_list *bo_elem = NULL;
+
+   /* When softpin is used, we will keep adding new BOs to the pool every
+    * time we expand the range. Without softpin a single BO backs the whole
+    * pool, so we only need to add one if we don't have it yet.
+    */
+   if (!pool->bo) {
+      bo_elem = malloc(sizeof(*bo_elem));
+      if (!bo_elem)
+         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+      bo = &bo_elem->bo;
+      /* pool->bo always points to the first BO added to this block pool. */
+      pool->bo = bo;
+   } else {
+      bo = pool->bo;
+   }
+
+   anv_bo_init(bo, gem_handle, size);
    if (pool->bo_flags & EXEC_OBJECT_PINNED) {
-      pool->bo->offset = pool->start_address + BLOCK_POOL_MEMFD_CENTER -
+      bo->offset = pool->start_address + BLOCK_POOL_MEMFD_CENTER -
          center_bo_offset;
    }
-   pool->bo->flags = pool->bo_flags;
-   pool->bo->map = map;
+   bo->flags = pool->bo_flags;
+   bo->map = map;
+
+   if (bo_elem)
+      anv_block_pool_bo_append(pool, bo_elem);
+   pool->size = size;
 
    return VK_SUCCESS;
 }
 
+static struct anv_bo *
+anv_block_pool_get_bo(struct anv_block_pool *pool, int32_t *offset)
+{
+   struct anv_bo *bo, *bo_found = NULL;
+   int32_t cur_offset = 0;
+
+   assert(offset);
+
+   if (!(pool->bo_flags & EXEC_OBJECT_PINNED))
+      return pool->bo;
+
+   struct anv_bo_list *iter;
+   anv_block_pool_foreach_bo(pool->bos, iter, bo) {
+      if (*offset < cur_offset + bo->size) {
+         bo_found = bo;
+         break;
+      }
+      cur_offset += bo->size;
+   }
+
+   assert(bo_found != NULL);
+   *offset -= cur_offset;
+
+   return bo_found;
+}
+
 struct anv_pool_map
 anv_block_pool_map(struct anv_block_pool *pool, int32_t offset)
 {
-   return (struct anv_pool_map) {
-      .map = pool->map,
-      .offset = offset,
-   };
+   if (pool->bo_flags & EXEC_OBJECT_PINNED) {
+      /* If softpin is used, we have multiple BOs, and we need the map from the
+       * BO that contains this offset.
+       */
+      struct anv_bo *bo = anv_block_pool_get_bo(pool, &offset);
+      return (struct anv_pool_map) {
+         .map = bo->map,
+         .offset = offset,
+      };
+   } else {
+      /* Otherwise we want the pool map, which takes into account the center
+       * offset too.
+       */
+      return (struct anv_pool_map) {
+         .map = pool->map,
+         .offset = offset,
+      };
+   }
 }
 
 /** Grows and re-centers the block pool.
@@ -668,7 +752,7 @@ anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state)
 
    assert(state == &pool->state || back_used > 0);
 
-   uint32_t old_size = pool->bo->size;
+   uint32_t old_size = pool->size;
 
    /* The block pool is always initialized to a nonzero size and this function
     * is always called after initialization.
@@ -694,7 +778,7 @@ anv_block_pool_grow(struct anv_block_pool *pool, struct anv_block_state *state)
    while (size < back_required + front_required)
       size *= 2;
 
-   assert(size > pool->bo->size);
+   assert(size > pool->size);
 
    /* We compute a new center_bo_offset such that, when we double the size
     * of the pool, we maintain the ratio of how much is used by each side.
@@ -742,7 +826,7 @@ done:
        * needs to do so in order to maintain its concurrency model.
        */
       if (state == &pool->state) {
-         return pool->bo->size - pool->center_bo_offset;
+         return pool->size - pool->center_bo_offset;
       } else {
          assert(pool->center_bo_offset > 0);
          return pool->center_bo_offset;
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index bf98c700873..b01b392daee 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -627,13 +627,30 @@ struct anv_block_state {
    };
 };
 
+/* An element of the block pool's BO list; the embedded BO and the
+ * list node are allocated and freed together. */
+struct anv_bo_list {
+   struct anv_bo bo;
+   struct anv_bo_list *next;
+};
+
+#define anv_block_pool_foreach_bo(list, iter, bo)  \
+   for (iter = list;                               \
+        iter != NULL && (bo = &iter->bo, 1);       \
+        iter = iter->next)
+
+
 struct anv_block_pool {
    struct anv_device *device;
 
    uint64_t bo_flags;
 
+   struct anv_bo_list *bos;
+   struct anv_bo_list *last;
    struct anv_bo *bo;
 
+   uint64_t size;
+
    /* The address where the start of the pool is pinned. The various bos that
     * are created as the pool grows will have addresses in the range
     * [start_address, start_address + BLOCK_POOL_MEMFD_SIZE).
-- 
2.17.1


