[Mesa-dev] [PATCH 1/4] i965/drm: Reorganize code for the next patch

James Xiong james.xiong at intel.com
Sat May 5 00:56:02 UTC 2018


From: "Xiong, James" <james.xiong at intel.com>

Split bo_alloc_internal, and add a new function, cached_bo_for_size,
that searches for a suitable cached buffer for a given size.

Signed-off-by: Xiong, James <james.xiong at intel.com>
---
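Notes (not for the commit log): below is a condensed sketch of the
allocation flow after this patch. It is illustrative only:
alloc_fresh_bo() is a hypothetical stand-in for the inline
DRM_IOCTL_I915_GEM_CREATE fallback in the real diff, and error
handling, name setup, and the DBG output are elided.

  static struct brw_bo *
  bo_alloc_internal(struct brw_bufmgr *bufmgr, const char *name,
                    uint64_t size, unsigned flags,
                    uint32_t tiling_mode, uint32_t stride)
  {
     /* Round the request up to whole pages. */
     size = ALIGN(size, getpagesize());

     mtx_lock(&bufmgr->lock);

     /* Try to reuse a suitably sized buffer from the cache first... */
     struct brw_bo *bo =
        cached_bo_for_size(bufmgr, size, tiling_mode, stride, flags);

     /* ...and fall back to a fresh kernel allocation on a miss. */
     if (bo == NULL)
        bo = alloc_fresh_bo(bufmgr, name, size, tiling_mode, stride);

     mtx_unlock(&bufmgr->lock);
     return bo;
  }

Keeping the lock acquisition and the fallback path in bo_alloc_internal()
means the cache lookup can change independently in the next patch.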
 src/mesa/drivers/dri/i965/brw_bufmgr.c | 96 ++++++++++++++++++-----------
 1 file changed, 49 insertions(+), 47 deletions(-)

diff --git a/src/mesa/drivers/dri/i965/brw_bufmgr.c b/src/mesa/drivers/dri/i965/brw_bufmgr.c
index 7cb1f03..e68da26 100644
--- a/src/mesa/drivers/dri/i965/brw_bufmgr.c
+++ b/src/mesa/drivers/dri/i965/brw_bufmgr.c
@@ -263,53 +263,31 @@ brw_bo_cache_purge_bucket(struct brw_bufmgr *bufmgr,
    }
 }
 
+/* Search for a suitable cached bo for reuse */
 static struct brw_bo *
-bo_alloc_internal(struct brw_bufmgr *bufmgr,
-                  const char *name,
-                  uint64_t size,
-                  unsigned flags,
-                  uint32_t tiling_mode,
-                  uint32_t stride)
+cached_bo_for_size(struct brw_bufmgr *bufmgr,
+                   uint64_t size,
+                   uint32_t tiling_mode,
+                   uint32_t stride,
+                   unsigned flags)
 {
-   struct brw_bo *bo;
-   unsigned int page_size = getpagesize();
-   int ret;
-   struct bo_cache_bucket *bucket;
-   bool alloc_from_cache;
-   uint64_t bo_size;
-   bool busy = false;
-   bool zeroed = false;
-
-   if (flags & BO_ALLOC_BUSY)
-      busy = true;
-
-   if (flags & BO_ALLOC_ZEROED)
-      zeroed = true;
+   bool busy = (flags & BO_ALLOC_BUSY) != 0;
+   bool zeroed = (flags & BO_ALLOC_ZEROED) != 0;
+   struct bo_cache_bucket *bucket =
+      (bufmgr->bo_reuse) ? bucket_for_size(bufmgr, size) : NULL;
 
   /* BUSY doesn't really jive with ZEROED as we have to wait for it to
     * be idle before we can memset.  Just disallow that combination.
     */
    assert(!(busy && zeroed));
 
-   /* Round the allocated size up to a power of two number of pages. */
-   bucket = bucket_for_size(bufmgr, size);
-
-   /* If we don't have caching at this size, don't actually round the
-    * allocation up.
-    */
-   if (bucket == NULL) {
-      bo_size = size;
-      if (bo_size < page_size)
-         bo_size = page_size;
-   } else {
-      bo_size = bucket->size;
-   }
-
-   mtx_lock(&bufmgr->lock);
-   /* Get a buffer out of the cache if available */
+   if (bucket != NULL) {
+      struct brw_bo *bo;
retry:
-   alloc_from_cache = false;
-   if (bucket != NULL && !list_empty(&bucket->head)) {
+      bo = NULL;
+      if (list_empty(&bucket->head))
+         return NULL;
+
       if (busy && !zeroed) {
          /* Allocate new render-target BOs from the tail (MRU)
           * of the list, as it will likely be hot in the GPU
@@ -319,7 +297,6 @@ retry:
           */
          bo = LIST_ENTRY(struct brw_bo, bucket->head.prev, head);
          list_del(&bo->head);
-         alloc_from_cache = true;
       } else {
          /* For non-render-target BOs (where we're probably
           * going to map it first thing in order to fill it
@@ -330,16 +307,17 @@ retry:
           */
          bo = LIST_ENTRY(struct brw_bo, bucket->head.next, head);
          if (!brw_bo_busy(bo)) {
-            alloc_from_cache = true;
            list_del(&bo->head);
+         } else {
+            bo = NULL;
         }
       }
 
-      if (alloc_from_cache) {
+      if (bo) {
          if (!brw_bo_madvise(bo, I915_MADV_WILLNEED)) {
             bo_free(bo);
             brw_bo_cache_purge_bucket(bufmgr, bucket);
-            goto retry;
+            return NULL;
          }
 
          if (bo_set_tiling_internal(bo, tiling_mode, stride)) {
@@ -353,20 +331,44 @@ retry:
                bo_free(bo);
                goto retry;
             }
-            memset(map, 0, bo_size);
+            memset(map, 0, bo->size);
          }
       }
+
+      return bo;
    }
 
-   if (!alloc_from_cache) {
+   return NULL;
+}
+
+static struct brw_bo *
+bo_alloc_internal(struct brw_bufmgr *bufmgr,
+                  const char *name,
+                  uint64_t size,
+                  unsigned flags,
+                  uint32_t tiling_mode,
+                  uint32_t stride)
+{
+   struct brw_bo *bo;
+   int ret;
+
+   /* Align the requested size to the page size */
+   size = ALIGN(size, getpagesize());
+
+   mtx_lock(&bufmgr->lock);
+
+   /* Get a buffer out of the cache if available */
+   bo = cached_bo_for_size(bufmgr, size, tiling_mode, stride, flags);
+
+   if (bo == NULL) {
       bo = calloc(1, sizeof(*bo));
       if (!bo)
          goto err;
 
-      bo->size = bo_size;
+      bo->size = size;
       bo->idle = true;
 
-      struct drm_i915_gem_create create = { .size = bo_size };
+      struct drm_i915_gem_create create = { .size = size };
 
       /* All new BOs we get from the kernel are zeroed, so we don't need to
        * worry about that here.
@@ -413,7 +415,7 @@ retry:
    mtx_unlock(&bufmgr->lock);
 
    DBG("bo_create: buf %d (%s) %llub\n", bo->gem_handle, bo->name,
-       (unsigned long long) size);
+       (unsigned long long) bo->size);
 
    return bo;
 
-- 
2.7.4


