[Mesa-dev] [PATCH v2 20/25] winsys/amdgpu: sparse buffer debugging helpers

Nicolai Hähnle nhaehnle at gmail.com
Tue Mar 28 09:12:10 UTC 2017


From: Nicolai Hähnle <nicolai.haehnle at amd.com>

---
 src/gallium/winsys/amdgpu/drm/amdgpu_bo.c | 61 +++++++++++++++++++++++++++++++
 1 file changed, 61 insertions(+)

diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
index 567399d..c650993 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
@@ -31,20 +31,22 @@
 
 #include "amdgpu_cs.h"
 
 #include "os/os_time.h"
 #include "state_tracker/drm_driver.h"
 #include <amdgpu_drm.h>
 #include <xf86drm.h>
 #include <stdio.h>
 #include <inttypes.h>
 
+/* Set to 1 for verbose output showing committed sparse buffer ranges. */
+#define DEBUG_SPARSE_COMMITS 0
 
 struct amdgpu_sparse_backing_chunk {
    uint32_t begin, end;
 };
 
 static struct pb_buffer *
 amdgpu_bo_create(struct radeon_winsys *rws,
                  uint64_t size,
                  unsigned alignment,
                  enum radeon_bo_domain domain,
@@ -568,20 +570,75 @@ void amdgpu_bo_slab_free(void *priv, struct pb_slab *pslab)
    struct amdgpu_slab *slab = amdgpu_slab(pslab);
 
    for (unsigned i = 0; i < slab->base.num_entries; ++i)
       amdgpu_bo_remove_fences(&slab->entries[i]);
 
    FREE(slab->entries);
    amdgpu_winsys_bo_reference(&slab->buffer, NULL);
    FREE(slab);
 }
 
+#if DEBUG_SPARSE_COMMITS
+/* Dump the commitment state of a sparse buffer to stderr.
+ *
+ * Coalesces consecutive VA pages that map to consecutive pages of the same
+ * backing buffer into a single "first..last: backing=ptr:first..last" line,
+ * then lists every backing buffer together with its free chunks.
+ *
+ * Caller must hold bo->u.sparse.commit_lock (the commitments array and the
+ * backing list are read without further synchronization here).
+ *
+ * \param bo    the sparse buffer to dump
+ * \param func  name of the calling function, printed in the header line
+ */
+static void
+sparse_dump(struct amdgpu_winsys_bo *bo, const char *func)
+{
+   fprintf(stderr, "%s: %p (size=%"PRIu64", num_va_pages=%u) @ %s\n"
+                   "Commitments:\n",
+           __func__, bo, bo->base.size, bo->u.sparse.num_va_pages, func);
+
+   struct amdgpu_sparse_backing *span_backing = NULL;
+   uint32_t span_first_backing_page = 0;
+   uint32_t span_first_va_page = 0;
+   uint32_t va_page = 0;
+
+   for (;;) {
+      struct amdgpu_sparse_backing *backing = NULL;
+      uint32_t backing_page = 0;
+
+      if (va_page < bo->u.sparse.num_va_pages) {
+         backing = bo->u.sparse.commitments[va_page].backing;
+         backing_page = bo->u.sparse.commitments[va_page].page;
+      }
+
+      /* Flush the current span when the run of contiguous backing pages is
+       * broken; the one-past-the-end iteration (backing == NULL) flushes a
+       * span that extends to the last VA page.
+       */
+      if (span_backing &&
+          (backing != span_backing ||
+           backing_page != span_first_backing_page + (va_page - span_first_va_page))) {
+         fprintf(stderr, " %u..%u: backing=%p:%u..%u\n",
+                 span_first_va_page, va_page - 1, span_backing,
+                 span_first_backing_page,
+                 span_first_backing_page + (va_page - span_first_va_page) - 1);
+
+         span_backing = NULL;
+      }
+
+      if (va_page >= bo->u.sparse.num_va_pages)
+         break;
+
+      /* Start a new span at the first committed page after a gap/flush. */
+      if (backing && !span_backing) {
+         span_backing = backing;
+         span_first_backing_page = backing_page;
+         span_first_va_page = va_page;
+      }
+
+      va_page++;
+   }
+
+   fprintf(stderr, "Backing:\n");
+
+   list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
+      fprintf(stderr, " %p (size=%"PRIu64")\n", backing, backing->bo->base.size);
+      for (unsigned i = 0; i < backing->num_chunks; ++i)
+         fprintf(stderr, "   %u..%u\n", backing->chunks[i].begin, backing->chunks[i].end);
+   }
+}
+#endif
+
 /*
  * Attempt to allocate the given number of backing pages. Fewer pages may be
  * allocated (depending on the fragmentation of existing backing buffers),
  * which will be reflected by a change to *pnum_pages.
  */
 static struct amdgpu_sparse_backing *
 sparse_backing_alloc(struct amdgpu_winsys_bo *bo, uint32_t *pstart_page, uint32_t *pnum_pages)
 {
    struct amdgpu_sparse_backing *best_backing;
    unsigned best_idx;
@@ -862,20 +919,24 @@ amdgpu_bo_sparse_commit(struct pb_buffer *buf, uint64_t offset, uint64_t size,
    assert(offset <= bo->base.size);
    assert(size <= bo->base.size - offset);
    assert(size % RADEON_SPARSE_PAGE_SIZE == 0 || offset + size == bo->base.size);
 
    comm = bo->u.sparse.commitments;
    va_page = offset / RADEON_SPARSE_PAGE_SIZE;
    end_va_page = va_page + DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);
 
    mtx_lock(&bo->u.sparse.commit_lock);
 
+#if DEBUG_SPARSE_COMMITS
+   sparse_dump(bo, __func__);
+#endif
+
    if (commit) {
       while (va_page < end_va_page) {
          uint32_t span_va_page;
 
          /* Skip pages that are already committed. */
          if (comm[va_page].backing) {
             va_page++;
             continue;
          }
 
-- 
2.9.3



More information about the mesa-dev mailing list