[Mesa-dev] [PATCH] winsys/amdgpu: use the new BO list API

Marek Olšák <maraeo@gmail.com>
Mon Jan 7 19:54:16 UTC 2019


From: Marek Olšák <marek.olsak@amd.com>

---
I'll bump the libdrm version requirement after the libdrm patch lands.
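
For reviewers: the raw entry points used below are not in any libdrm
release yet. Inferred from the call sites in this patch (treat them as a
sketch until the libdrm patch lands), the prototypes would be roughly:

    /* Create a BO list directly from drm_amdgpu_bo_list_entry entries,
     * returning the raw uint32_t kernel handle instead of an
     * amdgpu_bo_list_handle wrapper. */
    int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
                                  uint32_t number_of_buffers,
                                  struct drm_amdgpu_bo_list_entry *buffers,
                                  uint32_t *result);

    /* Destroy a BO list created by the raw API. */
    int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev, uint32_t bo_list);

    /* Like amdgpu_cs_submit_raw, but takes the raw uint32_t BO list
     * handle (0 = no list) instead of a wrapper object. */
    int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
                              amdgpu_context_handle context,
                              uint32_t bo_list_handle,
                              int num_chunks,
                              struct drm_amdgpu_cs_chunk *chunks,
                              uint64_t *seq_no);

The benefit is that the winsys can pass the KMS handles it already tracks
(bo->u.real.kms_handle) without creating or looking up amdgpu_bo_handle
wrapper objects first.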

 src/gallium/winsys/amdgpu/drm/amdgpu_cs.c | 74 ++++++++---------------
 1 file changed, 25 insertions(+), 49 deletions(-)
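
Summary of the new standard path: on kernels with DRM minor >= 27 the BO
list is no longer created with a separate ioctl; it is passed inline with
the submission as an AMDGPU_CHUNK_ID_BO_HANDLES chunk. A minimal sketch,
assuming "list" and "num_handles" are filled as in the patch below:

    struct drm_amdgpu_bo_list_in bo_list_in;
    struct drm_amdgpu_cs_chunk chunk;

    bo_list_in.operation = ~0;    /* no list-handle operation */
    bo_list_in.list_handle = ~0;  /* no pre-created list handle */
    bo_list_in.bo_number = num_handles;
    bo_list_in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
    bo_list_in.bo_info_ptr = (uint64_t)(uintptr_t)list;

    chunk.chunk_id = AMDGPU_CHUNK_ID_BO_HANDLES;
    chunk.length_dw = sizeof(struct drm_amdgpu_bo_list_in) / 4;
    chunk.chunk_data = (uintptr_t)&bo_list_in;

This saves a BO list create/destroy ioctl pair per submission; the old
path is kept only as a fallback for older kernels.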

diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
index 9e4de7779e2..72cf1e6c639 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
@@ -1290,52 +1290,53 @@ static bool amdgpu_add_sparse_backing_buffers(struct amdgpu_cs_context *cs)
 
    return true;
 }
 
 void amdgpu_cs_submit_ib(void *job, int thread_index)
 {
    struct amdgpu_cs *acs = (struct amdgpu_cs*)job;
    struct amdgpu_winsys *ws = acs->ctx->ws;
    struct amdgpu_cs_context *cs = acs->cst;
    int i, r;
-   amdgpu_bo_list_handle bo_list = NULL;
+   uint32_t bo_list = 0;
    uint64_t seq_no = 0;
    bool has_user_fence = amdgpu_cs_has_user_fence(cs);
    bool use_bo_list_create = ws->info.drm_minor < 27;
    struct drm_amdgpu_bo_list_in bo_list_in;
 
    /* Prepare the buffer list. */
    if (ws->debug_all_bos) {
       /* The buffer list contains all buffers. This is a slow path that
        * ensures that no buffer is missing in the BO list.
        */
+      unsigned num_handles = 0;
+      struct drm_amdgpu_bo_list_entry *list =
+         alloca(ws->num_buffers * sizeof(struct drm_amdgpu_bo_list_entry));
       struct amdgpu_winsys_bo *bo;
-      amdgpu_bo_handle *handles;
-      unsigned num = 0;
 
       simple_mtx_lock(&ws->global_bo_list_lock);
-      handles = alloca(sizeof(handles[0]) * ws->num_buffers);
-
       LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, u.real.global_list_item) {
-         assert(num < ws->num_buffers);
-         handles[num++] = bo->bo;
+         if (bo->is_local)
+            continue;
+
+         list[num_handles].bo_handle = bo->u.real.kms_handle;
+         list[num_handles].bo_priority = 0;
+         ++num_handles;
       }
 
-      r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
-                                handles, NULL, &bo_list);
+      r = amdgpu_bo_list_create_raw(ws->dev, num_handles, list, &bo_list);
       simple_mtx_unlock(&ws->global_bo_list_lock);
       if (r) {
          fprintf(stderr, "amdgpu: buffer list creation failed (%d)\n", r);
          goto cleanup;
       }
-   } else if (!use_bo_list_create) {
-      /* Standard path passing the buffer list via the CS ioctl. */
+   } else {
       if (!amdgpu_add_sparse_backing_buffers(cs)) {
          fprintf(stderr, "amdgpu: amdgpu_add_sparse_backing_buffers failed\n");
          r = -ENOMEM;
          goto cleanup;
       }
 
       struct drm_amdgpu_bo_list_entry *list =
          alloca(cs->num_real_buffers * sizeof(struct drm_amdgpu_bo_list_entry));
 
       unsigned num_handles = 0;
@@ -1345,59 +1346,34 @@ void amdgpu_cs_submit_ib(void *job, int thread_index)
          if (buffer->bo->is_local)
             continue;
 
          assert(buffer->u.real.priority_usage != 0);
 
          list[num_handles].bo_handle = buffer->bo->u.real.kms_handle;
          list[num_handles].bo_priority = (util_last_bit(buffer->u.real.priority_usage) - 1) / 2;
          ++num_handles;
       }
 
-      bo_list_in.operation = ~0;
-      bo_list_in.list_handle = ~0;
-      bo_list_in.bo_number = num_handles;
-      bo_list_in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
-      bo_list_in.bo_info_ptr = (uint64_t)(uintptr_t)list;
-   } else {
-      /* Legacy path creating the buffer list handle and passing it to the CS ioctl. */
-      unsigned num_handles;
-
-      if (!amdgpu_add_sparse_backing_buffers(cs)) {
-         fprintf(stderr, "amdgpu: amdgpu_add_sparse_backing_buffers failed\n");
-         r = -ENOMEM;
-         goto cleanup;
-      }
-
-      amdgpu_bo_handle *handles = alloca(sizeof(*handles) * cs->num_real_buffers);
-      uint8_t *flags = alloca(sizeof(*flags) * cs->num_real_buffers);
-
-      num_handles = 0;
-      for (i = 0; i < cs->num_real_buffers; ++i) {
-         struct amdgpu_cs_buffer *buffer = &cs->real_buffers[i];
-
-	 if (buffer->bo->is_local)
-            continue;
-
-         assert(buffer->u.real.priority_usage != 0);
-
-         handles[num_handles] = buffer->bo->bo;
-         flags[num_handles] = (util_last_bit(buffer->u.real.priority_usage) - 1) / 2;
-	 ++num_handles;
-      }
-
-      if (num_handles) {
-         r = amdgpu_bo_list_create(ws->dev, num_handles,
-                                   handles, flags, &bo_list);
+      if (use_bo_list_create) {
+         /* Legacy path creating the buffer list handle and passing it to the CS ioctl. */
+         r = amdgpu_bo_list_create_raw(ws->dev, num_handles, list, &bo_list);
          if (r) {
             fprintf(stderr, "amdgpu: buffer list creation failed (%d)\n", r);
             goto cleanup;
          }
+      } else {
+         /* Standard path passing the buffer list via the CS ioctl. */
+         bo_list_in.operation = ~0;
+         bo_list_in.list_handle = ~0;
+         bo_list_in.bo_number = num_handles;
+         bo_list_in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
+         bo_list_in.bo_info_ptr = (uint64_t)(uintptr_t)list;
       }
    }
 
    if (acs->ring_type == RING_GFX)
       ws->gfx_bo_list_counter += cs->num_real_buffers;
 
    if (acs->stop_exec_on_failure && acs->ctx->num_rejected_cs) {
       r = -ECANCELED;
    } else {
       struct drm_amdgpu_cs_chunk chunks[6];
@@ -1494,22 +1470,22 @@ void amdgpu_cs_submit_ib(void *job, int thread_index)
       /* BO list */
       if (!use_bo_list_create) {
          chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_BO_HANDLES;
          chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_bo_list_in) / 4;
          chunks[num_chunks].chunk_data = (uintptr_t)&bo_list_in;
          num_chunks++;
       }
 
       assert(num_chunks <= ARRAY_SIZE(chunks));
 
-      r = amdgpu_cs_submit_raw(ws->dev, acs->ctx->ctx, bo_list,
-                               num_chunks, chunks, &seq_no);
+      r = amdgpu_cs_submit_raw2(ws->dev, acs->ctx->ctx, bo_list,
+                                num_chunks, chunks, &seq_no);
    }
 
    if (r) {
       if (r == -ENOMEM)
          fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
       else if (r == -ECANCELED)
          fprintf(stderr, "amdgpu: The CS has been cancelled because the context is lost.\n");
       else
          fprintf(stderr, "amdgpu: The CS has been rejected, "
                  "see dmesg for more information (%i).\n", r);
@@ -1520,21 +1496,21 @@ void amdgpu_cs_submit_ib(void *job, int thread_index)
       /* Success. */
       uint64_t *user_fence = NULL;
 
       if (has_user_fence)
          user_fence = acs->ctx->user_fence_cpu_address_base + acs->ring_type;
       amdgpu_fence_submitted(cs->fence, seq_no, user_fence);
    }
 
    /* Cleanup. */
    if (bo_list)
-      amdgpu_bo_list_destroy(bo_list);
+      amdgpu_bo_list_destroy_raw(ws->dev, bo_list);
 
 cleanup:
    /* If there was an error, signal the fence, because it won't be signalled
     * by the hardware. */
    if (r)
       amdgpu_fence_signalled(cs->fence);
 
    cs->error_code = r;
 
    for (i = 0; i < cs->num_real_buffers; i++)
-- 
2.17.1


