[Mesa-dev] [PATCH 7/7] winsys/amdgpu: use the new raw CS API

Marek Olšák maraeo at gmail.com
Wed Sep 6 09:34:30 UTC 2017


From: Marek Olšák <marek.olsak at amd.com>

Switch command submission from the amdgpu_cs_request/amdgpu_cs_submit wrappers
to amdgpu_cs_submit_raw, building the IB, fence, and dependency chunks in the
winsys directly. This also cleans things up.
---
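For reviewers, a condensed sketch of the raw submission path this patch
adopts (illustrative only, not part of the commit; it uses the
drm_amdgpu_cs_chunk / drm_amdgpu_cs_chunk_ib structs from amdgpu_drm.h and
libdrm's amdgpu_cs_submit_raw, with error handling and the fence and
dependency chunks omitted):

   /* Assumes a valid device, context, and BO list, and an IB already
    * written at GPU address ib_va containing num_dw dwords. */
   struct drm_amdgpu_cs_chunk_ib ib_info = {0};
   struct drm_amdgpu_cs_chunk chunk;
   uint64_t seq_no;
   int r;

   ib_info.ip_type = AMDGPU_HW_IP_GFX;
   ib_info.va_start = ib_va;
   ib_info.ib_bytes = num_dw * 4;  /* the kernel expects bytes, not dwords */

   chunk.chunk_id = AMDGPU_CHUNK_ID_IB;
   chunk.length_dw = sizeof(ib_info) / 4;
   chunk.chunk_data = (uintptr_t)&ib_info;

   r = amdgpu_cs_submit_raw(dev, ctx, bo_list, 1, &chunk, &seq_no);
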
 src/gallium/winsys/amdgpu/drm/amdgpu_cs.c | 165 ++++++++++++++++--------------
 src/gallium/winsys/amdgpu/drm/amdgpu_cs.h |   5 +-
 2 files changed, 93 insertions(+), 77 deletions(-)

diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
index 1d7ea34..0f9c1fa 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
@@ -26,21 +26,20 @@
  * of the Software.
  */
 /*
  * Authors:
  *      Marek Olšák <maraeo at gmail.com>
  */
 
 #include "amdgpu_cs.h"
 #include "os/os_time.h"
 #include <stdio.h>
-#include <amdgpu_drm.h>
 
 #include "amd/common/sid.h"
 
 DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", false)
 
 /* FENCES */
 
 static struct pipe_fence_handle *
 amdgpu_fence_create(struct amdgpu_ctx *ctx, unsigned ip_type,
                     unsigned ip_instance, unsigned ring)
@@ -52,26 +51,26 @@ amdgpu_fence_create(struct amdgpu_ctx *ctx, unsigned ip_type,
    fence->fence.context = ctx->ctx;
    fence->fence.ip_type = ip_type;
    fence->fence.ip_instance = ip_instance;
    fence->fence.ring = ring;
    fence->submission_in_progress = true;
    p_atomic_inc(&ctx->refcount);
    return (struct pipe_fence_handle *)fence;
 }
 
 static void amdgpu_fence_submitted(struct pipe_fence_handle *fence,
-				struct amdgpu_cs_request* request,
-				uint64_t *user_fence_cpu_address)
+                                   uint64_t seq_no,
+                                   uint64_t *user_fence_cpu_address)
 {
    struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
 
-   rfence->fence.fence = request->seq_no;
+   rfence->fence.fence = seq_no;
    rfence->user_fence_cpu_address = user_fence_cpu_address;
    rfence->submission_in_progress = false;
 }
 
 static void amdgpu_fence_signalled(struct pipe_fence_handle *fence)
 {
    struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
 
    rfence->signalled = true;
    rfence->submission_in_progress = false;
@@ -147,23 +146,23 @@ amdgpu_cs_get_next_fence(struct radeon_winsys_cs *rcs)
 
    if (debug_get_option_noop())
       return NULL;
 
    if (cs->next_fence) {
       amdgpu_fence_reference(&fence, cs->next_fence);
       return fence;
    }
 
    fence = amdgpu_fence_create(cs->ctx,
-                               cs->csc->request.ip_type,
-                               cs->csc->request.ip_instance,
-                               cs->csc->request.ring);
+                               cs->csc->ib[IB_MAIN].ip_type,
+                               cs->csc->ib[IB_MAIN].ip_instance,
+                               cs->csc->ib[IB_MAIN].ring);
    if (!fence)
       return NULL;
 
    amdgpu_fence_reference(&cs->next_fence, fence);
    return fence;
 }
 
 /* CONTEXTS */
 
 static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *ws)
@@ -251,23 +250,23 @@ amdgpu_ctx_query_reset_status(struct radeon_winsys_ctx *rwctx)
    case AMDGPU_CTX_NO_RESET:
    default:
       return PIPE_NO_RESET;
    }
 }
 
 /* COMMAND SUBMISSION */
 
 static bool amdgpu_cs_has_user_fence(struct amdgpu_cs_context *cs)
 {
-   return cs->request.ip_type != AMDGPU_HW_IP_UVD &&
-          cs->request.ip_type != AMDGPU_HW_IP_VCE &&
-          cs->request.ip_type != AMDGPU_HW_IP_VCN_DEC;
+   return cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_UVD &&
+          cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_VCE &&
+          cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_VCN_DEC;
 }
 
 static bool amdgpu_cs_has_chaining(struct amdgpu_cs *cs)
 {
    return cs->ctx->ws->info.chip_class >= CIK &&
           cs->ring_type == RING_GFX;
 }
 
 static unsigned amdgpu_cs_epilog_dws(enum ring_type ring_type)
 {
@@ -611,21 +610,21 @@ static unsigned amdgpu_ib_max_submit_dwords(enum ib_type ib_type)
 
 static bool amdgpu_get_new_ib(struct radeon_winsys *ws, struct amdgpu_cs *cs,
                               enum ib_type ib_type)
 {
    struct amdgpu_winsys *aws = (struct amdgpu_winsys*)ws;
    /* Small IBs are better than big IBs, because the GPU goes idle quicker
     * and there is less waiting for buffers and fences. Proof:
     *   http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
     */
    struct amdgpu_ib *ib = NULL;
-   struct amdgpu_cs_ib_info *info = &cs->csc->ib[ib_type];
+   struct drm_amdgpu_cs_chunk_ib *info = &cs->csc->ib[ib_type];
    unsigned ib_size = 0;
 
    switch (ib_type) {
    case IB_MAIN:
       ib = &cs->main;
       ib_size = 4 * 1024 * 4;
       break;
    default:
       unreachable("unhandled IB type");
    }
@@ -643,24 +642,25 @@ static bool amdgpu_get_new_ib(struct radeon_winsys *ws, struct amdgpu_cs *cs,
    ib->base.current.cdw = 0;
    ib->base.current.buf = NULL;
 
    /* Allocate a new buffer for IBs if the current buffer is all used. */
    if (!ib->big_ib_buffer ||
        ib->used_ib_space + ib_size > ib->big_ib_buffer->size) {
       if (!amdgpu_ib_new_buffer(aws, ib))
          return false;
    }
 
-   info->ib_mc_address = amdgpu_winsys_bo(ib->big_ib_buffer)->va +
-                         ib->used_ib_space;
-   info->size = 0;
-   ib->ptr_ib_size = &info->size;
+   info->va_start = amdgpu_winsys_bo(ib->big_ib_buffer)->va + ib->used_ib_space;
+   info->ib_bytes = 0;
+   /* ib_bytes is in dwords and the conversion to bytes will be done before
+    * the CS ioctl. */
+   ib->ptr_ib_size = &info->ib_bytes;
 
    amdgpu_cs_add_buffer(&cs->main.base, ib->big_ib_buffer,
                         RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);
 
    ib->base.current.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);
 
    ib_size = ib->big_ib_buffer->size - ib->used_ib_space;
    ib->base.current.max_dw = ib_size / 4 - amdgpu_cs_epilog_dws(cs->ring_type);
    return true;
 }
@@ -670,51 +670,47 @@ static void amdgpu_ib_finalize(struct amdgpu_ib *ib)
    *ib->ptr_ib_size |= ib->base.current.cdw;
    ib->used_ib_space += ib->base.current.cdw * 4;
    ib->max_ib_size = MAX2(ib->max_ib_size, ib->base.prev_dw + ib->base.current.cdw);
 }
 
 static bool amdgpu_init_cs_context(struct amdgpu_cs_context *cs,
                                    enum ring_type ring_type)
 {
    switch (ring_type) {
    case RING_DMA:
-      cs->request.ip_type = AMDGPU_HW_IP_DMA;
+      cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_DMA;
       break;
 
    case RING_UVD:
-      cs->request.ip_type = AMDGPU_HW_IP_UVD;
+      cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_UVD;
       break;
 
    case RING_VCE:
-      cs->request.ip_type = AMDGPU_HW_IP_VCE;
+      cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCE;
       break;
 
    case RING_COMPUTE:
-      cs->request.ip_type = AMDGPU_HW_IP_COMPUTE;
+      cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_COMPUTE;
       break;
 
    case RING_VCN_DEC:
-      cs->request.ip_type = AMDGPU_HW_IP_VCN_DEC;
+      cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCN_DEC;
       break;
 
    default:
    case RING_GFX:
-      cs->request.ip_type = AMDGPU_HW_IP_GFX;
+      cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_GFX;
       break;
    }
 
    memset(cs->buffer_indices_hashlist, -1, sizeof(cs->buffer_indices_hashlist));
    cs->last_added_bo = NULL;
-
-   cs->request.number_of_ibs = 1;
-   cs->request.ibs = &cs->ib[IB_MAIN];
-
    return true;
 }
 
 static void amdgpu_cs_context_cleanup(struct amdgpu_cs_context *cs)
 {
    unsigned i;
 
    for (i = 0; i < cs->num_real_buffers; i++) {
       p_atomic_dec(&cs->real_buffers[i].bo->num_cs_references);
       amdgpu_winsys_bo_reference(&cs->real_buffers[i].bo, NULL);
@@ -767,20 +763,25 @@ amdgpu_cs_create(struct radeon_winsys_ctx *rwctx,
       return NULL;
    }
 
    util_queue_fence_init(&cs->flush_completed);
 
    cs->ctx = ctx;
    cs->flush_cs = flush;
    cs->flush_data = flush_ctx;
    cs->ring_type = ring_type;
 
+   struct amdgpu_cs_fence_info fence_info;
+   fence_info.handle = cs->ctx->user_fence_bo;
+   fence_info.offset = cs->ring_type;
+   amdgpu_cs_chunk_fence_info_to_data(&fence_info, (void*)&cs->fence_chunk);
+
    cs->main.ib_type = IB_MAIN;
 
    if (!amdgpu_init_cs_context(&cs->csc1, ring_type)) {
       FREE(cs);
       return NULL;
    }
 
    if (!amdgpu_init_cs_context(&cs->csc2, ring_type)) {
       amdgpu_destroy_cs_context(&cs->csc1);
       FREE(cs);
@@ -921,23 +922,23 @@ static unsigned add_fence_dependency_entry(struct amdgpu_cs_context *cs)
    }
    return idx;
 }
 
 static bool is_noop_fence_dependency(struct amdgpu_cs *acs,
                                      struct amdgpu_fence *fence)
 {
    struct amdgpu_cs_context *cs = acs->csc;
 
    if (fence->ctx == acs->ctx &&
-       fence->fence.ip_type == cs->request.ip_type &&
-       fence->fence.ip_instance == cs->request.ip_instance &&
-       fence->fence.ring == cs->request.ring)
+       fence->fence.ip_type == cs->ib[IB_MAIN].ip_type &&
+       fence->fence.ip_instance == cs->ib[IB_MAIN].ip_instance &&
+       fence->fence.ring == cs->ib[IB_MAIN].ring)
       return true;
 
    return amdgpu_fence_wait((void *)fence, 0, false);
 }
 
 static void amdgpu_cs_add_fence_dependency(struct radeon_winsys_cs *rws,
                                            struct pipe_fence_handle *pfence)
 {
    struct amdgpu_cs *acs = amdgpu_cs(rws);
    struct amdgpu_cs_context *cs = acs->csc;
@@ -1084,49 +1085,23 @@ static bool amdgpu_add_sparse_backing_buffers(struct amdgpu_cs_context *cs)
 
    return true;
 }
 
 void amdgpu_cs_submit_ib(void *job, int thread_index)
 {
    struct amdgpu_cs *acs = (struct amdgpu_cs*)job;
    struct amdgpu_winsys *ws = acs->ctx->ws;
    struct amdgpu_cs_context *cs = acs->cst;
    int i, r;
-   struct amdgpu_cs_fence *dependencies = NULL;
-
-   /* Set dependencies (input fences). */
-   if (cs->num_fence_dependencies) {
-      dependencies = alloca(sizeof(dependencies[0]) *
-                            cs->num_fence_dependencies);
-      unsigned num = 0;
-
-      for (i = 0; i < cs->num_fence_dependencies; i++) {
-         struct amdgpu_fence *fence =
-            (struct amdgpu_fence*)cs->fence_dependencies[i];
-
-         /* Past fences can't be unsubmitted because we have only 1 CS thread. */
-         assert(!fence->submission_in_progress);
-         memcpy(&dependencies[num++], &fence->fence, sizeof(dependencies[0]));
-      }
-      cs->request.dependencies = dependencies;
-      cs->request.number_of_dependencies = num;
-   } else {
-      cs->request.dependencies = NULL;
-      cs->request.number_of_dependencies = 0;
-   }
-
-   /* Set the output fence. */
-   cs->request.fence_info.handle = NULL;
-   if (amdgpu_cs_has_user_fence(cs)) {
-	cs->request.fence_info.handle = acs->ctx->user_fence_bo;
-	cs->request.fence_info.offset = acs->ring_type;
-   }
+   amdgpu_bo_list_handle bo_list = NULL;
+   uint64_t seq_no = 0;
+   bool has_user_fence = amdgpu_cs_has_user_fence(cs);
 
    /* Create the buffer list.
     * Use a buffer list containing all allocated buffers if requested.
     */
    if (ws->debug_all_bos) {
       struct amdgpu_winsys_bo *bo;
       amdgpu_bo_handle *handles;
       unsigned num = 0;
 
       mtx_lock(&ws->global_bo_list_lock);
@@ -1138,22 +1113,21 @@ void amdgpu_cs_submit_ib(void *job, int thread_index)
          cs->error_code = -ENOMEM;
          return;
       }
 
       LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, u.real.global_list_item) {
          assert(num < ws->num_buffers);
          handles[num++] = bo->bo;
       }
 
       r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
-                                handles, NULL,
-                                &cs->request.resources);
+                                handles, NULL, &bo_list);
       free(handles);
       mtx_unlock(&ws->global_bo_list_lock);
    } else {
       unsigned num_handles;
 
       if (!amdgpu_add_sparse_backing_buffers(cs)) {
          r = -ENOMEM;
          goto bo_list_error;
       }
 
@@ -1183,68 +1157,109 @@ void amdgpu_cs_submit_ib(void *job, int thread_index)
          cs->handles[num_handles] = buffer->bo->bo;
          cs->flags[num_handles] = (util_last_bit64(buffer->u.real.priority_usage) - 1) / 4;
 	 ++num_handles;
       }
 
       if (acs->ring_type == RING_GFX)
          ws->gfx_bo_list_counter += cs->num_real_buffers;
 
       if (num_handles) {
          r = amdgpu_bo_list_create(ws->dev, num_handles,
-                                   cs->handles, cs->flags,
-                                   &cs->request.resources);
+                                   cs->handles, cs->flags, &bo_list);
       } else {
          r = 0;
-	 cs->request.resources = 0;
       }
    }
 bo_list_error:
 
    if (r) {
       fprintf(stderr, "amdgpu: buffer list creation failed (%d)\n", r);
-      cs->request.resources = NULL;
       amdgpu_fence_signalled(cs->fence);
       cs->error_code = r;
       goto cleanup;
    }
 
-   if (acs->ctx->num_rejected_cs)
+   if (acs->ctx->num_rejected_cs) {
       r = -ECANCELED;
-   else
-      r = amdgpu_cs_submit(acs->ctx->ctx, 0, &cs->request, 1);
+   } else {
+      struct drm_amdgpu_cs_chunk chunks[3];
+      unsigned num_chunks = 0;
+
+      /* Convert from dwords to bytes. */
+      cs->ib[IB_MAIN].ib_bytes *= 4;
+
+      /* IB */
+      chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_IB;
+      chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
+      chunks[num_chunks].chunk_data = (uintptr_t)&cs->ib;
+      num_chunks++;
+
+      /* Fence */
+      if (has_user_fence) {
+         chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_FENCE;
+         chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
+         chunks[num_chunks].chunk_data = (uintptr_t)&acs->fence_chunk;
+         num_chunks++;
+      }
+
+      /* Dependencies */
+      unsigned num_dependencies = cs->num_fence_dependencies;
+      if (num_dependencies) {
+         struct drm_amdgpu_cs_chunk_dep *dep_chunk =
+            alloca(num_dependencies * sizeof(*dep_chunk));
+
+         for (unsigned i = 0; i < num_dependencies; i++) {
+            struct amdgpu_fence *fence =
+               (struct amdgpu_fence*)cs->fence_dependencies[i];
+
+            assert(!fence->submission_in_progress);
+            amdgpu_cs_chunk_fence_to_dep(&fence->fence, &dep_chunk[i]);
+         }
+
+         chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
+         chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4 *
+                                        num_dependencies;
+         chunks[num_chunks].chunk_data = (uintptr_t)dep_chunk;
+         num_chunks++;
+      }
+      assert(num_chunks <= ARRAY_SIZE(chunks));
+
+      r = amdgpu_cs_submit_raw(ws->dev, acs->ctx->ctx, bo_list,
+                               num_chunks, chunks, &seq_no);
+   }
 
    cs->error_code = r;
    if (r) {
       if (r == -ENOMEM)
          fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
       else if (r == -ECANCELED)
          fprintf(stderr, "amdgpu: The CS has been cancelled because the context is lost.\n");
       else
          fprintf(stderr, "amdgpu: The CS has been rejected, "
                  "see dmesg for more information (%i).\n", r);
 
       amdgpu_fence_signalled(cs->fence);
 
       acs->ctx->num_rejected_cs++;
       ws->num_total_rejected_cs++;
    } else {
       /* Success. */
       uint64_t *user_fence = NULL;
-      if (amdgpu_cs_has_user_fence(cs))
-         user_fence = acs->ctx->user_fence_cpu_address_base +
-                      cs->request.fence_info.offset;
-      amdgpu_fence_submitted(cs->fence, &cs->request, user_fence);
+
+      if (has_user_fence)
+         user_fence = acs->ctx->user_fence_cpu_address_base + acs->ring_type;
+      amdgpu_fence_submitted(cs->fence, seq_no, user_fence);
    }
 
    /* Cleanup. */
-   if (cs->request.resources)
-      amdgpu_bo_list_destroy(cs->request.resources);
+   if (bo_list)
+      amdgpu_bo_list_destroy(bo_list);
 
 cleanup:
    for (i = 0; i < cs->num_real_buffers; i++)
       p_atomic_dec(&cs->real_buffers[i].bo->num_active_ioctls);
    for (i = 0; i < cs->num_slab_buffers; i++)
       p_atomic_dec(&cs->slab_buffers[i].bo->num_active_ioctls);
    for (i = 0; i < cs->num_sparse_buffers; i++)
       p_atomic_dec(&cs->sparse_buffers[i].bo->num_active_ioctls);
 
    amdgpu_cs_context_cleanup(cs);
@@ -1316,23 +1331,23 @@ static int amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
       amdgpu_ib_finalize(&cs->main);
 
       /* Create a fence. */
       amdgpu_fence_reference(&cur->fence, NULL);
       if (cs->next_fence) {
          /* just move the reference */
          cur->fence = cs->next_fence;
          cs->next_fence = NULL;
       } else {
          cur->fence = amdgpu_fence_create(cs->ctx,
-                                          cur->request.ip_type,
-                                          cur->request.ip_instance,
-                                          cur->request.ring);
+                                          cur->ib[IB_MAIN].ip_type,
+                                          cur->ib[IB_MAIN].ip_instance,
+                                          cur->ib[IB_MAIN].ring);
       }
       if (fence)
          amdgpu_fence_reference(fence, cur->fence);
 
       amdgpu_cs_sync_flush(rcs);
 
       /* Prepare buffers.
        *
        * This fence must be held until the submission is queued to ensure
        * that the order of fence dependency updates matches the order of
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.h b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.h
index 8f5c336..de00912 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.h
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.h
@@ -27,20 +27,21 @@
 /*
  * Authors:
  *      Marek Olšák <maraeo at gmail.com>
  */
 
 #ifndef AMDGPU_CS_H
 #define AMDGPU_CS_H
 
 #include "amdgpu_bo.h"
 #include "util/u_memory.h"
+#include <amdgpu_drm.h>
 
 struct amdgpu_ctx {
    struct amdgpu_winsys *ws;
    amdgpu_context_handle ctx;
    amdgpu_bo_handle user_fence_bo;
    uint64_t *user_fence_cpu_address_base;
    int refcount;
    unsigned initial_num_total_rejected_cs;
    unsigned num_rejected_cs;
 };
@@ -69,22 +70,21 @@ struct amdgpu_ib {
    /* A buffer out of which new IBs are allocated. */
    struct pb_buffer        *big_ib_buffer;
    uint8_t                 *ib_mapped;
    unsigned                used_ib_space;
    unsigned                max_ib_size;
    uint32_t                *ptr_ib_size;
    enum ib_type            ib_type;
 };
 
 struct amdgpu_cs_context {
-   struct amdgpu_cs_request    request;
-   struct amdgpu_cs_ib_info    ib[IB_NUM];
+   struct drm_amdgpu_cs_chunk_ib ib[IB_NUM];
 
    /* Buffers. */
    unsigned                    max_real_buffers;
    unsigned                    num_real_buffers;
    struct amdgpu_cs_buffer     *real_buffers;
 
    unsigned                    max_real_submit;
    amdgpu_bo_handle            *handles;
    uint8_t                     *flags;
 
@@ -110,20 +110,21 @@ struct amdgpu_cs_context {
    struct pipe_fence_handle    *fence;
 
    /* the error returned from cs_flush for non-async submissions */
    int                         error_code;
 };
 
 struct amdgpu_cs {
    struct amdgpu_ib main; /* must be first because this is inherited */
    struct amdgpu_ctx *ctx;
    enum ring_type ring_type;
+   struct drm_amdgpu_cs_chunk_fence fence_chunk;
 
    /* We flip between these two CS. While one is being consumed
     * by the kernel in another thread, the other one is being filled
     * by the pipe driver. */
    struct amdgpu_cs_context csc1;
    struct amdgpu_cs_context csc2;
    /* The currently-used CS. */
    struct amdgpu_cs_context *csc;
    /* The CS being currently-owned by the other thread. */
    struct amdgpu_cs_context *cst;
-- 
2.7.4