[PATCH 2/2] amdgpu: add public bo list interface v2
From: Christian König <deathsimple@vodafone.de>
Date: Thu Apr 23 08:53:31 PDT 2015
From: Christian König <christian.koenig@amd.com>
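
Expose creation and destruction of buffer object lists as a public
interface, so that clients can build a BO list once and reuse it for
any number of command submissions, instead of the CS code creating and
destroying a kernel BO list for each submit.
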
v2: clean up comments and function parameters
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
---
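For reviewers, a minimal usage sketch of the new interface (not part of
the patch). It assumes an initialized device and context and a filled
struct amdgpu_cs_ib_info, mirroring the tests below; device_handle,
context_handle, ib_result_handle and ib_info are placeholder names and
error handling is trimmed:

    amdgpu_bo_handle resources[] = { ib_result_handle };
    amdgpu_bo_list_handle bo_list;
    uint64_t fence;
    int r;

    /* Build the kernel BO list once; NULL means priority 0 for every
     * BO, a uint8_t array gives per-BO priorities instead. */
    r = amdgpu_bo_list_create(device_handle, 1, resources, NULL, &bo_list);

    /* The request now carries the list handle instead of a handle
     * array plus optional flags; unset fields default to 0. */
    struct amdgpu_cs_request ibs_request = {
        .ip_type = AMDGPU_HW_IP_GFX,
        .number_of_ibs = 1,
        .ibs = &ib_info,
        .resources = bo_list,
    };

    r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1, &fence);

    /* The same list can be reused for any number of submissions
     * before it is destroyed. */
    r = amdgpu_bo_list_destroy(bo_list);
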
amdgpu/amdgpu.h | 51 ++++++++++++++++++++++++------
amdgpu/amdgpu_bo.c | 61 ++++++++++++++++++++++++++++++++++
amdgpu/amdgpu_cs.c | 77 +++-------------------------------------------
amdgpu/amdgpu_internal.h | 6 ++++
tests/amdgpu/basic_tests.c | 8 +++--
tests/amdgpu/cs_tests.c | 12 ++++++--
6 files changed, 128 insertions(+), 87 deletions(-)
diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
index 7a85982..66e5df0 100644
--- a/amdgpu/amdgpu.h
+++ b/amdgpu/amdgpu.h
@@ -166,6 +166,11 @@ typedef struct amdgpu_context *amdgpu_context_handle;
typedef struct amdgpu_bo *amdgpu_bo_handle;
/**
+ * Define handle for list of BOs
+ */
+typedef struct amdgpu_bo_list *amdgpu_bo_list_handle;
+
+/**
* Define handle to be used when dealing with command
* buffers (a.k.a. ibs)
*
@@ -400,17 +405,9 @@ struct amdgpu_cs_request {
uint32_t ring;
/**
- * Specify number of resource handles passed.
- * Size of 'handles' array
- *
+ * List handle with resources used by this request.
*/
- uint32_t number_of_resources;
-
- /** Array of resources used by submission. */
- amdgpu_bo_handle *resources;
-
- /** Array of resources flags. This is optional and can be NULL. */
- uint8_t *resource_flags;
+ amdgpu_bo_list_handle resources;
/** Number of IBs to submit in the field ibs. */
uint32_t number_of_ibs;
@@ -788,6 +785,40 @@ int amdgpu_bo_wait_for_idle(amdgpu_bo_handle buf_handle,
uint64_t timeout_ns,
bool *buffer_busy);
+/**
+ * Creates a BO list handle for command submission.
+ *
+ * \param dev - \c [in] Device handle.
+ * See #amdgpu_device_initialize()
+ * \param number_of_resources - \c [in] Number of BOs in the list
+ * \param resources - \c [in] List of BO handles
+ * \param resource_prios - \c [in] Optional priority for each handle
+ * \param result - \c [out] Created BO list handle
+ *
+ * \return 0 on success\n
+ * >0 - AMD specific error code\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_list_destroy()
+*/
+int amdgpu_bo_list_create(amdgpu_device_handle dev,
+ uint32_t number_of_resources,
+ amdgpu_bo_handle *resources,
+ uint8_t *resource_prios,
+ amdgpu_bo_list_handle *result);
+
+/**
+ * Destroys a BO list handle.
+ *
+ * \param handle - \c [in] BO list handle.
+ *
+ * \return 0 on success\n
+ * >0 - AMD specific error code\n
+ * <0 - Negative POSIX Error code
+ *
+ * \sa amdgpu_bo_list_create()
+*/
+int amdgpu_bo_list_destroy(amdgpu_bo_list_handle handle);
/*
* Special GPU Resources
diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
index ce7e9d1..e5744d6 100644
--- a/amdgpu/amdgpu_bo.c
+++ b/amdgpu/amdgpu_bo.c
@@ -620,3 +620,64 @@ int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
info->virtual_mc_base_address = bo->virtual_mc_base_address;
return r;
}
+
+int amdgpu_bo_list_create(amdgpu_device_handle dev,
+ uint32_t number_of_resources,
+ amdgpu_bo_handle *resources,
+ uint8_t *resource_prios,
+ amdgpu_bo_list_handle *result)
+{
+ struct drm_amdgpu_bo_list_entry *list;
+ union drm_amdgpu_bo_list args;
+ unsigned i;
+ int r;
+
+ list = alloca(sizeof(struct drm_amdgpu_bo_list_entry) * number_of_resources);
+
+ memset(&args, 0, sizeof(args));
+ args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
+ args.in.bo_number = number_of_resources;
+ args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
+ args.in.bo_info_ptr = (uint64_t)(uintptr_t)list;
+
+ for (i = 0; i < number_of_resources; i++) {
+ list[i].bo_handle = resources[i]->handle;
+ if (resource_prios)
+ list[i].bo_priority = resource_prios[i];
+ else
+ list[i].bo_priority = 0;
+ }
+
+	*result = calloc(1, sizeof(struct amdgpu_bo_list));
+	if (!*result)
+		return -ENOMEM;
+
+	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
+				&args, sizeof(args));
+	if (r) {
+		free(*result);
+		return r;
+	}
+
+	(*result)->dev = dev;
+	(*result)->handle = args.out.list_handle;
+	return 0;
+}
+
+int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
+{
+ union drm_amdgpu_bo_list args;
+ int r;
+
+ memset(&args, 0, sizeof(args));
+ args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
+ args.in.list_handle = list->handle;
+
+ r = drmCommandWriteRead(list->dev->fd, DRM_AMDGPU_BO_LIST,
+ &args, sizeof(args));
+
+ if (!r)
+ free(list);
+
+ return r;
+}
diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
index d6b4b2d..8a473a1 100644
--- a/amdgpu/amdgpu_cs.c
+++ b/amdgpu/amdgpu_cs.c
@@ -611,64 +611,6 @@ int amdgpu_cs_ctx_free(amdgpu_context_handle context)
return r;
}
-static int amdgpu_cs_create_bo_list(amdgpu_context_handle context,
- struct amdgpu_cs_request *request,
- amdgpu_ib_handle fence_ib,
- uint32_t *handle)
-{
- struct drm_amdgpu_bo_list_entry *list;
- union drm_amdgpu_bo_list args;
- unsigned num_resources;
- unsigned i;
- int r;
-
- num_resources = request->number_of_resources;
- if (fence_ib)
- ++num_resources;
-
- list = alloca(sizeof(struct drm_amdgpu_bo_list_entry) * num_resources);
-
- memset(&args, 0, sizeof(args));
- args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
- args.in.bo_number = num_resources;
- args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
- args.in.bo_info_ptr = (uint64_t)(uintptr_t)list;
-
- for (i = 0; i < request->number_of_resources; i++) {
- list[i].bo_handle = request->resources[i]->handle;
- if (request->resource_flags)
- list[i].bo_priority = request->resource_flags[i];
- else
- list[i].bo_priority = 0;
- }
-
- if (fence_ib)
- list[i].bo_handle = fence_ib->buf_handle->handle;
-
- r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_BO_LIST,
- &args, sizeof(args));
- if (r)
- return r;
-
- *handle = args.out.list_handle;
- return 0;
-}
-
-static int amdgpu_cs_free_bo_list(amdgpu_context_handle context, uint32_t handle)
-{
- union drm_amdgpu_bo_list args;
- int r;
-
- memset(&args, 0, sizeof(args));
- args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
- args.in.list_handle = handle;
-
- r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_BO_LIST,
- &args, sizeof(args));
-
- return r;
-}
-
static uint32_t amdgpu_cs_fence_index(unsigned ip, unsigned ring)
{
return ip * AMDGPU_CS_MAX_RINGS + ring;
@@ -702,11 +644,10 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
if (ibs_request->number_of_ibs > AMDGPU_CS_MAX_IBS_PER_SUBMIT)
return -EINVAL;
- size = (ibs_request->number_of_ibs + 1) * ((sizeof(uint64_t) +
+ size = (ibs_request->number_of_ibs + 1) * (
+ sizeof(uint64_t) +
sizeof(struct drm_amdgpu_cs_chunk) +
- sizeof(struct drm_amdgpu_cs_chunk_data)) +
- ibs_request->number_of_resources + 1) *
- sizeof(struct drm_amdgpu_bo_list_entry);
+ sizeof(struct drm_amdgpu_cs_chunk_data));
chunk_array = malloc(size);
if (NULL == chunk_array)
return -ENOMEM;
@@ -718,6 +659,7 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
memset(&cs, 0, sizeof(cs));
cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
cs.in.ctx_id = context->id;
+ cs.in.bo_list_handle = ibs_request->resources->handle;
cs.in.num_chunks = ibs_request->number_of_ibs;
/* IB chunks */
for (i = 0; i < ibs_request->number_of_ibs; i++) {
@@ -740,11 +682,6 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
chunk_data[i].ib_data.flags = AMDGPU_IB_FLAG_CE;
}
- r = amdgpu_cs_create_bo_list(context, ibs_request, NULL,
- &cs.in.bo_list_handle);
- if (r)
- goto error_unlock;
-
pthread_mutex_lock(&context->sequence_mutex);
if (ibs_request->ip_type != AMDGPU_HW_IP_UVD &&
@@ -792,17 +729,11 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
pthread_mutex_unlock(&context->sequence_mutex);
- r = amdgpu_cs_free_bo_list(context, cs.in.bo_list_handle);
- if (r)
- goto error_free;
-
free(chunk_array);
return 0;
error_unlock:
pthread_mutex_unlock(&context->sequence_mutex);
-
-error_free:
free(chunk_array);
return r;
}
diff --git a/amdgpu/amdgpu_internal.h b/amdgpu/amdgpu_internal.h
index 55db645..6077e04 100644
--- a/amdgpu/amdgpu_internal.h
+++ b/amdgpu/amdgpu_internal.h
@@ -87,6 +87,12 @@ struct amdgpu_bo {
int cpu_map_count;
};
+struct amdgpu_bo_list {
+ struct amdgpu_device *dev;
+
+ uint32_t handle;
+};
+
/*
* There are three mutexes.
* To avoid deadlock, only hold the mutexes in this order:
diff --git a/tests/amdgpu/basic_tests.c b/tests/amdgpu/basic_tests.c
index c01a7f4..1f99235 100644
--- a/tests/amdgpu/basic_tests.c
+++ b/tests/amdgpu/basic_tests.c
@@ -306,9 +306,10 @@ static void amdgpu_sdma_test_exec_cs(amdgpu_context_handle context_handle,
ibs_request->ring = instance;
ibs_request->number_of_ibs = 1;
ibs_request->ibs = ib_info;
- ibs_request->number_of_resources = res_cnt;
- ibs_request->resources = resources;
+ r = amdgpu_bo_list_create(device_handle, res_cnt, resources,
+ NULL, &ibs_request->resources);
+ CU_ASSERT_EQUAL(r, 0);
CU_ASSERT_NOT_EQUAL(ibs_request, NULL);
@@ -317,6 +318,9 @@ static void amdgpu_sdma_test_exec_cs(amdgpu_context_handle context_handle,
ibs_request, 1, &fence_status.fence);
CU_ASSERT_EQUAL(r, 0);
+ r = amdgpu_bo_list_destroy(ibs_request->resources);
+ CU_ASSERT_EQUAL(r, 0);
+
fence_status.ip_type = AMDGPU_HW_IP_DMA;
fence_status.ring = ibs_request->ring;
fence_status.context = context_handle;
diff --git a/tests/amdgpu/cs_tests.c b/tests/amdgpu/cs_tests.c
index c0d0c0d..47e8732 100644
--- a/tests/amdgpu/cs_tests.c
+++ b/tests/amdgpu/cs_tests.c
@@ -115,8 +115,12 @@ static int submit(unsigned ndw, unsigned ip)
ib_info.size = ndw;
ibs_request.ip_type = ip;
- ibs_request.number_of_resources = num_resources;
- ibs_request.resources = resources;
+
+ r = amdgpu_bo_list_create(device_handle, num_resources, resources,
+ NULL, &ibs_request.resources);
+ if (r)
+ return r;
+
ibs_request.number_of_ibs = 1;
ibs_request.ibs = &ib_info;
@@ -125,6 +129,10 @@ static int submit(unsigned ndw, unsigned ip)
if (r)
return r;
+ r = amdgpu_bo_list_destroy(ibs_request.resources);
+ if (r)
+ return r;
+
r = amdgpu_cs_alloc_ib(context_handle, IB_SIZE, &ib_result);
if (r)
return r;
--
1.9.1