[PATCH 1/2] drm/amdgpu/vcn: add unified queue ib test

Ruijing Dong ruijing.dong at amd.com
Tue Jun 7 18:36:32 UTC 2022


- add unified queue ib header/checksum helpers (layout sketched below)
- add unified queue ib test
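
The helpers prepend an 8-dw header to each IB. A sketch of what
amdgpu_vcn_unified_ring_ib_header() emits (values in <> vary per IB):

  dw0: 0x00000010   dw1: 0x30000002   dw2: <checksum>           dw3: <ib_pack_in_dw>
  dw4: 0x00000010   dw5: 0x30000001   dw6: <0x2 enc / 0x3 dec>  dw7: <ib_pack_in_dw * 4>

The checksum dword is only reserved by the header helper and is filled in
by amdgpu_vcn_unified_ring_ib_checksum() once the IB body has been written.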

Signed-off-by: Ruijing Dong <ruijing.dong at amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 102 +++++++++++++++++++++++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h |   1 +
 2 files changed, 100 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index aa7acfabf360..e62ff7db4736 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -329,6 +329,18 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 	return 0;
 }
 
+/* from VCN4 onwards, only the unified queue is used */
+bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	bool ret = false;
+
+	if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0))
+		ret = true;
+
+	return ret;
+}
+
 bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
 {
 	bool ret = false;
@@ -718,19 +730,55 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	return r;
 }
 
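+/*
+ * Build the unified (single) queue IB header: a 4-dw checksum packet followed
+ * by a 4-dw engine info packet.  The checksum dword is only reserved here and
+ * is filled in by amdgpu_vcn_unified_ring_ib_checksum() once the IB body has
+ * been written.
+ */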
+static uint32_t *amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib,
+					  uint32_t ib_pack_in_dw, bool enc)
+{
+	uint32_t *ib_checksum;
+
+	ib->ptr[ib->length_dw++] = 0x00000010; /* single queue checksum */
+	ib->ptr[ib->length_dw++] = 0x30000002;
+	ib_checksum = &ib->ptr[ib->length_dw++];
+	ib->ptr[ib->length_dw++] = ib_pack_in_dw;
+
+	ib->ptr[ib->length_dw++] = 0x00000010; /* engine info */
+	ib->ptr[ib->length_dw++] = 0x30000001;
+	ib->ptr[ib->length_dw++] = enc ? 0x2 : 0x3;
+	ib->ptr[ib->length_dw++] = ib_pack_in_dw * sizeof(uint32_t);
+
+	return ib_checksum;
+}
+
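+/*
+ * Fill in the checksum dword reserved by amdgpu_vcn_unified_ring_ib_header():
+ * the sum of the ib_pack_in_dw dwords starting at the engine info packet.
+ */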
+static void amdgpu_vcn_unified_ring_ib_checksum(uint32_t **ib_checksum,
+				        uint32_t ib_pack_in_dw)
+{
+	uint32_t i;
+	uint32_t checksum = 0;
+
+	for (i = 0; i < ib_pack_in_dw; i++)
+		checksum += *(*ib_checksum + 2 + i);
+
+	**ib_checksum = checksum;
+}
+
 static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 				      struct amdgpu_ib *ib_msg,
 				      struct dma_fence **fence)
 {
 	struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
-	const unsigned int ib_size_dw = 64;
+	unsigned int ib_size_dw = 64;
 	struct amdgpu_device *adev = ring->adev;
 	struct dma_fence *f = NULL;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
+	bool sq = amdgpu_vcn_using_unified_queue(ring);
+	uint32_t *ib_checksum;
+	uint32_t ib_pack_in_dw;
 	int i, r;
 
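+	/* unified queue header takes 8 extra dw (checksum + engine info) */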
+	if (sq)
+		ib_size_dw += 8;
+
 	r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4,
 				AMDGPU_IB_POOL_DIRECT, &job);
 	if (r)
@@ -739,6 +787,13 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 	ib = &job->ibs[0];
 	ib->length_dw = 0;
 
+	/* single queue headers */
+	if (sq) {
+		ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
+				+ 4 + 2; /* 4 dw engine info + 2 dw decode buffer header */
+		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
+	}
+
 	ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
 	ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER);
 	decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]);
@@ -752,6 +807,9 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
+	if (sq)
+		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
+
 	r = amdgpu_job_submit_direct(job, ring, &f);
 	if (r)
 		goto err_free;
@@ -838,13 +896,18 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 					 struct amdgpu_ib *ib_msg,
 					 struct dma_fence **fence)
 {
-	const unsigned ib_size_dw = 16;
+	unsigned ib_size_dw = 16;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct dma_fence *f = NULL;
+	uint32_t *ib_checksum = NULL;
 	uint64_t addr;
+	bool sq = amdgpu_vcn_using_unified_queue(ring);
 	int i, r;
 
+	if (sq)
+		ib_size_dw += 8;
+
 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
 					AMDGPU_IB_POOL_DIRECT, &job);
 	if (r)
@@ -854,6 +917,10 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 
 	ib->length_dw = 0;
+
+	if (sq)
+		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
+
 	ib->ptr[ib->length_dw++] = 0x00000018;
 	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
 	ib->ptr[ib->length_dw++] = handle;
@@ -873,6 +940,9 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
+	if (sq)
+		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
+
 	r = amdgpu_job_submit_direct(job, ring, &f);
 	if (r)
 		goto err;
@@ -892,13 +962,18 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 					  struct amdgpu_ib *ib_msg,
 					  struct dma_fence **fence)
 {
-	const unsigned ib_size_dw = 16;
+	unsigned ib_size_dw = 16;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct dma_fence *f = NULL;
+	uint32_t *ib_checksum = NULL;
 	uint64_t addr;
+	bool sq = amdgpu_vcn_using_unified_queue(ring);
 	int i, r;
 
+	if (sq)
+		ib_size_dw += 8;
+
 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
 					AMDGPU_IB_POOL_DIRECT, &job);
 	if (r)
@@ -908,6 +983,10 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 
 	ib->length_dw = 0;
+
+	if (sq)
+		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
+
 	ib->ptr[ib->length_dw++] = 0x00000018;
 	ib->ptr[ib->length_dw++] = 0x00000001;
 	ib->ptr[ib->length_dw++] = handle;
@@ -927,6 +1006,9 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
+	if (sq)
+		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
+
 	r = amdgpu_job_submit_direct(job, ring, &f);
 	if (r)
 		goto err;
@@ -977,6 +1059,20 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 	return r;
 }
 
+int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+	long r;
+
+	r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
+	if (r)
+		goto error;
+
+	r = amdgpu_vcn_dec_sw_ring_test_ib(ring, timeout);
+
+error:
+	return r;
+}
+
 enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
 {
 	switch(ring) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 6f90fcee0f9c..60c608144480 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -364,6 +364,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring);
 int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
 int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring);
 int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout);
+int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout);
 
 int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring);
 int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout);
-- 
2.25.1