[PATCH 23/29] drm/amdgpu: hw_init for each vcn instance
boyuan.zhang at amd.com
Tue Oct 29 17:42:34 UTC 2024
From: Boyuan Zhang <boyuan.zhang at amd.com>
Pass the instance parameter to amdgpu_vcn_hw_init(), and perform
hw init only for the given vcn instance instead of for all
vcn instances. Modify each vcn generation accordingly.
Signed-off-by: Boyuan Zhang <boyuan.zhang at amd.com>
Acked-by: Sunil Khatri <sunil.khatri at amd.com>
Acked-by: Leo Liu <leo.liu at amd.com>
Reviewed-by: Alex Deucher <alexander.deucher at amd.com>
---
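As background, a minimal self-contained sketch of the per-instance pattern
this series moves to. This is not the real amdgpu code: device_state,
the ip_block fields, and vcn_hw_init_one below are simplified stand-in
names for illustration only. The point is that the hw_init callback now
touches only the instance carried in ip_block->instance, while the caller
invokes the callback once per instance instead of the callback looping
over num_vcn_inst itself.

/*
 * Simplified model of the per-instance hw_init pattern (illustrative
 * stand-in types, not the actual amdgpu structures).
 */
#include <stdio.h>

#define MAX_VCN_INST 4

struct vcn_inst {
	int ring_ready;
};

struct device_state {
	int num_vcn_inst;
	unsigned int harvest_config;	/* bit i set => instance i harvested */
	struct vcn_inst inst[MAX_VCN_INST];
};

struct ip_block {
	struct device_state *adev;
	int instance;			/* which VCN instance this block owns */
};

/* After the change: initialize only the instance named by the ip_block. */
static int vcn_hw_init_one(struct ip_block *ip_block)
{
	struct device_state *adev = ip_block->adev;
	int inst = ip_block->instance;

	if (adev->harvest_config & (1u << inst))
		return 0;		/* harvested instances are skipped */

	adev->inst[inst].ring_ready = 1;
	printf("hw_init done for vcn instance %d\n", inst);
	return 0;
}

int main(void)
{
	struct device_state adev = { .num_vcn_inst = 2, .harvest_config = 0 };
	struct ip_block blocks[MAX_VCN_INST];
	int i, r;

	/*
	 * Before this series the loop below lived inside hw_init; now the
	 * caller iterates and hands each instance to the callback.
	 */
	for (i = 0; i < adev.num_vcn_inst; ++i) {
		blocks[i] = (struct ip_block){ .adev = &adev, .instance = i };
		r = vcn_hw_init_one(&blocks[i]);
		if (r)
			return r;
	}
	return 0;
}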
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 39 +++++++------
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 73 ++++++++++++-------------
drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 37 ++++++-------
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 67 +++++++++++------------
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c | 21 ++++---
drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c | 21 ++++---
6 files changed, 123 insertions(+), 135 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index d135e63e7301..8ce3cea6cf44 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -338,37 +338,36 @@ static int vcn_v2_5_hw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
- int i, j, r = 0;
+ int inst = ip_block->instance;
+ int i, r = 0;
if (amdgpu_sriov_vf(adev))
r = vcn_v2_5_sriov_start(adev);
- for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
- if (adev->vcn.harvest_config & (1 << j))
- continue;
+ if (adev->vcn.harvest_config & (1 << inst))
+ return r;
- if (amdgpu_sriov_vf(adev)) {
- adev->vcn.inst[j].ring_enc[0].sched.ready = true;
- adev->vcn.inst[j].ring_enc[1].sched.ready = false;
- adev->vcn.inst[j].ring_enc[2].sched.ready = false;
- adev->vcn.inst[j].ring_dec.sched.ready = true;
- } else {
+ if (amdgpu_sriov_vf(adev)) {
+ adev->vcn.inst[inst].ring_enc[0].sched.ready = true;
+ adev->vcn.inst[inst].ring_enc[1].sched.ready = false;
+ adev->vcn.inst[inst].ring_enc[2].sched.ready = false;
+ adev->vcn.inst[inst].ring_dec.sched.ready = true;
+ } else {
+
+ ring = &adev->vcn.inst[inst].ring_dec;
- ring = &adev->vcn.inst[j].ring_dec;
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ ring->doorbell_index, inst);
- adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
- ring->doorbell_index, j);
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ ring = &adev->vcn.inst[inst].ring_enc[i];
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
-
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- ring = &adev->vcn.inst[j].ring_enc[i];
- r = amdgpu_ring_test_helper(ring);
- if (r)
- return r;
- }
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index d00b7a7cbdce..36100c2612d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -345,8 +345,9 @@ static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
static int vcn_v3_0_hw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int inst = ip_block->instance;
struct amdgpu_ring *ring;
- int i, j, r;
+ int j, r;
if (amdgpu_sriov_vf(adev)) {
r = vcn_v3_0_start_sriov(adev);
@@ -354,57 +355,53 @@ static int vcn_v3_0_hw_init(struct amdgpu_ip_block *ip_block)
return r;
/* initialize VCN dec and enc ring buffers */
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->vcn.harvest_config & (1 << inst))
+ return 0;
+
+ ring = &adev->vcn.inst[inst].ring_dec;
+ if (amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, inst)) {
+ ring->sched.ready = false;
+ ring->no_scheduler = true;
+ dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
+ } else {
+ ring->wptr = 0;
+ ring->wptr_old = 0;
+ vcn_v3_0_dec_ring_set_wptr(ring);
+ ring->sched.ready = true;
+ }
- ring = &adev->vcn.inst[i].ring_dec;
- if (amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, i)) {
+ for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+ ring = &adev->vcn.inst[inst].ring_enc[j];
+ if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, inst)) {
ring->sched.ready = false;
ring->no_scheduler = true;
dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
} else {
ring->wptr = 0;
ring->wptr_old = 0;
- vcn_v3_0_dec_ring_set_wptr(ring);
+ vcn_v3_0_enc_ring_set_wptr(ring);
ring->sched.ready = true;
}
-
- for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
- ring = &adev->vcn.inst[i].ring_enc[j];
- if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
- ring->sched.ready = false;
- ring->no_scheduler = true;
- dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
- } else {
- ring->wptr = 0;
- ring->wptr_old = 0;
- vcn_v3_0_enc_ring_set_wptr(ring);
- ring->sched.ready = true;
- }
- }
}
- } else {
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ }
- ring = &adev->vcn.inst[i].ring_dec;
+ if (adev->vcn.harvest_config & (1 << inst))
+ return 0;
- adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
- ring->doorbell_index, i);
+ ring = &adev->vcn.inst[inst].ring_dec;
- r = amdgpu_ring_test_helper(ring);
- if (r)
- return r;
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ ring->doorbell_index, inst);
- for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
- ring = &adev->vcn.inst[i].ring_enc[j];
- r = amdgpu_ring_test_helper(ring);
- if (r)
- return r;
- }
- }
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+
+ for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+ ring = &adev->vcn.inst[inst].ring_enc[j];
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 7c3a62f84707..00ff7affc647 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -299,37 +299,34 @@ static int vcn_v4_0_hw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
- int i, r;
+ int inst = ip_block->instance;
+ int r;
if (amdgpu_sriov_vf(adev)) {
r = vcn_v4_0_start_sriov(adev);
if (r)
return r;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->vcn.harvest_config & (1 << inst))
+ return 0;
- ring = &adev->vcn.inst[i].ring_enc[0];
- ring->wptr = 0;
- ring->wptr_old = 0;
- vcn_v4_0_unified_ring_set_wptr(ring);
- ring->sched.ready = true;
- }
+ ring = &adev->vcn.inst[inst].ring_enc[0];
+ ring->wptr = 0;
+ ring->wptr_old = 0;
+ vcn_v4_0_unified_ring_set_wptr(ring);
+ ring->sched.ready = true;
} else {
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->vcn.harvest_config & (1 << inst))
+ return 0;
- ring = &adev->vcn.inst[i].ring_enc[0];
+ ring = &adev->vcn.inst[inst].ring_enc[0];
- adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
- ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i);
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * inst), inst);
- r = amdgpu_ring_test_helper(ring);
- if (r)
- return r;
- }
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 5a3de3dbc3c9..feb373a96cfb 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -257,49 +257,46 @@ static int vcn_v4_0_3_hw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
- int i, r, vcn_inst;
+ int inst = ip_block->instance;
+ int r = 0, vcn_inst;
if (amdgpu_sriov_vf(adev)) {
r = vcn_v4_0_3_start_sriov(adev);
if (r)
return r;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- ring = &adev->vcn.inst[i].ring_enc[0];
- ring->wptr = 0;
- ring->wptr_old = 0;
- vcn_v4_0_3_unified_ring_set_wptr(ring);
- ring->sched.ready = true;
- }
+ ring = &adev->vcn.inst[inst].ring_enc[0];
+ ring->wptr = 0;
+ ring->wptr_old = 0;
+ vcn_v4_0_3_unified_ring_set_wptr(ring);
+ ring->sched.ready = true;
} else {
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- vcn_inst = GET_INST(VCN, i);
- ring = &adev->vcn.inst[i].ring_enc[0];
-
- if (ring->use_doorbell) {
- adev->nbio.funcs->vcn_doorbell_range(
- adev, ring->use_doorbell,
- (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
- 9 * vcn_inst,
- adev->vcn.inst[i].aid_id);
-
- WREG32_SOC15(
- VCN, GET_INST(VCN, ring->me),
- regVCN_RB1_DB_CTRL,
- ring->doorbell_index
- << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
- VCN_RB1_DB_CTRL__EN_MASK);
-
- /* Read DB_CTRL to flush the write DB_CTRL command. */
- RREG32_SOC15(
- VCN, GET_INST(VCN, ring->me),
- regVCN_RB1_DB_CTRL);
- }
-
- r = amdgpu_ring_test_helper(ring);
- if (r)
- return r;
+ vcn_inst = GET_INST(VCN, inst);
+ ring = &adev->vcn.inst[inst].ring_enc[0];
+
+ if (ring->use_doorbell) {
+ adev->nbio.funcs->vcn_doorbell_range(
+ adev, ring->use_doorbell,
+ (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 9 * vcn_inst,
+ adev->vcn.inst[inst].aid_id);
+
+ WREG32_SOC15(
+ VCN, GET_INST(VCN, ring->me),
+ regVCN_RB1_DB_CTRL,
+ ring->doorbell_index
+ << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
+ VCN_RB1_DB_CTRL__EN_MASK);
+
+ /* Read DB_CTRL to flush the write DB_CTRL command. */
+ RREG32_SOC15(
+ VCN, GET_INST(VCN, ring->me),
+ regVCN_RB1_DB_CTRL);
}
+
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
}
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index 2c9f863c40b1..fb1e1d5bcdbe 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -268,21 +268,20 @@ static int vcn_v4_0_5_hw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
- int i, r;
+ int inst = ip_block->instance;
+ int r;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->vcn.harvest_config & (1 << inst))
+ return 0;
- ring = &adev->vcn.inst[i].ring_enc[0];
+ ring = &adev->vcn.inst[inst].ring_enc[0];
- adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
- ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i);
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * inst), inst);
- r = amdgpu_ring_test_helper(ring);
- if (r)
- return r;
- }
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index 9d67e884952a..137c3b452433 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -232,21 +232,20 @@ static int vcn_v5_0_0_hw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
- int i, r;
+ int inst = ip_block->instance;
+ int r;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (adev->vcn.harvest_config & (1 << inst))
+ return 0;
- ring = &adev->vcn.inst[i].ring_enc[0];
+ ring = &adev->vcn.inst[inst].ring_enc[0];
- adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
- ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i);
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * inst), inst);
- r = amdgpu_ring_test_helper(ring);
- if (r)
- return r;
- }
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
return 0;
}
--
2.34.1