[PATCH] drm/amdgpu: Fix SDMA RAP violations on Sienna Cichlid SRIOV
Zhang, Hawking
Hawking.Zhang at amd.com
Tue Sep 22 02:08:35 UTC 2020
[AMD Public Use]
Similar to the RLC....
If the engine is not allowed to be accessed from the guest (or most of the bare-metal programming sequence is not allowed in the guest environment), we should consider disabling it from a high level, instead of adding amdgpu_sriov_vf(adev) checks everywhere.
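For illustration, a minimal sketch of that idea (names reused from the patch below; the exact guest/host split shown here is hypothetical, not the actual patch): one SRIOV gate at the top of the start path, instead of per-register checks.

/*
 * Sketch only -- a single high-level SRIOV gate.  Function names
 * mirror the patch below; which registers the guest may program
 * is an assumption for illustration.
 */
static int sdma_v5_2_start(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev)) {
		/* bare-metal-only sequence: fw load, unhalt, preemption */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
			r = sdma_v5_2_load_microcode(adev);
			if (r)
				return r;
		}
		sdma_v5_2_enable(adev, true);
		sdma_v5_2_ctx_switch_enable(adev, true);
	}

	/* ring setup touches only registers the guest may program */
	return sdma_v5_2_gfx_resume(adev);
}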
Regards,
Hawking
-----Original Message-----
From: Khaire, Rohit <Rohit.Khaire at amd.com>
Sent: Tuesday, September 22, 2020 05:14
To: amd-gfx at lists.freedesktop.org
Cc: Tuikov, Luben <Luben.Tuikov at amd.com>; Koenig, Christian <Christian.Koenig at amd.com>; Deucher, Alexander <Alexander.Deucher at amd.com>; Xiao, Jack <Jack.Xiao at amd.com>; Zhang, Hawking <Hawking.Zhang at amd.com>; Xu, Feifei <Feifei.Xu at amd.com>; Wang, Kevin(Yang) <Kevin1.Wang at amd.com>; Yuan, Xiaojie <Xiaojie.Yuan at amd.com>; Li, Rong (Zero) <Rong.Li at amd.com>; Min, Frank <Frank.Min at amd.com>; Khaire, Rohit <Rohit.Khaire at amd.com>
Subject: [PATCH] drm/amdgpu: Fix SDMA RAP violations on Sienna Cichlid SRIOV
Signed-off-by: Rohit Khaire <rohit.khaire at amd.com>
---
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 128 ++++++++++++++-----------
1 file changed, 70 insertions(+), 58 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 34ccf376ee45..6fb5588fc0b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -541,7 +541,9 @@ static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
phase_quantum);
}
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
+ if (!amdgpu_sriov_vf(adev))
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL),
+ f32_cntl);
}
}
@@ -559,6 +561,9 @@ static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
u32 f32_cntl;
int i;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
if (!enable) {
sdma_v5_2_gfx_stop(adev);
sdma_v5_2_rlc_stop(adev);
@@ -596,7 +601,9 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
ring = &adev->sdma.instance[i].ring;
wb_offset = (ring->rptr_offs * 4);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+ if (!amdgpu_sriov_vf(adev))
+ WREG32(sdma_v5_2_get_reg_offset(adev, i,
+ mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
/* Set ring buffer size in dwords */
rb_bufsz = order_base_2(ring->ring_size / 4);
@@ -621,13 +628,16 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
lower_32_bits(wptr_gpu_addr));
WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
upper_32_bits(wptr_gpu_addr));
- wptr_poll_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i,
- mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
- wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
- SDMA0_GFX_RB_WPTR_POLL_CNTL,
- F32_POLL_ENABLE, 1);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
- wptr_poll_cntl);
+
+ if (!amdgpu_sriov_vf(adev)) {
+ wptr_poll_cntl = RREG32(sdma_v5_2_get_reg_offset(
+ adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+ SDMA0_GFX_RB_WPTR_POLL_CNTL,
+ F32_POLL_ENABLE, 1);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i,
+ mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl);
+ }
/* set the wb address whether it's enabled or not */
WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
@@ -673,30 +683,40 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
/* set minor_ptr_update to 0 after wptr programed */
WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
- /* set utc l1 enable flag always to 1 */
- temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
-
- /* enable MCBP */
- temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
-
- /* Set up RESP_MODE to non-copy addresses */
- temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
- temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
-
- /* program default cache read and write policy */
- temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
- /* clean read policy and write policy bits */
- temp &= 0xFF0FFF;
- temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
- (CACHE_WRITE_POLICY_L2__DEFAULT << 14) |
- 0x01000000);
- WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
-
if (!amdgpu_sriov_vf(adev)) {
+ /* set utc l1 enable flag always to 1 */
+ temp = RREG32(sdma_v5_2_get_reg_offset(adev, i,
+ mmSDMA0_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_CNTL,
+ UTC_L1_ENABLE, 1);
+
+ /* enable MCBP */
+ temp = REG_SET_FIELD(temp, SDMA0_CNTL,
+ MIDCMD_PREEMPT_ENABLE, 1);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i,
+ mmSDMA0_CNTL), temp);
+
+ /* Set up RESP_MODE to non-copy addresses */
+ temp = RREG32(sdma_v5_2_get_reg_offset(adev, i,
+ mmSDMA0_UTCL1_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL,
+ RESP_MODE, 3);
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL,
+ REDO_DELAY, 9);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i,
+ mmSDMA0_UTCL1_CNTL), temp);
+
+ /* program default cache read and write policy */
+ temp = RREG32(sdma_v5_2_get_reg_offset(adev, i,
+ mmSDMA0_UTCL1_PAGE));
+ /* clean read policy and write policy bits */
+ temp &= 0xFF0FFF;
+ temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
+ (CACHE_WRITE_POLICY_L2__DEFAULT << 14) |
+ 0x01000000);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i,
+ mmSDMA0_UTCL1_PAGE), temp);
+
/* unhalt engine */
temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
@@ -717,11 +737,6 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
ring->sched.ready = true;
- if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
- sdma_v5_2_ctx_switch_enable(adev, true);
- sdma_v5_2_enable(adev, true);
- }
-
r = amdgpu_ring_test_ring(ring);
if (r) {
ring->sched.ready = false;
@@ -804,30 +819,23 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
{
int r = 0;
- if (amdgpu_sriov_vf(adev)) {
- sdma_v5_2_ctx_switch_enable(adev, false);
- sdma_v5_2_enable(adev, false);
-
- /* set RB registers */
- r = sdma_v5_2_gfx_resume(adev);
- return r;
- }
+ if (!amdgpu_sriov_vf(adev)) {
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
+ r = sdma_v5_2_load_microcode(adev);
+ if (r)
+ return r;
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
- r = sdma_v5_2_load_microcode(adev);
- if (r)
- return r;
+ /* The value of mmSDMA_F32_CNTL is invalid the moment after loading fw */
+ if (amdgpu_emu_mode == 1)
+ msleep(1000);
+ }
- /* The value of mmSDMA_F32_CNTL is invalid the moment after loading fw */
- if (amdgpu_emu_mode == 1)
- msleep(1000);
+ /* unhalt the MEs */
+ sdma_v5_2_enable(adev, true);
+ /* enable sdma ring preemption */
+ sdma_v5_2_ctx_switch_enable(adev, true);
}
- /* unhalt the MEs */
- sdma_v5_2_enable(adev, true);
- /* enable sdma ring preemption */
- sdma_v5_2_ctx_switch_enable(adev, true);
-
/* start the gfx rings and rlc compute queues */
r = sdma_v5_2_gfx_resume(adev);
if (r)
@@ -1403,8 +1411,12 @@ static int sdma_v5_2_set_trap_irq_state(struct amdgpu_device *adev,
enum amdgpu_interrupt_state state)
{
u32 sdma_cntl;
+ u32 reg_offset;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
- u32 reg_offset = sdma_v5_2_get_reg_offset(adev, type, mmSDMA0_CNTL);
+ reg_offset = sdma_v5_2_get_reg_offset(adev, type, mmSDMA0_CNTL);
sdma_cntl = RREG32(reg_offset);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
--
2.17.1