[PATCH 3/4] drm/amdgpu: add sdma v5_2 soft reset
Victor Zhao
Victor.Zhao at amd.com
Thu Mar 10 06:11:54 UTC 2022
Enable SDMA v5.2 soft reset by implementing the check_soft_reset,
pre_soft_reset and post_soft_reset IP callbacks: check whether SDMA
needs the reset (the hung job ran on an sdma ring, or GRBM_STATUS2
reports the engine busy), freeze and halt the engine before the reset,
then unhalt it and resume the gfx rings afterwards. Also extend the
post-reset delay from 50us to 2ms.
Signed-off-by: Victor Zhao <Victor.Zhao at amd.com>
---
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 79 +++++++++++++++++++++++++-
1 file changed, 78 insertions(+), 1 deletion(-)
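
For reviewers, a minimal sketch (illustrative only, not part of this
patch) of the order in which the core driver is expected to drive the
four callbacks wired up at the end of the diff. The wrapper name
example_sdma_soft_reset_flow is hypothetical, and the two-argument
check_soft_reset signature is assumed from the earlier patches in this
series:

/* Hypothetical illustration only -- not part of this patch. */
static int example_sdma_soft_reset_flow(struct amdgpu_device *adev,
					struct amdgpu_job *job)
{
	int r;

	/* Skip the reset unless an sdma ring owns the hung job or
	 * GRBM_STATUS2 reports the SDMA engine busy.
	 */
	if (!sdma_v5_2_ip_funcs.check_soft_reset((void *)adev, job))
		return 0;

	/* Freeze and halt every SDMA instance before the reset. */
	r = sdma_v5_2_ip_funcs.pre_soft_reset((void *)adev);
	if (r)
		return r;

	/* Pulse GRBM_SOFT_RESET per SDMA instance. */
	r = sdma_v5_2_ip_funcs.soft_reset((void *)adev);
	if (r)
		return r;

	/* Unhalt the SDMA microcode and resume the gfx rings. */
	return sdma_v5_2_ip_funcs.post_soft_reset((void *)adev);
}
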
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 4d4d1aa51b8a..f9f978d8fe8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -806,6 +806,80 @@ static int sdma_v5_2_load_microcode(struct amdgpu_device *adev)
return 0;
}
+static bool sdma_v5_2_check_soft_reset(void *handle, struct amdgpu_job *job)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (job) {
+ if (!strncmp(job->base.sched->name, "sdma", 4))
+ return true;
+ else
+ return false;
+ } else {
+ u32 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
+
+ if (tmp == 0xffffffff)
+ return false;
+
+ if (tmp & GRBM_STATUS2__SDMA_BUSY_MASK)
+ return true;
+ else
+ return false;
+ }
+}
+
+static int sdma_v5_2_pre_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i, j;
+ uint32_t tmp, f32_cntl;
+
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE), 0x10);
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ for (j = 0; j < 10; j++) {
+ tmp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE));
+ DRM_DEBUG("SDMA%d_FREEZE=0x%x\n", i, tmp);
+ if (tmp & SDMA0_FREEZE__FROZEN_MASK) {
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE), 0);
+ break;
+ } else {
+ udelay(10);
+ }
+ }
+ if (j == 10) {
+ DRM_ERROR("SDMA%d_FREEZE frozen bit not set\n", i);
+ return -ETIMEDOUT;
+ }
+ }
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
+ }
+
+ return 0;
+}
+
+static int sdma_v5_2_post_soft_reset(void *handle)
+{
+ int i;
+ uint32_t f32_cntl;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
+ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
+ }
+
+ sdma_v5_2_gfx_resume(adev);
+ udelay(10);
+
+ return 0;
+}
+
static int sdma_v5_2_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -831,7 +905,7 @@ static int sdma_v5_2_soft_reset(void *handle)
WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- udelay(50);
+ mdelay(2);
}
return 0;
@@ -1655,6 +1729,9 @@ const struct amd_ip_funcs sdma_v5_2_ip_funcs = {
.resume = sdma_v5_2_resume,
.is_idle = sdma_v5_2_is_idle,
.wait_for_idle = sdma_v5_2_wait_for_idle,
+ .pre_soft_reset = sdma_v5_2_pre_soft_reset,
+ .post_soft_reset = sdma_v5_2_post_soft_reset,
+ .check_soft_reset = sdma_v5_2_check_soft_reset,
.soft_reset = sdma_v5_2_soft_reset,
.set_clockgating_state = sdma_v5_2_set_clockgating_state,
.set_powergating_state = sdma_v5_2_set_powergating_state,
--
2.25.1