[PATCH 08/10] drm/amdgpu: implement gfx8 pre/post soft reset
Chunming Zhou
David1.Zhou at amd.com
Wed Jul 13 10:32:20 UTC 2016
Change-Id: I5c3a5d637cb8e726e66cd3a6a00c53e6c849b44a
Signed-off-by: Chunming Zhou <David1.Zhou at amd.com>
---
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 80 +++++++++++++++++++++++++++++++++++
1 file changed, 80 insertions(+)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index e23fc64..7d4b514 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -5045,6 +5045,84 @@ static int gfx_v8_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
+/* Drain one compute ring's hardware queue descriptor (HQD) before a
+ * soft reset.  Selects the ring's SRBM bank, and if the HQD is still
+ * active, issues a dequeue request (type 2 -- presumably a drain;
+ * confirm against the CP spec) and polls CP_HQD_ACTIVE for up to
+ * adev->usec_timeout microseconds.  A timeout is not reported; the
+ * caller proceeds to disable the MEC regardless.
+ */
+static void gfx_v8_0_inactive_hqd(struct amdgpu_device *adev,
+				  struct amdgpu_ring *ring)
+{
+	int i;
+
+	vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+	if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
+		u32 tmp;
+		tmp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
+		tmp = REG_SET_FIELD(tmp, CP_HQD_DEQUEUE_REQUEST,
+				    DEQUEUE_REQ, 2);
+		WREG32(mmCP_HQD_DEQUEUE_REQUEST, tmp);
+		for (i = 0; i < adev->usec_timeout; i++) {
+			if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
+				break;
+			udelay(1);
+		}
+	}
+	/* Restore the default SRBM bank, matching gfx_v8_0_init_hqd();
+	 * otherwise later banked register accesses hit the last queue.
+	 */
+	vi_srbm_select(adev, 0, 0, 0, 0);
+}
+
+/* Quiesce the GFX block before a soft reset.  Stops the RLC
+ * unconditionally, then -- depending on which engines @reset_mask
+ * targets -- disables GFX CP parsing/prefetching and drains every
+ * compute ring's HQD before disabling the MEC.  The ordering (RLC
+ * stop first, HQD drain before MEC disable) follows the hardware
+ * sequence and must not be rearranged.  Always returns 0.
+ */
+static int gfx_v8_0_pre_soft_reset(void *handle, u32 reset_mask)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* stop the rlc */
+ gfx_v8_0_rlc_stop(adev);
+
+ if (reset_mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_CP))
+ /* Disable GFX parsing/prefetching */
+ gfx_v8_0_cp_gfx_enable(adev, false);
+
+ if (reset_mask & (AMDGPU_RESET_COMP1 | AMDGPU_RESET_COMP2 | AMDGPU_RESET_CP)) {
+ int i;
+
+ /* Drain each compute queue so the MEC can be disabled safely. */
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+ gfx_v8_0_inactive_hqd(adev, ring);
+ }
+
+ /* Disable MEC parsing/prefetching */
+ gfx_v8_0_cp_compute_enable(adev, false);
+ }
+
+ return 0;
+}
+
+/* Reset one compute ring's HQD state after a soft reset: clear any
+ * pending dequeue request and zero the queue read/write pointers,
+ * under the ring's SRBM bank selection.  The default bank (0,0,0,0)
+ * is restored before returning.
+ */
+static void gfx_v8_0_init_hqd(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+ WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
+ WREG32(mmCP_HQD_PQ_RPTR, 0);
+ WREG32(mmCP_HQD_PQ_WPTR, 0);
+ vi_srbm_select(adev, 0, 0, 0, 0);
+}
+
+/* Bring the GFX block back up after a soft reset.  Mirrors
+ * gfx_v8_0_pre_soft_reset(): resumes the GFX CP and/or re-initializes
+ * each compute ring's HQD before resuming the compute CP, as selected
+ * by @reset_mask, then restarts the RLC last.  Errors from the resume
+ * helpers are not propagated; always returns 0.
+ */
+static int gfx_v8_0_post_soft_reset(void *handle, u32 reset_mask)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (reset_mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_CP))
+ gfx_v8_0_cp_gfx_resume(adev);
+
+ if (reset_mask & (AMDGPU_RESET_COMP1 | AMDGPU_RESET_COMP2 | AMDGPU_RESET_CP)) {
+ int i;
+
+ /* Clear stale HQD state before restarting the MEC. */
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+ gfx_v8_0_init_hqd(adev, ring);
+ }
+
+ gfx_v8_0_cp_compute_resume(adev);
+ }
+ /* RLC comes back last, after both CP engines are running again. */
+ gfx_v8_0_rlc_start(adev);
+
+ return 0;
+}
+
static int gfx_v8_0_soft_reset(void *handle)
{
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
@@ -6368,6 +6446,8 @@ const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.resume = gfx_v8_0_resume,
.is_idle = gfx_v8_0_is_idle,
.wait_for_idle = gfx_v8_0_wait_for_idle,
+ .pre_soft_reset = gfx_v8_0_pre_soft_reset,
+ .post_soft_reset = gfx_v8_0_post_soft_reset,
.soft_reset = gfx_v8_0_soft_reset,
.set_clockgating_state = gfx_v8_0_set_clockgating_state,
.set_powergating_state = gfx_v8_0_set_powergating_state,
--
1.9.1
More information about the amd-gfx
mailing list