[PATCH 2/2] drm/amdgpu/gfx10: implement gfx queue reset via MMIO
Jesse Zhang <jesse.zhang at amd.com>
Thu Jan 9 06:28:07 UTC 2025
Implement the gfx10 kernel gfx queue (KGQ) reset via MMIO.

When an MMIO reset is requested, enter RLC safe mode, select the queue
through SRBM, request a per-queue reset by writing CP_VMID_RESET, and
poll CP_GFX_HQD_ACTIVE until the HQD deactivates. The existing
KIQ-based reset sequence is moved from gfx_v10_0_reset_kgq() into
gfx_v10_0_queue_reset(), so both reset methods share one entry point
and the caller only has to remap and test the ring afterwards.
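In outline, the new MMIO branch performs the following sequence (a
condensed sketch of the hunks below; locking, SRBM re-selection and
error handling are omitted here, and tmp is the CP_VMID_RESET mask
built with REG_SET_FIELD as in the diff):

    amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
    nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
    /* broadcast the write to all SEs, then request the queue reset */
    WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX,
                 1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT);
    WREG32_SOC15(GC, 0, mmCP_VMID_RESET, tmp);
    nv_grbm_select(adev, 0, 0, 0, 0);
    amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
    /* re-select the queue and poll CP_GFX_HQD_ACTIVE until it clears */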
Signed-off-by: Jesse Zhang <jesse.zhang at amd.com>
---
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 98 ++++++++++++++++++--------
1 file changed, 70 insertions(+), 28 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 89409cb7d195..aac250c121d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -9464,6 +9464,8 @@ static int gfx_v10_0_queue_reset(struct amdgpu_ring *ring, uint32_t vmid, bool u
struct amdgpu_ring *kiq_ring = &kiq->ring;
uint32_t queue_type = ring->funcs->type;
unsigned long flags;
+ uint32_t tmp;
+ uint64_t addr;
int i, r = 0;
if (use_mmio) {
@@ -9498,6 +9500,40 @@ static int gfx_v10_0_queue_reset(struct amdgpu_ring *ring, uint32_t vmid, bool u
nv_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ } else if (queue_type == AMDGPU_RING_TYPE_GFX) {
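+ /*
+ * MMIO path: quiesce the CP via RLC safe mode, select the queue
+ * through SRBM, then request a per-queue reset with CP_VMID_RESET.
+ */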
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
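+ /* Broadcast the register writes to all shader engines. */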
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX,
+ (uint32_t)(0x1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT));
+ tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
+ if (ring->pipe == 0)
+ tmp = REG_SET_FIELD(tmp, CP_VMID_RESET, PIPE0_QUEUES, 1 << ring->queue);
+ else
+ tmp = REG_SET_FIELD(tmp, CP_VMID_RESET, PIPE1_QUEUES, 1 << ring->queue);
+ WREG32_SOC15(GC, 0, mmCP_VMID_RESET, tmp);
+
+ nv_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+
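+ /* The HQD registers are per-queue, so re-select the queue before polling. */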
+ mutex_lock(&adev->srbm_mutex);
+ nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+ /* wait until the dequeue takes effect */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, mmCP_GFX_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout) {
+ dev_err(adev->dev, "failed to wait for gfx hqd to deactivate\n");
+ r = -ETIMEDOUT;
+ }
+
+ nv_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
}
} else {
if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
@@ -9537,6 +9573,28 @@ static int gfx_v10_0_queue_reset(struct amdgpu_ring *ring, uint32_t vmid, bool u
dev_err(adev->dev, "fail to wait on hqd deactivate\n");
}
+ } else if (queue_type == AMDGPU_RING_TYPE_GFX) {
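+ /*
+ * KIQ path: reserve 5 dwords for the CP_VMID_RESET write, 7 for the
+ * WAIT_REG_MEM on the MQD's hqd_active flag and 7 for the wait on
+ * CP_VMID_RESET to read back as zero. The caller holds
+ * kiq->ring_lock and is responsible for unlocking on error.
+ */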
+ if (amdgpu_ring_alloc(kiq_ring, 5 + 7 + 7))
+ return -ENOMEM;
+
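+ /* Have the KIQ poll the cp_gfx_hqd_active word in this ring's MQD. */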
+ addr = amdgpu_bo_gpu_offset(ring->mqd_obj) +
+ offsetof(struct v10_gfx_mqd, cp_gfx_hqd_active);
+ tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
+ if (ring->pipe == 0)
+ tmp = REG_SET_FIELD(tmp, CP_VMID_RESET, PIPE0_QUEUES, 1 << ring->queue);
+ else
+ tmp = REG_SET_FIELD(tmp, CP_VMID_RESET, PIPE1_QUEUES, 1 << ring->queue);
+
+ gfx_v10_0_ring_emit_wreg(kiq_ring,
+ SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), tmp);
+ gfx_v10_0_wait_reg_mem(kiq_ring, 0, 1, 0,
+ lower_32_bits(addr), upper_32_bits(addr),
+ 0, 1, 0x20);
+ gfx_v10_0_ring_emit_reg_wait(kiq_ring,
+ SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0, 0xffffffff);
+ amdgpu_ring_commit(kiq_ring);
}
}
@@ -9549,8 +9607,6 @@ static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *kiq_ring = &kiq->ring;
unsigned long flags;
- u32 tmp;
- u64 addr;
int r;
if (amdgpu_sriov_vf(adev))
@@ -9560,35 +9616,21 @@ static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
return -EINVAL;
spin_lock_irqsave(&kiq->ring_lock, flags);
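+ /* Quiesce the queue through the KIQ, then remap and test it below. */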
+ r = gfx_v10_0_queue_reset(ring, vmid, false);
+ if (r) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return r;
+ }
- if (amdgpu_ring_alloc(kiq_ring, 5 + 7 + 7 + kiq->pmf->map_queues_size)) {
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
- return -ENOMEM;
- }
-
- addr = amdgpu_bo_gpu_offset(ring->mqd_obj) +
- offsetof(struct v10_gfx_mqd, cp_gfx_hqd_active);
- tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
- if (ring->pipe == 0)
- tmp = REG_SET_FIELD(tmp, CP_VMID_RESET, PIPE0_QUEUES, 1 << ring->queue);
- else
- tmp = REG_SET_FIELD(tmp, CP_VMID_RESET, PIPE1_QUEUES, 1 << ring->queue);
-
- gfx_v10_0_ring_emit_wreg(kiq_ring,
- SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), tmp);
- gfx_v10_0_wait_reg_mem(kiq_ring, 0, 1, 0,
- lower_32_bits(addr), upper_32_bits(addr),
- 0, 1, 0x20);
- gfx_v10_0_ring_emit_reg_wait(kiq_ring,
- SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0, 0xffffffff);
- kiq->pmf->kiq_map_queues(kiq_ring, ring);
- amdgpu_ring_commit(kiq_ring);
+ r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
+ if (r) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return r;
+ }
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ kiq->pmf->kiq_map_queues(kiq_ring, ring);
+ amdgpu_ring_commit(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
- r = amdgpu_ring_test_ring(kiq_ring);
- if (r)
- return r;
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r)
+ return r;
r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0)) {
--
2.25.1