[PATCH 08/11] drm/amdgpu: fix and cleanup gmc_v11_0_flush_gpu_tlb_pasid
Christian König
ckoenig.leichtzumerken at gmail.com
Tue Sep 5 06:04:12 UTC 2023
The same PASID can be used by more than one VMID, so reset each of them.
Use the common KIQ handling.
Signed-off-by: Christian König <christian.koenig at amd.com>
---
drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 63 ++++++++------------------
1 file changed, 19 insertions(+), 44 deletions(-)
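[Note for readers of the archive, not part of the patch: a rough sketch of what the
"common KIQ handling" is expected to look like. The helper name below is made up;
the locking, ring sizing and error handling simply mirror the per-IP code removed
in the hunk that follows, generalized over the KIQ instance.]

/* Sketch only: approximates the shared KIQ flush path that replaces the
 * per-IP code removed below.
 */
static int example_kiq_flush_pasid(struct amdgpu_device *adev,
				   uint16_t pasid, uint32_t flush_type,
				   bool all_hub, uint32_t inst)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
	struct amdgpu_ring *ring = &kiq->ring;
	signed long r;
	uint32_t seq;

	spin_lock(&kiq->ring_lock);
	/* 2 dwords flush + 8 dwords fence, as in the removed code */
	amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
	kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r) {
		amdgpu_ring_undo(ring);
		spin_unlock(&kiq->ring_lock);
		return -ETIME;
	}
	amdgpu_ring_commit(ring);
	spin_unlock(&kiq->ring_lock);

	r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
	if (r < 1) {
		dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
		return -ETIME;
	}
	return 0;
}

The adev->gmc.flush_pasid_uses_kiq flag set in gmc_v11_0_hw_init() in the second
hunk presumably selects this path, falling back to the per-VMID register loop
otherwise.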
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 3c3ad3f17c6a..aa39c1087e44 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -303,54 +303,27 @@ static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint16_t pasid, uint32_t flush_type,
bool all_hub, uint32_t inst)
{
+ uint16_t queried;
int vmid, i;
- signed long r;
- uint32_t seq;
- uint16_t queried_pasid;
- bool ret;
- struct amdgpu_ring *ring = &adev->gfx.kiq[0].ring;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
-
- if (amdgpu_emu_mode == 0 && ring->sched.ready) {
- spin_lock(&adev->gfx.kiq[0].ring_lock);
- /* 2 dwords flush + 8 dwords fence */
- amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
- kiq->pmf->kiq_invalidate_tlbs(ring,
- pasid, flush_type, all_hub);
- r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
- if (r) {
- amdgpu_ring_undo(ring);
- spin_unlock(&adev->gfx.kiq[0].ring_lock);
- return -ETIME;
- }
-
- amdgpu_ring_commit(ring);
- spin_unlock(&adev->gfx.kiq[0].ring_lock);
- r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
- if (r < 1) {
- dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
- return -ETIME;
- }
-
- return 0;
- }
for (vmid = 1; vmid < 16; vmid++) {
-
- ret = gmc_v11_0_get_vmid_pasid_mapping_info(adev, vmid,
- &queried_pasid);
- if (ret && queried_pasid == pasid) {
- if (all_hub) {
- for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
- gmc_v11_0_flush_gpu_tlb(adev, vmid,
- i, flush_type);
- } else {
- gmc_v11_0_flush_gpu_tlb(adev, vmid,
- AMDGPU_GFXHUB(0), flush_type);
- }
+ bool valid;
+
+ valid = gmc_v11_0_get_vmid_pasid_mapping_info(adev, vmid,
+ &queried);
+ if (!valid || queried != pasid)
+ continue;
+
+ if (all_hub) {
+ for_each_set_bit(i, adev->vmhubs_mask,
+ AMDGPU_MAX_VMHUBS)
+ gmc_v11_0_flush_gpu_tlb(adev, vmid, i,
+ flush_type);
+ } else {
+ gmc_v11_0_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0),
+ flush_type);
}
}
-
return 0;
}
@@ -918,8 +891,10 @@ static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
static int gmc_v11_0_hw_init(void *handle)
{
- int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ adev->gmc.flush_pasid_uses_kiq = !amdgpu_emu_mode;
/* The sequence of these two function calls matters.*/
gmc_v11_0_init_golden_registers(adev);
--
2.34.1