[PATCH v2] drm/amdgpu/sriov: For sriov runtime, use kiq to invalidate tlb

Emily Deng Emily.Deng at amd.com
Tue Aug 14 10:56:51 UTC 2018


To avoid the TLB flush being interrupted by a world switch, use the KIQ
and a single command to do the TLB invalidation.

v2:
Add firmware version checking.

SWDEV-161497

Signed-off-by: Emily Deng <Emily.Deng at amd.com>
---
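Note for reviewers (not part of the commit message): a condensed sketch of
the intended flow in gmc_v9_0_flush_gpu_tlb() with this patch applied. The
names come from the hunks below; the loop bound and surrounding context are
paraphrased from the existing function, and locking plus the ACK busy-wait
are left out for brevity.

	/* Under SRIOV runtime, try the KIQ-based invalidation first and fall
	 * back to the direct MMIO write when the KIQ path is unavailable
	 * (e.g. MEC firmware too old) or fails.
	 */
	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = gmc_v9_0_get_invalidate_req(vmid);

		if (amdgpu_sriov_vf(adev) && amdgpu_sriov_runtime(adev) &&
		    !amdgpu_virt_kiq_invalidate_tlb(adev, hub, eng, tmp, vmid))
			continue;	/* KIQ handled this vmhub */

		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
		/* ... busy wait for the ACK, as before ... */
	}
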
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 63 ++++++++++++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h |  2 +
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c    |  7 ++++
 3 files changed, 72 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 21adb1b6..436030c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -233,6 +233,69 @@ void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 	pr_err("failed to write reg:%x\n", reg);
 }
 
+signed long amdgpu_virt_kiq_invalidate_tlb(struct amdgpu_device *adev, struct amdgpu_vmhub *hub,
+		unsigned eng, u32 req, uint32_t vmid)
+{
+	signed long r, cnt = 0;
+	unsigned long flags;
+	uint32_t seq;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+	struct amdgpu_ring *ring = &kiq->ring;
+	struct drm_amdgpu_info_firmware fw_info;
+
+	if (ring->me == 1) {
+		fw_info.ver = adev->gfx.mec_fw_version;
+		fw_info.feature = adev->gfx.mec_feature_version;
+	} else {
+		fw_info.ver = adev->gfx.mec2_fw_version;
+		fw_info.feature = adev->gfx.mec2_feature_version;
+	}
+
+	if (fw_info.ver < 0x00000190 || fw_info.feature < 42)
+		return -EPERM;
+
+	BUG_ON(!ring->funcs->emit_reg_write_reg_wait);
+
+	spin_lock_irqsave(&kiq->ring_lock, flags);
+	amdgpu_ring_alloc(ring, 32);
+	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
+					    hub->vm_inv_eng0_ack + eng,
+					    req, 1 << vmid);
+	amdgpu_fence_emit_polling(ring, &seq);
+	amdgpu_ring_commit(ring);
+	spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+	/* don't wait anymore for the gpu reset case because this way may
+	 * block the gpu_recover() routine forever, e.g. this tlb invalidation
+	 * may be triggered from TTM and ttm_bo_lock_delayed_workqueue() will
+	 * never return if we keep waiting here, which would make
+	 * gpu_recover() hang.
+	 *
+	 * also don't wait anymore for IRQ context
+	 */
+	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+		goto failed_kiq;
+
+	/* only reached in process context here, so sleeping below is allowed */
+	might_sleep();
+
+	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+	}
+
+	if (cnt > MAX_KIQ_REG_TRY)
+		goto failed_kiq;
+
+	return 0;
+
+failed_kiq:
+	pr_err("failed to invalidate tlb with kiq\n");
+	return r;
+}
+
 /**
  * amdgpu_virt_request_full_gpu() - request full gpu access
  * @amdgpu:	amdgpu device.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 880ac11..8908ff9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -288,6 +288,8 @@ void amdgpu_free_static_csa(struct amdgpu_device *adev);
 void amdgpu_virt_init_setting(struct amdgpu_device *adev);
 uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
 void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
+signed long amdgpu_virt_kiq_invalidate_tlb(struct amdgpu_device *adev, struct amdgpu_vmhub *hub,
+		unsigned eng, u32 req, uint32_t vmid);
 int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 6999042..812f71e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -331,6 +331,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
 	/* Use register 17 for GART */
 	const unsigned eng = 17;
 	unsigned i, j;
+	int r;
 
 	spin_lock(&adev->gmc.invalidate_lock);
 
@@ -338,6 +339,12 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
 		struct amdgpu_vmhub *hub = &adev->vmhub[i];
 		u32 tmp = gmc_v9_0_get_invalidate_req(vmid);
 
+		if (amdgpu_sriov_vf(adev) && amdgpu_sriov_runtime(adev)) {
+			r = amdgpu_virt_kiq_invalidate_tlb(adev, hub, eng, tmp, vmid);
+			if (!r)
+				continue;
+		}
+
 		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
 
 		/* Busy wait for ACK.*/
-- 
2.7.4


