[PATCH 1/2] drm/amdgpu/sriov: For sriov runtime, use kiq to do invalidate tlb
Emily Deng
Emily.Deng at amd.com
Wed Aug 15 09:48:27 UTC 2018
To avoid the TLB flush being interrupted by a world switch, use the KIQ
and a single command packet to do the TLB invalidation.
v2:
Add firmware version checking.
v3:
Refine the code, and move the firmware version check into gfx_v9_0_ring_emit_reg_write_reg_wait.
SWDEV-161497
Signed-off-by: Emily Deng <Emily.Deng at amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4 +++
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 3 --
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 15 +++++++-
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 61 ++++++++++++++++++++++++++++++++
4 files changed, 79 insertions(+), 4 deletions(-)
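For reviewers, the core of the new SR-IOV path is condensed below (excerpted
from the gmc_v9_0.c hunk in this patch; surrounding error handling trimmed).
The point is that one reg_write_reg_wait packet submitted on the KIQ ring
performs both the invalidate request and the ack poll, so a world switch
cannot land between the two:

/* Excerpt from amdgpu_kiq_invalidate_tlb() added below: a single KIQ packet
 * writes VM_INVALIDATE_ENG*_REQ and polls VM_INVALIDATE_ENG*_ACK.
 */
spin_lock_irqsave(&kiq->ring_lock, flags);
amdgpu_ring_alloc(ring, 32);
amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
				    hub->vm_inv_eng0_ack + eng,
				    req, 1 << vmid);
amdgpu_fence_emit_polling(ring, &seq);
amdgpu_ring_commit(ring);
spin_unlock_irqrestore(&kiq->ring_lock, flags);

/* Wait for the fence outside the lock; the caller falls back to the
 * WREG32 + busy-wait path if this returns an error.
 */
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);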
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 07924d4..67b584b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -210,6 +210,10 @@ enum amdgpu_kiq_irq {
AMDGPU_CP_KIQ_IRQ_LAST
};
+#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
+#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
+#define MAX_KIQ_REG_TRY 20
+
int amdgpu_device_ip_set_clockgating_state(void *dev,
enum amd_ip_block_type block_type,
enum amd_clockgating_state state);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 21adb1b6..3885636 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -22,9 +22,6 @@
*/
#include "amdgpu.h"
-#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
-#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
-#define MAX_KIQ_REG_TRY 20
uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 76d979e..c9b3db4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -4348,8 +4348,21 @@ static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
uint32_t ref, uint32_t mask)
{
int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
+ struct amdgpu_device *adev = ring->adev;
+ bool fw_version_ok = false;
- if (amdgpu_sriov_vf(ring->adev))
+ if (adev->asic_type == CHIP_VEGA10) {
+ if (ring->funcs->type == AMDGPU_RING_TYPE_GFX)
+ fw_version_ok = adev->gfx.me_fw_version >= 0x0000009c &&
+ adev->gfx.me_feature_version >= 42 &&
+ adev->gfx.pfp_fw_version >= 0x000000b1 &&
+ adev->gfx.pfp_feature_version >= 42;
+ else
+ fw_version_ok = adev->gfx.mec_fw_version >= 0x00000193 &&
+ adev->gfx.mec_feature_version >= 42;
+ }
+
+ if (amdgpu_sriov_vf(adev) && fw_version_ok)
gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
ref, mask, 0x20);
else
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index ed467de..3419178 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -311,6 +311,60 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
return req;
}
+signed long amdgpu_kiq_invalidate_tlb(struct amdgpu_device *adev, struct amdgpu_vmhub *hub,
+ unsigned eng, u32 req, uint32_t vmid)
+{
+ signed long r, cnt = 0;
+ unsigned long flags;
+ uint32_t seq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_ring *ring = &kiq->ring;
+
+ if (!ring->ready)
+ return -EINVAL;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ amdgpu_ring_alloc(ring, 32);
+ amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
+ hub->vm_inv_eng0_ack + eng,
+ req, 1 << vmid);
+ amdgpu_fence_emit_polling(ring, &seq);
+ amdgpu_ring_commit(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ /* Don't keep waiting in the GPU reset case, because that may
+ * block the gpu_recover() routine forever, e.g. this invalidate
+ * can be triggered from TTM and ttm_bo_lock_delayed_workqueue()
+ * would never return while we keep waiting here, which causes
+ * gpu_recover() to hang.
+ *
+ * Also don't keep waiting in IRQ context.
+ */
+ if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+ goto failed_kiq;
+
+ might_sleep();
+
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY)
+ goto failed_kiq;
+
+ return 0;
+
+failed_kiq:
+ pr_err("failed to invalidate tlb with kiq\n");
+ return r;
+}
+
/*
* GART
* VMID 0 is the physical GPU addresses as used by the kernel.
@@ -332,6 +386,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
/* Use register 17 for GART */
const unsigned eng = 17;
unsigned i, j;
+ int r;
spin_lock(&adev->gmc.invalidate_lock);
@@ -339,6 +394,12 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
struct amdgpu_vmhub *hub = &adev->vmhub[i];
u32 tmp = gmc_v9_0_get_invalidate_req(vmid);
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_kiq_invalidate_tlb(adev, hub, eng, tmp, vmid);
+ if (!r)
+ continue;
+ }
+
WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
/* Busy wait for ACK.*/
--
2.7.4