[PATCH WIP] drm/amdgpu: Fix kfd_locked locking issue
Yunxiang Li
Yunxiang.Li at amd.com
Tue Apr 23 02:30:44 UTC 2024
During SRIOV reset, if a step fails and we retry, kfd_locked is taken
again on the retry but only released once afterwards. As a result, every
later attempt to open /dev/kfd fails.
Fix the locking imbalance and rework the SRIOV path to reuse the reset
retry loop that already exists in the caller. Hopefully this makes the
code easier to reason about.
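For illustration only (not part of the patch), a minimal user-space sketch
of the imbalance: a counter standing in for kfd_locked is incremented on
every retry of the pre-reset step but decremented only once by post-reset,
so it never drops back to zero and further opens stay blocked. The names
kfd_locked_model, pre_reset() and post_reset() are hypothetical stand-ins
for the real amdgpu_amdkfd_pre_reset()/amdgpu_amdkfd_post_reset() helpers.

#include <stdio.h>
#include <stdbool.h>

/* Stand-in for kfd_locked; opening /dev/kfd is refused while non-zero. */
static int kfd_locked_model;

static void pre_reset(void)  { kfd_locked_model++; }  /* like amdgpu_amdkfd_pre_reset()  */
static void post_reset(void) { kfd_locked_model--; }  /* like amdgpu_amdkfd_post_reset() */

static bool can_open_kfd(void) { return kfd_locked_model == 0; }

int main(void)
{
	int attempts = 3;	/* a reset step failed twice, then succeeded */
	int i;

	/* Old flow: pre_reset() ran on every retry, post_reset() only once. */
	for (i = 0; i < attempts; i++)
		pre_reset();
	post_reset();

	printf("kfd_locked = %d, open(/dev/kfd) %s\n",
	       kfd_locked_model, can_open_kfd() ? "works" : "fails");
	return 0;
}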
Signed-off-by: Yunxiang Li <Yunxiang.Li at amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 76 +++++++++-------------
1 file changed, 30 insertions(+), 46 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 67da24e9f0a2..37d65f6db0e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -5051,24 +5051,11 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
* do VF FLR and reinitialize Asic
* return 0 means succeeded otherwise failed
*/
-static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
- bool from_hypervisor)
+static int amdgpu_device_post_reset_sriov(struct amdgpu_device *adev)
{
- int r;
+ int r = 0;
struct amdgpu_hive_info *hive = NULL;
- int retry_limit = 0;
-
-retry:
- amdgpu_amdkfd_pre_reset(adev);
-
- amdgpu_device_stop_pending_resets(adev);
- if (from_hypervisor)
- r = amdgpu_virt_request_full_gpu(adev, true);
- else
- r = amdgpu_virt_reset_gpu(adev);
- if (r)
- return r;
amdgpu_ras_set_fed(adev, false);
amdgpu_irq_gpu_reset_resume_helper(adev);
@@ -5078,7 +5065,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
/* Resume IP prior to SMC */
r = amdgpu_device_ip_reinit_early_sriov(adev);
if (r)
- goto error;
+ return r;
amdgpu_virt_init_data_exchange(adev);
@@ -5089,37 +5076,25 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
/* now we are okay to resume SMC/CP/SDMA */
r = amdgpu_device_ip_reinit_late_sriov(adev);
if (r)
- goto error;
+ return r;
hive = amdgpu_get_xgmi_hive(adev);
/* Update PSP FW topology after reset */
if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
r = amdgpu_xgmi_update_topology(hive, adev);
-
if (hive)
amdgpu_put_xgmi_hive(hive);
+ if (r)
+ return r;
- if (!r) {
- r = amdgpu_ib_ring_tests(adev);
-
- amdgpu_amdkfd_post_reset(adev);
- }
+ r = amdgpu_ib_ring_tests(adev);
+ if (r)
+ return r;
-error:
- if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
+ if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
amdgpu_inc_vram_lost(adev);
r = amdgpu_device_recover_vram(adev);
}
- amdgpu_virt_release_full_gpu(adev, true);
-
- if (AMDGPU_RETRY_SRIOV_RESET(r)) {
- if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
- retry_limit++;
- goto retry;
- } else
- DRM_ERROR("GPU reset retry is beyond the retry limit\n");
- }
-
return r;
}
@@ -5678,7 +5653,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
int i, r = 0;
bool need_emergency_restart = false;
bool audio_suspended = false;
- bool from_hypervisor = false;
+ int retry_limit = AMDGPU_MAX_RETRY_LIMIT;
/*
* Special case: RAS triggered and full reset isn't supported
@@ -5760,8 +5735,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
- if (!amdgpu_sriov_vf(tmp_adev))
- amdgpu_amdkfd_pre_reset(tmp_adev);
+ amdgpu_amdkfd_pre_reset(tmp_adev);
/*
* Mark these ASICs to be reseted as untracked first
@@ -5815,19 +5789,29 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
tmp_adev->asic_reset_res = r;
}
- if (!amdgpu_sriov_vf(tmp_adev))
- /*
- * Drop all pending non scheduler resets. Scheduler resets
- * were already dropped during drm_sched_stop
- */
- amdgpu_device_stop_pending_resets(tmp_adev);
+ /*
+ * Drop all pending non scheduler resets. Scheduler resets
+ * were already dropped during drm_sched_stop
+ */
+ amdgpu_device_stop_pending_resets(tmp_adev);
}
/* Actual ASIC resets if needed.*/
/* Host driver will handle XGMI hive reset for SRIOV */
if (amdgpu_sriov_vf(adev)) {
- from_hypervisor = test_bit(AMDGPU_HOST_FLR, &reset_context->flags);
- r = amdgpu_device_reset_sriov(adev, from_hypervisor);
+ if (test_bit(AMDGPU_HOST_FLR, &reset_context->flags))
+ r = amdgpu_virt_request_full_gpu(adev, true);
+ else
+ r = amdgpu_virt_reset_gpu(adev);
+ if (!r)
+ r = amdgpu_device_post_reset_sriov(adev);
+ if (AMDGPU_RETRY_SRIOV_RESET(r) && (retry_limit--) > 0) {
+ amdgpu_virt_release_full_gpu(adev, true);
+ goto retry;
+ } else {
+ amdgpu_amdkfd_post_reset(adev);
+ amdgpu_virt_release_full_gpu(adev, true);
+ }
if (r)
adev->asic_reset_res = r;
--
2.34.1