[PATCH 1/2] drm/amdgpu: Separate vf2pf work item init from virt data exchange
Victor Skvortsov
victor.skvortsov at amd.com
Thu Dec 9 16:48:08 UTC 2021
We want to be able to call the virt data exchange conditionally
after GMC sw init so that bad pages can be reserved as early as
possible. Since that call is conditional, it still needs to be
made again, unconditionally, later in the init sequence.
Refactor the data exchange function so it can be called multiple
times without re-initializing the vf2pf work item.
Signed-off-by: Victor Skvortsov <victor.skvortsov at amd.com>
---
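For reference, a rough sketch of the call flow this split enables.
This is illustrative only: the early conditional call site is added
in patch 2/2, so its exact placement below is assumed.

	/* Early, right after GMC sw init: exchange pf2vf/vf2pf data
	 * as soon as possible so bad pages can be reserved.  If the
	 * reserved VRAM region is not mapped yet
	 * (adev->mman.fw_vram_usage_va == NULL), the call does
	 * nothing beyond clearing the cached pf2vf/vf2pf pointers.
	 */
	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_exchange_data(adev);

	/* ... */

	/* Later in the init sequence, unconditionally: exchange the
	 * data again and start the periodic vf2pf update work item
	 * exactly once.
	 */
	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_exchange_data(adev);
		amdgpu_virt_init_vf2pf_work_item(adev);
	}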
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 20 ++++++-----
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 42 ++++++++++++++--------
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 5 +--
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 2 +-
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 2 +-
5 files changed, 45 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index ce9bdef185c0..3992c4086d26 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2181,7 +2181,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
/* get pf2vf msg info at its earliest time */
if (amdgpu_sriov_vf(adev))
- amdgpu_virt_init_data_exchange(adev);
+ amdgpu_virt_exchange_data(adev);
}
}
@@ -2345,8 +2345,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
}
}
- if (amdgpu_sriov_vf(adev))
- amdgpu_virt_init_data_exchange(adev);
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_exchange_data(adev);
+ amdgpu_virt_init_vf2pf_work_item(adev);
+ }
r = amdgpu_ib_pool_init(adev);
if (r) {
@@ -2949,7 +2951,7 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
int r;
if (amdgpu_sriov_vf(adev)) {
- amdgpu_virt_fini_data_exchange(adev);
+ amdgpu_virt_fini_vf2pf_work_item(adev);
amdgpu_virt_request_full_gpu(adev, false);
}
@@ -3839,7 +3841,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
* */
if (amdgpu_sriov_vf(adev)) {
amdgpu_virt_request_full_gpu(adev, false);
- amdgpu_virt_fini_data_exchange(adev);
+ amdgpu_virt_fini_vf2pf_work_item(adev);
}
/* disable all interrupts */
@@ -4317,7 +4319,9 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
if (r)
goto error;
- amdgpu_virt_init_data_exchange(adev);
+ amdgpu_virt_exchange_data(adev);
+ amdgpu_virt_init_vf2pf_work_item(adev);
+
/* we need recover gart prior to run SMC/CP/SDMA resume */
amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
@@ -4495,7 +4499,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
if (amdgpu_sriov_vf(adev)) {
/* stop the data exchange thread */
- amdgpu_virt_fini_data_exchange(adev);
+ amdgpu_virt_fini_vf2pf_work_item(adev);
}
/* block all schedulers and reset given job's ring */
@@ -4898,7 +4902,7 @@ static void amdgpu_device_recheck_guilty_jobs(
retry:
/* do hw reset */
if (amdgpu_sriov_vf(adev)) {
- amdgpu_virt_fini_data_exchange(adev);
+ amdgpu_virt_fini_vf2pf_work_item(adev);
r = amdgpu_device_reset_sriov(adev, false);
if (r)
adev->asic_reset_res = r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 3fc49823f527..b6e3d379a86a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -611,16 +611,7 @@ static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
}
-void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
-{
- if (adev->virt.vf2pf_update_interval_ms != 0) {
- DRM_INFO("clean up the vf2pf work item\n");
- cancel_delayed_work_sync(&adev->virt.vf2pf_work);
- adev->virt.vf2pf_update_interval_ms = 0;
- }
-}
-
-void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
+void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
{
uint64_t bp_block_offset = 0;
uint32_t bp_block_size = 0;
@@ -628,11 +619,8 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
adev->virt.fw_reserve.p_pf2vf = NULL;
adev->virt.fw_reserve.p_vf2pf = NULL;
- adev->virt.vf2pf_update_interval_ms = 0;
if (adev->mman.fw_vram_usage_va != NULL) {
- adev->virt.vf2pf_update_interval_ms = 2000;
-
adev->virt.fw_reserve.p_pf2vf =
(struct amd_sriov_msg_pf2vf_info_header *)
(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
@@ -666,13 +654,39 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
return;
}
+}
+
+void amdgpu_virt_init_vf2pf_work_item(struct amdgpu_device *adev)
+{
+ if (adev->mman.fw_vram_usage_va != NULL) {
+
+ /* Interval value is passed from the host.
+ * Clamp too-large or too-small interval values.
+ */
+ if (adev->virt.vf2pf_update_interval_ms < 200 || adev->virt.vf2pf_update_interval_ms > 10000)
+ adev->virt.vf2pf_update_interval_ms = 2000;
+
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)
+ (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+ adev->virt.fw_reserve.p_vf2pf =
+ (struct amd_sriov_msg_vf2pf_info_header *)
+ (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
- if (adev->virt.vf2pf_update_interval_ms != 0) {
INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
}
}
+void amdgpu_virt_fini_vf2pf_work_item(struct amdgpu_device *adev)
+{
+ if (adev->virt.vf2pf_update_interval_ms != 0) {
+ DRM_INFO("clean up the vf2pf work item\n");
+ cancel_delayed_work_sync(&adev->virt.vf2pf_work);
+ adev->virt.vf2pf_update_interval_ms = 0;
+ }
+}
+
void amdgpu_detect_virtualization(struct amdgpu_device *adev)
{
uint32_t reg;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 8d4c20bb71c5..c18a7daab693 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -307,8 +307,9 @@ int amdgpu_virt_wait_reset(struct amdgpu_device *adev);
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev);
-void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
-void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev);
+void amdgpu_virt_exchange_data(struct amdgpu_device *adev);
+void amdgpu_virt_init_vf2pf_work_item(struct amdgpu_device *adev);
+void amdgpu_virt_fini_vf2pf_work_item(struct amdgpu_device *adev);
void amdgpu_detect_virtualization(struct amdgpu_device *adev);
bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 23b066bcffb2..cd2719bc0139 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -255,7 +255,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
if (!down_write_trylock(&adev->reset_sem))
return;
- amdgpu_virt_fini_data_exchange(adev);
+ amdgpu_virt_fini_vf2pf_work_item(adev);
atomic_set(&adev->in_gpu_reset, 1);
xgpu_ai_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index a35e6d87e537..2bc93808469a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -284,7 +284,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
if (!down_write_trylock(&adev->reset_sem))
return;
- amdgpu_virt_fini_data_exchange(adev);
+ amdgpu_virt_fini_vf2pf_work_item(adev);
atomic_set(&adev->in_gpu_reset, 1);
xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
--
2.25.1