[PATCH v2] drm/amdkfd: Update return value of config_dequeue_wait_counts
Harish Kasiviswanathan
Harish.Kasiviswanathan at amd.com
Fri Mar 14 20:22:11 UTC 2025
.config_dequeue_wait_counts can be a nop in some cases. Modify its return
value to reflect that, since the caller also needs to ignore this condition.
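For illustration only, a minimal, self-contained sketch of the convention this
change adopts; the helper names (build_wait_counts_packet, config_wait_counts)
are hypothetical stand-ins, not the kernel functions. The builder returns a
negative errno on failure, 0 when there is nothing to program (a nop), and a
positive packet size on success; the caller submits only in the positive case
and still reports 0 upward for the nop case:

#include <stdio.h>
#include <errno.h>

/* Hypothetical stand-in for the packet builder: <0 on failure, 0 for nop,
 * >0 number of bytes written into buffer on success. */
static int build_wait_counts_packet(int needs_update, unsigned char *buffer,
                                    unsigned int size)
{
        if (!needs_update)
                return 0;               /* nop: buffer left untouched */
        if (size < 4)
                return -EINVAL;         /* failure */
        buffer[0] = 0x7e;               /* pretend packet payload */
        return 4;                       /* success: packet size */
}

/* Hypothetical caller following the same pattern: submit only when a packet
 * was actually built, and report 0 (not an error) for the nop case. */
static int config_wait_counts(int needs_update)
{
        unsigned char buf[64];
        int retval = build_wait_counts_packet(needs_update, buf, sizeof(buf));

        if (retval > 0)
                printf("submitting %d-byte packet\n", retval);
        else if (retval == 0)
                printf("nop: nothing to submit\n");

        return retval < 0 ? retval : 0;
}

int main(void)
{
        printf("nop path   -> %d\n", config_wait_counts(0));
        printf("build path -> %d\n", config_wait_counts(1));
        return 0;
}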
v2: Removed redundant code.
Tidied up code based on review comments.
Fixes: 98a5af8103f ("drm/amdkfd: Add pm_config_dequeue_wait_counts API")
Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan at amd.com>
---
.../gpu/drm/amd/amdkfd/kfd_packet_manager.c | 14 ++++----
.../drm/amd/amdkfd/kfd_packet_manager_v9.c | 36 +++++++++++--------
2 files changed, 29 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 3f574d82b5fc..502b89639a9f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -436,19 +436,19 @@ int pm_config_dequeue_wait_counts(struct packet_manager *pm,
retval = pm->pmf->config_dequeue_wait_counts(pm, buffer,
cmd, value);
- if (!retval)
+ if (retval > 0) {
retval = kq_submit_packet(pm->priv_queue);
+
+ /* If default value is modified, cache that in dqm->wait_times */
+ if (!retval && cmd == KFD_DEQUEUE_WAIT_INIT)
+ update_dqm_wait_times(pm->dqm);
+ }
else
kq_rollback_packet(pm->priv_queue);
}
-
- /* If default value is modified, cache that value in dqm->wait_times */
- if (!retval && cmd == KFD_DEQUEUE_WAIT_INIT)
- update_dqm_wait_times(pm->dqm);
-
out:
mutex_unlock(&pm->lock);
- return retval;
+ return retval < 0 ? retval : 0;
}
int pm_send_unmap_queue(struct packet_manager *pm,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
index d440df602393..3c6134d61b2b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
@@ -310,6 +310,13 @@ static inline void pm_build_dequeue_wait_counts_packet_info(struct packet_manage
reg_data);
}
+/* pm_config_dequeue_wait_counts_v9: Builds WRITE_DATA packet with
+ * register/value for configuring dequeue wait counts
+ *
+ * @return: -ve for failure, 0 for nop, +ve for success; buffer is
+ * filled in with the packet
+ *
+ **/
static int pm_config_dequeue_wait_counts_v9(struct packet_manager *pm,
uint32_t *buffer,
enum kfd_config_dequeue_wait_counts_cmd cmd,
@@ -321,24 +328,25 @@ static int pm_config_dequeue_wait_counts_v9(struct packet_manager *pm,
switch (cmd) {
case KFD_DEQUEUE_WAIT_INIT: {
- uint32_t sch_wave = 0, que_sleep = 0;
- /* Reduce CP_IQ_WAIT_TIME2.QUE_SLEEP to 0x1 from default 0x40.
+ uint32_t sch_wave = 0, que_sleep = 1;
+
+ if (KFD_GC_VERSION(pm->dqm->dev) < IP_VERSION(9, 4, 1) ||
+ KFD_GC_VERSION(pm->dqm->dev) > IP_VERSION(10, 0, 0))
+ return 0;
+
+ /* For all other gfx9 ASICs,
+ * Reduce CP_IQ_WAIT_TIME2.QUE_SLEEP to 0x1 from default 0x40.
* On a 1GHz machine this is roughly 1 microsecond, which is
* about how long it takes to load data out of memory during
* queue connect
* QUE_SLEEP: Wait Count for Dequeue Retry.
+ *
+ * Set CWSR grace period to 1x1000 cycle for GFX9.4.3 APU
*/
- if (KFD_GC_VERSION(pm->dqm->dev) >= IP_VERSION(9, 4, 1) &&
- KFD_GC_VERSION(pm->dqm->dev) < IP_VERSION(10, 0, 0)) {
- que_sleep = 1;
-
- /* Set CWSR grace period to 1x1000 cycle for GFX9.4.3 APU */
- if (amdgpu_emu_mode == 0 && pm->dqm->dev->adev->gmc.is_app_apu &&
- (KFD_GC_VERSION(pm->dqm->dev) == IP_VERSION(9, 4, 3)))
- sch_wave = 1;
- } else {
- return 0;
- }
+ if (amdgpu_emu_mode == 0 && pm->dqm->dev->adev->gmc.is_app_apu &&
+ (KFD_GC_VERSION(pm->dqm->dev) == IP_VERSION(9, 4, 3)))
+ sch_wave = 1;
+
pm_build_dequeue_wait_counts_packet_info(pm, sch_wave, que_sleep,
&reg_offset, &reg_data);
@@ -377,7 +385,7 @@ static int pm_config_dequeue_wait_counts_v9(struct packet_manager *pm,
packet->data = reg_data;
- return 0;
+ return sizeof(struct pm4_mec_write_data_mmio);
}
static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
--
2.34.1