[PATCH 1/4] drm/amdkfd: Rename grace_period to wait_times
Harish Kasiviswanathan
Harish.Kasiviswanathan at amd.com
Wed Feb 12 22:03:38 UTC 2025
Rename .set_grace_period() to .set_compute_queue_wait_counts(). The
function does not only set the grace period; it also programs other
compute queue wait times. Until now only grace_period was set/updated,
but other wait times also need to be set/updated. Rename the function
to reflect this.
No functional change intended.
Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan at amd.com>
---
.../drm/amd/amdkfd/kfd_device_queue_manager.c | 46 +++++++++----------
.../drm/amd/amdkfd/kfd_device_queue_manager.h | 2 +-
.../gpu/drm/amd/amdkfd/kfd_packet_manager.c | 18 ++++++--
.../drm/amd/amdkfd/kfd_packet_manager_v9.c | 16 +++----
.../drm/amd/amdkfd/kfd_packet_manager_vi.c | 4 +-
drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 6 +--
6 files changed, 52 insertions(+), 40 deletions(-)
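
A minimal, compilable user-space sketch (not part of the patch) of the
wait_counts_config semantics described in the comment added to
kfd_packet_manager.c below. The sentinel value matches the new define in
kfd_device_queue_manager.h; resolve_wait_times_reg() is a made-up
illustration and ignores the SCH_WAVE bitfield packing done by
build_grace_period_packet_info().

#include <stdint.h>
#include <stdio.h>

/* Sentinel from kfd_device_queue_manager.h: request the default wait times. */
#define KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES 0xffffffff

/*
 * Hypothetical helper mirroring the documented behaviour: the sentinel
 * restores the defaults captured at init, 0 is clamped to 1 because the CP
 * would treat it as an infinite grace period, and any other value becomes
 * the SCH_WAVE wait count.
 */
static uint32_t resolve_wait_times_reg(uint32_t wait_counts_config,
				       uint32_t default_wait_times)
{
	if (wait_counts_config == KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES)
		return default_wait_times;
	if (wait_counts_config == 0)
		wait_counts_config = 1;
	return wait_counts_config;
}

int main(void)
{
	uint32_t defaults = 0x1000;	/* stand-in for dqm->wait_times */

	printf("default:  0x%x\n", (unsigned)resolve_wait_times_reg(
			KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES, defaults));
	printf("zero:     0x%x\n", (unsigned)resolve_wait_times_reg(0, defaults));
	printf("explicit: 0x%x\n", (unsigned)resolve_wait_times_reg(30, defaults));
	return 0;
}
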
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 195085079eb2..b88a95b5ae0d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -976,7 +976,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q,
if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
if (!dqm->dev->kfd->shared_resources.enable_mes)
retval = unmap_queues_cpsch(dqm,
- KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false);
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES, false);
else if (prev_active)
retval = remove_queue_mes(dqm, q, &pdd->qpd);
@@ -1246,7 +1246,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
qpd->is_debug ?
KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
- USE_DEFAULT_GRACE_PERIOD);
+ KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES);
out:
dqm_unlock(dqm);
@@ -1387,7 +1387,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
}
if (!dqm->dev->kfd->shared_resources.enable_mes)
retval = execute_queues_cpsch(dqm,
- KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES);
eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
atomic64_add(eviction_duration, &pdd->evict_duration_counter);
vm_not_acquired:
@@ -1788,7 +1788,7 @@ static int halt_cpsch(struct device_queue_manager *dqm)
if (!dqm->dev->kfd->shared_resources.enable_mes)
ret = unmap_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
- USE_DEFAULT_GRACE_PERIOD, false);
+ KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES, false);
else
ret = remove_all_kfd_queues_mes(dqm);
}
@@ -1815,7 +1815,7 @@ static int unhalt_cpsch(struct device_queue_manager *dqm)
if (!dqm->dev->kfd->shared_resources.enable_mes)
ret = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
- 0, USE_DEFAULT_GRACE_PERIOD);
+ 0, KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES);
else
ret = add_all_kfd_queues_mes(dqm);
@@ -1860,7 +1860,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
dqm->sched_running = true;
if (!dqm->dev->kfd->shared_resources.enable_mes)
- execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);
+ execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES);
/* Set CWSR grace period to 1x1000 cycle for GFX9.4.3 APU */
if (amdgpu_emu_mode == 0 && dqm->dev->adev->gmc.is_app_apu &&
@@ -1868,7 +1868,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
uint32_t reg_offset = 0;
uint32_t grace_period = 1;
- retval = pm_update_grace_period(&dqm->packet_mgr,
+ retval = pm_set_compute_queue_wait_counts(&dqm->packet_mgr,
grace_period);
if (retval)
dev_err(dev, "Setting grace timeout failed\n");
@@ -1916,7 +1916,7 @@ static int stop_cpsch(struct device_queue_manager *dqm)
}
if (!dqm->dev->kfd->shared_resources.enable_mes)
- unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false);
+ unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES, false);
else
remove_all_kfd_queues_mes(dqm);
@@ -1959,7 +1959,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
increment_queue_count(dqm, qpd, kq->queue);
qpd->is_debug = true;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
- USE_DEFAULT_GRACE_PERIOD);
+ KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES);
dqm_unlock(dqm);
return 0;
@@ -1974,7 +1974,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
decrement_queue_count(dqm, qpd, kq->queue);
qpd->is_debug = false;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
- USE_DEFAULT_GRACE_PERIOD);
+ KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES);
/*
* Unconditionally decrement this counter, regardless of the queue's
* type.
@@ -2054,7 +2054,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
if (!dqm->dev->kfd->shared_resources.enable_mes)
retval = execute_queues_cpsch(dqm,
- KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES);
else
retval = add_queue_mes(dqm, q, qpd);
if (retval)
@@ -2294,8 +2294,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
if (!down_read_trylock(&dqm->dev->adev->reset_domain->sem))
return -EIO;
- if (grace_period != USE_DEFAULT_GRACE_PERIOD) {
- retval = pm_update_grace_period(&dqm->packet_mgr, grace_period);
+ if (grace_period != KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES) {
+ retval = pm_set_compute_queue_wait_counts(&dqm->packet_mgr, grace_period);
if (retval)
goto out;
}
@@ -2338,9 +2338,9 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
}
/* We need to reset the grace period value for this device */
- if (grace_period != USE_DEFAULT_GRACE_PERIOD) {
- if (pm_update_grace_period(&dqm->packet_mgr,
- USE_DEFAULT_GRACE_PERIOD))
+ if (grace_period != KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES) {
+ if (pm_set_compute_queue_wait_counts(&dqm->packet_mgr,
+ KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES))
dev_err(dev, "Failed to reset grace period\n");
}
@@ -2360,7 +2360,7 @@ static int reset_queues_cpsch(struct device_queue_manager *dqm, uint16_t pasid)
dqm_lock(dqm);
retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_BY_PASID,
- pasid, USE_DEFAULT_GRACE_PERIOD, true);
+ pasid, KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES, true);
dqm_unlock(dqm);
return retval;
@@ -2468,7 +2468,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
if (!dqm->dev->kfd->shared_resources.enable_mes) {
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
- USE_DEFAULT_GRACE_PERIOD);
+ KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES);
if (retval == -ETIME)
qpd->reset_wavefronts = true;
} else {
@@ -2763,7 +2763,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
}
if (!dqm->dev->kfd->shared_resources.enable_mes)
- retval = execute_queues_cpsch(dqm, filter, 0, USE_DEFAULT_GRACE_PERIOD);
+ retval = execute_queues_cpsch(dqm, filter, 0, KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES);
if ((retval || qpd->reset_wavefronts) &&
down_read_trylock(&dqm->dev->adev->reset_domain->sem)) {
@@ -3123,7 +3123,7 @@ int reserve_debug_trap_vmid(struct device_queue_manager *dqm,
}
r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
- USE_DEFAULT_GRACE_PERIOD, false);
+ KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES, false);
if (r)
goto out_unlock;
@@ -3172,7 +3172,7 @@ int release_debug_trap_vmid(struct device_queue_manager *dqm,
}
r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
- USE_DEFAULT_GRACE_PERIOD, false);
+ KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES, false);
if (r)
goto out_unlock;
@@ -3355,7 +3355,7 @@ int resume_queues(struct kfd_process *p,
r = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
0,
- USE_DEFAULT_GRACE_PERIOD);
+ KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES);
if (r) {
dev_err(dev, "Failed to resume process queues\n");
if (queue_ids) {
@@ -3734,7 +3734,7 @@ int dqm_debugfs_hang_hws(struct device_queue_manager *dqm)
}
dqm->active_runlist = true;
r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
- 0, USE_DEFAULT_GRACE_PERIOD);
+ 0, KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES);
dqm_unlock(dqm);
return r;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 09ab36f8e8c6..273c04a95568 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -37,7 +37,7 @@
#define KFD_MES_PROCESS_QUANTUM 100000
#define KFD_MES_GANG_QUANTUM 10000
-#define USE_DEFAULT_GRACE_PERIOD 0xffffffff
+#define KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES 0xffffffff
struct device_process_node {
struct qcm_process_device *qpd;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 4984b41cd372..8d2f63a38724 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -396,14 +396,26 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
return retval;
}
-int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period)
+/* pm_set_compute_queue_wait_counts: Configure CP IQ Timer Wait Counts for Items
+ * Offloaded from the Compute Queues by writing to CP_IQ_WAIT_TIME2 registers.
+ *
+ * @wait_counts_config: Parameter to configure. It can be a flag or a
+ * grace_period value. Possible flag values:
+ * KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES: reset the wait times to their defaults
+ *
+ * If it is not one of the flags above, the Wait Count for Scheduling Wave
+ * Message (SCH_WAVE) is set to the wait_counts_config value.
+ * NOTE: the CP cannot handle a value of 0 and would apply an infinite
+ * grace period, so 0 is clamped to 1 to prevent this.
+ */
+int pm_set_compute_queue_wait_counts(struct packet_manager *pm, uint32_t wait_counts_config)
{
struct kfd_node *node = pm->dqm->dev;
struct device *dev = node->adev->dev;
int retval = 0;
uint32_t *buffer, size;
- size = pm->pmf->set_grace_period_size;
+ size = pm->pmf->set_compute_queue_wait_counts_size;
mutex_lock(&pm->lock);
@@ -419,7 +431,7 @@ int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period)
goto out;
}
- retval = pm->pmf->set_grace_period(pm, buffer, grace_period);
+ retval = pm->pmf->set_compute_queue_wait_counts(pm, buffer, wait_counts_config);
if (!retval)
retval = kq_submit_packet(pm->priv_queue);
else
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
index d56525201155..8b693a9446e8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
@@ -297,9 +297,9 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
return 0;
}
-static int pm_set_grace_period_v9(struct packet_manager *pm,
+static int pm_set_compute_queue_wait_counts_v9(struct packet_manager *pm,
uint32_t *buffer,
- uint32_t grace_period)
+ uint32_t wait_counts_config)
{
struct pm4_mec_write_data_mmio *packet;
uint32_t reg_offset = 0;
@@ -308,11 +308,11 @@ static int pm_set_grace_period_v9(struct packet_manager *pm,
pm->dqm->dev->kfd2kgd->build_grace_period_packet_info(
pm->dqm->dev->adev,
pm->dqm->wait_times,
- grace_period,
+ wait_counts_config,
&reg_offset,
&reg_data);
- if (grace_period == USE_DEFAULT_GRACE_PERIOD)
+ if (wait_counts_config == KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES)
reg_data = pm->dqm->wait_times;
packet = (struct pm4_mec_write_data_mmio *)buffer;
@@ -415,7 +415,7 @@ const struct packet_manager_funcs kfd_v9_pm_funcs = {
.set_resources = pm_set_resources_v9,
.map_queues = pm_map_queues_v9,
.unmap_queues = pm_unmap_queues_v9,
- .set_grace_period = pm_set_grace_period_v9,
+ .set_compute_queue_wait_counts = pm_set_compute_queue_wait_counts_v9,
.query_status = pm_query_status_v9,
.release_mem = NULL,
.map_process_size = sizeof(struct pm4_mes_map_process),
@@ -423,7 +423,7 @@ const struct packet_manager_funcs kfd_v9_pm_funcs = {
.set_resources_size = sizeof(struct pm4_mes_set_resources),
.map_queues_size = sizeof(struct pm4_mes_map_queues),
.unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
- .set_grace_period_size = sizeof(struct pm4_mec_write_data_mmio),
+ .set_compute_queue_wait_counts_size = sizeof(struct pm4_mec_write_data_mmio),
.query_status_size = sizeof(struct pm4_mes_query_status),
.release_mem_size = 0,
};
@@ -434,7 +434,7 @@ const struct packet_manager_funcs kfd_aldebaran_pm_funcs = {
.set_resources = pm_set_resources_v9,
.map_queues = pm_map_queues_v9,
.unmap_queues = pm_unmap_queues_v9,
- .set_grace_period = pm_set_grace_period_v9,
+ .set_compute_queue_wait_counts = pm_set_compute_queue_wait_counts_v9,
.query_status = pm_query_status_v9,
.release_mem = NULL,
.map_process_size = sizeof(struct pm4_mes_map_process_aldebaran),
@@ -442,7 +442,7 @@ const struct packet_manager_funcs kfd_aldebaran_pm_funcs = {
.set_resources_size = sizeof(struct pm4_mes_set_resources),
.map_queues_size = sizeof(struct pm4_mes_map_queues),
.unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
- .set_grace_period_size = sizeof(struct pm4_mec_write_data_mmio),
+ .set_compute_queue_wait_counts_size = sizeof(struct pm4_mec_write_data_mmio),
.query_status_size = sizeof(struct pm4_mes_query_status),
.release_mem_size = 0,
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
index 347c86e1c378..de28fc585296 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
@@ -304,7 +304,7 @@ const struct packet_manager_funcs kfd_vi_pm_funcs = {
.set_resources = pm_set_resources_vi,
.map_queues = pm_map_queues_vi,
.unmap_queues = pm_unmap_queues_vi,
- .set_grace_period = NULL,
+ .set_compute_queue_wait_counts = NULL,
.query_status = pm_query_status_vi,
.release_mem = pm_release_mem_vi,
.map_process_size = sizeof(struct pm4_mes_map_process),
@@ -312,7 +312,7 @@ const struct packet_manager_funcs kfd_vi_pm_funcs = {
.set_resources_size = sizeof(struct pm4_mes_set_resources),
.map_queues_size = sizeof(struct pm4_mes_map_queues),
.unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
- .set_grace_period_size = 0,
+ .set_compute_queue_wait_counts_size = 0,
.query_status_size = sizeof(struct pm4_mes_query_status),
.release_mem_size = sizeof(struct pm4_mec_release_mem)
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 59619f794b6b..ae58d50b8eb9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -1417,7 +1417,7 @@ struct packet_manager_funcs {
int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer,
enum kfd_unmap_queues_filter mode,
uint32_t filter_param, bool reset);
- int (*set_grace_period)(struct packet_manager *pm, uint32_t *buffer,
+ int (*set_compute_queue_wait_counts)(struct packet_manager *pm, uint32_t *buffer,
uint32_t grace_period);
int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
uint64_t fence_address, uint64_t fence_value);
@@ -1429,7 +1429,7 @@ struct packet_manager_funcs {
int set_resources_size;
int map_queues_size;
int unmap_queues_size;
- int set_grace_period_size;
+ int set_compute_queue_wait_counts_size;
int query_status_size;
int release_mem_size;
};
@@ -1452,7 +1452,7 @@ int pm_send_unmap_queue(struct packet_manager *pm,
void pm_release_ib(struct packet_manager *pm);
-int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period);
+int pm_set_compute_queue_wait_counts(struct packet_manager *pm, uint32_t wait_counts_config);
/* Following PM funcs can be shared among VI and AI */
unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);
--
2.34.1
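
As additional context (a sketch under assumptions, not part of the patch):
unmap_queues_cpsch() uses the renamed helper in an override-and-restore
pattern: program non-default wait counts only when requested, unmap, then
restore the defaults. pm_set_wait_counts_stub() below is a hypothetical
stand-in that only logs; the runlist and fence handling are elided.

#include <stdint.h>
#include <stdio.h>

#define KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES 0xffffffff

/* Stand-in for pm_set_compute_queue_wait_counts(); it only logs here. */
static int pm_set_wait_counts_stub(uint32_t wait_counts_config)
{
	printf("program CP_IQ_WAIT_TIME2 with 0x%x\n", (unsigned)wait_counts_config);
	return 0;
}

/*
 * Shape of unmap_queues_cpsch(): override the wait counts only when a
 * non-default value is requested, unmap, then restore the defaults.
 */
static int unmap_with_wait_counts(uint32_t wait_counts_config)
{
	int retval = 0;

	if (wait_counts_config != KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES) {
		retval = pm_set_wait_counts_stub(wait_counts_config);
		if (retval)
			return retval;
	}

	/* ... build runlist, send unmap packet, wait for fence ... */

	if (wait_counts_config != KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES)
		retval = pm_set_wait_counts_stub(KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES);

	return retval;
}

int main(void)
{
	unmap_with_wait_counts(1);	/* non-default grace period */
	unmap_with_wait_counts(KFD_SET_DEFAULT_CP_QUEUE_WAIT_TIMES);
	return 0;
}
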