[PATCH] drm/amdkfd: Make queue mapping interfaces more consistent
Felix Kuehling
Felix.Kuehling at amd.com
Wed Oct 18 02:55:13 UTC 2017
Pass the unmap filter and filter parameter directly to execute_queues_cpsch,
matching the unmap_queues_cpsch interface, instead of deriving the filter
from a boolean static_queues_included flag.
Signed-off-by: Felix Kuehling <Felix.Kuehling at amd.com>
---
.../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 32 +++++++++++-----------
1 file changed, 16 insertions(+), 16 deletions(-)
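
For illustration only (not part of the patch), a call site that previously
requested preemption of all queues via the boolean flag now names the filter
and passes the filter parameter explicitly, mirroring the unmap_queues_cpsch
signature; both calls below are taken from the hunks that follow:

    /* before: boolean selects dynamic-only vs. all queues */
    execute_queues_cpsch(dqm, true);

    /* after: same filter/parameter pair as unmap_queues_cpsch */
    execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);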
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 933adb5..da3b743 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -45,7 +45,8 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static int execute_queues_cpsch(struct device_queue_manager *dqm,
- bool static_queues_included);
+ enum kfd_unmap_queues_filter filter,
+ uint32_t filter_param);
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_unmap_queues_filter filter,
uint32_t filter_param);
@@ -741,7 +742,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
init_interrupts(dqm);
mutex_lock(&dqm->lock);
- execute_queues_cpsch(dqm, false);
+ execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
mutex_unlock(&dqm->lock);
return 0;
@@ -787,7 +788,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
list_add(&kq->list, &qpd->priv_queue_list);
dqm->queue_count++;
qpd->is_debug = true;
- execute_queues_cpsch(dqm, false);
+ execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
mutex_unlock(&dqm->lock);
return 0;
@@ -801,7 +802,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
list_del(&kq->list);
dqm->queue_count--;
qpd->is_debug = false;
- execute_queues_cpsch(dqm, true);
+ execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
/*
* Unconditionally decrement this counter, regardless of the queue's
* type.
@@ -859,7 +860,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
qpd->queue_count++;
if (q->properties.is_active) {
dqm->queue_count++;
- retval = execute_queues_cpsch(dqm, false);
+ retval = execute_queues_cpsch(dqm,
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
}
if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
@@ -964,16 +966,12 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
/* dqm->lock mutex has to be locked before calling this function */
static int execute_queues_cpsch(struct device_queue_manager *dqm,
- bool static_queues_included)
+ enum kfd_unmap_queues_filter filter,
+ uint32_t filter_param)
{
int retval;
- enum kfd_unmap_queues_filter filter;
-
- filter = static_queues_included ?
- KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
- KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
- retval = unmap_queues_cpsch(dqm, filter, 0);
+ retval = unmap_queues_cpsch(dqm, filter, filter_param);
if (retval) {
pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
return retval;
@@ -1024,7 +1022,8 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
if (q->properties.is_active)
dqm->queue_count--;
- retval = execute_queues_cpsch(dqm, false);
+ retval = execute_queues_cpsch(dqm,
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
if (retval == -ETIME)
qpd->reset_wavefronts = true;
@@ -1157,7 +1156,8 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
struct kernel_queue *kq, *kq_next;
struct mqd_manager *mqd;
struct device_process_node *cur, *next_dpn;
- bool unmap_static_queues = false;
+ enum kfd_unmap_queues_filter filter =
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
retval = 0;
@@ -1169,7 +1169,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
dqm->queue_count--;
qpd->is_debug = false;
dqm->total_queue_count--;
- unmap_static_queues = true;
+ filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
}
/* Clear all user mode queues */
@@ -1193,7 +1193,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
}
}
- retval = execute_queues_cpsch(dqm, unmap_static_queues);
+ retval = execute_queues_cpsch(dqm, filter, 0);
if (retval || qpd->reset_wavefronts) {
pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
--
2.7.4