[PATCH 4/9] drm/amdkfd: Avoid submitting an unnecessary packet to HWS
Felix Kuehling
Felix.Kuehling at amd.com
Wed Sep 27 04:09:51 UTC 2017
From: Yong Zhao <yong.zhao at amd.com>

Previously, destroy_kernel_queue_cpsch() sent two unmap packets to the
HWS: an explicit unmap with the ALL_QUEUES filter to preempt the DIQ,
followed by execute_queues_cpsch(), which unmapped the dynamic queues
again before remapping. Instead, let execute_queues_cpsch() choose the
unmap filter through a new static_queues_included parameter, so a
single unmap packet covers both cases.

Signed-off-by: Yong Zhao <yong.zhao at amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling at amd.com>
---
.../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 28 ++++++++++++----------
1 file changed, 16 insertions(+), 12 deletions(-)
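
For context, a condensed before/after sketch of destroy_kernel_queue_cpsch(),
assembled from the hunks below (an editorial illustration, not part of the
patch; error handling and unrelated statements are elided):

    /* Before: DIQ teardown sent two unmap packets to the HWS */
    mutex_lock(&dqm->lock);
    /* here we actually preempt the DIQ */
    unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
    list_del(&kq->list);
    dqm->queue_count--;
    qpd->is_debug = false;
    execute_queues_cpsch(dqm); /* unmaps dynamic queues again, then remaps */

    /* After: the filter choice moves into execute_queues_cpsch(), so a
     * single unmap packet with the ALL_QUEUES filter preempts the DIQ
     * and the dynamic queues before the remap.
     */
    mutex_lock(&dqm->lock);
    list_del(&kq->list);
    dqm->queue_count--;
    qpd->is_debug = false;
    execute_queues_cpsch(dqm, true); /* true: include static queues (DIQ) */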
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index dccb493..0f2a756 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -44,7 +44,8 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
 					struct queue *q,
 					struct qcm_process_device *qpd);
 
-static int execute_queues_cpsch(struct device_queue_manager *dqm);
+static int execute_queues_cpsch(struct device_queue_manager *dqm,
+				bool static_queues_included);
 static int unmap_queues_cpsch(struct device_queue_manager *dqm,
 				enum kfd_unmap_queues_filter filter,
 				uint32_t filter_param);
@@ -729,7 +730,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
 	init_interrupts(dqm);
 
 	mutex_lock(&dqm->lock);
-	execute_queues_cpsch(dqm);
+	execute_queues_cpsch(dqm, false);
 	mutex_unlock(&dqm->lock);
 
 	return 0;
@@ -775,7 +776,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
 	list_add(&kq->list, &qpd->priv_queue_list);
 	dqm->queue_count++;
 	qpd->is_debug = true;
-	execute_queues_cpsch(dqm);
+	execute_queues_cpsch(dqm, false);
 	mutex_unlock(&dqm->lock);
 
 	return 0;
@@ -786,12 +787,10 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
 					struct qcm_process_device *qpd)
 {
 	mutex_lock(&dqm->lock);
-	/* here we actually preempt the DIQ */
-	unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
 	list_del(&kq->list);
 	dqm->queue_count--;
 	qpd->is_debug = false;
-	execute_queues_cpsch(dqm);
+	execute_queues_cpsch(dqm, true);
 	/*
 	 * Unconditionally decrement this counter, regardless of the queue's
 	 * type.
@@ -850,7 +849,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 	list_add(&q->list, &qpd->queues_list);
 	if (q->properties.is_active) {
 		dqm->queue_count++;
-		retval = execute_queues_cpsch(dqm);
+		retval = execute_queues_cpsch(dqm, false);
 	}
 
 	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
@@ -960,14 +959,19 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
 }
 
 /* dqm->lock mutex has to be locked before calling this function */
-static int execute_queues_cpsch(struct device_queue_manager *dqm)
+static int execute_queues_cpsch(struct device_queue_manager *dqm,
+				bool static_queues_included)
 {
 	int retval;
+	enum kfd_unmap_queues_filter filter;
+
+	filter = static_queues_included ?
+			KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
+			KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
 
-	retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
-			0);
+	retval = unmap_queues_cpsch(dqm, filter, 0);
 	if (retval) {
-		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption");
+		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
 		return retval;
 	}
 
@@ -1013,7 +1017,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
 	if (q->properties.is_active)
 		dqm->queue_count--;
 
-	execute_queues_cpsch(dqm);
+	execute_queues_cpsch(dqm, false);
 
 	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
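
A usage note (editorial, not part of the patch): with the new parameter,
each call site picks the preemption scope explicitly, roughly as follows:

    /* common case: only dynamic queues are preempted and remapped */
    retval = execute_queues_cpsch(dqm, false);

    /* debug-queue (DIQ) teardown: static queues are preempted as well */
    retval = execute_queues_cpsch(dqm, true);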
--
2.7.4