<div dir="ltr"><div>Hi Christian,<br><br></div>Good patch. But it might lead to bad performance when we reschedule when the hardware queue of the engine on which the last job was pushed is not full.<br><div><br><div class="gmail_quote"><div dir="ltr">On Mon, Aug 6, 2018 at 5:49 PM Christian König <<a href="mailto:ckoenig.leichtzumerken@gmail.com" target="_blank">ckoenig.leichtzumerken@gmail.com</a>> wrote:<br></div><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">This fixes accessing the last_scheduled fence from multiple threads as<br>
> well as makes it easier to move entities between schedulers.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/gpu/drm/scheduler/gpu_scheduler.c | 34 +++++++++++++++++++++++++------
>  1 file changed, 28 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> index 029863726c99..e4b71a543481 100644
> --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> @@ -523,6 +523,29 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
>         return false;
>  }
>
> +static bool drm_sched_entity_last_scheduled_dep(struct drm_sched_entity *entity)
> +{
> +       struct drm_sched_fence *s_fence;
> +
> +       if (!entity->last_scheduled)
> +               return false;
> +
> +       /*
> +        * Check if the last submission was handled by a different scheduler
> +        */
> +       s_fence = to_drm_sched_fence(entity->last_scheduled);
> +       if (s_fence && s_fence->sched != entity->rq->sched) {
> +               entity->dependency = dma_fence_get(entity->last_scheduled);
> +               if (!dma_fence_add_callback(entity->dependency, &entity->cb,
> +                                           drm_sched_entity_wakeup))
> +                       return true;
> +
> +               dma_fence_put(entity->dependency);
> +               entity->dependency = NULL;
> +       }
> +       return false;
> +}
> +
>  static struct drm_sched_job *
>  drm_sched_entity_pop_job(struct drm_sched_entity *entity)
>  {
> @@ -537,6 +560,9 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
>                 if (drm_sched_entity_add_dependency_cb(entity))
>                         return NULL;
>
> +       if (drm_sched_entity_last_scheduled_dep(entity))
> +               return NULL;
> +
>         /* skip jobs from entity that marked guilty */
>         if (entity->guilty && atomic_read(entity->guilty))
>                 dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);
> @@ -564,14 +590,10 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
>                                struct drm_sched_entity *entity)
>  {
>         struct drm_sched_rq *rq = entity->rq;
> -       bool first, reschedule, idle;
> +       bool first;
>
> -       idle = entity->last_scheduled == NULL ||
> -               dma_fence_is_signaled(entity->last_scheduled);
>         first = spsc_queue_count(&entity->job_queue) == 0;
> -       reschedule = idle && first && (entity->num_rq_list > 1);
> -
> -       if (reschedule) {
> +       if (first && (entity->num_rq_list > 1)) {

Something like this might be better, so that we only look for a new scheduler when the hardware queue of the current engine is actually full:

    if (first && (entity->num_rq_list > 1) &&
        (hw_rq_count == hw_submission_limit))
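In full it could look roughly like the sketch below (untested, just to illustrate the idea; drm_sched_entity_hw_queue_full is a made-up helper name, and I'm assuming the hw_rq_count/hw_submission_limit of the scheduler the entity is currently on are the right things to compare):

/* Hypothetical helper: true when the hardware ring of the scheduler this
 * entity is currently assigned to has no room left for new jobs. */
static bool drm_sched_entity_hw_queue_full(struct drm_sched_entity *entity)
{
        struct drm_gpu_scheduler *sched = entity->rq->sched;

        return atomic_read(&sched->hw_rq_count) >= sched->hw_submission_limit;
}

and then in drm_sched_entity_push_job():

        first = spsc_queue_count(&entity->job_queue) == 0;
        if (first && (entity->num_rq_list > 1) &&
            drm_sched_entity_hw_queue_full(entity)) {
                rq = drm_sched_entity_get_free_sched(entity);
                ...
        }

That way the load balancing only kicks in when staying on the current engine would actually make jobs wait for ring space.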
>                 rq = drm_sched_entity_get_free_sched(entity);
>                 spin_lock(&entity->rq_lock);
>                 drm_sched_rq_remove_entity(entity->rq, entity);
> --
> 2.14.1
>

Regards,
Nayan