<div dir="ltr"><br><br><div class="gmail_quote"><div dir="ltr">On Wed, Aug 8, 2018 at 4:44 PM Christian König <<a href="mailto:ckoenig.leichtzumerken@gmail.com">ckoenig.leichtzumerken@gmail.com</a>> wrote:<br></div><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">Looks like for correct debugging we need to know the scheduler even<br>
earlier. So move picking a rq for an entity into job creation.<br>
<br>
Signed-off-by: Christian König <<a href="mailto:christian.koenig@amd.com" target="_blank">christian.koenig@amd.com</a>><br>
---<br>
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 50 ++++++++++++++++++++-----------<br>
 drivers/gpu/drm/scheduler/sched_fence.c   |  2 +-<br>
 2 files changed, 33 insertions(+), 19 deletions(-)<br>
<br>
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c<br>
index bd7883d1b964..bb2bd4c07e85 100644<br>
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c<br>
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c<br>
@@ -552,6 +552,34 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)<br>
        return sched_job;<br>
 }<br>
<br>
+/**<br>
+ * drm_sched_entity_select_rq - select a new rq for the entity<br>
+ *<br>
+ * @entity: scheduler entity<br>
+ *<br>
+ * Check all prerequisites and select a new rq for the entity for load<br>
+ * balancing.<br>
+ */<br>
+static void drm_sched_entity_select_rq(struct drm_sched_entity *entity)<br>
+{<br>
+       struct dma_fence *fence;<br>
+       struct drm_sched_rq *rq;<br>
+<br>
+       if (!spsc_queue_count(&entity->job_queue) == 0 ||<br>
+           entity->num_rq_list <= 1)<br>
+               return;<br>
+<br>
+       fence = READ_ONCE(entity->last_scheduled);<br>
+       if (fence && !dma_fence_is_signaled(fence))<br>
+               return;<br>
+<br>
+       rq = drm_sched_entity_get_free_sched(entity);<br></blockquote><div>  We can add something like this here:<br></div><div>             if (rq == entity->rq)<br></div><div>                     return;<br><br></div><div>to avoid redundant reschedules. But then again, this is only a slight improvement over the original case, so we might as well move it to a different patch.<br><br></div><div>With or without this change, the patch is Reviewed-by: Nayan Deshmukh <<a href="mailto:nayan26deshmukh@gmail.com">nayan26deshmukh@gmail.com</a>> <br></div><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">
+       spin_lock(&entity->rq_lock);<br>
+       drm_sched_rq_remove_entity(entity->rq, entity);<br>
+       entity->rq = rq;<br>
+       spin_unlock(&entity->rq_lock);<br>
+}<br>
+<br>
 /**<br>
  * drm_sched_entity_push_job - Submit a job to the entity's job queue<br>
  *<br>
@@ -567,25 +595,8 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)<br>
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job,<br>
                               struct drm_sched_entity *entity)<br>
 {<br>
-       struct drm_sched_rq *rq = entity->rq;<br>
        bool first;<br>
<br>
-       first = spsc_queue_count(&entity->job_queue) == 0;<br>
-       if (first && (entity->num_rq_list > 1)) {<br>
-               struct dma_fence *fence;<br>
-<br>
-               fence = READ_ONCE(entity->last_scheduled);<br>
-               if (fence == NULL || dma_fence_is_signaled(fence)) {<br>
-                       rq = drm_sched_entity_get_free_sched(entity);<br>
-                       spin_lock(&entity->rq_lock);<br>
-                       drm_sched_rq_remove_entity(entity->rq, entity);<br>
-                       entity->rq = rq;<br>
-                       spin_unlock(&entity->rq_lock);<br>
-               }<br>
-       }<br>
-<br>
-       sched_job->sched = entity->rq->sched;<br>
-       sched_job->s_fence->sched = entity->rq->sched;<br>
        trace_drm_sched_job(sched_job, entity);<br>
        atomic_inc(&entity->rq->sched->num_jobs);<br>
        WRITE_ONCE(entity->last_user, current->group_leader);<br>
@@ -790,7 +801,10 @@ int drm_sched_job_init(struct drm_sched_job *job,<br>
                       struct drm_sched_entity *entity,<br>
                       void *owner)<br>
 {<br>
-       struct drm_gpu_scheduler *sched = entity->rq->sched;<br>
+       struct drm_gpu_scheduler *sched;<br>
+<br>
+       drm_sched_entity_select_rq(entity);<br>
+       sched = entity->rq->sched;<br>
<br>
        job->sched = sched;<br>
        job->entity = entity;<br>
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c<br>
index 6dab18d288d7..4029312fdd81 100644<br>
--- a/drivers/gpu/drm/scheduler/sched_fence.c<br>
+++ b/drivers/gpu/drm/scheduler/sched_fence.c<br>
@@ -172,7 +172,7 @@ struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,<br>
                return NULL;<br>
<br>
        fence->owner = owner;<br>
-       fence->sched = NULL;<br>
+       fence->sched = entity->rq->sched;<br>
        spin_lock_init(&fence->lock);<br>
<br>
        seq = atomic_inc_return(&entity->fence_seq);<br>
-- <br>
2.14.1<br>
<br>
</blockquote></div></div>