[PATCH 2/2] drm/scheduler: add last_scheduled dependency handling

Christian König ckoenig.leichtzumerken at gmail.com
Mon Aug 6 14:11:50 UTC 2018


Am 06.08.2018 um 15:23 schrieb Nayan Deshmukh:
> Hi Christian,
>
> Good patch. But it might lead to bad performance when we reschedule
> even though the hardware queue of the engine the last job was pushed
> to is not yet full.
>
> On Mon, Aug 6, 2018 at 5:49 PM Christian König
> <ckoenig.leichtzumerken at gmail.com> wrote:
>
>     This fixes accessing the last_scheduled fence from multiple threads as
>     well as makes it easier to move entities between schedulers.
>
>     Signed-off-by: Christian König <christian.koenig at amd.com>
>     ---
>      drivers/gpu/drm/scheduler/gpu_scheduler.c | 34 +++++++++++++++++++++++++------
>      1 file changed, 28 insertions(+), 6 deletions(-)
>
>     diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
>     index 029863726c99..e4b71a543481 100644
>     --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
>     +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
>     @@ -523,6 +523,29 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
>             return false;
>      }
>
>     +static bool drm_sched_entity_last_scheduled_dep(struct drm_sched_entity *entity)
>     +{
>     +       struct drm_sched_fence *s_fence;
>     +
>     +       if (!entity->last_scheduled)
>     +               return false;
>     +
>     +       /*
>     +        * Check if the last submission was handled by a different scheduler
>     +        */
>     +       s_fence = to_drm_sched_fence(entity->last_scheduled);
>     +       if (s_fence && s_fence->sched != entity->rq->sched) {
>     +               entity->dependency = dma_fence_get(entity->last_scheduled);
>     +               if (!dma_fence_add_callback(entity->dependency, &entity->cb,
>     +                                           drm_sched_entity_wakeup))
>     +                       return true;
>     +
>     +               dma_fence_put(entity->dependency);
>     +               entity->dependency = NULL;
>     +       }
>     +       return false;
>     +}
>     +
>      static struct drm_sched_job *
>      drm_sched_entity_pop_job(struct drm_sched_entity *entity)
>      {
>     @@ -537,6 +560,9 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
>                     if (drm_sched_entity_add_dependency_cb(entity))
>                             return NULL;
>
>     +       if (drm_sched_entity_last_scheduled_dep(entity))
>     +               return NULL;
>     +
>             /* skip jobs from entity that marked guilty */
>             if (entity->guilty && atomic_read(entity->guilty))
>                     dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);
>     @@ -564,14 +590,10 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
>                                    struct drm_sched_entity *entity)
>      {
>             struct drm_sched_rq *rq = entity->rq;
>     -       bool first, reschedule, idle;
>     +       bool first;
>
>     -       idle = entity->last_scheduled == NULL ||
>     -              dma_fence_is_signaled(entity->last_scheduled);
>             first = spsc_queue_count(&entity->job_queue) == 0;
>     -       reschedule = idle && first && (entity->num_rq_list > 1);
>     -
>     -       if (reschedule) {
>     +       if (first && (entity->num_rq_list > 1)) {
>
> Something like this might be better:
>
> if (first && (entity->num_rq_list > 1) &&
>     (hw_rq_count == hw_submission_limit))
>
>                     rq = drm_sched_entity_get_free_sched(entity);
>                     spin_lock(&entity->rq_lock);
>                     drm_sched_rq_remove_entity(entity->rq, entity);
>     -- 
>     2.14.1
>

Hey, good idea. Going to take that into account.
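
Just so we are on the same page, I read the suggestion as: only look for
a different scheduler once the current ring is actually saturated. A
rough, untested sketch of how that condition could look, using the
hw_rq_count and hw_submission_limit fields of struct drm_gpu_scheduler,
which I take to be the counters you are referring to:

        /* Hypothetical: only consider rescheduling to another scheduler
         * when the current ring has already reached its submission limit.
         */
        if (first && (entity->num_rq_list > 1) &&
            (atomic_read(&entity->rq->sched->hw_rq_count) >=
             entity->rq->sched->hw_submission_limit)) {

with the rest of the reschedule path staying as in the patch. Does that
match what you had in mind?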

Christian.

> Regards,
> Nayan
