[PATCH] drm/scheduler: improve job distribution with multiple queues
Christian König
ckoenig.leichtzumerken at gmail.com
Wed Jan 15 14:35:35 UTC 2020
On 15.01.20 at 15:16, Nirmoy Das wrote:
> This patch uses score-based logic, instead of num_jobs, to select a new
> rq for better load balancing between multiple rqs/scheds.
>
> Below are test results after running amdgpu_test from mesa drm
>
> Before this patch:
>
> sched_name   number of times it got scheduled
> ==========   ================================
> sdma0        314
> sdma1        32
> comp_1.0.0   56
> comp_1.0.1   0
> comp_1.1.0   0
> comp_1.1.1   0
> comp_1.2.0   0
> comp_1.2.1   0
> comp_1.3.0   0
> comp_1.3.1   0
> After this patch:
>
> sched_name   number of times it got scheduled
> ==========   ================================
> sdma0        218
> sdma1        211
> comp_1.0.0   39
> comp_1.0.1   9
> comp_1.1.0   12
> comp_1.1.1   0
> comp_1.2.0   12
> comp_1.2.1   0
> comp_1.3.0   12
> comp_1.3.1   0
>
> Signed-off-by: Nirmoy Das <nirmoy.das at amd.com>
> ---
> drivers/gpu/drm/scheduler/sched_entity.c | 9 ++++-----
> drivers/gpu/drm/scheduler/sched_main.c | 5 +++--
> include/drm/gpu_scheduler.h | 6 +++---
> 3 files changed, 10 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
> index 2e3a058fc239..7120eeec1a9b 100644
> --- a/drivers/gpu/drm/scheduler/sched_entity.c
> +++ b/drivers/gpu/drm/scheduler/sched_entity.c
> @@ -130,7 +130,7 @@ static struct drm_sched_rq *
> drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
> {
> struct drm_sched_rq *rq = NULL;
> - unsigned int min_jobs = UINT_MAX, num_jobs;
> + unsigned int min_score = UINT_MAX, num_score;
> int i;
>
> for (i = 0; i < entity->num_sched_list; ++i) {
> @@ -141,9 +141,9 @@ drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
> continue;
> }
>
> - num_jobs = atomic_read(&sched->num_jobs);
> - if (num_jobs < min_jobs) {
> - min_jobs = num_jobs;
> + num_score = atomic_read(&sched->score);
> + if (num_score < min_score) {
> + min_score = num_score;
> rq = &entity->sched_list[i]->sched_rq[entity->priority];
> }
> }
> @@ -498,7 +498,6 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
> bool first;
>
> trace_drm_sched_job(sched_job, entity);
> - atomic_inc(&entity->rq->sched->num_jobs);
> WRITE_ONCE(entity->last_user, current->group_leader);
> first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
>
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> index 3fad5876a13f..f58a0e04ef2b 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -92,6 +92,7 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
> if (!list_empty(&entity->list))
> return;
> spin_lock(&rq->lock);
> + atomic_inc(&rq->sched->score);
> list_add_tail(&entity->list, &rq->entities);
> spin_unlock(&rq->lock);
> }
> @@ -110,6 +111,7 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
> if (list_empty(&entity->list))
> return;
> spin_lock(&rq->lock);
> + atomic_dec(&rq->sched->score);
> list_del_init(&entity->list);
> if (rq->current_entity == entity)
> rq->current_entity = NULL;
> @@ -655,7 +657,6 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
> struct drm_gpu_scheduler *sched = s_fence->sched;
>
> atomic_dec(&sched->hw_rq_count);
> - atomic_dec(&sched->num_jobs);
Please keep incrementing/decrementing the score when jobs are
added/completed.
In other words, we should have +1 score for each entity and +1 score for
each job in the scheduler.
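Roughly like this, i.e. re-adding the two hunks you removed, just with the
new name (untested sketch, not the final patch):

    /* drm_sched_entity_push_job(): a queued job adds +1 to the score */
    trace_drm_sched_job(sched_job, entity);
    atomic_inc(&entity->rq->sched->score);
    WRITE_ONCE(entity->last_user, current->group_leader);

    /* drm_sched_process_job(): a completed job drops the score again */
    atomic_dec(&sched->hw_rq_count);
    atomic_dec(&sched->score);

That way an entity with lots of queued work weighs more than an idle one,
and the load balancer still prefers the least busy scheduler.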
Apart from that, it looks good to me now.
Thanks,
Christian.
>
> trace_drm_sched_process_job(s_fence);
>
> @@ -830,7 +831,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
> spin_lock_init(&sched->job_list_lock);
> atomic_set(&sched->hw_rq_count, 0);
> INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
> - atomic_set(&sched->num_jobs, 0);
> + atomic_set(&sched->score, 0);
> atomic64_set(&sched->job_id_count, 0);
>
> /* Each scheduler will run on a seperate kernel thread */
> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
> index 96a1a1b7526e..eda58b22cf76 100644
> --- a/include/drm/gpu_scheduler.h
> +++ b/include/drm/gpu_scheduler.h
> @@ -261,7 +261,7 @@ struct drm_sched_backend_ops {
> * @job_list_lock: lock to protect the ring_mirror_list.
> * @hang_limit: once the hangs by a job crosses this limit then it is marked
> * guilty and it will be considered for scheduling further.
> - * @num_jobs: the number of jobs in queue in the scheduler
> + * @score: score to help loadbalancer pick a most idle sched
> * @ready: marks if the underlying HW is ready to work
> * @free_guilty: A hit to time out handler to free the guilty job.
> *
> @@ -282,8 +282,8 @@ struct drm_gpu_scheduler {
> struct list_head ring_mirror_list;
> spinlock_t job_list_lock;
> int hang_limit;
> - atomic_t num_jobs;
> - bool ready;
> + atomic_t score;
> + bool ready;
> bool free_guilty;
> };
>