[PATCH 1/2] drm/sched: implement and export drm_sched_pick_best
Christian König
christian.koenig at amd.com
Fri Mar 13 13:39:02 UTC 2020
Am 13.03.20 um 13:05 schrieb Nirmoy Das:
> Refactor drm_sched_entity_get_free_sched() to move the logic of picking
> the least loaded drm scheduler from a drm scheduler list to implement
> drm_sched_pick_best(). This patch also exports drm_sched_pick_best() so
> that it can be utilized by other drm drivers.
>
> Signed-off-by: Nirmoy Das <nirmoy.das at amd.com>
> ---
> drivers/gpu/drm/scheduler/sched_entity.c | 20 +++-----------
> drivers/gpu/drm/scheduler/sched_main.c | 35 ++++++++++++++++++++++++
> include/drm/gpu_scheduler.h | 3 ++
> 3 files changed, 42 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
> index d631521a9679..3f6397d60bff 100644
> --- a/drivers/gpu/drm/scheduler/sched_entity.c
> +++ b/drivers/gpu/drm/scheduler/sched_entity.c
> @@ -147,24 +147,12 @@ bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
> static struct drm_sched_rq *
> drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
> {
> + struct drm_gpu_scheduler *sched;
> struct drm_sched_rq *rq = NULL;
> - unsigned int min_jobs = UINT_MAX, num_jobs;
> - int i;
> -
> - for (i = 0; i < entity->num_sched_list; ++i) {
> - struct drm_gpu_scheduler *sched = entity->sched_list[i];
>
> - if (!entity->sched_list[i]->ready) {
> - DRM_WARN("sched%s is not ready, skipping", sched->name);
> - continue;
> - }
> -
> - num_jobs = atomic_read(&sched->num_jobs);
> - if (num_jobs < min_jobs) {
> - min_jobs = num_jobs;
> - rq = &entity->sched_list[i]->sched_rq[entity->priority];
> - }
> - }
> + sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
> + if (sched)
> + rq = &sched->sched_rq[entity->priority];
Keeping the function just for the two lines doesn't make much sense.
IIRC it is called only once, so just inline it there.
>
> return rq;
> }
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> index 3fad5876a13f..d640f4087795 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -705,6 +705,41 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
> return job;
> }
>
> +/**
> + * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
> + * @sched_list: list of drm_gpu_schedulers
> + * @num_sched_list: number of drm_gpu_schedulers in the sched_list
> + *
> + * Returns pointer of the sched with the least load or NULL if none of the
> + * drm_gpu_schedulers are ready
> + */
> +struct drm_gpu_scheduler *
> +drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
> + unsigned int num_sched_list)
> +{
> + struct drm_gpu_scheduler *sched, *picked_sched = NULL;
> + unsigned int min_jobs = UINT_MAX, num_jobs;
> + int i;
> +
> + for (i = 0; i < num_sched_list; ++i) {
> + sched = sched_list[i];
> +
> + if (!sched->ready) {
> + DRM_WARN("sched%s is not ready, skipping", sched->name);
There is a space missing here between "sched" and "%s". And maybe write
"scheduler" in the message.
Apart from that it looks good offhand,
Christian.
> + continue;
> + }
> +
> + num_jobs = atomic_read(&sched->num_jobs);
> + if (num_jobs < min_jobs) {
> + min_jobs = num_jobs;
> + picked_sched = sched;
> + }
> + }
> +
> + return picked_sched;
> +}
> +EXPORT_SYMBOL(drm_sched_pick_best);
> +
> /**
> * drm_sched_blocked - check if the scheduler is blocked
> *
> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
> index ae39eacee250..ca6b8b01fac9 100644
> --- a/include/drm/gpu_scheduler.h
> +++ b/include/drm/gpu_scheduler.h
> @@ -341,5 +341,8 @@ void drm_sched_fence_finished(struct drm_sched_fence *fence);
> unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
> void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
> unsigned long remaining);
> +struct drm_gpu_scheduler *
> +drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
> + unsigned int num_sched_list);
>
> #endif
> --
> 2.25.0
>
More information about the amd-gfx
mailing list