[PATCH v2 2/9] drm/sched: Move schedule policy to scheduler / entity
Matthew Brost
matthew.brost at intel.com
Sat Aug 12 03:20:08 UTC 2023
On Fri, Aug 11, 2023 at 06:43:22PM -0300, Maira Canal wrote:
> Hi Matthew,
>
> I'm not sure in which tree you plan to apply this series, but if you
> plan to apply it on drm-misc-next, it would be nice to rebase on top of
> it. It would make it easier for driver maintainers to review it.
>
I rebased this on drm-tip but forgot to include the first patch in the
series. Let me make sure I get this right and send out a v3 early next
week.
> Apart from the small nit below, I tested the Xe tree on v3d and things
> seem to be running smoothly.
>
> On 8/10/23 23:31, Matthew Brost wrote:
> > Rather than a global modparam for scheduling policy, move the scheduling
> > policy to the scheduler / entity so users can control the policy of each
> > scheduler / entity.
> >
> > v2:
> > - s/DRM_SCHED_POLICY_MAX/DRM_SCHED_POLICY_COUNT (Luben)
> > - Only include policy in scheduler (Luben)
> >
> > Signed-off-by: Matthew Brost <matthew.brost at intel.com>
> > ---
> > drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 +
> > drivers/gpu/drm/etnaviv/etnaviv_sched.c | 3 ++-
> > drivers/gpu/drm/lima/lima_sched.c | 3 ++-
> > drivers/gpu/drm/msm/msm_ringbuffer.c | 3 ++-
> > drivers/gpu/drm/nouveau/nouveau_sched.c | 3 ++-
> > drivers/gpu/drm/panfrost/panfrost_job.c | 3 ++-
> > drivers/gpu/drm/scheduler/sched_entity.c | 24 ++++++++++++++++++----
> > drivers/gpu/drm/scheduler/sched_main.c | 23 +++++++++++++++------
> > drivers/gpu/drm/v3d/v3d_sched.c | 15 +++++++++-----
> > include/drm/gpu_scheduler.h | 20 ++++++++++++------
> > 10 files changed, 72 insertions(+), 26 deletions(-)
> >
>
> [...]
>
> > diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
> > index 38e092ea41e6..5e3fe77fa991 100644
> > --- a/drivers/gpu/drm/v3d/v3d_sched.c
> > +++ b/drivers/gpu/drm/v3d/v3d_sched.c
> > @@ -391,7 +391,8 @@ v3d_sched_init(struct v3d_dev *v3d)
> > &v3d_bin_sched_ops, NULL,
> > hw_jobs_limit, job_hang_limit,
> > msecs_to_jiffies(hang_limit_ms), NULL,
> > - NULL, "v3d_bin", v3d->drm.dev);
> > + NULL, "v3d_bin", DRM_SCHED_POLICY_DEFAULT,
> > + v3d->drm.dev);
> > if (ret)
> > return ret;
> > @@ -399,7 +400,8 @@ v3d_sched_init(struct v3d_dev *v3d)
> > &v3d_render_sched_ops, NULL,
> > hw_jobs_limit, job_hang_limit,
> > msecs_to_jiffies(hang_limit_ms), NULL,
> > - NULL, "v3d_render", v3d->drm.dev);
> > + ULL, "v3d_render", DRM_SCHED_POLICY_DEFAULT,
>
> Small nit: s/ULL/NULL
>
Yep, will fix.
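
For anyone skimming the thread, the net effect at a driver call site is
just one extra argument. A minimal sketch based on the v3d hunks above
and the new drm_sched_init() signature in the gpu_scheduler.h hunk below
('sched', 'ops' and the limits stand in for the driver's own state):

	/*
	 * Sketch only: the DRM_SCHED_POLICY_* argument is the new part,
	 * slotted between the scheduler name and the struct device.
	 */
	ret = drm_sched_init(sched, ops, NULL,		/* submit_wq */
			     hw_jobs_limit, job_hang_limit,
			     msecs_to_jiffies(hang_limit_ms),
			     NULL, NULL,		/* timeout_wq, score */
			     "v3d_render", DRM_SCHED_POLICY_DEFAULT,
			     v3d->drm.dev);
	if (ret)
		return ret;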
Matt
> Best Regards,
> - Maíra
>
> > + v3d->drm.dev);
> > if (ret)
> > goto fail;
> > @@ -407,7 +409,8 @@ v3d_sched_init(struct v3d_dev *v3d)
> > &v3d_tfu_sched_ops, NULL,
> > hw_jobs_limit, job_hang_limit,
> > msecs_to_jiffies(hang_limit_ms), NULL,
> > - NULL, "v3d_tfu", v3d->drm.dev);
> > + NULL, "v3d_tfu", DRM_SCHED_POLICY_DEFAULT,
> > + v3d->drm.dev);
> > if (ret)
> > goto fail;
> > @@ -416,7 +419,8 @@ v3d_sched_init(struct v3d_dev *v3d)
> > &v3d_csd_sched_ops, NULL,
> > hw_jobs_limit, job_hang_limit,
> > msecs_to_jiffies(hang_limit_ms), NULL,
> > - NULL, "v3d_csd", v3d->drm.dev);
> > + NULL, "v3d_csd", DRM_SCHED_POLICY_DEFAULT,
> > + v3d->drm.dev);
> > if (ret)
> > goto fail;
> > @@ -424,7 +428,8 @@ v3d_sched_init(struct v3d_dev *v3d)
> > &v3d_cache_clean_sched_ops, NULL,
> > hw_jobs_limit, job_hang_limit,
> > msecs_to_jiffies(hang_limit_ms), NULL,
> > - NULL, "v3d_cache_clean", v3d->drm.dev);
> > + NULL, "v3d_cache_clean",
> > + DRM_SCHED_POLICY_DEFAULT, v3d->drm.dev);
> > if (ret)
> > goto fail;
> > }
> > diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
> > index 278106e358a8..897d52a4ff4f 100644
> > --- a/include/drm/gpu_scheduler.h
> > +++ b/include/drm/gpu_scheduler.h
> > @@ -72,11 +72,15 @@ enum drm_sched_priority {
> > DRM_SCHED_PRIORITY_UNSET = -2
> > };
> > -/* Used to chose between FIFO and RR jobs scheduling */
> > -extern int drm_sched_policy;
> > -
> > -#define DRM_SCHED_POLICY_RR 0
> > -#define DRM_SCHED_POLICY_FIFO 1
> > +/* Used to choose the default scheduling policy */
> > +extern int default_drm_sched_policy;
> > +
> > +enum drm_sched_policy {
> > + DRM_SCHED_POLICY_DEFAULT,
> > + DRM_SCHED_POLICY_RR,
> > + DRM_SCHED_POLICY_FIFO,
> > + DRM_SCHED_POLICY_COUNT,
> > +};
> > /**
> > * struct drm_sched_entity - A wrapper around a job queue (typically
> > @@ -489,6 +493,7 @@ struct drm_sched_backend_ops {
> > * guilty and it will no longer be considered for scheduling.
> > * @score: score to help loadbalancer pick a idle sched
> > * @_score: score used when the driver doesn't provide one
> > + * @sched_policy: Scheduling policy for this scheduler
> > * @ready: marks if the underlying HW is ready to work
> > * @free_guilty: A hit to time out handler to free the guilty job.
> > * @pause_submit: pause queuing of @work_submit on @submit_wq
> > @@ -514,6 +519,7 @@ struct drm_gpu_scheduler {
> > int hang_limit;
> > atomic_t *score;
> > atomic_t _score;
> > + enum drm_sched_policy sched_policy;
> > bool ready;
> > bool free_guilty;
> > bool pause_submit;
> > @@ -525,7 +531,9 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
> > struct workqueue_struct *submit_wq,
> > uint32_t hw_submission, unsigned hang_limit,
> > long timeout, struct workqueue_struct *timeout_wq,
> > - atomic_t *score, const char *name, struct device *dev);
> > + atomic_t *score, const char *name,
> > + enum drm_sched_policy sched_policy,
> > + struct device *dev);
> > void drm_sched_fini(struct drm_gpu_scheduler *sched);
> > int drm_sched_job_init(struct drm_sched_job *job,
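
One more note on the interface above: with the enum in place, a driver
that wants FIFO on a particular scheduler passes it explicitly, while
DRM_SCHED_POLICY_DEFAULT keeps the modparam-selected default. A hedged
sketch (only drm_sched_init() and the enum come from the patch; the
surrounding names are illustrative):

	/*
	 * Opt one scheduler in to FIFO while the others stay on the
	 * default policy; 'ring', 'my_ops' and the limits are
	 * placeholders, not part of the patch.
	 */
	enum drm_sched_policy policy = want_fifo ?
		DRM_SCHED_POLICY_FIFO : DRM_SCHED_POLICY_DEFAULT;

	ret = drm_sched_init(&ring->sched, &my_ops, NULL,
			     hw_submission, hang_limit,
			     msecs_to_jiffies(timeout_ms), NULL,
			     NULL, "my_ring", policy, dev);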