[PATCH] drm/sched: Add device pointer to drm_gpu_scheduler
Christian König
christian.koenig at amd.com
Thu Feb 17 10:40:23 UTC 2022
Am 17.02.22 um 11:38 schrieb Jiawei Gu:
> Add a device pointer so the scheduler's printing can use
> DRM_DEV_ERROR() instead, which makes life easier in multi-GPU
> scenarios.
>
> v2: update all callers of drm_sched_init()
That looks better, but please send it to the dri-devel mailing list as well.
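
Just for illustration, with the new prototype a driver that has its struct device around at init time simply hands it through, as amdgpu does with adev->dev in the hunk below. Rough sketch only, the ring/ops names here are made up and not from the patch:

	/* sketch: pass the driver's struct device as the new last argument */
	r = drm_sched_init(&ring->sched, &my_sched_ops,
			   hw_submission, hang_limit, timeout,
			   NULL, "my_ring", adev->dev);
	if (r)
		dev_err(adev->dev, "Failed to create scheduler: %d\n", r);

Drivers without a sensible device handle can keep passing NULL, in which case DRM_DEV_ERROR() degrades to the old un-prefixed output.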
Thanks,
Christian.
>
> Signed-off-by: Jiawei Gu <Jiawei.Gu at amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 2 +-
> drivers/gpu/drm/etnaviv/etnaviv_sched.c | 2 +-
> drivers/gpu/drm/lima/lima_sched.c | 2 +-
> drivers/gpu/drm/panfrost/panfrost_job.c | 2 +-
> drivers/gpu/drm/scheduler/sched_main.c | 9 +++++----
> drivers/gpu/drm/v3d/v3d_sched.c | 10 +++++-----
> include/drm/gpu_scheduler.h | 3 ++-
> 7 files changed, 16 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> index 4787cb3acaed..98217b1487fe 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> @@ -508,7 +508,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
>
> r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
> num_hw_submission, amdgpu_job_hang_limit,
> - timeout, sched_score, ring->name);
> + timeout, sched_score, ring->name, adev->dev);
> if (r) {
> DRM_ERROR("Failed to create scheduler on ring %s.\n",
> ring->name);
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> index 19826e504efc..4240f2ae0ab3 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
> @@ -190,7 +190,7 @@ int etnaviv_sched_init(struct etnaviv_gpu *gpu)
>
> ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
> etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
> - msecs_to_jiffies(500), NULL, dev_name(gpu->dev));
> + msecs_to_jiffies(500), NULL, dev_name(gpu->dev), NULL);
> if (ret)
> return ret;
>
> diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
> index ecf3267334ff..4b3a43a2b3e3 100644
> --- a/drivers/gpu/drm/lima/lima_sched.c
> +++ b/drivers/gpu/drm/lima/lima_sched.c
> @@ -509,7 +509,7 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
>
> return drm_sched_init(&pipe->base, &lima_sched_ops, 1,
> lima_job_hang_limit, msecs_to_jiffies(timeout),
> - NULL, name);
> + NULL, name, NULL);
> }
>
> void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
> diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
> index 2df3e999a38d..215709d61315 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_job.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_job.c
> @@ -627,7 +627,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
> ret = drm_sched_init(&js->queue[j].sched,
> &panfrost_sched_ops,
> 1, 0, msecs_to_jiffies(JOB_TIMEOUT_MS),
> - NULL, "pan_js");
> + NULL, "pan_js", NULL);
> if (ret) {
> dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
> goto err_sched;
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> index 3e0bbc797eaa..4404af323321 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -489,7 +489,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
> if (r == -ENOENT)
> drm_sched_job_done(s_job);
> else if (r)
> - DRM_ERROR("fence add callback failed (%d)\n",
> + DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
> r);
> } else
> drm_sched_job_done(s_job);
> @@ -815,7 +815,7 @@ static int drm_sched_main(void *param)
> if (r == -ENOENT)
> drm_sched_job_done(sched_job);
> else if (r)
> - DRM_ERROR("fence add callback failed (%d)\n",
> + DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
> r);
> dma_fence_put(fence);
> } else {
> @@ -846,7 +846,7 @@ static int drm_sched_main(void *param)
> int drm_sched_init(struct drm_gpu_scheduler *sched,
> const struct drm_sched_backend_ops *ops,
> unsigned hw_submission, unsigned hang_limit, long timeout,
> - atomic_t *score, const char *name)
> + atomic_t *score, const char *name, struct device *dev)
> {
> int i, ret;
> sched->ops = ops;
> @@ -855,6 +855,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
> sched->timeout = timeout;
> sched->hang_limit = hang_limit;
> sched->score = score ? score : &sched->_score;
> + sched->dev = dev;
> for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
> drm_sched_rq_init(sched, &sched->sched_rq[i]);
>
> @@ -872,7 +873,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
> if (IS_ERR(sched->thread)) {
> ret = PTR_ERR(sched->thread);
> sched->thread = NULL;
> - DRM_ERROR("Failed to create scheduler for %s.\n", name);
> + DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name);
> return ret;
> }
>
> diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
> index 8992480c88fa..6d68b155c3cc 100644
> --- a/drivers/gpu/drm/v3d/v3d_sched.c
> +++ b/drivers/gpu/drm/v3d/v3d_sched.c
> @@ -403,7 +403,7 @@ v3d_sched_init(struct v3d_dev *v3d)
> &v3d_bin_sched_ops,
> hw_jobs_limit, job_hang_limit,
> msecs_to_jiffies(hang_limit_ms),
> - NULL, "v3d_bin");
> + NULL, "v3d_bin", NULL);
> if (ret) {
> dev_err(v3d->drm.dev, "Failed to create bin scheduler: %d.", ret);
> return ret;
> @@ -413,7 +413,7 @@ v3d_sched_init(struct v3d_dev *v3d)
> &v3d_render_sched_ops,
> hw_jobs_limit, job_hang_limit,
> msecs_to_jiffies(hang_limit_ms),
> - NULL, "v3d_render");
> + NULL, "v3d_render", NULL);
> if (ret) {
> dev_err(v3d->drm.dev, "Failed to create render scheduler: %d.",
> ret);
> @@ -425,7 +425,7 @@ v3d_sched_init(struct v3d_dev *v3d)
> &v3d_tfu_sched_ops,
> hw_jobs_limit, job_hang_limit,
> msecs_to_jiffies(hang_limit_ms),
> - NULL, "v3d_tfu");
> + NULL, "v3d_tfu", NULL);
> if (ret) {
> dev_err(v3d->drm.dev, "Failed to create TFU scheduler: %d.",
> ret);
> @@ -438,7 +438,7 @@ v3d_sched_init(struct v3d_dev *v3d)
> &v3d_csd_sched_ops,
> hw_jobs_limit, job_hang_limit,
> msecs_to_jiffies(hang_limit_ms),
> - NULL, "v3d_csd");
> + NULL, "v3d_csd", NULL);
> if (ret) {
> dev_err(v3d->drm.dev, "Failed to create CSD scheduler: %d.",
> ret);
> @@ -450,7 +450,7 @@ v3d_sched_init(struct v3d_dev *v3d)
> &v3d_cache_clean_sched_ops,
> hw_jobs_limit, job_hang_limit,
> msecs_to_jiffies(hang_limit_ms),
> - NULL, "v3d_cache_clean");
> + NULL, "v3d_cache_clean", NULL);
> if (ret) {
> dev_err(v3d->drm.dev, "Failed to create CACHE_CLEAN scheduler: %d.",
> ret);
> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
> index d18af49fd009..38a479879fdb 100644
> --- a/include/drm/gpu_scheduler.h
> +++ b/include/drm/gpu_scheduler.h
> @@ -302,12 +302,13 @@ struct drm_gpu_scheduler {
> atomic_t _score;
> bool ready;
> bool free_guilty;
> + struct device *dev;
> };
>
> int drm_sched_init(struct drm_gpu_scheduler *sched,
> const struct drm_sched_backend_ops *ops,
> uint32_t hw_submission, unsigned hang_limit, long timeout,
> - atomic_t *score, const char *name);
> + atomic_t *score, const char *name, struct device *dev);
>
> void drm_sched_fini(struct drm_gpu_scheduler *sched);
> int drm_sched_job_init(struct drm_sched_job *job,