[Intel-xe] [PATCH v5 1/7] drm/sched: Add drm_sched_wqueue_* helpers

Luben Tuikov luben.tuikov at amd.com
Sat Oct 14 01:24:09 UTC 2023


On 2023-10-11 19:58, Matthew Brost wrote:
> Add scheduler wqueue ready, stop, and start helpers to hide the
> implementation details of the scheduler from the drivers.
> 
> v2:
>   - s/sched_submit/sched_wqueue (Luben)
>   - Remove the extra white line after the return-statement (Luben)
>   - update drm_sched_wqueue_ready comment (Luben)
> 
> Signed-off-by: Matthew Brost <matthew.brost at intel.com>
> Cc: Luben Tuikov <luben.tuikov at amd.com>

Cc comes before S-O-B, but I can fix this when applying it, so don't worry about
this patch anymore. I'll also add Link: and so on, but this is all automated for
me so don't worry about it.

Reviewed-by: Luben Tuikov <luben.tuikov at amd.com>
-- 
Regards,
Luben

> ---
>  .../drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c   |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c   | 15 +++----
>  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c    | 12 +++---
>  drivers/gpu/drm/msm/adreno/adreno_device.c    |  6 ++-
>  drivers/gpu/drm/scheduler/sched_main.c        | 39 ++++++++++++++++++-
>  include/drm/gpu_scheduler.h                   |  3 ++
>  6 files changed, 59 insertions(+), 18 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
> index 625db444df1c..10d56979fe3b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
> @@ -290,7 +290,7 @@ static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool sus
>  	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
>  		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
>  
> -		if (!(ring && ring->sched.thread))
> +		if (!(ring && drm_sched_wqueue_ready(&ring->sched)))
>  			continue;
>  
>  		/* stop secheduler and drain ring. */
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
> index a4faea4fa0b5..a4c0bb358db7 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
> @@ -1659,9 +1659,9 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
>  	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
>  		struct amdgpu_ring *ring = adev->rings[i];
>  
> -		if (!ring || !ring->sched.thread)
> +		if (!ring || !drm_sched_wqueue_ready(&ring->sched))
>  			continue;
> -		kthread_park(ring->sched.thread);
> +		drm_sched_wqueue_stop(&ring->sched);
>  	}
>  
>  	seq_puts(m, "run ib test:\n");
> @@ -1675,9 +1675,9 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
>  	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
>  		struct amdgpu_ring *ring = adev->rings[i];
>  
> -		if (!ring || !ring->sched.thread)
> +		if (!ring || !drm_sched_wqueue_ready(&ring->sched))
>  			continue;
> -		kthread_unpark(ring->sched.thread);
> +		drm_sched_wqueue_start(&ring->sched);
>  	}
>  
>  	up_write(&adev->reset_domain->sem);
> @@ -1897,7 +1897,8 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
>  
>  	ring = adev->rings[val];
>  
> -	if (!ring || !ring->funcs->preempt_ib || !ring->sched.thread)
> +	if (!ring || !ring->funcs->preempt_ib ||
> +	    !drm_sched_wqueue_ready(&ring->sched))
>  		return -EINVAL;
>  
>  	/* the last preemption failed */
> @@ -1915,7 +1916,7 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
>  		goto pro_end;
>  
>  	/* stop the scheduler */
> -	kthread_park(ring->sched.thread);
> +	drm_sched_wqueue_stop(&ring->sched);
>  
>  	/* preempt the IB */
>  	r = amdgpu_ring_preempt_ib(ring);
> @@ -1949,7 +1950,7 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
>  
>  failure:
>  	/* restart the scheduler */
> -	kthread_unpark(ring->sched.thread);
> +	drm_sched_wqueue_start(&ring->sched);
>  
>  	up_read(&adev->reset_domain->sem);
>  
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index 2b8356699f23..b1aafe815f28 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -4588,7 +4588,7 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
>  	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
>  		struct amdgpu_ring *ring = adev->rings[i];
>  
> -		if (!ring || !ring->sched.thread)
> +		if (!ring || !drm_sched_wqueue_ready(&ring->sched))
>  			continue;
>  
>  		spin_lock(&ring->sched.job_list_lock);
> @@ -4727,7 +4727,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
>  	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
>  		struct amdgpu_ring *ring = adev->rings[i];
>  
> -		if (!ring || !ring->sched.thread)
> +		if (!ring || !drm_sched_wqueue_ready(&ring->sched))
>  			continue;
>  
>  		/* Clear job fence from fence drv to avoid force_completion
> @@ -5266,7 +5266,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
>  		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
>  			struct amdgpu_ring *ring = tmp_adev->rings[i];
>  
> -			if (!ring || !ring->sched.thread)
> +			if (!ring || !drm_sched_wqueue_ready(&ring->sched))
>  				continue;
>  
>  			drm_sched_stop(&ring->sched, job ? &job->base : NULL);
> @@ -5341,7 +5341,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
>  		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
>  			struct amdgpu_ring *ring = tmp_adev->rings[i];
>  
> -			if (!ring || !ring->sched.thread)
> +			if (!ring || !drm_sched_wqueue_ready(&ring->sched))
>  				continue;
>  
>  			drm_sched_start(&ring->sched, true);
> @@ -5667,7 +5667,7 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
>  		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
>  			struct amdgpu_ring *ring = adev->rings[i];
>  
> -			if (!ring || !ring->sched.thread)
> +			if (!ring || !drm_sched_wqueue_ready(&ring->sched))
>  				continue;
>  
>  			drm_sched_stop(&ring->sched, NULL);
> @@ -5795,7 +5795,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
>  	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
>  		struct amdgpu_ring *ring = adev->rings[i];
>  
> -		if (!ring || !ring->sched.thread)
> +		if (!ring || !drm_sched_wqueue_ready(&ring->sched))
>  			continue;
>  
>  		drm_sched_start(&ring->sched, true);
> diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
> index fa527935ffd4..8fa9ce3746b6 100644
> --- a/drivers/gpu/drm/msm/adreno/adreno_device.c
> +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
> @@ -809,7 +809,8 @@ static void suspend_scheduler(struct msm_gpu *gpu)
>  	 */
>  	for (i = 0; i < gpu->nr_rings; i++) {
>  		struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
> -		kthread_park(sched->thread);
> +
> +		drm_sched_wqueue_stop(sched);
>  	}
>  }
>  
> @@ -819,7 +820,8 @@ static void resume_scheduler(struct msm_gpu *gpu)
>  
>  	for (i = 0; i < gpu->nr_rings; i++) {
>  		struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
> -		kthread_unpark(sched->thread);
> +
> +		drm_sched_wqueue_start(sched);
>  	}
>  }
>  
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> index 5a3a622fc672..6f2f7dd4ba0a 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -439,7 +439,7 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
>  {
>  	struct drm_sched_job *s_job, *tmp;
>  
> -	kthread_park(sched->thread);
> +	drm_sched_wqueue_stop(sched);
>  
>  	/*
>  	 * Reinsert back the bad job here - now it's safe as
> @@ -552,7 +552,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
>  		spin_unlock(&sched->job_list_lock);
>  	}
>  
> -	kthread_unpark(sched->thread);
> +	drm_sched_wqueue_start(sched);
>  }
>  EXPORT_SYMBOL(drm_sched_start);
>  
> @@ -1206,3 +1206,38 @@ void drm_sched_increase_karma(struct drm_sched_job *bad)
>  	}
>  }
>  EXPORT_SYMBOL(drm_sched_increase_karma);
> +
> +/**
> + * drm_sched_wqueue_ready - Is the scheduler ready for submission
> + *
> + * @sched: scheduler instance
> + *
> + * Returns true if submission is ready
> + */
> +bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched)
> +{
> +	return !!sched->thread;
> +}
> +EXPORT_SYMBOL(drm_sched_wqueue_ready);
> +
> +/**
> + * drm_sched_wqueue_stop - stop scheduler submission
> + *
> + * @sched: scheduler instance
> + */
> +void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched)
> +{
> +	kthread_park(sched->thread);
> +}
> +EXPORT_SYMBOL(drm_sched_wqueue_stop);
> +
> +/**
> + * drm_sched_wqueue_start - start scheduler submission
> + *
> + * @sched: scheduler instance
> + */
> +void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched)
> +{
> +	kthread_unpark(sched->thread);
> +}
> +EXPORT_SYMBOL(drm_sched_wqueue_start);
> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
> index f9544d9b670d..38578fe74573 100644
> --- a/include/drm/gpu_scheduler.h
> +++ b/include/drm/gpu_scheduler.h
> @@ -550,6 +550,9 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
>  
>  void drm_sched_job_cleanup(struct drm_sched_job *job);
>  void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched);
> +bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
> +void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
> +void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
>  void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
>  void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
>  void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);



More information about the Intel-xe mailing list