[Intel-xe] [PATCH 2/2] fixup! drm/sched: Convert drm scheduler to use a work queue rather than kthread

Matthew Brost <matthew.brost@intel.com>
Tue Aug 1 03:27:48 UTC 2023

Replace the open-coded smp_wmb()/smp_rmb() barriers and the plain
accesses to pause_run_wq with WRITE_ONCE()/READ_ONCE(); the
surrounding comments already describe the remaining races as
harmless. Also rework drm_sched_main() from a loop that drains all
outstanding work into a single-pass handler that processes at most
one cleanup job, one message, and one entity per invocation, and
requeues itself through drm_sched_run_wq_queue() while more work may
remain.
---
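Reviewer note (below the ---, so discarded by git-am): the first three
hunks are the usual lockless pause-flag pattern. A minimal,
self-contained sketch of that pattern follows; struct pause_demo and
the demo_* names are illustrative for this note only, not the
scheduler's real API.

#include <linux/compiler.h>	/* READ_ONCE()/WRITE_ONCE() */
#include <linux/types.h>
#include <linux/workqueue.h>

struct pause_demo {
	bool paused;			/* pause flag, accessed locklessly */
	struct workqueue_struct *wq;
	struct work_struct work;
};

/*
 * Pause: set the flag, then synchronously cancel any pending work.
 * WRITE_ONCE() stops the compiler from tearing or fusing the store; a
 * queue_work() racing with this is either cancelled by
 * cancel_work_sync() or the handler sees the flag and bails out,
 * which the patch's comments treat as harmless.
 */
static void demo_pause(struct pause_demo *d)
{
	WRITE_ONCE(d->paused, true);
	cancel_work_sync(&d->work);
}

/* Best-effort queue; deliberately racy against demo_pause(). */
static void demo_queue(struct pause_demo *d)
{
	if (!READ_ONCE(d->paused))
		queue_work(d->wq, &d->work);
}
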
 drivers/gpu/drm/scheduler/sched_main.c | 60 ++++++++++++--------------
 1 file changed, 28 insertions(+), 32 deletions(-)

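The drm_sched_main() hunk changes the handler from a drain loop into a
single-pass function that requeues itself. A rough sketch of that
shape, reusing the illustrative struct from the note above
(do_one_item() is a hypothetical stand-in for the
pop-job/free-job/process-msg step, assumed to return false once idle):

#include <linux/container_of.h>

bool do_one_item(struct pause_demo *d);	/* hypothetical work step */

static void demo_work_fn(struct work_struct *w)
{
	struct pause_demo *d = container_of(w, struct pause_demo, work);

	if (READ_ONCE(d->paused))
		return;

	if (!do_one_item(d))
		return;		/* idle: nothing left, do not requeue */

	/*
	 * More work may remain: requeue instead of looping so each
	 * invocation stays short and other items on the workqueue get
	 * a chance to run in between.
	 */
	if (!READ_ONCE(d->paused))
		queue_work(d->wq, &d->work);
}

One thing to keep an eye on in review: with this shape, forward
progress depends on every path that found work either finishing it or
requeueing, which is what the goto again path in the hunk below
handles.
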
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index fd265efc75d4..55094bc54c96 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -286,9 +286,7 @@ drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
  */
 void drm_sched_run_wq_stop(struct drm_gpu_scheduler *sched)
 {
-	sched->pause_run_wq = true;
-	smp_wmb();
-
+	WRITE_ONCE(sched->pause_run_wq, true);
 	cancel_work_sync(&sched->work_run);
 }
 EXPORT_SYMBOL(drm_sched_run_wq_stop);
@@ -300,9 +298,7 @@ EXPORT_SYMBOL(drm_sched_run_wq_stop);
  */
 void drm_sched_run_wq_start(struct drm_gpu_scheduler *sched)
 {
-	sched->pause_run_wq = false;
-	smp_wmb();
-
+	WRITE_ONCE(sched->pause_run_wq, false);
 	queue_work(sched->run_wq, &sched->work_run);
 }
 EXPORT_SYMBOL(drm_sched_run_wq_start);
@@ -314,15 +310,13 @@ EXPORT_SYMBOL(drm_sched_run_wq_start);
  */
 static void drm_sched_run_wq_queue(struct drm_gpu_scheduler *sched)
 {
-	smp_rmb();
-
 	/*
 	 * Try not to schedule work if pause_run_wq set but not the end of world
 	 * if we do as either it will be cancelled by the above
 	 * cancel_work_sync, or drm_sched_main turns into a NOP while
 	 * pause_run_wq is set.
 	 */
-	if (!sched->pause_run_wq)
+	if (!READ_ONCE(sched->pause_run_wq))
 		queue_work(sched->run_wq, &sched->work_run);
 }
 
@@ -1106,7 +1100,7 @@ void drm_sched_add_msg(struct drm_gpu_scheduler *sched,
 	 * Same as above in drm_sched_run_wq_queue, try to kick worker if
 	 * paused, harmless if this races
 	 */
-	if (!sched->pause_run_wq)
+	if (!READ_ONCE(sched->pause_run_wq))
 		queue_work(sched->run_wq, &sched->work_run);
 }
 EXPORT_SYMBOL(drm_sched_add_msg);
@@ -1142,39 +1136,38 @@ static void drm_sched_main(struct work_struct *w)
 {
 	struct drm_gpu_scheduler *sched =
 		container_of(w, struct drm_gpu_scheduler, work_run);
+	struct drm_sched_entity *entity;
+	struct drm_sched_msg *msg;
+	struct drm_sched_job *cleanup_job;
 	int r;
 
-	while (!READ_ONCE(sched->pause_run_wq)) {
-		struct drm_sched_entity *entity;
-		struct drm_sched_msg *msg;
-		struct drm_sched_fence *s_fence;
-		struct drm_sched_job *sched_job;
-		struct dma_fence *fence;
-		struct drm_sched_job *cleanup_job;
+	if (READ_ONCE(sched->pause_run_wq))
+		return;
 
-		cleanup_job = drm_sched_get_cleanup_job(sched);
-		entity = drm_sched_select_entity(sched);
-		msg = drm_sched_get_msg(sched);
+	cleanup_job = drm_sched_get_cleanup_job(sched);
+	msg = drm_sched_get_msg(sched);
+	entity = drm_sched_select_entity(sched);
 
-		if (cleanup_job)
-			sched->ops->free_job(cleanup_job);
+	if (!entity && !cleanup_job && !msg)
+		return;	/* No more work */
 
-		if (msg)
-			sched->ops->process_msg(msg);
+	if (cleanup_job)
+		sched->ops->free_job(cleanup_job);
 
-		if (!entity) {
-			if (!cleanup_job && !msg)
-				break;
-			continue;
-		}
+	if (msg)
+		sched->ops->process_msg(msg);
 
-		sched_job = drm_sched_entity_pop_job(entity);
+	if (entity) {
+		struct dma_fence *fence;
+		struct drm_sched_fence *s_fence;
+		struct drm_sched_job *sched_job;
 
+		sched_job = drm_sched_entity_pop_job(entity);
 		if (!sched_job) {
 			complete_all(&entity->entity_idle);
 			if (!cleanup_job && !msg)
-				break;
-			continue;
+				return;	/* No more work */
+			goto again;
 		}
 
 		s_fence = sched_job->s_fence;
@@ -1206,6 +1199,9 @@ static void drm_sched_main(struct work_struct *w)
 
 		wake_up(&sched->job_scheduled);
 	}
+
+again:
+	drm_sched_run_wq_queue(sched);
 }
 
 /**
-- 
2.34.1