[RFC 04/14] drm/sched: Consolidate entity run queue management

Tvrtko Ursulin <tursulin@igalia.com>
Mon Dec 30 16:52:49 UTC 2024


From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>

Move the code dealing with entities entering and exiting run queues into
helpers, to logically separate it from the code handling jobs entering and
exiting entities.
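
With the helpers in place the two call sites in sched_entity.c reduce to
roughly the following (a sketch of the hunks below):

	/* Push side: first job on an idle entity enters the run queue. */
	sched = drm_sched_rq_add_entity(entity->rq, entity, submit_ts);
	if (sched)
		drm_sched_wakeup(sched);

	/* Pop side: after dequeuing a job, re-position or drop the entity. */
	spsc_queue_pop(&entity->job_queue);
	drm_sched_rq_pop_entity(entity->rq, entity);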

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Danilo Krummrich <dakr@redhat.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Philipp Stanner <pstanner@redhat.com>
---
 drivers/gpu/drm/scheduler/sched_entity.c |  66 ++--------------
 drivers/gpu/drm/scheduler/sched_main.c   | 101 +++++++++++++++++++-----
 include/drm/gpu_scheduler.h              |  12 +--
 3 files changed, 92 insertions(+), 87 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index cb5f596b48b7..b93da068585e 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -473,20 +473,9 @@ drm_sched_job_dependency(struct drm_sched_job *job,
 	return NULL;
 }
 
-static ktime_t
-drm_sched_rq_get_rr_deadline(struct drm_sched_rq *rq)
-{
-	lockdep_assert_held(&rq->lock);
-
-	rq->rr_deadline = ktime_add_ns(rq->rr_deadline, 1);
-
-	return rq->rr_deadline;
-}
-
 struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 {
-	struct drm_sched_job *sched_job, *next_job;
-	struct drm_sched_rq *rq;
+	struct drm_sched_job *sched_job;
 
 	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
 	if (!sched_job)
@@ -516,32 +505,7 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 	smp_wmb();
 
 	spsc_queue_pop(&entity->job_queue);
-
-	/*
-	 * Update the entity's location in the min heap according to
-	 * the timestamp of the next job, if any.
-	 */
-	next_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
-
-	spin_lock(&entity->lock);
-	rq = entity->rq;
-	spin_lock(&rq->lock);
-
-	if (next_job) {
-		ktime_t ts;
-
-		if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
-			ts = next_job->submit_ts;
-		else
-			ts = drm_sched_rq_get_rr_deadline(rq);
-
-		drm_sched_rq_update_fifo_locked(entity, rq, ts);
-	} else {
-		drm_sched_rq_remove_fifo_locked(entity, rq);
-	}
-
-	spin_unlock(&rq->lock);
-	spin_unlock(&entity->lock);
+	drm_sched_rq_pop_entity(entity->rq, entity);
 
 	/* Jobs and entities might have different lifecycles. Since we're
 	 * removing the job from the entities queue, set the jobs entity pointer
@@ -623,30 +587,10 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
 	/* first job wakes up scheduler */
 	if (first) {
 		struct drm_gpu_scheduler *sched;
-		struct drm_sched_rq *rq;
 
-		/* Add the entity to the run queue */
-		spin_lock(&entity->lock);
-		if (entity->stopped) {
-			spin_unlock(&entity->lock);
-
-			DRM_ERROR("Trying to push to a killed entity\n");
-			return;
-		}
-
-		rq = entity->rq;
-		sched = rq->sched;
-
-		spin_lock(&rq->lock);
-		drm_sched_rq_add_entity(rq, entity);
-		if (drm_sched_policy == DRM_SCHED_POLICY_RR)
-			submit_ts = drm_sched_rq_get_rr_deadline(rq);
-		drm_sched_rq_update_fifo_locked(entity, rq, submit_ts);
-
-		spin_unlock(&rq->lock);
-		spin_unlock(&entity->lock);
-
-		drm_sched_wakeup(sched);
+		sched = drm_sched_rq_add_entity(entity->rq, entity, submit_ts);
+		if (sched)
+			drm_sched_wakeup(sched);
 	}
 }
 EXPORT_SYMBOL(drm_sched_entity_push_job);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index eb22b1b7de36..52c1a71d48e1 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -146,18 +146,19 @@ static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
 	return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
 }
 
-void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity,
-				     struct drm_sched_rq *rq)
+static void __drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity,
+					      struct drm_sched_rq *rq)
 {
-	if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
-		rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
-		RB_CLEAR_NODE(&entity->rb_tree_node);
-	}
+	lockdep_assert_held(&entity->lock);
+	lockdep_assert_held(&rq->lock);
+
+	rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
+	RB_CLEAR_NODE(&entity->rb_tree_node);
 }
 
-void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
-				     struct drm_sched_rq *rq,
-				     ktime_t ts)
+static void __drm_sched_rq_add_fifo_locked(struct drm_sched_entity *entity,
+					   struct drm_sched_rq *rq,
+					   ktime_t ts)
 {
 	/*
 	 * Both locks need to be grabbed, one to protect from entity->rq change
@@ -167,8 +168,6 @@ void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
 	lockdep_assert_held(&entity->lock);
 	lockdep_assert_held(&rq->lock);
 
-	drm_sched_rq_remove_fifo_locked(entity, rq);
-
 	entity->oldest_job_waiting = ts;
 
 	rb_add_cached(&entity->rb_tree_node, &rq->rb_tree_root,
@@ -192,6 +191,16 @@ static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
 	rq->sched = sched;
 }
 
+static ktime_t
+drm_sched_rq_get_rr_deadline(struct drm_sched_rq *rq)
+{
+	lockdep_assert_held(&rq->lock);
+
+	rq->rr_deadline = ktime_add_ns(rq->rr_deadline, 1);
+
+	return rq->rr_deadline;
+}
+
 /**
  * drm_sched_rq_add_entity - add an entity
  *
@@ -199,18 +208,42 @@ static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
  * @entity: scheduler entity
  *
  * Adds a scheduler entity to the run queue.
+ *
+ * Returns a DRM scheduler pre-selected to handle this entity.
  */
-void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
-			     struct drm_sched_entity *entity)
+struct drm_gpu_scheduler *
+drm_sched_rq_add_entity(struct drm_sched_rq *rq,
+			struct drm_sched_entity *entity,
+			ktime_t ts)
 {
-	lockdep_assert_held(&entity->lock);
-	lockdep_assert_held(&rq->lock);
+	struct drm_gpu_scheduler *sched;
+
+	if (entity->stopped) {
+		DRM_ERROR("Trying to push to a killed entity\n");
+		return NULL;
+	}
+
+	spin_lock(&entity->lock);
+	spin_lock(&rq->lock);
+
+	sched = rq->sched;
 
-	if (!list_empty(&entity->list))
-		return;
+	if (list_empty(&entity->list)) {
+		atomic_inc(sched->score);
+		list_add_tail(&entity->list, &rq->entities);
+	}
 
-	atomic_inc(rq->sched->score);
-	list_add_tail(&entity->list, &rq->entities);
+	if (drm_sched_policy == DRM_SCHED_POLICY_RR)
+		ts = drm_sched_rq_get_rr_deadline(rq);
+
+	if (!RB_EMPTY_NODE(&entity->rb_tree_node))
+		__drm_sched_rq_remove_fifo_locked(entity, rq);
+	__drm_sched_rq_add_fifo_locked(entity, rq, ts);
+
+	spin_unlock(&rq->lock);
+	spin_unlock(&entity->lock);
+
+	return sched;
 }
 
 /**
@@ -234,11 +267,39 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
 	atomic_dec(rq->sched->score);
 	list_del_init(&entity->list);
 
-	drm_sched_rq_remove_fifo_locked(entity, rq);
+	if (!RB_EMPTY_NODE(&entity->rb_tree_node))
+		__drm_sched_rq_remove_fifo_locked(entity, rq);
 
 	spin_unlock(&rq->lock);
 }
 
+void drm_sched_rq_pop_entity(struct drm_sched_rq *rq,
+			     struct drm_sched_entity *entity)
+{
+	struct drm_sched_job *next_job;
+
+	next_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
+
+	spin_lock(&entity->lock);
+	spin_lock(&rq->lock);
+
+	__drm_sched_rq_remove_fifo_locked(entity, rq);
+
+	if (next_job) {
+		ktime_t ts;
+
+		if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
+			ts = next_job->submit_ts;
+		else
+			ts = drm_sched_rq_get_rr_deadline(rq);
+
+		__drm_sched_rq_add_fifo_locked(entity, rq, ts);
+	}
+
+	spin_unlock(&rq->lock);
+	spin_unlock(&entity->lock);
+}
+
 /**
  * drm_sched_rq_select_entity - Select an entity which provides a job to run
  *
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index db65600732b9..23d5b1b0b048 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -585,15 +585,15 @@ bool drm_sched_dependency_optimized(struct dma_fence* fence,
 				    struct drm_sched_entity *entity);
 void drm_sched_fault(struct drm_gpu_scheduler *sched);
 
-void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
-			     struct drm_sched_entity *entity);
+struct drm_gpu_scheduler *
+drm_sched_rq_add_entity(struct drm_sched_rq *rq,
+			struct drm_sched_entity *entity,
+			ktime_t ts);
 void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
 				struct drm_sched_entity *entity);
 
-void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity,
-				     struct drm_sched_rq *rq);
-void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
-				     struct drm_sched_rq *rq, ktime_t ts);
+void drm_sched_rq_pop_entity(struct drm_sched_rq *rq,
+			     struct drm_sched_entity *entity);
 
 int drm_sched_entity_init(struct drm_sched_entity *entity,
 			  enum drm_sched_priority priority,
-- 
2.47.1


