[RFC PATCH] drm/scheduler: use idle time to do better loadbalance

Nirmoy Das nirmoy.aiemd at gmail.com
Sun Jan 12 01:25:07 UTC 2020


This patch adds the required fields to the drm_sched_job and
drm_gpu_scheduler structures to cumulatively track the amount of time a
drm_gpu_scheduler spends serving jobs.
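
For illustration, the bookkeeping boils down to stamping a job when it
is pushed and adding the elapsed time to the scheduler's running total
when the job completes. A minimal userspace sketch of the idea follows;
the toy_* names are illustrative stand-ins, not the real drm_sched
structures:

#include <stdint.h>
#include <time.h>

/* Toy stand-ins for drm_gpu_scheduler / drm_sched_job. */
struct toy_sched {
	uint64_t total_consumed_time;	/* ns accumulated over finished jobs */
};

struct toy_job {
	uint64_t start_time;		/* ns, stamped when the job is pushed */
	struct toy_sched *sched;
};

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Mirrors what drm_sched_entity_push_job() does in the patch below. */
static void toy_push_job(struct toy_job *job)
{
	job->start_time = now_ns();
}

/* Mirrors what drm_sched_process_job() does when the job's fence signals. */
static void toy_job_done(struct toy_job *job)
{
	job->sched->total_consumed_time += now_ns() - job->start_time;
}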

Using the least-used drm scheduler to choose a run queue improves
drm_sched_entity_get_free_sched()'s job distribution.
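
Building on the toy_sched type from the sketch above, the new selection
policy is simply "scan the entity's scheduler list and take the one with
the smallest accumulated time" (again an illustrative sketch, not the
kernel code):

/* Return the scheduler that has spent the least time on jobs so far. */
static struct toy_sched *pick_least_busy(struct toy_sched **scheds,
					 unsigned int count)
{
	uint64_t min_time = UINT64_MAX;
	struct toy_sched *best = NULL;
	unsigned int i;

	for (i = 0; i < count; ++i) {
		if (scheds[i]->total_consumed_time < min_time) {
			min_time = scheds[i]->total_consumed_time;
			best = scheds[i];
		}
	}

	return best;
}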

Below are the test results after running amdgpu_test from mesa drm:

Before this patch:

sched_name     number of times it got scheduled
==========     ================================
sdma0          314
sdma1          32
comp_1.0.0     56

After this patch:

sched_name     number of times it got scheduled
==========     ================================
sdma0          113
sdma1          383
comp_1.0.0     9
comp_1.0.1     9
comp_1.1.0     8
comp_1.1.1     8
comp_1.2.0     12
comp_1.2.1     13
comp_1.3.0     16
comp_1.3.1     9

Signed-off-by: Nirmoy Das <nirmoy.das at amd.com>
---
 drivers/gpu/drm/scheduler/sched_entity.c | 9 +++++----
 drivers/gpu/drm/scheduler/sched_main.c   | 2 ++
 include/drm/gpu_scheduler.h              | 2 ++
 3 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 2e3a058fc239..b5555af787d0 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -130,7 +130,7 @@ static struct drm_sched_rq *
 drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
 {
 	struct drm_sched_rq *rq = NULL;
-	unsigned int min_jobs = UINT_MAX, num_jobs;
+	uint64_t min_time_consumed = -1, total_consumed_time;
 	int i;
 
 	for (i = 0; i < entity->num_sched_list; ++i) {
@@ -141,9 +141,9 @@ drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
 			continue;
 		}
 
-		num_jobs = atomic_read(&sched->num_jobs);
-		if (num_jobs < min_jobs) {
-			min_jobs = num_jobs;
+		total_consumed_time = sched->total_consumed_time;
+		if (total_consumed_time < min_time_consumed) {
+			min_time_consumed = total_consumed_time;
 			rq = &entity->sched_list[i]->sched_rq[entity->priority];
 		}
 	}
@@ -499,6 +499,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 
 	trace_drm_sched_job(sched_job, entity);
 	atomic_inc(&entity->rq->sched->num_jobs);
+	sched_job->start_time = ktime_get_ns();
 	WRITE_ONCE(entity->last_user, current->group_leader);
 	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
 
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 3fad5876a13f..67fdf4f248d4 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -653,6 +653,7 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
 	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
 	struct drm_sched_fence *s_fence = s_job->s_fence;
 	struct drm_gpu_scheduler *sched = s_fence->sched;
+	uint64_t end = ktime_get_ns();
 
 	atomic_dec(&sched->hw_rq_count);
 	atomic_dec(&sched->num_jobs);
@@ -660,6 +661,7 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
 	trace_drm_sched_process_job(s_fence);
 
 	drm_sched_fence_finished(s_fence);
+	s_job->sched->total_consumed_time += end - s_job->start_time;
 	wake_up_interruptible(&sched->wake_up_worker);
 }
 
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 96a1a1b7526e..496d9b209d12 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -191,6 +191,7 @@ struct drm_sched_job {
 	struct dma_fence_cb		finish_cb;
 	struct list_head		node;
 	uint64_t			id;
+	uint64_t			start_time;
 	atomic_t			karma;
 	enum drm_sched_priority		s_priority;
 	struct drm_sched_entity  *entity;
@@ -285,6 +286,7 @@ struct drm_gpu_scheduler {
 	atomic_t                        num_jobs;
 	bool			ready;
 	bool				free_guilty;
+	uint64_t			total_consumed_time;
 };
 
 int drm_sched_init(struct drm_gpu_scheduler *sched,
-- 
2.24.1


