[PATCH v2 5/9] drm/i915/gvt: factor out the scheduler

Ping Gao ping.a.gao at intel.com
Tue Feb 14 04:25:51 UTC 2017


Factor out the scheduler into a clearer structure; the basic logic
is to find the next vGPU first and then schedule it. Deciding which
vGPU comes next is split into two parts: first, find the right sched
head to keep fairness; second, choose a vGPU that has timeslice left.
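In short, get_sched_head() decides where the round-robin scan starts
(right after the current vGPU, or at the run queue head if there is
none), and get_vgpu_timeslice_left() scans from there for a runnable
vGPU. Below is a minimal standalone sketch of that two-step pick; the
types and helper names are simplified stand-ins for illustration, not
the real gvt structs in the patch.

#include <stdio.h>

/* Hypothetical, simplified vGPU node on a circular run queue. */
struct vgpu {
	int id;
	int has_workload;	/* pending workload? */
	long timeslice_left;	/* remaining budget */
	struct vgpu *next;
};

/* Part 1: the sched head sits right after the current vGPU so the
 * rotation stays fair; with no current vGPU, start at the queue head. */
static struct vgpu *sched_head(struct vgpu *runq, struct vgpu *cur)
{
	return cur ? cur->next : runq;
}

/* Part 2: scan one full round from the sched head for a vGPU that
 * has pending workload and still has timeslice left. */
static struct vgpu *pick_next(struct vgpu *runq, struct vgpu *cur)
{
	struct vgpu *head = sched_head(runq, cur);
	struct vgpu *pos = head;

	do {
		if (pos->has_workload && pos->timeslice_left > 0)
			return pos;
		pos = pos->next;
	} while (pos != head);

	return NULL;	/* nothing runnable this tick */
}

int main(void)
{
	struct vgpu a = { .id = 1, .has_workload = 0, .timeslice_left = 900 };
	struct vgpu b = { .id = 2, .has_workload = 1, .timeslice_left = 500 };
	struct vgpu c = { .id = 3, .has_workload = 1, .timeslice_left = 0 };
	struct vgpu *next;

	a.next = &b; b.next = &c; c.next = &a;	/* circular run queue */

	/* vGPU 1 is current: the scan starts at vGPU 2, which is
	 * runnable, so it gets picked; vGPU 3 is out of budget. */
	next = pick_next(&a, &a);
	if (next)
		printf("next vgpu: %d\n", next->id);	/* prints 2 */
	return 0;
}

Starting the scan just past the current vGPU, rather than at the queue
head every time, is what keeps the rotation fair: a fixed start point
would always favor vGPUs near the head and starve those at the tail.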

Signed-off-by: Ping Gao <ping.a.gao at intel.com>
---
 drivers/gpu/drm/i915/gvt/sched_policy.c | 73 ++++++++++++++++++++++++---------
 1 file changed, 54 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 6c53bf0..c174ce6 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -118,28 +118,12 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 		wake_up(&scheduler->waitq[i]);
 }
 
-#define GVT_DEFAULT_TIME_SLICE 1000000
-
-static void tbs_sched_func(struct tbs_sched_data *sched_data)
+static struct intel_vgpu *get_vgpu_timeslice_left(struct list_head *head,
+					struct tbs_sched_data *sched_data)
 {
 	struct tbs_vgpu_data *vgpu_data;
-
-	struct intel_gvt *gvt = sched_data->gvt;
-	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-
 	struct intel_vgpu *vgpu = NULL;
-	struct list_head *pos, *head;
-
-	/* no vgpu or has already had a target */
-	if (gvt->num_vgpu_sched <= 1 || scheduler->next_vgpu)
-		goto out;
-
-	if (scheduler->current_vgpu) {
-		vgpu_data = scheduler->current_vgpu->sched_data;
-		head = &vgpu_data->list;
-	} else {
-		head = &sched_data->runq_head;
-	}
+	struct list_head *pos;
 
 	/* search a vgpu with pending workload */
 	list_for_each(pos, head) {
@@ -154,6 +138,57 @@ static void tbs_sched_func(struct tbs_sched_data *sched_data)
 		break;
 	}
 
+	return vgpu;
+}
+
+static struct list_head *get_sched_head(struct tbs_sched_data *sched_data)
+{
+	struct intel_gvt *gvt = sched_data->gvt;
+	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+	struct tbs_vgpu_data *cur_vgpu_data;
+	struct list_head *head;
+
+	if (scheduler->current_vgpu) {
+		cur_vgpu_data = scheduler->current_vgpu->sched_data;
+		head = &cur_vgpu_data->list;
+	} else {
+		gvt_dbg_sched("no current vgpu, search from q head\n");
+		head = &sched_data->runq_head;
+	}
+
+	return head;
+}
+
+static struct intel_vgpu *pickup_next_vgpu(struct tbs_sched_data *sched_data)
+{
+	struct intel_vgpu *next_vgpu = NULL;
+	struct list_head *head = NULL;
+
+	/* The scheduler follows round-robin style; the sched
+	 * head is where we start choosing the next vGPU, which
+	 * is important to keep fairness. */
+	head = get_sched_head(sched_data);
+
+	/* Choose a vGPU that has timeslice left */
+	next_vgpu = get_vgpu_timeslice_left(head, sched_data);
+
+	return next_vgpu;
+}
+
+#define GVT_DEFAULT_TIME_SLICE 1000000
+
+static void tbs_sched_func(struct tbs_sched_data *sched_data)
+{
+	struct intel_gvt *gvt = sched_data->gvt;
+	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+	struct intel_vgpu *vgpu = NULL;
+
+	/* no vgpu or has already had a target */
+	if (gvt->num_vgpu_sched <= 1 || scheduler->next_vgpu)
+		goto out;
+
+	/* determine which vGPU should be chosen next */
+	vgpu = pickup_next_vgpu(sched_data);
 	if (vgpu) {
 		scheduler->next_vgpu = vgpu;
 		gvt_dbg_sched("pick next vgpu %d\n", vgpu->id);
-- 
2.7.4


