[PATCH v4 5/9] drm/i915/gvt: factor out the scheduler

Ping Gao ping.a.gao at intel.com
Wed Mar 8 06:24:32 UTC 2017


Factor out the scheduler into a clearer structure; the basic logic
is to find the next vGPU first and then schedule it. Deciding which
vGPU runs next has two sequential parts: first, find a sched head
that has an urgent requirement because it is close to TDR after
being out of service for a long time; second, choose a vGPU that
has pending workloads, following round-robin style.

Signed-off-by: Ping Gao <ping.a.gao at intel.com>
---
 drivers/gpu/drm/i915/gvt/sched_policy.c | 78 ++++++++++++++++++++++++---------
 1 file changed, 58 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 8c1eb53..e8a9db1 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -125,29 +125,12 @@ enum {
 	HAS_ACTIVE_VGPU_SCHED,
 };
 
-#define GVT_DEFAULT_TIME_SLICE 1000000
-
-static void tbs_sched_func(struct gvt_sched_data *sched_data)
+static struct intel_vgpu *get_vgpu_has_workload(struct list_head *head,
+					struct gvt_sched_data *sched_data)
 {
 	struct vgpu_sched_data *vgpu_data;
-
-	struct intel_gvt *gvt = sched_data->gvt;
-	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-
 	struct intel_vgpu *vgpu = NULL;
-	struct list_head *pos, *head;
-
-	/* no vgpu or has already had a target */
-	if (atomic_read(&gvt->num_vgpu_sched) <= ONLY_IDLE_VGPU_SCHED ||
-			scheduler->next_vgpu)
-		goto out;
-
-	if (scheduler->current_vgpu) {
-		vgpu_data = scheduler->current_vgpu->sched_data;
-		head = &vgpu_data->list;
-	} else {
-		head = &sched_data->runq_head;
-	}
+	struct list_head *pos;
 
 	/* search a vgpu with pending workload */
 	list_for_each(pos, head) {
@@ -162,6 +145,61 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
 		break;
 	}
 
+	return vgpu;
+}
+
+static struct list_head *get_sched_head(struct gvt_sched_data *sched_data)
+{
+	struct intel_gvt *gvt = sched_data->gvt;
+	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+	struct vgpu_sched_data *cur_vgpu_data;
+	struct list_head *head;
+
+	if (scheduler->current_vgpu) {
+		cur_vgpu_data = scheduler->current_vgpu->sched_data;
+		head = &cur_vgpu_data->list;
+	} else {
+		gvt_dbg_sched("no current vgpu search from q head\n");
+		head = &sched_data->runq_head;
+	}
+
+	return head;
+}
+
+static struct intel_vgpu *pickup_next_vgpu(struct gvt_sched_data *sched_data)
+{
+	struct intel_vgpu *next_vgpu = NULL;
+	struct list_head *head = NULL;
+
+	/* The scheduler is follow round-robin style, sched
+	 * head means where start to choose next vGPU, it's
+	 * help to decide which vGPU is first one in the
+	 * round-robin queue at this schedule check point,
+	 * that's important to keep fairness.
+	 */
+	head = get_sched_head(sched_data);
+
+	/* Choose the vGPU which has pending workload. */
+	next_vgpu = get_vgpu_has_workload(head, sched_data);
+
+	return next_vgpu;
+}
+
+#define GVT_DEFAULT_TIME_SLICE 1000000
+
+static void tbs_sched_func(struct gvt_sched_data *sched_data)
+{
+	struct intel_gvt *gvt = sched_data->gvt;
+	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+	struct intel_vgpu *vgpu = NULL;
+
+	/* no active vgpu or has already had a target */
+	if (atomic_read(&gvt->num_vgpu_sched) <= ONLY_IDLE_VGPU_SCHED ||
+			scheduler->next_vgpu)
+		goto out;
+
+	/* determine which vGPU should choose as next */
+	vgpu = pickup_next_vgpu(sched_data);
 	if (vgpu) {
 		scheduler->next_vgpu = vgpu;
 		gvt_dbg_sched("pick next vgpu %d\n", vgpu->id);
-- 
2.7.4



More information about the intel-gvt-dev mailing list