[PATCH v5 8/8] drm/i915/gvt: let timeslice usage control vGPU scheduling

Ping Gao ping.a.gao at intel.com
Tue Mar 28 06:47:38 UTC 2017


At every vGPU switch checkpoint, the time slice usage determines whether a
vGPU gets a chance to be scheduled. A vGPU with pending workload is picked
only if it has no time slice limit or still has time slice left; otherwise
the scheduler falls back to the idle vGPU.

Signed-off-by: Ping Gao <ping.a.gao@intel.com>
---
 drivers/gpu/drm/i915/gvt/sched_policy.c | 48 ++++++++++++++++++++++++++-------
 1 file changed, 38 insertions(+), 10 deletions(-)
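
As a reading aid, the selection rule this patch introduces can be sketched as
follows. This is an illustration only, not code added by the patch;
vgpu_schedulable() is a hypothetical helper, and the actual logic lives in
find_vgpu_timeslice_left() and pickup_next_vgpu() in the diff below:

static bool vgpu_schedulable(struct vgpu_sched_data *vgpu_data)
{
	/* Only a vGPU with queued work is a candidate. */
	if (!vgpu_has_pending_workload(vgpu_data->vgpu))
		return false;

	/* No time slice limit configured: always eligible. */
	if (!vgpu_has_ts_limit(vgpu_data))
		return true;

	/* Otherwise it must still have time slice budget left. */
	return vgpu_data->left_ts > 0;
}

When no vGPU on the run queue passes this check, pickup_next_vgpu() falls
back to gvt->idle_vgpu.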

diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index c7fd8b8..04f874b 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -190,7 +190,10 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 		wake_up(&scheduler->waitq[i]);
 }
 
-static struct intel_vgpu *find_busy_vgpu(struct list_head *head,
+#define vgpu_has_ts_limit(vgpu_data) \
+	((vgpu_data)->allocated_ts < ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS))
+
+static struct intel_vgpu *find_vgpu_timeslice_left(struct list_head *head,
 					struct gvt_sched_data *sched_data)
 {
 	struct vgpu_sched_data *vgpu_data;
@@ -206,8 +209,16 @@ static struct intel_vgpu *find_busy_vgpu(struct list_head *head,
 		if (!vgpu_has_pending_workload(vgpu_data->vgpu))
 			continue;
 
-		vgpu = vgpu_data->vgpu;
-		break;
+		/* Return the vGPU if there is no time slice limitation */
+		if (!vgpu_has_ts_limit(vgpu_data)) {
+			vgpu = vgpu_data->vgpu;
+			break;
+
+		/* Return the vGPU only if it has time slice left */
+		} else if (vgpu_data->left_ts > 0) {
+			vgpu = vgpu_data->vgpu;
+			break;
+		}
 	}
 
 	return vgpu;
@@ -243,6 +254,9 @@ static struct intel_vgpu *find_longest_unsched_vgpu(struct list_head *lru_vgpu_h
 	return NULL;
 }
 
+#define is_not_idle_vgpu(vgpu) \
+	((vgpu) && ((vgpu) != (vgpu)->gvt->idle_vgpu))
+
 static struct list_head *get_sched_head(struct gvt_sched_data *sched_data)
 {
 	struct intel_gvt *gvt = sched_data->gvt;
@@ -251,8 +265,12 @@ static struct list_head *get_sched_head(struct gvt_sched_data *sched_data)
 	struct list_head *head;
 
 	if (scheduler->current_vgpu) {
-		cur_vgpu_data = scheduler->current_vgpu->sched_data;
-		head = &cur_vgpu_data->list;
+		if (is_not_idle_vgpu(scheduler->current_vgpu)) {
+			cur_vgpu_data = scheduler->current_vgpu->sched_data;
+			head = &cur_vgpu_data->list;
+		} else {
+			head = &sched_data->runq_head;
+		}
 	} else {
 		struct vgpu_sched_data *lru_vgpu_data;
 
@@ -272,6 +290,7 @@ static struct list_head *get_sched_head(struct gvt_sched_data *sched_data)
 
 static struct intel_vgpu *pickup_next_vgpu(struct gvt_sched_data *sched_data)
 {
+	struct intel_gvt *gvt = sched_data->gvt;
 	struct vgpu_sched_data *next_vgpu_data;
 	struct intel_vgpu *next_vgpu;
 	struct list_head *head;
@@ -297,8 +316,14 @@ static struct intel_vgpu *pickup_next_vgpu(struct gvt_sched_data *sched_data)
 		list_del_init(&next_vgpu_data->list);
 		list_add(&next_vgpu_data->list, head);
 	} else {
-		/* Choose the vGPU which has pending workload */
-		next_vgpu = find_busy_vgpu(head, sched_data);
+		/* Choose the vGPU which has timeslice left */
+		next_vgpu = find_vgpu_timeslice_left(head, sched_data);
+
+		/* Switch to the idle vGPU when no candidate has pending
+		 * workload or timeslice left.
+		 */
+		if (!next_vgpu)
+			next_vgpu = gvt->idle_vgpu;
 	}
 
 	return next_vgpu;
@@ -329,9 +354,12 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
 		gvt_dbg_sched("pick next vgpu %d\n", vgpu->id);
 
 		/* Move the last used vGPU to the tail of lru_list */
-		vgpu_data = vgpu->sched_data;
-		list_del_init(&vgpu_data->lru_list);
-		list_add_tail(&vgpu_data->lru_list, &sched_data->lru_vgpu_head);
+		if (vgpu != vgpu->gvt->idle_vgpu) {
+			vgpu_data = vgpu->sched_data;
+			list_del_init(&vgpu_data->lru_list);
+			list_add_tail(&vgpu_data->lru_list,
+					&sched_data->lru_vgpu_head);
+		}
 	}
 out:
 	if (scheduler->next_vgpu) {
-- 
2.7.4


