[RFC v3 3/9] drm/i915/gvt: insert the idle vGPU into scheduler

Ping Gao ping.a.gao at intel.com
Thu Mar 2 02:19:37 UTC 2017


Let the idle vGPU take part in scheduling; it has no real HW effect
as its workload queue is always empty.

Signed-off-by: Ping Gao <ping.a.gao at intel.com>
---
 drivers/gpu/drm/i915/gvt/gvt.c          |  1 +
 drivers/gpu/drm/i915/gvt/sched_policy.c | 18 ++++++++++--------
 2 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 996467d..c66dc8e 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -301,6 +301,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 		goto out_clean_types;
 	}
 	gvt->idle_vgpu = vgpu;
+	intel_vgpu_start_schedule(vgpu);
 
 	gvt_dbg_core("gvt device initialization is done\n");
 	dev_priv->gvt = gvt;
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 174f739..476470c 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -101,6 +101,12 @@ struct tbs_sched_data {
 	struct list_head runq_head;
 };
 
+enum {
+	NON_VGPU_SCHED = 0,
+	ONLY_IDLE_VGPU_SCHED,
+	HAS_ACTIVE_VGPU_SCHED,
+};
+
 #define GVT_DEFAULT_TIME_SLICE 1000000
 
 static void tbs_sched_func(struct tbs_sched_data *sched_data)
@@ -114,7 +120,8 @@ static void tbs_sched_func(struct tbs_sched_data *sched_data)
 	struct list_head *pos, *head;
 
 	/* no vgpu or has already had a target */
-	if (list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
+	if (atomic_read(&gvt->num_vgpu_sched) <= ONLY_IDLE_VGPU_SCHED ||
+			scheduler->next_vgpu)
 		goto out;
 
 	if (scheduler->current_vgpu) {
@@ -205,11 +212,6 @@ static void tbs_sched_clean(struct intel_gvt *gvt)
 	scheduler->sched_data = NULL;
 }
 
-enum {
-	NON_VGPU_SCHED = 0,
-	HAS_VGPU_SCHED,
-};
-
 static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
 {
 	struct tbs_vgpu_data *data;
@@ -242,7 +244,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
 	list_add_tail(&vgpu_data->list, &sched_data->runq_head);
 
 	atomic_inc(&vgpu->gvt->num_vgpu_sched);
-	if (atomic_read(&vgpu->gvt->num_vgpu_sched) == HAS_VGPU_SCHED)
+	if (atomic_read(&vgpu->gvt->num_vgpu_sched) == HAS_ACTIVE_VGPU_SCHED)
 		hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
 			sched_data->period), HRTIMER_MODE_ABS);
 }
@@ -258,7 +260,7 @@ static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
 	list_del_init(&vgpu_data->list);
 
 	atomic_dec(&vgpu->gvt->num_vgpu_sched);
-	if (atomic_read(&vgpu->gvt->num_vgpu_sched) == NON_VGPU_SCHED)
+	if (atomic_read(&vgpu->gvt->num_vgpu_sched) == ONLY_IDLE_VGPU_SCHED)
 		hrtimer_cancel(&sched_data->timer);
 }
 
-- 
2.7.4



More information about the intel-gvt-dev mailing list