[PATCH] drm/i915/gvt: use hrtimer to replace delayed_work in scheduler
Ping Gao
ping.a.gao at intel.com
Mon Feb 6 06:19:15 UTC 2017
Currently the scheduler is triggered by a delayed_work, whose granularity
is the jiffy, so the length of the time slices varies and cannot be made
precise at the microsecond level. To enable fine-grained performance
control in the future, this patch uses an hrtimer instead.
Signed-off-by: Ping Gao <ping.a.gao at intel.com>
---
drivers/gpu/drm/i915/gvt/gvt.c | 5 +++
drivers/gpu/drm/i915/gvt/gvt.h | 1 +
drivers/gpu/drm/i915/gvt/sched_policy.c | 63 ++++++++++++++++++++++++---------
drivers/gpu/drm/i915/gvt/sched_policy.h | 2 ++
4 files changed, 55 insertions(+), 16 deletions(-)
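Background, not part of the patch: the change below follows the standard
periodic-hrtimer pattern (absolute mode, re-armed from its own callback by
pushing the expiry forward one period). A minimal, self-contained sketch of
that pattern; the demo module and every name in it are hypothetical:

/* demo module: periodic absolute-mode hrtimer, re-armed in its callback */
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;
static u64 demo_period_ns = 1000000; /* 1 ms, same as GVT_DEFAULT_TIME_SLICE below */

static enum hrtimer_restart demo_timer_fn(struct hrtimer *timer)
{
	/* hard-irq context: no sleeping calls allowed here */
	hrtimer_add_expires_ns(timer, demo_period_ns);
	return HRTIMER_RESTART;
}

static int __init demo_init(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	demo_timer.function = demo_timer_fn;
	hrtimer_start(&demo_timer,
		      ktime_add_ns(ktime_get(), demo_period_ns),
		      HRTIMER_MODE_ABS);
	return 0;
}

static void __exit demo_exit(void)
{
	hrtimer_cancel(&demo_timer);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");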
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 9a636a2..79102c9 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -150,6 +150,11 @@ static int gvt_service_thread(void *data)
intel_gvt_emulate_vblank(gvt);
mutex_unlock(&gvt->lock);
}
+
+ if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
+ (void *)&gvt->service_request)) {
+ intel_gvt_schedule(gvt);
+ }
}
return 0;
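Why the timer defers to this thread: hrtimer callbacks run in hard-irq
context, where gvt->lock (a mutex) cannot be taken, so tbs_timer_fn() below
only raises a request bit and wakes the service thread. An abbreviated
sketch of the resulting loop; the wait-queue field name is assumed from
context rather than shown in this hunk:

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include "gvt.h"

static int gvt_service_thread(void *data)
{
	struct intel_gvt *gvt = (struct intel_gvt *)data;
	int ret;

	while (!kthread_should_stop()) {
		ret = wait_event_interruptible(gvt->service_thread_wq,
				kthread_should_stop() || gvt->service_request);
		if (ret)
			continue;	/* interrupted by a signal */

		/* ... vblank emulation elided ... */

		if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
				(void *)&gvt->service_request))
			intel_gvt_schedule(gvt); /* process context: may take gvt->lock */
	}
	return 0;
}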
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index e227caf..2031df7 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -247,6 +247,7 @@ static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
enum {
INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
+ INTEL_GVT_REQUEST_SCHED = 1,
};
static inline void intel_gvt_request_service(struct intel_gvt *gvt,
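The new request type rides on the existing service helper, whose
declaration is the context line above; presumably it just sets the bit and
wakes the service thread. A sketch under that assumption (the wait-queue
field name is again assumed, not confirmed by this hunk):

static inline void intel_gvt_request_service(struct intel_gvt *gvt,
		int service)
{
	set_bit(service, (void *)&gvt->service_request);
	wake_up(&gvt->service_thread_wq);	/* assumed field name */
}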
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 678b0be..66fef39 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -96,17 +96,26 @@ struct tbs_vgpu_data {
struct tbs_sched_data {
struct intel_gvt *gvt;
- struct delayed_work work;
+ struct hrtimer timer;
unsigned long period;
struct list_head runq_head;
};
-#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000)
+static inline int vgpu_nr_in_runq(struct list_head *runq_head)
+{
+ int count = 0;
+ struct list_head *pos;
+
+ list_for_each(pos, runq_head)
+ count++;
+
+ return count;
+}
-static void tbs_sched_func(struct work_struct *work)
+#define GVT_DEFAULT_TIME_SLICE 1000000
+
+static void tbs_sched_func(struct tbs_sched_data *sched_data)
{
- struct tbs_sched_data *sched_data = container_of(work,
- struct tbs_sched_data, work.work);
struct tbs_vgpu_data *vgpu_data;
struct intel_gvt *gvt = sched_data->gvt;
@@ -115,8 +124,6 @@ static void tbs_sched_func(struct work_struct *work)
struct intel_vgpu *vgpu = NULL;
struct list_head *pos, *head;
- mutex_lock(&gvt->lock);
-
/* no vgpu or has already had a target */
if (list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
goto out;
@@ -152,17 +159,30 @@ static void tbs_sched_func(struct work_struct *work)
scheduler->next_vgpu->id);
try_to_schedule_next_vgpu(gvt);
}
+}
- /*
- * still have vgpu on runq
- * or last schedule haven't finished due to running workload
- */
- if (!list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
- schedule_delayed_work(&sched_data->work, sched_data->period);
+void intel_gvt_schedule(struct intel_gvt *gvt)
+{
+ struct tbs_sched_data *sched_data = gvt->scheduler.sched_data;
+ mutex_lock(&gvt->lock);
+ tbs_sched_func(sched_data);
mutex_unlock(&gvt->lock);
}
+static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
+{
+ struct tbs_sched_data *data;
+
+ data = container_of(timer_data, struct tbs_sched_data, timer);
+
+ intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED);
+
+ hrtimer_add_expires_ns(&data->timer, data->period);
+
+ return HRTIMER_RESTART;
+}
+
static int tbs_sched_init(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler =
@@ -175,11 +195,13 @@ static int tbs_sched_init(struct intel_gvt *gvt)
return -ENOMEM;
INIT_LIST_HEAD(&data->runq_head);
- INIT_DELAYED_WORK(&data->work, tbs_sched_func);
+ hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ data->timer.function = tbs_timer_fn;
data->period = GVT_DEFAULT_TIME_SLICE;
data->gvt = gvt;
scheduler->sched_data = data;
+
return 0;
}
@@ -189,7 +211,8 @@ static void tbs_sched_clean(struct intel_gvt *gvt)
&gvt->scheduler;
struct tbs_sched_data *data = scheduler->sched_data;
- cancel_delayed_work(&data->work);
+ hrtimer_cancel(&data->timer);
+
kfree(data);
scheduler->sched_data = NULL;
}
@@ -224,14 +247,22 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
return;
list_add_tail(&vgpu_data->list, &sched_data->runq_head);
- schedule_delayed_work(&sched_data->work, sched_data->period);
+
+ if (vgpu_nr_in_runq(&sched_data->runq_head) == 1)
+ hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
+ sched_data->period), HRTIMER_MODE_ABS);
}
static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
{
+ struct tbs_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
list_del_init(&vgpu_data->list);
+
+ if (list_empty(&sched_data->runq_head))
+ hrtimer_cancel(&sched_data->timer);
}
static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
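A note on the run-queue checks just added: vgpu_nr_in_runq() walks the
whole list only to compare the count against 1, while list_is_singular()
detects the same 0 -> 1 transition in O(1). A sketch of equivalent
start/stop logic under that substitution; the helper names are
illustrative, not part of the patch:

#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static void runq_add(struct tbs_sched_data *d, struct list_head *entry)
{
	list_add_tail(entry, &d->runq_head);

	/* arm the timer only when the queue goes from empty to one entry */
	if (list_is_singular(&d->runq_head))
		hrtimer_start(&d->timer,
			      ktime_add_ns(ktime_get(), d->period),
			      HRTIMER_MODE_ABS);
}

static void runq_del(struct tbs_sched_data *d, struct list_head *entry)
{
	list_del_init(entry);

	/* stop ticking once the run queue drains */
	if (list_empty(&d->runq_head))
		hrtimer_cancel(&d->timer);
}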
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.h b/drivers/gpu/drm/i915/gvt/sched_policy.h
index bb8b909..ba00a5f 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.h
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.h
@@ -43,6 +43,8 @@ struct intel_gvt_sched_policy_ops {
void (*stop_schedule)(struct intel_vgpu *vgpu);
};
+void intel_gvt_schedule(struct intel_gvt *gvt);
+
int intel_gvt_init_sched_policy(struct intel_gvt *gvt);
void intel_gvt_clean_sched_policy(struct intel_gvt *gvt);
--
2.7.4