[PATCH 2/5] drm/i915/gvt: add basic function for fairness control
Ping Gao
ping.a.gao at intel.com
Thu Jan 26 01:19:15 UTC 2017
This method tries to guarantee fairness at the one-second level, with an
adjustment performed every 100ms. At the end of each context switch,
calculate the time the vGPU was actually scheduled and subtract it from
the time slice it was allocated; the dedicated time slice for each 100ms
period, together with the remaining (or overdrawn) time slice carried
over from previous periods, decides how much time slice this vGPU gets
in the next 100ms period. Ten such periods make up the one-second window
over which fairness is guaranteed.
Signed-off-by: Ping Gao <ping.a.gao at intel.com>
---
drivers/gpu/drm/i915/gvt/sched_policy.c | 76 +++++++++++++++++++++++++++++++++
1 file changed, 76 insertions(+)
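
For reference, a minimal user-space sketch of the weight-proportional split
and the carry-over described above; the helper fair_timeslice_ms, the weights
(2 and 6) and the 10ms consumption figure are illustrative only, not part of
this patch:

/*
 * Sketch only: weight-proportional share of one 100ms balance period,
 * plus carry-over of unused time into the next period. Time is in ms.
 */
#include <stdio.h>

#define BALANCE_PERIOD_MS	100	/* mirrors GVT_TS_BALANCE_PERIOD */

static int fair_timeslice_ms(int weight, int total_weight)
{
	if (total_weight == 0)		/* some vGPU has no valid weight */
		return BALANCE_PERIOD_MS;
	return BALANCE_PERIOD_MS * weight / total_weight;
}

int main(void)
{
	/* Two vGPUs with weights 2 and 6 split a period 25ms / 75ms. */
	int alloc_a = fair_timeslice_ms(2, 8);	/* 25 */
	int alloc_b = fair_timeslice_ms(6, 8);	/* 75 */

	/*
	 * Carry-over: if vGPU A consumed only 10ms of its 25ms, the
	 * unused 15ms is added on top of the next period's allocation.
	 */
	int leftover_a = alloc_a - 10;
	int next_budget_a = alloc_a + leftover_a;	/* 40 */

	printf("A: %dms, B: %dms, A next budget: %dms\n",
	       alloc_a, alloc_b, next_budget_a);
	return 0;
}

Note that when any vGPU reports a zero weight, total_weight stays 0 and every
vGPU receives a full 100ms allotment, i.e. the weighted throttling is
effectively disabled for that second.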
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 03dc1f3..71c7913 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -67,6 +67,67 @@ struct tbs_sched_data {
 	struct list_head runq_head;
 };
+static void gvt_timeslice_usage(struct intel_vgpu *pre_vgpu)
+{
+	int64_t ts_delta;
+	struct tbs_vgpu_data *vgpu_data = pre_vgpu->sched_data;
+
+	ts_delta = vgpu_data->sched_out_time - vgpu_data->sched_in_time;
+
+	vgpu_data->sched_time += ts_delta;
+	vgpu_data->ts_usage -= ts_delta / cpu_khz;
+}
+
+#define GVT_TS_BALANCE_PERIOD 100
+
+static void gvt_timeslice_balance(struct tbs_sched_data *sched_data)
+{
+	struct tbs_vgpu_data *vgpu_data;
+	struct list_head *pos;
+	static uint64_t stage_check;
+	int stage = stage_check++ % 10;
+
+	if (stage == 0) {
+		int total_weight = 0;
+		int fair_timeslice;
+
+		/* Every vGPU must have a valid weight set before balancing */
+		list_for_each(pos, &sched_data->runq_head) {
+			vgpu_data = container_of(pos, struct tbs_vgpu_data, list);
+			if (vgpu_data->sched_ctl.weight == 0) {
+				total_weight = 0;
+				break;
+			}
+			total_weight += vgpu_data->sched_ctl.weight;
+		}
+
+		/* The timeslice accumulation is reset every second */
+		list_for_each(pos, &sched_data->runq_head) {
+			vgpu_data = container_of(pos, struct tbs_vgpu_data, list);
+			if (total_weight)
+				fair_timeslice = GVT_TS_BALANCE_PERIOD *
+						 vgpu_data->sched_ctl.weight /
+						 total_weight;
+			else
+				fair_timeslice = GVT_TS_BALANCE_PERIOD;
+
+			vgpu_data->ts_alloc = fair_timeslice;
+			vgpu_data->ts_usage = vgpu_data->ts_alloc;
+			/* sched_in_time needs to be reset as well */
+			vgpu_data->sched_in_time = get_cycles();
+		}
+	} else {
+		list_for_each(pos, &sched_data->runq_head) {
+			vgpu_data = container_of(pos, struct tbs_vgpu_data, list);
+
+			/* The timeslice for the next 100ms should include the
+			 * leftover/debt slice from previous stages.
+			 */
+			vgpu_data->ts_usage += vgpu_data->ts_alloc;
+		}
+	}
+}
+
 static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 {
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
@@ -103,6 +164,7 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 	if (scheduler->current_vgpu) {
 		vgpu_data = scheduler->current_vgpu->sched_data;
 		vgpu_data->sched_out_time = cur_cycles;
+		gvt_timeslice_usage(scheduler->current_vgpu);
 	}
 	vgpu_data = scheduler->next_vgpu->sched_data;
 	vgpu_data->sched_in_time = cur_cycles;
@@ -118,6 +180,15 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 		wake_up(&scheduler->waitq[i]);
 }
+static inline int gvt_nr_in_runq(struct tbs_sched_data *sched_data)
+{
+	int count = 0;
+	struct list_head *pos;
+	list_for_each(pos, &sched_data->runq_head)
+		count++;
+	return count;
+}
+
 #define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000)
 static void tbs_sched_func(struct work_struct *work)
@@ -131,9 +202,14 @@ static void tbs_sched_func(struct work_struct *work)
 	struct intel_vgpu *vgpu = NULL;
 	struct list_head *pos, *head;
+	static uint64_t timer_check;
 	mutex_lock(&gvt->lock);
+	if (gvt_nr_in_runq(sched_data) > 1 &&
+	    !(timer_check++ % GVT_TS_BALANCE_PERIOD))
+		gvt_timeslice_balance(sched_data);
+
 	/* no vgpu or has already had a target */
 	if (list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
 		goto out;
--
2.7.4