[RFC v3 7/9] drm/i915/gvt: add basic function for weight control
Ping Gao
ping.a.gao at intel.com
Thu Mar 2 02:19:41 UTC 2017
This method aims to guarantee the weight ratio at one-second
granularity, with the adjustment conducted every 100ms. At the end of
each vGPU switch, calculate the scheduled time and subtract it from
the time slice allocated to that vGPU; the time slice dedicated to
each 100ms period, together with the remaining (or overdrawn) time
slice carried over from previous periods, is used to decide how much
time slice is allocated to this vGPU in the next 100ms period, with
the end goal of guaranteeing the weight ratio over each one-second
window.
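For illustration only, a minimal user-space sketch of the balancing idea
described above; the names here (fake_vgpu, balance, PERIOD_US) are
hypothetical and not part of this patch, which works on TSC cycles inside
the GVT scheduler rather than microseconds:

/*
 * Sketch (not part of this patch): every 100ms period each vGPU is
 * granted a share of the period proportional to its weight, and any
 * leftover or overdraft from the previous period is carried into the
 * next one, so the ratio converges over a one-second window.
 */
#include <stdint.h>
#include <stdio.h>

#define PERIOD_US 100000 /* 100ms expressed in microseconds */

struct fake_vgpu {
	int weight;          /* scheduling weight, e.g. 2:4:8 */
	int64_t ts_usage;    /* remaining budget for the current period */
	int64_t ts_alloc;    /* per-period allocation */
};

static void balance(struct fake_vgpu *v, int n)
{
	int total = 0;

	for (int i = 0; i < n; i++)
		total += v[i].weight;

	for (int i = 0; i < n; i++) {
		v[i].ts_alloc = (int64_t)PERIOD_US * v[i].weight / total;
		/* carry leftover/debt from the previous period forward */
		v[i].ts_usage += v[i].ts_alloc;
	}
}

int main(void)
{
	struct fake_vgpu v[2] = { { .weight = 2 }, { .weight = 6 } };

	balance(v, 2);
	/* pretend vGPU0 overran its slice by 5ms in this period */
	v[0].ts_usage -= v[0].ts_alloc + 5000;
	v[1].ts_usage -= v[1].ts_alloc;
	balance(v, 2);

	for (int i = 0; i < 2; i++)
		printf("vGPU%d budget for next period: %lld us\n",
		       i, (long long)v[i].ts_usage);
	return 0;
}

With weights 2:6, the second balance() call gives vGPU0 a smaller
effective budget for the next period because its 5ms overdraft is
carried forward, which is how the weight ratio converges over the
one-second window.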
Signed-off-by: Ping Gao <ping.a.gao at intel.com>
---
drivers/gpu/drm/i915/gvt/sched_policy.c | 80 ++++++++++++++++++++++++++++++++-
1 file changed, 79 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index e0311c8..39b1011 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -70,6 +70,80 @@ struct gvt_sched_data {
struct list_head lru_vgpu_head;
};
+/* Calculate the sched_time during the vGPU switch and subtract
+ * it from the corresponding allocated time slice.
+ */
+static void stat_timeslice_usage(struct intel_vgpu *pre_vgpu)
+{
+ int64_t ts_delta;
+ struct vgpu_sched_data *vgpu_data = pre_vgpu->sched_data;
+
+ ts_delta = vgpu_data->sched_out_time - vgpu_data->sched_in_time;
+
+ vgpu_data->sched_time += ts_delta;
+ vgpu_data->ts_usage -= ts_delta;
+}
+
+#define GVT_TS_BALANCE_PERIOD_MS 100
+#define GVT_TS_BALANCE_PERIOD_CYC ((cycles_t)tsc_khz * GVT_TS_BALANCE_PERIOD_MS)
+#define is_idle_vgpu(vgpu) ((vgpu)->id == 0)
+
+/* This function is executed every 100ms, to allocate the time
+ * slice for the next 100ms.
+ */
+static void gvt_timeslice_balance(struct gvt_sched_data *sched_data)
+{
+ struct vgpu_sched_data *vgpu_data;
+ struct list_head *pos;
+ static uint64_t stage_check;
+ int stage = stage_check++ % 10;
+
+ if (stage == 0) {
+ int total_weight = 0;
+ int64_t fair_timeslice;
+
+ /* Every vGPU should have a valid weight set at the same time */
+ list_for_each(pos, &sched_data->runq_head) {
+ vgpu_data = container_of(pos, struct vgpu_sched_data, list);
+
+ if (is_idle_vgpu(vgpu_data->vgpu))
+ continue;
+
+ if (vgpu_data->sched_ctl.weight == 0) {
+ total_weight = 0;
+ break;
+ }
+ total_weight += vgpu_data->sched_ctl.weight;
+ }
+
+ /* The timeslice accumulation will reset every second */
+ list_for_each(pos, &sched_data->runq_head) {
+ vgpu_data = container_of(pos, struct vgpu_sched_data, list);
+ if (total_weight)
+ fair_timeslice = GVT_TS_BALANCE_PERIOD_CYC *
+ vgpu_data->sched_ctl.weight /
+ total_weight;
+ else
+ fair_timeslice = GVT_TS_BALANCE_PERIOD_CYC;
+
+ vgpu_data->ts_alloc = fair_timeslice;
+ vgpu_data->ts_usage = vgpu_data->ts_alloc;
+
+ /* sched_in_time also needs to be reset every second */
+ vgpu_data->sched_in_time = get_cycles();
+ }
+ } else {
+ list_for_each(pos, &sched_data->runq_head) {
+ vgpu_data = container_of(pos, struct vgpu_sched_data, list);
+
+ /* The timeslice for the next 100ms should include the
+ * leftover/debt slice from previous stages.
+ */
+ vgpu_data->ts_usage += vgpu_data->ts_alloc;
+ }
+ }
+}
+
static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
@@ -106,6 +180,7 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
if (scheduler->current_vgpu) {
vgpu_data = scheduler->current_vgpu->sched_data;
vgpu_data->sched_out_time = cur_cycles;
+ stat_timeslice_usage(scheduler->current_vgpu);
}
vgpu_data = scheduler->next_vgpu->sched_data;
vgpu_data->sched_in_time = cur_cycles;
@@ -152,7 +227,6 @@ static struct intel_vgpu *get_vgpu_has_workload(struct list_head *head,
/* 1.5 second */
#define VGPU_TDR_THRES_MS ((cycles_t)tsc_khz * 1500)
-#define is_idle_vgpu(vgpu) ((vgpu)->id == 0)
static struct intel_vgpu *find_longest_unsched_vgpu(struct list_head *lru_vgpu_head)
{
@@ -250,6 +324,10 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct vgpu_sched_data *vgpu_data;
struct intel_vgpu *vgpu = NULL;
+ static uint64_t timer_check;
+
+ if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS))
+ gvt_timeslice_balance(sched_data);
/* no active vgpu or has already had a target */
if (atomic_read(&gvt->num_vgpu_sched) <= ONLY_IDLE_VGPU_SCHED ||
--
2.7.4