[PATCH v5 8/8] drm/i915/gvt: let timeslice usage control vGPU scheduling
Tian, Kevin
kevin.tian at intel.com
Wed Mar 29 03:37:35 UTC 2017
> From: Ping Gao
> Sent: Tuesday, March 28, 2017 2:48 PM
>
> The timeslice usage will determine vGPU whether has chance to schedule or
> not at every vGPU switch checkpoint.
"The timeslice usage determines whether a vGPU still has chance to be
scheduled. Move to take this new scheme in scheduling policy."
>
> Signed-off-by: Ping Gao <ping.a.gao at intel.com>
> ---
>  drivers/gpu/drm/i915/gvt/sched_policy.c | 48 ++++++++++++++++++++++++++-------
>  1 file changed, 38 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
> index c7fd8b8..04f874b 100644
> --- a/drivers/gpu/drm/i915/gvt/sched_policy.c
> +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
> @@ -190,7 +190,10 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
> wake_up(&scheduler->waitq[i]);
> }
>
> -static struct intel_vgpu *find_busy_vgpu(struct list_head *head,
> +#define vgpu_has_ts_limit(vgpu_data) \
> + ((vgpu_data)->allocated_ts < ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS))
> +
I don't quite understand the meaning of this macro; either add
a comment or change it to a better name.
From my understanding, it looks like you want to catch a vGPU which
was not scheduled in the previous timeslice (in that case the
accumulated slice would exceed 100ms), and then you want some special
handling?
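If my reading above is right, even just a comment capturing it would
help (a sketch of my understanding only, not necessarily the intended
semantics):

/*
 * A vGPU is "limited" when its per-period allocation stays below one
 * full balance period; a vGPU that was not scheduled at all in the
 * previous period can accumulate more than GVT_TS_BALANCE_PERIOD_MS
 * and is then treated as having no timeslice limit.
 */
#define vgpu_has_ts_limit(vgpu_data) \
	((vgpu_data)->allocated_ts < ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS))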
> +static struct intel_vgpu *find_vgpu_timeslice_left(struct list_head *head,
> struct gvt_sched_data *sched_data)
> {
> struct vgpu_sched_data *vgpu_data;
> @@ -206,8 +209,16 @@ static struct intel_vgpu *find_busy_vgpu(struct list_head *head,
> if (!vgpu_has_pending_workload(vgpu_data->vgpu))
> continue;
>
> - vgpu = vgpu_data->vgpu;
> - break;
> + /* Return the vGPU if there is no time slice limitation */
> + if (!vgpu_has_ts_limit(vgpu_data)) {
> + vgpu = vgpu_data->vgpu;
> + break;
> +
> + /* Return the vGPU only if it has time slice left */
> + } else if (vgpu_data->left_ts > 0) {
> + vgpu = vgpu_data->vgpu;
> + break;
> + }
A bit confused here. Can't you always use left_ts to judge,
regardless of whether allocated_ts exceeds the limitation? I suppose
left_ts should always be > 0 when allocated_ts > 100ms...
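If that invariant holds, the whole helper collapses to a single check
on left_ts. A sketch (reusing the find_eligible_vgpu name I suggest
below, and assuming the list_for_each walk from the original
find_busy_vgpu):

static struct intel_vgpu *find_eligible_vgpu(struct list_head *head,
				struct gvt_sched_data *sched_data)
{
	struct vgpu_sched_data *vgpu_data;
	struct intel_vgpu *vgpu = NULL;
	struct list_head *pos;

	list_for_each(pos, head) {
		vgpu_data = container_of(pos, struct vgpu_sched_data, list);

		if (!vgpu_has_pending_workload(vgpu_data->vgpu))
			continue;

		/* One check covers both cases: an unlimited vGPU keeps
		 * left_ts positive, and a limited one is skipped once
		 * its slice is used up.
		 */
		if (vgpu_data->left_ts > 0) {
			vgpu = vgpu_data->vgpu;
			break;
		}
	}

	return vgpu;
}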
> }
>
> return vgpu;
> @@ -243,6 +254,9 @@ static struct intel_vgpu *find_longest_unsched_vgpu(struct list_head *lru_vgpu_h
> return NULL;
> }
>
> +#define is_not_idle_vgpu(vgpu) \
> + ((vgpu) && ((vgpu) != (vgpu)->gvt->idle_vgpu))
I would change it the other way:

#define is_idle_vgpu(vgpu) \
	((vgpu) && ((vgpu) == (vgpu)->gvt->idle_vgpu))

Then you can use !is_idle_vgpu(vgpu) for the original purpose. :-)
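Applied to the hunk below, that would then read (sketch):

	if (scheduler->current_vgpu) {
		if (!is_idle_vgpu(scheduler->current_vgpu)) {
			cur_vgpu_data = scheduler->current_vgpu->sched_data;
			head = &cur_vgpu_data->list;
		} else {
			head = &sched_data->runq_head;
		}
	}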
> +
> static struct list_head *get_sched_head(struct gvt_sched_data *sched_data)
> {
> struct intel_gvt *gvt = sched_data->gvt;
> @@ -251,8 +265,12 @@ static struct list_head *get_sched_head(struct gvt_sched_data *sched_data)
> struct list_head *head;
>
> if (scheduler->current_vgpu) {
> - cur_vgpu_data = scheduler->current_vgpu->sched_data;
> - head = &cur_vgpu_data->list;
> + if (is_not_idle_vgpu(scheduler->current_vgpu)) {
> + cur_vgpu_data = scheduler->current_vgpu->sched_data;
> + head = &cur_vgpu_data->list;
> + } else {
> + head = &sched_data->runq_head;
> + }
> } else {
> struct vgpu_sched_data *lru_vgpu_data;
>
> @@ -272,6 +290,7 @@ static struct list_head *get_sched_head(struct gvt_sched_data *sched_data)
>
> static struct intel_vgpu *pickup_next_vgpu(struct gvt_sched_data *sched_data)
> {
> + struct intel_gvt *gvt = sched_data->gvt;
> struct vgpu_sched_data *next_vgpu_data;
> struct intel_vgpu *next_vgpu;
> struct list_head *head;
> @@ -297,8 +316,14 @@ static struct intel_vgpu *pickup_next_vgpu(struct gvt_sched_data *sched_data)
> list_del_init(&next_vgpu_data->list);
> list_add(&next_vgpu_data->list, head);
> } else {
> - /* Choose the vGPU which has pending workload */
> - next_vgpu = find_busy_vgpu(head, sched_data);
> + /* Choose the vGPU which has timeslice left */
> + next_vgpu = find_vgpu_timeslice_left(head, sched_data);
How about renaming it find_eligible_vgpu?
> +
> + /* Switch to idle vGPU when all next candidates are
> + * empty.
> + */
> + if (!next_vgpu)
> + next_vgpu = gvt->idle_vgpu;
> }
>
> return next_vgpu;
> @@ -329,9 +354,12 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
> gvt_dbg_sched("pick next vgpu %d\n", vgpu->id);
>
> /* Move the last used vGPU to the tail of lru_list */
> - vgpu_data = vgpu->sched_data;
> - list_del_init(&vgpu_data->lru_list);
> - list_add_tail(&vgpu_data->lru_list, &sched_data->lru_vgpu_head);
> + if (vgpu != vgpu->gvt->idle_vgpu) {
> + vgpu_data = vgpu->sched_data;
> + list_del_init(&vgpu_data->lru_list);
> + list_add_tail(&vgpu_data->lru_list,
> + &sched_data->lru_vgpu_head);
> + }
> }
> out:
> if (scheduler->next_vgpu) {
> --
> 2.7.4
>