[PATCH v4 5/9] drm/i915/gvt: factor out the scheduler

Tian, Kevin kevin.tian at intel.com
Wed Mar 15 03:10:27 UTC 2017


> From: Ping Gao
> Sent: Wednesday, March 8, 2017 2:25 PM
> 
> Factor out the scheduler to a more clear structure, the basic logic is to find
> out next vGPU first and then schedule it. When decide to pick up who is the

when deciding...

> next vGPU it has two sequence parts: first is to find out a sched head who

"it includes two steps"

btw 'sched head' is just a structure. What the sched head is used for is what you
want to explain here. Does 'sched head' correspond to a vGPU? If yes, then
using vGPU would be clearer.

> has urgent requirement as it's near TDR because of out-of-service for a long
> time, second is to choose the vgpu who has pending workload follow round-
> robin style.

Looks like the policy is to treat vGPUs which haven't been scheduled for a long
time (based on a defined threshold) with higher priority, and then handle
normal vGPUs at normal priority in a round-robin fashion...

If true, the implementation would possibly be cleaner if a simple priority
concept were introduced here, which could then be easily extended to support
prioritized rendering in the future. :-)
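
To make the idea concrete, here is a rough standalone sketch of the kind of
two-level policy I have in mind. The names (sched_entry, PRIO_URGENT,
pick_next) are made up purely for illustration and are not the actual gvt
structures:

#include <stdbool.h>
#include <stdio.h>

enum sched_prio {
	PRIO_URGENT,	/* not serviced for longer than the threshold */
	PRIO_NORMAL,	/* regular round-robin candidates */
	NR_PRIO,
};

struct sched_entry {
	int vgpu_id;
	bool has_workload;
	enum sched_prio prio;
};

/*
 * Pick the first busy entry at the highest priority level, scanning
 * round-robin style from 'start' so same-priority vGPUs stay fair.
 */
static struct sched_entry *pick_next(struct sched_entry *q, int nr, int start)
{
	for (int prio = 0; prio < NR_PRIO; prio++) {
		for (int i = 0; i < nr; i++) {
			struct sched_entry *e = &q[(start + i) % nr];

			if (e->prio == prio && e->has_workload)
				return e;
		}
	}
	return NULL;
}

int main(void)
{
	struct sched_entry q[] = {
		{ .vgpu_id = 1, .has_workload = true,  .prio = PRIO_NORMAL },
		{ .vgpu_id = 2, .has_workload = true,  .prio = PRIO_URGENT },
		{ .vgpu_id = 3, .has_workload = false, .prio = PRIO_NORMAL },
	};
	struct sched_entry *next = pick_next(q, 3, 0);

	if (next)
		printf("next vgpu: %d\n", next->vgpu_id);
	return 0;
}

With something like this, the near-TDR case is just a higher priority level
rather than a special pre-pass, and more levels could be added later.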

> 
> Signed-off-by: Ping Gao <ping.a.gao at intel.com>
> ---
>  drivers/gpu/drm/i915/gvt/sched_policy.c | 78 ++++++++++++++++++++++++---------
>  1 file changed, 58 insertions(+), 20 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
> index 8c1eb53..e8a9db1 100644
> --- a/drivers/gpu/drm/i915/gvt/sched_policy.c
> +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
> @@ -125,29 +125,12 @@ enum {
>  	HAS_ACTIVE_VGPU_SCHED,
>  };
> 
> -#define GVT_DEFAULT_TIME_SLICE 1000000
> -
> -static void tbs_sched_func(struct gvt_sched_data *sched_data)
> +static struct intel_vgpu *get_vgpu_has_workload(struct list_head *head,
> +					struct gvt_sched_data *sched_data)

find_busy_vgpu?

>  {
>  	struct vgpu_sched_data *vgpu_data;
> -
> -	struct intel_gvt *gvt = sched_data->gvt;
> -	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
> -
>  	struct intel_vgpu *vgpu = NULL;
> -	struct list_head *pos, *head;
> -
> -	/* no vgpu or has already had a target */
> -	if (atomic_read(&gvt->num_vgpu_sched) <= ONLY_IDLE_VGPU_SCHED ||
> -			scheduler->next_vgpu)
> -		goto out;
> -
> -	if (scheduler->current_vgpu) {
> -		vgpu_data = scheduler->current_vgpu->sched_data;
> -		head = &vgpu_data->list;
> -	} else {
> -		head = &sched_data->runq_head;
> -	}
> +	struct list_head *pos;
> 
>  	/* search a vgpu with pending workload */
>  	list_for_each(pos, head) {
> @@ -162,6 +145,61 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
>  		break;
>  	}
> 
> +	return vgpu;
> +}
> +
> +static struct list_head *get_sched_head(struct gvt_sched_data *sched_data)
> +{
> +	struct intel_gvt *gvt = sched_data->gvt;
> +	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
> +	struct vgpu_sched_data *cur_vgpu_data;
> +	struct list_head *head;
> +
> +	if (scheduler->current_vgpu) {
> +		cur_vgpu_data = scheduler->current_vgpu->sched_data;
> +		head = &cur_vgpu_data->list;
> +	} else {
> +		gvt_dbg_sched("no current vgpu search from q head\n");
> +		head = &sched_data->runq_head;
> +	}

still don't understand why there's a case where current_vgpu is NULL...

> +
> +	return head;
> +}
> +
> +static struct intel_vgpu *pickup_next_vgpu(struct gvt_sched_data *sched_data)
> +{
> +	struct intel_vgpu *next_vgpu = NULL;
> +	struct list_head *head = NULL;
> +
> +	/* The scheduler is follow round-robin style, sched

is following or follows

> +	 * head means where start to choose next vGPU, it's
> +	 * help to decide which vGPU is first one in the
> +	 * round-robin queue at this schedule check point,
> +	 * that's important to keep fairness.

We need a higher-level explanation here: how many queues do we have, and
what is the purpose of each? I think sched_head here implies an individual
queue...
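
For reference, my current reading of the layout, written as a small
standalone sketch (fake_vgpu and the array-based queue are made up for
illustration, not the real list_head based runq): there is a single run
queue, every vGPU owns one node in it, and the 'sched head' is just the
position the round-robin walk resumes from -- the entry after current_vgpu,
or the queue head when there is no current vGPU:

#include <stdbool.h>
#include <stdio.h>

#define NR_VGPU 4

struct fake_vgpu {
	int id;
	bool has_workload;
};

int main(void)
{
	/* the single run queue, one entry per vGPU */
	struct fake_vgpu runq[NR_VGPU] = {
		{ .id = 1, .has_workload = false },
		{ .id = 2, .has_workload = true  },
		{ .id = 3, .has_workload = true  },
		{ .id = 4, .has_workload = false },
	};
	int current = 1;	/* index of current_vgpu, -1 if none */
	int start = (current < 0) ? 0 : (current + 1) % NR_VGPU;

	/* walk the queue once, starting right after the current vGPU */
	for (int i = 0; i < NR_VGPU; i++) {
		struct fake_vgpu *v = &runq[(start + i) % NR_VGPU];

		if (v->has_workload) {
			printf("next vgpu: %d\n", v->id);
			break;
		}
	}
	return 0;
}

If that reading is right, please spell it out in the code comment; if it is
wrong, the comment needs it even more.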

> +	 */
> +	head = get_sched_head(sched_data);
> +
> +	/* Choose the vGPU which has pending workload. */
> +	next_vgpu = get_vgpu_has_workload(head, sched_data);
> +
> +	return next_vgpu;
> +}
> +
> +#define GVT_DEFAULT_TIME_SLICE 1000000
> +
> +static void tbs_sched_func(struct gvt_sched_data *sched_data)
> +{
> +	struct intel_gvt *gvt = sched_data->gvt;
> +	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
> +	struct intel_vgpu *vgpu = NULL;
> +
> +	/* no active vgpu or has already had a target */
> +	if (atomic_read(&gvt->num_vgpu_sched) <= ONLY_IDLE_VGPU_SCHED ||
> +			scheduler->next_vgpu)
> +		goto out;
> +
> +	/* determine which vGPU should choose as next */
> +	vgpu = pickup_next_vgpu(sched_data);
>  	if (vgpu) {
>  		scheduler->next_vgpu = vgpu;
>  		gvt_dbg_sched("pick next vgpu %d\n", vgpu->id);
> --
> 2.7.4
> 
> _______________________________________________
> intel-gvt-dev mailing list
> intel-gvt-dev at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gvt-dev

