[PATCH v4 4/9] drm/i915/gvt: add some statistic routine for scheduler
Tian, Kevin
kevin.tian at intel.com
Wed Mar 15 02:59:06 UTC 2017
> From: Ping Gao
> Sent: Wednesday, March 8, 2017 2:25 PM
>
> Add some statistic routine to collect the time when vGPU schedule in/out and
better worded as "when a vGPU is scheduled in/out" and
> the time of the last ctx submission.
>
> Rename some schedule structure.
>
> Signed-off-by: Ping Gao <ping.a.gao at intel.com>
> ---
> drivers/gpu/drm/i915/gvt/gvt.h | 5 +++
> drivers/gpu/drm/i915/gvt/handlers.c | 1 +
> drivers/gpu/drm/i915/gvt/sched_policy.c | 70 +++++++++++++++++++++------------
> 3 files changed, 50 insertions(+), 26 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
> index f3f5678..563ed22 100644
> --- a/drivers/gpu/drm/i915/gvt/gvt.h
> +++ b/drivers/gpu/drm/i915/gvt/gvt.h
> @@ -138,6 +138,10 @@ struct intel_vgpu_display {
> struct intel_vgpu_sbi sbi;
> };
>
> +struct intel_sched_ctl {
vgpu_sched_ctl would be a better name here.
> + int weight;
> +};
> +
> struct intel_vgpu {
> struct intel_gvt *gvt;
> int id;
> @@ -160,6 +164,7 @@ struct intel_vgpu {
> struct list_head workload_q_head[I915_NUM_ENGINES];
> struct kmem_cache *workloads;
> atomic_t running_workload_num;
> + cycles_t last_ctx_submit_time;
> DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
> struct i915_gem_context *shadow_ctx;
> struct notifier_block shadow_ctx_notifier_block;
> diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
> index f6a7b8e..c24c00c 100644
> --- a/drivers/gpu/drm/i915/gvt/handlers.c
> +++ b/drivers/gpu/drm/i915/gvt/handlers.c
> @@ -1409,6 +1409,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
>
> execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
> if (execlist->elsp_dwords.index == 3) {
> + vgpu->last_ctx_submit_time = get_cycles();
> ret = intel_vgpu_submit_execlist(vgpu, ring_id);
> if(ret)
> gvt_err("fail submit workload on ring %d\n", ring_id);
> diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
> index 476470c..8c1eb53 100644
> --- a/drivers/gpu/drm/i915/gvt/sched_policy.c
> +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
> @@ -47,11 +47,34 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
> return false;
> }
>
> +struct vgpu_sched_data {
> + struct list_head list;
> + struct intel_vgpu *vgpu;
> +
> + /* per-vgpu sched stats */
This comment is obvious from the structure itself; could just remove it. :-)
> + int64_t sched_in_time;
> + int64_t sched_out_time;
> + int64_t sched_time;
> + int64_t ts_usage;
used_ts
> + int64_t ts_alloc;
allocated_ts
To align with the other fields, is 'time' better than 'ts' here, or does 'ts' stand
for a different kind of measurement? (A possible variant is sketched after this struct.)
> +
> + struct intel_sched_ctl sched_ctl;
> +};
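For example, just to illustrate the renaming (untested, pick whichever scheme reads best):

struct vgpu_sched_data {
	struct list_head list;
	struct intel_vgpu *vgpu;

	int64_t sched_in_time;
	int64_t sched_out_time;
	int64_t sched_time;
	int64_t used_ts;	/* or used_time, to match the *_time fields above */
	int64_t allocated_ts;	/* or allocated_time */

	struct intel_sched_ctl sched_ctl;	/* or vgpu_sched_ctl, per the earlier comment */
};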
> +
> +struct gvt_sched_data {
> + struct intel_gvt *gvt;
> + struct hrtimer timer;
> + unsigned long period;
> + struct list_head runq_head;
> +};
> +
> static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
> {
> struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
> enum intel_engine_id i;
> struct intel_engine_cs *engine;
> + struct vgpu_sched_data *vgpu_data;
> + cycles_t cur_cycles;
>
> /* no target to schedule */
> if (!scheduler->next_vgpu)
> @@ -77,6 +100,14 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
> gvt_dbg_sched("switch to next vgpu %d\n",
> scheduler->next_vgpu->id);
>
> + cur_cycles = get_cycles();
> + if (scheduler->current_vgpu) {
Can this condition ever be false? Since you've introduced an idle vGPU, I think
current_vgpu will always be non-NULL here. Then you could introduce local variables
(prev, next) and run the logic below unconditionally; see the untested sketch after
these added lines.
> + vgpu_data = scheduler->current_vgpu->sched_data;
> + vgpu_data->sched_out_time = cur_cycles;
> + }
> + vgpu_data = scheduler->next_vgpu->sched_data;
> + vgpu_data->sched_in_time = cur_cycles;
> +
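Something along these lines, inside try_to_schedule_next_vgpu() (untested sketch, just
to illustrate the prev/next idea):

	struct vgpu_sched_data *prev = scheduler->current_vgpu->sched_data;
	struct vgpu_sched_data *next = scheduler->next_vgpu->sched_data;
	cycles_t cur_cycles = get_cycles();

	/* current_vgpu is never NULL once the idle vGPU is present */
	prev->sched_out_time = cur_cycles;
	next->sched_in_time = cur_cycles;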
> /* switch current vgpu */
> scheduler->current_vgpu = scheduler->next_vgpu;
> scheduler->next_vgpu = NULL;
> @@ -88,19 +119,6 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
> wake_up(&scheduler->waitq[i]);
> }
>
> -struct tbs_vgpu_data {
> - struct list_head list;
> - struct intel_vgpu *vgpu;
> - /* put some per-vgpu sched stats here */
> -};
> -
> -struct tbs_sched_data {
> - struct intel_gvt *gvt;
> - struct hrtimer timer;
> - unsigned long period;
> - struct list_head runq_head;
> -};
> -
> enum {
> NON_VGPU_SCHED = 0,
> ONLY_IDLE_VGPU_SCHED,
> @@ -109,9 +127,9 @@ enum {
>
> #define GVT_DEFAULT_TIME_SLICE 1000000
>
> -static void tbs_sched_func(struct tbs_sched_data *sched_data)
> +static void tbs_sched_func(struct gvt_sched_data *sched_data)
> {
> - struct tbs_vgpu_data *vgpu_data;
> + struct vgpu_sched_data *vgpu_data;
>
> struct intel_gvt *gvt = sched_data->gvt;
> struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
> @@ -136,7 +154,7 @@ static void tbs_sched_func(struct tbs_sched_data *sched_data)
> if (pos == &sched_data->runq_head)
> continue;
>
> - vgpu_data = container_of(pos, struct tbs_vgpu_data, list);
> + vgpu_data = container_of(pos, struct vgpu_sched_data, list);
> if (!vgpu_has_pending_workload(vgpu_data->vgpu))
> continue;
>
> @@ -158,7 +176,7 @@ static void tbs_sched_func(struct tbs_sched_data *sched_data)
>
> void intel_gvt_schedule(struct intel_gvt *gvt)
> {
> - struct tbs_sched_data *sched_data = gvt->scheduler.sched_data;
> + struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
>
> mutex_lock(&gvt->lock);
> tbs_sched_func(sched_data);
> @@ -167,9 +185,9 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
>
> static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
> {
> - struct tbs_sched_data *data;
> + struct gvt_sched_data *data;
>
> - data = container_of(timer_data, struct tbs_sched_data, timer);
> + data = container_of(timer_data, struct gvt_sched_data, timer);
>
> intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED);
>
> @@ -183,7 +201,7 @@ static int tbs_sched_init(struct intel_gvt *gvt)
> struct intel_gvt_workload_scheduler *scheduler =
> &gvt->scheduler;
>
> - struct tbs_sched_data *data;
> + struct gvt_sched_data *data;
>
> data = kzalloc(sizeof(*data), GFP_KERNEL);
> if (!data)
> @@ -204,7 +222,7 @@ static void tbs_sched_clean(struct intel_gvt *gvt)
> {
> struct intel_gvt_workload_scheduler *scheduler =
> &gvt->scheduler;
> - struct tbs_sched_data *data = scheduler->sched_data;
> + struct gvt_sched_data *data = scheduler->sched_data;
>
> hrtimer_cancel(&data->timer);
>
> @@ -214,7 +232,7 @@ static void tbs_sched_clean(struct intel_gvt *gvt)
>
> static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
> {
> - struct tbs_vgpu_data *data;
> + struct vgpu_sched_data *data;
>
> data = kzalloc(sizeof(*data), GFP_KERNEL);
> if (!data)
> @@ -235,8 +253,8 @@ static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
>
> static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
> {
> - struct tbs_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
> - struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
> + struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
> + struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
>
> if (!list_empty(&vgpu_data->list))
> return;
> @@ -251,8 +269,8 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
>
> static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
> {
> - struct tbs_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
> - struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
> + struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
> + struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
>
> if (list_empty(&vgpu_data->list))
> return;
> --
> 2.7.4
>
> _______________________________________________
> intel-gvt-dev mailing list
> intel-gvt-dev at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gvt-dev