[PATCH v4 4/9] drm/i915/gvt: add some statistic routine for scheduler
Gao, Ping A
ping.a.gao at intel.com
Thu Mar 9 01:07:12 UTC 2017
On 2017/3/8 18:07, Zhenyu Wang wrote:
> On 2017.03.08 14:24:31 +0800, Ping Gao wrote:
>> Add some statistics routines to collect the time when a vGPU is
>> scheduled in/out and the time of the last ctx submission.
>>
>> Rename some schedule structure.
>>
>> Signed-off-by: Ping Gao <ping.a.gao at intel.com>
>> ---
> Why must cycles_t-based counting be used? The time precision needed
> for vGPU scheduling doesn't look that high, e.g. 100ms. Wouldn't a
> ktime-based solution be enough?
Yes, ms precision is enough, but using cycles_t directly avoids doing a
division in many places: both the budget and the measured deltas stay in
cycles, so the hot path never has to convert cycles to ms.
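Roughly, the check on the hot path then looks like the sketch below
(illustrative only; the helper name and the expiry test are not part of
this patch, and ts_alloc is taken as the per-slice budget already
converted to cycles once at setup):

	static bool vgpu_time_slice_expired(struct vgpu_sched_data *vgpu_data)
	{
		int64_t used = vgpu_data->sched_out_time -
			       vgpu_data->sched_in_time;

		/* budget and usage are both in cycles, so this is a
		 * plain compare; no cycles-to-ms division needed */
		return used >= vgpu_data->ts_alloc;
	}

The conversion to ms then only has to happen once, e.g. when the stats
are reported, instead of on every scheduler tick.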
>>
>> +struct vgpu_sched_data {
>> + struct list_head list;
>> + struct intel_vgpu *vgpu;
>> +
>> + /* per-vgpu sched stats */
>> + int64_t sched_in_time;
>> + int64_t sched_out_time;
>> + int64_t sched_time;
>> + int64_t ts_usage;
>> + int64_t ts_alloc;
>> +
> Why int64? To handle cycles count overflow?
ts_usage can be negative because we cannot stop the workload immediately
when the time slice runs out, so the last charge may overdraw the budget.
The other fields are signed as well, since they take part in arithmetic
together with ts_usage and mixing signed and unsigned types there would
be error-prone.
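As a rough sketch of the accounting (ts_usage here is treated as the
remaining budget of the current slice in cycles; the snippet is
illustrative, not a hunk from this patch):

	int64_t ran = vgpu_data->sched_out_time -
		      vgpu_data->sched_in_time;

	vgpu_data->sched_time += ran;	/* total time on hardware */
	vgpu_data->ts_usage -= ran;	/* may go below zero: the workload
					 * only stops after the budget is
					 * already exhausted */

Since that last subtraction can overdraw the budget, ts_usage has to be
signed, and keeping the fields it is combined with signed too avoids
mixed signed/unsigned arithmetic.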
>
>> + struct intel_sched_ctl sched_ctl;
>> +};
>> +
>> +struct gvt_sched_data {
>> + struct intel_gvt *gvt;
>> + struct hrtimer timer;
>> + unsigned long period;
>> + struct list_head runq_head;
>> +};
>> +
>> static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
>> {
>> struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
>> enum intel_engine_id i;
>> struct intel_engine_cs *engine;
>> + struct vgpu_sched_data *vgpu_data;
>> + cycles_t cur_cycles;
>>
>> /* no target to schedule */
>> if (!scheduler->next_vgpu)
>> @@ -77,6 +100,14 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
>> gvt_dbg_sched("switch to next vgpu %d\n",
>> scheduler->next_vgpu->id);
>>
>> + cur_cycles = get_cycles();
>> + if (scheduler->current_vgpu) {
>> + vgpu_data = scheduler->current_vgpu->sched_data;
>> + vgpu_data->sched_out_time = cur_cycles;
>> + }
>> + vgpu_data = scheduler->next_vgpu->sched_data;
>> + vgpu_data->sched_in_time = cur_cycles;
>> +
>> /* switch current vgpu */
>> scheduler->current_vgpu = scheduler->next_vgpu;
>> scheduler->next_vgpu = NULL;
>> @@ -88,19 +119,6 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
>> wake_up(&scheduler->waitq[i]);
>> }
>>
>> -struct tbs_vgpu_data {
>> - struct list_head list;
>> - struct intel_vgpu *vgpu;
>> - /* put some per-vgpu sched stats here */
>> -};
>> -
>> -struct tbs_sched_data {
>> - struct intel_gvt *gvt;
>> - struct hrtimer timer;
>> - unsigned long period;
>> - struct list_head runq_head;
>> -};
>> -
>> enum {
>> NON_VGPU_SCHED = 0,
>> ONLY_IDLE_VGPU_SCHED,
>> @@ -109,9 +127,9 @@ enum {
>>
>> #define GVT_DEFAULT_TIME_SLICE 1000000
>>
>> -static void tbs_sched_func(struct tbs_sched_data *sched_data)
>> +static void tbs_sched_func(struct gvt_sched_data *sched_data)
>> {
>> - struct tbs_vgpu_data *vgpu_data;
>> + struct vgpu_sched_data *vgpu_data;
>>
>> struct intel_gvt *gvt = sched_data->gvt;
>> struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
>> @@ -136,7 +154,7 @@ static void tbs_sched_func(struct tbs_sched_data *sched_data)
>> if (pos == &sched_data->runq_head)
>> continue;
>>
>> - vgpu_data = container_of(pos, struct tbs_vgpu_data, list);
>> + vgpu_data = container_of(pos, struct vgpu_sched_data, list);
>> if (!vgpu_has_pending_workload(vgpu_data->vgpu))
>> continue;
>>
>> @@ -158,7 +176,7 @@ static void tbs_sched_func(struct tbs_sched_data *sched_data)
>>
>> void intel_gvt_schedule(struct intel_gvt *gvt)
>> {
>> - struct tbs_sched_data *sched_data = gvt->scheduler.sched_data;
>> + struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
>>
>> mutex_lock(&gvt->lock);
>> tbs_sched_func(sched_data);
>> @@ -167,9 +185,9 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
>>
>> static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
>> {
>> - struct tbs_sched_data *data;
>> + struct gvt_sched_data *data;
>>
>> - data = container_of(timer_data, struct tbs_sched_data, timer);
>> + data = container_of(timer_data, struct gvt_sched_data, timer);
>>
>> intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED);
>>
>> @@ -183,7 +201,7 @@ static int tbs_sched_init(struct intel_gvt *gvt)
>> struct intel_gvt_workload_scheduler *scheduler =
>> &gvt->scheduler;
>>
>> - struct tbs_sched_data *data;
>> + struct gvt_sched_data *data;
>>
>> data = kzalloc(sizeof(*data), GFP_KERNEL);
>> if (!data)
>> @@ -204,7 +222,7 @@ static void tbs_sched_clean(struct intel_gvt *gvt)
>> {
>> struct intel_gvt_workload_scheduler *scheduler =
>> &gvt->scheduler;
>> - struct tbs_sched_data *data = scheduler->sched_data;
>> + struct gvt_sched_data *data = scheduler->sched_data;
>>
>> hrtimer_cancel(&data->timer);
>>
>> @@ -214,7 +232,7 @@ static void tbs_sched_clean(struct intel_gvt *gvt)
>>
>> static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
>> {
>> - struct tbs_vgpu_data *data;
>> + struct vgpu_sched_data *data;
>>
>> data = kzalloc(sizeof(*data), GFP_KERNEL);
>> if (!data)
>> @@ -235,8 +253,8 @@ static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
>>
>> static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
>> {
>> - struct tbs_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
>> - struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
>> + struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
>> + struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
>>
>> if (!list_empty(&vgpu_data->list))
>> return;
>> @@ -251,8 +269,8 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
>>
>> static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
>> {
>> - struct tbs_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
>> - struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
>> + struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
>> + struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
>>
>> if (list_empty(&vgpu_data->list))
>> return;
>> --
>> 2.7.4
>>