[PATCH v4 1/9] drm/i915/gvt: use hrtimer replace delayed_work in scheduler

Gao, Ping A ping.a.gao at intel.com
Mon Mar 20 11:41:38 UTC 2017


On 2017/3/14 18:03, Tian, Kevin wrote:
>> From: Ping Gao
>> Sent: Wednesday, March 8, 2017 2:24 PM
>>
>> Currently the scheduler is triggered by delayed_work, the time slices are
>> varied and cannot be precise at microsecond level. For performance control
>> purpose, should use hrtimer instead.
> Possibly revised as:
>
> Currently the scheduler is triggered by delayed_work, which doesn't
> provide precision at microsecond level. Move to hrtimer instead for
> more accurate control.

Sorry for the late reply.

Sure, thanks!
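
For context, a minimal generic sketch (not taken from this patch; all
names here are made up) of why the switch matters: schedule_delayed_work()
takes its delay in jiffies, so the finest slice is 1/HZ (1 ms with HZ=1000),
while an hrtimer expiry is expressed in nanoseconds, which is what
microsecond-level time slices need.

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct delayed_work example_dwork;
static struct hrtimer example_htimer;

static void example_dwork_fn(struct work_struct *work)
{
	/* runs from a workqueue; the expiry was rounded to jiffies */
}

static enum hrtimer_restart example_htimer_fn(struct hrtimer *t)
{
	/* runs from hrtimer context; the expiry has ns resolution */
	return HRTIMER_NORESTART;
}

static void example_setup(void)
{
	/* delayed_work: cannot fire finer than one jiffy */
	INIT_DELAYED_WORK(&example_dwork, example_dwork_fn);
	schedule_delayed_work(&example_dwork, msecs_to_jiffies(1));

	/* hrtimer: expiry in ns, e.g. a 100 us slice */
	hrtimer_init(&example_htimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	example_htimer.function = example_htimer_fn;
	hrtimer_start(&example_htimer, ns_to_ktime(100 * NSEC_PER_USEC),
		      HRTIMER_MODE_REL);
}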

>
>> Signed-off-by: Ping Gao <ping.a.gao at intel.com>
>> ---
>>  drivers/gpu/drm/i915/gvt/gvt.c          |  5 +++
>>  drivers/gpu/drm/i915/gvt/gvt.h          |  2 ++
>>  drivers/gpu/drm/i915/gvt/sched_policy.c | 61 ++++++++++++++++++++++++---------
>>  drivers/gpu/drm/i915/gvt/sched_policy.h |  2 ++
>>  4 files changed, 54 insertions(+), 16 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
>> index 3b9d59e..3e8c30f 100644
>> --- a/drivers/gpu/drm/i915/gvt/gvt.c
>> +++ b/drivers/gpu/drm/i915/gvt/gvt.c
>> @@ -143,6 +143,11 @@ static int gvt_service_thread(void *data)
>>  			intel_gvt_emulate_vblank(gvt);
>>  			mutex_unlock(&gvt->lock);
>>  		}
>> +
>> +		if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
>> +					(void *)&gvt->service_request)) {
>> +			intel_gvt_schedule(gvt);
>> +		}
>>  	}
>>
>>  	return 0;
>> diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
>> index 2379192..1839456 100644
>> --- a/drivers/gpu/drm/i915/gvt/gvt.h
>> +++ b/drivers/gpu/drm/i915/gvt/gvt.h
>> @@ -236,6 +236,7 @@ struct intel_gvt {
>>  	DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
>>  	struct intel_vgpu_type *types;
>>  	unsigned int num_types;
>> +	atomic_t num_vgpu_sched;
>>
>>  	struct task_struct *service_thread;
>>  	wait_queue_head_t service_thread_wq;
>> @@ -249,6 +250,7 @@ static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
>>
>>  enum {
>>  	INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
>> +	INTEL_GVT_REQUEST_SCHED = 1,
>>  };
>>
>>  static inline void intel_gvt_request_service(struct intel_gvt *gvt,
>> diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
>> index 06c9584..174f739 100644
>> --- a/drivers/gpu/drm/i915/gvt/sched_policy.c
>> +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
>> @@ -96,17 +96,15 @@ struct tbs_vgpu_data {
>>
>>  struct tbs_sched_data {
>>  	struct intel_gvt *gvt;
>> -	struct delayed_work work;
>> +	struct hrtimer timer;
>>  	unsigned long period;
>>  	struct list_head runq_head;
>>  };
>>
>> -#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000)
>> +#define GVT_DEFAULT_TIME_SLICE 1000000
>>
>> -static void tbs_sched_func(struct work_struct *work)
>> +static void tbs_sched_func(struct tbs_sched_data *sched_data)
>>  {
>> -	struct tbs_sched_data *sched_data = container_of(work,
>> -			struct tbs_sched_data, work.work);
>>  	struct tbs_vgpu_data *vgpu_data;
>>
>>  	struct intel_gvt *gvt = sched_data->gvt;
>> @@ -115,8 +113,6 @@ static void tbs_sched_func(struct work_struct *work)
>>  	struct intel_vgpu *vgpu = NULL;
>>  	struct list_head *pos, *head;
>>
>> -	mutex_lock(&gvt->lock);
>> -
>>  	/* no vgpu or has already had a target */
>>  	if (list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
>>  		goto out;
>> @@ -151,17 +147,30 @@ static void tbs_sched_func(struct work_struct *work)
>>  				scheduler->next_vgpu->id);
>>  		try_to_schedule_next_vgpu(gvt);
>>  	}
>> +}
>>
>> -	/*
>> -	 * still have vgpu on runq
>> -	 * or last schedule haven't finished due to running workload
>> -	 */
>> -	if (!list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
>> -		schedule_delayed_work(&sched_data->work, sched_data->period);
>> +void intel_gvt_schedule(struct intel_gvt *gvt)
>> +{
>> +	struct tbs_sched_data *sched_data = gvt->scheduler.sched_data;
>>
>> +	mutex_lock(&gvt->lock);
>> +	tbs_sched_func(sched_data);
>>  	mutex_unlock(&gvt->lock);
>>  }
>>
>> +static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
>> +{
>> +	struct tbs_sched_data *data;
>> +
>> +	data = container_of(timer_data, struct tbs_sched_data, timer);
>> +
>> +	intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED);
>> +
>> +	hrtimer_add_expires_ns(&data->timer, data->period);
>> +
>> +	return HRTIMER_RESTART;
>> +}
>> +
>>  static int tbs_sched_init(struct intel_gvt *gvt)
>>  {
>>  	struct intel_gvt_workload_scheduler *scheduler =
>> @@ -174,11 +183,13 @@ static int tbs_sched_init(struct intel_gvt *gvt)
>>  		return -ENOMEM;
>>
>>  	INIT_LIST_HEAD(&data->runq_head);
>> -	INIT_DELAYED_WORK(&data->work, tbs_sched_func);
>> +	hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
>> +	data->timer.function = tbs_timer_fn;
>>  	data->period = GVT_DEFAULT_TIME_SLICE;
>>  	data->gvt = gvt;
>>
>>  	scheduler->sched_data = data;
>> +
>>  	return 0;
>>  }
>>
>> @@ -188,11 +199,17 @@ static void tbs_sched_clean(struct intel_gvt *gvt)
>>  		&gvt->scheduler;
>>  	struct tbs_sched_data *data = scheduler->sched_data;
>>
>> -	cancel_delayed_work(&data->work);
>> +	hrtimer_cancel(&data->timer);
>> +
>>  	kfree(data);
>>  	scheduler->sched_data = NULL;
>>  }
>>
>> +enum {
>> +	NON_VGPU_SCHED = 0,
>> +	HAS_VGPU_SCHED,
>> +};
>> +
>>  static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
>>  {
>>  	struct tbs_vgpu_data *data;
>> @@ -223,14 +240,26 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
>>  		return;
>>
>>  	list_add_tail(&vgpu_data->list, &sched_data->runq_head);
>> -	schedule_delayed_work(&sched_data->work, sched_data->period);
>> +
>> +	atomic_inc(&vgpu->gvt->num_vgpu_sched);
>> +	if (atomic_read(&vgpu->gvt->num_vgpu_sched) == HAS_VGPU_SCHED)
>> +		hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
>> +			sched_data->period), HRTIMER_MODE_ABS);
> Can we simply check whether runq is empty instead of introducing another
> accounting mechanism?

After the idle_vgpu is introduced, the runq is never empty, so checking
it is not enough. The scheduler timer only starts after an active vgpu
is created, at which point num_vgpu_sched >= 2.
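
To make that concrete, a hypothetical helper (not part of this patch;
the name is made up): with an always-present idle vgpu linked on the
runq, list_empty() can no longer distinguish "nothing to schedule" from
"only the idle vgpu", so the timer has to be gated on the count of
scheduled vgpus instead.

/* hypothetical sketch; assumes the idle vgpu is always on the runq */
static bool tbs_timer_needed(struct intel_gvt *gvt)
{
	/* a runq check is useless here: the idle vgpu keeps it non-empty */
	return atomic_read(&gvt->num_vgpu_sched) >= 2;
}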

>>  }
>>
>>  static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
>>  {
>> +	struct tbs_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
>>  	struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
>>
>> +	if (list_empty(&vgpu_data->list))
>> +		return;
>> +
>>  	list_del_init(&vgpu_data->list);
>> +
>> +	atomic_dec(&vgpu->gvt->num_vgpu_sched);
>> +	if (atomic_read(&vgpu->gvt->num_vgpu_sched) == NON_VGPU_SCHED)
>> +		hrtimer_cancel(&sched_data->timer);
>>  }
>>
>>  static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
>> diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.h b/drivers/gpu/drm/i915/gvt/sched_policy.h
>> index bb8b909..ba00a5f 100644
>> --- a/drivers/gpu/drm/i915/gvt/sched_policy.h
>> +++ b/drivers/gpu/drm/i915/gvt/sched_policy.h
>> @@ -43,6 +43,8 @@ struct intel_gvt_sched_policy_ops {
>>  	void (*stop_schedule)(struct intel_vgpu *vgpu);
>>  };
>>
>> +void intel_gvt_schedule(struct intel_gvt *gvt);
>> +
>>  int intel_gvt_init_sched_policy(struct intel_gvt *gvt);
>>
>>  void intel_gvt_clean_sched_policy(struct intel_gvt *gvt);
>> --
>> 2.7.4
>>
>> _______________________________________________
>> intel-gvt-dev mailing list
>> intel-gvt-dev at lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/intel-gvt-dev


