[PATCH v2 1/9] drm/i915/gvt: use hrtimer replace delayed_work in scheduler
Zhenyu Wang
zhenyuw at linux.intel.com
Tue Feb 14 05:20:59 UTC 2017
On 2017.02.14 12:25:47 +0800, Ping Gao wrote:
> Currently the scheduler is triggered by delayed_work; the time slices
> it produces vary and cannot be precise at the microsecond level. For
> performance control purposes, use an hrtimer instead.
>
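Might be worth spelling out in the commit message why delayed_work cannot
give microsecond slices: schedule_delayed_work() takes the delay in jiffies,
so the granularity is bounded by HZ, and if I read the old macro right the
period even rounds down to zero on common HZ settings. Roughly (my own
illustration, not from the patch):

    /* old period, in jiffies: 1 * HZ / 1000 is 0 for HZ=100/250, so the
     * work was re-queued with no delay at all; only HZ=1000 gives 1 ms */
    unsigned long old_period = 1 * HZ / 1000;

    /* new period, in nanoseconds: hrtimer expiry is not tied to HZ, so a
     * real 1 ms slice is possible */
    u64 new_period_ns = 1000000;
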
> Signed-off-by: Ping Gao <ping.a.gao at intel.com>
> ---
> drivers/gpu/drm/i915/gvt/gvt.c | 5 +++
> drivers/gpu/drm/i915/gvt/gvt.h | 2 ++
> drivers/gpu/drm/i915/gvt/sched_policy.c | 56 +++++++++++++++++++++++----------
> drivers/gpu/drm/i915/gvt/sched_policy.h | 2 ++
> 4 files changed, 49 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
> index 3b9d59e..3e8c30f 100644
> --- a/drivers/gpu/drm/i915/gvt/gvt.c
> +++ b/drivers/gpu/drm/i915/gvt/gvt.c
> @@ -143,6 +143,11 @@ static int gvt_service_thread(void *data)
> intel_gvt_emulate_vblank(gvt);
> mutex_unlock(&gvt->lock);
> }
> +
> + if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
> + (void *)&gvt->service_request)) {
> + intel_gvt_schedule(gvt);
> + }
> }
>
> return 0;
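The split looks right to me: the hrtimer callback runs in interrupt
context, so it only raises a request bit and the real work is done here in
the service thread, where gvt->lock can be taken. For anyone reading along,
intel_gvt_request_service() is (if I remember the helper in gvt.h
correctly) just a set_bit() plus a wake_up() on service_thread_wq:

    /* rough sketch of the existing helper, please check against the tree */
    static inline void intel_gvt_request_service(struct intel_gvt *gvt,
                                                 int service)
    {
            set_bit(service, (void *)&gvt->service_request);
            wake_up(&gvt->service_thread_wq);
    }
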
> diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
> index e227caf..dae157e 100644
> --- a/drivers/gpu/drm/i915/gvt/gvt.h
> +++ b/drivers/gpu/drm/i915/gvt/gvt.h
> @@ -234,6 +234,7 @@ struct intel_gvt {
> DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
> struct intel_vgpu_type *types;
> unsigned int num_types;
> + unsigned int num_vgpu_sched;
>
> struct task_struct *service_thread;
> wait_queue_head_t service_thread_wq;
> @@ -247,6 +248,7 @@ static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
>
> enum {
> INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
> + INTEL_GVT_REQUEST_SCHED = 1,
> };
>
> static inline void intel_gvt_request_service(struct intel_gvt *gvt,
> diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
> index 06c9584..e48bbad 100644
> --- a/drivers/gpu/drm/i915/gvt/sched_policy.c
> +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
> @@ -96,17 +96,15 @@ struct tbs_vgpu_data {
>
> struct tbs_sched_data {
> struct intel_gvt *gvt;
> - struct delayed_work work;
> + struct hrtimer timer;
> unsigned long period;
> struct list_head runq_head;
> };
>
> -#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000)
> +#define GVT_DEFAULT_TIME_SLICE 1000000
>
> -static void tbs_sched_func(struct work_struct *work)
> +static void tbs_sched_func(struct tbs_sched_data *sched_data)
> {
> - struct tbs_sched_data *sched_data = container_of(work,
> - struct tbs_sched_data, work.work);
> struct tbs_vgpu_data *vgpu_data;
>
> struct intel_gvt *gvt = sched_data->gvt;
> @@ -115,8 +113,6 @@ static void tbs_sched_func(struct work_struct *work)
> struct intel_vgpu *vgpu = NULL;
> struct list_head *pos, *head;
>
> - mutex_lock(&gvt->lock);
> -
> /* no vgpu or has already had a target */
> if (list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
> goto out;
> @@ -151,17 +147,30 @@ static void tbs_sched_func(struct work_struct *work)
> scheduler->next_vgpu->id);
> try_to_schedule_next_vgpu(gvt);
> }
> +}
>
> - /*
> - * still have vgpu on runq
> - * or last schedule haven't finished due to running workload
> - */
> - if (!list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
> - schedule_delayed_work(&sched_data->work, sched_data->period);
> +void intel_gvt_schedule(struct intel_gvt *gvt)
> +{
> + struct tbs_sched_data *sched_data = gvt->scheduler.sched_data;
>
> + mutex_lock(&gvt->lock);
> + tbs_sched_func(sched_data);
> mutex_unlock(&gvt->lock);
> }
>
> +static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
> +{
> + struct tbs_sched_data *data;
> +
> + data = container_of(timer_data, struct tbs_sched_data, timer);
> +
> + intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED);
> +
> + hrtimer_add_expires_ns(&data->timer, data->period);
> +
> + return HRTIMER_RESTART;
> +}
> +
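One small concern with the callback: hrtimer_add_expires_ns() pushes the
expiry forward by exactly one period, so if the callback ever runs late by
more than a period the new expiry is already in the past and the timer will
fire again immediately, back to back. hrtimer_forward_now() skips the missed
periods instead; a sketch of what I mean (untested):

    static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
    {
            struct tbs_sched_data *data;

            data = container_of(timer_data, struct tbs_sched_data, timer);

            intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED);

            /* skip over any missed periods instead of queueing them */
            hrtimer_forward_now(timer_data, ns_to_ktime(data->period));

            return HRTIMER_RESTART;
    }
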
> static int tbs_sched_init(struct intel_gvt *gvt)
> {
> struct intel_gvt_workload_scheduler *scheduler =
> @@ -174,11 +183,13 @@ static int tbs_sched_init(struct intel_gvt *gvt)
> return -ENOMEM;
>
> INIT_LIST_HEAD(&data->runq_head);
> - INIT_DELAYED_WORK(&data->work, tbs_sched_func);
> + hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
> + data->timer.function = tbs_timer_fn;
> data->period = GVT_DEFAULT_TIME_SLICE;
> data->gvt = gvt;
>
> scheduler->sched_data = data;
> +
> return 0;
> }
>
> @@ -188,7 +199,8 @@ static void tbs_sched_clean(struct intel_gvt *gvt)
> &gvt->scheduler;
> struct tbs_sched_data *data = scheduler->sched_data;
>
> - cancel_delayed_work(&data->work);
> + hrtimer_cancel(&data->timer);
> +
> kfree(data);
> scheduler->sched_data = NULL;
> }
> @@ -223,14 +235,26 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
> return;
>
> list_add_tail(&vgpu_data->list, &sched_data->runq_head);
> - schedule_delayed_work(&sched_data->work, sched_data->period);
> +
> + vgpu->gvt->num_vgpu_sched++;
> + if (vgpu->gvt->num_vgpu_sched == 1)
> + hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
> + sched_data->period), HRTIMER_MODE_ABS);
> }
>
> static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
> {
> + struct tbs_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
> struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
>
> + if (list_empty(&vgpu_data->list))
> + return;
> +
> list_del_init(&vgpu_data->list);
> +
> + vgpu->gvt->num_vgpu_sched--;
> + if (vgpu->gvt->num_vgpu_sched == 0)
> + hrtimer_cancel(&sched_data->timer);
> }
>
Is it possible to race here when one vgpu is destroyed while another one
still needs to be scheduled? Or it might be better to simply check whether
the vgpu run queue is empty before stopping the timer, as in the sketch
below.
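Roughly what I have in mind (untested, drops the new num_vgpu_sched
counter entirely):

    static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
    {
            struct tbs_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
            struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;

            if (list_empty(&vgpu_data->list))
                    return;

            list_del_init(&vgpu_data->list);

            /* stop the timer only once the run queue itself is empty */
            if (list_empty(&sched_data->runq_head))
                    hrtimer_cancel(&sched_data->timer);
    }

and start_schedule would symmetrically start the timer only when the run
queue was empty before the list_add_tail().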
> static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
> diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.h b/drivers/gpu/drm/i915/gvt/sched_policy.h
> index bb8b909..ba00a5f 100644
> --- a/drivers/gpu/drm/i915/gvt/sched_policy.h
> +++ b/drivers/gpu/drm/i915/gvt/sched_policy.h
> @@ -43,6 +43,8 @@ struct intel_gvt_sched_policy_ops {
> void (*stop_schedule)(struct intel_vgpu *vgpu);
> };
>
> +void intel_gvt_schedule(struct intel_gvt *gvt);
> +
> int intel_gvt_init_sched_policy(struct intel_gvt *gvt);
>
> void intel_gvt_clean_sched_policy(struct intel_gvt *gvt);
> --
> 2.7.4
>
> _______________________________________________
> intel-gvt-dev mailing list
> intel-gvt-dev at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gvt-dev
--
Open Source Technology Center, Intel ltd.
$gpg --keyserver wwwkeys.pgp.net --recv-keys 4D781827