[PATCH 3/3] drm/i915/gvt: [small locks] use scheduler lock

Zhang, Pei pei.zhang at intel.com
Thu Jan 25 07:52:15 UTC 2018


> -----Original Message-----
> From: intel-gvt-dev 
> [mailto:intel-gvt-dev-bounces at lists.freedesktop.org]
> On Behalf Of Zhenyu Wang
> Sent: Thursday, January 25, 2018 3:23 PM
> To: Zhang, Pei <pei.zhang at intel.com>
> Cc: intel-gvt-dev at lists.freedesktop.org; Wang, Zhi A 
> <zhi.a.wang at intel.com>; zhenyuw at linux.intel.com
> Subject: Re: [PATCH 3/3] drm/i915/gvt: [small locks] use scheduler 
> lock
> 
> On 2018.01.25 15:09:27 +0800, pei.zhang at intel.com wrote:
> > From: Pei Zhang <pei.zhang at intel.com>
> >
> > The scheduler lock (gvt->sched_lock) is used to protect the gvt
> > scheduler logic, including the gvt scheduler structure
> > (gvt->scheduler) and the per-vgpu schedule data (vgpu->sched_data,
> > vgpu->sched_ctl).
> >
> > Signed-off-by: Pei Zhang <pei.zhang at intel.com>
> > ---
> >  drivers/gpu/drm/i915/gvt/display.c      |  3 ---
> >  drivers/gpu/drm/i915/gvt/gvt.c          |  1 +
> >  drivers/gpu/drm/i915/gvt/gvt.h          |  7 ++++++
> >  drivers/gpu/drm/i915/gvt/sched_policy.c | 41 ++++++++++++++++++++++++++++-----
> >  drivers/gpu/drm/i915/gvt/scheduler.c    |  8 +++----
> >  5 files changed, 47 insertions(+), 13 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
> > index 84197d3..8042fc1 100644
> > --- a/drivers/gpu/drm/i915/gvt/display.c
> > +++ b/drivers/gpu/drm/i915/gvt/display.c
> > @@ -328,9 +328,6 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
> >  	struct intel_vgpu *vgpu;
> >  	int pipe, id;
> >
> > -	if (WARN_ON(!mutex_is_locked(&gvt->lock)))
> > -		return;
> > -
> >  	for_each_active_vgpu(gvt, vgpu, id) {
> >  		for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
> >  			if (pipe_is_enabled(vgpu, pipe))
> 
> This is not for the sched lock; it looks like it should be in the previous
> patch. And as we check all active vgpus here, it still requires the gvt
> lock for that purpose.
[Pei] You are right. I will add the gvt lock.
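A rough sketch of what I have in mind (only the lock placement is shown;
the exact shape of the change may still differ in v2):

void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
{
	struct intel_vgpu *vgpu;
	int pipe, id;

	/* take gvt->lock here instead of only asserting it, since callers
	 * no longer hold the big gvt lock after the lock split
	 */
	mutex_lock(&gvt->lock);
	for_each_active_vgpu(gvt, vgpu, id) {
		for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
			if (pipe_is_enabled(vgpu, pipe)) {
				/* existing per-pipe vblank handling */
			}
		}
	}
	/* ... remaining vblank timer start/stop logic stays unchanged ... */
	mutex_unlock(&gvt->lock);
}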

> 
> > diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
> > index d8d5e6c..b119c53 100644
> > --- a/drivers/gpu/drm/i915/gvt/gvt.c
> > +++ b/drivers/gpu/drm/i915/gvt/gvt.c
> > @@ -377,6 +377,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
> >  	spin_lock_init(&gvt->scheduler.mmio_context_lock);
> >  	mutex_init(&gvt->lock);
> >  	mutex_init(&gvt->gtt_lock);
> > +	mutex_init(&gvt->sched_lock);
> >  	gvt->dev_priv = dev_priv;
> >
> >  	init_device_info(gvt);
> > diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
> > index 76f6c24..1997419 100644
> > --- a/drivers/gpu/drm/i915/gvt/gvt.h
> > +++ b/drivers/gpu/drm/i915/gvt/gvt.h
> > @@ -181,6 +181,11 @@ struct intel_vgpu {
> >  	bool pv_notified;
> >  	bool failsafe;
> >  	unsigned int resetting_eng;
> > +
> > +	/* Both sched_data and sched_ctl can be seen as part of the global
> > +	 * gvt scheduler structure, so these two vgpu fields are protected
> > +	 * by gvt->sched_lock rather than by vgpu->vgpu_lock.
> > +	 */
> >  	void *sched_data;
> >
> >  	struct vgpu_sched_ctl sched_ctl;
> > @@ -295,6 +300,8 @@ struct intel_gvt {
> >  	 */
> >  	struct mutex lock;
> >  	struct mutex gtt_lock;
> > +	/* scheduler scope lock, protects gvt and vgpu schedule related data */
> > +	struct mutex sched_lock;
> >
> >  	struct drm_i915_private *dev_priv;
> >  	struct idr vgpu_idr;	/* vGPU IDR pool */
> > diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
> > index d031f64..f78fa21 100644
> > --- a/drivers/gpu/drm/i915/gvt/sched_policy.c
> > +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
> > @@ -228,8 +228,9 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
> >  	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
> >  	static uint64_t timer_check;
> >
> > -	mutex_lock(&gvt->lock);
> > -
> > +	/* Don't use an external lock to protect service_request.
> > +	 * Instead, always use atomic bit ops on it.
> > +	 */
> >  	if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
> >  				(void *)&gvt->service_request)) {
> >  		if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS))
> > @@ -237,9 +238,9 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
> >  	}
> >  	clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void 
> > *)&gvt->service_request);
> >
> > +	mutex_lock(&gvt->sched_lock);
> >  	tbs_sched_func(sched_data);
> > -
> > -	mutex_unlock(&gvt->lock);
> > +	mutex_unlock(&gvt->sched_lock);
> >  }
> >
> >  static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
> > @@ -352,36 +353,62 @@ static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
> >
> >  int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
> >  {
> > +	int ret;
> > +
> > +	mutex_lock(&gvt->sched_lock);
> >  	gvt->scheduler.sched_ops = &tbs_schedule_ops;
> > +	ret = gvt->scheduler.sched_ops->init(gvt);
> > +	mutex_unlock(&gvt->sched_lock);
> >
> > -	return gvt->scheduler.sched_ops->init(gvt);
> > +	return ret;
> >  }
> >
> >  void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
> >  {
> > +	mutex_lock(&gvt->sched_lock);
> >  	gvt->scheduler.sched_ops->clean(gvt);
> > +	mutex_unlock(&gvt->sched_lock);
> >  }
> >
> > +/* For the per-vgpu scheduler policy there are two per-vgpu fields:
> > + * sched_data and sched_ctl. We treat both as part of the global
> > + * scheduler, so they are protected by gvt->sched_lock.
> > + * Callers must decide whether vgpu_lock also needs to be held
> > + * outside these helpers.
> > + */
> > +
> >  int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
> >  {
> > -	return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
> > +	int ret;
> > +
> > +	mutex_lock(&vgpu->gvt->sched_lock);
> > +	ret = vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
> > +	mutex_unlock(&vgpu->gvt->sched_lock);
> > +
> > +	return ret;
> >  }
> >
> >  void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
> >  {
> > +	mutex_lock(&vgpu->gvt->sched_lock);
> >  	vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
> > +	mutex_unlock(&vgpu->gvt->sched_lock);
> >  }
> >
> >  void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
> >  {
> >  	gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
> >
> > +	mutex_lock(&vgpu->gvt->sched_lock);
> >  	vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
> > +	mutex_unlock(&vgpu->gvt->sched_lock);
> >  }
> >
> >  void intel_gvt_kick_schedule(struct intel_gvt *gvt)
> >  {
> > +	mutex_lock(&gvt->sched_lock);
> >  	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
> > +	mutex_unlock(&gvt->sched_lock);
> >  }
> >
> >  void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
> > @@ -392,6 +419,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
> >
> >  	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
> >
> > +	mutex_lock(&vgpu->gvt->sched_lock);
> >  	scheduler->sched_ops->stop_schedule(vgpu);
> >
> >  	if (scheduler->next_vgpu == vgpu)
> > @@ -411,4 +439,5 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
> >  		}
> >  	}
> >  	spin_unlock_bh(&scheduler->mmio_context_lock);
> > +	mutex_unlock(&vgpu->gvt->sched_lock);
> >  }
> > diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
> > index 907195a..f1b191d 100644
> > --- a/drivers/gpu/drm/i915/gvt/scheduler.c
> > +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
> > @@ -602,7 +602,7 @@ static struct intel_vgpu_workload *pick_next_workload(
> >  	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
> >  	struct intel_vgpu_workload *workload = NULL;
> >
> > -	mutex_lock(&gvt->lock);
> > +	mutex_lock(&gvt->sched_lock);
> >
> >  	/*
> >  	 * no current vgpu / will be scheduled out / no workload
> > @@ -648,7 +648,7 @@ static struct intel_vgpu_workload *pick_next_workload(
> >
> >  	atomic_inc(&workload->vgpu->submission.running_workload_num);
> >  out:
> > -	mutex_unlock(&gvt->lock);
> > +	mutex_unlock(&gvt->sched_lock);
> >  	return workload;
> >  }
> >
> > @@ -749,8 +749,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
> >  	struct intel_vgpu_submission *s = &vgpu->submission;
> >  	int event;
> >
> > -	mutex_lock(&gvt->lock);
> >  	mutex_lock(&vgpu->vgpu_lock);
> > +	mutex_lock(&gvt->sched_lock);
> >
> >  	/* For the workload w/ request, needs to wait for the context
> >  	 * switch to make sure request is completed.
> > @@ -828,8 +828,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
> >  	if (gvt->scheduler.need_reschedule)
> >  		intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
> >
> > +	mutex_unlock(&gvt->sched_lock);
> >  	mutex_unlock(&vgpu->vgpu_lock);
> > -	mutex_unlock(&gvt->lock);
> >  }
> >
> >  struct workload_thread_param {
> > --
> > 2.7.4
> >
> 
> --
> Open Source Technology Center, Intel ltd.
> 
> $gpg --keyserver wwwkeys.pgp.net --recv-keys 4D781827
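
For reference, when both locks are needed the nesting used in this patch
(see the complete_current_workload() hunk above) is vgpu_lock outside,
sched_lock inside. A minimal illustration:

	mutex_lock(&vgpu->vgpu_lock);   /* per-vgpu state */
	mutex_lock(&gvt->sched_lock);   /* gvt->scheduler, vgpu->sched_data */

	/* ... scheduling work ... */

	mutex_unlock(&gvt->sched_lock);
	mutex_unlock(&vgpu->vgpu_lock);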

