[PATCH v6 3/3] drm/i915/gvt: Use sched_lock to protect gvt scheduler logic.

Du, Changbin changbin.du at intel.com
Thu Apr 19 07:38:46 UTC 2018


Reviewed-by: Changbin Du <changbin.du at intel.com>

On Fri, Apr 20, 2018 at 03:26:27PM +0800, colin.xu at intel.com wrote:
> From: Pei Zhang <pei.zhang at intel.com>
> 
> The scheduler lock (gvt->sched_lock) is used to protect the gvt
> scheduler logic, including the gvt scheduler structure (gvt->scheduler)
> and the per-vGPU scheduling data (vgpu->sched_data, vgpu->sched_ctl).
> 
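
Not a blocker, just a note for readers of the series: the nesting order
implied by the hunks below is vgpu_lock first, then sched_lock (see the
complete_current_workload() change in scheduler.c). A minimal sketch of
that order; the function name here is made up purely for illustration:

  /* Illustration only, not part of the patch: shows the lock nesting
   * used when both per-vGPU state and scheduler state are touched.
   */
  static void example_touch_sched_state(struct intel_vgpu *vgpu)
  {
  	struct intel_gvt *gvt = vgpu->gvt;

  	mutex_lock(&vgpu->vgpu_lock);	/* per-vGPU state first */
  	mutex_lock(&gvt->sched_lock);	/* then gvt->scheduler,
  					 * vgpu->sched_data, vgpu->sched_ctl
  					 */

  	/* ... touch scheduler state here ... */

  	mutex_unlock(&gvt->sched_lock);
  	mutex_unlock(&vgpu->vgpu_lock);
  }
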
> v6:
>   - Rebase to latest gvt-staging.
> v5:
>   - Rebase to latest gvt-staging.
> v4:
>   - Rebase to latest gvt-staging.
> v3:
>   - Update to latest code base.
> 
> Signed-off-by: Pei Zhang <pei.zhang at intel.com>
> Signed-off-by: Colin Xu <colin.xu at intel.com>
> ---
>  drivers/gpu/drm/i915/gvt/gvt.c          |  1 +
>  drivers/gpu/drm/i915/gvt/gvt.h          |  7 +++++
>  drivers/gpu/drm/i915/gvt/sched_policy.c | 37 ++++++++++++++++++++++---
>  drivers/gpu/drm/i915/gvt/scheduler.c    |  8 +++---
>  4 files changed, 45 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
> index 129a7795cb1f..eeac956339e0 100644
> --- a/drivers/gpu/drm/i915/gvt/gvt.c
> +++ b/drivers/gpu/drm/i915/gvt/gvt.c
> @@ -377,6 +377,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
>  	spin_lock_init(&gvt->scheduler.mmio_context_lock);
>  	mutex_init(&gvt->lock);
>  	mutex_init(&gvt->gtt_lock);
> +	mutex_init(&gvt->sched_lock);
>  	gvt->dev_priv = dev_priv;
>  
>  	init_device_info(gvt);
> diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
> index 16a877c97e64..a45c9f223594 100644
> --- a/drivers/gpu/drm/i915/gvt/gvt.h
> +++ b/drivers/gpu/drm/i915/gvt/gvt.h
> @@ -178,6 +178,11 @@ struct intel_vgpu {
>  	bool pv_notified;
>  	bool failsafe;
>  	unsigned int resetting_eng;
> +
> +	/* Both sched_data and sched_ctl can be seen as part of the global
> +	 * gvt scheduler structure, so the two vgpu fields below are
> +	 * protected by sched_lock, not vgpu_lock.
> +	 */
>  	void *sched_data;
>  
>  	struct vgpu_sched_ctl sched_ctl;
> @@ -302,6 +307,8 @@ struct intel_gvt {
>  	 */
>  	struct mutex lock;
>  	struct mutex gtt_lock;
> +	/* scheduler-scope lock, protects gvt and vgpu schedule-related data */
> +	struct mutex sched_lock;
>  
>  	struct drm_i915_private *dev_priv;
>  	struct idr vgpu_idr;	/* vGPU IDR pool */
> diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
> index d053cbe1dc94..62c720be9877 100644
> --- a/drivers/gpu/drm/i915/gvt/sched_policy.c
> +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
> @@ -228,7 +228,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
>  	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
>  	ktime_t cur_time;
>  
> -	mutex_lock(&gvt->lock);
> +	mutex_lock(&gvt->sched_lock);
>  	cur_time = ktime_get();
>  
>  	if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
> @@ -241,10 +241,11 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
>  	}
>  	clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);
>  
> +
>  	vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);
>  	tbs_sched_func(sched_data);
>  
> -	mutex_unlock(&gvt->lock);
> +	mutex_unlock(&gvt->sched_lock);
>  }
>  
>  static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
> @@ -359,39 +360,65 @@ static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
>  
>  int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
>  {
> +	int ret;
> +
> +	mutex_lock(&gvt->sched_lock);
>  	gvt->scheduler.sched_ops = &tbs_schedule_ops;
> +	ret = gvt->scheduler.sched_ops->init(gvt);
> +	mutex_unlock(&gvt->sched_lock);
>  
> -	return gvt->scheduler.sched_ops->init(gvt);
> +	return ret;
>  }
>  
>  void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
>  {
> +	mutex_lock(&gvt->sched_lock);
>  	gvt->scheduler.sched_ops->clean(gvt);
> +	mutex_unlock(&gvt->sched_lock);
>  }
>  
> +/* For the per-vgpu scheduler policy there are two per-vgpu fields:
> + * sched_data and sched_ctl. They are treated as part of the global
> + * scheduler and are therefore protected by gvt->sched_lock.
> + * Callers decide whether vgpu_lock should also be held outside
> + * these helpers.
> + */
> +
>  int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
>  {
> -	return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
> +	int ret;
> +
> +	mutex_lock(&vgpu->gvt->sched_lock);
> +	ret = vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
> +	mutex_unlock(&vgpu->gvt->sched_lock);
> +
> +	return ret;
>  }
>  
>  void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
>  {
> +	mutex_lock(&vgpu->gvt->sched_lock);
>  	vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
> +	mutex_unlock(&vgpu->gvt->sched_lock);
>  }
>  
>  void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
>  {
>  	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
>  
> +	mutex_lock(&vgpu->gvt->sched_lock);
>  	if (!vgpu_data->active) {
>  		gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
>  		vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
>  	}
> +	mutex_unlock(&vgpu->gvt->sched_lock);
>  }
>  
>  void intel_gvt_kick_schedule(struct intel_gvt *gvt)
>  {
> +	mutex_lock(&gvt->sched_lock);
>  	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
> +	mutex_unlock(&gvt->sched_lock);
>  }
>  
>  void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
> @@ -406,6 +433,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
>  
>  	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
>  
> +	mutex_lock(&vgpu->gvt->sched_lock);
>  	scheduler->sched_ops->stop_schedule(vgpu);
>  
>  	if (scheduler->next_vgpu == vgpu)
> @@ -425,4 +453,5 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
>  		}
>  	}
>  	spin_unlock_bh(&scheduler->mmio_context_lock);
> +	mutex_unlock(&vgpu->gvt->sched_lock);
>  }
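
The comment above intel_vgpu_init_sched_policy() leaves it to the caller
whether vgpu_lock is held around these helpers. For readers, a hedged
sketch of a caller that does hold it (the caller name is hypothetical;
only the nesting order is the point):

  /* Hypothetical caller, for illustration only: vgpu_lock is taken by
   * the caller, gvt->sched_lock is taken inside the helper.
   */
  static void example_deactivate_vgpu(struct intel_vgpu *vgpu)
  {
  	mutex_lock(&vgpu->vgpu_lock);
  	intel_vgpu_stop_schedule(vgpu);	/* takes gvt->sched_lock internally */
  	mutex_unlock(&vgpu->vgpu_lock);
  }
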
> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
> index 95664d2f0bfb..bf952a0543fa 100644
> --- a/drivers/gpu/drm/i915/gvt/scheduler.c
> +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
> @@ -715,7 +715,7 @@ static struct intel_vgpu_workload *pick_next_workload(
>  	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
>  	struct intel_vgpu_workload *workload = NULL;
>  
> -	mutex_lock(&gvt->lock);
> +	mutex_lock(&gvt->sched_lock);
>  
>  	/*
>  	 * no current vgpu / will be scheduled out / no workload
> @@ -761,7 +761,7 @@ static struct intel_vgpu_workload *pick_next_workload(
>  
>  	atomic_inc(&workload->vgpu->submission.running_workload_num);
>  out:
> -	mutex_unlock(&gvt->lock);
> +	mutex_unlock(&gvt->sched_lock);
>  	return workload;
>  }
>  
> @@ -862,8 +862,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
>  	struct intel_vgpu_submission *s = &vgpu->submission;
>  	int event;
>  
> -	mutex_lock(&gvt->lock);
>  	mutex_lock(&vgpu->vgpu_lock);
> +	mutex_lock(&gvt->sched_lock);
>  
>  	/* For the workload w/ request, needs to wait for the context
>  	 * switch to make sure request is completed.
> @@ -941,8 +941,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
>  	if (gvt->scheduler.need_reschedule)
>  		intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
>  
> +	mutex_unlock(&gvt->sched_lock);
>  	mutex_unlock(&vgpu->vgpu_lock);
> -	mutex_unlock(&gvt->lock);
>  }
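
One more note, also not a blocker: with sched_lock already held here,
this path correctly calls intel_gvt_request_service() directly rather
than the new locked wrapper. The snippet below is only to illustrate why
the wrapper must not be used from a context that already holds
sched_lock (kernel mutexes are not recursive):

  /* Illustration only -- do NOT do this: */
  mutex_lock(&gvt->sched_lock);
  /* ... */
  intel_gvt_kick_schedule(gvt);	/* would deadlock: takes sched_lock again */
  mutex_unlock(&gvt->sched_lock);
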
>  
>  struct workload_thread_param {
> -- 
> 2.17.0
> 

-- 
Thanks,
Changbin Du

