[PATCH v2] drm/i915/gvt: add a spin_lock to protect the current_workload

Dong, Chuanxiao chuanxiao.dong at intel.com
Thu Feb 16 06:32:19 UTC 2017


Any comments?

> -----Original Message-----
> From: intel-gvt-dev [mailto:intel-gvt-dev-bounces at lists.freedesktop.org] On
> Behalf Of Chuanxiao Dong
> Sent: Friday, February 10, 2017 5:30 PM
> To: intel-gvt-dev at lists.freedesktop.org
> Cc: Zhang, Pei <pei.zhang at intel.com>; Wang, Zhi A <zhi.a.wang at intel.com>
> Subject: [PATCH v2] drm/i915/gvt: add a spin_lock to protect the
> current_workload
> 
> There is a corner case which can cause a kernel panic after a GPU reset.
> Sometimes HW still delivers a context switch notification to SW for a
> failed workload. Since the workload failed, GVT has already freed it and
> the current_workload slot in the scheduler is NULL; dereferencing that
> NULL pointer panics the kernel. The root cause is the missing
> synchronization between freeing the workload and handling this
> unexpected notification.
> 
> Add a spinlock to protect current_workload so that the freeing path and
> the notification handler can synchronize with each other and avoid this
> conflict.
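> 
> To make the race concrete, here is a minimal userspace sketch of the
> same pattern (a pthread mutex stands in for the kernel spinlock, and
> struct workload, notifier() and complete() are illustrative stand-ins
> for the GVT structures and for shadow_context_status_change()/
> complete_current_workload(), not the real GVT code):
> 
>     #include <pthread.h>
>     #include <stdio.h>
>     #include <stdlib.h>
> 
>     struct workload { int status; };
> 
>     static struct workload *current_workload;       /* shared slot */
>     static pthread_mutex_t cur_workload_lock = PTHREAD_MUTEX_INITIALIZER;
> 
>     /* notification path: may fire after the workload has failed */
>     static void *notifier(void *arg)
>     {
>         pthread_mutex_lock(&cur_workload_lock);
>         struct workload *w = current_workload;
>         if (!w) {                       /* already freed: ignore */
>             pthread_mutex_unlock(&cur_workload_lock);
>             return NULL;
>         }
>         printf("status %d\n", w->status); /* safe under the lock */
>         pthread_mutex_unlock(&cur_workload_lock);
>         return NULL;
>     }
> 
>     /* completion path: clear the slot under the lock before freeing */
>     static void *complete(void *arg)
>     {
>         pthread_mutex_lock(&cur_workload_lock);
>         struct workload *w = current_workload;
>         current_workload = NULL;
>         pthread_mutex_unlock(&cur_workload_lock);
>         free(w);                        /* notifier can no longer see it */
>         return NULL;
>     }
> 
>     int main(void)
>     {
>         pthread_t a, b;
> 
>         current_workload = calloc(1, sizeof(*current_workload));
>         pthread_create(&a, NULL, notifier, NULL);
>         pthread_create(&b, NULL, complete, NULL);
>         pthread_join(a, NULL);
>         pthread_join(b, NULL);
>         return 0;
>     }
> 
> Without the lock, notifier() could load the pointer and dereference it
> after complete() had already cleared and freed it -- the same NULL
> access this patch closes.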
> 
> v2: update the commit message;
>     add an -EINPROGRESS check for workload->status
> 
> Signed-off-by: Chuanxiao Dong <chuanxiao.dong at intel.com>
> ---
>  drivers/gpu/drm/i915/gvt/scheduler.c | 30 ++++++++++++++++++++++++++++--
>  drivers/gpu/drm/i915/gvt/scheduler.h |  2 ++
>  2 files changed, 30 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
> index 7ea68a7..c300197 100644
> --- a/drivers/gpu/drm/i915/gvt/scheduler.c
> +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
> @@ -136,8 +136,30 @@ static int shadow_context_status_change(struct notifier_block *nb,
>  		(struct drm_i915_gem_request *)data;
>  	struct intel_gvt_workload_scheduler *scheduler =
>  		&vgpu->gvt->scheduler;
> -	struct intel_vgpu_workload *workload =
> -		scheduler->current_workload[req->engine->id];
> +	struct intel_vgpu_workload *workload;
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&scheduler->cur_workload_lock, flags);
> +	workload = scheduler->current_workload[req->engine->id];
> +	/* Sometimes HW still sends this notification for a
> +	 * failed workload. In that case the failed workload may
> +	 * already have been freed by the time we get here, so do
> +	 * nothing; even if it has not been freed yet, still do
> +	 * nothing.
> +	 *
> +	 * In another case, the slot may already hold a new
> +	 * workload by now, and we must not do the context
> +	 * schedule out based on the last failed one.
> +	 */
> +	if (unlikely(!workload ||
> +		((workload->status != -EINPROGRESS && workload->status) ||
> +		 vgpu->resetting) ||
> +		(action == INTEL_CONTEXT_SCHEDULE_OUT &&
> +		!atomic_read(&workload->shadow_ctx_active)))) {
> +		spin_unlock_irqrestore(&scheduler->cur_workload_lock, flags);
> +		return NOTIFY_OK;
> +	}
> +	spin_unlock_irqrestore(&scheduler->cur_workload_lock, flags);
> 
>  	switch (action) {
>  	case INTEL_CONTEXT_SCHEDULE_IN:
> @@ -352,6 +374,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
>  	struct intel_vgpu_workload *workload;
>  	struct intel_vgpu *vgpu;
>  	int event;
> +	unsigned long flags;
> 
>  	mutex_lock(&gvt->lock);
> 
> @@ -372,7 +395,9 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
>  	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
>  			ring_id, workload, workload->status);
> 
> +	spin_lock_irqsave(&scheduler->cur_workload_lock, flags);
>  	scheduler->current_workload[ring_id] = NULL;
> +	spin_unlock_irqrestore(&scheduler->cur_workload_lock, flags);
> 
>  	list_del_init(&workload->list);
>  	workload->complete(workload);
> @@ -513,6 +538,7 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
> 
>  	gvt_dbg_core("init workload scheduler\n");
> 
> +	spin_lock_init(&scheduler->cur_workload_lock);
>  	init_waitqueue_head(&scheduler->workload_complete_wq);
> 
>  	for (i = 0; i < I915_NUM_ENGINES; i++) {
> 
> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
> index 2833dfa..b446ea9 100644
> --- a/drivers/gpu/drm/i915/gvt/scheduler.h
> +++ b/drivers/gpu/drm/i915/gvt/scheduler.h
> @@ -40,6 +40,8 @@ struct intel_gvt_workload_scheduler {
>  	struct intel_vgpu *current_vgpu;
>  	struct intel_vgpu *next_vgpu;
>  	struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES];
> +	/* protects current_workload[] above */
> +	spinlock_t cur_workload_lock;
>  	bool need_reschedule;
> 
>  	wait_queue_head_t workload_complete_wq;
> --
> 2.7.4
> 
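For reviewers, the skip condition in shadow_context_status_change() can be
restated as a small helper (purely illustrative: the helper name is
hypothetical and this merely mirrors the check already in the patch):

    /* Return true when the notification must be ignored. */
    static bool stale_notification(struct intel_vgpu *vgpu,
                                   struct intel_vgpu_workload *workload,
                                   unsigned long action)
    {
        if (!workload)                  /* slot already cleared/freed */
            return true;
        /* status is final (no longer -EINPROGRESS): already failed/done */
        if (workload->status != -EINPROGRESS && workload->status)
            return true;
        if (vgpu->resetting)            /* vGPU reset in flight */
            return true;
        /* schedule-out without a matching schedule-in */
        if (action == INTEL_CONTEXT_SCHEDULE_OUT &&
            !atomic_read(&workload->shadow_ctx_active))
            return true;
        return false;
    }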