[PATCH 2/3 - small lock v2] drm/i915/gvt: use per vgpu lock

Zhang, Pei pei.zhang at intel.com
Tue Jan 30 10:43:31 UTC 2018


Please see my comments inline.


> -----Original Message-----
> From: Zhenyu Wang [mailto:zhenyuw at linux.intel.com]
> Sent: Tuesday, January 30, 2018 4:49 PM
> To: Zhang, Pei <pei.zhang at intel.com>
> Cc: intel-gvt-dev at lists.freedesktop.org; Wang, Zhi A
> <zhi.a.wang at intel.com>
> Subject: Re: [PATCH 2/3 - small lock v2] drm/i915/gvt: use per vgpu lock
> 
> On 2018.01.25 16:25:21 +0800, pei.zhang at intel.com wrote:
> > From: Pei Zhang <pei.zhang at intel.com>
> >
> > Use vgpu lock to replace the gvt big lock. By doing this, the mmio
> > read/write trap path, vgpu virtual event emulation and other vgpu
> > related processing would be protected under the per-vgpu lock.
> >
> > v2: add gvt->lock in function gvt_check_vblank_emulation
> >
> > Signed-off-by: Pei Zhang <pei.zhang at intel.com>
> > ---
> >  drivers/gpu/drm/i915/gvt/display.c   | 35 +++++++++++-----------
> >  drivers/gpu/drm/i915/gvt/gvt.c       |  5 +---
> >  drivers/gpu/drm/i915/gvt/gvt.h       |  9 ++++++
> >  drivers/gpu/drm/i915/gvt/handlers.c  |  2 ++
> >  drivers/gpu/drm/i915/gvt/mmio.c      | 12 ++++----
> >  drivers/gpu/drm/i915/gvt/scheduler.c | 13 ++++++---
> >  drivers/gpu/drm/i915/gvt/vgpu.c      | 56 ++++++++++++++++--------------------
> >  7 files changed, 71 insertions(+), 61 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
> > index dd96ffc..1ff88c1 100644
> > --- a/drivers/gpu/drm/i915/gvt/display.c
> > +++ b/drivers/gpu/drm/i915/gvt/display.c
> > @@ -327,26 +327,28 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
> >  	struct intel_gvt_irq *irq = &gvt->irq;
> >  	struct intel_vgpu *vgpu;
> >  	int pipe, id;
> > +	int found = false;
> >
> > -	if (WARN_ON(!mutex_is_locked(&gvt->lock)))
> > -		return;
> > -
> > +	mutex_lock(&gvt->lock);
> >  	for_each_active_vgpu(gvt, vgpu, id) {
> >  		for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
> > -			if (pipe_is_enabled(vgpu, pipe))
> > -				goto out;
> > +			if (pipe_is_enabled(vgpu, pipe)) {
> > +				found = true;
> > +				break;
> > +			}
> 
> The problem is that you would walk all vgpus and check each vgpu's state
> without taking its vgpu_lock, which could be inconsistent, as the mmio
> path lock below would be per-vgpu only.
> 
> So for the pipeconf mmio handler lock change below: for pipe enable, just
> start the vblank timer. For pipe disable, we might instead send a request
> for the service thread to check whether any active vblank timer is still
> required.

[Pei] The GVT lock is used to protect the vGPU list, and it makes sure that when a vGPU instance is used, it is still alive and its structure pointer is valid to dereference. So the GVT global lock is held here. At the same time, I think vgpu_lock is not required in this code segment, because pipe_is_enabled(vgpu..) only reads the vGPU's state and no write happens. What do you think?

But your method is also very good; I would be happy to make that modification if you still have concerns.
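Just to make sure I understand the suggestion correctly, below is a rough, untested sketch of that alternative. INTEL_GVT_REQUEST_CHECK_VBLANK is a hypothetical new request bit (only INTEL_GVT_REQUEST_EMULATE_VBLANK exists today), and the handler body is simplified:

static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	u32 data = *(u32 *)p_data;

	write_vreg(vgpu, offset, p_data, bytes);

	if (data & PIPECONF_ENABLE)
		vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE;
	else
		vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE;

	/* Instead of dropping and re-taking vgpu_lock around
	 * intel_gvt_check_vblank_emulation(), kick the service thread
	 * and let it do the cross-vGPU timer check under gvt->lock.
	 */
	intel_gvt_request_service(vgpu->gvt, INTEL_GVT_REQUEST_CHECK_VBLANK);
	return 0;
}

Then in gvt_service_thread(), next to the existing vblank emulation request:

	if (test_and_clear_bit(INTEL_GVT_REQUEST_CHECK_VBLANK,
				(void *)&gvt->service_request))
		intel_gvt_check_vblank_emulation(gvt);

If we go this way, the mutex_unlock/mutex_lock pair around intel_gvt_check_vblank_emulation() in the pipeconf hunk below can be dropped as well.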

> 
> >  		}
> > +		if (found)
> > +			break;
> >  	}
> >
> >  	/* all the pipes are disabled */
> > -	hrtimer_cancel(&irq->vblank_timer.timer);
> > -	return;
> > -
> > -out:
> > -	hrtimer_start(&irq->vblank_timer.timer,
> > -		ktime_add_ns(ktime_get(), irq->vblank_timer.period),
> > -		HRTIMER_MODE_ABS);
> > -
> > +	if (!found)
> > +		hrtimer_cancel(&irq->vblank_timer.timer);
> > +	else
> > +		hrtimer_start(&irq->vblank_timer.timer,
> > +			ktime_add_ns(ktime_get(), irq->vblank_timer.period),
> > +			HRTIMER_MODE_ABS);
> > +	mutex_unlock(&gvt->lock);
> >  }
> >
> >  static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
> > @@ -383,8 +385,10 @@ static void emulate_vblank(struct intel_vgpu *vgpu)
> >  {
> >  	int pipe;
> >
> > +	mutex_lock(&vgpu->vgpu_lock);
> >  	for_each_pipe(vgpu->gvt->dev_priv, pipe)
> >  		emulate_vblank_on_pipe(vgpu, pipe);
> > +	mutex_unlock(&vgpu->vgpu_lock);
> >  }
> >
> >  /**
> > @@ -399,11 +403,10 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
> >  	struct intel_vgpu *vgpu;
> >  	int id;
> >
> > -	if (WARN_ON(!mutex_is_locked(&gvt->lock)))
> > -		return;
> > -
> > +	mutex_lock(&gvt->lock);
> >  	for_each_active_vgpu(gvt, vgpu, id)
> >  		emulate_vblank(vgpu);
> > +	mutex_unlock(&gvt->lock);
> >  }
> >
> >  /**
> > diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
> > index 6cd4ca1..d8d5e6c 100644
> > --- a/drivers/gpu/drm/i915/gvt/gvt.c
> > +++ b/drivers/gpu/drm/i915/gvt/gvt.c
> > @@ -271,11 +271,8 @@ static int gvt_service_thread(void *data)
> >  			continue;
> >
> >  		if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK,
> > -					(void *)&gvt->service_request)) {
> > -			mutex_lock(&gvt->lock);
> > +					(void *)&gvt->service_request))
> >  			intel_gvt_emulate_vblank(gvt);
> > -			mutex_unlock(&gvt->lock);
> > -		}
> >
> >  		if (test_bit(INTEL_GVT_REQUEST_SCHED,
> >  				(void *)&gvt->service_request) ||
> > diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
> > index 793224c..76f6c24 100644
> > --- a/drivers/gpu/drm/i915/gvt/gvt.h
> > +++ b/drivers/gpu/drm/i915/gvt/gvt.h
> > @@ -174,6 +174,7 @@ struct intel_vgpu_submission {
> >
> >  struct intel_vgpu {
> >  	struct intel_gvt *gvt;
> > +	struct mutex vgpu_lock;
> >  	int id;
> >  	unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
> >  	bool active;
> > @@ -181,6 +182,7 @@ struct intel_vgpu {
> >  	bool failsafe;
> >  	unsigned int resetting_eng;
> >  	void *sched_data;
> > +
> >  	struct vgpu_sched_ctl sched_ctl;
> >
> >  	struct intel_vgpu_fence fence;
> > @@ -288,6 +290,9 @@ struct intel_vgpu_type {
> >  };
> >
> >  struct intel_gvt {
> > +	/* GVT scope lock, protect GVT itself, and all resource currently
> > +	 * not yet protected by special locks(vgpu and scheduler lock).
> > +	 */
> >  	struct mutex lock;
> >  	struct mutex gtt_lock;
> >
> > @@ -310,6 +315,10 @@ struct intel_gvt {
> >
> >  	struct task_struct *service_thread;
> >  	wait_queue_head_t service_thread_wq;
> > +
> > +	/* service_request is always used in bit operation, we should always
> > +	 * use it with atomic bit ops so that no need to use gvt big lock.
> > +	 */
> >  	unsigned long service_request;
> >
> >  	struct engine_mmio *engine_mmio_list;
> > diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
> > index 38f3b00..4244a81 100644
> > --- a/drivers/gpu/drm/i915/gvt/handlers.c
> > +++ b/drivers/gpu/drm/i915/gvt/handlers.c
> > @@ -421,7 +421,9 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
> >  		vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE;
> >  	else
> >  		vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE;
> > +	mutex_unlock(&vgpu->vgpu_lock);
> >  	intel_gvt_check_vblank_emulation(vgpu->gvt);
> > +	mutex_lock(&vgpu->vgpu_lock);
> 
> >  	return 0;
> >  }
> >
> > diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
> > index 562b5ad..8161ab9 100644
> > --- a/drivers/gpu/drm/i915/gvt/mmio.c
> > +++ b/drivers/gpu/drm/i915/gvt/mmio.c
> > @@ -99,7 +99,7 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
> >  		return;
> >
> >  	gvt = vgpu->gvt;
> > -	mutex_lock(&gvt->lock);
> > +	mutex_lock(&vgpu->vgpu_lock);
> >  	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
> >  	if (reg_is_mmio(gvt, offset)) {
> >  		if (read)
> > @@ -118,7 +118,7 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
> >  			memcpy(pt, p_data, bytes);
> >
> >  	}
> > -	mutex_unlock(&gvt->lock);
> > +	mutex_unlock(&vgpu->vgpu_lock);
> >  }
> >
> >  /**
> > @@ -142,7 +142,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
> >  		failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
> >  		return 0;
> >  	}
> > -	mutex_lock(&gvt->lock);
> > +	mutex_lock(&vgpu->vgpu_lock);
> >
> >  	if (vgpu_gpa_is_aperture(vgpu, pa)) {
> >  		ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, true);
> > @@ -194,7 +194,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
> >  	gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
> >  			offset, bytes);
> >  out:
> > -	mutex_unlock(&gvt->lock);
> > +	mutex_unlock(&vgpu->vgpu_lock);
> >  	return ret;
> >  }
> >
> > @@ -220,7 +220,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
> >  		return 0;
> >  	}
> >
> > -	mutex_lock(&gvt->lock);
> > +	mutex_lock(&vgpu->vgpu_lock);
> >
> >  	if (vgpu_gpa_is_aperture(vgpu, pa)) {
> >  		ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, false);
> > @@ -263,7 +263,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
> >  	gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
> >  		     bytes);
> >  out:
> > -	mutex_unlock(&gvt->lock);
> > +	mutex_unlock(&vgpu->vgpu_lock);
> >  	return ret;
> >  }
> >
> > diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
> > index 0056638..907195a 100644
> > --- a/drivers/gpu/drm/i915/gvt/scheduler.c
> > +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
> > @@ -563,6 +563,11 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
> >  	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
> >  		ring_id, workload);
> >
> > +	/* TODO.
> > +	 * Use vgpu_lock here is still big grain, cause lock conflict
> > +	 * with the mmio trap path. Split to small parts later.
> > +	 */
> > +	mutex_lock(&vgpu->vgpu_lock);
> >  	mutex_lock(&dev_priv->drm.struct_mutex);
> >
> >  	ret = intel_gvt_scan_and_shadow_workload(workload);
> > @@ -587,6 +592,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
> >  	}
> >
> >  	mutex_unlock(&dev_priv->drm.struct_mutex);
> > +	mutex_unlock(&vgpu->vgpu_lock);
> >  	return ret;
> >  }
> >
> > @@ -744,14 +750,14 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
> >  	int event;
> >
> >  	mutex_lock(&gvt->lock);
> > +	mutex_lock(&vgpu->vgpu_lock);
> >
> >  	/* For the workload w/ request, needs to wait for the context
> >  	 * switch to make sure request is completed.
> >  	 * For the workload w/o request, directly complete the workload.
> >  	 */
> >  	if (workload->req) {
> > -		struct drm_i915_private *dev_priv =
> > -			workload->vgpu->gvt->dev_priv;
> > +		struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
> >  		struct intel_engine_cs *engine =
> >  			dev_priv->engine[workload->ring_id];
> >  		wait_event(workload->shadow_ctx_status_wq,
> > @@ -822,6 +828,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
> >  	if (gvt->scheduler.need_reschedule)
> >  		intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
> >
> > +	mutex_unlock(&vgpu->vgpu_lock);
> >  	mutex_unlock(&gvt->lock);
> >  }
> >
> > @@ -874,9 +881,7 @@ static int workload_thread(void *priv)
> >  			intel_uncore_forcewake_get(gvt->dev_priv,
> >  					FORCEWAKE_ALL);
> >
> > -		mutex_lock(&gvt->lock);
> >  		ret = dispatch_workload(workload);
> > -		mutex_unlock(&gvt->lock);
> >
> >  		if (ret) {
> >  			vgpu = workload->vgpu;
> > diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
> > index a8784fa..d8293f3 100644
> > --- a/drivers/gpu/drm/i915/gvt/vgpu.c
> > +++ b/drivers/gpu/drm/i915/gvt/vgpu.c
> > @@ -223,22 +223,17 @@ void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
> >   */
> >  void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
> >  {
> > -	struct intel_gvt *gvt = vgpu->gvt;
> > -
> > -	mutex_lock(&gvt->lock);
> > +	mutex_lock(&vgpu->vgpu_lock);
> >
> >  	vgpu->active = false;
> >
> > -	if (atomic_read(&vgpu->submission.running_workload_num)) {
> > -		mutex_unlock(&gvt->lock);
> > +	if (atomic_read(&vgpu->submission.running_workload_num))
> >  		intel_gvt_wait_vgpu_idle(vgpu);
> > -		mutex_lock(&gvt->lock);
> > -	}
> >
> >  	intel_vgpu_stop_schedule(vgpu);
> >  	intel_vgpu_dmabuf_cleanup(vgpu);
> >
> > -	mutex_unlock(&gvt->lock);
> > +	mutex_unlock(&vgpu->vgpu_lock);
> >  }
> >
> >  /**
> > @@ -252,14 +247,10 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
> >  {
> >  	struct intel_gvt *gvt = vgpu->gvt;
> >
> > -	mutex_lock(&gvt->lock);
> > -
> >  	WARN(vgpu->active, "vGPU is still active!\n");
> >
> > +	mutex_lock(&vgpu->vgpu_lock);
> >  	intel_gvt_debugfs_remove_vgpu(vgpu);
> > -	idr_remove(&gvt->vgpu_idr, vgpu->id);
> > -	if (idr_is_empty(&gvt->vgpu_idr))
> > -		intel_gvt_clean_irq(gvt);
> >  	intel_vgpu_clean_sched_policy(vgpu);
> >  	intel_vgpu_clean_submission(vgpu);
> >  	intel_vgpu_clean_display(vgpu);
> > @@ -269,10 +260,16 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
> >  	intel_vgpu_free_resource(vgpu);
> >  	intel_vgpu_clean_mmio(vgpu);
> >  	intel_vgpu_dmabuf_cleanup(vgpu);
> > -	vfree(vgpu);
> > +	mutex_unlock(&vgpu->vgpu_lock);
> >
> > +	mutex_lock(&gvt->lock);
> > +	idr_remove(&gvt->vgpu_idr, vgpu->id);
> > +	if (idr_is_empty(&gvt->vgpu_idr))
> > +		intel_gvt_clean_irq(gvt);
> >  	intel_gvt_update_vgpu_types(gvt);
> >  	mutex_unlock(&gvt->lock);
> > +
> > +	vfree(vgpu);
> >  }
> >
> >  #define IDLE_VGPU_IDR 0
> > @@ -298,6 +295,7 @@ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
> >
> >  	vgpu->id = IDLE_VGPU_IDR;
> >  	vgpu->gvt = gvt;
> > +	mutex_init(&vgpu->vgpu_lock);
> >
> >  	for (i = 0; i < I915_NUM_ENGINES; i++)
> >  		INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);
> > @@ -324,7 +322,10 @@ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
> >   */
> >  void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
> >  {
> > +	mutex_lock(&vgpu->vgpu_lock);
> >  	intel_vgpu_clean_sched_policy(vgpu);
> > +	mutex_unlock(&vgpu->vgpu_lock);
> > +
> >  	vfree(vgpu);
> >  }
> >
> > @@ -342,8 +343,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
> >  	if (!vgpu)
> >  		return ERR_PTR(-ENOMEM);
> >
> > -	mutex_lock(&gvt->lock);
> > -
> >  	ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
> >  		GFP_KERNEL);
> >  	if (ret < 0)
> > @@ -353,6 +352,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
> >  	vgpu->handle = param->handle;
> >  	vgpu->gvt = gvt;
> >  	vgpu->sched_ctl.weight = param->weight;
> > +	mutex_init(&vgpu->vgpu_lock);
> >  	INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
> >  	idr_init(&vgpu->object_idr);
> >  	intel_vgpu_init_cfg_space(vgpu, param->primary);
> > @@ -399,8 +399,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
> >  	if (ret)
> >  		goto out_clean_sched_policy;
> >
> > -	mutex_unlock(&gvt->lock);
> > -
> >  	return vgpu;
> >
> >  out_clean_sched_policy:
> > @@ -423,7 +421,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
> >  	idr_remove(&gvt->vgpu_idr, vgpu->id);
> >  out_free_vgpu:
> >  	vfree(vgpu);
> > -	mutex_unlock(&gvt->lock);
> >  	return ERR_PTR(ret);
> >  }
> >
> > @@ -455,12 +452,12 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
> >  	param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
> >  	param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);
> >
> > +	mutex_lock(&gvt->lock);
> >  	vgpu = __intel_gvt_create_vgpu(gvt, &param);
> > -	if (IS_ERR(vgpu))
> > -		return vgpu;
> > -
> > -	/* calculate left instance change for types */
> > -	intel_gvt_update_vgpu_types(gvt);
> > +	if (!IS_ERR(vgpu))
> > +		/* calculate left instance change for types */
> > +		intel_gvt_update_vgpu_types(gvt);
> > +	mutex_unlock(&gvt->lock);
> >
> >  	return vgpu;
> >  }
> > @@ -472,7 +469,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
> >   * @engine_mask: engines to reset for GT reset
> >   *
> >   * This function is called when user wants to reset a virtual GPU
> > through
> > - * device model reset or GT reset. The caller should hold the gvt lock.
> > + * device model reset or GT reset. The caller should hold the vgpu lock.
> >   *
> >   * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset
> >   * the whole vGPU to default state as when it is created. This vGPU function
> > @@ -511,11 +508,8 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
> >  	 * The current_vgpu will set to NULL after stopping the
> >  	 * scheduler when the reset is triggered by current vgpu.
> >  	 */
> > -	if (scheduler->current_vgpu == NULL) {
> > -		mutex_unlock(&gvt->lock);
> > +	if (scheduler->current_vgpu == NULL)
> >  		intel_gvt_wait_vgpu_idle(vgpu);
> > -		mutex_lock(&gvt->lock);
> > -	}
> >
> >  	intel_vgpu_reset_submission(vgpu, resetting_eng);
> >  	/* full GPU reset or device model level reset */
> > @@ -554,7 +548,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
> >   */
> >  void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
> >  {
> > -	mutex_lock(&vgpu->gvt->lock);
> > +	mutex_lock(&vgpu->vgpu_lock);
> >  	intel_gvt_reset_vgpu_locked(vgpu, true, 0);
> > -	mutex_unlock(&vgpu->gvt->lock);
> > +	mutex_unlock(&vgpu->vgpu_lock);
> >  }
> > --
> > 2.7.4
> >
> > _______________________________________________
> > intel-gvt-dev mailing list
> > intel-gvt-dev at lists.freedesktop.org
> > https://lists.freedesktop.org/mailman/listinfo/intel-gvt-dev
> 
> --
> Open Source Technology Center, Intel ltd.
> 
> $gpg --keyserver wwwkeys.pgp.net --recv-keys 4D781827

