[PATCH 1/3] drm/i915/gvt: [small locks] use gtt lock to protect gtt list
Zhenyu Wang
zhenyuw at linux.intel.com
Thu Jan 25 07:14:18 UTC 2018
On 2018.01.25 15:09:25 +0800, pei.zhang at intel.com wrote:
> From: Pei Zhang <pei.zhang at intel.com>
>
> GVT gtt contains two oos lists and one global mm list. Use a dedicated
> gvt->gtt_lock to protect those 3 lists.
>
> Signed-off-by: Pei Zhang <pei.zhang at intel.com>
> ---
> drivers/gpu/drm/i915/gvt/gtt.c | 56 ++++++++++++++++++++++++++++++++++++------
> drivers/gpu/drm/i915/gvt/gvt.c | 1 +
> drivers/gpu/drm/i915/gvt/gvt.h | 2 ++
> 3 files changed, 51 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
> index 8d5317d..f27d2fc 100644
> --- a/drivers/gpu/drm/i915/gvt/gtt.c
> +++ b/drivers/gpu/drm/i915/gvt/gtt.c
> @@ -1158,7 +1158,7 @@ static int sync_oos_page(struct intel_vgpu *vgpu,
> return 0;
> }
>
> -static int detach_oos_page(struct intel_vgpu *vgpu,
> +static int detach_oos_page_locked(struct intel_vgpu *vgpu,
> struct intel_vgpu_oos_page *oos_page)
> {
> struct intel_gvt *gvt = vgpu->gvt;
> @@ -1178,7 +1178,19 @@ static int detach_oos_page(struct intel_vgpu *vgpu,
> return 0;
> }
>
> -static int attach_oos_page(struct intel_vgpu *vgpu,
> +static int detach_oos_page(struct intel_vgpu *vgpu,
> + struct intel_vgpu_oos_page *oos_page)
> +{
> + int ret;
> + struct intel_gvt *gvt = vgpu->gvt;
> +
> + mutex_lock(&gvt->gtt_lock);
> + ret = detach_oos_page_locked(vgpu, oos_page);
> + mutex_unlock(&gvt->gtt_lock);
> + return ret;
> +}
> +
> +static int attach_oos_page_locked(struct intel_vgpu *vgpu,
> struct intel_vgpu_oos_page *oos_page,
> struct intel_vgpu_guest_page *gpt)
> {
> @@ -1201,6 +1213,19 @@ static int attach_oos_page(struct intel_vgpu *vgpu,
> return 0;
> }
>
> +static int attach_oos_page(struct intel_vgpu *vgpu,
> + struct intel_vgpu_oos_page *oos_page,
> + struct intel_vgpu_guest_page *gpt)
> +{
> + struct intel_gvt *gvt = vgpu->gvt;
> + int ret;
> +
> + mutex_lock(&gvt->gtt_lock);
> + ret = attach_oos_page_locked(vgpu, oos_page, gpt);
> + mutex_unlock(&gvt->gtt_lock);
> + return ret;
> +}
> +
> static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu,
> struct intel_vgpu_guest_page *gpt)
> {
> @@ -1227,19 +1252,23 @@ static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu,
>
> WARN(oos_page, "shadow PPGTT page has already has a oos page\n");
>
> + mutex_lock(&gvt->gtt_lock);
> if (list_empty(&gtt->oos_page_free_list_head)) {
> oos_page = container_of(gtt->oos_page_use_list_head.next,
> struct intel_vgpu_oos_page, list);
> ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
> if (ret)
> - return ret;
> - ret = detach_oos_page(vgpu, oos_page);
> + goto out;
> + ret = detach_oos_page_locked(vgpu, oos_page);
> if (ret)
> - return ret;
> + goto out;
> } else
> oos_page = container_of(gtt->oos_page_free_list_head.next,
> struct intel_vgpu_oos_page, list);
> - return attach_oos_page(vgpu, oos_page, gpt);
> + ret = attach_oos_page_locked(vgpu, oos_page, gpt);
> + mutex_unlock(&gvt->gtt_lock);
> +out:
Looks like the error path is not doing the correct unlock here: the goto out
jumps past mutex_unlock(&gvt->gtt_lock), so the mutex is still held when we
return on error. See the sketch below the function.
> + return ret;
> }
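One way to make the error path drop the lock as well would be to move the out:
label in front of the unlock, roughly like this for the tail of
ppgtt_allocate_oos_page() (untested sketch, same names as in the patch):

	mutex_lock(&gvt->gtt_lock);
	if (list_empty(&gtt->oos_page_free_list_head)) {
		oos_page = container_of(gtt->oos_page_use_list_head.next,
				struct intel_vgpu_oos_page, list);
		ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
		if (ret)
			goto out;
		ret = detach_oos_page_locked(vgpu, oos_page);
		if (ret)
			goto out;
	} else
		oos_page = container_of(gtt->oos_page_free_list_head.next,
				struct intel_vgpu_oos_page, list);

	ret = attach_oos_page_locked(vgpu, oos_page, gpt);
out:
	mutex_unlock(&gvt->gtt_lock);
	return ret;

so both the success and the error path release gtt_lock exactly once.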
>
> static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu,
> @@ -1662,7 +1691,9 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
> ret = shadow_mm(mm);
> if (ret)
> goto fail;
> + mutex_lock(&gvt->gtt_lock);
> list_add_tail(&mm->lru_list, &gvt->gtt.mm_lru_list_head);
> + mutex_unlock(&gvt->gtt_lock);
> }
> return mm;
> fail:
> @@ -1712,15 +1743,20 @@ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
>
> atomic_inc(&mm->pincount);
> list_del_init(&mm->lru_list);
> +
> + mutex_lock(&mm->vgpu->gvt->gtt_lock);
> list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head);
> + mutex_unlock(&mm->vgpu->gvt->gtt_lock);
> return 0;
> }
>
> static int reclaim_one_mm(struct intel_gvt *gvt)
> {
> + int ret = 0;
> struct intel_vgpu_mm *mm;
> struct list_head *pos, *n;
>
> + mutex_lock(&gvt->gtt_lock);
> list_for_each_safe(pos, n, &gvt->gtt.mm_lru_list_head) {
> mm = container_of(pos, struct intel_vgpu_mm, lru_list);
>
> @@ -1731,9 +1767,11 @@ static int reclaim_one_mm(struct intel_gvt *gvt)
>
> list_del_init(&mm->lru_list);
> invalidate_mm(mm);
> - return 1;
> + ret = 1;
> }
> - return 0;
> + mutex_unlock(&gvt->gtt_lock);
> +
> + return ret;
> }
>
> /*
> @@ -2198,11 +2236,13 @@ static void clean_spt_oos(struct intel_gvt *gvt)
> WARN(!list_empty(&gtt->oos_page_use_list_head),
> "someone is still using oos page\n");
>
> + mutex_lock(&gvt->gtt_lock);
> list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
> oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
> list_del(&oos_page->list);
> kfree(oos_page);
> }
> + mutex_unlock(&gvt->gtt_lock);
> }
>
> static int setup_spt_oos(struct intel_gvt *gvt)
> diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
> index fac54f3..6cd4ca1 100644
> --- a/drivers/gpu/drm/i915/gvt/gvt.c
> +++ b/drivers/gpu/drm/i915/gvt/gvt.c
> @@ -379,6 +379,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
> idr_init(&gvt->vgpu_idr);
> spin_lock_init(&gvt->scheduler.mmio_context_lock);
> mutex_init(&gvt->lock);
> + mutex_init(&gvt->gtt_lock);
> gvt->dev_priv = dev_priv;
>
> init_device_info(gvt);
> diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
> index 7dc7a80..793224c 100644
> --- a/drivers/gpu/drm/i915/gvt/gvt.h
> +++ b/drivers/gpu/drm/i915/gvt/gvt.h
> @@ -289,6 +289,8 @@ struct intel_vgpu_type {
>
> struct intel_gvt {
> struct mutex lock;
> + struct mutex gtt_lock;
> +
> struct drm_i915_private *dev_priv;
> struct idr vgpu_idr; /* vGPU IDR pool */
>
> --
> 2.7.4
>
--
Open Source Technology Center, Intel ltd.
$gpg --keyserver wwwkeys.pgp.net --recv-keys 4D781827