[PATCH 1/3 small lock V3] drm/i915/gvt: use gtt lock to protect gtt list

pei.zhang at intel.com pei.zhang at intel.com
Thu Feb 1 00:25:18 UTC 2018


From: Pei Zhang <pei.zhang at intel.com>

GVT gtt contains two OOS page lists (free and in-use) and one global mm
LRU list. Introduce a dedicated gvt->gtt_lock to protect these three
lists.

v2: use gtt_lock to cover the correct scope in ppgtt_allocate_oos_page()

Signed-off-by: Pei Zhang <pei.zhang at intel.com>
---
 drivers/gpu/drm/i915/gvt/gtt.c | 44 ++++++++++++++++++++++++++++++++++--------
 drivers/gpu/drm/i915/gvt/gvt.c |  1 +
 drivers/gpu/drm/i915/gvt/gvt.h |  2 ++
 3 files changed, 39 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 8d5317d..783ab29 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1158,7 +1158,7 @@ static int sync_oos_page(struct intel_vgpu *vgpu,
 	return 0;
 }
 
-static int detach_oos_page(struct intel_vgpu *vgpu,
+static int detach_oos_page_locked(struct intel_vgpu *vgpu,
 		struct intel_vgpu_oos_page *oos_page)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
@@ -1178,7 +1178,19 @@ static int detach_oos_page(struct intel_vgpu *vgpu,
 	return 0;
 }
 
-static int attach_oos_page(struct intel_vgpu *vgpu,
+static int detach_oos_page(struct intel_vgpu *vgpu,
+		struct intel_vgpu_oos_page *oos_page)
+{
+	int ret;
+	struct intel_gvt *gvt = vgpu->gvt;
+
+	mutex_lock(&gvt->gtt_lock);
+	ret = detach_oos_page_locked(vgpu, oos_page);
+	mutex_unlock(&gvt->gtt_lock);
+	return ret;
+}
+
+static int attach_oos_page_locked(struct intel_vgpu *vgpu,
 		struct intel_vgpu_oos_page *oos_page,
 		struct intel_vgpu_guest_page *gpt)
 {
@@ -1227,19 +1239,23 @@ static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu,
 
 	WARN(oos_page, "shadow PPGTT page has already has a oos page\n");
 
+	mutex_lock(&gvt->gtt_lock);
 	if (list_empty(&gtt->oos_page_free_list_head)) {
 		oos_page = container_of(gtt->oos_page_use_list_head.next,
 			struct intel_vgpu_oos_page, list);
 		ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
 		if (ret)
-			return ret;
-		ret = detach_oos_page(vgpu, oos_page);
+			goto out;
+		ret = detach_oos_page_locked(vgpu, oos_page);
 		if (ret)
-			return ret;
+			goto out;
 	} else
 		oos_page = container_of(gtt->oos_page_free_list_head.next,
 			struct intel_vgpu_oos_page, list);
-	return attach_oos_page(vgpu, oos_page, gpt);
+	ret = attach_oos_page_locked(vgpu, oos_page, gpt);
+out:
+	mutex_unlock(&gvt->gtt_lock);
+	return ret;
 }
 
 static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu,
@@ -1662,7 +1678,9 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
 		ret = shadow_mm(mm);
 		if (ret)
 			goto fail;
+		mutex_lock(&gvt->gtt_lock);
 		list_add_tail(&mm->lru_list, &gvt->gtt.mm_lru_list_head);
+		mutex_unlock(&gvt->gtt_lock);
 	}
 	return mm;
 fail:
@@ -1712,15 +1730,20 @@ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
 
 	atomic_inc(&mm->pincount);
 	list_del_init(&mm->lru_list);
+
+	mutex_lock(&mm->vgpu->gvt->gtt_lock);
 	list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head);
+	mutex_unlock(&mm->vgpu->gvt->gtt_lock);
 	return 0;
 }
 
 static int reclaim_one_mm(struct intel_gvt *gvt)
 {
+	int ret = 0;
 	struct intel_vgpu_mm *mm;
 	struct list_head *pos, *n;
 
+	mutex_lock(&gvt->gtt_lock);
 	list_for_each_safe(pos, n, &gvt->gtt.mm_lru_list_head) {
 		mm = container_of(pos, struct intel_vgpu_mm, lru_list);
 
@@ -1731,9 +1754,12 @@ static int reclaim_one_mm(struct intel_gvt *gvt)
 
 		list_del_init(&mm->lru_list);
 		invalidate_mm(mm);
-		return 1;
+		ret = 1;
+		break;
 	}
-	return 0;
+	mutex_unlock(&gvt->gtt_lock);
+
+	return ret;
 }
 
 /*
@@ -2198,11 +2223,13 @@ static void clean_spt_oos(struct intel_gvt *gvt)
 	WARN(!list_empty(&gtt->oos_page_use_list_head),
 		"someone is still using oos page\n");
 
+	mutex_lock(&gvt->gtt_lock);
 	list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
 		oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
 		list_del(&oos_page->list);
 		kfree(oos_page);
 	}
+	mutex_unlock(&gvt->gtt_lock);
 }
 
 static int setup_spt_oos(struct intel_gvt *gvt)
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index fac54f3..6cd4ca1 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -379,6 +379,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 	idr_init(&gvt->vgpu_idr);
 	spin_lock_init(&gvt->scheduler.mmio_context_lock);
 	mutex_init(&gvt->lock);
+	mutex_init(&gvt->gtt_lock);
 	gvt->dev_priv = dev_priv;
 
 	init_device_info(gvt);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index c88c489..269599a 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -289,6 +289,8 @@ struct intel_vgpu_type {
 
 struct intel_gvt {
 	struct mutex lock;
+	struct mutex gtt_lock;
+
 	struct drm_i915_private *dev_priv;
 	struct idr vgpu_idr;	/* vGPU IDR pool */
 
-- 
2.7.4


