[PATCH 2/2] drm/i915/gtt: Pull global wc page stash under its own locking

Chris Wilson chris at chris-wilson.co.uk
Wed Jul 4 10:42:29 UTC 2018


Currently, the wc-stash used for providing flushed WC pages ready for
constructing the page directories is assumed to be protected by the
struct_mutex. However, we want to remove this global lock and so must
install a replacement global lock for accessing the global wc-stash (the
per-vm stash continues to be guarded by the vm).

We need to push ahead with this patch because of an oversight in hastily
removing the struct_mutex guard around the igt_ppgtt_alloc selftest. No
matter, the new lock will prove very useful (i.e. will be required) in
the near future.

Fixes: 1f6f00238abf ("drm/i915/selftests: Drop struct_mutex around lowlevel pggtt allocation")
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h     |  5 +++
 drivers/gpu/drm/i915/i915_gem_gtt.c | 51 ++++++++++++++++++-----------
 2 files changed, 37 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2cefe4c30f88..696c0b36f81e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -954,6 +954,11 @@ struct i915_gem_mm {
 	 */
 	struct pagevec wc_stash;
 
+	/**
+	 * Lock for the small stash of WC pages.
+	 */
+	spinlock_t wc_lock;
+
 	/**
 	 * tmpfs instance used for shmem backed objects
 	 */
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index c6aa761ca085..100442e2e7cf 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -379,6 +379,7 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
 {
 	struct pagevec *pvec = &vm->free_pages;
 	struct pagevec stash;
+	struct page *page;
 
 	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
 		i915_gem_shrink_all(vm->i915);
@@ -389,13 +390,15 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
 	if (!vm->pt_kmap_wc)
 		return alloc_page(gfp);
 
-	/* A placeholder for a specific mutex to guard the WC stash */
-	lockdep_assert_held(&vm->i915->drm.struct_mutex);
-
 	/* Look in our global stash of WC pages... */
 	pvec = &vm->i915->mm.wc_stash;
+	page = NULL;
+	spin_lock(&vm->i915->mm.wc_lock);
 	if (likely(pvec->nr))
-		return pvec->pages[--pvec->nr];
+		page = pvec->pages[--pvec->nr];
+	spin_unlock(&vm->i915->mm.wc_lock);
+	if (page)
+		return page;
 
 	/*
 	 * Otherwise batch allocate pages to amoritize cost of set_pages_wc.
@@ -430,11 +433,14 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
 		pagevec_release(&stash);
 	}
 
-	return likely(pvec->nr) ? pvec->pages[--pvec->nr] : NULL;
+	page = NULL;
+	if (likely(pvec->nr))
+		page = pvec->pages[--pvec->nr];
+	return page;
 }
 
 static void vm_free_pages_release(struct i915_address_space *vm,
-				  bool immediate)
+				  unsigned int immediate)
 {
 	struct pagevec *pvec = &vm->free_pages;
 
@@ -442,28 +448,32 @@ static void vm_free_pages_release(struct i915_address_space *vm,
 
 	if (vm->pt_kmap_wc) {
 		struct pagevec *stash = &vm->i915->mm.wc_stash;
+		spinlock_t *lock = &vm->i915->mm.wc_lock;
 
-		/* When we use WC, first fill up the global stash and then
+		/*
+		 * When we use WC, first fill up the global stash and then
 		 * only if full immediately free the overflow.
 		 */
 
-		lockdep_assert_held(&vm->i915->drm.struct_mutex);
+		spin_lock(lock);
 		if (pagevec_space(stash)) {
 			do {
 				stash->pages[stash->nr++] =
 					pvec->pages[--pvec->nr];
 				if (!pvec->nr)
-					return;
+					break;
 			} while (pagevec_space(stash));
-
-			/* As we have made some room in the VM's free_pages,
-			 * we can wait for it to fill again. Unless we are
-			 * inside i915_address_space_fini() and must
-			 * immediately release the pages!
-			 */
-			if (!immediate)
-				return;
 		}
+		spin_unlock(lock);
+
+		/*
+		 * As we have made some room in the VM's free_pages,
+		 * we can wait for it to fill again. Unless we are
+		 * inside i915_address_space_fini() and must
+		 * immediately release the pages!
+		 */
+		if (pvec->nr <= immediate)
+			return;
 
 		set_pages_array_wb(pvec->pages, pvec->nr);
 	}
@@ -482,7 +492,7 @@ static void vm_free_page(struct i915_address_space *vm, struct page *page)
 	 */
 	might_sleep();
 	if (!pagevec_add(&vm->free_pages, page))
-		vm_free_pages_release(vm, false);
+		vm_free_pages_release(vm, PAGEVEC_SIZE - 1);
 }
 
 static int __setup_page_dma(struct i915_address_space *vm,
@@ -2123,7 +2133,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
 static void i915_address_space_fini(struct i915_address_space *vm)
 {
 	if (pagevec_count(&vm->free_pages))
-		vm_free_pages_release(vm, true);
+		vm_free_pages_release(vm, 0);
 
 	drm_mm_takedown(&vm->mm);
 	list_del(&vm->global_link);
@@ -3518,6 +3528,9 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	int ret;
 
+	spin_lock_init(&dev_priv->mm.wc_lock);
+	pagevec_init(&dev_priv->mm.wc_stash);
+
 	INIT_LIST_HEAD(&dev_priv->vm_list);
 
 	/* Note that we use page colouring to enforce a guard page at the
-- 
2.18.0



More information about the Intel-gfx-trybot mailing list