[Intel-gfx] [PATCH] drm/i915: Handle concurrent GTT faults gracefully

From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Thu Jun 12 16:13:14 CEST 2014


remap_pfn_range() has a nasty surprise if you try to handle two faults
from the same vma concurrently: the second thread hits a BUG() that
asserts the range is clear. As we hold our struct_mutex whilst
manipulating the GTT, we have the opportunity to check ahead of time
whether a second thread has already processed the pagefault for us. We
also have to take care to clean up the VMA should remap_pfn_range()
encounter an error (likely -ENOMEM) partway through populating the
PTEs.
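
To illustrate, a rough timeline of the race against the current
prefaulting path (simplified, and the faulting offsets are arbitrary):

  Thread A                            Thread B
  --------                            --------
  faults, takes struct_mutex          faults in the same vma,
  !obj->fault_mappable, so              blocks on struct_mutex
    remap_pfn_range() prefaults
    the entire object
  sets obj->fault_mappable
  drops struct_mutex
                                      takes struct_mutex
                                      obj->fault_mappable is set, so
                                        remap_pfn_range() of the single
                                        faulting page, which thread A
                                        already populated
                                      -> hits BUG() on !pte_none(*pte)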

Fixes a potential BUG from

commit c5158fabeaf53ed2c614c3333aaa4b3cce80f500
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Tue Jun 10 12:14:41 2014 +0100

Testcase: igt/gem_mmap_gtt/wip
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Brad Volkin <bradley.d.volkin@intel.com>
---
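For reviewers: the assertion we trip lives in remap_pte_range()
(mm/memory.c). Quoted approximately from v3.15, so treat this as a
sketch rather than the exact upstream code:

	do {
		BUG_ON(!pte_none(*pte));	/* every PTE must be empty */
		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);

Replaying the remap over a range that is already populated is therefore
fatal, hence the pte_exists() check below.
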
 drivers/gpu/drm/i915/i915_gem.c | 45 +++++++++++++++++++++++++++++------------
 1 file changed, 32 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 242b595a0403..90791e9546b7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1736,6 +1736,21 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+static bool pte_exists(struct vm_area_struct *vma, unsigned long addr)
+{
+	bool ret = false;
+	pte_t *pte;
+	spinlock_t *ptl;
+
+	pte = get_locked_pte(vma->vm_mm, addr, &ptl);
+	if (pte) {
+		ret = !pte_none(*pte);
+		pte_unmap_unlock(pte, ptl);
+	}
+
+	return ret;
+}
+
 /**
  * i915_gem_fault - fault a page into the GTT
  * vma: VMA in question
@@ -1783,6 +1798,10 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (ret)
 		goto unlock;
 
+	/* Check if a second thread completed the prefaulting for us */
+	if (obj->fault_mappable && pte_exists(vma, vma->vm_start))
+		goto unlock;
+
 	/* Access to snoopable pages through the GTT is incoherent. */
 	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
 		ret = -EFAULT;
@@ -1806,20 +1825,20 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
 	pfn >>= PAGE_SHIFT;
 
-	if (!obj->fault_mappable) {
-		ret = remap_pfn_range(vma,
-				      vma->vm_start,
-				      pfn,
-				      obj->base.size,
-				      vma->vm_page_prot);
-		obj->fault_mappable = ret == 0;
-	} else {
-		ret = remap_pfn_range(vma,
-				      (unsigned long)vmf->virtual_address,
-				      pfn + page_offset,
-				      PAGE_SIZE,
-				      vma->vm_page_prot);
+	ret = remap_pfn_range(vma, vma->vm_start,
+			      pfn, vma->vm_end - vma->vm_start,
+			      vma->vm_page_prot);
+	if (ret) {
+		/* After passing the sanity checks on remap_pfn_range(), we may
+		 * abort whilst updating the pagetables due to ENOMEM and leave
+		 * the tables in an inconsistent state. Reset them now.
+		 */
+		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+		goto unpin;
 	}
+
+	obj->fault_mappable = true;
+
 unpin:
 	i915_gem_object_ggtt_unpin(obj);
 unlock:
-- 
2.0.0
