[Intel-gfx] [CI 06/39] drm/i915: Fall back to single-page pwrite/pread if unable to release fence

Chris Wilson chris at chris-wilson.co.uk
Thu Aug 18 16:16:45 UTC 2016


If we cannot release the fence (for example if someone is inexplicably
trying to write into a tiled framebuffer that is currently pinned to the
display! *cough* kms_frontbuffer_tracking *cough*), fall back to using the
page-by-page pwrite/pread interface, rather than fail the syscall
entirely.
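
For reference, the reworked pread path then looks roughly like this (a
sketch condensed from the hunk below, with the copy loop and error
unwinding omitted; the pwrite_fast path gets the same treatment):

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		/* Releasing the fence can fail if it is still pinned,
		 * e.g. for scanout. Drop the GGTT binding and fall
		 * through to the page-by-page path instead of failing
		 * the syscall.
		 */
		ret = i915_gem_object_put_fence(obj);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		/* No usable GGTT binding: insert a single-page node
		 * and copy through it one page at a time.
		 */
		ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
		...
	}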

Since this is triggerable by the user (along the pwrite path), we have to
remove the WARN_ON(fence->pin_count).
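
With that, i915_gem_object_put_fence() simply reports -EBUSY for a pinned
fence and the callers above turn it into the fallback, rather than hitting
a warning splat for a condition userspace can provoke at will:

	if (fence->pin_count)
		return -EBUSY;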

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c       | 30 ++++++++++++++++++------------
 drivers/gpu/drm/i915/i915_gem_fence.c |  2 +-
 2 files changed, 19 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5adbf8c3b81c..a609522221ed 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -754,6 +754,15 @@ i915_gem_gtt_pread(struct drm_device *dev,
 	int ret;
 
 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+	if (!IS_ERR(vma)) {
+		node.start = i915_ggtt_offset(vma);
+		node.allocated = false;
+		ret = i915_gem_object_put_fence(obj);
+		if (ret) {
+			i915_vma_unpin(vma);
+			vma = ERR_PTR(ret);
+		}
+	}
 	if (IS_ERR(vma)) {
 		ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
 		if (ret)
@@ -766,12 +775,6 @@ i915_gem_gtt_pread(struct drm_device *dev,
 		}
 
 		i915_gem_object_pin_pages(obj);
-	} else {
-		node.start = i915_ggtt_offset(vma);
-		node.allocated = false;
-		ret = i915_gem_object_put_fence(obj);
-		if (ret)
-			goto out_unpin;
 	}
 
 	ret = i915_gem_object_set_to_gtt_domain(obj, false);
@@ -1058,6 +1061,15 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
 
 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 				       PIN_MAPPABLE | PIN_NONBLOCK);
+	if (!IS_ERR(vma)) {
+		node.start = i915_ggtt_offset(vma);
+		node.allocated = false;
+		ret = i915_gem_object_put_fence(obj);
+		if (ret) {
+			i915_vma_unpin(vma);
+			vma = ERR_PTR(ret);
+		}
+	}
 	if (IS_ERR(vma)) {
 		ret = insert_mappable_node(i915, &node, PAGE_SIZE);
 		if (ret)
@@ -1070,12 +1082,6 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
 		}
 
 		i915_gem_object_pin_pages(obj);
-	} else {
-		node.start = i915_ggtt_offset(vma);
-		node.allocated = false;
-		ret = i915_gem_object_put_fence(obj);
-		if (ret)
-			goto out_unpin;
 	}
 
 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index 334c3c4e8357..b0c6c2777725 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -298,7 +298,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
 
 	fence = &dev_priv->fence_regs[obj->fence_reg];
 
-	if (WARN_ON(fence->pin_count))
+	if (fence->pin_count)
 		return -EBUSY;
 
 	i915_gem_object_fence_lost(obj);
-- 
2.9.3