[PATCH] drm/i915: Separate pinning of pages from i915_vma_insert()

Prathap Kumar Valsan prathap.kumar.valsan at intel.com
Wed Jul 31 20:22:48 UTC 2019


Currently, i915_vma_insert() is responsible both for allocating the drm mm
node and for allocating or gathering the physical pages. Move the latter
into a separate function, i915_vma_set_pages(), for better readability.
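With this change the pin path in __i915_vma_do_pin() performs the node
reservation and the page gathering as two explicit steps. A simplified
sketch of the resulting flow, taken from the hunks below (error labels as
in the diff, the rest of the function unchanged):

	ret = i915_vma_insert(vma, size, alignment, flags); /* drm mm node only */
	if (ret)
		goto err_unpin;

	ret = i915_vma_set_pages(vma); /* pin obj pages + vma->ops->set_pages() */
	if (ret)
		goto err_remove;

	ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
	if (ret)
		goto err_clear; /* undo i915_vma_set_pages() */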

Signed-off-by: Prathap Kumar Valsan <prathap.kumar.valsan at intel.com>
---
 drivers/gpu/drm/i915/i915_vma.c | 62 ++++++++++++++++++---------------
 1 file changed, 34 insertions(+), 28 deletions(-)
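
Note: the page unwind that previously lived under i915_vma_insert()'s
err_clear/err_unpin labels now sits in __i915_vma_do_pin(). Roughly (a
sketch of the new label bodies from the last hunk; the existing err_remove
path is untouched):

	err_clear:
		vma->ops->clear_pages(vma);	/* undo i915_vma_set_pages() */
		if (vma->obj)
			i915_gem_object_unpin_pages(vma->obj);
	err_remove:
		/* falls through to the existing node removal / unpin path */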

diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index eb16a1a93bbc..20b060d64bf3 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -584,35 +584,22 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 		return -ENOSPC;
 	}
 
-	if (vma->obj) {
-		ret = i915_gem_object_pin_pages(vma->obj);
-		if (ret)
-			return ret;
-
+	if (vma->obj)
 		cache_level = vma->obj->cache_level;
-	} else {
+	else
 		cache_level = 0;
-	}
-
-	GEM_BUG_ON(vma->pages);
-
-	ret = vma->ops->set_pages(vma);
-	if (ret)
-		goto err_unpin;
 
 	if (flags & PIN_OFFSET_FIXED) {
 		u64 offset = flags & PIN_OFFSET_MASK;
 		if (!IS_ALIGNED(offset, alignment) ||
-		    range_overflows(offset, size, end)) {
-			ret = -EINVAL;
-			goto err_clear;
-		}
+		    range_overflows(offset, size, end))
+			return -EINVAL;
 
 		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
 					   size, offset, cache_level,
 					   flags);
 		if (ret)
-			goto err_clear;
+			return ret;
 	} else {
 		/*
 		 * We only support huge gtt pages through the 48b PPGTT,
@@ -651,7 +638,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 					  size, alignment, cache_level,
 					  start, end, flags);
 		if (ret)
-			goto err_clear;
+			return ret;
 
 		GEM_BUG_ON(vma->node.start < start);
 		GEM_BUG_ON(vma->node.start + vma->node.size > end);
@@ -669,13 +656,6 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	}
 
 	return 0;
-
-err_clear:
-	vma->ops->clear_pages(vma);
-err_unpin:
-	if (vma->obj)
-		i915_gem_object_unpin_pages(vma->obj);
-	return ret;
 }
 
 static void
@@ -710,6 +690,25 @@ i915_vma_remove(struct i915_vma *vma)
 	}
 }
 
+int i915_vma_set_pages(struct i915_vma *vma)
+{
+	int ret;
+
+	if (vma->obj) {
+		ret = i915_gem_object_pin_pages(vma->obj);
+		if (ret)
+			return ret;
+	}
+
+	GEM_BUG_ON(vma->pages);
+
+	ret = vma->ops->set_pages(vma);
+	if (ret && vma->obj)
+		i915_gem_object_unpin_pages(vma->obj);
+
+	return ret;
+}
+
 int __i915_vma_do_pin(struct i915_vma *vma,
 		      u64 size, u64 alignment, u64 flags)
 {
@@ -729,12 +728,16 @@ int __i915_vma_do_pin(struct i915_vma *vma,
 		ret = i915_vma_insert(vma, size, alignment, flags);
 		if (ret)
 			goto err_unpin;
+		ret = i915_vma_set_pages(vma);
+		if (ret)
+			goto err_remove;
+
 	}
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 
 	ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
 	if (ret)
-		goto err_remove;
+		goto err_clear;
 
 	GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);
 
@@ -743,7 +746,10 @@ int __i915_vma_do_pin(struct i915_vma *vma,
 
 	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
 	return 0;
-
+err_clear:
+	vma->ops->clear_pages(vma);
+	if (vma->obj)
+		i915_gem_object_unpin_pages(vma->obj);
 err_remove:
 	if ((bound & I915_VMA_BIND_MASK) == 0) {
 		i915_vma_remove(vma);
-- 
2.20.1