[PATCH 2/2] drm/i915/selftests: test descending gtt offsets

Matthew Auld matthew.auld at intel.com
Tue Nov 21 15:23:24 UTC 2017


Make sure the higher gtt offsets don't feel left out, by also trying offsets
in descending order. This is especially relevant for the 48b PPGTT, where we
time out long before we are able to exhaust the address space, so an ascending
walk alone never exercises the top of the GTT.

Signed-off-by: Matthew Auld <matthew.auld at intel.com>
---
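As a rough illustration of what the second loop buys us, the userspace sketch
below prints the offsets the ascending and descending walks would visit. The
naive is_prime() helper and the example vm_total/obj_size/max_page_size values
are stand-ins for the kernel's for_each_prime_number_from() and the real
vm->total and sg page size; none of this is part of the patch itself.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_prime(uint64_t n)
{
	if (n < 2)
		return false;
	for (uint64_t d = 2; d * d <= n; d++)
		if (n % d == 0)
			return false;
	return true;
}

int main(void)
{
	const uint64_t vm_total = 1ull << 28;		/* pretend 256M GTT */
	const uint64_t obj_size = 2ull << 20;		/* pretend 2M object */
	const uint64_t max_page_size = 2ull << 20;	/* pretend 2M GTT pages */
	const uint64_t max = (vm_total - obj_size) / max_page_size;

	/*
	 * Walk num = 0 and then the primes up to max, roughly mirroring the
	 * selftest's for_each_prime_number_from() walk.
	 */
	for (uint64_t num = 0; num <= max; num++) {
		if (num && !is_prime(num))
			continue;

		/* Ascending: offsets cluster towards the bottom of the GTT. */
		printf("ascending:  0x%llx\n",
		       (unsigned long long)(num * max_page_size));

		/* Descending: the same primes, measured down from the top. */
		printf("descending: 0x%llx\n",
		       (unsigned long long)((max - num) * max_page_size));
	}

	return 0;
}
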
 drivers/gpu/drm/i915/selftests/huge_pages.c | 135 ++++++++++++++++++----------
 1 file changed, 87 insertions(+), 48 deletions(-)

diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index e0275f249b6b..afbcd53dd4e0 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -1040,6 +1040,66 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 	return err;
 }
 
+static int __igt_write_huge(struct i915_gem_context *ctx,
+			    struct intel_engine_cs *engine,
+			    struct drm_i915_gem_object *obj,
+			    u64 size, u64 offset,
+			    u32 dword, u32 val)
+{
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
+	struct i915_vma *vma;
+	int err;
+
+	vma = i915_vma_instance(obj, vm, NULL);
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+
+	err = i915_vma_unbind(vma);
+	if (err)
+		goto out_vma_close;
+
+	err = i915_vma_pin(vma, size, 0, flags | offset);
+	if (err) {
+		/*
+		 * The ggtt may have some pages reserved so
+		 * refrain from erroring out.
+		 */
+		if (err == -ENOSPC && i915_is_ggtt(vm))
+			err = 0;
+
+		goto out_vma_close;
+	}
+
+	err = igt_check_page_sizes(vma);
+	if (err)
+		goto out_vma_unpin;
+
+
+	err = gpu_write(vma, ctx, engine, dword, val);
+	if (err) {
+		pr_err("gpu-write failed at offset=%llx", offset);
+		goto out_vma_unpin;
+	}
+
+	err = cpu_check(obj, dword, val);
+	if (err) {
+		pr_err("cpu-check failed at offset=%llx", offset);
+		goto out_vma_unpin;
+	}
+
+	i915_vma_unpin(vma);
+
+out_vma_unpin:
+	if (i915_vma_is_pinned(vma))
+		i915_vma_unpin(vma);
+out_vma_close:
+	i915_vma_close(vma);
+
+	return err;
+}
+
 static int igt_write_huge(struct i915_gem_context *ctx,
 			  struct drm_i915_gem_object *obj)
 {
@@ -1047,12 +1107,10 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 	struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
 	static struct intel_engine_cs *engines[I915_NUM_ENGINES];
 	struct intel_engine_cs *engine;
-	I915_RND_STATE(prng);
-	IGT_TIMEOUT(end_time);
-	struct i915_vma *vma;
-	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
 	unsigned int max_page_size;
 	unsigned int id;
+	I915_RND_STATE(prng);
+	IGT_TIMEOUT(end_time);
 	u64 max;
 	u64 num;
 	u64 size;
@@ -1069,10 +1127,6 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 	max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
 	max = div_u64((vm->total - size), max_page_size);
 
-	vma = i915_vma_instance(obj, vm, NULL);
-	if (IS_ERR(vma))
-		return PTR_ERR(vma);
-
 	n = 0;
 	for_each_engine(engine, i915, id) {
 		if (!intel_engine_can_store_dword(engine)) {
@@ -1086,54 +1140,43 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 	order = i915_random_order(n, &prng);
 
 	/*
-	 * Try various offsets until we timeout -- we want to avoid
+	 * Try various ascending offsets until we timeout -- we want to avoid
 	 * issues hidden by effectively always using offset = 0.
 	 */
 	i = 0;
 	for_each_prime_number_from(num, 0, max) {
 		u64 offset = num * max_page_size;
-		u32 dword;
+		u32 dword = offset_in_page(num) / 4;
 
-		err = i915_vma_unbind(vma);
-		if (err)
-			goto out_vma_close;
-
-		err = i915_vma_pin(vma, size, max_page_size, flags | offset);
-		if (err) {
-			/*
-			 * The ggtt may have some pages reserved so
-			 * refrain from erroring out.
-			 */
-			if (err == -ENOSPC && i915_is_ggtt(vm)) {
-				err = 0;
-				continue;
-			}
-
-			goto out_vma_close;
-		}
+		engine = engines[order[i]];
+		i = (i + 1) % n;
 
-		err = igt_check_page_sizes(vma);
+		err = __igt_write_huge(ctx, engine, obj, size, offset, dword, num + 1);
 		if (err)
-			goto out_vma_unpin;
+			goto free_order;
 
-		dword = offset_in_page(num) / 4;
+		if (num > 0 &&
+		    igt_timeout(end_time,
+				"%s timed out on engine=%u at offset=%llx, max_page_size=%x\n",
+				__func__, engine->id, offset, max_page_size))
+			break;
+	}
+
+	/*
+	 * Try various descending offsets until we timeout -- we want to
+	 * avoid issues hidden by effectively always using lower offsets.
+	 */
+	i = 0;
+	for_each_prime_number_from(num, 0, max) {
+		u64 offset = (max - num) * max_page_size;
+		u32 dword = offset_in_page(num) / 4;
 
 		engine = engines[order[i]];
 		i = (i + 1) % n;
 
-		err = gpu_write(vma, ctx, engine, dword, num + 1);
-		if (err) {
-			pr_err("gpu-write failed at offset=%llx", offset);
-			goto out_vma_unpin;
-		}
-
-		err = cpu_check(obj, dword, num + 1);
-		if (err) {
-			pr_err("cpu-check failed at offset=%llx", offset);
-			goto out_vma_unpin;
-		}
-
-		i915_vma_unpin(vma);
+		err = __igt_write_huge(ctx, engine, obj, size, offset, dword, num + 1);
+		if (err)
+			goto free_order;
 
 		if (num > 0 &&
 		    igt_timeout(end_time,
@@ -1142,11 +1185,7 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 			break;
 	}
 
-out_vma_unpin:
-	if (i915_vma_is_pinned(vma))
-		i915_vma_unpin(vma);
-out_vma_close:
-	i915_vma_close(vma);
+free_order:
 	kfree(order);
 
 	return err;
-- 
2.14.3


