[PATCH 2/4] drm/i915/selftests: add a smoke mode to igt_write_huge()

Matthew Auld matthew.auld at intel.com
Sun Sep 22 21:31:24 UTC 2019
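
Add a smoke mode to igt_write_huge(). With smoke set we pick a single
store_dword capable engine from the shuffled engine order, a random
dword and a random GTT offset (aligned to 2M when 64K pages are in the
mix, otherwise I915_GTT_MIN_ALIGNMENT) and issue just one write,
instead of walking prime offsets in ascending/descending order until
the timeout hits. Switch igt_ppgtt_internal_huge() and
igt_ppgtt_gemfs_huge() over to the smoke mode, while
igt_ppgtt_exhaust_huge() keeps the full offset sweep.

For picking the offset, add an igt_random_offset() helper to the
selftest utils, which returns a random offset aligned to align such
that [offset, offset + len) still fits within [start, end).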


---
 .../gpu/drm/i915/gem/selftests/huge_pages.c   | 94 ++++++++++++-------
 .../drm/i915/gem/selftests/igt_gem_utils.c    | 23 ++++-
 .../drm/i915/gem/selftests/igt_gem_utils.h    |  6 ++
 3 files changed, 85 insertions(+), 38 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index d19f4f07449c..4d17a98c7762 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -982,7 +982,8 @@ static int __igt_write_huge(struct intel_context *ce,
 }
 
 static int igt_write_huge(struct i915_gem_context *ctx,
-			  struct drm_i915_gem_object *obj)
+			  struct drm_i915_gem_object *obj,
+			  bool smoke)
 {
 	struct i915_gem_engines *engines;
 	struct i915_gem_engines_iter it;
@@ -1028,52 +1029,73 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 	if (!order)
 		return -ENOMEM;
 
+	engines = i915_gem_context_lock_engines(ctx);
 	max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
 	max = div_u64(max - size, max_page_size);
-
-	/*
-	 * Try various offsets in an ascending/descending fashion until we
-	 * timeout -- we want to avoid issues hidden by effectively always using
-	 * offset = 0.
-	 */
 	i = 0;
-	engines = i915_gem_context_lock_engines(ctx);
-	for_each_prime_number_from(num, 0, max) {
-		u64 offset_low = num * max_page_size;
-		u64 offset_high = (max - num) * max_page_size;
-		u32 dword = offset_in_page(num) / 4;
-		struct intel_context *ce;
 
+	if (smoke) {
+		struct intel_context *ce;
+		u64 gtt_offset;
+		u32 dword;
+		u32 align;
+retry:
 		ce = engines->engines[order[i] % engines->num_engines];
 		i = (i + 1) % (count * count);
 		if (!ce || !intel_engine_can_store_dword(ce->engine))
-			continue;
+			goto retry;
+
+		align = I915_GTT_MIN_ALIGNMENT;
+		if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
+			align = I915_GTT_PAGE_SIZE_2M;
 
+		dword = prandom_u32_state(&prng) % (PAGE_SIZE / sizeof(u32));
+		gtt_offset = igt_random_offset(&prng, 0, max, size, align);
+		err = __igt_write_huge(ce, obj, size, gtt_offset, dword, num + 1);
+	} else {
 		/*
-		 * In order to utilize 64K pages we need to both pad the vma
-		 * size and ensure the vma offset is at the start of the pt
-		 * boundary, however to improve coverage we opt for testing both
-		 * aligned and unaligned offsets.
+		 * Try various offsets in an ascending/descending fashion until we
+		 * timeout -- we want to avoid issues hidden by effectively always using
+		 * offset = 0.
 		 */
-		if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
-			offset_low = round_down(offset_low,
+		for_each_prime_number_from(num, 0, max) {
+			u64 offset_low = num * max_page_size;
+			u64 offset_high = (max - num) * max_page_size;
+			u32 dword = offset_in_page(num) / 4;
+			struct intel_context *ce;
+
+			ce = engines->engines[order[i] % engines->num_engines];
+			i = (i + 1) % (count * count);
+			if (!ce || !intel_engine_can_store_dword(ce->engine))
+				continue;
+
+			/*
+			 * In order to utilize 64K pages we need to both pad the vma
+			 * size and ensure the vma offset is at the start of the pt
+			 * boundary, however to improve coverage we opt for testing both
+			 * aligned and unaligned offsets.
+			 */
+			if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
+				offset_low = round_down(offset_low,
 						I915_GTT_PAGE_SIZE_2M);
 
-		err = __igt_write_huge(ce, obj, size, offset_low,
-				       dword, num + 1);
-		if (err)
-			break;
+			err = __igt_write_huge(ce, obj, size, offset_low,
+					       dword, num + 1);
+			if (err)
+				break;
 
-		err = __igt_write_huge(ce, obj, size, offset_high,
-				       dword, num + 1);
-		if (err)
-			break;
+			err = __igt_write_huge(ce, obj, size, offset_high,
+					       dword, num + 1);
+			if (err)
+				break;
 
-		if (igt_timeout(end_time,
-				"%s timed out on %s, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
-				__func__, ce->engine->name, offset_low, offset_high,
-				max_page_size))
-			break;
+			if (igt_timeout(end_time,
+					"%s timed out on %s, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
+					__func__, ce->engine->name,
+					offset_low, offset_high,
+					max_page_size))
+				break;
+		}
 	}
 	i915_gem_context_unlock_engines(ctx);
 
@@ -1159,7 +1181,7 @@ static int igt_ppgtt_exhaust_huge(void *arg)
 			/* Force the page-size for the gtt insertion */
 			obj->mm.page_sizes.sg = page_sizes;
 
-			err = igt_write_huge(ctx, obj);
+			err = igt_write_huge(ctx, obj, false);
 			if (err) {
 				pr_err("exhaust write-huge failed with size=%u\n",
 				       size);
@@ -1235,7 +1257,7 @@ static int igt_ppgtt_internal_huge(void *arg)
 			break;
 		}
 
-		err = igt_write_huge(ctx, obj);
+		err = igt_write_huge(ctx, obj, true);
 		if (err) {
 			pr_err("%s write-huge failed with size=%u\n",
 			       __func__, size);
@@ -1298,7 +1320,7 @@ static int igt_ppgtt_gemfs_huge(void *arg)
 			break;
 		}
 
-		err = igt_write_huge(ctx, obj);
+		err = igt_write_huge(ctx, obj, true);
 		if (err) {
 			pr_err("%s write-huge failed with size=%u\n",
 			       __func__, size);
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index ee5dc13a30b3..a2e2ea93c424 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -10,10 +10,10 @@
 #include "gem/i915_gem_pm.h"
 #include "gt/intel_context.h"
 #include "gt/intel_gt.h"
-#include "i915_vma.h"
 #include "i915_drv.h"
-
 #include "i915_request.h"
+#include "i915_vma.h"
+#include "selftests/i915_random.h"
 
 struct i915_request *
 igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
@@ -169,3 +169,22 @@ int igt_gpu_fill_dw(struct intel_context *ce,
 	i915_vma_put(batch);
 	return err;
 }
+
+u64 igt_random_offset(struct rnd_state *prng,
+		      u64 start, u64 end,
+		      u64 len, u64 align)
+{
+	u64 range, addr;
+
+	GEM_BUG_ON(range_overflows(start, len, end));
+	GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));
+
+	range = round_down(end - len, align) - round_up(start, align);
+	if (range) {
+		addr = i915_prandom_u64_state(prng);
+		div64_u64_rem(addr, range, &addr);
+		start += addr;
+	}
+
+	return round_up(start, align);
+}
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
index 4221cf84d175..a92eb3859965 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
@@ -16,6 +16,8 @@ struct i915_vma;
 struct intel_context;
 struct intel_engine_cs;
 
+struct rnd_state;
+
 struct i915_request *
 igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
 
@@ -29,4 +31,8 @@ int igt_gpu_fill_dw(struct intel_context *ce,
 		    struct i915_vma *vma, u64 offset,
 		    unsigned long count, u32 val);
 
+u64 igt_random_offset(struct rnd_state *prng,
+		      u64 start, u64 end,
+		      u64 len, u64 align);
+
 #endif /* __IGT_GEM_UTILS_H__ */
-- 
2.20.1


