[PATCH 28/29] drm/i915/selftests: allow gpu_write/cpu_check to target a subset of the object

Matthew Auld matthew.auld at intel.com
Tue Sep 24 11:24:16 UTC 2019


Teach gpu_write(), cpu_check() and __igt_write_huge() to operate on an
explicit (offset, size) subset of the object instead of always covering
the whole thing. The smoke path in igt_write_huge() then limits the GPU
write to a random 2M block within the object, while still pinning the
full object at a random GTT offset; the non-smoke path keeps the
previous whole-object behaviour by passing (0, obj->base.size).

NOTE(review): in __cpu_check_shmem()/__cpu_check_lmem() the new loop
`for (n = offset >> PAGE_SHIFT; n < size >> PAGE_SHIFT; ++n)` treats
`size` as an absolute end rather than a length — with a nonzero offset
and size <= offset no pages are checked. Presumably the upper bound
should be `(offset + size) >> PAGE_SHIFT`; please confirm.

---
 .../gpu/drm/i915/gem/selftests/huge_pages.c   | 77 +++++++++++++------
 1 file changed, 53 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index e27ea8c6c423..45067a540cbc 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -956,6 +956,8 @@ static int igt_mock_ppgtt_64K(void *arg)
 
 static int gpu_write(struct intel_context *ce,
 		     struct i915_vma *vma,
+		     u64 offset,
+		     u64 size,
 		     u32 dw,
 		     u32 val)
 {
@@ -967,11 +969,13 @@ static int gpu_write(struct intel_context *ce,
 	if (err)
 		return err;
 
-	return igt_gpu_fill_dw(ce, vma, dw * sizeof(u32),
-			       vma->size >> PAGE_SHIFT, val);
+	return igt_gpu_fill_dw(ce, vma, offset + dw * sizeof(u32),
+			       size >> PAGE_SHIFT, val);
 }
 
-static int __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+static int __cpu_check_shmem(struct drm_i915_gem_object *obj,
+			     u64 offset, u64 size,
+			     u32 dword, u32 val)
 {
 	unsigned int needs_flush;
 	unsigned long n;
@@ -981,7 +985,7 @@ static int __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val
 	if (err)
 		return err;
 
-	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
+	for (n = offset >> PAGE_SHIFT; n < size >> PAGE_SHIFT; ++n) {
 		u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));
 
 		if (needs_flush & CLFLUSH_BEFORE)
@@ -1003,7 +1007,9 @@ static int __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val
 	return err;
 }
 
-static int __cpu_check_lmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+static int __cpu_check_lmem(struct drm_i915_gem_object *obj,
+			    u64 offset, u64 size,
+			    u32 dword, u32 val)
 {
 	unsigned long n;
 	int err;
@@ -1018,7 +1024,7 @@ static int __cpu_check_lmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 	if (err)
 		return err;
 
-	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
+	for (n = offset >> PAGE_SHIFT; n < size >> PAGE_SHIFT; ++n) {
 		u32 __iomem *base;
 		u32 read_val;
 
@@ -1038,18 +1044,21 @@ static int __cpu_check_lmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 	return err;
 }
 
-static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+static int cpu_check(struct drm_i915_gem_object *obj,
+		     u64 offset, u64 size,
+		     u32 dword, u32 val)
 {
 	if (i915_gem_object_has_struct_page(obj))
-		return __cpu_check_shmem(obj, dword, val);
+		return __cpu_check_shmem(obj, offset, size, dword, val);
 	else if (i915_gem_object_is_lmem(obj))
-		return __cpu_check_lmem(obj, dword, val);
+		return __cpu_check_lmem(obj, offset, size, dword, val);
 
 	return -ENODEV;
 }
 
 static int __igt_write_huge(struct intel_context *ce,
 			    struct drm_i915_gem_object *obj,
+			    u64 gtt_size, u64 gtt_offset,
 			    u64 size, u64 offset,
 			    u32 dword, u32 val)
 {
@@ -1065,7 +1074,7 @@ static int __igt_write_huge(struct intel_context *ce,
 	if (err)
 		goto out_vma_close;
 
-	err = i915_vma_pin(vma, size, 0, flags | offset);
+	err = i915_vma_pin(vma, gtt_size, 0, flags | gtt_offset);
 	if (err) {
 		/*
 		 * The ggtt may have some pages reserved so
@@ -1081,15 +1090,15 @@ static int __igt_write_huge(struct intel_context *ce,
 	if (err)
 		goto out_vma_unpin;
 
-	err = gpu_write(ce, vma, dword, val);
+	err = gpu_write(ce, vma, offset, size, dword, val);
 	if (err) {
-		pr_err("gpu-write failed at offset=%llx\n", offset);
+		pr_err("gpu-write failed at offset=%llx\n", gtt_offset);
 		goto out_vma_unpin;
 	}
 
-	err = cpu_check(obj, dword, val);
+	err = cpu_check(obj, offset, size, dword, val);
 	if (err) {
-		pr_err("cpu-check failed at offset=%llx\n", offset);
+		pr_err("cpu-check failed at offset=%llx\n", gtt_offset);
 		goto out_vma_unpin;
 	}
 
@@ -1150,13 +1159,13 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 		return -ENOMEM;
 
 	engines = i915_gem_context_lock_engines(ctx);
-	max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
-	max = div_u64(max - size, max_page_size);
 	i = 0;
 
 	if (smoke) {
 		struct intel_context *ce;
 		u64 gtt_offset;
+		u64 sub_offset;
+		u64 sub_size;
 		u32 dword;
 		u32 align;
 retry:
@@ -1169,10 +1178,24 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 		if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
 			align = I915_GTT_PAGE_SIZE_2M;
 
+		/*
+		 * Limit ourselves to a random 2M block within the object for
+		 * the gpu operation.
+		 */
+		sub_size = min_t(u64, SZ_2M, obj->base.size);
+		sub_offset = igt_random_offset(&prng, 0, obj->base.size,
+					       sub_size, PAGE_SIZE);
+
+		gtt_offset = igt_random_offset(&prng, 0, ce->vm->total, size, align);
 		dword = prandom_u32_state(&prng) % (PAGE_SIZE / sizeof(u32));
-		gtt_offset = igt_random_offset(&prng, 0, max, size, align);
-		err = __igt_write_huge(ce, obj, size, gtt_offset, dword, num + 1);
+
+		err = __igt_write_huge(ce, obj,
+				       size, gtt_offset,
+				       sub_size, sub_offset,
+				       dword, dword);
 	} else {
+		max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
+		max = div_u64(max - size, max_page_size);
 		/*
 		 * Try various offsets in an ascending/descending fashion until we
 		 * timeout -- we want to avoid issues hidden by effectively always using
@@ -1199,12 +1222,16 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 				offset_low = round_down(offset_low,
 						I915_GTT_PAGE_SIZE_2M);
 
-			err = __igt_write_huge(ce, obj, size, offset_low,
+			err = __igt_write_huge(ce, obj,
+					       size, offset_low,
+					       obj->base.size, 0,
 					       dword, num + 1);
 			if (err)
 				break;
 
-			err = __igt_write_huge(ce, obj, size, offset_high,
+			err = __igt_write_huge(ce, obj,
+					       size, offset_high,
+					       obj->base.size, 0,
 					       dword, num + 1);
 			if (err)
 				break;
@@ -1248,6 +1275,8 @@ static int igt_ppgtt_exhaust_huge(void *arg)
 	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1)
 		pages[n++] = BIT(i);
 
+	/* XXX: extend this for backing storage backed by device memory */
+
 	for (size_mask = 2; size_mask < BIT(n); size_mask++) {
 		unsigned int size = 0;
 
@@ -1641,7 +1670,7 @@ static int igt_ppgtt_pin_update(void *arg)
 		if (!intel_engine_can_store_dword(ce->engine))
 			continue;
 
-		err = gpu_write(ce, vma, n++, 0xdeadbeaf);
+		err = gpu_write(ce, vma, 0, vma->size, n++, 0xdeadbeaf);
 		if (err)
 			break;
 	}
@@ -1650,7 +1679,7 @@ static int igt_ppgtt_pin_update(void *arg)
 		goto out_unpin;
 
 	while (n--) {
-		err = cpu_check(obj, n, 0xdeadbeaf);
+		err = cpu_check(obj, 0, obj->base.size, n, 0xdeadbeaf);
 		if (err)
 			goto out_unpin;
 	}
@@ -1775,7 +1804,7 @@ static int igt_shrink_thp(void *arg)
 		if (!intel_engine_can_store_dword(ce->engine))
 			continue;
 
-		err = gpu_write(ce, vma, n++, 0xdeadbeaf);
+		err = gpu_write(ce, vma, 0, vma->size, n++, 0xdeadbeaf);
 		if (err)
 			break;
 	}
@@ -1806,7 +1835,7 @@ static int igt_shrink_thp(void *arg)
 		goto out_close;
 
 	while (n--) {
-		err = cpu_check(obj, n, 0xdeadbeaf);
+		err = cpu_check(obj, 0, obj->base.size, n, 0xdeadbeaf);
 		if (err)
 			break;
 	}
-- 
2.20.1



More information about the Intel-gfx-trybot mailing list