[PATCH 2/2] drm/i915/selftests: add igt_ppgtt_mixed

Matthew Auld matthew.auld at intel.com
Thu Sep 22 09:59:23 UTC 2022

Sanity check mixing 4K and 64K page placements within the same ppGTT:
allocate a mix of LMEM and SMEM objects with randomised sizes, pin them
back-to-back in the same address space, check that LMEM mappings are
inserted with 64K GTT pages, and then exercise GPU dword writes into
each object from every engine.
---
 .../gpu/drm/i915/gem/selftests/huge_pages.c   | 160 ++++++++++++++++++
 1 file changed, 160 insertions(+)

diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index c570cf780079..0e0c9e045231 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -1540,6 +1540,163 @@ static int igt_ppgtt_compact(void *arg)
 	return err;
 }
 
+static int igt_ppgtt_mixed(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	const unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
+	struct drm_i915_gem_object *obj, *on;
+	struct i915_gem_engines_iter it;
+	struct i915_address_space *vm;
+	struct i915_gem_context *ctx;
+	struct intel_context *ce;
+	struct file *file;
+	I915_RND_STATE(prng);
+	LIST_HEAD(objects);
+	struct intel_memory_region *mr;
+	struct i915_vma *vma;
+	u32 i, rem, addr;
+	int err;
+
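+	/*
+	 * Sanity check mixing 4K and 64K pages within the same page-table,
+	 * with objects placed in both LMEM and SMEM, and exercise GPU writes
+	 * into each object from every engine.
+	 */
+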
+	if (!HAS_64K_PAGES(i915)) {
+		pr_info("device lacks 64K page support, skipping\n");
+		return 0;
+	}
+
+	if (!HAS_LMEM(i915)) {
+		pr_info("device lacks LMEM support, skipping\n");
+		return 0;
+	}
+
+	if (NEEDS_COMPACT_PT(i915)) {
+		pr_info("device lacks PS64, skipping\n");
+		return 0;
+	}
+
+	file = mock_file(i915);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	ctx = hugepage_ctx(i915, file);
+	if (IS_ERR(ctx)) {
+		err = PTR_ERR(ctx);
+		goto out;
+	}
+	vm = i915_gem_context_get_eb_vm(ctx);
+
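+	/*
+	 * Fill the first 16M of the ppGTT with a mix of LMEM and SMEM objects
+	 * of random size, pinned back-to-back so that 4K and 64K mappings can
+	 * end up sharing page-tables.
+	 */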
+	i = i915_prandom_u32_max_state(2, &prng);
+	rem = SZ_16M;
+	addr = 0;
+	do {
+		u32 sz;
+
+		sz = i915_prandom_u32_max_state(min_t(u32, rem, SZ_4M), &prng);
+		/* At least one 4K page, but never more than what remains */
+		sz = min_t(u32, max_t(u32, sz, SZ_4K), rem);
+
+		mr = i915->mm.regions[INTEL_REGION_LMEM_0];
+		if (i & 1)
+			mr = i915->mm.regions[INTEL_REGION_SMEM];
+
+		obj = i915_gem_object_create_region(mr, sz, 0, 0);
+		if (IS_ERR(obj)) {
+			err = PTR_ERR(obj);
+			goto out_vm;
+		}
+
+		list_add_tail(&obj->st_link, &objects);
+
+		vma = i915_vma_instance(obj, vm, NULL);
+		if (IS_ERR(vma)) {
+			err = PTR_ERR(vma);
+			goto err_put;
+		}
+
+		addr = round_up(addr, mr->min_page_size);
+		err = i915_vma_pin(vma, 0, 0, addr | flags);
+		if (err)
+			goto err_put;
+
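+		/* LMEM should always be mapped with 64K GTT pages here */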
+		if (mr->type == INTEL_MEMORY_LOCAL &&
+		    !(vma->resource->page_sizes_gtt & I915_GTT_PAGE_SIZE_64K)) {
+			err = -EINVAL;
+			goto err_put;
+		}
+
+		addr += obj->base.size;
+		rem -= sz;
+		i++;
+	} while (rem);
+
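+	/* Replay the same layout, writing and checking from each engine */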
+	addr = 0;
+	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+		list_for_each_entry(obj, &objects, st_link) {
+			/* __igt_write_huge() takes a dword index within a page */
+			u32 dw = i915_prandom_u32_max_state(PAGE_SIZE /
+							    sizeof(u32) - 1, &prng);
+
+			addr = round_up(addr, obj->mm.region->min_page_size);
+
+			pr_info("writing using %s to %s with sz=%llx at addr=%x\n",
+				ce->engine->name, obj->mm.region->name,
+				(u64)obj->base.size, addr);
+
+			err = __igt_write_huge(ce, obj, obj->base.size, addr, 0, dw);
+			if (!err)
+				err = __igt_write_huge(ce, obj, obj->base.size,
+						       addr, dw, dw + 1);
+			if (!err)
+				err = __igt_write_huge(ce, obj, obj->base.size, addr,
+						       PAGE_SIZE / sizeof(u32) - 1,
+						       dw + 2);
+			if (err)
+				goto out_unlock;
+
+			addr += obj->base.size;
+		}
+
+		cond_resched();
+	}
+
+out_unlock:
+	i915_gem_context_unlock_engines(ctx);
+
+err_put:
+	list_for_each_entry_safe(obj, on, &objects, st_link) {
+		list_del(&obj->st_link);
+		i915_gem_object_put(obj);
+	}
+
+out_vm:
+	i915_vm_put(vm);
+out:
+	fput(file);
+	return err;
+}
+
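+/* Wrappers so the same test can be run as multiple subtests */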
+static int igt_ppgtt_mixed2(void *arg)
+{
+	return igt_ppgtt_mixed(arg);
+}
+
+static int igt_ppgtt_mixed3(void *arg)
+{
+	return igt_ppgtt_mixed(arg);
+}
+
 static int igt_tmpfs_fallback(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
@@ -1803,6 +1960,9 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_ppgtt_smoke_huge),
 		SUBTEST(igt_ppgtt_sanity_check),
 		SUBTEST(igt_ppgtt_compact),
+		SUBTEST(igt_ppgtt_mixed),
+		SUBTEST(igt_ppgtt_mixed2),
+		SUBTEST(igt_ppgtt_mixed3),
 	};
 
 	if (!HAS_PPGTT(i915)) {
-- 
2.37.3


