[igt-dev] [RFC PATCH 4/4] tests/i915/gem_exec_basic: Iterate over all memory regions

Lukasz Kalamarz <lukasz.kalamarz@intel.com>
Wed Aug 14 10:21:40 UTC 2019


As part of the local memory effort we need to make sure that basic
scenarios are covered for every available memory region. This patch is
a first attempt at that: each subtest is additionally run once per
memory region, with the batch buffer placed in that region. If the
approach is accepted, it will be replicated across every test that can
benefit from it. A condensed sketch of the intended flow is included
below the diffstat for reviewers.

Signed-off-by: Lukasz Kalamarz <lukasz.kalamarz@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 tests/i915/gem_exec_basic.c | 137 +++++++++++++++++++++++++++++-------
 1 file changed, 110 insertions(+), 27 deletions(-)
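
For reviewers, a condensed sketch of the flow this patch wires up. It
leans on the helpers introduced earlier in this series
(gem_get_query_memory_regions(), gem_get_batch_size(),
gem_find_memory_region_in_query(), gem_bo_set_memory_region() and the
intel_memory_regions table); their exact semantics are assumed from
their use in the diff below, not from upstream lib code:

	struct set_param_args spa = { .batch_size = 4096 };
	const struct intel_memory_region *mr;

	/* Ask the kernel which memory regions this device exposes. */
	spa.query_info = gem_get_query_memory_regions(fd);
	spa.num_regions = spa.query_info->num_regions;

	/* Re-run every subtest once per entry in the static table. */
	spa.set_param = true;
	for (mr = intel_memory_regions; mr->region_name; mr++) {
		spa.mem_type = mr->mem_region_type;
		spa.id = mr->id;
		spa.batch_size = gem_get_batch_size(fd, spa.mem_type);

		/*
		 * Each subtest skips (igt_require) when the region is
		 * missing from the query, and otherwise pins its batch
		 * there via gem_bo_set_memory_region() before execbuf.
		 */
		all(fd, &spa);
	}

	free(spa.query_info);

The region-bound subtests are named basic-<region>-<engine>,
readonly-<region>-<engine> and gtt-<region>-<engine> alongside the
existing ones, so they can be enumerated with --list-subtests and
invoked individually with --run-subtest as usual.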

diff --git a/tests/i915/gem_exec_basic.c b/tests/i915/gem_exec_basic.c
index 1287860b..69a8c991 100644
--- a/tests/i915/gem_exec_basic.c
+++ b/tests/i915/gem_exec_basic.c
@@ -25,12 +25,29 @@
 
 IGT_TEST_DESCRIPTION("Basic sanity check of execbuf-ioctl rings.");
 
-static uint32_t batch_create(int fd)
+struct set_param_args {
+	struct local_i915_query_memory_region_info *query_info;
+	uint32_t num_regions;
+	uint32_t mem_type;
+	uint32_t id;
+	uint32_t batch_size;
+	bool set_param;
+};
+
+static uint32_t batch_create(int fd, struct set_param_args *spa)
 {
 	const uint32_t bbe = MI_BATCH_BUFFER_END;
 	uint32_t handle;
 
-	handle = gem_create(fd, 4096);
+	handle = gem_create(fd, spa->batch_size);
+
+	if (spa->set_param) {
+		int ret;
+
+		ret = gem_bo_set_memory_region(fd, handle, spa->id);
+		igt_assert_eq(ret, 0);
+	}
+
 	gem_write(fd, handle, 0, &bbe, sizeof(bbe));
 
 	return handle;
@@ -42,16 +59,24 @@ static void batch_fini(int fd, uint32_t handle)
 	gem_close(fd, handle);
 }
 
-static void noop(int fd, uint64_t flags)
+static void noop(int fd, uint64_t flags, struct set_param_args *spa)
 {
 	struct drm_i915_gem_execbuffer2 execbuf;
 	struct drm_i915_gem_exec_object2 exec;
+	int ret;
 
 	gem_require_ring(fd, flags);
 
+	if (spa->set_param) {
+		ret = gem_find_memory_region_in_query(spa->query_info,
+						      spa->num_regions,
+						      spa->id);
+		igt_require(ret);
+	}
+
 	memset(&exec, 0, sizeof(exec));
 
-	exec.handle = batch_create(fd);
+	exec.handle = batch_create(fd, spa);
 
 	memset(&execbuf, 0, sizeof(execbuf));
 	execbuf.buffers_ptr = to_user_pointer(&exec);
@@ -62,47 +87,69 @@ static void noop(int fd, uint64_t flags)
 	batch_fini(fd, exec.handle);
 }
 
-static void readonly(int fd, uint64_t flags)
+static void readonly(int fd, uint64_t flags, struct set_param_args *spa)
 {
 	struct drm_i915_gem_execbuffer2 *execbuf;
 	struct drm_i915_gem_exec_object2 exec;
+	int ret;
 
 	gem_require_ring(fd, flags);
 
-	memset(&exec, 0, sizeof(exec));
-	exec.handle = batch_create(fd);
+	if (spa->set_param) {
+		ret = gem_find_memory_region_in_query(spa->query_info,
+						      spa->num_regions,
+						      spa->id);
+		igt_require(ret);
+	}
 
-	execbuf = mmap(NULL, 4096, PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
+	memset(&exec, 0, sizeof(exec));
+	exec.handle = batch_create(fd, spa);
+
+	execbuf = mmap(NULL, spa->batch_size, PROT_WRITE,
+		       MAP_ANON | MAP_PRIVATE, -1, 0);
 	igt_assert(execbuf != NULL);
 
 	execbuf->buffers_ptr = to_user_pointer(&exec);
 	execbuf->buffer_count = 1;
 	execbuf->flags = flags;
-	igt_assert(mprotect(execbuf, 4096, PROT_READ) == 0);
+	igt_assert(mprotect(execbuf, spa->batch_size, PROT_READ) == 0);
 
 	gem_execbuf(fd, execbuf);
 
-	munmap(execbuf, 4096);
+	munmap(execbuf, spa->batch_size);
 
 	batch_fini(fd, exec.handle);
 }
 
-static void gtt(int fd, uint64_t flags)
+static void gtt(int fd, uint64_t flags, struct set_param_args *spa)
 {
 	struct drm_i915_gem_execbuffer2 *execbuf;
 	struct drm_i915_gem_exec_object2 *exec;
 	uint32_t handle;
+	int ret;
 
 	gem_require_ring(fd, flags);
 
-	handle = gem_create(fd, 4096);
+	if (spa->set_param) {
+		ret = gem_find_memory_region_in_query(spa->query_info,
+						      spa->num_regions,
+						      spa->id);
+		igt_require(ret);
+	}
+
+	handle = gem_create(fd, spa->batch_size);
+
+	if (spa->set_param) {
+		ret = gem_bo_set_memory_region(fd, handle, spa->id);
+		igt_assert_eq(ret, 0);
+	}
 
 	gem_set_domain(fd, handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
-	execbuf = gem_mmap__gtt(fd, handle, 4096, PROT_WRITE);
+	execbuf = gem_mmap__gtt(fd, handle, spa->batch_size, PROT_WRITE);
 	exec = (struct drm_i915_gem_exec_object2 *)(execbuf + 1);
 	gem_close(fd, handle);
 
-	exec->handle = batch_create(fd);
+	exec->handle = batch_create(fd, spa);
 
 	execbuf->buffers_ptr = to_user_pointer(exec);
 	execbuf->buffer_count = 1;
@@ -111,36 +158,38 @@ static void gtt(int fd, uint64_t flags)
 	gem_execbuf(fd, execbuf);
 
 	batch_fini(fd, exec->handle);
-	munmap(execbuf, 4096);
+	munmap(execbuf, spa->batch_size);
 }
 
-static void all(int i915)
+static void all(int i915, struct set_param_args *spa)
 {
 	const struct intel_execution_engine2 *e;
 
 	__for_each_physical_engine(i915, e)
-		noop(i915, e->flags);
+		noop(i915, e->flags, spa);
 }
 
-static void readonly_all(int i915)
+static void readonly_all(int i915, struct set_param_args *spa)
 {
 	const struct intel_execution_engine2 *e;
 
 	__for_each_physical_engine(i915, e)
-		readonly(i915, e->flags);
+		readonly(i915, e->flags, spa);
 }
 
-static void gtt_all(int i915)
+static void gtt_all(int i915, struct set_param_args *spa)
 {
 	const struct intel_execution_engine2 *e;
 
 	__for_each_physical_engine(i915, e)
-		gtt(i915, e->flags);
+		gtt(i915, e->flags, spa);
 }
 
 igt_main
 {
 	const struct intel_execution_engine2 *e;
+	const struct intel_memory_region *mr;
+	struct set_param_args spa;
 	int fd = -1;
 
 	igt_fixture {
@@ -148,27 +197,61 @@ igt_main
 		igt_require_gem(fd);
 
 		igt_fork_hang_detector(fd);
+
+		spa.query_info = gem_get_query_memory_regions(fd);
+		igt_assert(spa.query_info);
+		spa.num_regions = spa.query_info->num_regions;
 	}
 
+	spa.batch_size = 4096;
+	spa.set_param = false;
+
 	igt_subtest("basic-all")
-		all(fd);
+		all(fd, &spa);
 
 	igt_subtest("readonly-all")
-		readonly_all(fd);
+		readonly_all(fd, &spa);
 
 	igt_subtest("gtt-all")
-		gtt_all(fd);
+		gtt_all(fd, &spa);
 
 	__for_each_physical_engine(fd, e) {
 		igt_subtest_f("basic-%s", e->name)
-			noop(fd, e->flags);
+			noop(fd, e->flags, &spa);
 		igt_subtest_f("readonly-%s", e->name)
-			readonly(fd, e->flags);
+			readonly(fd, e->flags, &spa);
 		igt_subtest_f("gtt-%s", e->name)
-			gtt(fd, e->flags);
+			gtt(fd, e->flags, &spa);
+	}
+
+	spa.set_param = true;
+	for (mr = intel_memory_regions; mr->region_name; mr++) {
+		spa.mem_type = mr->mem_region_type;
+		spa.id = mr->id;
+		spa.batch_size = gem_get_batch_size(fd, spa.mem_type);
+
+		igt_subtest_f("basic-%s-all", mr->region_name)
+			all(fd, &spa);
+
+		igt_subtest_f("readonly-%s-all", mr->region_name)
+			readonly_all(fd, &spa);
+
+		igt_subtest_f("gtt-%s-all", mr->region_name)
+			gtt_all(fd, &spa);
+
+		__for_each_physical_engine(fd, e) {
+			igt_subtest_f("basic-%s-%s", mr->region_name, e->name)
+				noop(fd, e->flags, &spa);
+			igt_subtest_f("readonly-%s-%s", mr->region_name,
+				      e->name)
+				readonly(fd, e->flags, &spa);
+			igt_subtest_f("gtt-%s-%s", mr->region_name, e->name)
+				gtt(fd, e->flags, &spa);
+		}
 	}
 
 	igt_fixture {
+		free(spa.query_info);
 		igt_stop_hang_detector();
 		close(fd);
 	}
-- 
2.20.1


