[PATCH i-g-t v2 06/10] tests/intel/xe_drm_fdinfo: Add tests to verify all class utilization

Umesh Nerlige Ramappa umesh.nerlige.ramappa at intel.com
Wed Jul 3 00:25:28 UTC 2024


Verify utilization for all classes with varying loads.

v2:
- Drop unused ISOLATION flag in some tests.
- s/measured_usleep/usleep
- remove xe_ prefix from helpers/structures
- Drop unused parameters to all_busy_check_all
- s/_class/class

Signed-off-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa at intel.com>
---
 tests/intel/xe_drm_fdinfo.c | 130 ++++++++++++++++++++++++++++++++++++
 1 file changed, 130 insertions(+)

diff --git a/tests/intel/xe_drm_fdinfo.c b/tests/intel/xe_drm_fdinfo.c
index 410c885e7..f2051c422 100644
--- a/tests/intel/xe_drm_fdinfo.c
+++ b/tests/intel/xe_drm_fdinfo.c
@@ -37,6 +37,15 @@
  * SUBTEST: drm-busy-idle-isolation
  * Description: Check that engine load does not spill over to other drm clients
  *
+ * SUBTEST: drm-busy-idle-check-all
+ * Description: Check that only the target engine shows load when idle after busy
+ *
+ * SUBTEST: drm-most-busy-idle-check-all
+ * Description: Check that only the target engine shows idle while all other engines show busy
+ *
+ * SUBTEST: drm-all-busy-idle-check-all
+ * Description: Check that all engines show busy when all are loaded
+ *
  * SUBTEST: drm-total-resident
  * Description: Create and compare total and resident memory consumption by client
  *
@@ -548,6 +557,116 @@ single(int fd, struct drm_xe_engine_class_instance *hwe, int width, int count,
 	xe_vm_destroy(fd, vm);
 }
 
+/*
+ * busy_check_all - check that load on one engine is reported only for its class
+ *
+ * Submit a spinner on @hwe (when TEST_BUSY is set), sample the per-class
+ * engine cycles before and after a fixed window, then verify that only
+ * @hwe's engine class shows utilization and every other class stays idle.
+ *
+ * @fd: xe device fd
+ * @hwe: target engine instance the spinner is submitted to
+ * @width/@count: spinner geometry forwarded to spin_ctx_init()
+ *               (presumably batch width and submission count — per helper)
+ * @flags: TEST_BUSY and/or TEST_TRAILING_IDLE behavior modifiers
+ */
+static void
+busy_check_all(int fd, struct drm_xe_engine_class_instance *hwe, int width, int count,
+	       unsigned int flags)
+{
+	struct pceu_cycles pceu1[DRM_XE_ENGINE_CLASS_COMPUTE + 1];
+	struct pceu_cycles pceu2[DRM_XE_ENGINE_CLASS_COMPUTE + 1];
+	struct spin_ctx *ctx = NULL;
+	uint32_t vm;
+	int class;
+
+	vm = xe_vm_create(fd, 0, 0);
+	if (flags & TEST_BUSY) {
+		ctx = spin_ctx_init(fd, hwe, vm, width, count);
+		spin_sync_start(fd, ctx);
+	}
+
+	/* Sample fdinfo cycle counters around a fixed busy window. */
+	read_engine_cycles(fd, pceu1);
+	usleep(batch_duration_ns / 1000);
+	/* End the spinner before the second sample so trailing idle is measured. */
+	if (flags & TEST_TRAILING_IDLE)
+		spin_sync_end(fd, ctx);
+	read_engine_cycles(fd, pceu2);
+
+	/* Only the class the spinner ran on may show load; others expect 0. */
+	xe_for_each_engine_class(class)
+		check_results(pceu1, pceu2, class, width,
+			      hwe->engine_class == class ? flags : 0);
+
+	/*
+	 * ctx may still be NULL here when TEST_BUSY was not set, so the
+	 * spin helpers are relied upon to tolerate a NULL ctx.
+	 */
+	spin_sync_end(fd, ctx);
+	spin_ctx_destroy(fd, ctx);
+	xe_vm_destroy(fd, vm);
+}
+
+/*
+ * most_busy_check_all - check that an idle class stays idle while all
+ * other classes are loaded
+ *
+ * When TEST_BUSY is set, start one spinner on a single engine of every
+ * class EXCEPT @hwe's class, sample the per-class cycles around a fixed
+ * window, and verify that @hwe's class reads idle while every other
+ * class shows utilization.
+ *
+ * @fd: xe device fd
+ * @hwe: target engine instance whose class must remain idle
+ * @width/@count: spinner geometry forwarded to spin_ctx_init()
+ * @flags: TEST_BUSY and/or TEST_TRAILING_IDLE behavior modifiers
+ */
+static void
+most_busy_check_all(int fd, struct drm_xe_engine_class_instance *hwe, int width, int count,
+		    unsigned int flags)
+{
+	struct pceu_cycles pceu1[DRM_XE_ENGINE_CLASS_COMPUTE + 1];
+	struct pceu_cycles pceu2[DRM_XE_ENGINE_CLASS_COMPUTE + 1];
+	struct spin_ctx *ctx[DRM_XE_ENGINE_CLASS_COMPUTE + 1] = {};
+	struct drm_xe_engine_class_instance *_hwe;
+	uint32_t vm;
+	int class;
+
+	vm = xe_vm_create(fd, 0, 0);
+	if (flags & TEST_BUSY) {
+		/* spin on one hwe per class except the target class hwes */
+		xe_for_each_engine(fd, _hwe) {
+			int _class = _hwe->engine_class;
+
+			if (_class == hwe->engine_class || ctx[_class])
+				continue;
+
+			ctx[_class] = spin_ctx_init(fd, _hwe, vm, width, count);
+			spin_sync_start(fd, ctx[_class]);
+		}
+	}
+
+	/* Sample fdinfo cycle counters around a fixed busy window. */
+	read_engine_cycles(fd, pceu1);
+	usleep(batch_duration_ns / 1000);
+	if (flags & TEST_TRAILING_IDLE)
+		xe_for_each_engine_class(class)
+			spin_sync_end(fd, ctx[class]);
+	read_engine_cycles(fd, pceu2);
+
+	/*
+	 * Expect load everywhere except the target class.  ctx[] entries for
+	 * the target class (and any class with no engines) are NULL; the spin
+	 * helpers are relied upon to tolerate a NULL ctx.
+	 */
+	xe_for_each_engine_class(class) {
+		check_results(pceu1, pceu2, class, width,
+			      hwe->engine_class == class ? 0 : flags);
+		spin_sync_end(fd, ctx[class]);
+		spin_ctx_destroy(fd, ctx[class]);
+	}
+	xe_vm_destroy(fd, vm);
+}
+
+/*
+ * all_busy_check_all - check that every engine class reports busyness
+ * when all classes are loaded
+ *
+ * When TEST_BUSY is set, start one spinner on a single engine of every
+ * class, sample the per-class cycles around a fixed window, and verify
+ * that every class shows utilization.
+ *
+ * @fd: xe device fd
+ * @flags: TEST_BUSY and/or TEST_TRAILING_IDLE behavior modifiers
+ */
+static void
+all_busy_check_all(int fd, unsigned int flags)
+{
+	struct pceu_cycles pceu1[DRM_XE_ENGINE_CLASS_COMPUTE + 1];
+	struct pceu_cycles pceu2[DRM_XE_ENGINE_CLASS_COMPUTE + 1];
+	struct spin_ctx *ctx[DRM_XE_ENGINE_CLASS_COMPUTE + 1] = {};
+	struct drm_xe_engine_class_instance *hwe;
+	uint32_t vm;
+	int class;
+
+	vm = xe_vm_create(fd, 0, 0);
+	if (flags & TEST_BUSY) {
+		/* spin on one hwe per class */
+		xe_for_each_engine(fd, hwe) {
+			class = hwe->engine_class;
+
+			if (ctx[class])
+				continue;
+
+			ctx[class] = spin_ctx_init(fd, hwe, vm, 1, 1);
+			spin_sync_start(fd, ctx[class]);
+		}
+	}
+
+	/* Sample fdinfo cycle counters around a fixed busy window. */
+	read_engine_cycles(fd, pceu1);
+	usleep(batch_duration_ns / 1000);
+	if (flags & TEST_TRAILING_IDLE)
+		xe_for_each_engine_class(class)
+			spin_sync_end(fd, ctx[class]);
+	read_engine_cycles(fd, pceu2);
+
+	/*
+	 * Every class is expected to show load.  ctx[] entries for classes
+	 * with no engines stay NULL; the spin helpers are relied upon to
+	 * tolerate a NULL ctx.
+	 */
+	xe_for_each_engine_class(class) {
+		check_results(pceu1, pceu2, class, 1, flags);
+		spin_sync_end(fd, ctx[class]);
+		spin_ctx_destroy(fd, ctx[class]);
+	}
+	xe_vm_destroy(fd, vm);
+}
+
 igt_main
 {
 	struct drm_xe_engine_class_instance *hwe;
@@ -581,6 +700,17 @@ igt_main
 		xe_for_each_engine(xe, hwe)
 			single(xe, hwe, 1, 1, TEST_BUSY | TEST_TRAILING_IDLE | TEST_ISOLATION);
 
+	igt_subtest("drm-busy-idle-check-all")
+		xe_for_each_engine(xe, hwe)
+			busy_check_all(xe, hwe, 1, 1, TEST_BUSY | TEST_TRAILING_IDLE);
+
+	igt_subtest("drm-most-busy-idle-check-all")
+		xe_for_each_engine(xe, hwe)
+			most_busy_check_all(xe, hwe, 1, 1, TEST_BUSY | TEST_TRAILING_IDLE);
+
+	igt_subtest("drm-all-busy-idle-check-all")
+		all_busy_check_all(xe, TEST_BUSY | TEST_TRAILING_IDLE);
+
 	igt_describe("Create and compare total and resident memory consumption by client");
 	igt_subtest("drm-total-resident")
 		test_total_resident(xe);
-- 
2.38.1



More information about the igt-dev mailing list