[PATCH i-g-t 5/8] tests/intel/xe_drm_fdinfo: Add tests to verify all class utilization
Umesh Nerlige Ramappa
umesh.nerlige.ramappa at intel.com
Fri Jun 21 23:00:59 UTC 2024
Verify per-class utilization reported through fdinfo under varying loads: only the
target class busy, every class except the target busy, and all classes busy.
Signed-off-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa at intel.com>
---
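Notes (not part of the commit): each subtest samples the client's fdinfo counters
before and after a fixed sleep (batch_duration_ns) and compares the per-class
deltas. The pceu_cycles struct and the read_engine_cycles()/check_results()
helpers come from earlier patches in this series and are not shown here. For
reviewers unfamiliar with the interface, the sketch below illustrates the kind of
bookkeeping involved, assuming the drm-cycles-<class> / drm-total-cycles-<class>
keys described in Documentation/gpu/drm-usage-stats.rst; the helper names
(sample_class(), class_utilization()) are illustrative only and do not exist in IGT.

/*
 * Illustrative only -- not IGT code.  Derive per-class utilization of a DRM
 * client from two fdinfo samples, assuming the drm-cycles-<class> and
 * drm-total-cycles-<class> keys from Documentation/gpu/drm-usage-stats.rst.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct class_sample {
	uint64_t cycles;	/* ticks this client kept the class busy */
	uint64_t total;		/* total ticks elapsed for the class */
};

/* Parse /proc/self/fdinfo/<fd> for one engine class, e.g. "rcs" or "ccs". */
static int sample_class(int drm_fd, const char *class, struct class_sample *s)
{
	char path[64], line[256], busy_key[64], total_key[64];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", drm_fd);
	snprintf(busy_key, sizeof(busy_key), "drm-cycles-%s:", class);
	snprintf(total_key, sizeof(total_key), "drm-total-cycles-%s:", class);

	f = fopen(path, "r");
	if (!f)
		return -1;

	memset(s, 0, sizeof(*s));
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, busy_key, strlen(busy_key)))
			s->cycles = strtoull(line + strlen(busy_key), NULL, 10);
		else if (!strncmp(line, total_key, strlen(total_key)))
			s->total = strtoull(line + strlen(total_key), NULL, 10);
	}

	fclose(f);
	return 0;
}

/* Busy percentage of one class over the window between two samples. */
static double class_utilization(const struct class_sample *before,
				const struct class_sample *after)
{
	uint64_t delta_total = after->total - before->total;

	if (!delta_total)
		return 0.0;

	return 100.0 * (after->cycles - before->cycles) / delta_total;
}

Each subtest only varies which classes receive a spinner and which flags are
passed to check_results() per class, so the expected busy/idle state of every
class is verified on every run.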
tests/intel/xe_drm_fdinfo.c | 149 ++++++++++++++++++++++++++++++++++++
1 file changed, 149 insertions(+)
diff --git a/tests/intel/xe_drm_fdinfo.c b/tests/intel/xe_drm_fdinfo.c
index 852931d71..8f8b4d599 100644
--- a/tests/intel/xe_drm_fdinfo.c
+++ b/tests/intel/xe_drm_fdinfo.c
@@ -34,6 +34,15 @@
  * SUBTEST: drm-busy-idle-isolation
  * Description: Check that engine load does not spill over to other drm clients
  *
+ * SUBTEST: drm-busy-idle-check-all
+ * Description: Check that only the target engine shows load when idle after busy
+ *
+ * SUBTEST: drm-most-busy-idle-check-all
+ * Description: Check that only the target engine shows idle and all others are busy
+ *
+ * SUBTEST: drm-all-busy-idle-check-all
+ * Description: Check that all engines show busy when all are loaded
+ *
  * SUBTEST: drm-total-resident
  * Description: Create and compare total and resident memory consumption by client
  *
@@ -548,6 +557,135 @@ single(int fd, struct drm_xe_engine_class_instance *hwe, int width, int count,
 	close(local_fd);
 }
+static void
+busy_check_all(int fd, struct drm_xe_engine_class_instance *hwe, int width, int count,
+	       unsigned int flags)
+{
+	struct pceu_cycles pceu1[DRM_XE_ENGINE_CLASS_COMPUTE + 1];
+	struct pceu_cycles pceu2[DRM_XE_ENGINE_CLASS_COMPUTE + 1];
+	struct xe_spin_ctx *ctx = NULL;
+	int local_fd = fd, class;
+	uint32_t vm;
+
+	if (flags & TEST_ISOLATION)
+		local_fd = drm_reopen_driver(fd);
+
+	vm = xe_vm_create(local_fd, 0, 0);
+	if (flags & TEST_BUSY) {
+		ctx = xe_spin_ctx_init(local_fd, hwe, vm, width, count);
+		xe_spin_sync_start(local_fd, ctx);
+	}
+
+	read_engine_cycles(local_fd, pceu1);
+	measured_usleep(batch_duration_ns / 1000);
+	if (flags & TEST_TRAILING_IDLE)
+		xe_spin_sync_end(local_fd, ctx);
+	read_engine_cycles(local_fd, pceu2);
+
+	xe_for_each_engine_class(class)
+		check_results(pceu1, pceu2, class, width,
+			      hwe->engine_class == class ? flags : 0);
+
+	xe_spin_sync_end(local_fd, ctx);
+	xe_spin_ctx_destroy(local_fd, ctx);
+	xe_vm_destroy(local_fd, vm);
+
+	if (flags & TEST_ISOLATION)
+		close(local_fd);
+}
+
+static void
+most_busy_check_all(int fd, struct drm_xe_engine_class_instance *hwe, int width, int count,
+		    unsigned int flags)
+{
+	struct pceu_cycles pceu1[DRM_XE_ENGINE_CLASS_COMPUTE + 1];
+	struct pceu_cycles pceu2[DRM_XE_ENGINE_CLASS_COMPUTE + 1];
+	struct xe_spin_ctx *ctx[DRM_XE_ENGINE_CLASS_COMPUTE + 1] = {};
+	struct drm_xe_engine_class_instance *_hwe;
+	int local_fd = fd, class;
+	uint32_t vm;
+
+	if (flags & TEST_ISOLATION)
+		local_fd = drm_reopen_driver(fd);
+
+	vm = xe_vm_create(local_fd, 0, 0);
+	if (flags & TEST_BUSY) {
+		/* spin on one hwe per class except the target class hwes */
+		xe_for_each_engine(local_fd, _hwe) {
+			int _class = _hwe->engine_class;
+
+			if (_class == hwe->engine_class || ctx[_class])
+				continue;
+
+			ctx[_class] = xe_spin_ctx_init(local_fd, _hwe, vm, width, count);
+			xe_spin_sync_start(local_fd, ctx[_class]);
+		}
+	}
+
+	read_engine_cycles(local_fd, pceu1);
+	measured_usleep(batch_duration_ns / 1000);
+	if (flags & TEST_TRAILING_IDLE)
+		xe_for_each_engine_class(class)
+			xe_spin_sync_end(local_fd, ctx[class]);
+	read_engine_cycles(local_fd, pceu2);
+
+	xe_for_each_engine_class(class) {
+		check_results(pceu1, pceu2, class, width,
+			      hwe->engine_class == class ? 0 : flags);
+		xe_spin_sync_end(local_fd, ctx[class]);
+		xe_spin_ctx_destroy(local_fd, ctx[class]);
+	}
+	xe_vm_destroy(local_fd, vm);
+
+	if (flags & TEST_ISOLATION)
+		close(local_fd);
+}
+
+static void
+all_busy_check_all(int fd, struct drm_xe_engine_class_instance *hwe, int width, int count,
+		   unsigned int flags)
+{
+	struct pceu_cycles pceu1[DRM_XE_ENGINE_CLASS_COMPUTE + 1];
+	struct pceu_cycles pceu2[DRM_XE_ENGINE_CLASS_COMPUTE + 1];
+	struct xe_spin_ctx *ctx[DRM_XE_ENGINE_CLASS_COMPUTE + 1] = {};
+	struct drm_xe_engine_class_instance *_hwe;
+	int local_fd = fd, class;
+	uint32_t vm;
+
+	if (flags & TEST_ISOLATION)
+		local_fd = drm_reopen_driver(fd);
+
+	vm = xe_vm_create(local_fd, 0, 0);
+	if (flags & TEST_BUSY) {
+		/* spin on one hwe per class */
+		xe_for_each_engine(local_fd, _hwe) {
+			int _class = _hwe->engine_class;
+
+			if (ctx[_class])
+				continue;
+
+			ctx[_class] = xe_spin_ctx_init(local_fd, _hwe, vm, width, count);
+			xe_spin_sync_start(local_fd, ctx[_class]);
+		}
+	}
+
+	read_engine_cycles(local_fd, pceu1);
+	measured_usleep(batch_duration_ns / 1000);
+	if (flags & TEST_TRAILING_IDLE)
+		xe_for_each_engine_class(class)
+			xe_spin_sync_end(local_fd, ctx[class]);
+	read_engine_cycles(local_fd, pceu2);
+
+	xe_for_each_engine_class(class) {
+		check_results(pceu1, pceu2, class, width, flags);
+		xe_spin_sync_end(local_fd, ctx[class]);
+		xe_spin_ctx_destroy(local_fd, ctx[class]);
+	}
+	xe_vm_destroy(local_fd, vm);
+
+	if (flags & TEST_ISOLATION)
+		close(local_fd);
+}
 igt_main
 {
 	struct drm_xe_engine_class_instance *hwe;
@@ -594,6 +732,17 @@ igt_main
 		xe_for_each_engine(xe, hwe)
 			single(xe, hwe, 1, 1, TEST_BUSY | TEST_TRAILING_IDLE | TEST_ISOLATION);
+	igt_subtest("drm-busy-idle-check-all")
+		xe_for_each_engine(xe, hwe)
+			busy_check_all(xe, hwe, 1, 1, TEST_BUSY | TEST_TRAILING_IDLE);
+
+	igt_subtest("drm-most-busy-idle-check-all")
+		xe_for_each_engine(xe, hwe)
+			most_busy_check_all(xe, hwe, 1, 1, TEST_BUSY | TEST_TRAILING_IDLE);
+
+	igt_subtest("drm-all-busy-idle-check-all")
+		all_busy_check_all(xe, hwe, 1, 1, TEST_BUSY | TEST_TRAILING_IDLE);
+
 	igt_describe("Create and compare total and resident memory consumption by client");
 	igt_subtest("drm-total-resident")
 		test_total_resident(xe);
--
2.34.1