[PATCH i-g-t] tests/xe_vm: Report OOM for vm_bind ioctl under memory pressure
priyanka.dandamudi at intel.com
Wed Jul 16 11:52:37 UTC 2025
From: Priyanka Dandamudi <priyanka.dandamudi at intel.com>
Add a test which creates buffer objects on an LR VM and vm_binds them
in a loop until it runs out of memory.
This checks that buffer objects on a single VM do not get evicted and
that the bind instead fails with ENOMEM in non-fault mode.
Signed-off-by: Priyanka Dandamudi <priyanka.dandamudi at intel.com>
---
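
Note: the subtest requires VRAM and will skip otherwise. For reference, it
can be run standalone with something like the following (the binary path
assumes a meson build directory named "build"):

  ./build/tests/xe_vm --run-subtest out-of-memory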
 tests/intel/xe_vm.c | 81 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 81 insertions(+)
diff --git a/tests/intel/xe_vm.c b/tests/intel/xe_vm.c
index c1abb08bb..e808a0b4e 100644
--- a/tests/intel/xe_vm.c
+++ b/tests/intel/xe_vm.c
@@ -2368,6 +2368,81 @@ static void invalid_vm_id(int fd)
 	do_ioctl_err(fd, DRM_IOCTL_XE_VM_DESTROY, &destroy, ENOENT);
 }
 
+/**
+ * SUBTEST: out-of-memory
+ * Description: Check that the vm_bind ioctl reports ENOMEM when creating and
+ *	binding buffer objects on an LR VM beyond the available visible VRAM size.
+ * Functionality: oom
+ * Test category: functionality test
+ */
+static void test_oom(int fd)
+{
+#define USER_FENCE_VALUE 0xdeadbeefdeadbeefull
+#define BO_SIZE xe_bb_size(fd, SZ_512M)
+#define MAX_BUFS (int)(xe_visible_vram_size(fd, 0) / BO_SIZE)
+	uint64_t addr = 0x1a0000;
+	uint64_t vm_sync = 0;
+	uint32_t bo[MAX_BUFS + 1];
+	uint32_t *data[MAX_BUFS + 1];
+	uint32_t vm;
+	struct drm_xe_sync sync[1] = {
+		{ .type = DRM_XE_SYNC_TYPE_USER_FENCE, .flags = DRM_XE_SYNC_FLAG_SIGNAL,
+		  .timeline_value = USER_FENCE_VALUE },
+	};
+	size_t bo_size = BO_SIZE;
+	int total_bufs = MAX_BUFS;
+
+	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE, 0);
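+	/*
+	 * MAX_BUFS BOs of BO_SIZE fill the visible VRAM, so creating and
+	 * binding one extra BO on the same VM is expected to fail with ENOMEM.
+	 */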
+	for (int iter = 0; iter <= total_bufs; iter++) {
+		bo[iter] = xe_bo_create(fd, 0, bo_size,
+					vram_if_possible(fd, 0),
+					DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING |
+					DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
+
+		sync[0].addr = to_user_pointer(&vm_sync);
+		if (iter < total_bufs) {
+			xe_vm_bind_async(fd, vm, 0, bo[iter], 0,
+					 addr + bo_size * iter, bo_size, sync, 1);
+		} else {
+			/*
+			 * One BO beyond visible VRAM capacity: the bind must
+			 * fail with ENOMEM instead of evicting earlier BOs.
+			 */
+			igt_assert_eq(__xe_vm_bind(fd, vm, 0, bo[iter], 0,
+						   addr + bo_size * iter, bo_size,
+						   DRM_XE_VM_BIND_OP_MAP, 0, sync, 1, 0,
+						   DEFAULT_PAT_INDEX, 0), -ENOMEM);
+			gem_close(fd, bo[iter]);
+			break;
+		}
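+		/*
+		 * Wait for the bind to signal the user fence, then map and
+		 * write the BO so its backing store is actually allocated.
+		 */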
+		xe_wait_ufence(fd, &vm_sync, USER_FENCE_VALUE, 0, NSEC_PER_SEC);
+		vm_sync = 0;
+		data[iter] = xe_bo_map(fd, bo[iter], bo_size);
+		memset(data[iter], 0, bo_size);
+	}
+
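+	/* Unbind and release everything that was successfully bound. */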
+	for (int iter = 0; iter < total_bufs; iter++) {
+		sync[0].addr = to_user_pointer(&vm_sync);
+		xe_vm_unbind_async(fd, vm, 0, 0, addr + bo_size * iter, bo_size,
+				   sync, 1);
+		xe_wait_ufence(fd, &vm_sync, USER_FENCE_VALUE, 0, NSEC_PER_SEC);
+		vm_sync = 0;
+		munmap(data[iter], bo_size);
+		gem_close(fd, bo[iter]);
+	}
+
+	xe_vm_destroy(fd, vm);
+}
+
 igt_main
 {
 	struct drm_xe_engine_class_instance *hwe, *hwe_non_copy = NULL;
@@ -2759,6 +2834,12 @@ igt_main
igt_subtest("invalid-vm-id")
invalid_vm_id(fd);
+ igt_subtest("out-of-memory") {
+ igt_require(xe_has_vram(fd));
+ igt_assert(xe_visible_vram_size(fd, 0));
+ test_oom(fd);
+ }
+
 	igt_fixture
 		drm_close_driver(fd);
 }
--
2.34.1