[PATCH] xe_vm: Add bind array -ENOBUFS section
Matthew Brost
matthew.brost at intel.com
Thu Jun 20 23:54:37 UTC 2024
Add a section with an array of binds large enough to trigger BB
suballocation failure. Verify -ENOBUFS is returned in this case.
Cc: Matthew Auld <matthew.auld at intel.com>
Signed-off-by: Matthew Brost <matthew.brost at intel.com>
---
lib/xe/xe_ioctl.c | 19 +++++++++++++++++
lib/xe/xe_ioctl.h | 4 ++++
tests/intel/xe_vm.c | 50 ++++++++++++++++++++++++++++++++++-----------
3 files changed, 61 insertions(+), 12 deletions(-)
diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
index a437fd828a..9072801ce1 100644
--- a/lib/xe/xe_ioctl.c
+++ b/lib/xe/xe_ioctl.c
@@ -115,6 +115,25 @@ void xe_vm_bind_array_enospc(int fd, uint32_t vm, uint32_t exec_queue,
igt_assert_eq(-errno, -ENOSPC);
}
+void xe_vm_bind_array_enobufs(int fd, uint32_t vm, uint32_t exec_queue,
+ struct drm_xe_vm_bind_op *bind_ops,
+ uint32_t num_bind, struct drm_xe_sync *sync,
+ uint32_t num_syncs)
+{
+ struct drm_xe_vm_bind bind = {
+ .vm_id = vm,
+ .num_binds = num_bind,
+ .vector_of_binds = (uintptr_t)bind_ops,
+ .num_syncs = num_syncs,
+ .syncs = (uintptr_t)sync,
+ .exec_queue_id = exec_queue,
+ };
+
+ igt_assert(num_bind > 1);
+ igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind), -1);
+ igt_assert_eq(-errno, -ENOBUFS);
+}
+
int __xe_vm_bind(int fd, uint32_t vm, uint32_t exec_queue, uint32_t bo,
uint64_t offset, uint64_t addr, uint64_t size, uint32_t op,
uint32_t flags, struct drm_xe_sync *sync, uint32_t num_syncs,
diff --git a/lib/xe/xe_ioctl.h b/lib/xe/xe_ioctl.h
index 2c7506caaf..a9954d0d41 100644
--- a/lib/xe/xe_ioctl.h
+++ b/lib/xe/xe_ioctl.h
@@ -61,6 +61,10 @@ void xe_vm_bind_array_enospc(int fd, uint32_t vm, uint32_t exec_queue,
struct drm_xe_vm_bind_op *bind_ops,
uint32_t num_bind, struct drm_xe_sync *sync,
uint32_t num_syncs);
+void xe_vm_bind_array_enobufs(int fd, uint32_t vm, uint32_t exec_queue,
+ struct drm_xe_vm_bind_op *bind_ops,
+ uint32_t num_bind, struct drm_xe_sync *sync,
+ uint32_t num_syncs);
void xe_vm_unbind_all_async(int fd, uint32_t vm, uint32_t exec_queue,
uint32_t bo, struct drm_xe_sync *sync,
uint32_t num_syncs);
diff --git a/tests/intel/xe_vm.c b/tests/intel/xe_vm.c
index 7be85da62f..b116d7da1b 100644
--- a/tests/intel/xe_vm.c
+++ b/tests/intel/xe_vm.c
@@ -728,7 +728,7 @@ test_bind_execqueues_independent(int fd, struct drm_xe_engine_class_instance *ec
}
#define BIND_ARRAY_BIND_EXEC_QUEUE_FLAG (0x1 << 0)
-
+#define BIND_ARRAY_ENOBUFS_FLAG (0x1 << 1)
/**
* SUBTEST: bind-array-twice
@@ -741,6 +741,11 @@ test_bind_execqueues_independent(int fd, struct drm_xe_engine_class_instance *ec
* Functionality: bind exec_queues
* Test category: functionality test
*
+ * SUBTEST: bind-array-enobufs
+ * Description: Test bind array which is large enough to trigger -ENOBUFS error
+ * Functionality: bind exec_queues
+ * Test category: functionality test
+ *
* SUBTEST: bind-array-exec_queue-twice
* Description: Test bind array exec_queue twice
* Functionality: bind exec_queues
@@ -753,10 +758,10 @@ test_bind_execqueues_independent(int fd, struct drm_xe_engine_class_instance *ec
*/
static void
test_bind_array(int fd, struct drm_xe_engine_class_instance *eci, int n_execs,
- unsigned int flags)
+ uint64_t addr, size_t bo_size, unsigned int flags)
{
uint32_t vm;
- uint64_t addr = 0x1a0000, base_addr = 0x1a0000;
+ uint64_t base_addr = addr;
struct drm_xe_sync sync[2] = {
{ .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
{ .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
@@ -766,9 +771,7 @@ test_bind_array(int fd, struct drm_xe_engine_class_instance *eci, int n_execs,
.syncs = to_user_pointer(sync),
};
uint32_t exec_queue, bind_exec_queue = 0;
-#define BIND_ARRAY_MAX_N_EXEC 16
- struct drm_xe_vm_bind_op bind_ops[BIND_ARRAY_MAX_N_EXEC] = { };
- size_t bo_size;
+ struct drm_xe_vm_bind_op *bind_ops;
uint32_t bo = 0;
struct {
uint32_t batch[16];
@@ -777,10 +780,11 @@ test_bind_array(int fd, struct drm_xe_engine_class_instance *eci, int n_execs,
} *data;
int i, b;
- igt_assert(n_execs <= BIND_ARRAY_MAX_N_EXEC);
+ bind_ops = malloc(sizeof(*bind_ops) * n_execs);
+ igt_assert(bind_ops);
vm = xe_vm_create(fd, 0, 0);
- bo_size = sizeof(*data) * n_execs;
+ bo_size = bo_size ?: sizeof(*data) * n_execs;
bo_size = xe_bb_size(fd, bo_size);
bo = xe_bo_create(fd, vm, bo_size,
@@ -808,6 +812,22 @@ test_bind_array(int fd, struct drm_xe_engine_class_instance *eci, int n_execs,
}
sync[0].handle = syncobj_create(fd, 0);
+ if (flags & BIND_ARRAY_ENOBUFS_FLAG) {
+ struct xe_cork cork;
+
+ xe_cork_init(fd, eci, &cork);
+
+ sync[1].handle = xe_cork_sync_handle(&cork);
+ sync[1].flags &= ~DRM_XE_SYNC_FLAG_SIGNAL;
+
+ xe_vm_bind_array_enobufs(fd, vm, bind_exec_queue, bind_ops,
+ n_execs, sync, 2);
+ xe_cork_end(&cork);
+ xe_cork_wait_done(&cork);
+ xe_cork_fini(&cork);
+ n_execs = n_execs / 2;
+ }
+
xe_vm_bind_array(fd, vm, bind_exec_queue, bind_ops, n_execs, sync, 1);
addr = base_addr;
@@ -867,6 +887,7 @@ test_bind_array(int fd, struct drm_xe_engine_class_instance *eci, int n_execs,
munmap(data, bo_size);
gem_close(fd, bo);
xe_vm_destroy(fd, vm);
+ free(bind_ops);
}
/**
@@ -2251,20 +2272,25 @@ igt_main
igt_subtest("bind-array-twice")
xe_for_each_engine(fd, hwe)
- test_bind_array(fd, hwe, 2, 0);
+ test_bind_array(fd, hwe, 2, 0x1a0000, 0, 0);
igt_subtest("bind-array-many")
xe_for_each_engine(fd, hwe)
- test_bind_array(fd, hwe, 16, 0);
+ test_bind_array(fd, hwe, 16, 0x1a0000, 0, 0);
+
+ igt_subtest("bind-array-enobufs")
+ xe_for_each_engine(fd, hwe)
+ test_bind_array(fd, hwe, 512, 0x1a0000, SZ_2M,
+ BIND_ARRAY_ENOBUFS_FLAG);
igt_subtest("bind-array-exec_queue-twice")
xe_for_each_engine(fd, hwe)
- test_bind_array(fd, hwe, 2,
+ test_bind_array(fd, hwe, 2, 0x1a0000, 0,
BIND_ARRAY_BIND_EXEC_QUEUE_FLAG);
igt_subtest("bind-array-exec_queue-many")
xe_for_each_engine(fd, hwe)
- test_bind_array(fd, hwe, 16,
+ test_bind_array(fd, hwe, 16, 0x1a0000, 0,
BIND_ARRAY_BIND_EXEC_QUEUE_FLAG);
igt_subtest("bind-array-conflict")
--
2.34.1
More information about the igt-dev
mailing list