[PATCH] xe_exec_compute_mode: Add malloc-ufence test
Dandamudi, Priyanka
priyanka.dandamudi at intel.com
Tue Mar 5 04:50:50 UTC 2024
> -----Original Message-----
> From: igt-dev <igt-dev-bounces at lists.freedesktop.org> On Behalf Of Matthew
> Brost
> Sent: Tuesday, February 27, 2024 8:10 AM
> To: igt-dev at lists.freedesktop.org
> Cc: Brost, Matthew <matthew.brost at intel.com>
> Subject: [PATCH] xe_exec_compute_mode: Add malloc-ufence test
>
> The get user page ufence path is not triggered by the test, as the ufence
> memory is a mapped BO. To trigger this path, use a malloc'd address.
>
> Signed-off-by: Matthew Brost <matthew.brost at intel.com>
> ---
> tests/intel/xe_exec_compute_mode.c | 28 ++++++++++++++++++++--------
> 1 file changed, 20 insertions(+), 8 deletions(-)
>
> diff --git a/tests/intel/xe_exec_compute_mode.c b/tests/intel/xe_exec_compute_mode.c
> index 7dad715093..c0cddbc02b 100644
> --- a/tests/intel/xe_exec_compute_mode.c
> +++ b/tests/intel/xe_exec_compute_mode.c
> @@ -31,6 +31,7 @@
> #define BIND_EXECQUEUE (0x1 << 4)
> #define VM_FOR_BO (0x1 << 5)
> #define EXEC_QUEUE_EARLY (0x1 << 6)
> +#define MALLOC_USER_FENCE (0x1 << 7)
>
> /**
> * SUBTEST: twice-%s
> @@ -48,6 +49,7 @@
> * arg[1]:
> *
> * @basic: basic
> + * @malloc-ufence: malloc user fence
> * @preempt-fence-early: preempt fence early
> * @userptr: userptr
> * @rebind: rebind
> @@ -110,6 +112,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> uint64_t exec_sync;
> uint32_t data;
> } *data;
> + uint64_t *vm_sync;
> int i, j, b;
> int map_fd = -1;
> int64_t fence_timeout;
> @@ -149,6 +152,12 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> }
> memset(data, 0, bo_size);
>
> + if (flags & MALLOC_USER_FENCE)
> + vm_sync = malloc(sizeof(*vm_sync));
> + else
> + vm_sync = &data[0].vm_sync;
> + igt_assert(vm_sync);
> +
> for (i = 0; !(flags & EXEC_QUEUE_EARLY) && i < n_exec_queues; i++) {
> exec_queues[i] = xe_exec_queue_create(fd, vm, eci, 0);
> if (flags & BIND_EXECQUEUE)
> @@ -158,7 +167,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> bind_exec_queues[i] = 0;
> };
>
> - sync[0].addr = to_user_pointer(&data[0].vm_sync);
> + sync[0].addr = to_user_pointer(vm_sync);
> if (bo)
> xe_vm_bind_async(fd, vm, bind_exec_queues[0], bo, 0, addr,
> bo_size, sync, 1);
> @@ -171,9 +180,9 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
>
> fence_timeout = igt_run_in_simulation() ? HUNDRED_SEC : ONE_SEC;
>
> - xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE,
> + xe_wait_ufence(fd, vm_sync, USER_FENCE_VALUE,
> bind_exec_queues[0], fence_timeout);
> - data[0].vm_sync = 0;
> + *vm_sync = 0;
>
> for (i = 0; i < n_execs; i++) {
> uint64_t batch_offset = (char *)&data[i].batch - (char *)data;
> @@ -202,7 +211,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> xe_vm_unbind_async(fd, vm, bind_exec_queues[e], 0,
> addr, bo_size, NULL, 0);
>
> - sync[0].addr = to_user_pointer(&data[0].vm_sync);
> + sync[0].addr = to_user_pointer(vm_sync);
> addr += bo_size;
> if (bo)
> xe_vm_bind_async(fd, vm, bind_exec_queues[e], bo,
> @@ -213,9 +222,9 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> to_user_pointer(data),
> addr, bo_size, sync,
> 1);
> - xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE,
> + xe_wait_ufence(fd, vm_sync, USER_FENCE_VALUE,
> bind_exec_queues[e], fence_timeout);
> - data[0].vm_sync = 0;
> + *vm_sync = 0;
> }
>
> if (flags & INVALIDATE && i + 1 != n_execs) {
> @@ -264,10 +273,10 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> if (flags & INVALIDATE)
> usleep(250000);
>
> - sync[0].addr = to_user_pointer(&data[0].vm_sync);
> + sync[0].addr = to_user_pointer(vm_sync);
> xe_vm_unbind_async(fd, vm, bind_exec_queues[0], 0, addr, bo_size,
> sync, 1);
> - xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE,
> + xe_wait_ufence(fd, vm_sync, USER_FENCE_VALUE,
> bind_exec_queues[0], fence_timeout);
>
> for (i = j; i < n_execs; i++)
> @@ -288,6 +297,8 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> xe_vm_destroy(fd, vm);
> if (map_fd != -1)
> close(map_fd);
> + if (flags & MALLOC_USER_FENCE)
> + free(vm_sync);
> }
>
> /**
> @@ -408,6 +419,7 @@ igt_main
> unsigned int flags;
> } sections[] = {
> { "basic", 0 },
> + { "malloc-ufence", MALLOC_USER_FENCE },
> { "preempt-fence-early", VM_FOR_BO | EXEC_QUEUE_EARLY
> },
> { "userptr", USERPTR },
> { "rebind", REBIND },
> --
> 2.34.1
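As I read it, the new flag only changes where the bind user fence lives.
A minimal sketch of that idea, for anyone skimming the archive; the
exec_data struct and pick_vm_sync() helper below are illustrative names of
mine, not something from the patch:

#include <stdint.h>
#include <stdlib.h>

/* Simplified stand-in for the per-exec data block used by the test. */
struct exec_data {
	uint64_t vm_sync;
	uint64_t exec_sync;
	uint32_t data;
};

#define MALLOC_USER_FENCE	(0x1 << 7)

/*
 * Choose the backing store for the bind user fence. Heap memory is not
 * part of any mapped BO, so when the kernel signals the fence it has to
 * go through the get-user-pages path; &data[0].vm_sync lives in the
 * mapped BO (or userptr) and so does not exercise that path.
 */
static uint64_t *pick_vm_sync(struct exec_data *data, unsigned int flags)
{
	if (flags & MALLOC_USER_FENCE)
		return malloc(sizeof(uint64_t));

	return &data[0].vm_sync;
}

The caller then passes to_user_pointer(vm_sync) as sync[0].addr for the
binds and frees the malloc'd copy at the end, which is what the patch does.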
Change LGTM
Reviewed-by: Priyanka Dandamudi <Priyanka.dandamudi at intel.com>
Could you check the build once? It looks like there is a failure.
--Priyanka