[PATCH i-g-t] tests/intel/xe_exec_compute_mode: Stress test to validate long running workload on compute mode
Dandamudi, Priyanka
priyanka.dandamudi at intel.com
Fri Feb 23 08:04:37 UTC 2024
> -----Original Message-----
> From: igt-dev <igt-dev-bounces at lists.freedesktop.org> On Behalf Of
> sai.gowtham.ch at intel.com
> Sent: Friday, February 23, 2024 12:05 PM
> To: igt-dev at lists.freedesktop.org; Kempczynski, Zbigniew
> <zbigniew.kempczynski at intel.com>; Kumar, Janga Rahul
> <janga.rahul.kumar at intel.com>; Ch, Sai Gowtham
> <sai.gowtham.ch at intel.com>
> Subject: [PATCH i-g-t] tests/intel/xe_exec_compute_mode: Stress test to
> validate long running workload on compute mode
>
> From: Sai Gowtham Ch <sai.gowtham.ch at intel.com>
>
> Test validates long-running (LR) compute mode by submitting a spinner
> that runs for 30 seconds.
>
> Cc: Zbigniew Kempczyński <zbigniew.kempczynski at intel.com>
> Cc: Janga Rahul Kumar <janga.rahul.kumar at intel.com>
> Signed-off-by: Sai Gowtham Ch <sai.gowtham.ch at intel.com>
> ---
>  tests/intel/xe_exec_compute_mode.c | 73 ++++++++++++++++++++++++++++++
>  1 file changed, 73 insertions(+)
>
> diff --git a/tests/intel/xe_exec_compute_mode.c b/tests/intel/xe_exec_compute_mode.c
> index 7dad71509..0a71bfdc5 100644
> --- a/tests/intel/xe_exec_compute_mode.c
> +++ b/tests/intel/xe_exec_compute_mode.c
> @@ -400,6 +400,76 @@ static void non_block(int fd, int expect)
> 	xe_vm_destroy(fd, vm);
> }
>
> +/**
> + * SUBTEST: lr-stress
> + * Description: Stress LR mode workload for 30s.
> + * Test category: functionality test
> + */
> +static void lr_stress(int fd)
> +{
> +	uint64_t addr = 0x1a0000;
> +	struct drm_xe_sync sync[1] = {
> +		{.type = DRM_XE_SYNC_TYPE_USER_FENCE,
> +		 .flags = DRM_XE_SYNC_FLAG_SIGNAL,
> +		 .timeline_value = USER_FENCE_VALUE},
> +	};
> +	struct drm_xe_exec exec = {
> +		.num_batch_buffer = 1,
> +		.num_syncs = 1,
> +		.syncs = to_user_pointer(&sync),
> +	};
> +	struct {
> +		struct xe_spin spin;
> +		uint64_t vm_sync;
> +		uint32_t data;
> +		uint64_t exec_sync;
> +	} *data;
> +	struct xe_spin_opts spin_opts = { .preempt = false };
> +	struct drm_xe_engine *engine;
> +	size_t bo_size;
> +	uint32_t vm;
> +	uint32_t exec_queue;
> +	uint32_t bo;
> +
> +	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE, 0);
> +	bo_size = sizeof(*data);
> +	bo_size = xe_bb_size(fd, bo_size);
> +
> +	engine = xe_engine(fd, 1);
> +	bo = xe_bo_create(fd, vm, bo_size,
> +			  vram_if_possible(fd, engine->instance.gt_id), 0);
> +
> +	data = xe_bo_map(fd, bo, bo_size);
> +	memset(data, 0, bo_size);
> +	exec_queue = xe_exec_queue_create(fd, vm, &engine->instance, 0);
> +
> +	sync[0].addr = to_user_pointer(&data[0].vm_sync);
> +	xe_vm_bind_async(fd, vm, engine->instance.gt_id, bo, 0,
> +			 addr, bo_size, sync, 1);
> +	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, ONE_SEC);
> +	data[0].vm_sync = 0;
> +
> +	spin_opts.addr = addr + (char *)&data[0].spin - (char *)data;
> +	xe_spin_init(&data[0].spin, &spin_opts);
> +	sync[0].addr = addr + (char *)&data[0].exec_sync - (char *)data;
> +	exec.exec_queue_id = exec_queue;
> +	exec.address = spin_opts.addr;
> +	xe_exec(fd, &exec);
> +	xe_spin_wait_started(&data[0].spin);
> +	sleep(30);

Can you please check this? A blocking sleep() is generally not
preferable in IGT; something like igt_until_timeout(20) would be a
better fit here.
--Priyanka
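
A minimal sketch of what that could look like, assuming the
igt_until_timeout() helper from lib/igt_aux.h (the 100ms poll interval
is illustrative, not part of the original patch):

	xe_spin_wait_started(&data[0].spin);

	/* Keep the LR spinner alive for the whole window without a
	 * blocking sleep(); the loop body re-runs until the timeout
	 * elapses, so the test keeps control between polls instead of
	 * blocking for the full duration. */
	igt_until_timeout(20)
		usleep(100 * 1000);

	xe_spin_end(&data[0].spin);
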
> +	xe_spin_end(&data[0].spin);
> +	xe_wait_ufence(fd, &data[0].exec_sync, USER_FENCE_VALUE, 0, ONE_SEC);
> +
> +	sync[0].addr = to_user_pointer(&data[0].vm_sync);
> +	xe_vm_unbind_async(fd, vm, 0, 0, addr, bo_size, sync, 1);
> +	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, ONE_SEC);
> +	munmap(data, bo_size);
> +	gem_close(fd, bo);
> +
> +	xe_exec_queue_destroy(fd, exec_queue);
> +	xe_vm_destroy(fd, vm);
> +}
> +
> igt_main
> {
> 	struct drm_xe_engine_class_instance *hwe;
> @@ -460,6 +530,9 @@ igt_main
> 	igt_subtest("non-blocking")
> 		non_block(fd, EWOULDBLOCK);
>
> +	igt_subtest("lr-stress")
> +		lr_stress(fd);
> +
>
> 	igt_fixture
> 		drm_close_driver(fd);
> --
> 2.39.1