[i-g-t 2/2] tests/intel/xe_exec_fault_mode: Test scratch page under fault mode
Matthew Brost
matthew.brost at intel.com
Mon Feb 10 17:22:14 UTC 2025
On Tue, Feb 04, 2025 at 01:17:42PM -0500, Oak Zeng wrote:
> On certain HW (such as lunarlake and battlemage), the driver now allows
> the scratch page to be enabled under fault mode. Test this functionality.
>
> Signed-off-by: Oak Zeng <oak.zeng at intel.com>
> ---
> tests/intel/xe_exec_fault_mode.c | 128 +++++++++++++++++++------------
> 1 file changed, 79 insertions(+), 49 deletions(-)
>
> diff --git a/tests/intel/xe_exec_fault_mode.c b/tests/intel/xe_exec_fault_mode.c
> index ae40e099b..935e6c044 100644
> --- a/tests/intel/xe_exec_fault_mode.c
> +++ b/tests/intel/xe_exec_fault_mode.c
> @@ -35,6 +35,7 @@
> #define INVALID_FAULT (0x1 << 7)
> #define INVALID_VA (0x1 << 8)
> #define ENABLE_SCRATCH (0x1 << 9)
> +#define ENABLE_FAULT (0x1 << 10)
>
> /**
> * SUBTEST: invalid-va
> @@ -45,6 +46,10 @@
> * Description: Access invalid va without pageafault with scratch page enabled.
> * Test category: functionality test
> *
> + * SUBTEST: scratch-fault
> + * Description: Enable scratch page and page fault at the same time.
> + * Test category: functionality test
> + *
> * SUBTEST: once-%s
> * Description: Run %arg[1] fault mode test only once
> * Test category: functionality test
> @@ -115,6 +120,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> int n_exec_queues, int n_execs, unsigned int flags)
> {
> uint32_t vm;
> + uint32_t vm_flags = DRM_XE_VM_CREATE_FLAG_LR_MODE;
> uint64_t addr = 0x1a0000;
> uint64_t sync_addr = 0x101a0000;
> #define USER_FENCE_VALUE 0xdeadbeefdeadbeefull
> @@ -145,11 +151,11 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> igt_assert_lte(n_exec_queues, MAX_N_EXEC_QUEUES);
>
> if (flags & ENABLE_SCRATCH)
> - vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
> - DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE, 0);
> - else
> - vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE |
> - DRM_XE_VM_CREATE_FLAG_FAULT_MODE, 0);
> + vm_flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
> + if (flags & ENABLE_FAULT)
> + vm_flags |= DRM_XE_VM_CREATE_FLAG_FAULT_MODE;
This is always set, right? I wouldn't add this new flag then.
> + vm = xe_vm_create(fd, vm_flags, 0);
> +
> bo_size = sizeof(*data) * n_execs;
> bo_size = xe_bb_size(fd, bo_size);
> sync_size = sizeof(*exec_sync) * n_execs;
> @@ -405,59 +411,76 @@ igt_main
> const char *name;
> unsigned int flags;
> } sections[] = {
> - { "basic", 0 },
> - { "userptr", USERPTR },
> - { "rebind", REBIND },
> - { "userptr-rebind", USERPTR | REBIND },
> - { "userptr-invalidate", USERPTR | INVALIDATE },
> - { "userptr-invalidate-race", USERPTR | INVALIDATE | RACE },
> - { "bindexecqueue", BIND_EXEC_QUEUE },
> - { "bindexecqueue-userptr", BIND_EXEC_QUEUE | USERPTR },
> - { "bindexecqueue-rebind", BIND_EXEC_QUEUE | REBIND },
> + { "basic", ENABLE_FAULT },
> + { "userptr", USERPTR | ENABLE_FAULT },
> + { "rebind", REBIND | ENABLE_FAULT },
> + { "userptr-rebind", USERPTR | REBIND | ENABLE_FAULT },
> + { "userptr-invalidate", USERPTR | INVALIDATE | ENABLE_FAULT },
> + { "userptr-invalidate-race", USERPTR | INVALIDATE | RACE |
> + ENABLE_FAULT },
> + { "bindexecqueue", BIND_EXEC_QUEUE | ENABLE_FAULT },
> + { "bindexecqueue-userptr", BIND_EXEC_QUEUE | USERPTR |
> + ENABLE_FAULT },
> + { "bindexecqueue-rebind", BIND_EXEC_QUEUE | REBIND |
> + ENABLE_FAULT },
> { "bindexecqueue-userptr-rebind", BIND_EXEC_QUEUE | USERPTR |
> - REBIND },
> - { "bindexecqueue-userptr-invalidate", BIND_EXEC_QUEUE | USERPTR |
> - INVALIDATE },
> - { "bindexecqueue-userptr-invalidate-race", BIND_EXEC_QUEUE | USERPTR |
> - INVALIDATE | RACE },
> - { "basic-imm", IMMEDIATE },
> - { "userptr-imm", IMMEDIATE | USERPTR },
> - { "rebind-imm", IMMEDIATE | REBIND },
> - { "userptr-rebind-imm", IMMEDIATE | USERPTR | REBIND },
> - { "userptr-invalidate-imm", IMMEDIATE | USERPTR | INVALIDATE },
> + REBIND | ENABLE_FAULT },
> + { "bindexecqueue-userptr-invalidate", BIND_EXEC_QUEUE |
> + USERPTR | INVALIDATE | ENABLE_FAULT },
> + { "bindexecqueue-userptr-invalidate-race", BIND_EXEC_QUEUE |
> + USERPTR | INVALIDATE | RACE | ENABLE_FAULT },
> + { "basic-imm", IMMEDIATE | ENABLE_FAULT },
> + { "userptr-imm", IMMEDIATE | USERPTR | ENABLE_FAULT },
> + { "rebind-imm", IMMEDIATE | REBIND | ENABLE_FAULT },
> + { "userptr-rebind-imm", IMMEDIATE | USERPTR | REBIND |
> + ENABLE_FAULT },
> + { "userptr-invalidate-imm", IMMEDIATE | USERPTR | INVALIDATE |
> + ENABLE_FAULT },
> { "userptr-invalidate-race-imm", IMMEDIATE | USERPTR |
> - INVALIDATE | RACE },
> - { "bindexecqueue-imm", IMMEDIATE | BIND_EXEC_QUEUE },
> - { "bindexecqueue-userptr-imm", IMMEDIATE | BIND_EXEC_QUEUE | USERPTR },
> - { "bindexecqueue-rebind-imm", IMMEDIATE | BIND_EXEC_QUEUE | REBIND },
> - { "bindexecqueue-userptr-rebind-imm", IMMEDIATE | BIND_EXEC_QUEUE |
> - USERPTR | REBIND },
> + INVALIDATE | RACE | ENABLE_FAULT },
> + { "bindexecqueue-imm", IMMEDIATE | BIND_EXEC_QUEUE |
> + ENABLE_FAULT },
> + { "bindexecqueue-userptr-imm", IMMEDIATE | BIND_EXEC_QUEUE |
> + USERPTR | ENABLE_FAULT },
> + { "bindexecqueue-rebind-imm", IMMEDIATE | BIND_EXEC_QUEUE |
> + REBIND | ENABLE_FAULT },
> + { "bindexecqueue-userptr-rebind-imm", IMMEDIATE |
> + BIND_EXEC_QUEUE | USERPTR | REBIND | ENABLE_FAULT },
> { "bindexecqueue-userptr-invalidate-imm", IMMEDIATE | BIND_EXEC_QUEUE |
> - USERPTR | INVALIDATE },
> + USERPTR | INVALIDATE | ENABLE_FAULT },
> { "bindexecqueue-userptr-invalidate-race-imm", IMMEDIATE |
> - BIND_EXEC_QUEUE | USERPTR | INVALIDATE | RACE },
> -
> - { "basic-prefetch", PREFETCH },
> - { "userptr-prefetch", PREFETCH | USERPTR },
> - { "rebind-prefetch", PREFETCH | REBIND },
> - { "userptr-rebind-prefetch", PREFETCH | USERPTR | REBIND },
> - { "userptr-invalidate-prefetch", PREFETCH | USERPTR | INVALIDATE },
> + BIND_EXEC_QUEUE | USERPTR | INVALIDATE | RACE |
> + ENABLE_FAULT },
> +
> + { "basic-prefetch", PREFETCH | ENABLE_FAULT },
> + { "userptr-prefetch", PREFETCH | USERPTR | ENABLE_FAULT },
> + { "rebind-prefetch", PREFETCH | REBIND | ENABLE_FAULT },
> + { "userptr-rebind-prefetch", PREFETCH | USERPTR | REBIND |
> + ENABLE_FAULT },
> + { "userptr-invalidate-prefetch", PREFETCH | USERPTR |
> + INVALIDATE | ENABLE_FAULT },
> { "userptr-invalidate-race-prefetch", PREFETCH | USERPTR |
> - INVALIDATE | RACE },
> - { "bindexecqueue-prefetch", PREFETCH | BIND_EXEC_QUEUE },
> - { "bindexecqueue-userptr-prefetch", PREFETCH | BIND_EXEC_QUEUE | USERPTR },
> - { "bindexecqueue-rebind-prefetch", PREFETCH | BIND_EXEC_QUEUE | REBIND },
> - { "bindexecqueue-userptr-rebind-prefetch", PREFETCH | BIND_EXEC_QUEUE |
> - USERPTR | REBIND },
> - { "bindexecqueue-userptr-invalidate-prefetch", PREFETCH | BIND_EXEC_QUEUE |
> - USERPTR | INVALIDATE },
> + INVALIDATE | RACE | ENABLE_FAULT },
> + { "bindexecqueue-prefetch", PREFETCH | BIND_EXEC_QUEUE |
> + ENABLE_FAULT },
> + { "bindexecqueue-userptr-prefetch", PREFETCH | BIND_EXEC_QUEUE |
> + USERPTR | ENABLE_FAULT },
> + { "bindexecqueue-rebind-prefetch", PREFETCH | BIND_EXEC_QUEUE |
> + REBIND | ENABLE_FAULT },
> + { "bindexecqueue-userptr-rebind-prefetch", PREFETCH |
> + BIND_EXEC_QUEUE | USERPTR | REBIND | ENABLE_FAULT },
> + { "bindexecqueue-userptr-invalidate-prefetch", PREFETCH |
> + BIND_EXEC_QUEUE | USERPTR | INVALIDATE | ENABLE_FAULT },
> { "bindexecqueue-userptr-invalidate-race-prefetch", PREFETCH |
> - BIND_EXEC_QUEUE | USERPTR | INVALIDATE | RACE },
> - { "invalid-fault", INVALID_FAULT },
> - { "invalid-userptr-fault", INVALID_FAULT | USERPTR },
> + BIND_EXEC_QUEUE | USERPTR | INVALIDATE | RACE |
> + ENABLE_FAULT },
> + { "invalid-fault", INVALID_FAULT | ENABLE_FAULT },
> + { "invalid-userptr-fault", INVALID_FAULT | USERPTR |
> + ENABLE_FAULT },
> { NULL },
> };
> int fd;
> + uint16_t dev_id;
>
> igt_fixture {
> struct timespec tv = {};
> @@ -466,6 +489,7 @@ igt_main
> int timeout = igt_run_in_simulation() ? 20 : 2;
>
> fd = drm_open_driver(DRIVER_XE);
> + dev_id = intel_get_drm_devid(fd);
> do {
> if (ret)
> usleep(5000);
> @@ -508,6 +532,12 @@ igt_main
> xe_for_each_engine(fd, hwe)
> test_exec(fd, hwe, 1, 1, ENABLE_SCRATCH | INVALID_VA);
>
> + igt_subtest("scratch-fault") {
> + igt_skip_on(!IS_LUNARLAKE(dev_id) && !IS_BATTLEMAGE(dev_id));
> + xe_for_each_engine(fd, hwe)
> + test_exec(fd, hwe, 1, 1, ENABLE_SCRATCH | ENABLE_FAULT);
This is a good start but we really need this section to roughly do
something like this to get good coverage:
- Run a batch with a scratch page access, wait for batch to complete
- Bind scratch page access /w immediate cleared
- Run a batch with access to newly bound address, wait for batch to
complete, verify access worked
'access' here likely means a DW (dword) write.
Matt
> + }
> +
> igt_fixture {
> drm_close_driver(fd);
> }
> --
> 2.26.3
>
More information about the igt-dev
mailing list