[PATCH i-g-t] tests/intel/xe_exec_compute_mode: Long running test for fix duration of time.

Ch, Sai Gowtham sai.gowtham.ch at intel.com
Wed Mar 13 05:19:39 UTC 2024



>-----Original Message-----
>From: Kamil Konieczny <kamil.konieczny at linux.intel.com>
>Sent: Tuesday, March 12, 2024 9:56 PM
>To: igt-dev at lists.freedesktop.org
>Cc: Ch, Sai Gowtham <sai.gowtham.ch at intel.com>
>Subject: Re: [PATCH i-g-t] tests/intel/xe_exec_compute_mode: Long running test
>for fix duration of time.
>
>Hi igt-dev,
>On 2024-03-11 at 12:14:26 +0530, sai.gowtham.ch at intel.com wrote:
>> From: Sai Gowtham Ch <sai.gowtham.ch at intel.com>
>
>Please remove dot from end of subject:
>[PATCH i-g-t] tests/intel/xe_exec_compute_mode: Long running test for fix
>duration of time.
>
>s/time\./time/
>
>>
>> Test to validate LR mode flag for fixed duration of time.
>>
>> Signed-off-by: Sai Gowtham Ch <sai.gowtham.ch at intel.com>
>> ---
>>  tests/intel/xe_exec_compute_mode.c | 75 ++++++++++++++++++++++++++++++
>>  1 file changed, 75 insertions(+)
>>
>> diff --git a/tests/intel/xe_exec_compute_mode.c b/tests/intel/xe_exec_compute_mode.c
>> index 7dad71509..01850d2b7 100644
>> --- a/tests/intel/xe_exec_compute_mode.c
>> +++ b/tests/intel/xe_exec_compute_mode.c
>> @@ -400,6 +400,78 @@ static void non_block(int fd, int expect)
>>  	xe_vm_destroy(fd, vm);
>>  }
>>
>> +#define DURATION_NS ((unsigned int)30000*NSEC_PER_SEC)
>------------------------ ^^^^^^^^^^^^^
>Why not 30ULL * NSEC_PER_SEC
>
>Also maybe change name? What about:
>
>#define DURATION_30_SEC (30ULL * NSEC_PER_SEC)
>
>or just multiply at assignment?
Will make this change, thanks for the suggestion; it does look better.
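
For reference, this is roughly what I plan to use in the respin (the
NSEC_PER_SEC fallback is only there to keep the snippet self-contained):

#ifndef NSEC_PER_SEC
#define NSEC_PER_SEC (1000ULL * 1000 * 1000)
#endif

/* 30 seconds expressed in nanoseconds, computed in 64-bit as suggested. */
#define DURATION_30_SEC (30ULL * NSEC_PER_SEC)

	const uint64_t duration_ns = DURATION_30_SEC;
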
>
>> +/**
>> + * SUBTEST: lr-with-duration
>> + * Description: Stress LR mode workload for 30s.
>> + * Test category: functionality test
>> + */
>> +static void lr_with_duration(int fd)
>> +{
>> +	uint64_t addr = 0x1a0000;
>> +	struct drm_xe_sync sync[1] = {
>> +		{.type = DRM_XE_SYNC_TYPE_USER_FENCE,
>> +		 .flags = DRM_XE_SYNC_FLAG_SIGNAL,
>> +		 .timeline_value = USER_FENCE_VALUE},
>> +	};
>> +	struct drm_xe_exec exec = {
>> +		.num_batch_buffer = 1,
>> +		.num_syncs = 1,
>> +		.syncs = to_user_pointer(&sync),
>> +	};
>> +	struct  {
>> +		struct xe_spin spin;
>> +		uint64_t vm_sync;
>> +		uint32_t data;
>> +		uint64_t exec_sync;
>> +	} *data;
>> +	struct xe_spin_opts spin_opts;
>> +	const uint64_t duration_ns = DURATION_NS;
>> +	struct drm_xe_engine *engine;
>> +	size_t bo_size;
>> +	uint32_t vm;
>> +	uint32_t exec_queue;
>> +	uint32_t bo;
>> +
>> +	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_FLAG_LR_MODE, 0);
>> +	bo_size = sizeof(*data);
>> +	bo_size = xe_bb_size(fd, bo_size);
>> +
>> +	engine = xe_engine(fd, 1);
>> +	bo = xe_bo_create(fd, vm, bo_size, vram_if_possible(fd, engine->instance.gt_id), 0);
>> +
>> +	data = xe_bo_map(fd, bo, bo_size);
>> +	memset(data, 0, bo_size);
>> +	exec_queue = xe_exec_queue_create(fd, vm, &engine->instance, 0);
>> +	spin_opts.addr = addr + (char *)&data[0].spin - (char *)data;
>> +
>> +	sync[0].addr = to_user_pointer(&data[0].vm_sync);
>> +	xe_vm_bind_async(fd, vm, engine->instance.gt_id, bo, 0,
>> +				spin_opts.addr, bo_size, sync, 1);
>> +	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, ONE_SEC);
>> +	data[0].vm_sync = 0;
>> +
>> +	spin_opts.ctx_ticks = duration_to_ctx_ticks(fd, 0, duration_ns);
>> +	xe_spin_init(&data[0].spin, &spin_opts);
>> +	sync[0].addr = addr + (char *)&data[0].exec_sync - (char *)data;
>> +	exec.exec_queue_id = exec_queue;
>> +	exec.address = spin_opts.addr;
>> +	xe_exec(fd, &exec);
>> +	xe_spin_wait_started(&data[0].spin);
>> +
>> +	xe_spin_end(&data[0].spin);
>> +	xe_wait_ufence(fd, &data[0].exec_sync, USER_FENCE_VALUE, 0, ONE_SEC);
>> +
>> +	sync[0].addr = to_user_pointer(&data[0].vm_sync);
>> +	xe_vm_unbind_async(fd, vm, 0, 0, addr, bo_size, sync, 1);
>> +	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, 0, ONE_SEC);
>> +	munmap(data, bo_size);
>> +	gem_close(fd, bo);
>> +
>> +	xe_exec_queue_destroy(fd, exec_queue);
>> +	xe_vm_destroy(fd, vm);
>> +}
>> +
>>  igt_main
>>  {
>>  	struct drm_xe_engine_class_instance *hwe;
>> @@ -460,6 +532,9 @@ igt_main
>>  	igt_subtest("non-blocking")
>>  		non_block(fd, EWOULDBLOCK);
>>
>> +	igt_subtest("lr-with-duration")
>> +		lr_with_duration(fd);
>> +
>
>How are you verifing it took 30seconds to run?
>Also, how are you verifing that it was running during that period?
>
We have a conditional batch buffer in the xe_spin_init code which breaks the spin loop once the elapsed time exceeds ctx_ticks.

So the duration handling is done by xe_spin_init itself.
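
To make that clearer, here is a rough CPU-side model of the check the spinner
performs (illustrative only; the real loop is the batch emitted by
xe_spin_init(), and the helper below is made up just for this sketch):

#include <stdbool.h>
#include <stdint.h>

/* Model of the spinner's exit condition: stop when the test has called
 * xe_spin_end() (end_requested) or when the requested number of context
 * ticks has elapsed since the spin started. */
static bool spin_should_stop(uint64_t start_ticks, uint64_t now_ticks,
			     uint64_t ctx_ticks, bool end_requested)
{
	if (end_requested)
		return true;

	return ctx_ticks && (now_ticks - start_ticks) >= ctx_ticks;
}

In the subtest, ctx_ticks comes from duration_to_ctx_ticks(fd, 0, duration_ns),
which is what bounds how long the batch is allowed to spin.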

Thanks,
Gowtham
>Regards,
>Kamil
>
>>
>>  	igt_fixture
>>  		drm_close_driver(fd);
>> --
>> 2.39.1
>>

