[PATCH i-g-t] tests/intel/xe_exec_store: fix sync usage

Matthew Brost matthew.brost at intel.com
Mon Apr 8 17:55:24 UTC 2024


On Mon, Apr 08, 2024 at 06:41:13PM +0100, Matthew Auld wrote:
> If using async binds, it looks like an in-fence for the exec is
> needed to ensure the exec happens only after the out-fence from the
> binds has signalled. Therefore we need to unset
> DRM_XE_SYNC_FLAG_SIGNAL after doing the binds, but before the exec;
> otherwise the sync is instead treated as an out-fence, and the binds
> can then happen after the exec, leading to various failures. In
> addition, it looks like async unbind should be waited on before
> tearing down the queue/vm which has the bind engine attached, since
> the scheduler timeout is immediately set to zero on destroy, which
> might then trigger job timeouts. However, it also looks fine to
> simply destroy the object and leave the KMD to unbind everything
> itself. Update the various subtests here to conform to this.
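
A minimal sketch of that ordering, reusing the same IGT helpers as the
patch (fd, vm, bind_engine, bo, addr, bo_size and exec assumed set up
as in basic_inst(); error handling omitted):

    struct drm_xe_sync sync[2] = {
            { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
            { .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
    };

    sync[0].handle = syncobj_create(fd, 0);
    sync[1].handle = syncobj_create(fd, 0);

    /* sync[0] is the bind's out-fence: it signals once the async bind completes. */
    xe_vm_bind_async(fd, vm, bind_engine, bo, 0, addr, bo_size, sync, 1);

    /*
     * Flip sync[0] into an in-fence so the exec waits for the bind;
     * sync[1] stays an out-fence and signals when the exec completes.
     */
    sync[0].flags &= ~DRM_XE_SYNC_FLAG_SIGNAL;
    sync[1].flags |= DRM_XE_SYNC_FLAG_SIGNAL;

    exec.num_syncs = 2;
    exec.syncs = to_user_pointer(sync);
    xe_exec(fd, &exec);

    /* Wait on the exec's out-fence before reading back results. */
    igt_assert(syncobj_wait(fd, &sync[1].handle, 1, INT64_MAX, 0, NULL));
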
> 
> In the case of the persistent subtest it looks simpler to use a
> synchronous vm_bind, since we don't have another sync object at hand
> for the in-fence, and we don't seem to need a dedicated bind engine.
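
Concretely, something like the following (matching the last hunk
below; sd_batch, addr and batch_size as in persistent()):

    /*
     * Synchronous bind: the ioctl returns only once the bind has
     * completed, so no in-fence is needed for the subsequent exec.
     */
    xe_vm_bind_sync(fd, vm, sd_batch, 0, addr, batch_size);
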
> 
> Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/1270
> Signed-off-by: Matthew Auld <matthew.auld at intel.com>
> Cc: Matthew Brost <matthew.brost at intel.com>

Changes LGTM, but IMO we could just delete this test, as I'm unsure
what coverage it provides.

Anyways:
Reviewed-by: Matthew Brost <matthew.brost at intel.com>

> ---
>  tests/intel/xe_exec_store.c | 22 ++++++++++++----------
>  1 file changed, 12 insertions(+), 10 deletions(-)
> 
> diff --git a/tests/intel/xe_exec_store.c b/tests/intel/xe_exec_store.c
> index c57bcb852..728ce826b 100644
> --- a/tests/intel/xe_exec_store.c
> +++ b/tests/intel/xe_exec_store.c
> @@ -102,13 +102,13 @@ static void persistance_batch(struct data *data, uint64_t addr)
>   */
>  static void basic_inst(int fd, int inst_type, struct drm_xe_engine_class_instance *eci)
>  {
> -	struct drm_xe_sync sync = {
> -		.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
> -		.flags = DRM_XE_SYNC_FLAG_SIGNAL,
> +	struct drm_xe_sync sync[2] = {
> +		{ .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, },
> +		{ .type = DRM_XE_SYNC_TYPE_SYNCOBJ, .flags = DRM_XE_SYNC_FLAG_SIGNAL, }
>  	};
>  	struct drm_xe_exec exec = {
>  		.num_batch_buffer = 1,
> -		.num_syncs = 1,
> +		.num_syncs = 2,
>  		.syncs = to_user_pointer(&sync),
>  	};
>  	struct data *data;
> @@ -122,7 +122,8 @@ static void basic_inst(int fd, int inst_type, struct drm_xe_engine_class_instanc
>  	uint32_t bo = 0;
>  
>  	syncobj = syncobj_create(fd, 0);
> -	sync.handle = syncobj;
> +	sync[0].handle = syncobj_create(fd, 0);
> +	sync[1].handle = syncobj;
>  
>  	vm = xe_vm_create(fd, 0, 0);
>  	bo_size = sizeof(*data);
> @@ -134,7 +135,7 @@ static void basic_inst(int fd, int inst_type, struct drm_xe_engine_class_instanc
>  
>  	exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
>  	bind_engine = xe_bind_exec_queue_create(fd, vm, 0);
> -	xe_vm_bind_async(fd, vm, bind_engine, bo, 0, addr, bo_size, &sync, 1);
> +	xe_vm_bind_async(fd, vm, bind_engine, bo, 0, addr, bo_size, sync, 1);
>  	data = xe_bo_map(fd, bo, bo_size);
>  
>  	if (inst_type == STORE)
> @@ -149,12 +150,14 @@ static void basic_inst(int fd, int inst_type, struct drm_xe_engine_class_instanc
>  
>  	exec.exec_queue_id = exec_queue;
>  	exec.address = data->addr;
> -	sync.flags &= DRM_XE_SYNC_FLAG_SIGNAL;
> +	sync[0].flags &= ~DRM_XE_SYNC_FLAG_SIGNAL;
> +	sync[1].flags |= DRM_XE_SYNC_FLAG_SIGNAL;
>  	xe_exec(fd, &exec);
>  
>  	igt_assert(syncobj_wait(fd, &syncobj, 1, INT64_MAX, 0, NULL));
>  	igt_assert_eq(data->data, value);
>  
> +	syncobj_destroy(fd, sync[0].handle);
>  	syncobj_destroy(fd, syncobj);
>  	munmap(data, bo_size);
>  	gem_close(fd, bo);
> @@ -232,7 +235,7 @@ static void store_cachelines(int fd, struct drm_xe_engine_class_instance *eci,
>  		batch_map[b++] = value[n];
>  	}
>  	batch_map[b++] = MI_BATCH_BUFFER_END;
> -	sync[0].flags &= DRM_XE_SYNC_FLAG_SIGNAL;
> +	sync[0].flags &= ~DRM_XE_SYNC_FLAG_SIGNAL;
>  	sync[1].flags |= DRM_XE_SYNC_FLAG_SIGNAL;
>  	sync[1].handle = syncobjs;
>  	exec.exec_queue_id = exec_queues;
> @@ -250,7 +253,6 @@ static void store_cachelines(int fd, struct drm_xe_engine_class_instance *eci,
>  
>  	for (i = 0; i < count; i++) {
>  		munmap(bo_map[i], bo_size);
> -		xe_vm_unbind_async(fd, vm, 0, 0, dst_offset[i], bo_size, sync, 1);
>  		gem_close(fd, bo[i]);
>  	}
>  
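
If the explicit unbind were kept instead, a rough sketch of the
alternative the commit message mentions (inside the teardown loop
above: wait on the async unbind's out-fence before the queue/vm are
destroyed; per-iteration syncobj reset omitted):

    /* Re-arm sync[0] as the unbind's out-fence... */
    sync[0].flags |= DRM_XE_SYNC_FLAG_SIGNAL;
    xe_vm_unbind_async(fd, vm, 0, 0, dst_offset[i], bo_size, sync, 1);
    /* ...and wait for the unbind to finish before teardown. */
    igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
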
> @@ -300,7 +302,7 @@ static void persistent(int fd)
>  			      vram_if_possible(fd, engine->instance.gt_id),
>  			      DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM);
>  
> -	xe_vm_bind_async(fd, vm, 0, sd_batch, 0, addr, batch_size, &sync, 1);
> +	xe_vm_bind_sync(fd, vm, sd_batch, 0, addr, batch_size);
>  	sd_data = xe_bo_map(fd, sd_batch, batch_size);
>  	prt_data = xe_bo_map(fd, prt_batch, batch_size);
>  
> -- 
> 2.44.0
> 

