[igt-dev] [PATCH 1/1] xe: Rename engine to exec_queue

Rodrigo Vivi rodrigo.vivi@intel.com
Wed Aug 2 16:22:25 UTC 2023


On Wed, Aug 02, 2023 at 01:58:14PM +0000, Matthew Brost wrote:
> On Wed, Aug 02, 2023 at 11:07:50AM +0000, Francois Dugast wrote:
> > Engine was inappropriately used to refer to execution queues,
> > which also created some confusion with hardware engines. Update
> > to the kernel header where this was fixed, and update the
> > variable names in tests accordingly.
> > 
> > Signed-off-by: Francois Dugast <francois.dugast@intel.com>
> 
> LGTM.
> 
> Reviewed-by: Matthew Brost <matthew.brost@intel.com>
> 
> > ---
> >  include/drm-uapi/xe_drm.h                | 104 +++++-----
> >  lib/igt_fb.c                             |   4 +-
> >  lib/intel_batchbuffer.c                  |   4 +-
> >  lib/intel_ctx.c                          |   2 +-
> >  lib/xe/xe_compute.c                      |   8 +-
> >  lib/xe/xe_ioctl.c                        |  80 ++++----
> >  lib/xe/xe_ioctl.h                        |  32 ++--
> >  lib/xe/xe_spin.c                         |  24 +--
> >  lib/xe/xe_spin.h                         |   2 +-
> >  tests/intel-ci/xe-fast-feedback.testlist |  60 +++---
> >  tests/xe/xe_ccs.c                        |  16 +-
> >  tests/xe/xe_create.c                     |  60 +++---
> >  tests/xe/xe_dma_buf_sync.c               |   8 +-
> >  tests/xe/xe_evict.c                      | 234 +++++++++++------------
> >  tests/xe/xe_exec_balancer.c              |  96 +++++-----
> >  tests/xe/xe_exec_basic.c                 | 116 +++++------
> >  tests/xe/xe_exec_compute_mode.c          | 126 ++++++------
> >  tests/xe/xe_exec_fault_mode.c            | 176 ++++++++---------
> >  tests/xe/xe_exec_reset.c                 | 214 ++++++++++-----------
> >  tests/xe/xe_exec_store.c                 |  20 +-
> >  tests/xe/xe_exec_threads.c               | 172 ++++++++---------
> >  tests/xe/xe_exercise_blt.c               |   8 +-
> >  tests/xe/xe_guc_pc.c                     |  46 ++---
> >  tests/xe/xe_huc_copy.c                   |   8 +-
> >  tests/xe/xe_intel_bb.c                   |   4 +-
> >  tests/xe/xe_noexec_ping_pong.c           |  16 +-
> >  tests/xe/xe_pm.c                         |  34 ++--
> >  tests/xe/xe_spin_batch.c                 |  20 +-
> >  tests/xe/xe_vm.c                         | 214 ++++++++++-----------
> >  29 files changed, 959 insertions(+), 949 deletions(-)
> > 
> > diff --git a/include/drm-uapi/xe_drm.h b/include/drm-uapi/xe_drm.h
> > index 1ea453737..86f16d50e 100644
> > --- a/include/drm-uapi/xe_drm.h
> > +++ b/include/drm-uapi/xe_drm.h
> > @@ -3,8 +3,8 @@
> >   * Copyright © 2023 Intel Corporation
> >   */
> >  
> > -#ifndef _XE_DRM_H_
> > -#define _XE_DRM_H_
> > +#ifndef _UAPI_XE_DRM_H_
> > +#define _UAPI_XE_DRM_H_

hmmm...

> >  
> >  #include "drm.h"
> >  
> > @@ -16,6 +16,16 @@ extern "C" {
> >   * subject to backwards-compatibility constraints.
> >   */
> >  
> > +/**
> > + * DOC: uevent generated by xe on its pci node.
> > + *
> > + * XE_RESET_FAILED_UEVENT - Event is generated when an attempt to reset the
> > + * gt fails. The value supplied with the event is always "NEEDS_RESET".
> > + * Additional information supplied is the tile id and gt id of the gt unit
> > + * for which the reset has failed.
> > + */
> > +#define XE_RESET_FAILED_UEVENT "DEVICE_STATUS"
> > +
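
Side note while the key name is in front of us: to eyeball this event
from userspace you can watch the device's pci node with something like

$ udevadm monitor --kernel --property --subsystem-match=pci

and look for DEVICE_STATUS=NEEDS_RESET among the printed properties
(the tile id and gt id ride along in the same event, per the doc above).
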
> >  /**
> >   * struct xe_user_extension - Base class for defining a chain of extensions
> >   *
> > @@ -29,7 +39,7 @@ extern "C" {
> >   * redefine the interface more easily than an ever growing struct of
> >   * increasing complexity, and for large parts of that interface to be
> >   * entirely optional. The downside is more pointer chasing; chasing across
> > - * the boundary with pointers encapsulated inside u64.
> > + * the __user boundary with pointers encapsulated inside u64.

Could you please generate this header directly from the kernel instead
of copying the source?

$ make -j21 headers_install  INSTALL_HDR_PATH=/tmp/blah
$ cp /tmp/blah/include/drm/xe_drm.h include/drm-uapi/xe_drm.h
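
(That should also take care of the include guard hunk flagged above:
headers_install pipes the header through scripts/headers_install.sh,
which among other things strips the _UAPI prefix, so the installed
copy keeps the plain _XE_DRM_H_ guard.)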

Then, also mention in the commit message that the xe_drm.h uapi
header is now in sync with the drm-xe-next
commit ("drm/xe: move mem_access to xe_pm.")

> >   *
> >   * Example chaining:
> >   *
> > @@ -93,14 +103,14 @@ struct xe_user_extension {
> >  #define DRM_XE_VM_CREATE		0x03
> >  #define DRM_XE_VM_DESTROY		0x04
> >  #define DRM_XE_VM_BIND			0x05
> > -#define DRM_XE_ENGINE_CREATE		0x06
> > -#define DRM_XE_ENGINE_DESTROY		0x07
> > +#define DRM_XE_EXEC_QUEUE_CREATE		0x06
> > +#define DRM_XE_EXEC_QUEUE_DESTROY		0x07
> >  #define DRM_XE_EXEC			0x08
> >  #define DRM_XE_MMIO			0x09
> > -#define DRM_XE_ENGINE_SET_PROPERTY	0x0a
> > +#define DRM_XE_EXEC_QUEUE_SET_PROPERTY	0x0a
> >  #define DRM_XE_WAIT_USER_FENCE		0x0b
> >  #define DRM_XE_VM_MADVISE		0x0c
> > -#define DRM_XE_ENGINE_GET_PROPERTY	0x0d
> > +#define DRM_XE_EXEC_QUEUE_GET_PROPERTY	0x0d
> >  
> >  /* Must be kept compact -- no holes */
> >  #define DRM_IOCTL_XE_DEVICE_QUERY		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
> > @@ -109,12 +119,12 @@ struct xe_user_extension {
> >  #define DRM_IOCTL_XE_VM_CREATE			DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create)
> >  #define DRM_IOCTL_XE_VM_DESTROY			 DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy)
> >  #define DRM_IOCTL_XE_VM_BIND			 DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind)
> > -#define DRM_IOCTL_XE_ENGINE_CREATE		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_ENGINE_CREATE, struct drm_xe_engine_create)
> > -#define DRM_IOCTL_XE_ENGINE_GET_PROPERTY	DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_ENGINE_GET_PROPERTY, struct drm_xe_engine_get_property)
> > -#define DRM_IOCTL_XE_ENGINE_DESTROY		 DRM_IOW(DRM_COMMAND_BASE + DRM_XE_ENGINE_DESTROY, struct drm_xe_engine_destroy)
> > +#define DRM_IOCTL_XE_EXEC_QUEUE_CREATE		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create)
> > +#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY	DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
> > +#define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY		 DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy)
> >  #define DRM_IOCTL_XE_EXEC			 DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
> >  #define DRM_IOCTL_XE_MMIO			DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_MMIO, struct drm_xe_mmio)
> > -#define DRM_IOCTL_XE_ENGINE_SET_PROPERTY	 DRM_IOW(DRM_COMMAND_BASE + DRM_XE_ENGINE_SET_PROPERTY, struct drm_xe_engine_set_property)
> > +#define DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY	 DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_SET_PROPERTY, struct drm_xe_exec_queue_set_property)
> >  #define DRM_IOCTL_XE_WAIT_USER_FENCE		DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
> >  #define DRM_IOCTL_XE_VM_MADVISE			 DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_MADVISE, struct drm_xe_vm_madvise)
> >  
> > @@ -639,11 +649,11 @@ struct drm_xe_vm_bind {
> >  	__u32 vm_id;
> >  
> >  	/**
> > -	 * @engine_id: engine_id, must be of class DRM_XE_ENGINE_CLASS_VM_BIND
> > -	 * and engine must have same vm_id. If zero, the default VM bind engine
> > +	 * @exec_queue_id: exec_queue_id, must be of class DRM_XE_ENGINE_CLASS_VM_BIND
> > +	 * and the exec queue must have the same vm_id. If zero, the default VM bind exec queue
> >  	 * is used.
> >  	 */
> > -	__u32 engine_id;
> > +	__u32 exec_queue_id;
> >  
> >  	/** @num_binds: number of binds in this IOCTL */
> >  	__u32 num_binds;
> > @@ -675,8 +685,8 @@ struct drm_xe_vm_bind {
> >  	__u64 reserved[2];
> >  };
> >  
> > -/** struct drm_xe_ext_engine_set_property - engine set property extension */
> > -struct drm_xe_ext_engine_set_property {
> > +/** struct drm_xe_ext_exec_queue_set_property - exec queue set property extension */
> > +struct drm_xe_ext_exec_queue_set_property {
> >  	/** @base: base user extension */
> >  	struct xe_user_extension base;
> >  
> > @@ -691,32 +701,32 @@ struct drm_xe_ext_engine_set_property {
> >  };
> >  
> >  /**
> > - * struct drm_xe_engine_set_property - engine set property
> > + * struct drm_xe_exec_queue_set_property - exec queue set property
> >   *
> > - * Same namespace for extensions as drm_xe_engine_create
> > + * Same namespace for extensions as drm_xe_exec_queue_create
> >   */
> > -struct drm_xe_engine_set_property {
> > +struct drm_xe_exec_queue_set_property {
> >  	/** @extensions: Pointer to the first extension struct, if any */
> >  	__u64 extensions;
> >  
> > -	/** @engine_id: Engine ID */
> > -	__u32 engine_id;
> > +	/** @exec_queue_id: Exec queue ID */
> > +	__u32 exec_queue_id;
> >  
> > -#define XE_ENGINE_SET_PROPERTY_PRIORITY			0
> > -#define XE_ENGINE_SET_PROPERTY_TIMESLICE		1
> > -#define XE_ENGINE_SET_PROPERTY_PREEMPTION_TIMEOUT	2
> > +#define XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY			0
> > +#define XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE		1
> > +#define XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT	2
> >  	/*
> >  	 * Long running or ULLS engine mode. DMA fences not allowed in this
> >  	 * mode. Must match the value of DRM_XE_VM_CREATE_COMPUTE_MODE, serves
> >  	 * as a sanity check the UMD knows what it is doing. Can only be set at
> >  	 * engine create time.
> >  	 */
> > -#define XE_ENGINE_SET_PROPERTY_COMPUTE_MODE		3
> > -#define XE_ENGINE_SET_PROPERTY_PERSISTENCE		4
> > -#define XE_ENGINE_SET_PROPERTY_JOB_TIMEOUT		5
> > -#define XE_ENGINE_SET_PROPERTY_ACC_TRIGGER		6
> > -#define XE_ENGINE_SET_PROPERTY_ACC_NOTIFY		7
> > -#define XE_ENGINE_SET_PROPERTY_ACC_GRANULARITY		8
> > +#define XE_EXEC_QUEUE_SET_PROPERTY_COMPUTE_MODE		3
> > +#define XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE		4
> > +#define XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT		5
> > +#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER		6
> > +#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY		7
> > +#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY		8
> >  	/** @property: property to set */
> >  	__u32 property;
> >  
> > @@ -745,25 +755,25 @@ struct drm_xe_engine_class_instance {
> >  	__u16 gt_id;
> >  };
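
(Worth noting while reading this hunk: struct drm_xe_engine_class_instance
keeps "engine" in its name, presumably on purpose -- it describes a
hardware engine, which is exactly what the exec_queue rename is meant
to stop conflating with submission queues.)
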
> >  
> > -struct drm_xe_engine_create {
> > -#define XE_ENGINE_EXTENSION_SET_PROPERTY               0
> > +struct drm_xe_exec_queue_create {
> > +#define XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY               0
> >  	/** @extensions: Pointer to the first extension struct, if any */
> >  	__u64 extensions;
> >  
> > -	/** @width: submission width (number BB per exec) for this engine */
> > +	/** @width: submission width (number BB per exec) for this exec queue */
> >  	__u16 width;
> >  
> > -	/** @num_placements: number of valid placements for this engine */
> > +	/** @num_placements: number of valid placements for this exec queue */
> >  	__u16 num_placements;
> >  
> > -	/** @vm_id: VM to use for this engine */
> > +	/** @vm_id: VM to use for this exec queue */
> >  	__u32 vm_id;
> >  
> >  	/** @flags: MBZ */
> >  	__u32 flags;
> >  
> > -	/** @engine_id: Returned engine ID */
> > -	__u32 engine_id;
> > +	/** @exec_queue_id: Returned exec queue ID */
> > +	__u32 exec_queue_id;
> >  
> >  	/**
> >  	 * @instances: user pointer to a 2-d array of struct
> > @@ -778,14 +788,14 @@ struct drm_xe_engine_create {
> >  	__u64 reserved[2];
> >  };
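
Not a blocker for this patch, just spelling it out for the archive: as
I read the struct above, the renamed create path is driven roughly like
the hand-written sketch below (illustration only, assuming an open xe
fd and an existing vm; error handling omitted):

	struct drm_xe_engine_class_instance eci = {
		.engine_class = DRM_XE_ENGINE_CLASS_COPY,
		.engine_instance = 0,
		.gt_id = 0,
	};
	struct drm_xe_exec_queue_create create = {
		.vm_id = vm,
		.width = 1,		/* one BB per exec */
		.num_placements = 1,	/* instances is width x num_placements */
		.instances = (uintptr_t)&eci,
	};

	/* on success, create.exec_queue_id names the new exec queue */
	igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create), 0);
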
> >  
> > -struct drm_xe_engine_get_property {
> > +struct drm_xe_exec_queue_get_property {
> >  	/** @extensions: Pointer to the first extension struct, if any */
> >  	__u64 extensions;
> >  
> > -	/** @engine_id: Engine ID */
> > -	__u32 engine_id;
> > +	/** @exec_queue_id: Exec queue ID */
> > +	__u32 exec_queue_id;
> >  
> > -#define XE_ENGINE_GET_PROPERTY_BAN			0
> > +#define XE_EXEC_QUEUE_GET_PROPERTY_BAN			0
> >  	/** @property: property to get */
> >  	__u32 property;
> >  
> > @@ -796,9 +806,9 @@ struct drm_xe_engine_get_property {
> >  	__u64 reserved[2];
> >  };
> >  
> > -struct drm_xe_engine_destroy {
> > -	/** @engine_id: Engine ID */
> > -	__u32 engine_id;
> > +struct drm_xe_exec_queue_destroy {
> > +	/** @exec_queue_id: Exec queue ID */
> > +	__u32 exec_queue_id;
> >  
> >  	/** @pad: MBZ */
> >  	__u32 pad;
> > @@ -845,8 +855,8 @@ struct drm_xe_exec {
> >  	/** @extensions: Pointer to the first extension struct, if any */
> >  	__u64 extensions;
> >  
> > -	/** @engine_id: Engine ID for the batch buffer */
> > -	__u32 engine_id;
> > +	/** @exec_queue_id: Exec queue ID for the batch buffer */
> > +	__u32 exec_queue_id;
> >  
> >  	/** @num_syncs: Amount of struct drm_xe_sync in array. */
> >  	__u32 num_syncs;
> > @@ -1050,4 +1060,4 @@ struct drm_xe_vm_madvise {
> >  }
> >  #endif
> >  
> > -#endif /* _XE_DRM_H_ */
> > +#endif /* _UAPI_XE_DRM_H_ */
> > diff --git a/lib/igt_fb.c b/lib/igt_fb.c
> > index b33c639ab..8f5e3db9f 100644
> > --- a/lib/igt_fb.c
> > +++ b/lib/igt_fb.c
> > @@ -2881,7 +2881,7 @@ static void blitcopy(const struct igt_fb *dst_fb,
> >  							  mem_region) == 0);
> >  	} else if (is_xe) {
> >  		vm = xe_vm_create(dst_fb->fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
> > -		engine = xe_engine_create(dst_fb->fd, vm, &inst, 0);
> > +		engine = xe_exec_queue_create(dst_fb->fd, vm, &inst, 0);
> >  		xe_ctx = intel_ctx_xe(dst_fb->fd, vm, engine, 0, 0, 0);
> >  		mem_region = vram_if_possible(dst_fb->fd, 0);
> >  
> > @@ -3007,7 +3007,7 @@ static void blitcopy(const struct igt_fb *dst_fb,
> >  
> >  	if(is_xe) {
> >  		gem_close(dst_fb->fd, xe_bb);
> > -		xe_engine_destroy(dst_fb->fd, engine);
> > +		xe_exec_queue_destroy(dst_fb->fd, engine);
> >  		xe_vm_destroy(dst_fb->fd, vm);
> >  		free(xe_ctx);
> >  	}
> > diff --git a/lib/intel_batchbuffer.c b/lib/intel_batchbuffer.c
> > index 163d39d6b..682c0fe1d 100644
> > --- a/lib/intel_batchbuffer.c
> > +++ b/lib/intel_batchbuffer.c
> > @@ -2337,10 +2337,10 @@ __xe_bb_exec(struct intel_bb *ibb, uint64_t flags, bool sync)
> >  		igt_debug("Run on %s\n", xe_engine_class_string(inst.engine_class));
> >  
> >  		if (ibb->engine_id)
> > -			xe_engine_destroy(ibb->fd, ibb->engine_id);
> > +			xe_exec_queue_destroy(ibb->fd, ibb->engine_id);
> >  
> >  		ibb->engine_id = engine_id =
> > -			xe_engine_create(ibb->fd, ibb->vm_id, &inst, 0);
> > +			xe_exec_queue_create(ibb->fd, ibb->vm_id, &inst, 0);
> >  	} else {
> >  		engine_id = ibb->engine_id;
> >  	}
> > diff --git a/lib/intel_ctx.c b/lib/intel_ctx.c
> > index 0c1ce6d10..364ede76f 100644
> > --- a/lib/intel_ctx.c
> > +++ b/lib/intel_ctx.c
> > @@ -427,7 +427,7 @@ int __intel_ctx_xe_exec(const intel_ctx_t *ctx, uint64_t ahnd, uint64_t bb_offse
> >  		{ .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL, },
> >  	};
> >  	struct drm_xe_exec exec = {
> > -		.engine_id = ctx->engine,
> > +		.exec_queue_id = ctx->engine,
> >  		.syncs = (uintptr_t)syncs,
> >  		.num_syncs = 2,
> >  		.address = bb_offset,
> > diff --git a/lib/xe/xe_compute.c b/lib/xe/xe_compute.c
> > index 2a3686a1b..3e8112a04 100644
> > --- a/lib/xe/xe_compute.c
> > +++ b/lib/xe/xe_compute.c
> > @@ -389,7 +389,7 @@ static void tgllp_compute_exec_compute(uint32_t *addr_bo_buffer_batch,
> >  static void tgl_compute_exec(int fd, const unsigned char *kernel,
> >  			     unsigned int size)
> >  {
> > -	uint32_t vm, engine;
> > +	uint32_t vm, exec_queue;
> >  	float *dinput;
> >  	struct drm_xe_sync sync = { 0 };
> >  #define TGL_BO_DICT_ENTRIES 7
> > @@ -407,7 +407,7 @@ static void tgl_compute_exec(int fd, const unsigned char *kernel,
> >  	bo_dict[0].size = ALIGN(size, 0x1000);
> >  
> >  	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
> > -	engine = xe_engine_create_class(fd, vm, DRM_XE_ENGINE_CLASS_RENDER);
> > +	exec_queue = xe_exec_queue_create_class(fd, vm, DRM_XE_ENGINE_CLASS_RENDER);
> >  	sync.flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL;
> >  	sync.handle = syncobj_create(fd, 0);
> >  
> > @@ -429,7 +429,7 @@ static void tgl_compute_exec(int fd, const unsigned char *kernel,
> >  
> >  	tgllp_compute_exec_compute(bo_dict[6].data, ADDR_SURFACE_STATE_BASE, ADDR_DYNAMIC_STATE_BASE, ADDR_INDIRECT_OBJECT_BASE, OFFSET_INDIRECT_DATA_START);
> >  
> > -	xe_exec_wait(fd, engine, ADDR_BATCH);
> > +	xe_exec_wait(fd, exec_queue, ADDR_BATCH);
> >  
> >  	for (int i = 0; i < SIZE_DATA; i++)
> >  		igt_assert(((float *)bo_dict[5].data)[i] == ((float *)bo_dict[4].data)[i] * ((float *) bo_dict[4].data)[i]);
> > @@ -441,7 +441,7 @@ static void tgl_compute_exec(int fd, const unsigned char *kernel,
> >  	}
> >  
> >  	syncobj_destroy(fd, sync.handle);
> > -	xe_engine_destroy(fd, engine);
> > +	xe_exec_queue_destroy(fd, exec_queue);
> >  	xe_vm_destroy(fd, vm);
> >  }
> >  
> > diff --git a/lib/xe/xe_ioctl.c b/lib/xe/xe_ioctl.c
> > index a1799e09f..730dcfd16 100644
> > --- a/lib/xe/xe_ioctl.c
> > +++ b/lib/xe/xe_ioctl.c
> > @@ -62,16 +62,16 @@ uint32_t xe_vm_create(int fd, uint32_t flags, uint64_t ext)
> >  	return create.vm_id;
> >  }
> >  
> > -void xe_vm_unbind_all_async(int fd, uint32_t vm, uint32_t engine,
> > +void xe_vm_unbind_all_async(int fd, uint32_t vm, uint32_t exec_queue,
> >  			    uint32_t bo, struct drm_xe_sync *sync,
> >  			    uint32_t num_syncs)
> >  {
> > -	__xe_vm_bind_assert(fd, vm, engine, bo, 0, 0, 0,
> > +	__xe_vm_bind_assert(fd, vm, exec_queue, bo, 0, 0, 0,
> >  			    XE_VM_BIND_OP_UNMAP_ALL | XE_VM_BIND_FLAG_ASYNC,
> >  			    sync, num_syncs, 0, 0);
> >  }
> >  
> > -void xe_vm_bind_array(int fd, uint32_t vm, uint32_t engine,
> > +void xe_vm_bind_array(int fd, uint32_t vm, uint32_t exec_queue,
> >  		      struct drm_xe_vm_bind_op *bind_ops,
> >  		      uint32_t num_bind, struct drm_xe_sync *sync,
> >  		      uint32_t num_syncs)
> > @@ -82,14 +82,14 @@ void xe_vm_bind_array(int fd, uint32_t vm, uint32_t engine,
> >  		.vector_of_binds = (uintptr_t)bind_ops,
> >  		.num_syncs = num_syncs,
> >  		.syncs = (uintptr_t)sync,
> > -		.engine_id = engine,
> > +		.exec_queue_id = exec_queue,
> >  	};
> >  
> >  	igt_assert(num_bind > 1);
> >  	igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind), 0);
> >  }
> >  
> > -int  __xe_vm_bind(int fd, uint32_t vm, uint32_t engine, uint32_t bo,
> > +int  __xe_vm_bind(int fd, uint32_t vm, uint32_t exec_queue, uint32_t bo,
> >  		  uint64_t offset, uint64_t addr, uint64_t size, uint32_t op,
> >  		  struct drm_xe_sync *sync, uint32_t num_syncs, uint32_t region,
> >  		  uint64_t ext)
> > @@ -106,7 +106,7 @@ int  __xe_vm_bind(int fd, uint32_t vm, uint32_t engine, uint32_t bo,
> >  		.bind.region = region,
> >  		.num_syncs = num_syncs,
> >  		.syncs = (uintptr_t)sync,
> > -		.engine_id = engine,
> > +		.exec_queue_id = exec_queue,
> >  	};
> >  
> >  	if (igt_ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind))
> > @@ -115,12 +115,12 @@ int  __xe_vm_bind(int fd, uint32_t vm, uint32_t engine, uint32_t bo,
> >  	return 0;
> >  }
> >  
> > -void  __xe_vm_bind_assert(int fd, uint32_t vm, uint32_t engine, uint32_t bo,
> > +void  __xe_vm_bind_assert(int fd, uint32_t vm, uint32_t exec_queue, uint32_t bo,
> >  			  uint64_t offset, uint64_t addr, uint64_t size,
> >  			  uint32_t op, struct drm_xe_sync *sync,
> >  			  uint32_t num_syncs, uint32_t region, uint64_t ext)
> >  {
> > -	igt_assert_eq(__xe_vm_bind(fd, vm, engine, bo, offset, addr, size,
> > +	igt_assert_eq(__xe_vm_bind(fd, vm, exec_queue, bo, offset, addr, size,
> >  				   op, sync, num_syncs, region, ext), 0);
> >  }
> >  
> > @@ -140,59 +140,59 @@ void xe_vm_unbind(int fd, uint32_t vm, uint64_t offset,
> >  			    XE_VM_BIND_OP_UNMAP, sync, num_syncs, 0, 0);
> >  }
> >  
> > -void xe_vm_prefetch_async(int fd, uint32_t vm, uint32_t engine, uint64_t offset,
> > +void xe_vm_prefetch_async(int fd, uint32_t vm, uint32_t exec_queue, uint64_t offset,
> >  			  uint64_t addr, uint64_t size,
> >  			  struct drm_xe_sync *sync, uint32_t num_syncs,
> >  			  uint32_t region)
> >  {
> > -	__xe_vm_bind_assert(fd, vm, engine, 0, offset, addr, size,
> > +	__xe_vm_bind_assert(fd, vm, exec_queue, 0, offset, addr, size,
> >  			    XE_VM_BIND_OP_PREFETCH | XE_VM_BIND_FLAG_ASYNC,
> >  			    sync, num_syncs, region, 0);
> >  }
> >  
> > -void xe_vm_bind_async(int fd, uint32_t vm, uint32_t engine, uint32_t bo,
> > +void xe_vm_bind_async(int fd, uint32_t vm, uint32_t exec_queue, uint32_t bo,
> >  		      uint64_t offset, uint64_t addr, uint64_t size,
> >  		      struct drm_xe_sync *sync, uint32_t num_syncs)
> >  {
> > -	__xe_vm_bind_assert(fd, vm, engine, bo, offset, addr, size,
> > +	__xe_vm_bind_assert(fd, vm, exec_queue, bo, offset, addr, size,
> >  			    XE_VM_BIND_OP_MAP | XE_VM_BIND_FLAG_ASYNC, sync,
> >  			    num_syncs, 0, 0);
> >  }
> >  
> > -void xe_vm_bind_async_flags(int fd, uint32_t vm, uint32_t engine, uint32_t bo,
> > +void xe_vm_bind_async_flags(int fd, uint32_t vm, uint32_t exec_queue, uint32_t bo,
> >  			    uint64_t offset, uint64_t addr, uint64_t size,
> >  			    struct drm_xe_sync *sync, uint32_t num_syncs,
> >  			    uint32_t flags)
> >  {
> > -	__xe_vm_bind_assert(fd, vm, engine, bo, offset, addr, size,
> > +	__xe_vm_bind_assert(fd, vm, exec_queue, bo, offset, addr, size,
> >  			    XE_VM_BIND_OP_MAP | XE_VM_BIND_FLAG_ASYNC | flags,
> >  			    sync, num_syncs, 0, 0);
> >  }
> >  
> > -void xe_vm_bind_userptr_async(int fd, uint32_t vm, uint32_t engine,
> > +void xe_vm_bind_userptr_async(int fd, uint32_t vm, uint32_t exec_queue,
> >  			      uint64_t userptr, uint64_t addr, uint64_t size,
> >  			      struct drm_xe_sync *sync, uint32_t num_syncs)
> >  {
> > -	__xe_vm_bind_assert(fd, vm, engine, 0, userptr, addr, size,
> > +	__xe_vm_bind_assert(fd, vm, exec_queue, 0, userptr, addr, size,
> >  			    XE_VM_BIND_OP_MAP_USERPTR | XE_VM_BIND_FLAG_ASYNC,
> >  			    sync, num_syncs, 0, 0);
> >  }
> >  
> > -void xe_vm_bind_userptr_async_flags(int fd, uint32_t vm, uint32_t engine,
> > +void xe_vm_bind_userptr_async_flags(int fd, uint32_t vm, uint32_t exec_queue,
> >  				    uint64_t userptr, uint64_t addr,
> >  				    uint64_t size, struct drm_xe_sync *sync,
> >  				    uint32_t num_syncs, uint32_t flags)
> >  {
> > -	__xe_vm_bind_assert(fd, vm, engine, 0, userptr, addr, size,
> > +	__xe_vm_bind_assert(fd, vm, exec_queue, 0, userptr, addr, size,
> >  			    XE_VM_BIND_OP_MAP_USERPTR | XE_VM_BIND_FLAG_ASYNC |
> >  			    flags, sync, num_syncs, 0, 0);
> >  }
> >  
> > -void xe_vm_unbind_async(int fd, uint32_t vm, uint32_t engine,
> > +void xe_vm_unbind_async(int fd, uint32_t vm, uint32_t exec_queue,
> >  			uint64_t offset, uint64_t addr, uint64_t size,
> >  			struct drm_xe_sync *sync, uint32_t num_syncs)
> >  {
> > -	__xe_vm_bind_assert(fd, vm, engine, 0, offset, addr, size,
> > +	__xe_vm_bind_assert(fd, vm, exec_queue, 0, offset, addr, size,
> >  			    XE_VM_BIND_OP_UNMAP | XE_VM_BIND_FLAG_ASYNC, sync,
> >  			    num_syncs, 0, 0);
> >  }
> > @@ -275,12 +275,12 @@ uint32_t xe_bo_create(int fd, int gt, uint32_t vm, uint64_t size)
> >  	return create.handle;
> >  }
> >  
> > -uint32_t xe_bind_engine_create(int fd, uint32_t vm, uint64_t ext)
> > +uint32_t xe_bind_exec_queue_create(int fd, uint32_t vm, uint64_t ext)
> >  {
> >  	struct drm_xe_engine_class_instance instance = {
> >  		.engine_class = DRM_XE_ENGINE_CLASS_VM_BIND,
> >  	};
> > -	struct drm_xe_engine_create create = {
> > +	struct drm_xe_exec_queue_create create = {
> >  		.extensions = ext,
> >  		.vm_id = vm,
> >  		.width = 1,
> > @@ -288,16 +288,16 @@ uint32_t xe_bind_engine_create(int fd, uint32_t vm, uint64_t ext)
> >  		.instances = to_user_pointer(&instance),
> >  	};
> >  
> > -	igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_ENGINE_CREATE, &create), 0);
> > +	igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create), 0);
> >  
> > -	return create.engine_id;
> > +	return create.exec_queue_id;
> >  }
> >  
> > -uint32_t xe_engine_create(int fd, uint32_t vm,
> > +uint32_t xe_exec_queue_create(int fd, uint32_t vm,
> >  			  struct drm_xe_engine_class_instance *instance,
> >  			  uint64_t ext)
> >  {
> > -	struct drm_xe_engine_create create = {
> > +	struct drm_xe_exec_queue_create create = {
> >  		.extensions = ext,
> >  		.vm_id = vm,
> >  		.width = 1,
> > @@ -305,37 +305,37 @@ uint32_t xe_engine_create(int fd, uint32_t vm,
> >  		.instances = to_user_pointer(instance),
> >  	};
> >  
> > -	igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_ENGINE_CREATE, &create), 0);
> > +	igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create), 0);
> >  
> > -	return create.engine_id;
> > +	return create.exec_queue_id;
> >  }
> >  
> > -uint32_t xe_engine_create_class(int fd, uint32_t vm, uint16_t class)
> > +uint32_t xe_exec_queue_create_class(int fd, uint32_t vm, uint16_t class)
> >  {
> >  	struct drm_xe_engine_class_instance instance = {
> >  		.engine_class = class,
> >  		.engine_instance = 0,
> >  		.gt_id = 0,
> >  	};
> > -	struct drm_xe_engine_create create = {
> > +	struct drm_xe_exec_queue_create create = {
> >  		.vm_id = vm,
> >  		.width = 1,
> >  		.num_placements = 1,
> >  		.instances = to_user_pointer(&instance),
> >  	};
> >  
> > -	igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_ENGINE_CREATE, &create), 0);
> > +	igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create), 0);
> >  
> > -	return create.engine_id;
> > +	return create.exec_queue_id;
> >  }
> >  
> > -void xe_engine_destroy(int fd, uint32_t engine)
> > +void xe_exec_queue_destroy(int fd, uint32_t exec_queue)
> >  {
> > -	struct drm_xe_engine_destroy destroy = {
> > -		.engine_id = engine,
> > +	struct drm_xe_exec_queue_destroy destroy = {
> > +		.exec_queue_id = exec_queue,
> >  	};
> >  
> > -	igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_ENGINE_DESTROY, &destroy), 0);
> > +	igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_DESTROY, &destroy), 0);
> >  }
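
And on the IGT side the whole lifecycle now reads roughly like this
(sketch only, using the helpers from this file as renamed above;
batch_addr stands in for a previously bound batch buffer):

	uint32_t vm = xe_vm_create(fd, 0, 0);
	uint32_t exec_queue = xe_exec_queue_create_class(fd, vm,
							 DRM_XE_ENGINE_CLASS_COPY);

	/* ... create and bind a BO with a batch at batch_addr ... */
	xe_exec_wait(fd, exec_queue, batch_addr); /* submit + wait on syncobj */

	xe_exec_queue_destroy(fd, exec_queue);
	xe_vm_destroy(fd, vm);
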
> >  
> >  uint64_t xe_bo_mmap_offset(int fd, uint32_t bo)
> > @@ -388,11 +388,11 @@ void xe_exec(int fd, struct drm_xe_exec *exec)
> >  	igt_assert_eq(__xe_exec(fd, exec), 0);
> >  }
> >  
> > -void xe_exec_sync(int fd, uint32_t engine, uint64_t addr,
> > +void xe_exec_sync(int fd, uint32_t exec_queue, uint64_t addr,
> >  		  struct drm_xe_sync *sync, uint32_t num_syncs)
> >  {
> >  	struct drm_xe_exec exec = {
> > -		.engine_id = engine,
> > +		.exec_queue_id = exec_queue,
> >  		.syncs = (uintptr_t)sync,
> >  		.num_syncs = num_syncs,
> >  		.address = addr,
> > @@ -402,14 +402,14 @@ void xe_exec_sync(int fd, uint32_t engine, uint64_t addr,
> >  	igt_assert_eq(__xe_exec(fd, &exec), 0);
> >  }
> >  
> > -void xe_exec_wait(int fd, uint32_t engine, uint64_t addr)
> > +void xe_exec_wait(int fd, uint32_t exec_queue, uint64_t addr)
> >  {
> >  	struct drm_xe_sync sync = {
> >  		.flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL,
> >  		.handle = syncobj_create(fd, 0),
> >  	};
> >  
> > -	xe_exec_sync(fd, engine, addr, &sync, 1);
> > +	xe_exec_sync(fd, exec_queue, addr, &sync, 1);
> >  
> >  	igt_assert(syncobj_wait(fd, &sync.handle, 1, INT64_MAX, 0, NULL));
> >  	syncobj_destroy(fd, sync.handle);
> > diff --git a/lib/xe/xe_ioctl.h b/lib/xe/xe_ioctl.h
> > index f9c3acb4a..6c281b3bf 100644
> > --- a/lib/xe/xe_ioctl.h
> > +++ b/lib/xe/xe_ioctl.h
> > @@ -17,11 +17,11 @@
> >  
> >  uint32_t xe_cs_prefetch_size(int fd);
> >  uint32_t xe_vm_create(int fd, uint32_t flags, uint64_t ext);
> > -int  __xe_vm_bind(int fd, uint32_t vm, uint32_t engine, uint32_t bo,
> > +int  __xe_vm_bind(int fd, uint32_t vm, uint32_t exec_queue, uint32_t bo,
> >  		  uint64_t offset, uint64_t addr, uint64_t size, uint32_t op,
> >  		  struct drm_xe_sync *sync, uint32_t num_syncs, uint32_t region,
> >  		  uint64_t ext);
> > -void  __xe_vm_bind_assert(int fd, uint32_t vm, uint32_t engine, uint32_t bo,
> > +void  __xe_vm_bind_assert(int fd, uint32_t vm, uint32_t exec_queue, uint32_t bo,
> >  			  uint64_t offset, uint64_t addr, uint64_t size,
> >  			  uint32_t op, struct drm_xe_sync *sync,
> >  			  uint32_t num_syncs, uint32_t region, uint64_t ext);
> > @@ -31,36 +31,36 @@ void xe_vm_bind(int fd, uint32_t vm, uint32_t bo, uint64_t offset,
> >  void xe_vm_unbind(int fd, uint32_t vm, uint64_t offset,
> >  		  uint64_t addr, uint64_t size,
> >  		  struct drm_xe_sync *sync, uint32_t num_syncs);
> > -void xe_vm_prefetch_async(int fd, uint32_t vm, uint32_t engine,
> > +void xe_vm_prefetch_async(int fd, uint32_t vm, uint32_t exec_queue,
> >  			  uint64_t offset, uint64_t addr, uint64_t size,
> >  			  struct drm_xe_sync *sync, uint32_t num_syncs,
> >  			  uint32_t region);
> > -void xe_vm_bind_async(int fd, uint32_t vm, uint32_t engine, uint32_t bo,
> > +void xe_vm_bind_async(int fd, uint32_t vm, uint32_t exec_queue, uint32_t bo,
> >  		      uint64_t offset, uint64_t addr, uint64_t size,
> >  		      struct drm_xe_sync *sync, uint32_t num_syncs);
> > -void xe_vm_bind_userptr_async(int fd, uint32_t vm, uint32_t engine,
> > +void xe_vm_bind_userptr_async(int fd, uint32_t vm, uint32_t exec_queue,
> >  			      uint64_t userptr, uint64_t addr, uint64_t size,
> >  			      struct drm_xe_sync *sync, uint32_t num_syncs);
> > -void xe_vm_bind_async_flags(int fd, uint32_t vm, uint32_t engine, uint32_t bo,
> > +void xe_vm_bind_async_flags(int fd, uint32_t vm, uint32_t exec_queue, uint32_t bo,
> >  			    uint64_t offset, uint64_t addr, uint64_t size,
> >  			    struct drm_xe_sync *sync, uint32_t num_syncs,
> >  			    uint32_t flags);
> > -void xe_vm_bind_userptr_async_flags(int fd, uint32_t vm, uint32_t engine,
> > +void xe_vm_bind_userptr_async_flags(int fd, uint32_t vm, uint32_t exec_queue,
> >  				    uint64_t userptr, uint64_t addr,
> >  				    uint64_t size, struct drm_xe_sync *sync,
> >  				    uint32_t num_syncs, uint32_t flags);
> > -void xe_vm_unbind_async(int fd, uint32_t vm, uint32_t engine,
> > +void xe_vm_unbind_async(int fd, uint32_t vm, uint32_t exec_queue,
> >  			uint64_t offset, uint64_t addr, uint64_t size,
> >  			struct drm_xe_sync *sync, uint32_t num_syncs);
> >  void xe_vm_bind_sync(int fd, uint32_t vm, uint32_t bo, uint64_t offset,
> >  		     uint64_t addr, uint64_t size);
> >  void xe_vm_unbind_sync(int fd, uint32_t vm, uint64_t offset,
> >  		       uint64_t addr, uint64_t size);
> > -void xe_vm_bind_array(int fd, uint32_t vm, uint32_t engine,
> > +void xe_vm_bind_array(int fd, uint32_t vm, uint32_t exec_queue,
> >  		      struct drm_xe_vm_bind_op *bind_ops,
> >  		      uint32_t num_bind, struct drm_xe_sync *sync,
> >  		      uint32_t num_syncs);
> > -void xe_vm_unbind_all_async(int fd, uint32_t vm, uint32_t engine,
> > +void xe_vm_unbind_all_async(int fd, uint32_t vm, uint32_t exec_queue,
> >  			    uint32_t bo, struct drm_xe_sync *sync,
> >  			    uint32_t num_syncs);
> >  void xe_vm_destroy(int fd, uint32_t vm);
> > @@ -68,20 +68,20 @@ uint32_t __xe_bo_create_flags(int fd, uint32_t vm, uint64_t size, uint32_t flags
> >  			      uint32_t *handle);
> >  uint32_t xe_bo_create_flags(int fd, uint32_t vm, uint64_t size, uint32_t flags);
> >  uint32_t xe_bo_create(int fd, int gt, uint32_t vm, uint64_t size);
> > -uint32_t xe_engine_create(int fd, uint32_t vm,
> > +uint32_t xe_exec_queue_create(int fd, uint32_t vm,
> >  			  struct drm_xe_engine_class_instance *instance,
> >  			  uint64_t ext);
> > -uint32_t xe_bind_engine_create(int fd, uint32_t vm, uint64_t ext);
> > -uint32_t xe_engine_create_class(int fd, uint32_t vm, uint16_t class);
> > -void xe_engine_destroy(int fd, uint32_t engine);
> > +uint32_t xe_bind_exec_queue_create(int fd, uint32_t vm, uint64_t ext);
> > +uint32_t xe_exec_queue_create_class(int fd, uint32_t vm, uint16_t class);
> > +void xe_exec_queue_destroy(int fd, uint32_t exec_queue);
> >  uint64_t xe_bo_mmap_offset(int fd, uint32_t bo);
> >  void *xe_bo_map(int fd, uint32_t bo, size_t size);
> >  void *xe_bo_mmap_ext(int fd, uint32_t bo, size_t size, int prot);
> >  int __xe_exec(int fd, struct drm_xe_exec *exec);
> >  void xe_exec(int fd, struct drm_xe_exec *exec);
> > -void xe_exec_sync(int fd, uint32_t engine, uint64_t addr,
> > +void xe_exec_sync(int fd, uint32_t exec_queue, uint64_t addr,
> >  		  struct drm_xe_sync *sync, uint32_t num_syncs);
> > -void xe_exec_wait(int fd, uint32_t engine, uint64_t addr);
> > +void xe_exec_wait(int fd, uint32_t exec_queue, uint64_t addr);
> >  int64_t xe_wait_ufence(int fd, uint64_t *addr, uint64_t value,
> >  		       struct drm_xe_engine_class_instance *eci,
> >  		       int64_t timeout);
> > diff --git a/lib/xe/xe_spin.c b/lib/xe/xe_spin.c
> > index e799fcfc5..cd9c1a7d3 100644
> > --- a/lib/xe/xe_spin.c
> > +++ b/lib/xe/xe_spin.c
> > @@ -84,7 +84,7 @@ void xe_spin_end(struct xe_spin *spin)
> >  
> >  /**
> >   * xe_spin_create:
> > - *@opt: controlling options such as allocator handle, engine, vm etc
> > + *@opt: controlling options such as allocator handle, exec_queue, vm, etc.
> >   *
> >   * igt_spin_new for xe, xe_spin_create submits a batch using xe_spin_init
> >   * which wraps around vm bind and unbinding the object associated to it.
> > @@ -121,9 +121,9 @@ xe_spin_create(int fd, const struct igt_spin_factory *opt)
> >  
> >  	if (!spin->engine) {
> >  		if (opt->hwe)
> > -			spin->engine = xe_engine_create(fd, spin->vm, opt->hwe, 0);
> > +			spin->engine = xe_exec_queue_create(fd, spin->vm, opt->hwe, 0);
> >  		else
> > -			spin->engine = xe_engine_create_class(fd, spin->vm, DRM_XE_ENGINE_CLASS_COPY);
> > +			spin->engine = xe_exec_queue_create_class(fd, spin->vm, DRM_XE_ENGINE_CLASS_COPY);
> >  	}
> >  
> >  	spin->handle = xe_bo_create_flags(fd, spin->vm, bo_size,
> > @@ -137,7 +137,7 @@ xe_spin_create(int fd, const struct igt_spin_factory *opt)
> >  	else
> >  		xe_spin_init(xe_spin, addr, false);
> >  
> > -	exec.engine_id = spin->engine;
> > +	exec.exec_queue_id = spin->engine;
> >  	exec.address = addr;
> >  	sync.handle = spin->syncobj;
> >  	igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_EXEC, &exec), 0);
> > @@ -161,8 +161,8 @@ void xe_spin_sync_wait(int fd, struct igt_spin *spin)
> >   *@spin: spin state from igt_spin_new()
> >   *
> >   * Wrapper to free spinner which is triggered by xe_spin_create.
> > - * which distroys vm, engine and unbinds the vm which is binded to
> > - * the engine and bo.
> > + * It destroys the vm and exec_queue, and unbinds the vm that is bound to
> > + * the exec_queue and bo.
> >   *
> >   */
> >  void xe_spin_free(int fd, struct igt_spin *spin)
> > @@ -176,7 +176,7 @@ void xe_spin_free(int fd, struct igt_spin *spin)
> >  	gem_close(fd, spin->handle);
> >  
> >  	if (!spin->opts.engine)
> > -		xe_engine_destroy(fd, spin->engine);
> > +		xe_exec_queue_destroy(fd, spin->engine);
> >  
> >  	if (!spin->opts.vm)
> >  		xe_vm_destroy(fd, spin->vm);
> > @@ -189,7 +189,7 @@ void xe_cork_init(int fd, struct drm_xe_engine_class_instance *hwe,
> >  {
> >  	uint64_t addr = xe_get_default_alignment(fd);
> >  	size_t bo_size = xe_get_default_alignment(fd);
> > -	uint32_t vm, bo, engine, syncobj;
> > +	uint32_t vm, bo, exec_queue, syncobj;
> >  	struct xe_spin *spin;
> >  	struct drm_xe_sync sync = {
> >  		.flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL,
> > @@ -208,11 +208,11 @@ void xe_cork_init(int fd, struct drm_xe_engine_class_instance *hwe,
> >  
> >  	xe_vm_bind_sync(fd, vm, bo, 0, addr, bo_size);
> >  
> > -	engine = xe_engine_create(fd, vm, hwe, 0);
> > +	exec_queue = xe_exec_queue_create(fd, vm, hwe, 0);
> >  	syncobj = syncobj_create(fd, 0);
> >  
> >  	xe_spin_init(spin, addr, true);
> > -	exec.engine_id = engine;
> > +	exec.exec_queue_id = exec_queue;
> >  	exec.address = addr;
> >  	sync.handle = syncobj;
> >  	igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_EXEC, &exec), 0);
> > @@ -221,7 +221,7 @@ void xe_cork_init(int fd, struct drm_xe_engine_class_instance *hwe,
> >  	cork->fd = fd;
> >  	cork->vm = vm;
> >  	cork->bo = bo;
> > -	cork->engine = engine;
> > +	cork->exec_queue = exec_queue;
> >  	cork->syncobj = syncobj;
> >  }
> >  
> > @@ -249,7 +249,7 @@ void xe_cork_wait_done(struct xe_cork *cork)
> >  void xe_cork_fini(struct xe_cork *cork)
> >  {
> >  	syncobj_destroy(cork->fd, cork->syncobj);
> > -	xe_engine_destroy(cork->fd, cork->engine);
> > +	xe_exec_queue_destroy(cork->fd, cork->exec_queue);
> >  	xe_vm_destroy(cork->fd, cork->vm);
> >  	gem_close(cork->fd, cork->bo);
> >  }
> > diff --git a/lib/xe/xe_spin.h b/lib/xe/xe_spin.h
> > index 54a52b403..c84db175d 100644
> > --- a/lib/xe/xe_spin.h
> > +++ b/lib/xe/xe_spin.h
> > @@ -35,7 +35,7 @@ struct xe_cork {
> >  	int fd;
> >  	uint32_t vm;
> >  	uint32_t bo;
> > -	uint32_t engine;
> > +	uint32_t exec_queue;
> >  	uint32_t syncobj;
> >  };
> >  
> > diff --git a/tests/intel-ci/xe-fast-feedback.testlist b/tests/intel-ci/xe-fast-feedback.testlist
> > index 2799d9cfb..78814f650 100644
> > --- a/tests/intel-ci/xe-fast-feedback.testlist
> > +++ b/tests/intel-ci/xe-fast-feedback.testlist
> > @@ -2,8 +2,8 @@
> >  igt@xe_module_load@load
> >  
> >  igt@xe_compute@compute-square
> > -igt@xe_create@create-engines-noleak
> > -igt@xe_create@create-engines-leak
> > +igt@xe_create@create-execqueues-noleak
> > +igt@xe_create@create-execqueues-leak
> >  igt@xe_create@create-massive-size
> >  igt@xe_debugfs@base
> >  igt@xe_debugfs@gt
> > @@ -53,54 +53,54 @@ igt@xe_exec_basic@twice-rebind
> >  igt@xe_exec_basic@twice-userptr-rebind
> >  igt@xe_exec_basic@twice-userptr-invalidate
> >  igt@xe_exec_basic@no-exec-userptr-invalidate
> > -igt@xe_exec_basic@twice-bindengine
> > -igt@xe_exec_basic@no-exec-bindengine
> > -igt@xe_exec_basic@twice-bindengine-userptr
> > -igt@xe_exec_basic@twice-bindengine-rebind
> > -igt@xe_exec_basic@twice-bindengine-userptr-rebind
> > -igt@xe_exec_basic@twice-bindengine-userptr-invalidate
> > +igt@xe_exec_basic@twice-bindexecqueue
> > +igt@xe_exec_basic@no-exec-bindexecqueue
> > +igt@xe_exec_basic@twice-bindexecqueue-userptr
> > +igt@xe_exec_basic@twice-bindexecqueue-rebind
> > +igt@xe_exec_basic@twice-bindexecqueue-userptr-rebind
> > +igt@xe_exec_basic@twice-bindexecqueue-userptr-invalidate
> >  igt@xe_exec_compute_mode@twice-basic
> >  igt@xe_exec_compute_mode@twice-preempt-fence-early
> >  igt@xe_exec_compute_mode@twice-userptr
> >  igt@xe_exec_compute_mode@twice-rebind
> >  igt@xe_exec_compute_mode@twice-userptr-rebind
> >  igt@xe_exec_compute_mode@twice-userptr-invalidate
> > -igt@xe_exec_compute_mode@twice-bindengine
> > -igt@xe_exec_compute_mode@twice-bindengine-userptr
> > -igt@xe_exec_compute_mode@twice-bindengine-rebind
> > -igt@xe_exec_compute_mode@twice-bindengine-userptr-rebind
> > -igt@xe_exec_compute_mode@twice-bindengine-userptr-invalidate
> > +igt@xe_exec_compute_mode@twice-bindexecqueue
> > +igt@xe_exec_compute_mode@twice-bindexecqueue-userptr
> > +igt@xe_exec_compute_mode@twice-bindexecqueue-rebind
> > +igt@xe_exec_compute_mode@twice-bindexecqueue-userptr-rebind
> > +igt@xe_exec_compute_mode@twice-bindexecqueue-userptr-invalidate
> >  igt@xe_exec_fault_mode@twice-basic
> >  igt@xe_exec_fault_mode@many-basic
> >  igt@xe_exec_fault_mode@twice-userptr
> >  igt@xe_exec_fault_mode@twice-rebind
> >  igt@xe_exec_fault_mode@twice-userptr-rebind
> >  igt@xe_exec_fault_mode@twice-userptr-invalidate
> > -igt@xe_exec_fault_mode@twice-bindengine
> > -igt@xe_exec_fault_mode@twice-bindengine-userptr
> > -igt@xe_exec_fault_mode@twice-bindengine-rebind
> > -igt@xe_exec_fault_mode@twice-bindengine-userptr-rebind
> > -igt@xe_exec_fault_mode@twice-bindengine-userptr-invalidate
> > +igt@xe_exec_fault_mode@twice-bindexecqueue
> > +igt@xe_exec_fault_mode@twice-bindexecqueue-userptr
> > +igt@xe_exec_fault_mode@twice-bindexecqueue-rebind
> > +igt@xe_exec_fault_mode@twice-bindexecqueue-userptr-rebind
> > +igt@xe_exec_fault_mode@twice-bindexecqueue-userptr-invalidate
> >  igt@xe_exec_fault_mode@twice-basic-imm
> >  igt@xe_exec_fault_mode@twice-userptr-imm
> >  igt@xe_exec_fault_mode@twice-rebind-imm
> >  igt@xe_exec_fault_mode@twice-userptr-rebind-imm
> >  igt@xe_exec_fault_mode@twice-userptr-invalidate-imm
> > -igt@xe_exec_fault_mode@twice-bindengine-imm
> > -igt@xe_exec_fault_mode@twice-bindengine-userptr-imm
> > -igt@xe_exec_fault_mode@twice-bindengine-rebind-imm
> > -igt@xe_exec_fault_mode@twice-bindengine-userptr-rebind-imm
> > -igt@xe_exec_fault_mode@twice-bindengine-userptr-invalidate-imm
> > +igt@xe_exec_fault_mode@twice-bindexecqueue-imm
> > +igt@xe_exec_fault_mode@twice-bindexecqueue-userptr-imm
> > +igt@xe_exec_fault_mode@twice-bindexecqueue-rebind-imm
> > +igt@xe_exec_fault_mode@twice-bindexecqueue-userptr-rebind-imm
> > +igt@xe_exec_fault_mode@twice-bindexecqueue-userptr-invalidate-imm
> >  igt@xe_exec_fault_mode@twice-basic-prefetch
> >  igt@xe_exec_fault_mode@twice-userptr-prefetch
> >  igt@xe_exec_fault_mode@twice-rebind-prefetch
> >  igt@xe_exec_fault_mode@twice-userptr-rebind-prefetch
> >  igt@xe_exec_fault_mode@twice-userptr-invalidate-prefetch
> > -igt@xe_exec_fault_mode@twice-bindengine-prefetch
> > -igt@xe_exec_fault_mode@twice-bindengine-userptr-prefetch
> > -igt@xe_exec_fault_mode@twice-bindengine-rebind-prefetch
> > -igt@xe_exec_fault_mode@twice-bindengine-userptr-rebind-prefetch
> > -igt@xe_exec_fault_mode@twice-bindengine-userptr-invalidate-prefetch
> > +igt@xe_exec_fault_mode@twice-bindexecqueue-prefetch
> > +igt@xe_exec_fault_mode@twice-bindexecqueue-userptr-prefetch
> > +igt@xe_exec_fault_mode@twice-bindexecqueue-rebind-prefetch
> > +igt@xe_exec_fault_mode@twice-bindexecqueue-userptr-rebind-prefetch
> > +igt@xe_exec_fault_mode@twice-bindexecqueue-userptr-invalidate-prefetch
> >  igt@xe_exec_fault_mode@twice-invalid-fault
> >  igt@xe_exec_fault_mode@twice-invalid-userptr-fault
> >  igt@xe_exec_reset@close-fd-no-exec
> > @@ -153,7 +153,7 @@ igt@xe_pm_residency@gt-c6-on-idle
> >  igt@xe_prime_self_import@basic-with_one_bo
> >  igt@xe_prime_self_import@basic-with_fd_dup
> >  #igt@xe_prime_self_import@basic-llseek-size
> > -igt@xe_query@query-engines
> > +igt@xe_query@query-execqueues
> >  igt@xe_query@query-mem-usage
> >  igt@xe_query@query-gts
> >  igt@xe_query@query-config
> > @@ -170,7 +170,7 @@ igt@xe_vm@shared-pte-page
> >  igt@xe_vm@shared-pde-page
> >  igt@xe_vm@shared-pde2-page
> >  igt@xe_vm@shared-pde3-page
> > -igt@xe_vm@bind-engines-independent
> > +igt@xe_vm@bind-execqueues-independent
> >  igt@xe_vm@munmap-style-unbind-one-partial
> >  igt@xe_vm@munmap-style-unbind-end
> >  igt@xe_vm@munmap-style-unbind-front
> > diff --git a/tests/xe/xe_ccs.c b/tests/xe/xe_ccs.c
> > index 2d5ae33fa..1dbfccf6b 100644
> > --- a/tests/xe/xe_ccs.c
> > +++ b/tests/xe/xe_ccs.c
> > @@ -337,12 +337,12 @@ static void block_copy(int xe,
> >  		};
> >  		intel_ctx_t *surf_ctx = ctx;
> >  		uint64_t surf_ahnd = ahnd;
> > -		uint32_t vm, engine;
> > +		uint32_t vm, exec_queue;
> >  
> >  		if (config->new_ctx) {
> >  			vm = xe_vm_create(xe, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
> > -			engine = xe_engine_create(xe, vm, &inst, 0);
> > -			surf_ctx = intel_ctx_xe(xe, vm, engine, 0, 0, 0);
> > +			exec_queue = xe_exec_queue_create(xe, vm, &inst, 0);
> > +			surf_ctx = intel_ctx_xe(xe, vm, exec_queue, 0, 0, 0);
> >  			surf_ahnd = intel_allocator_open(xe, surf_ctx->vm,
> >  							 INTEL_ALLOCATOR_RELOC);
> >  		}
> > @@ -350,7 +350,7 @@ static void block_copy(int xe,
> >  			  config->suspend_resume);
> >  
> >  		if (surf_ctx != ctx) {
> > -			xe_engine_destroy(xe, engine);
> > +			xe_exec_queue_destroy(xe, exec_queue);
> >  			xe_vm_destroy(xe, vm);
> >  			free(surf_ctx);
> >  			put_ahnd(surf_ahnd);
> > @@ -512,7 +512,7 @@ static void block_copy_test(int xe,
> >  	};
> >  	intel_ctx_t *ctx;
> >  	struct igt_collection *regions;
> > -	uint32_t vm, engine;
> > +	uint32_t vm, exec_queue;
> >  	int tiling;
> >  
> >  	if (config->compression && !blt_block_copy_supports_compression(xe))
> > @@ -548,17 +548,17 @@ static void block_copy_test(int xe,
> >  				uint32_t sync_bind, sync_out;
> >  
> >  				vm = xe_vm_create(xe, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
> > -				engine = xe_engine_create(xe, vm, &inst, 0);
> > +				exec_queue = xe_exec_queue_create(xe, vm, &inst, 0);
> >  				sync_bind = syncobj_create(xe, 0);
> >  				sync_out = syncobj_create(xe, 0);
> > -				ctx = intel_ctx_xe(xe, vm, engine,
> > +				ctx = intel_ctx_xe(xe, vm, exec_queue,
> >  						   0, sync_bind, sync_out);
> >  
> >  				copyfns[copy_function].copyfn(xe, ctx,
> >  							      region1, region2,
> >  							      tiling, config);
> >  
> > -				xe_engine_destroy(xe, engine);
> > +				xe_exec_queue_destroy(xe, exec_queue);
> >  				xe_vm_destroy(xe, vm);
> >  				syncobj_destroy(xe, sync_bind);
> >  				syncobj_destroy(xe, sync_out);
> > diff --git a/tests/xe/xe_create.c b/tests/xe/xe_create.c
> > index e39e89f7a..20e7a7da8 100644
> > --- a/tests/xe/xe_create.c
> > +++ b/tests/xe/xe_create.c
> > @@ -88,17 +88,17 @@ static void create_invalid_size(int fd)
> >  	xe_vm_destroy(fd, vm);
> >  }
> >  
> > -enum engine_destroy {
> > +enum exec_queue_destroy {
> >  	NOLEAK,
> >  	LEAK
> >  };
> >  
> > -static uint32_t __xe_engine_create(int fd, uint32_t vm,
> > +static uint32_t __xe_exec_queue_create(int fd, uint32_t vm,
> >  				   struct drm_xe_engine_class_instance *instance,
> >  				   uint64_t ext,
> > -				   uint32_t *enginep)
> > +				   uint32_t *exec_queuep)
> >  {
> > -	struct drm_xe_engine_create create = {
> > +	struct drm_xe_exec_queue_create create = {
> >  		.extensions = ext,
> >  		.vm_id = vm,
> >  		.width = 1,
> > @@ -107,10 +107,10 @@ static uint32_t __xe_engine_create(int fd, uint32_t vm,
> >  	};
> >  	int err = 0;
> >  
> > -	if (igt_ioctl(fd, DRM_IOCTL_XE_ENGINE_CREATE, &create) == 0) {
> > -		*enginep = create.engine_id;
> > +	if (igt_ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create) == 0) {
> > +		*exec_queuep = create.exec_queue_id;
> >  	} else {
> > -		igt_warn("Can't create engine, errno: %d\n", errno);
> > +		igt_warn("Can't create exec_queue, errno: %d\n", errno);
> >  		err = -errno;
> >  		igt_assume(err);
> >  	}
> > @@ -119,58 +119,58 @@ static uint32_t __xe_engine_create(int fd, uint32_t vm,
> >  	return err;
> >  }
> >  
> > -#define MAXENGINES 2048
> > +#define MAXEXECQUEUES 2048
> >  #define MAXTIME 5
> >  
> >  /**
> > - * SUBTEST: create-engines-%s
> > - * Description: Check process ability of multiple engines creation
> > + * SUBTEST: create-execqueues-%s
> > + * Description: Check a process's ability to create multiple exec_queues
> >   * Run type: FULL
> >   *
> >   * arg[1]:
> >   *
> > - * @noleak:				destroy engines in the code
> > - * @leak:				destroy engines in close() path
> > + * @noleak:				destroy exec_queues in the code
> > + * @leak:				destroy exec_queues in close() path
> >   */
> > -static void create_engines(int fd, enum engine_destroy ed)
> > +static void create_execqueues(int fd, enum exec_queue_destroy ed)
> >  {
> >  	struct timespec tv = { };
> > -	uint32_t num_engines, engines_per_process, vm;
> > +	uint32_t num_engines, exec_queues_per_process, vm;
> >  	int nproc = sysconf(_SC_NPROCESSORS_ONLN), seconds;
> >  
> >  	fd = drm_reopen_driver(fd);
> >  	num_engines = xe_number_hw_engines(fd);
> >  	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
> >  
> > -	engines_per_process = max_t(uint32_t, 1, MAXENGINES / nproc);
> > -	igt_debug("nproc: %u, engines per process: %u\n", nproc, engines_per_process);
> > +	exec_queues_per_process = max_t(uint32_t, 1, MAXEXECQUEUES / nproc);
> > +	igt_debug("nproc: %u, exec_queues per process: %u\n", nproc, exec_queues_per_process);
> >  
> >  	igt_nsec_elapsed(&tv);
> >  
> >  	igt_fork(n, nproc) {
> >  		struct drm_xe_engine_class_instance *hwe;
> > -		uint32_t engine, engines[engines_per_process];
> > +		uint32_t exec_queue, exec_queues[exec_queues_per_process];
> >  		int idx, err, i;
> >  
> >  		srandom(n);
> >  
> > -		for (i = 0; i < engines_per_process; i++) {
> > +		for (i = 0; i < exec_queues_per_process; i++) {
> >  			idx = rand() % num_engines;
> >  			hwe = xe_hw_engine(fd, idx);
> > -			err = __xe_engine_create(fd, vm, hwe, 0, &engine);
> > -			igt_debug("[%2d] Create engine: err=%d, engine=%u [idx = %d]\n",
> > -				  n, err, engine, i);
> > +			err = __xe_exec_queue_create(fd, vm, hwe, 0, &exec_queue);
> > +			igt_debug("[%2d] Create exec_queue: err=%d, exec_queue=%u [idx = %d]\n",
> > +				  n, err, exec_queue, i);
> >  			if (err)
> >  				break;
> >  
> >  			if (ed == NOLEAK)
> > -				engines[i] = engine;
> > +				exec_queues[i] = exec_queue;
> >  		}
> >  
> >  		if (ed == NOLEAK) {
> >  			while (--i >= 0) {
> > -				igt_debug("[%2d] Destroy engine: %u\n", n, engines[i]);
> > -				xe_engine_destroy(fd, engines[i]);
> > +				igt_debug("[%2d] Destroy exec_queue: %u\n", n, exec_queues[i]);
> > +				xe_exec_queue_destroy(fd, exec_queues[i]);
> >  			}
> >  		}
> >  	}
> > @@ -181,8 +181,8 @@ static void create_engines(int fd, enum engine_destroy ed)
> >  
> >  	seconds = igt_seconds_elapsed(&tv);
> >  	igt_assert_f(seconds < MAXTIME,
> > -		     "Creating %d engines tooks too long: %d [limit: %d]\n",
> > -		     MAXENGINES, seconds, MAXTIME);
> > +		     "Creating %d exec_queues took too long: %d [limit: %d]\n",
> > +		     MAXEXECQUEUES, seconds, MAXTIME);
> >  }
> >  
> >  /**
> > @@ -216,11 +216,11 @@ igt_main
> >  		create_invalid_size(xe);
> >  	}
> >  
> > -	igt_subtest("create-engines-noleak")
> > -		create_engines(xe, NOLEAK);
> > +	igt_subtest("create-execqueues-noleak")
> > +		create_execqueues(xe, NOLEAK);
> >  
> > -	igt_subtest("create-engines-leak")
> > -		create_engines(xe, LEAK);
> > +	igt_subtest("create-execqueues-leak")
> > +		create_execqueues(xe, LEAK);
> >  
> >  	igt_subtest("create-massive-size") {
> >  		create_massive_size(xe);
> > diff --git a/tests/xe/xe_dma_buf_sync.c b/tests/xe/xe_dma_buf_sync.c
> > index 4e76d85ab..29d675154 100644
> > --- a/tests/xe/xe_dma_buf_sync.c
> > +++ b/tests/xe/xe_dma_buf_sync.c
> > @@ -98,7 +98,7 @@ test_export_dma_buf(struct drm_xe_engine_class_instance *hwe0,
> >  	int dma_buf_fd[MAX_N_BO];
> >  	uint32_t import_bo[MAX_N_BO];
> >  	uint32_t vm[N_FD];
> > -	uint32_t engine[N_FD];
> > +	uint32_t exec_queue[N_FD];
> >  	size_t bo_size;
> >  	struct {
> >  		struct xe_spin spin;
> > @@ -113,7 +113,7 @@ test_export_dma_buf(struct drm_xe_engine_class_instance *hwe0,
> >  	for (i = 0; i < N_FD; ++i) {
> >  		fd[i] = drm_open_driver(DRIVER_XE);
> >  		vm[i] = xe_vm_create(fd[i], 0, 0);
> > -		engine[i] = xe_engine_create(fd[i], vm[i], !i ? hwe0 : hwe1, 0);
> > +		exec_queue[i] = xe_exec_queue_create(fd[i], vm[i], !i ? hwe0 : hwe1, 0);
> >  	}
> >  
> >  	bo_size = sizeof(*data[0]) * N_FD;
> > @@ -159,7 +159,7 @@ test_export_dma_buf(struct drm_xe_engine_class_instance *hwe0,
> >  
> >  		/* Write spinner on FD[0] */
> >  		xe_spin_init(&data[i]->spin, spin_addr, true);
> > -		exec.engine_id = engine[0];
> > +		exec.exec_queue_id = exec_queue[0];
> >  		exec.address = spin_addr;
> >  		xe_exec(fd[0], &exec);
> >  
> > @@ -187,7 +187,7 @@ test_export_dma_buf(struct drm_xe_engine_class_instance *hwe0,
> >  		igt_assert(b <= ARRAY_SIZE(data[i]->batch));
> >  		sync[0].handle = syncobj;
> >  		sync[1].handle = syncobj_create(fd[1], 0);
> > -		exec.engine_id = engine[1];
> > +		exec.exec_queue_id = exec_queue[1];
> >  		exec.address = batch_addr;
> >  		exec.num_syncs = 2;
> >  		xe_exec(fd[1], &exec);
> > diff --git a/tests/xe/xe_evict.c b/tests/xe/xe_evict.c
> > index c44cb80dc..b8f186809 100644
> > --- a/tests/xe/xe_evict.c
> > +++ b/tests/xe/xe_evict.c
> > @@ -20,22 +20,22 @@
> >  #include "xe/xe_query.h"
> >  #include <string.h>
> >  
> > -#define MAX_N_ENGINES 16
> > -#define MULTI_VM	(0x1 << 0)
> > -#define THREADED	(0x1 << 1)
> > -#define MIXED_THREADS	(0x1 << 2)
> > -#define LEGACY_THREAD	(0x1 << 3)
> > -#define COMPUTE_THREAD	(0x1 << 4)
> > -#define EXTERNAL_OBJ	(0x1 << 5)
> > -#define BIND_ENGINE	(0x1 << 6)
> > +#define MAX_N_EXEC_QUEUES	16
> > +#define MULTI_VM			(0x1 << 0)
> > +#define THREADED			(0x1 << 1)
> > +#define MIXED_THREADS		(0x1 << 2)
> > +#define LEGACY_THREAD		(0x1 << 3)
> > +#define COMPUTE_THREAD		(0x1 << 4)
> > +#define EXTERNAL_OBJ		(0x1 << 5)
> > +#define BIND_EXEC_QUEUE		(0x1 << 6)
> >  
> >  static void
> >  test_evict(int fd, struct drm_xe_engine_class_instance *eci,
> > -	   int n_engines, int n_execs, size_t bo_size,
> > +	   int n_exec_queues, int n_execs, size_t bo_size,
> >  	   unsigned long flags, pthread_barrier_t *barrier)
> >  {
> >  	uint32_t vm, vm2, vm3;
> > -	uint32_t bind_engines[3] = { 0, 0, 0 };
> > +	uint32_t bind_exec_queues[3] = { 0, 0, 0 };
> >  	uint64_t addr = 0x100000000, base_addr = 0x100000000;
> >  	struct drm_xe_sync sync[2] = {
> >  		{ .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL, },
> > @@ -46,8 +46,8 @@ test_evict(int fd, struct drm_xe_engine_class_instance *eci,
> >  		.num_syncs = 2,
> >  		.syncs = to_user_pointer(sync),
> >  	};
> > -	uint32_t engines[MAX_N_ENGINES];
> > -	uint32_t syncobjs[MAX_N_ENGINES];
> > +	uint32_t exec_queues[MAX_N_EXEC_QUEUES];
> > +	uint32_t syncobjs[MAX_N_EXEC_QUEUES];
> >  	uint32_t *bo;
> >  	struct {
> >  		uint32_t batch[16];
> > @@ -56,7 +56,7 @@ test_evict(int fd, struct drm_xe_engine_class_instance *eci,
> >  	} *data;
> >  	int i, b;
> >  
> > -	igt_assert(n_engines <= MAX_N_ENGINES);
> > +	igt_assert(n_exec_queues <= MAX_N_EXEC_QUEUES);
> >  
> >  	bo = calloc(n_execs / 2, sizeof(*bo));
> >  	igt_assert(bo);
> > @@ -64,23 +64,23 @@ test_evict(int fd, struct drm_xe_engine_class_instance *eci,
> >  	fd = drm_open_driver(DRIVER_XE);
> >  
> >  	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
> > -	if (flags & BIND_ENGINE)
> > -		bind_engines[0] = xe_bind_engine_create(fd, vm, 0);
> > +	if (flags & BIND_EXEC_QUEUE)
> > +		bind_exec_queues[0] = xe_bind_exec_queue_create(fd, vm, 0);
> >  	if (flags & MULTI_VM) {
> >  		vm2 = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
> >  		vm3 = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
> > -		if (flags & BIND_ENGINE) {
> > -			bind_engines[1] = xe_bind_engine_create(fd, vm2, 0);
> > -			bind_engines[2] = xe_bind_engine_create(fd, vm3, 0);
> > +		if (flags & BIND_EXEC_QUEUE) {
> > +			bind_exec_queues[1] = xe_bind_exec_queue_create(fd, vm2, 0);
> > +			bind_exec_queues[2] = xe_bind_exec_queue_create(fd, vm3, 0);
> >  		}
> >  	}
> >  
> > -	for (i = 0; i < n_engines; i++) {
> > +	for (i = 0; i < n_exec_queues; i++) {
> >  		if (flags & MULTI_VM)
> > -			engines[i] = xe_engine_create(fd, i & 1 ? vm2 : vm ,
> > +			exec_queues[i] = xe_exec_queue_create(fd, i & 1 ? vm2 : vm,
> >  						      eci, 0);
> >  		else
> > -			engines[i] = xe_engine_create(fd, vm, eci, 0);
> > +			exec_queues[i] = xe_exec_queue_create(fd, vm, eci, 0);
> >  		syncobjs[i] = syncobj_create(fd, 0);
> >  	};
> >  
> > @@ -90,7 +90,7 @@ test_evict(int fd, struct drm_xe_engine_class_instance *eci,
> >  		uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
> >  		uint64_t sdi_addr = addr + sdi_offset;
> >  		uint32_t __bo;
> > -		int e = i % n_engines;
> > +		int e = i % n_exec_queues;
> >  
> >  		if (i < n_execs / 2) {
> >                          uint32_t _vm = (flags & EXTERNAL_OBJ) &&
> > @@ -122,17 +122,17 @@ test_evict(int fd, struct drm_xe_engine_class_instance *eci,
> >  			sync[0].flags |= DRM_XE_SYNC_SIGNAL;
> >  			sync[0].handle = syncobj_create(fd, 0);
> >  			if (flags & MULTI_VM) {
> > -				xe_vm_bind_async(fd, vm3, bind_engines[2], __bo,
> > +				xe_vm_bind_async(fd, vm3, bind_exec_queues[2], __bo,
> >  						 0, addr,
> >  						 bo_size, sync, 1);
> >  				igt_assert(syncobj_wait(fd, &sync[0].handle, 1,
> >  							INT64_MAX, 0, NULL));
> >  				xe_vm_bind_async(fd, i & 1 ? vm2 : vm,
> > -						 i & 1 ? bind_engines[1] :
> > -						 bind_engines[0], __bo,
> > +						 i & 1 ? bind_exec_queues[1] :
> > +						 bind_exec_queues[0], __bo,
> >  						 0, addr, bo_size, sync, 1);
> >  			} else {
> > -				xe_vm_bind_async(fd, vm, bind_engines[0],
> > +				xe_vm_bind_async(fd, vm, bind_exec_queues[0],
> >  						 __bo, 0, addr, bo_size,
> >  						 sync, 1);
> >  			}
> > @@ -148,11 +148,11 @@ test_evict(int fd, struct drm_xe_engine_class_instance *eci,
> >  		igt_assert(b <= ARRAY_SIZE(data[i].batch));
> >  
> >  		sync[0].flags &= ~DRM_XE_SYNC_SIGNAL;
> > -		if (i >= n_engines)
> > +		if (i >= n_exec_queues)
> >  			syncobj_reset(fd, &syncobjs[e], 1);
> >  		sync[1].handle = syncobjs[e];
> >  
> > -		exec.engine_id = engines[e];
> > +		exec.exec_queue_id = exec_queues[e];
> >  		exec.address = batch_addr;
> >  		igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_EXEC, &exec), 0);
> >  
> > @@ -166,7 +166,7 @@ test_evict(int fd, struct drm_xe_engine_class_instance *eci,
> >  	}
> >  	munmap(data, ALIGN(sizeof(*data) * n_execs, 0x1000));
> >  
> > -	for (i = 0; i < n_engines; i++)
> > +	for (i = 0; i < n_exec_queues; i++)
> >  		igt_assert(syncobj_wait(fd, &syncobjs[i], 1, INT64_MAX, 0,
> >  					NULL));
> >  	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
> > @@ -184,14 +184,14 @@ test_evict(int fd, struct drm_xe_engine_class_instance *eci,
> >  	munmap(data, ALIGN(sizeof(*data) * n_execs, 0x1000));
> >  
> >  	syncobj_destroy(fd, sync[0].handle);
> > -	for (i = 0; i < n_engines; i++) {
> > +	for (i = 0; i < n_exec_queues; i++) {
> >  		syncobj_destroy(fd, syncobjs[i]);
> > -		xe_engine_destroy(fd, engines[i]);
> > +		xe_exec_queue_destroy(fd, exec_queues[i]);
> >  	}
> >  
> >  	for (i = 0; i < 3; i++)
> > -		if (bind_engines[i])
> > -			xe_engine_destroy(fd, bind_engines[i]);
> > +		if (bind_exec_queues[i])
> > +			xe_exec_queue_destroy(fd, bind_exec_queues[i]);
> >  
> >  	for (i = 0; i < n_execs / 2; i++)
> >  		gem_close(fd, bo[i]);
> > @@ -206,11 +206,11 @@ test_evict(int fd, struct drm_xe_engine_class_instance *eci,
> >  
> >  static void
> >  test_evict_cm(int fd, struct drm_xe_engine_class_instance *eci,
> > -	      int n_engines, int n_execs, size_t bo_size, unsigned long flags,
> > +	      int n_exec_queues, int n_execs, size_t bo_size, unsigned long flags,
> >  	      pthread_barrier_t *barrier)
> >  {
> >  	uint32_t vm, vm2;
> > -	uint32_t bind_engines[2] = { 0, 0 };
> > +	uint32_t bind_exec_queues[2] = { 0, 0 };
> >  	uint64_t addr = 0x100000000, base_addr = 0x100000000;
> >  #define USER_FENCE_VALUE	0xdeadbeefdeadbeefull
> >  	struct drm_xe_sync sync[1] = {
> > @@ -222,7 +222,7 @@ test_evict_cm(int fd, struct drm_xe_engine_class_instance *eci,
> >  		.num_syncs = 1,
> >  		.syncs = to_user_pointer(sync),
> >  	};
> > -	uint32_t engines[MAX_N_ENGINES];
> > +	uint32_t exec_queues[MAX_N_EXEC_QUEUES];
> >  	uint32_t *bo;
> >  	struct {
> >  		uint32_t batch[16];
> > @@ -233,7 +233,7 @@ test_evict_cm(int fd, struct drm_xe_engine_class_instance *eci,
> >  	} *data;
> >  	int i, b;
> >  
> > -	igt_assert(n_engines <= MAX_N_ENGINES);
> > +	igt_assert(n_exec_queues <= MAX_N_EXEC_QUEUES);
> >  
> >  	bo = calloc(n_execs / 2, sizeof(*bo));
> >  	igt_assert(bo);
> > @@ -242,28 +242,28 @@ test_evict_cm(int fd, struct drm_xe_engine_class_instance *eci,
> >  
> >  	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS |
> >  			  DRM_XE_VM_CREATE_COMPUTE_MODE, 0);
> > -	if (flags & BIND_ENGINE)
> > -		bind_engines[0] = xe_bind_engine_create(fd, vm, 0);
> > +	if (flags & BIND_EXEC_QUEUE)
> > +		bind_exec_queues[0] = xe_bind_exec_queue_create(fd, vm, 0);
> >  	if (flags & MULTI_VM) {
> >  		vm2 = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS |
> >  				   DRM_XE_VM_CREATE_COMPUTE_MODE, 0);
> > -		if (flags & BIND_ENGINE)
> > -			bind_engines[1] = xe_bind_engine_create(fd, vm2, 0);
> > +		if (flags & BIND_EXEC_QUEUE)
> > +			bind_exec_queues[1] = xe_bind_exec_queue_create(fd, vm2, 0);
> >  	}
> >  
> > -	for (i = 0; i < n_engines; i++) {
> > -		struct drm_xe_ext_engine_set_property ext = {
> > +	for (i = 0; i < n_exec_queues; i++) {
> > +		struct drm_xe_ext_exec_queue_set_property ext = {
> >  			.base.next_extension = 0,
> > -			.base.name = XE_ENGINE_EXTENSION_SET_PROPERTY,
> > -			.property = XE_ENGINE_SET_PROPERTY_COMPUTE_MODE,
> > +			.base.name = XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
> > +			.property = XE_EXEC_QUEUE_SET_PROPERTY_COMPUTE_MODE,
> >  			.value = 1,
> >  		};
> >  
> >  		if (flags & MULTI_VM)
> > -			engines[i] = xe_engine_create(fd, i & 1 ? vm2 : vm, eci,
> > +			exec_queues[i] = xe_exec_queue_create(fd, i & 1 ? vm2 : vm, eci,
> >  						      to_user_pointer(&ext));
> >  		else
> > -			engines[i] = xe_engine_create(fd, vm, eci,
> > +			exec_queues[i] = xe_exec_queue_create(fd, vm, eci,
> >  						      to_user_pointer(&ext));
> >  	}
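
Nit: several continuation lines (the "eci, 0);" and
"to_user_pointer(&ext));" arguments above) are still aligned to the old
xe_engine_create() open paren, and some of the renamed #define columns
further down drifted the same way. Worth a quick re-indent pass.

While reading this: the set-property extension chains through
base.next_extension, so more than one property can be attached to a
single create call. A minimal sketch, assuming the priority property got
the same rename in the header:

	struct drm_xe_ext_exec_queue_set_property prio = {
		.base.name = XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
		.property = XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
		.value = 0,
	};
	struct drm_xe_ext_exec_queue_set_property compute = {
		.base.next_extension = to_user_pointer(&prio),
		.base.name = XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
		.property = XE_EXEC_QUEUE_SET_PROPERTY_COMPUTE_MODE,
		.value = 1,
	};

	exec_queue = xe_exec_queue_create(fd, vm, eci,
					  to_user_pointer(&compute));
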
> >  
> > @@ -273,7 +273,7 @@ test_evict_cm(int fd, struct drm_xe_engine_class_instance *eci,
> >  		uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
> >  		uint64_t sdi_addr = addr + sdi_offset;
> >  		uint32_t __bo;
> > -		int e = i % n_engines;
> > +		int e = i % n_exec_queues;
> >  
> >  		if (i < n_execs / 2) {
> >                          uint32_t _vm = (flags & EXTERNAL_OBJ) &&
> > @@ -307,11 +307,11 @@ test_evict_cm(int fd, struct drm_xe_engine_class_instance *eci,
> >  			sync[0].addr = to_user_pointer(&data[i].vm_sync);
> >  			if (flags & MULTI_VM) {
> >  				xe_vm_bind_async(fd, i & 1 ? vm2 : vm,
> > -						 i & 1 ? bind_engines[1] :
> > -						 bind_engines[0], __bo,
> > +						 i & 1 ? bind_exec_queues[1] :
> > +						 bind_exec_queues[0], __bo,
> >  						 0, addr, bo_size, sync, 1);
> >  			} else {
> > -				xe_vm_bind_async(fd, vm, bind_engines[0], __bo,
> > +				xe_vm_bind_async(fd, vm, bind_exec_queues[0], __bo,
> >  						 0, addr, bo_size, sync, 1);
> >  			}
> >  #define TWENTY_SEC	MS_TO_NS(20000)
> > @@ -330,7 +330,7 @@ test_evict_cm(int fd, struct drm_xe_engine_class_instance *eci,
> >  		data[i].batch[b++] = MI_BATCH_BUFFER_END;
> >  		igt_assert(b <= ARRAY_SIZE(data[i].batch));
> >  
> > -		exec.engine_id = engines[e];
> > +		exec.exec_queue_id = exec_queues[e];
> >  		exec.address = batch_addr;
> >  		igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_EXEC, &exec), 0);
> >  
> > @@ -356,12 +356,12 @@ test_evict_cm(int fd, struct drm_xe_engine_class_instance *eci,
> >  	}
> >  	munmap(data, ALIGN(sizeof(*data) * n_execs, 0x1000));
> >  
> > -	for (i = 0; i < n_engines; i++)
> > -		xe_engine_destroy(fd, engines[i]);
> > +	for (i = 0; i < n_exec_queues; i++)
> > +		xe_exec_queue_destroy(fd, exec_queues[i]);
> >  
> >  	for (i = 0; i < 2; i++)
> > -		if (bind_engines[i])
> > -			xe_engine_destroy(fd, bind_engines[i]);
> > +		if (bind_exec_queues[i])
> > +			xe_exec_queue_destroy(fd, bind_exec_queues[i]);
> >  
> >  	for (i = 0; i < n_execs / 2; i++)
> >  		gem_close(fd, bo[i]);
> > @@ -379,7 +379,7 @@ struct thread_data {
> >  	pthread_barrier_t *barrier;
> >  	int fd;
> >  	struct drm_xe_engine_class_instance *eci;
> > -	int n_engines;
> > +	int n_exec_queues;
> >  	int n_execs;
> >  	uint64_t bo_size;
> >  	int flags;
> > @@ -396,10 +396,10 @@ static void *thread(void *data)
> >  	pthread_mutex_unlock(t->mutex);
> >  
> >  	if (t->flags & COMPUTE_THREAD)
> > -		test_evict_cm(t->fd, t->eci, t->n_engines, t->n_execs,
> > +		test_evict_cm(t->fd, t->eci, t->n_exec_queues, t->n_execs,
> >  			      t->bo_size, t->flags, t->barrier);
> >  	else
> > -		test_evict(t->fd, t->eci, t->n_engines, t->n_execs,
> > +		test_evict(t->fd, t->eci, t->n_exec_queues, t->n_execs,
> >  			   t->bo_size, t->flags, t->barrier);
> >  
> >  	return NULL;
> > @@ -407,7 +407,7 @@ static void *thread(void *data)
> >  
> >  static void
> >  threads(int fd, struct drm_xe_engine_class_instance *eci,
> > -	int n_threads, int n_engines, int n_execs, size_t bo_size,
> > +	int n_threads, int n_exec_queues, int n_execs, size_t bo_size,
> >  	unsigned long flags)
> >  {
> >  	pthread_barrier_t barrier;
> > @@ -430,7 +430,7 @@ threads(int fd, struct drm_xe_engine_class_instance *eci,
> >  		threads_data[i].barrier = &barrier;
> >  		threads_data[i].fd = fd;
> >  		threads_data[i].eci = eci;
> > -		threads_data[i].n_engines = n_engines;
> > +		threads_data[i].n_exec_queues = n_exec_queues;
> >  		threads_data[i].n_execs = n_execs;
> >  		threads_data[i].bo_size = bo_size;
> >  		threads_data[i].flags = flags;
> > @@ -477,12 +477,12 @@ static uint64_t calc_bo_size(uint64_t vram_size, int mul, int div)
> >   * @large:			large
> >   * @large-external:		large external
> >   * @large-multi-vm:		large multi VM
> > - * @beng-small:			small bind engine
> > - * @beng-small-external:	small external bind engine
> > + * @beng-small:			small bind exec_queue
> > + * @beng-small-external:	small external bind exec_queue
> > - * @beng-small-multi-vm:	small multi VM bind ending
> > + * @beng-small-multi-vm:	small multi VM bind exec_queue
> > - * @beng-large:			large bind engine
> > - * @beng-large-external:	large external bind engine
> > - * @beng-large-multi-vm:	large multi VM bind engine
> > + * @beng-large:			large bind exec_queue
> > + * @beng-large-external:	large external bind exec_queue
> > + * @beng-large-multi-vm:	large multi VM bind exec_queue
> >   *
> >   * @small-cm:			small compute machine
> >   * @small-external-cm:		small external compute machine
> > @@ -490,12 +490,12 @@ static uint64_t calc_bo_size(uint64_t vram_size, int mul, int div)
> >   * @large-cm:			large compute machine
> >   * @large-external-cm:		large external compute machine
> >   * @large-multi-vm-cm:		large multi VM compute machine
> > - * @beng-small-cm:		small bind engine compute machine
> > - * @beng-small-external-cm:	small external bind engine compute machine
> > + * @beng-small-cm:		small bind exec_queue compute machine
> > + * @beng-small-external-cm:	small external bind exec_queue compute machine
> > - * @beng-small-multi-vm-cm:	small multi VM bind ending compute machine
> > + * @beng-small-multi-vm-cm:	small multi VM bind exec_queue compute machine
> > - * @beng-large-cm:		large bind engine compute machine
> > - * @beng-large-external-cm:	large external bind engine compute machine
> > - * @beng-large-multi-vm-cm:	large multi VM bind engine compute machine
> > + * @beng-large-cm:		large bind exec_queue compute machine
> > + * @beng-large-external-cm:	large external bind exec_queue compute machine
> > + * @beng-large-multi-vm-cm:	large multi VM bind exec_queue compute machine
> >   *
> >   * @threads-small:		threads small
> >   * @cm-threads-small:		compute mode threads small
> > @@ -513,28 +513,28 @@ static uint64_t calc_bo_size(uint64_t vram_size, int mul, int div)
> >   * @cm-threads-large-multi-vm:	compute mode threads large multi vm
> >   * @mixed-threads-large-multi-vm:
> >   *				mixed threads large multi vm
> > - * @beng-threads-small:		bind engine threads small
> > - * @beng-cm-threads-small:	bind engine compute mode threads small
> > - * @beng-mixed-threads-small:	bind engine mixed threads small
> > + * @beng-threads-small:		bind exec_queue threads small
> > + * @beng-cm-threads-small:	bind exec_queue compute mode threads small
> > + * @beng-mixed-threads-small:	bind exec_queue mixed threads small
> >   * @beng-mixed-many-threads-small:
> > - *				bind engine mixed many threads small
> > - * @beng-threads-large:		bind engine threads large
> > - * @beng-cm-threads-large:	bind engine compute mode threads large
> > - * @beng-mixed-threads-large:	bind engine mixed threads large
> > + *				bind exec_queue mixed many threads small
> > + * @beng-threads-large:		bind exec_queue threads large
> > + * @beng-cm-threads-large:	bind exec_queue compute mode threads large
> > + * @beng-mixed-threads-large:	bind exec_queue mixed threads large
> >   * @beng-mixed-many-threads-large:
> > - *				bind engine mixed many threads large
> > + *				bind exec_queue mixed many threads large
> >   * @beng-threads-small-multi-vm:
> > - *				bind engine threads small multi vm
> > + *				bind exec_queue threads small multi vm
> >   * @beng-cm-threads-small-multi-vm:
> > - *				bind engine compute mode threads small multi vm
> > + *				bind exec_queue compute mode threads small multi vm
> >   * @beng-mixed-threads-small-multi-vm:
> > - *				bind engine mixed threads small multi vm
> > + *				bind exec_queue mixed threads small multi vm
> >   * @beng-threads-large-multi-vm:
> > - *				bind engine threads large multi vm
> > + *				bind exec_queue threads large multi vm
> >   * @beng-cm-threads-large-multi-vm:
> > - *				bind engine compute mode threads large multi vm
> > + *				bind exec_queue compute mode threads large multi vm
> >   * @beng-mixed-threads-large-multi-vm:
> > - *				bind engine mixed threads large multi vm
> > + *				bind exec_queue mixed threads large multi vm
> >   */
> >  
> >  /*
> > @@ -552,7 +552,7 @@ igt_main
> >  	struct drm_xe_engine_class_instance *hwe;
> >  	const struct section {
> >  		const char *name;
> > -		int n_engines;
> > +		int n_exec_queues;
> >  		int n_execs;
> >  		int mul;
> >  		int div;
> > @@ -564,20 +564,20 @@ igt_main
> >  		{ "large", 4, 16, 1, 4, 0 },
> >  		{ "large-external", 4, 16, 1, 4, EXTERNAL_OBJ },
> >  		{ "large-multi-vm", 4, 8, 3, 8, MULTI_VM },
> > -		{ "beng-small", 16, 448, 1, 128, BIND_ENGINE },
> > -		{ "beng-small-external", 16, 448, 1, 128, BIND_ENGINE |
> > +		{ "beng-small", 16, 448, 1, 128, BIND_EXEC_QUEUE },
> > +		{ "beng-small-external", 16, 448, 1, 128, BIND_EXEC_QUEUE |
> >  			EXTERNAL_OBJ },
> > -		{ "beng-small-multi-vm", 16, 256, 1, 128, BIND_ENGINE |
> > +		{ "beng-small-multi-vm", 16, 256, 1, 128, BIND_EXEC_QUEUE |
> >  			MULTI_VM },
> > -		{ "beng-large", 4, 16, 1, 4, BIND_ENGINE },
> > -		{ "beng-large-external", 4, 16, 1, 4, BIND_ENGINE |
> > +		{ "beng-large", 4, 16, 1, 4, BIND_EXEC_QUEUE },
> > +		{ "beng-large-external", 4, 16, 1, 4, BIND_EXEC_QUEUE |
> >  			EXTERNAL_OBJ },
> > -		{ "beng-large-multi-vm", 4, 8, 3, 8, BIND_ENGINE | MULTI_VM },
> > +		{ "beng-large-multi-vm", 4, 8, 3, 8, BIND_EXEC_QUEUE | MULTI_VM },
> >  		{ NULL },
> >  	};
> >  	const struct section_cm {
> >  		const char *name;
> > -		int n_engines;
> > +		int n_exec_queues;
> >  		int n_execs;
> >  		int mul;
> >  		int div;
> > @@ -589,22 +589,22 @@ igt_main
> >  		{ "large-cm", 4, 16, 1, 4, 0 },
> >  		{ "large-external-cm", 4, 16, 1, 4, EXTERNAL_OBJ },
> >  		{ "large-multi-vm-cm", 4, 8, 3, 8, MULTI_VM },
> > -		{ "beng-small-cm", 16, 448, 1, 128, BIND_ENGINE },
> > -		{ "beng-small-external-cm", 16, 448, 1, 128, BIND_ENGINE |
> > +		{ "beng-small-cm", 16, 448, 1, 128, BIND_EXEC_QUEUE },
> > +		{ "beng-small-external-cm", 16, 448, 1, 128, BIND_EXEC_QUEUE |
> >  			EXTERNAL_OBJ },
> > -		{ "beng-small-multi-vm-cm", 16, 256, 1, 128, BIND_ENGINE |
> > +		{ "beng-small-multi-vm-cm", 16, 256, 1, 128, BIND_EXEC_QUEUE |
> >  			MULTI_VM },
> > -		{ "beng-large-cm", 4, 16, 1, 4, BIND_ENGINE },
> > -		{ "beng-large-external-cm", 4, 16, 1, 4, BIND_ENGINE |
> > +		{ "beng-large-cm", 4, 16, 1, 4, BIND_EXEC_QUEUE },
> > +		{ "beng-large-external-cm", 4, 16, 1, 4, BIND_EXEC_QUEUE |
> >  			EXTERNAL_OBJ },
> > -		{ "beng-large-multi-vm-cm", 4, 8, 3, 8, BIND_ENGINE |
> > +		{ "beng-large-multi-vm-cm", 4, 8, 3, 8, BIND_EXEC_QUEUE |
> >  			MULTI_VM },
> >  		{ NULL },
> >  	};
> >  	const struct section_threads {
> >  		const char *name;
> >  		int n_threads;
> > -		int n_engines;
> > +		int n_exec_queues;
> >  		int n_execs;
> >  		int mul;
> >  		int div;
> > @@ -639,33 +639,33 @@ igt_main
> >  		{ "mixed-threads-large-multi-vm", 2, 2, 4, 3, 8,
> >  			MIXED_THREADS | MULTI_VM | THREADED },
> >  		{ "beng-threads-small", 2, 16, 128, 1, 128,
> > -			THREADED | BIND_ENGINE },
> > +			THREADED | BIND_EXEC_QUEUE },
> >  		{ "beng-cm-threads-small", 2, 16, 128, 1, 128,
> > -			COMPUTE_THREAD | THREADED | BIND_ENGINE },
> > +			COMPUTE_THREAD | THREADED | BIND_EXEC_QUEUE },
> >  		{ "beng-mixed-threads-small", 2, 16, 128, 1, 128,
> > -			MIXED_THREADS | THREADED | BIND_ENGINE },
> > +			MIXED_THREADS | THREADED | BIND_EXEC_QUEUE },
> >  		{ "beng-mixed-many-threads-small", 3, 16, 128, 1, 128,
> > -			THREADED | BIND_ENGINE },
> > +			THREADED | BIND_EXEC_QUEUE },
> >  		{ "beng-threads-large", 2, 2, 4, 3, 8,
> > -			THREADED | BIND_ENGINE },
> > +			THREADED | BIND_EXEC_QUEUE },
> >  		{ "beng-cm-threads-large", 2, 2, 4, 3, 8,
> > -			COMPUTE_THREAD | THREADED | BIND_ENGINE },
> > +			COMPUTE_THREAD | THREADED | BIND_EXEC_QUEUE },
> >  		{ "beng-mixed-threads-large", 2, 2, 4, 3, 8,
> > -			MIXED_THREADS | THREADED | BIND_ENGINE },
> > +			MIXED_THREADS | THREADED | BIND_EXEC_QUEUE },
> >  		{ "beng-mixed-many-threads-large", 3, 2, 4, 3, 8,
> > -			THREADED | BIND_ENGINE },
> > +			THREADED | BIND_EXEC_QUEUE },
> >  		{ "beng-threads-small-multi-vm", 2, 16, 128, 1, 128,
> > -			MULTI_VM | THREADED | BIND_ENGINE },
> > +			MULTI_VM | THREADED | BIND_EXEC_QUEUE },
> >  		{ "beng-cm-threads-small-multi-vm", 2, 16, 128, 1, 128,
> > -			COMPUTE_THREAD | MULTI_VM | THREADED | BIND_ENGINE },
> > +			COMPUTE_THREAD | MULTI_VM | THREADED | BIND_EXEC_QUEUE },
> >  		{ "beng-mixed-threads-small-multi-vm", 2, 16, 128, 1, 128,
> > -			MIXED_THREADS | MULTI_VM | THREADED | BIND_ENGINE },
> > +			MIXED_THREADS | MULTI_VM | THREADED | BIND_EXEC_QUEUE },
> >  		{ "beng-threads-large-multi-vm", 2, 2, 4, 3, 8,
> > -			MULTI_VM | THREADED | BIND_ENGINE },
> > +			MULTI_VM | THREADED | BIND_EXEC_QUEUE },
> >  		{ "beng-cm-threads-large-multi-vm", 2, 2, 4, 3, 8,
> > -			COMPUTE_THREAD | MULTI_VM | THREADED | BIND_ENGINE },
> > +			COMPUTE_THREAD | MULTI_VM | THREADED | BIND_EXEC_QUEUE },
> >  		{ "beng-mixed-threads-large-multi-vm", 2, 2, 4, 3, 8,
> > -			MIXED_THREADS | MULTI_VM | THREADED | BIND_ENGINE },
> > +			MIXED_THREADS | MULTI_VM | THREADED | BIND_EXEC_QUEUE },
> >  		{ NULL },
> >  	};
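
One observable side effect of the rename: subtest names derived from
"many-engines" become "many-execqueues" (e.g. something like
xe_exec_basic@many-execqueues-basic after this patch), while the "beng-"
prefixes in the tables above are kept as-is. I assume keeping "beng-" is
deliberate to limit churn in external test lists, but it might deserve a
note in the commit message.
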
> >  	uint64_t vram_size;
> > @@ -684,21 +684,21 @@ igt_main
> >  
> >  	for (const struct section *s = sections; s->name; s++) {
> >  		igt_subtest_f("evict-%s", s->name)
> > -			test_evict(-1, hwe, s->n_engines, s->n_execs,
> > +			test_evict(-1, hwe, s->n_exec_queues, s->n_execs,
> >  				   calc_bo_size(vram_size, s->mul, s->div),
> >  				   s->flags, NULL);
> >  	}
> >  
> >  	for (const struct section_cm *s = sections_cm; s->name; s++) {
> >  		igt_subtest_f("evict-%s", s->name)
> > -			test_evict_cm(-1, hwe, s->n_engines, s->n_execs,
> > +			test_evict_cm(-1, hwe, s->n_exec_queues, s->n_execs,
> >  				      calc_bo_size(vram_size, s->mul, s->div),
> >  				      s->flags, NULL);
> >  	}
> >  
> >  	for (const struct section_threads *s = sections_threads; s->name; s++) {
> >  		igt_subtest_f("evict-%s", s->name)
> > -			threads(-1, hwe, s->n_threads, s->n_engines,
> > +			threads(-1, hwe, s->n_threads, s->n_exec_queues,
> >  				 s->n_execs,
> >  				 calc_bo_size(vram_size, s->mul, s->div),
> >  				 s->flags);
> > diff --git a/tests/xe/xe_exec_balancer.c b/tests/xe/xe_exec_balancer.c
> > index 0b00d93de..f364a4b7a 100644
> > --- a/tests/xe/xe_exec_balancer.c
> > +++ b/tests/xe/xe_exec_balancer.c
> > @@ -4,10 +4,10 @@
> >   */
> >  
> >  /**
> > - * TEST: Basic tests for execbuf functionality for virtual and parallel engines
> > + * TEST: Basic tests for execbuf functionality for virtual and parallel exec_queues
> >   * Category: Hardware building block
> >   * Sub-category: execbuf
> > - * Functionality: virtual and parallel engines
> > + * Functionality: virtual and parallel exec_queues
> >   * Test category: functionality test
> >   */
> >  
> > @@ -28,7 +28,7 @@
> >  /**
> >   * SUBTEST: virtual-all-active
> >   * Description:
> > - * 	Run a test to check if virtual engines can be running on all instances
> > + * 	Run a test to check if virtual exec_queues can run on all instances
> >   *	of a class simultaneously
> >   * Run type: FULL
> >   */
> > @@ -45,7 +45,7 @@ static void test_all_active(int fd, int gt, int class)
> >  		.num_syncs = 2,
> >  		.syncs = to_user_pointer(sync),
> >  	};
> > -	uint32_t engines[MAX_INSTANCE];
> > +	uint32_t exec_queues[MAX_INSTANCE];
> >  	uint32_t syncobjs[MAX_INSTANCE];
> >  	size_t bo_size;
> >  	uint32_t bo = 0;
> > @@ -73,16 +73,16 @@ static void test_all_active(int fd, int gt, int class)
> >  	data = xe_bo_map(fd, bo, bo_size);
> >  
> >  	for (i = 0; i < num_placements; i++) {
> > -		struct drm_xe_engine_create create = {
> > +		struct drm_xe_exec_queue_create create = {
> >  			.vm_id = vm,
> >  			.width = 1,
> >  			.num_placements = num_placements,
> >  			.instances = to_user_pointer(eci),
> >  		};
> >  
> > -		igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_ENGINE_CREATE,
> > +		igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE,
> >  					&create), 0);
> > -		engines[i] = create.engine_id;
> > +		exec_queues[i] = create.exec_queue_id;
> >  		syncobjs[i] = syncobj_create(fd, 0);
> >  	};
> >  
> > @@ -98,7 +98,7 @@ static void test_all_active(int fd, int gt, int class)
> >  		sync[1].flags |= DRM_XE_SYNC_SIGNAL;
> >  		sync[1].handle = syncobjs[i];
> >  
> > -		exec.engine_id = engines[i];
> > +		exec.exec_queue_id = exec_queues[i];
> >  		exec.address = spin_addr;
> >  		xe_exec(fd, &exec);
> >  		xe_spin_wait_started(&data[i].spin);
> > @@ -118,7 +118,7 @@ static void test_all_active(int fd, int gt, int class)
> >  	syncobj_destroy(fd, sync[0].handle);
> >  	for (i = 0; i < num_placements; i++) {
> >  		syncobj_destroy(fd, syncobjs[i]);
> > -		xe_engine_destroy(fd, engines[i]);
> > +		xe_exec_queue_destroy(fd, exec_queues[i]);
> >  	}
> >  
> >  	munmap(data, bo_size);
> > @@ -126,8 +126,8 @@ static void test_all_active(int fd, int gt, int class)
> >  	xe_vm_destroy(fd, vm);
> >  }
> >  
> > -#define MAX_N_ENGINES 16
> > -#define USERPTR		(0x1 << 0)
> > +#define MAX_N_EXEC_QUEUES	16
> > +#define USERPTR				(0x1 << 0)
> >  #define REBIND		(0x1 << 1)
> >  #define INVALIDATE	(0x1 << 2)
> >  #define RACE		(0x1 << 3)
> > @@ -143,8 +143,8 @@ static void test_all_active(int fd, int gt, int class)
> >   * Description: Run %arg[1] test many times
> >   * Run type: FULL
> >   *
> > - * SUBTEST: many-engines-%s
> > - * Description: Run %arg[1] test on many engines
> > + * SUBTEST: many-execqueues-%s
> > + * Description: Run %arg[1] test on many exec_queues
> >   * Run type: FULL
> >   *
> >   * SUBTEST: twice-%s
> > @@ -171,7 +171,7 @@ static void test_all_active(int fd, int gt, int class)
> >   * @parallel-userptr-invalidate-race:	parallel userptr invalidate racy
> >   */
> >  static void
> > -test_exec(int fd, int gt, int class, int n_engines, int n_execs,
> > +test_exec(int fd, int gt, int class, int n_exec_queues, int n_execs,
> >  	  unsigned int flags)
> >  {
> >  	uint32_t vm;
> > @@ -184,8 +184,8 @@ test_exec(int fd, int gt, int class, int n_engines, int n_execs,
> >  		.num_syncs = 2,
> >  		.syncs = to_user_pointer(sync),
> >  	};
> > -	uint32_t engines[MAX_N_ENGINES];
> > -	uint32_t syncobjs[MAX_N_ENGINES];
> > +	uint32_t exec_queues[MAX_N_EXEC_QUEUES];
> > +	uint32_t syncobjs[MAX_N_EXEC_QUEUES];
> >  	size_t bo_size;
> >  	uint32_t bo = 0;
> >  	struct {
> > @@ -197,7 +197,7 @@ test_exec(int fd, int gt, int class, int n_engines, int n_execs,
> >  	struct drm_xe_engine_class_instance eci[MAX_INSTANCE];
> >  	int i, j, b, num_placements = 0;
> >  
> > -	igt_assert(n_engines <= MAX_N_ENGINES);
> > +	igt_assert(n_exec_queues <= MAX_N_EXEC_QUEUES);
> >  
> >  	xe_for_each_hw_engine(fd, hwe) {
> >  		if (hwe->engine_class != class || hwe->gt_id != gt)
> > @@ -229,17 +229,17 @@ test_exec(int fd, int gt, int class, int n_engines, int n_execs,
> >  		data = xe_bo_map(fd, bo, bo_size);
> >  	}
> >  
> > -	for (i = 0; i < n_engines; i++) {
> > -		struct drm_xe_engine_create create = {
> > +	for (i = 0; i < n_exec_queues; i++) {
> > +		struct drm_xe_exec_queue_create create = {
> >  			.vm_id = vm,
> >  			.width = flags & PARALLEL ? num_placements : 1,
> >  			.num_placements = flags & PARALLEL ? 1 : num_placements,
> >  			.instances = to_user_pointer(eci),
> >  		};
> >  
> > -		igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_ENGINE_CREATE,
> > +		igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE,
> >  					&create), 0);
> > -		engines[i] = create.engine_id;
> > +		exec_queues[i] = create.exec_queue_id;
> >  		syncobjs[i] = syncobj_create(fd, 0);
> >  	};
> >  	exec.num_batch_buffer = flags & PARALLEL ? num_placements : 1;
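
This line plus the create above encode the virtual/parallel split; as I
read the uAPI:

	/* virtual:  .width = 1, .num_placements = N
	 *           one batch address, scheduler picks one instance
	 * parallel: .width = N, .num_placements = 1
	 *           N batch addresses run in lockstep across instances
	 */

which is also why exec.address below switches between a single
batch_addr and to_user_pointer(batches).
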
> > @@ -257,7 +257,7 @@ test_exec(int fd, int gt, int class, int n_engines, int n_execs,
> >  		uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
> >  		uint64_t sdi_addr = addr + sdi_offset;
> >  		uint64_t batches[MAX_INSTANCE];
> > -		int e = i % n_engines;
> > +		int e = i % n_exec_queues;
> >  
> >  		for (j = 0; j < num_placements && flags & PARALLEL; ++j)
> >  			batches[j] = batch_addr;
> > @@ -274,7 +274,7 @@ test_exec(int fd, int gt, int class, int n_engines, int n_execs,
> >  		sync[1].flags |= DRM_XE_SYNC_SIGNAL;
> >  		sync[1].handle = syncobjs[e];
> >  
> > -		exec.engine_id = engines[e];
> > +		exec.exec_queue_id = exec_queues[e];
> >  		exec.address = flags & PARALLEL ?
> >  			to_user_pointer(batches) : batch_addr;
> >  		if (e != i)
> > @@ -325,7 +325,7 @@ test_exec(int fd, int gt, int class, int n_engines, int n_execs,
> >  		}
> >  	}
> >  
> > -	for (i = 0; i < n_engines && n_execs; i++)
> > +	for (i = 0; i < n_exec_queues && n_execs; i++)
> >  		igt_assert(syncobj_wait(fd, &syncobjs[i], 1, INT64_MAX, 0,
> >  					NULL));
> >  	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
> > @@ -339,9 +339,9 @@ test_exec(int fd, int gt, int class, int n_engines, int n_execs,
> >  		igt_assert_eq(data[i].data, 0xc0ffee);
> >  
> >  	syncobj_destroy(fd, sync[0].handle);
> > -	for (i = 0; i < n_engines; i++) {
> > +	for (i = 0; i < n_exec_queues; i++) {
> >  		syncobj_destroy(fd, syncobjs[i]);
> > -		xe_engine_destroy(fd, engines[i]);
> > +		xe_exec_queue_destroy(fd, exec_queues[i]);
> >  	}
> >  
> >  	if (bo) {
> > @@ -355,25 +355,25 @@ test_exec(int fd, int gt, int class, int n_engines, int n_execs,
> >  
> >  /**
> >   * SUBTEST: once-cm-%s
> > - * Description: Run compute mode virtual engine arg[1] test only once
> > + * Description: Run compute mode virtual exec_queue arg[1] test only once
> >   *
> >   * Run type: FULL
> >   *
> >   * SUBTEST: twice-cm-%s
> > - * Description: Run compute mode virtual engine arg[1] test twice
> > + * Description: Run compute mode virtual exec_queue arg[1] test twice
> >   * Run type: BAT
> >   *
> >   * SUBTEST: many-cm-%s
> > - * Description: Run compute mode virtual engine arg[1] test many times
> > + * Description: Run compute mode virtual exec_queue arg[1] test many times
> >   * Run type: FULL
> >   *
> > - * SUBTEST: many-engines-cm-%s
> > - * Description: Run compute mode virtual engine arg[1] test on many engines
> > + * SUBTEST: many-execqueues-cm-%s
> > + * Description: Run compute mode virtual exec_queue arg[1] test on many exec_queues
> >   * Run type: FULL
> >   *
> >   *
> >   * SUBTEST: no-exec-cm-%s
> > - * Description: Run compute mode virtual engine arg[1] no-exec test
> > + * Description: Run compute mode virtual exec_queue arg[1] no-exec test
> >   * Run type: BAT
> >   *
> >   * arg[1]:
> > @@ -387,7 +387,7 @@ test_exec(int fd, int gt, int class, int n_engines, int n_execs,
> >   */
> >  
> >  static void
> > -test_cm(int fd, int gt, int class, int n_engines, int n_execs,
> > +test_cm(int fd, int gt, int class, int n_exec_queues, int n_execs,
> >  	unsigned int flags)
> >  {
> >  	uint32_t vm;
> > @@ -402,7 +402,7 @@ test_cm(int fd, int gt, int class, int n_engines, int n_execs,
> >  		.num_syncs = 1,
> >  		.syncs = to_user_pointer(sync),
> >  	};
> > -	uint32_t engines[MAX_N_ENGINES];
> > +	uint32_t exec_queues[MAX_N_EXEC_QUEUES];
> >  	size_t bo_size;
> >  	uint32_t bo = 0;
> >  	struct {
> > @@ -417,7 +417,7 @@ test_cm(int fd, int gt, int class, int n_engines, int n_execs,
> >  	int i, j, b, num_placements = 0;
> >  	int map_fd = -1;
> >  
> > -	igt_assert(n_engines <= MAX_N_ENGINES);
> > +	igt_assert(n_exec_queues <= MAX_N_EXEC_QUEUES);
> >  
> >  	xe_for_each_hw_engine(fd, hwe) {
> >  		if (hwe->engine_class != class || hwe->gt_id != gt)
> > @@ -452,14 +452,14 @@ test_cm(int fd, int gt, int class, int n_engines, int n_execs,
> >  	}
> >  	memset(data, 0, bo_size);
> >  
> > -	for (i = 0; i < n_engines; i++) {
> > -		struct drm_xe_ext_engine_set_property ext = {
> > +	for (i = 0; i < n_exec_queues; i++) {
> > +		struct drm_xe_ext_exec_queue_set_property ext = {
> >  			.base.next_extension = 0,
> > -			.base.name = XE_ENGINE_EXTENSION_SET_PROPERTY,
> > -			.property = XE_ENGINE_SET_PROPERTY_COMPUTE_MODE,
> > +			.base.name = XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
> > +			.property = XE_EXEC_QUEUE_SET_PROPERTY_COMPUTE_MODE,
> >  			.value = 1,
> >  		};
> > -		struct drm_xe_engine_create create = {
> > +		struct drm_xe_exec_queue_create create = {
> >  			.vm_id = vm,
> >  			.width = 1,
> >  			.num_placements = num_placements,
> > @@ -467,9 +467,9 @@ test_cm(int fd, int gt, int class, int n_engines, int n_execs,
> >  			.extensions = to_user_pointer(&ext),
> >  		};
> >  
> > -		igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_ENGINE_CREATE,
> > +		igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE,
> >  					&create), 0);
> > -		engines[i] = create.engine_id;
> > +		exec_queues[i] = create.exec_queue_id;
> >  	}
> >  
> >  	sync[0].addr = to_user_pointer(&data[0].vm_sync);
> > @@ -488,7 +488,7 @@ test_cm(int fd, int gt, int class, int n_engines, int n_execs,
> >  		uint64_t batch_addr = addr + batch_offset;
> >  		uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
> >  		uint64_t sdi_addr = addr + sdi_offset;
> > -		int e = i % n_engines;
> > +		int e = i % n_exec_queues;
> >  
> >  		b = 0;
> >  		data[i].batch[b++] = MI_STORE_DWORD_IMM_GEN4;
> > @@ -500,7 +500,7 @@ test_cm(int fd, int gt, int class, int n_engines, int n_execs,
> >  
> >  		sync[0].addr = addr + (char *)&data[i].exec_sync - (char *)data;
> >  
> > -		exec.engine_id = engines[e];
> > +		exec.exec_queue_id = exec_queues[e];
> >  		exec.address = batch_addr;
> >  		xe_exec(fd, &exec);
> >  
> > @@ -578,8 +578,8 @@ test_cm(int fd, int gt, int class, int n_engines, int n_execs,
> >  	     i < n_execs; i++)
> >  		igt_assert_eq(data[i].data, 0xc0ffee);
> >  
> > -	for (i = 0; i < n_engines; i++)
> > -		xe_engine_destroy(fd, engines[i]);
> > +	for (i = 0; i < n_exec_queues; i++)
> > +		xe_exec_queue_destroy(fd, exec_queues[i]);
> >  
> >  	if (bo) {
> >  		munmap(data, bo_size);
> > @@ -648,7 +648,7 @@ igt_main
> >  						  64 : 1024,
> >  						  s->flags);
> >  
> > -		igt_subtest_f("many-engines-%s", s->name)
> > +		igt_subtest_f("many-execqueues-%s", s->name)
> >  			xe_for_each_gt(fd, gt)
> >  				xe_for_each_hw_engine_class(class)
> >  					test_exec(fd, gt, class, 16,
> > @@ -683,7 +683,7 @@ igt_main
> >  						64 : 1024,
> >  						s->flags);
> >  
> > -		igt_subtest_f("many-engines-cm-%s", s->name)
> > +		igt_subtest_f("many-execqueues-cm-%s", s->name)
> >  			xe_for_each_gt(fd, gt)
> >  				xe_for_each_hw_engine_class(class)
> >  					test_cm(fd, gt, class, 16,
> > diff --git a/tests/xe/xe_exec_basic.c b/tests/xe/xe_exec_basic.c
> > index bf3863d48..46b8709a7 100644
> > --- a/tests/xe/xe_exec_basic.c
> > +++ b/tests/xe/xe_exec_basic.c
> > @@ -7,7 +7,7 @@
> >   * TEST: Basic tests for execbuf functionality
> >   * Category: Hardware building block
> >   * Sub-category: execbuf
> > - * Functionality: engines
> > + * Functionality: exec_queues
> >   * Test category: functionality test
> >   */
> >  
> > @@ -20,15 +20,15 @@
> >  #include "xe/xe_query.h"
> >  #include <string.h>
> >  
> > -#define MAX_N_ENGINES 16
> > -#define USERPTR		(0x1 << 0)
> > -#define REBIND		(0x1 << 1)
> > -#define INVALIDATE	(0x1 << 2)
> > -#define RACE		(0x1 << 3)
> > -#define BIND_ENGINE	(0x1 << 4)
> > -#define DEFER_ALLOC	(0x1 << 5)
> > -#define DEFER_BIND	(0x1 << 6)
> > -#define SPARSE		(0x1 << 7)
> > +#define MAX_N_EXEC_QUEUES 16
> > +#define USERPTR			(0x1 << 0)
> > +#define REBIND			(0x1 << 1)
> > +#define INVALIDATE		(0x1 << 2)
> > +#define RACE			(0x1 << 3)
> > +#define BIND_EXEC_QUEUE	(0x1 << 4)
> > +#define DEFER_ALLOC		(0x1 << 5)
> > +#define DEFER_BIND		(0x1 << 6)
> > +#define SPARSE			(0x1 << 7)
> >  
> >  /**
> >   * SUBTEST: once-%s
> > @@ -39,12 +39,12 @@
> >   * Description: Run %arg[1] test many times
> >   * Run type: FULL
> >   *
> > - * SUBTEST: many-engines-%s
> > - * Description: Run %arg[1] test on many engines
> > + * SUBTEST: many-execqueues-%s
> > + * Description: Run %arg[1] test on many exec_queues
> >   * Run type: FULL
> >   *
> > - * SUBTEST: many-engines-many-vm-%s
> > - * Description: Run %arg[1] test on many engines and many VMs
> > + * SUBTEST: many-execqueues-many-vm-%s
> > + * Description: Run %arg[1] test on many exec_queues and many VMs
> >   * Run type: FULL
> >   *
> >   * SUBTEST: twice-%s
> > @@ -65,12 +65,12 @@
> >   * @userptr-rebind:			userptr rebind
> >   * @userptr-invalidate:			userptr invalidate
> >   * @userptr-invalidate-race:		userptr invalidate racy
> > - * @bindengine:				bind engine
> > - * @bindengine-userptr:			bind engine userptr description
> > - * @bindengine-rebind:			bind engine rebind description
> > - * @bindengine-userptr-rebind:		bind engine userptr rebind
> > - * @bindengine-userptr-invalidate:	bind engine userptr invalidate
> > - * @bindengine-userptr-invalidate-race:	bind engine userptr invalidate racy
> > + * @bindexecqueue:				bind exec_queue
> > + * @bindexecqueue-userptr:			bind exec_queue userptr
> > + * @bindexecqueue-rebind:			bind exec_queue rebind
> > + * @bindexecqueue-userptr-rebind:		bind exec_queue userptr rebind
> > + * @bindexecqueue-userptr-invalidate:	bind exec_queue userptr invalidate
> > + * @bindexecqueue-userptr-invalidate-race:	bind exec_queue userptr invalidate racy
> >   * @null:				null
> >   * @null-defer-mmap:			null defer mmap
> >   * @null-defer-bind:			null defer bind
> > @@ -79,7 +79,7 @@
> >  
> >  static void
> >  test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> > -	  int n_engines, int n_execs, int n_vm, unsigned int flags)
> > +	  int n_exec_queues, int n_execs, int n_vm, unsigned int flags)
> >  {
> >  	struct drm_xe_sync sync[2] = {
> >  		{ .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL, },
> > @@ -90,13 +90,13 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  		.num_syncs = 2,
> >  		.syncs = to_user_pointer(sync),
> >  	};
> > -	uint64_t addr[MAX_N_ENGINES];
> > -	uint64_t sparse_addr[MAX_N_ENGINES];
> > -	uint32_t vm[MAX_N_ENGINES];
> > -	uint32_t engines[MAX_N_ENGINES];
> > -	uint32_t bind_engines[MAX_N_ENGINES];
> > -	uint32_t syncobjs[MAX_N_ENGINES];
> > -	uint32_t bind_syncobjs[MAX_N_ENGINES];
> > +	uint64_t addr[MAX_N_EXEC_QUEUES];
> > +	uint64_t sparse_addr[MAX_N_EXEC_QUEUES];
> > +	uint32_t vm[MAX_N_EXEC_QUEUES];
> > +	uint32_t exec_queues[MAX_N_EXEC_QUEUES];
> > +	uint32_t bind_exec_queues[MAX_N_EXEC_QUEUES];
> > +	uint32_t syncobjs[MAX_N_EXEC_QUEUES];
> > +	uint32_t bind_syncobjs[MAX_N_EXEC_QUEUES];
> >  	size_t bo_size;
> >  	uint32_t bo = 0;
> >  	struct {
> > @@ -106,8 +106,8 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  	} *data;
> >  	int i, b;
> >  
> > -	igt_assert(n_engines <= MAX_N_ENGINES);
> > -	igt_assert(n_vm <= MAX_N_ENGINES);
> > +	igt_assert(n_exec_queues <= MAX_N_EXEC_QUEUES);
> > +	igt_assert(n_vm <= MAX_N_EXEC_QUEUES);
> >  
> >  	for (i = 0; i < n_vm; ++i)
> >  		vm[i] = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
> > @@ -117,7 +117,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  
> >  	addr[0] = 0x1a0000;
> >  	sparse_addr[0] = 0x301a0000;
> > -	for (i = 1; i < MAX_N_ENGINES; ++i) {
> > +	for (i = 1; i < MAX_N_EXEC_QUEUES; ++i) {
> >  		addr[i] = addr[i - 1] + (0x1ull << 32);
> >  		sparse_addr[i] = sparse_addr[i - 1] + (0x1ull << 32);
> >  	}
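
The per-slot VA layout here collapses to a closed form, which might be
worth a comment in the code:

	addr[i]        = 0x1a0000   + i * (0x1ull << 32);
	sparse_addr[i] = 0x301a0000 + i * (0x1ull << 32);

i.e. each slot gets its own 4 GiB-aligned window, so the REBIND path can
bump addr[i % n_vm] by bo_size without ever colliding with a
neighbouring slot's range.
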
> > @@ -147,14 +147,14 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  			data = xe_bo_map(fd, bo, bo_size);
> >  	}
> >  
> > -	for (i = 0; i < n_engines; i++) {
> > +	for (i = 0; i < n_exec_queues; i++) {
> >  		uint32_t __vm = vm[i % n_vm];
> >  
> > -		engines[i] = xe_engine_create(fd, __vm, eci, 0);
> > -		if (flags & BIND_ENGINE)
> > -			bind_engines[i] = xe_bind_engine_create(fd, __vm, 0);
> > +		exec_queues[i] = xe_exec_queue_create(fd, __vm, eci, 0);
> > +		if (flags & BIND_EXEC_QUEUE)
> > +			bind_exec_queues[i] = xe_bind_exec_queue_create(fd, __vm, 0);
> >  		else
> > -			bind_engines[i] = 0;
> > +			bind_exec_queues[i] = 0;
> >  		syncobjs[i] = syncobj_create(fd, 0);
> >  		bind_syncobjs[i] = syncobj_create(fd, 0);
> >  	};
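
If I read the bind path right, a zero id below means "use the VM's
default bind queue", so the BIND_EXEC_QUEUE variants are what actually
exercise user-created bind queues:

	/* bind_exec_queues[e] == 0 -> kernel's default bind queue
	 * bind_exec_queues[e] != 0 -> the user bind queue created above */

That could use a one-line comment in the test, since 0 looks like an
uninitialised id at first glance.
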
> > @@ -162,14 +162,14 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  	for (i = 0; i < n_vm; ++i) {
> >  		sync[0].handle = bind_syncobjs[i];
> >  		if (bo)
> > -			xe_vm_bind_async(fd, vm[i], bind_engines[i], bo, 0,
> > +			xe_vm_bind_async(fd, vm[i], bind_exec_queues[i], bo, 0,
> >  					 addr[i], bo_size, sync, 1);
> >  		else
> > -			xe_vm_bind_userptr_async(fd, vm[i], bind_engines[i],
> > +			xe_vm_bind_userptr_async(fd, vm[i], bind_exec_queues[i],
> >  						 to_user_pointer(data), addr[i],
> >  						 bo_size, sync, 1);
> >  		if (flags & SPARSE)
> > -			__xe_vm_bind_assert(fd, vm[i], bind_engines[i],
> > +			__xe_vm_bind_assert(fd, vm[i], bind_exec_queues[i],
> >  					    0, 0, sparse_addr[i], bo_size,
> >  					    XE_VM_BIND_OP_MAP |
> >  					    XE_VM_BIND_FLAG_ASYNC |
> > @@ -188,7 +188,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  		uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
> >  		uint64_t sdi_addr = (flags & SPARSE ? sparse_addr[i % n_vm] :
> >  				     __addr)+ sdi_offset;
> > -		int e = i % n_engines;
> > +		int e = i % n_exec_queues;
> >  
> >  		b = 0;
> >  		data[i].batch[b++] = MI_STORE_DWORD_IMM_GEN4;
> > @@ -203,7 +203,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  		sync[1].flags |= DRM_XE_SYNC_SIGNAL;
> >  		sync[1].handle = syncobjs[e];
> >  
> > -		exec.engine_id = engines[e];
> > +		exec.exec_queue_id = exec_queues[e];
> >  		exec.address = batch_addr;
> >  		if (e != i)
> >  			 syncobj_reset(fd, &syncobjs[e], 1);
> > @@ -213,18 +213,18 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  			uint32_t __vm = vm[cur_vm];
> >  
> >  			sync[1].flags &= ~DRM_XE_SYNC_SIGNAL;
> > -			xe_vm_unbind_async(fd, __vm, bind_engines[e], 0,
> > +			xe_vm_unbind_async(fd, __vm, bind_exec_queues[e], 0,
> >  					   __addr, bo_size, sync + 1, 1);
> >  
> >  			sync[0].flags |= DRM_XE_SYNC_SIGNAL;
> >  			addr[i % n_vm] += bo_size;
> >  			__addr = addr[i % n_vm];
> >  			if (bo)
> > -				xe_vm_bind_async(fd, __vm, bind_engines[e], bo,
> > +				xe_vm_bind_async(fd, __vm, bind_exec_queues[e], bo,
> >  						 0, __addr, bo_size, sync, 1);
> >  			else
> >  				xe_vm_bind_userptr_async(fd, __vm,
> > -							 bind_engines[e],
> > +							 bind_exec_queues[e],
> >  							 to_user_pointer(data),
> >  							 __addr, bo_size, sync,
> >  							 1);
> > @@ -257,7 +257,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  		}
> >  	}
> >  
> > -	for (i = 0; i < n_engines && n_execs; i++)
> > +	for (i = 0; i < n_exec_queues && n_execs; i++)
> >  		igt_assert(syncobj_wait(fd, &syncobjs[i], 1, INT64_MAX, 0,
> >  					NULL));
> >  
> > @@ -268,7 +268,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
> >  	for (i = 0; i < n_vm; ++i) {
> >  		syncobj_reset(fd, &sync[0].handle, 1);
> > -		xe_vm_unbind_async(fd, vm[i], bind_engines[i], 0, addr[i],
> > +		xe_vm_unbind_async(fd, vm[i], bind_exec_queues[i], 0, addr[i],
> >  				   bo_size, sync, 1);
> >  		igt_assert(syncobj_wait(fd, &sync[0].handle, 1,
> >  					INT64_MAX, 0, NULL));
> > @@ -280,11 +280,11 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  			igt_assert_eq(data[i].data, 0xc0ffee);
> >  	}
> >  
> > -	for (i = 0; i < n_engines; i++) {
> > +	for (i = 0; i < n_exec_queues; i++) {
> >  		syncobj_destroy(fd, syncobjs[i]);
> > -		xe_engine_destroy(fd, engines[i]);
> > -		if (bind_engines[i])
> > -			xe_engine_destroy(fd, bind_engines[i]);
> > +		xe_exec_queue_destroy(fd, exec_queues[i]);
> > +		if (bind_exec_queues[i])
> > +			xe_exec_queue_destroy(fd, bind_exec_queues[i]);
> >  	}
> >  
> >  	if (bo) {
> > @@ -318,13 +318,13 @@ igt_main
> >  		{ "userptr-rebind", USERPTR | REBIND },
> >  		{ "userptr-invalidate", USERPTR | INVALIDATE },
> >  		{ "userptr-invalidate-race", USERPTR | INVALIDATE | RACE },
> > -		{ "bindengine", BIND_ENGINE },
> > -		{ "bindengine-userptr", BIND_ENGINE | USERPTR },
> > -		{ "bindengine-rebind", BIND_ENGINE | REBIND },
> > -		{ "bindengine-userptr-rebind", BIND_ENGINE | USERPTR | REBIND },
> > -		{ "bindengine-userptr-invalidate", BIND_ENGINE | USERPTR |
> > +		{ "bindexecqueue", BIND_EXEC_QUEUE },
> > +		{ "bindexecqueue-userptr", BIND_EXEC_QUEUE | USERPTR },
> > +		{ "bindexecqueue-rebind", BIND_EXEC_QUEUE | REBIND },
> > +		{ "bindexecqueue-userptr-rebind", BIND_EXEC_QUEUE | USERPTR | REBIND },
> > +		{ "bindexecqueue-userptr-invalidate", BIND_EXEC_QUEUE | USERPTR |
> >  			INVALIDATE },
> > -		{ "bindengine-userptr-invalidate-race", BIND_ENGINE | USERPTR |
> > +		{ "bindexecqueue-userptr-invalidate-race", BIND_EXEC_QUEUE | USERPTR |
> >  			INVALIDATE | RACE },
> >  		{ NULL },
> >  	};
> > @@ -349,14 +349,14 @@ igt_main
> >  					  64 : 1024, 1,
> >  					  s->flags);
> >  
> > -		igt_subtest_f("many-engines-%s", s->name)
> > +		igt_subtest_f("many-execqueues-%s", s->name)
> >  			xe_for_each_hw_engine(fd, hwe)
> >  				test_exec(fd, hwe, 16,
> >  					  s->flags & (REBIND | INVALIDATE) ?
> >  					  64 : 1024, 1,
> >  					  s->flags);
> >  
> > -		igt_subtest_f("many-engines-many-vm-%s", s->name)
> > +		igt_subtest_f("many-execqueues-many-vm-%s", s->name)
> >  			xe_for_each_hw_engine(fd, hwe)
> >  				test_exec(fd, hwe, 16,
> >  					  s->flags & (REBIND | INVALIDATE) ?
> > diff --git a/tests/xe/xe_exec_compute_mode.c b/tests/xe/xe_exec_compute_mode.c
> > index ee9756c21..679b84fa1 100644
> > --- a/tests/xe/xe_exec_compute_mode.c
> > +++ b/tests/xe/xe_exec_compute_mode.c
> > @@ -22,14 +22,14 @@
> >  #include "xe/xe_query.h"
> >  #include <string.h>
> >  
> > -#define MAX_N_ENGINES 16
> > -#define USERPTR		(0x1 << 0)
> > -#define REBIND		(0x1 << 1)
> > -#define INVALIDATE	(0x1 << 2)
> > -#define RACE		(0x1 << 3)
> > -#define BIND_ENGINE	(0x1 << 4)
> > -#define VM_FOR_BO	(0x1 << 5)
> > -#define ENGINE_EARLY	(0x1 << 6)
> > +#define MAX_N_EXECQUEUES	16
> > +#define USERPTR				(0x1 << 0)
> > +#define REBIND				(0x1 << 1)
> > +#define INVALIDATE			(0x1 << 2)
> > +#define RACE				(0x1 << 3)
> > +#define BIND_EXECQUEUE		(0x1 << 4)
> > +#define VM_FOR_BO			(0x1 << 5)
> > +#define EXEC_QUEUE_EARLY	(0x1 << 6)
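
Nit: this file ends up with MAX_N_EXECQUEUES / BIND_EXECQUEUE while
xe_exec_basic.c and xe_exec_fault_mode.c use MAX_N_EXEC_QUEUES /
BIND_EXEC_QUEUE. Worth unifying on the underscored spelling before
merge.
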
> >  
> >  /**
> >   * SUBTEST: twice-%s
> > @@ -53,18 +53,18 @@
> >   * @userptr-rebind:			userptr rebind
> >   * @userptr-invalidate:			userptr invalidate
> >   * @userptr-invalidate-race:		userptr invalidate race
> > - * @bindengine:				bindengine
> > - * @bindengine-userptr:			bindengine userptr
> > - * @bindengine-rebind:			bindengine rebind
> > - * @bindengine-userptr-rebind:		bindengine userptr rebind
> > - * @bindengine-userptr-invalidate:	bindengine userptr invalidate
> > - * @bindengine-userptr-invalidate-race:	bindengine-userptr invalidate race
> > + * @bindexecqueue:				bindexecqueue
> > + * @bindexecqueue-userptr:			bindexecqueue userptr
> > + * @bindexecqueue-rebind:			bindexecqueue rebind
> > + * @bindexecqueue-userptr-rebind:		bindexecqueue userptr rebind
> > + * @bindexecqueue-userptr-invalidate:	bindexecqueue userptr invalidate
> > + * @bindexecqueue-userptr-invalidate-race:	bindexecqueue-userptr invalidate race
> >   */
> >  
> >  /**
> >   *
> > - * SUBTEST: many-engines-%s
> > - * Description: Run %arg[1] compute machine test on many engines
> > + * SUBTEST: many-execqueues-%s
> > + * Description: Run %arg[1] compute machine test on many exec_queues
> >   * Run type: FULL
> >   *
> >   * arg[1]:
> > @@ -75,15 +75,15 @@
> >   * @rebind:				rebind
> >   * @userptr-rebind:			userptr rebind
> >   * @userptr-invalidate:			userptr invalidate
> > - * @bindengine:				bindengine
> > - * @bindengine-userptr:			bindengine userptr
> > - * @bindengine-rebind:			bindengine rebind
> > - * @bindengine-userptr-rebind:		bindengine userptr rebind
> > - * @bindengine-userptr-invalidate:	bindengine userptr invalidate
> > + * @bindexecqueue:				bindexecqueue
> > + * @bindexecqueue-userptr:			bindexecqueue userptr
> > + * @bindexecqueue-rebind:			bindexecqueue rebind
> > + * @bindexecqueue-userptr-rebind:		bindexecqueue userptr rebind
> > + * @bindexecqueue-userptr-invalidate:	bindexecqueue userptr invalidate
> >   */
> >  static void
> >  test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> > -	  int n_engines, int n_execs, unsigned int flags)
> > +	  int n_exec_queues, int n_execs, unsigned int flags)
> >  {
> >  	uint32_t vm;
> >  	uint64_t addr = 0x1a0000;
> > @@ -97,8 +97,8 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  		.num_syncs = 1,
> >  		.syncs = to_user_pointer(sync),
> >  	};
> > -	uint32_t engines[MAX_N_ENGINES];
> > -	uint32_t bind_engines[MAX_N_ENGINES];
> > +	uint32_t exec_queues[MAX_N_EXECQUEUES];
> > +	uint32_t bind_exec_queues[MAX_N_EXECQUEUES];
> >  	size_t bo_size;
> >  	uint32_t bo = 0;
> >  	struct {
> > @@ -112,7 +112,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  	int map_fd = -1;
> >  	int64_t fence_timeout;
> >  
> > -	igt_assert(n_engines <= MAX_N_ENGINES);
> > +	igt_assert(n_exec_queues <= MAX_N_EXECQUEUES);
> >  
> >  	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS |
> >  			  DRM_XE_VM_CREATE_COMPUTE_MODE, 0);
> > @@ -120,21 +120,21 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  	bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
> >  			xe_get_default_alignment(fd));
> >  
> > -	for (i = 0; (flags & ENGINE_EARLY) && i < n_engines; i++) {
> > -		struct drm_xe_ext_engine_set_property ext = {
> > +	for (i = 0; (flags & EXEC_QUEUE_EARLY) && i < n_exec_queues; i++) {
> > +		struct drm_xe_ext_exec_queue_set_property ext = {
> >  			.base.next_extension = 0,
> > -			.base.name = XE_ENGINE_EXTENSION_SET_PROPERTY,
> > -			.property = XE_ENGINE_SET_PROPERTY_COMPUTE_MODE,
> > +			.base.name = XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
> > +			.property = XE_EXEC_QUEUE_SET_PROPERTY_COMPUTE_MODE,
> >  			.value = 1,
> >  		};
> >  
> > -		engines[i] = xe_engine_create(fd, vm, eci,
> > +		exec_queues[i] = xe_exec_queue_create(fd, vm, eci,
> >  					      to_user_pointer(&ext));
> > -		if (flags & BIND_ENGINE)
> > -			bind_engines[i] =
> > -				xe_bind_engine_create(fd, vm, 0);
> > +		if (flags & BIND_EXECQUEUE)
> > +			bind_exec_queues[i] =
> > +				xe_bind_exec_queue_create(fd, vm, 0);
> >  		else
> > -			bind_engines[i] = 0;
> > +			bind_exec_queues[i] = 0;
> >  	};
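
The two creation loops keyed off EXEC_QUEUE_EARLY boil down to an
ordering difference:

	/* EXEC_QUEUE_EARLY: create exec_queues -> allocate/map bo -> bind
	 * default:          allocate/map bo -> create exec_queues -> bind */

which is what the "preempt-fence-early" section below relies on to cover
the early preempt-fence setup path.
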
> >  
> >  	if (flags & USERPTR) {
> > @@ -156,29 +156,29 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  	}
> >  	memset(data, 0, bo_size);
> >  
> > -	for (i = 0; !(flags & ENGINE_EARLY) && i < n_engines; i++) {
> > -		struct drm_xe_ext_engine_set_property ext = {
> > +	for (i = 0; !(flags & EXEC_QUEUE_EARLY) && i < n_exec_queues; i++) {
> > +		struct drm_xe_ext_exec_queue_set_property ext = {
> >  			.base.next_extension = 0,
> > -			.base.name = XE_ENGINE_EXTENSION_SET_PROPERTY,
> > -			.property = XE_ENGINE_SET_PROPERTY_COMPUTE_MODE,
> > +			.base.name = XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
> > +			.property = XE_EXEC_QUEUE_SET_PROPERTY_COMPUTE_MODE,
> >  			.value = 1,
> >  		};
> >  
> > -		engines[i] = xe_engine_create(fd, vm, eci,
> > +		exec_queues[i] = xe_exec_queue_create(fd, vm, eci,
> >  					      to_user_pointer(&ext));
> > -		if (flags & BIND_ENGINE)
> > -			bind_engines[i] =
> > -				xe_bind_engine_create(fd, vm, 0);
> > +		if (flags & BIND_EXECQUEUE)
> > +			bind_exec_queues[i] =
> > +				xe_bind_exec_queue_create(fd, vm, 0);
> >  		else
> > -			bind_engines[i] = 0;
> > +			bind_exec_queues[i] = 0;
> >  	};
> >  
> >  	sync[0].addr = to_user_pointer(&data[0].vm_sync);
> >  	if (bo)
> > -		xe_vm_bind_async(fd, vm, bind_engines[0], bo, 0, addr,
> > +		xe_vm_bind_async(fd, vm, bind_exec_queues[0], bo, 0, addr,
> >  				 bo_size, sync, 1);
> >  	else
> > -		xe_vm_bind_userptr_async(fd, vm, bind_engines[0],
> > +		xe_vm_bind_userptr_async(fd, vm, bind_exec_queues[0],
> >  					 to_user_pointer(data), addr,
> >  					 bo_size, sync, 1);
> >  #define ONE_SEC	MS_TO_NS(1000)
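
Since compute-mode queues have no syncobjs, every wait in this file goes
through user fences. A minimal sketch of the pattern, with the
flags/timeline_value spelling being my assumption from the current
header:

	struct drm_xe_sync sync[1] = {
		{ .flags = DRM_XE_SYNC_USER_FENCE | DRM_XE_SYNC_SIGNAL,
		  .timeline_value = USER_FENCE_VALUE, },
	};

	sync[0].addr = to_user_pointer(&data[0].vm_sync);
	xe_vm_bind_async(fd, vm, bind_exec_queues[0], bo, 0, addr,
			 bo_size, sync, 1);
	/* poll the fence memory location rather than a syncobj */
	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, NULL,
		       ONE_SEC);
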
> > @@ -195,7 +195,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  		uint64_t batch_addr = addr + batch_offset;
> >  		uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
> >  		uint64_t sdi_addr = addr + sdi_offset;
> > -		int e = i % n_engines;
> > +		int e = i % n_exec_queues;
> >  
> >  		b = 0;
> >  		data[i].batch[b++] = MI_STORE_DWORD_IMM_GEN4;
> > @@ -207,24 +207,24 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  
> >  		sync[0].addr = addr + (char *)&data[i].exec_sync - (char *)data;
> >  
> > -		exec.engine_id = engines[e];
> > +		exec.exec_queue_id = exec_queues[e];
> >  		exec.address = batch_addr;
> >  		xe_exec(fd, &exec);
> >  
> >  		if (flags & REBIND && i + 1 != n_execs) {
> >  			xe_wait_ufence(fd, &data[i].exec_sync, USER_FENCE_VALUE,
> >  				       NULL, fence_timeout);
> > -			xe_vm_unbind_async(fd, vm, bind_engines[e], 0,
> > +			xe_vm_unbind_async(fd, vm, bind_exec_queues[e], 0,
> >  					   addr, bo_size, NULL, 0);
> >  
> >  			sync[0].addr = to_user_pointer(&data[0].vm_sync);
> >  			addr += bo_size;
> >  			if (bo)
> > -				xe_vm_bind_async(fd, vm, bind_engines[e], bo,
> > +				xe_vm_bind_async(fd, vm, bind_exec_queues[e], bo,
> >  						 0, addr, bo_size, sync, 1);
> >  			else
> >  				xe_vm_bind_userptr_async(fd, vm,
> > -							 bind_engines[e],
> > +							 bind_exec_queues[e],
> >  							 to_user_pointer(data),
> >  							 addr, bo_size, sync,
> >  							 1);
> > @@ -280,7 +280,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  		usleep(250000);
> >  
> >  	sync[0].addr = to_user_pointer(&data[0].vm_sync);
> > -	xe_vm_unbind_async(fd, vm, bind_engines[0], 0, addr, bo_size,
> > +	xe_vm_unbind_async(fd, vm, bind_exec_queues[0], 0, addr, bo_size,
> >  			   sync, 1);
> >  	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, NULL,
> >  		       fence_timeout);
> > @@ -288,10 +288,10 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  	for (i = j; i < n_execs; i++)
> >  		igt_assert_eq(data[i].data, 0xc0ffee);
> >  
> > -	for (i = 0; i < n_engines; i++) {
> > -		xe_engine_destroy(fd, engines[i]);
> > -		if (bind_engines[i])
> > -			xe_engine_destroy(fd, bind_engines[i]);
> > +	for (i = 0; i < n_exec_queues; i++) {
> > +		xe_exec_queue_destroy(fd, exec_queues[i]);
> > +		if (bind_exec_queues[i])
> > +			xe_exec_queue_destroy(fd, bind_exec_queues[i]);
> >  	}
> >  
> >  	if (bo) {
> > @@ -313,20 +313,20 @@ igt_main
> >  		unsigned int flags;
> >  	} sections[] = {
> >  		{ "basic", 0 },
> > -		{ "preempt-fence-early", VM_FOR_BO | ENGINE_EARLY },
> > +		{ "preempt-fence-early", VM_FOR_BO | EXEC_QUEUE_EARLY },
> >  		{ "userptr", USERPTR },
> >  		{ "rebind", REBIND },
> >  		{ "userptr-rebind", USERPTR | REBIND },
> >  		{ "userptr-invalidate", USERPTR | INVALIDATE },
> >  		{ "userptr-invalidate-race", USERPTR | INVALIDATE | RACE },
> > -		{ "bindengine", BIND_ENGINE },
> > -		{ "bindengine-userptr", BIND_ENGINE | USERPTR },
> > -		{ "bindengine-rebind",  BIND_ENGINE | REBIND },
> > -		{ "bindengine-userptr-rebind",  BIND_ENGINE | USERPTR |
> > +		{ "bindexecqueue", BIND_EXECQUEUE },
> > +		{ "bindexecqueue-userptr", BIND_EXECQUEUE | USERPTR },
> > +		{ "bindexecqueue-rebind",  BIND_EXECQUEUE | REBIND },
> > +		{ "bindexecqueue-userptr-rebind",  BIND_EXECQUEUE | USERPTR |
> >  			REBIND },
> > -		{ "bindengine-userptr-invalidate",  BIND_ENGINE | USERPTR |
> > +		{ "bindexecqueue-userptr-invalidate",  BIND_EXECQUEUE | USERPTR |
> >  			INVALIDATE },
> > -		{ "bindengine-userptr-invalidate-race", BIND_ENGINE | USERPTR |
> > +		{ "bindexecqueue-userptr-invalidate-race", BIND_EXECQUEUE | USERPTR |
> >  			INVALIDATE | RACE },
> >  		{ NULL },
> >  	};
> > @@ -354,7 +354,7 @@ igt_main
> >  		if (s->flags & RACE)
> >  			continue;
> >  
> > -		igt_subtest_f("many-engines-%s", s->name)
> > +		igt_subtest_f("many-execqueues-%s", s->name)
> >  			xe_for_each_hw_engine(fd, hwe)
> >  				test_exec(fd, hwe, 16,
> >  					  s->flags & (REBIND | INVALIDATE) ?
> > diff --git a/tests/xe/xe_exec_fault_mode.c b/tests/xe/xe_exec_fault_mode.c
> > index 7dcbb3c45..85c010039 100644
> > --- a/tests/xe/xe_exec_fault_mode.c
> > +++ b/tests/xe/xe_exec_fault_mode.c
> > @@ -4,7 +4,7 @@
> >   */
> >  
> >  /**
> > - * TEST: Basic tests for execbuf functionality for virtual and parallel engines
> > + * TEST: Basic tests for execbuf functionality for virtual and parallel exec_queues
> >   * Category: Hardware building block
> >   * Sub-category: execbuf
> >   * Functionality: fault mode
> > @@ -23,16 +23,16 @@
> >  #include "xe/xe_query.h"
> >  #include <string.h>
> >  
> > -#define MAX_N_ENGINES 16
> > -#define USERPTR		(0x1 << 0)
> > -#define REBIND		(0x1 << 1)
> > -#define INVALIDATE	(0x1 << 2)
> > -#define RACE		(0x1 << 3)
> > -#define BIND_ENGINE	(0x1 << 4)
> > -#define WAIT_ATOMIC	(0x1 << 5)
> > -#define IMMEDIATE	(0x1 << 6)
> > -#define PREFETCH	(0x1 << 7)
> > -#define INVALID_FAULT	(0x1 << 8)
> > +#define MAX_N_EXEC_QUEUES	16
> > +#define USERPTR				(0x1 << 0)
> > +#define REBIND				(0x1 << 1)
> > +#define INVALIDATE			(0x1 << 2)
> > +#define RACE				(0x1 << 3)
> > +#define BIND_EXEC_QUEUE		(0x1 << 4)
> > +#define WAIT_ATOMIC			(0x1 << 5)
> > +#define IMMEDIATE			(0x1 << 6)
> > +#define PREFETCH			(0x1 << 7)
> > +#define INVALID_FAULT		(0x1 << 8)
> >  
> >  /**
> >   * SUBTEST: once-%s
> > @@ -47,8 +47,8 @@
> >   * Description: Run %arg[1] fault mode test many times
> >   * Run type: FULL
> >   *
> > - * SUBTEST: many-engines-%s
> > - * Description: Run %arg[1] fault mode test on many engines
> > + * SUBTEST: many-execqueues-%s
> > + * Description: Run %arg[1] fault mode test on many exec_queues
> >   * Run type: FULL
> >   *
> >   * arg[1]:
> > @@ -59,50 +59,50 @@
> >   * @userptr-rebind:			userptr rebind
> >   * @userptr-invalidate:			userptr invalidate
> >   * @userptr-invalidate-race:		userptr invalidate race
> > - * @bindengine:				bindengine
> > - * @bindengine-userptr:			bindengine userptr
> > - * @bindengine-rebind:			bindengine rebind
> > - * @bindengine-userptr-rebind:		bindengine userptr rebind
> > - * @bindengine-userptr-invalidate:
> > - *					bindengine userptr invalidate
> > - * @bindengine-userptr-invalidate-race:
> > - *					bindengine userptr invalidate race
> > + * @bindexecqueue:				bindexecqueue
> > + * @bindexecqueue-userptr:			bindexecqueue userptr
> > + * @bindexecqueue-rebind:			bindexecqueue rebind
> > + * @bindexecqueue-userptr-rebind:		bindexecqueue userptr rebind
> > + * @bindexecqueue-userptr-invalidate:
> > + *					bindexecqueue userptr invalidate
> > + * @bindexecqueue-userptr-invalidate-race:
> > + *					bindexecqueue userptr invalidate race
> >   * @basic-imm:				basic imm
> >   * @userptr-imm:			userptr imm
> >   * @rebind-imm:				rebind imm
> >   * @userptr-rebind-imm:			userptr rebind imm
> >   * @userptr-invalidate-imm:		userptr invalidate imm
> >   * @userptr-invalidate-race-imm:	userptr invalidate race imm
> > - * @bindengine-imm:			bindengine imm
> > - * @bindengine-userptr-imm:		bindengine userptr imm
> > - * @bindengine-rebind-imm:		bindengine rebind imm
> > - * @bindengine-userptr-rebind-imm:
> > - *					bindengine userptr rebind imm
> > - * @bindengine-userptr-invalidate-imm:
> > - *					bindengine userptr invalidate imm
> > - * @bindengine-userptr-invalidate-race-imm:
> > - *					bindengine userptr invalidate race imm
> > + * @bindexecqueue-imm:			bindexecqueue imm
> > + * @bindexecqueue-userptr-imm:		bindexecqueue userptr imm
> > + * @bindexecqueue-rebind-imm:		bindexecqueue rebind imm
> > + * @bindexecqueue-userptr-rebind-imm:
> > + *					bindexecqueue userptr rebind imm
> > + * @bindexecqueue-userptr-invalidate-imm:
> > + *					bindexecqueue userptr invalidate imm
> > + * @bindexecqueue-userptr-invalidate-race-imm:
> > + *					bindexecqueue userptr invalidate race imm
> >   * @basic-prefetch:			basic prefetch
> >   * @userptr-prefetch:			userptr prefetch
> >   * @rebind-prefetch:			rebind prefetch
> >   * @userptr-rebind-prefetch:		userptr rebind prefetch
> >   * @userptr-invalidate-prefetch:	userptr invalidate prefetch
> >   * @userptr-invalidate-race-prefetch:	userptr invalidate race prefetch
> > - * @bindengine-prefetch:		bindengine prefetch
> > - * @bindengine-userptr-prefetch:	bindengine userptr prefetch
> > - * @bindengine-rebind-prefetch:		bindengine rebind prefetch
> > - * @bindengine-userptr-rebind-prefetch:	bindengine userptr rebind prefetch
> > - * @bindengine-userptr-invalidate-prefetch:
> > - *					bindengine userptr invalidate prefetch
> > - * @bindengine-userptr-invalidate-race-prefetch:
> > - *					bindengine userptr invalidate race prefetch
> > + * @bindexecqueue-prefetch:		bindexecqueue prefetch
> > + * @bindexecqueue-userptr-prefetch:	bindexecqueue userptr prefetch
> > + * @bindexecqueue-rebind-prefetch:		bindexecqueue rebind prefetch
> > + * @bindexecqueue-userptr-rebind-prefetch:	bindexecqueue userptr rebind prefetch
> > + * @bindexecqueue-userptr-invalidate-prefetch:
> > + *					bindexecqueue userptr invalidate prefetch
> > + * @bindexecqueue-userptr-invalidate-race-prefetch:
> > + *					bindexecqueue userptr invalidate race prefetch
> >   * @invalid-fault:			invalid fault
> >   * @invalid-userptr-fault:		invalid userptr fault
> >   */
> >  
> >  static void
> >  test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> > -	  int n_engines, int n_execs, unsigned int flags)
> > +	  int n_exec_queues, int n_execs, unsigned int flags)
> >  {
> >  	uint32_t vm;
> >  	uint64_t addr = 0x1a0000;
> > @@ -116,8 +116,8 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  		.num_syncs = 1,
> >  		.syncs = to_user_pointer(sync),
> >  	};
> > -	uint32_t engines[MAX_N_ENGINES];
> > -	uint32_t bind_engines[MAX_N_ENGINES];
> > +	uint32_t exec_queues[MAX_N_EXEC_QUEUES];
> > +	uint32_t bind_exec_queues[MAX_N_EXEC_QUEUES];
> >  	size_t bo_size;
> >  	uint32_t bo = 0;
> >  	struct {
> > @@ -130,7 +130,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  	int i, j, b;
> >  	int map_fd = -1;
> >  
> > -	igt_assert(n_engines <= MAX_N_ENGINES);
> > +	igt_assert(n_exec_queues <= MAX_N_EXEC_QUEUES);
> >  
> >  	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS |
> >  			  DRM_XE_VM_CREATE_FAULT_MODE, 0);
> > @@ -162,32 +162,32 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  	}
> >  	memset(data, 0, bo_size);
> >  
> > -	for (i = 0; i < n_engines; i++) {
> > -		engines[i] = xe_engine_create(fd, vm, eci, 0);
> > -		if (flags & BIND_ENGINE)
> > -			bind_engines[i] =
> > -				xe_bind_engine_create(fd, vm, 0);
> > +	for (i = 0; i < n_exec_queues; i++) {
> > +		exec_queues[i] = xe_exec_queue_create(fd, vm, eci, 0);
> > +		if (flags & BIND_EXEC_QUEUE)
> > +			bind_exec_queues[i] =
> > +				xe_bind_exec_queue_create(fd, vm, 0);
> >  		else
> > -			bind_engines[i] = 0;
> > +			bind_exec_queues[i] = 0;
> >  	};
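
For readers following along, the rename is purely mechanical in hunks like
this one. A minimal sketch of the new lifecycle, using only calls and struct
fields that appear in this patch (fd, eci and batch_addr are assumed to come
from the surrounding test fixture):

	uint32_t vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
	/* was: xe_engine_create() */
	uint32_t exec_queue = xe_exec_queue_create(fd, vm, eci, 0);

	struct drm_xe_exec exec = {
		.num_batch_buffer = 1,
		.exec_queue_id = exec_queue,	/* was: exec.engine_id */
		.address = batch_addr,
	};
	xe_exec(fd, &exec);

	/* was: xe_engine_destroy() */
	xe_exec_queue_destroy(fd, exec_queue);
	xe_vm_destroy(fd, vm);
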
> >  
> >  	sync[0].addr = to_user_pointer(&data[0].vm_sync);
> >  	if (flags & IMMEDIATE) {
> >  		if (bo)
> > -			xe_vm_bind_async_flags(fd, vm, bind_engines[0], bo, 0,
> > +			xe_vm_bind_async_flags(fd, vm, bind_exec_queues[0], bo, 0,
> >  					       addr, bo_size, sync, 1,
> >  					       XE_VM_BIND_FLAG_IMMEDIATE);
> >  		else
> > -			xe_vm_bind_userptr_async_flags(fd, vm, bind_engines[0],
> > +			xe_vm_bind_userptr_async_flags(fd, vm, bind_exec_queues[0],
> >  						       to_user_pointer(data),
> >  						       addr, bo_size, sync, 1,
> >  						       XE_VM_BIND_FLAG_IMMEDIATE);
> >  	} else {
> >  		if (bo)
> > -			xe_vm_bind_async(fd, vm, bind_engines[0], bo, 0, addr,
> > +			xe_vm_bind_async(fd, vm, bind_exec_queues[0], bo, 0, addr,
> >  					 bo_size, sync, 1);
> >  		else
> > -			xe_vm_bind_userptr_async(fd, vm, bind_engines[0],
> > +			xe_vm_bind_userptr_async(fd, vm, bind_exec_queues[0],
> >  						 to_user_pointer(data), addr,
> >  						 bo_size, sync, 1);
> >  	}
> > @@ -198,7 +198,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  
> >  	if (flags & PREFETCH) {
> >  		/* Should move to system memory */
> > -		xe_vm_prefetch_async(fd, vm, bind_engines[0], 0, addr,
> > +		xe_vm_prefetch_async(fd, vm, bind_exec_queues[0], 0, addr,
> >  				     bo_size, sync, 1, 0);
> >  		xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, NULL,
> >  			       ONE_SEC);
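
The bind-queue plus user-fence pattern above is used throughout the
fault-mode tests. As I read it, the kernel writes USER_FENCE_VALUE to the
sync address once the operation completes and xe_wait_ufence() blocks on
that. Condensed with the renamed helpers (data, sync, addr and bo_size as in
the test; passing 0 instead of a queue id selects the default bind queue):

	/* optional dedicated queue for binds */
	uint32_t bind_exec_queue = xe_bind_exec_queue_create(fd, vm, 0);

	sync[0].addr = to_user_pointer(&data[0].vm_sync);
	xe_vm_bind_async(fd, vm, bind_exec_queue, bo, 0, addr, bo_size,
			 sync, 1);
	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, NULL,
		       ONE_SEC);
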
> > @@ -210,7 +210,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  		uint64_t batch_addr = addr + batch_offset;
> >  		uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
> >  		uint64_t sdi_addr = addr + sdi_offset;
> > -		int e = i % n_engines;
> > +		int e = i % n_exec_queues;
> >  
> >  		b = 0;
> >  		data[i].batch[b++] = MI_STORE_DWORD_IMM_GEN4;
> > @@ -222,24 +222,24 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  
> >  		sync[0].addr = addr + (char *)&data[i].exec_sync - (char *)data;
> >  
> > -		exec.engine_id = engines[e];
> > +		exec.exec_queue_id = exec_queues[e];
> >  		exec.address = batch_addr;
> >  		xe_exec(fd, &exec);
> >  
> >  		if (flags & REBIND && i + 1 != n_execs) {
> >  			xe_wait_ufence(fd, &data[i].exec_sync, USER_FENCE_VALUE,
> >  				       NULL, ONE_SEC);
> > -			xe_vm_unbind_async(fd, vm, bind_engines[e], 0,
> > +			xe_vm_unbind_async(fd, vm, bind_exec_queues[e], 0,
> >  					   addr, bo_size, NULL, 0);
> >  
> >  			sync[0].addr = to_user_pointer(&data[0].vm_sync);
> >  			addr += bo_size;
> >  			if (bo)
> > -				xe_vm_bind_async(fd, vm, bind_engines[e], bo,
> > +				xe_vm_bind_async(fd, vm, bind_exec_queues[e], bo,
> >  						 0, addr, bo_size, sync, 1);
> >  			else
> >  				xe_vm_bind_userptr_async(fd, vm,
> > -							 bind_engines[e],
> > +							 bind_exec_queues[e],
> >  							 to_user_pointer(data),
> >  							 addr, bo_size, sync,
> >  							 1);
> > @@ -292,7 +292,7 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  	}
> >  
> >  	sync[0].addr = to_user_pointer(&data[0].vm_sync);
> > -	xe_vm_unbind_async(fd, vm, bind_engines[0], 0, addr, bo_size,
> > +	xe_vm_unbind_async(fd, vm, bind_exec_queues[0], 0, addr, bo_size,
> >  			   sync, 1);
> >  	xe_wait_ufence(fd, &data[0].vm_sync, USER_FENCE_VALUE, NULL, ONE_SEC);
> >  
> > @@ -301,10 +301,10 @@ test_exec(int fd, struct drm_xe_engine_class_instance *eci,
> >  			igt_assert_eq(data[i].data, 0xc0ffee);
> >  	}
> >  
> > -	for (i = 0; i < n_engines; i++) {
> > -		xe_engine_destroy(fd, engines[i]);
> > -		if (bind_engines[i])
> > -			xe_engine_destroy(fd, bind_engines[i]);
> > +	for (i = 0; i < n_exec_queues; i++) {
> > +		xe_exec_queue_destroy(fd, exec_queues[i]);
> > +		if (bind_exec_queues[i])
> > +			xe_exec_queue_destroy(fd, bind_exec_queues[i]);
> >  	}
> >  
> >  	if (bo) {
> > @@ -356,7 +356,7 @@ test_atomic(int fd, struct drm_xe_engine_class_instance *eci,
> >  		.num_syncs = 1,
> >  		.syncs = to_user_pointer(sync),
> >  	};
> > -	uint32_t engine;
> > +	uint32_t exec_queue;
> >  	size_t bo_size;
> >  	uint32_t bo, bo_wait;
> >  	struct {
> > @@ -394,7 +394,7 @@ test_atomic(int fd, struct drm_xe_engine_class_instance *eci,
> >  	memset(data, 0, bo_size);
> >  	memset(wait, 0, bo_size);
> >  
> > -	engine = xe_engine_create(fd, vm, eci, 0);
> > +	exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
> >  
> >  	sync[0].addr = to_user_pointer(&wait[wait_idx].vm_sync);
> >  	xe_vm_bind_async(fd, vm, 0, bo, 0, addr, bo_size, sync, 1);
> > @@ -426,7 +426,7 @@ test_atomic(int fd, struct drm_xe_engine_class_instance *eci,
> >  		sync[0].addr = addr_wait +
> >  			(char *)&wait[i].exec_sync - (char *)wait;
> >  
> > -		exec.engine_id = engine;
> > +		exec.exec_queue_id = exec_queue;
> >  		exec.address = batch_addr;
> >  		xe_exec(fd, &exec);
> >  
> > @@ -450,7 +450,7 @@ test_atomic(int fd, struct drm_xe_engine_class_instance *eci,
> >  	xe_wait_ufence(fd, &wait[wait_idx++].vm_sync, USER_FENCE_VALUE, NULL,
> >  		       ONE_SEC);
> >  
> > -	xe_engine_destroy(fd, engine);
> > +	xe_exec_queue_destroy(fd, exec_queue);
> >  	munmap(data, bo_size);
> >  	munmap(wait, bo_size);
> >  	gem_close(fd, bo);
> > @@ -471,14 +471,14 @@ igt_main
> >  		{ "userptr-rebind", USERPTR | REBIND },
> >  		{ "userptr-invalidate", USERPTR | INVALIDATE },
> >  		{ "userptr-invalidate-race", USERPTR | INVALIDATE | RACE },
> > -		{ "bindengine", BIND_ENGINE },
> > -		{ "bindengine-userptr", BIND_ENGINE | USERPTR },
> > -		{ "bindengine-rebind",  BIND_ENGINE | REBIND },
> > -		{ "bindengine-userptr-rebind", BIND_ENGINE | USERPTR |
> > +		{ "bindexecqueue", BIND_EXEC_QUEUE },
> > +		{ "bindexecqueue-userptr", BIND_EXEC_QUEUE | USERPTR },
> > +		{ "bindexecqueue-rebind",  BIND_EXEC_QUEUE | REBIND },
> > +		{ "bindexecqueue-userptr-rebind", BIND_EXEC_QUEUE | USERPTR |
> >  			REBIND },
> > -		{ "bindengine-userptr-invalidate", BIND_ENGINE | USERPTR |
> > +		{ "bindexecqueue-userptr-invalidate", BIND_EXEC_QUEUE | USERPTR |
> >  			INVALIDATE },
> > -		{ "bindengine-userptr-invalidate-race", BIND_ENGINE | USERPTR |
> > +		{ "bindexecqueue-userptr-invalidate-race", BIND_EXEC_QUEUE | USERPTR |
> >  			INVALIDATE | RACE },
> >  		{ "basic-imm", IMMEDIATE },
> >  		{ "userptr-imm", IMMEDIATE | USERPTR },
> > @@ -487,15 +487,15 @@ igt_main
> >  		{ "userptr-invalidate-imm", IMMEDIATE | USERPTR | INVALIDATE },
> >  		{ "userptr-invalidate-race-imm", IMMEDIATE | USERPTR |
> >  			INVALIDATE | RACE },
> > -		{ "bindengine-imm", IMMEDIATE | BIND_ENGINE },
> > -		{ "bindengine-userptr-imm", IMMEDIATE | BIND_ENGINE | USERPTR },
> > -		{ "bindengine-rebind-imm", IMMEDIATE | BIND_ENGINE | REBIND },
> > -		{ "bindengine-userptr-rebind-imm", IMMEDIATE | BIND_ENGINE |
> > +		{ "bindexecqueue-imm", IMMEDIATE | BIND_EXEC_QUEUE },
> > +		{ "bindexecqueue-userptr-imm", IMMEDIATE | BIND_EXEC_QUEUE | USERPTR },
> > +		{ "bindexecqueue-rebind-imm", IMMEDIATE | BIND_EXEC_QUEUE | REBIND },
> > +		{ "bindexecqueue-userptr-rebind-imm", IMMEDIATE | BIND_EXEC_QUEUE |
> >  			USERPTR | REBIND },
> > -		{ "bindengine-userptr-invalidate-imm", IMMEDIATE | BIND_ENGINE |
> > +		{ "bindexecqueue-userptr-invalidate-imm", IMMEDIATE | BIND_EXEC_QUEUE |
> >  			USERPTR | INVALIDATE },
> > -		{ "bindengine-userptr-invalidate-race-imm", IMMEDIATE |
> > -			BIND_ENGINE | USERPTR | INVALIDATE | RACE },
> > +		{ "bindexecqueue-userptr-invalidate-race-imm", IMMEDIATE |
> > +			BIND_EXEC_QUEUE | USERPTR | INVALIDATE | RACE },
> >  		{ "basic-prefetch", PREFETCH },
> >  		{ "userptr-prefetch", PREFETCH | USERPTR },
> >  		{ "rebind-prefetch", PREFETCH | REBIND },
> > @@ -503,15 +503,15 @@ igt_main
> >  		{ "userptr-invalidate-prefetch", PREFETCH | USERPTR | INVALIDATE },
> >  		{ "userptr-invalidate-race-prefetch", PREFETCH | USERPTR |
> >  			INVALIDATE | RACE },
> > -		{ "bindengine-prefetch", PREFETCH | BIND_ENGINE },
> > -		{ "bindengine-userptr-prefetch", PREFETCH | BIND_ENGINE | USERPTR },
> > -		{ "bindengine-rebind-prefetch", PREFETCH | BIND_ENGINE | REBIND },
> > -		{ "bindengine-userptr-rebind-prefetch", PREFETCH | BIND_ENGINE |
> > +		{ "bindexecqueue-prefetch", PREFETCH | BIND_EXEC_QUEUE },
> > +		{ "bindexecqueue-userptr-prefetch", PREFETCH | BIND_EXEC_QUEUE | USERPTR },
> > +		{ "bindexecqueue-rebind-prefetch", PREFETCH | BIND_EXEC_QUEUE | REBIND },
> > +		{ "bindexecqueue-userptr-rebind-prefetch", PREFETCH | BIND_EXEC_QUEUE |
> >  			USERPTR | REBIND },
> > -		{ "bindengine-userptr-invalidate-prefetch", PREFETCH | BIND_ENGINE |
> > +		{ "bindexecqueue-userptr-invalidate-prefetch", PREFETCH | BIND_EXEC_QUEUE |
> >  			USERPTR | INVALIDATE },
> > -		{ "bindengine-userptr-invalidate-race-prefetch", PREFETCH |
> > -			BIND_ENGINE | USERPTR | INVALIDATE | RACE },
> > +		{ "bindexecqueue-userptr-invalidate-race-prefetch", PREFETCH |
> > +			BIND_EXEC_QUEUE | USERPTR | INVALIDATE | RACE },
> >  		{ "invalid-fault", INVALID_FAULT },
> >  		{ "invalid-userptr-fault", INVALID_FAULT | USERPTR },
> >  		{ NULL },
> > @@ -539,7 +539,7 @@ igt_main
> >  					  64 : 128,
> >  					  s->flags);
> >  
> > -		igt_subtest_f("many-engines-%s", s->name)
> > +		igt_subtest_f("many-execqueues-%s", s->name)
> >  			xe_for_each_hw_engine(fd, hwe)
> >  				test_exec(fd, hwe, 16,
> >  					  s->flags & (REBIND | INVALIDATE) ?
> > diff --git a/tests/xe/xe_exec_reset.c b/tests/xe/xe_exec_reset.c
> > index dfbaa6035..a2d33baf1 100644
> > --- a/tests/xe/xe_exec_reset.c
> > +++ b/tests/xe/xe_exec_reset.c
> > @@ -4,7 +4,7 @@
> >   */
> >  
> >  /**
> > - * TEST: Basic tests for execbuf functionality for virtual and parallel engines
> > + * TEST: Basic tests for execbuf functionality for virtual and parallel exec_queues
> >   * Category: Hardware building block
> >   * Sub-category: execbuf
> >   * Functionality: reset
> > @@ -39,7 +39,7 @@ static void test_spin(int fd, struct drm_xe_engine_class_instance *eci)
> >  		.num_syncs = 2,
> >  		.syncs = to_user_pointer(sync),
> >  	};
> > -	uint32_t engine;
> > +	uint32_t exec_queue;
> >  	uint32_t syncobj;
> >  	size_t bo_size;
> >  	uint32_t bo = 0;
> > @@ -54,7 +54,7 @@ static void test_spin(int fd, struct drm_xe_engine_class_instance *eci)
> >  				visible_vram_if_possible(fd, eci->gt_id));
> >  	spin = xe_bo_map(fd, bo, bo_size);
> >  
> > -	engine = xe_engine_create(fd, vm, eci, 0);
> > +	exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
> >  	syncobj = syncobj_create(fd, 0);
> >  
> >  	sync[0].handle = syncobj_create(fd, 0);
> > @@ -66,7 +66,7 @@ static void test_spin(int fd, struct drm_xe_engine_class_instance *eci)
> >  	sync[1].flags |= DRM_XE_SYNC_SIGNAL;
> >  	sync[1].handle = syncobj;
> >  
> > -	exec.engine_id = engine;
> > +	exec.exec_queue_id = exec_queue;
> >  	exec.address = addr;
> >  	xe_exec(fd, &exec);
> >  
> > @@ -84,31 +84,31 @@ static void test_spin(int fd, struct drm_xe_engine_class_instance *eci)
> >  
> >  	syncobj_destroy(fd, sync[0].handle);
> >  	syncobj_destroy(fd, syncobj);
> > -	xe_engine_destroy(fd, engine);
> > +	xe_exec_queue_destroy(fd, exec_queue);
> >  
> >  	munmap(spin, bo_size);
> >  	gem_close(fd, bo);
> >  	xe_vm_destroy(fd, vm);
> >  }
> >  
> > -#define MAX_N_ENGINES 16
> > -#define MAX_INSTANCE 9
> > -#define CANCEL		(0x1 << 0)
> > -#define ENGINE_RESET	(0x1 << 1)
> > -#define GT_RESET	(0x1 << 2)
> > -#define CLOSE_FD	(0x1 << 3)
> > -#define CLOSE_ENGINES	(0x1 << 4)
> > -#define VIRTUAL		(0x1 << 5)
> > -#define PARALLEL	(0x1 << 6)
> > -#define CAT_ERROR	(0x1 << 7)
> > +#define MAX_N_EXECQUEUES	16
> > +#define MAX_INSTANCE		9
> > +#define CANCEL			(0x1 << 0)
> > +#define EXEC_QUEUE_RESET	(0x1 << 1)
> > +#define GT_RESET		(0x1 << 2)
> > +#define CLOSE_FD		(0x1 << 3)
> > +#define CLOSE_EXEC_QUEUES	(0x1 << 4)
> > +#define VIRTUAL			(0x1 << 5)
> > +#define PARALLEL		(0x1 << 6)
> > +#define CAT_ERROR		(0x1 << 7)
> >  
> >  /**
> >   * SUBTEST: %s-cancel
> >   * Description: Test %arg[1] cancel
> >   * Run type: FULL
> >   *
> > - * SUBTEST: %s-engine-reset
> > - * Description: Test %arg[1] engine reset
> > + * SUBTEST: %s-execqueue-reset
> > + * Description: Test %arg[1] exec_queue reset
> >   * Run type: FULL
> >   *
> >   * SUBTEST: %s-cat-error
> > @@ -131,8 +131,8 @@ static void test_spin(int fd, struct drm_xe_engine_class_instance *eci)
> >   * Description: Test %arg[1] close fd
> >   * Run type: FULL
> >   *
> > - * SUBTEST: %s-close-engines-close-fd
> > - * Description: Test %arg[1] close engines close fd
> > + * SUBTEST: %s-close-execqueues-close-fd
> > + * Description: Test %arg[1] close exec_queues close fd
> >   * Run type: FULL
> >   *
> >   * arg[1]:
> > @@ -142,7 +142,7 @@ static void test_spin(int fd, struct drm_xe_engine_class_instance *eci)
> >   */
> >  
> >  static void
> > -test_balancer(int fd, int gt, int class, int n_engines, int n_execs,
> > +test_balancer(int fd, int gt, int class, int n_exec_queues, int n_execs,
> >  	      unsigned int flags)
> >  {
> >  	uint32_t vm;
> > @@ -155,8 +155,8 @@ test_balancer(int fd, int gt, int class, int n_engines, int n_execs,
> >  		.num_syncs = 2,
> >  		.syncs = to_user_pointer(sync),
> >  	};
> > -	uint32_t engines[MAX_N_ENGINES];
> > -	uint32_t syncobjs[MAX_N_ENGINES];
> > +	uint32_t exec_queues[MAX_N_EXECQUEUES];
> > +	uint32_t syncobjs[MAX_N_EXECQUEUES];
> >  	size_t bo_size;
> >  	uint32_t bo = 0;
> >  	struct {
> > @@ -169,7 +169,7 @@ test_balancer(int fd, int gt, int class, int n_engines, int n_execs,
> >  	struct drm_xe_engine_class_instance eci[MAX_INSTANCE];
> >  	int i, j, b, num_placements = 0, bad_batches = 1;
> >  
> > -	igt_assert(n_engines <= MAX_N_ENGINES);
> > +	igt_assert(n_exec_queues <= MAX_N_EXECQUEUES);
> >  
> >  	if (flags & CLOSE_FD)
> >  		fd = drm_open_driver(DRIVER_XE);
> > @@ -191,20 +191,20 @@ test_balancer(int fd, int gt, int class, int n_engines, int n_execs,
> >  	bo = xe_bo_create_flags(fd, vm, bo_size, visible_vram_if_possible(fd, gt));
> >  	data = xe_bo_map(fd, bo, bo_size);
> >  
> > -	for (i = 0; i < n_engines; i++) {
> > -		struct drm_xe_ext_engine_set_property job_timeout = {
> > +	for (i = 0; i < n_exec_queues; i++) {
> > +		struct drm_xe_ext_exec_queue_set_property job_timeout = {
> >  			.base.next_extension = 0,
> > -			.base.name = XE_ENGINE_EXTENSION_SET_PROPERTY,
> > -			.property = XE_ENGINE_SET_PROPERTY_JOB_TIMEOUT,
> > +			.base.name = XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
> > +			.property = XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT,
> >  			.value = 50,
> >  		};
> > -		struct drm_xe_ext_engine_set_property preempt_timeout = {
> > +		struct drm_xe_ext_exec_queue_set_property preempt_timeout = {
> >  			.base.next_extension = 0,
> > -			.base.name = XE_ENGINE_EXTENSION_SET_PROPERTY,
> > -			.property = XE_ENGINE_SET_PROPERTY_PREEMPTION_TIMEOUT,
> > +			.base.name = XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
> > +			.property = XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT,
> >  			.value = 1000,
> >  		};
> > -		struct drm_xe_engine_create create = {
> > +		struct drm_xe_exec_queue_create create = {
> >  			.vm_id = vm,
> >  			.width = flags & PARALLEL ? num_placements : 1,
> >  			.num_placements = flags & PARALLEL ? 1 : num_placements,
> > @@ -213,12 +213,12 @@ test_balancer(int fd, int gt, int class, int n_engines, int n_execs,
> >  
> >  		if (flags & CANCEL)
> >  			create.extensions = to_user_pointer(&job_timeout);
> > -		else if (flags & ENGINE_RESET)
> > +		else if (flags & EXEC_QUEUE_RESET)
> >  			create.extensions = to_user_pointer(&preempt_timeout);
> >  
> > -		igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_ENGINE_CREATE,
> > +		igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE,
> >  					&create), 0);
> > -		engines[i] = create.engine_id;
> > +		exec_queues[i] = create.exec_queue_id;
> >  		syncobjs[i] = syncobj_create(fd, 0);
> >  	};
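
Worth a note for anyone grepping for the old ioctl: the raw creation path is
now DRM_IOCTL_XE_EXEC_QUEUE_CREATE. A sketch of one virtual exec_queue over
num_placements instances, per the loop above:

	struct drm_xe_exec_queue_create create = {
		.vm_id = vm,
		.width = 1,				/* parallel width */
		.num_placements = num_placements,	/* virtual placements */
		.instances = to_user_pointer(eci),
	};

	igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE,
				&create), 0);
	exec_queues[i] = create.exec_queue_id;

Swapping width and num_placements, as the PARALLEL branch does, creates a
parallel queue instead. The CANCEL flavour then hangs a short job timeout
off create.extensions and EXEC_QUEUE_RESET a short preemption timeout, as
the hunk shows.
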
> >  	exec.num_batch_buffer = flags & PARALLEL ? num_placements : 1;
> > @@ -226,7 +226,7 @@ test_balancer(int fd, int gt, int class, int n_engines, int n_execs,
> >  	sync[0].handle = syncobj_create(fd, 0);
> >  	xe_vm_bind_async(fd, vm, 0, bo, 0, addr, bo_size, sync, 1);
> >  
> > -	if (flags & VIRTUAL && (flags & CAT_ERROR || flags & ENGINE_RESET ||
> > +	if (flags & VIRTUAL && (flags & CAT_ERROR || flags & EXEC_QUEUE_RESET ||
> >  				flags & GT_RESET))
> >  		bad_batches = num_placements;
> >  
> > @@ -241,7 +241,7 @@ test_balancer(int fd, int gt, int class, int n_engines, int n_execs,
> >  		uint64_t sdi_addr = base_addr + sdi_offset;
> >  		uint64_t exec_addr;
> >  		uint64_t batches[MAX_INSTANCE];
> > -		int e = i % n_engines;
> > +		int e = i % n_exec_queues;
> >  
> >  		for (j = 0; j < num_placements && flags & PARALLEL; ++j)
> >  			batches[j] = batch_addr;
> > @@ -268,7 +268,7 @@ test_balancer(int fd, int gt, int class, int n_engines, int n_execs,
> >  		sync[1].flags |= DRM_XE_SYNC_SIGNAL;
> >  		sync[1].handle = syncobjs[e];
> >  
> > -		exec.engine_id = engines[e];
> > +		exec.exec_queue_id = exec_queues[e];
> >  		exec.address = flags & PARALLEL ?
> >  			to_user_pointer(batches) : exec_addr;
> >  		if (e != i)
> > @@ -280,9 +280,9 @@ test_balancer(int fd, int gt, int class, int n_engines, int n_execs,
> >  		xe_force_gt_reset(fd, gt);
> >  
> >  	if (flags & CLOSE_FD) {
> > -		if (flags & CLOSE_ENGINES) {
> > -			for (i = 0; i < n_engines; i++)
> > -				xe_engine_destroy(fd, engines[i]);
> > +		if (flags & CLOSE_EXEC_QUEUES) {
> > +			for (i = 0; i < n_exec_queues; i++)
> > +				xe_exec_queue_destroy(fd, exec_queues[i]);
> >  		}
> >  		drm_close_driver(fd);
> >  		/* FIXME: wait for idle */
> > @@ -290,7 +290,7 @@ test_balancer(int fd, int gt, int class, int n_engines, int n_execs,
> >  		return;
> >  	}
> >  
> > -	for (i = 0; i < n_engines && n_execs; i++)
> > +	for (i = 0; i < n_exec_queues && n_execs; i++)
> >  		igt_assert(syncobj_wait(fd, &syncobjs[i], 1, INT64_MAX, 0,
> >  					NULL));
> >  	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
> > @@ -303,9 +303,9 @@ test_balancer(int fd, int gt, int class, int n_engines, int n_execs,
> >  		igt_assert_eq(data[i].data, 0xc0ffee);
> >  
> >  	syncobj_destroy(fd, sync[0].handle);
> > -	for (i = 0; i < n_engines; i++) {
> > +	for (i = 0; i < n_exec_queues; i++) {
> >  		syncobj_destroy(fd, syncobjs[i]);
> > -		xe_engine_destroy(fd, engines[i]);
> > +		xe_exec_queue_destroy(fd, exec_queues[i]);
> >  	}
> >  
> >  	munmap(data, bo_size);
> > @@ -318,8 +318,8 @@ test_balancer(int fd, int gt, int class, int n_engines, int n_execs,
> >   * Description: Test cancel
> >   * Run type: FULL
> >   *
> > - * SUBTEST: engine-reset
> > - * Description: Test engine reset
> > + * SUBTEST: execqueue-reset
> > + * Description: Test exec_queue reset
> >   * Run type: FULL
> >   *
> >   * SUBTEST: cat-error
> > @@ -338,14 +338,14 @@ test_balancer(int fd, int gt, int class, int n_engines, int n_execs,
> >   * Description: Test close fd
> >   * Run type: FULL
> >   *
> > - * SUBTEST: close-engines-close-fd
> > - * Description: Test close engines close fd
> > + * SUBTEST: close-execqueues-close-fd
> > + * Description: Test close exec_queues close fd
> >   * Run type: FULL
> >   */
> >  
> >  static void
> >  test_legacy_mode(int fd, struct drm_xe_engine_class_instance *eci,
> > -		 int n_engines, int n_execs, unsigned int flags)
> > +		 int n_exec_queues, int n_execs, unsigned int flags)
> >  {
> >  	uint32_t vm;
> >  	uint64_t addr = 0x1a0000;
> > @@ -358,8 +358,8 @@ test_legacy_mode(int fd, struct drm_xe_engine_class_instance *eci,
> >  		.num_syncs = 2,
> >  		.syncs = to_user_pointer(sync),
> >  	};
> > -	uint32_t engines[MAX_N_ENGINES];
> > -	uint32_t syncobjs[MAX_N_ENGINES];
> > +	uint32_t exec_queues[MAX_N_EXECQUEUES];
> > +	uint32_t syncobjs[MAX_N_EXECQUEUES];
> >  	size_t bo_size;
> >  	uint32_t bo = 0;
> >  	struct {
> > @@ -370,7 +370,7 @@ test_legacy_mode(int fd, struct drm_xe_engine_class_instance *eci,
> >  	} *data;
> >  	int i, b;
> >  
> > -	igt_assert(n_engines <= MAX_N_ENGINES);
> > +	igt_assert(n_exec_queues <= MAX_N_EXECQUEUES);
> >  
> >  	if (flags & CLOSE_FD)
> >  		fd = drm_open_driver(DRIVER_XE);
> > @@ -384,27 +384,27 @@ test_legacy_mode(int fd, struct drm_xe_engine_class_instance *eci,
> >  				visible_vram_if_possible(fd, eci->gt_id));
> >  	data = xe_bo_map(fd, bo, bo_size);
> >  
> > -	for (i = 0; i < n_engines; i++) {
> > -		struct drm_xe_ext_engine_set_property job_timeout = {
> > +	for (i = 0; i < n_exec_queues; i++) {
> > +		struct drm_xe_ext_exec_queue_set_property job_timeout = {
> >  			.base.next_extension = 0,
> > -			.base.name = XE_ENGINE_EXTENSION_SET_PROPERTY,
> > -			.property = XE_ENGINE_SET_PROPERTY_JOB_TIMEOUT,
> > +			.base.name = XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
> > +			.property = XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT,
> >  			.value = 50,
> >  		};
> > -		struct drm_xe_ext_engine_set_property preempt_timeout = {
> > +		struct drm_xe_ext_exec_queue_set_property preempt_timeout = {
> >  			.base.next_extension = 0,
> > -			.base.name = XE_ENGINE_EXTENSION_SET_PROPERTY,
> > -			.property = XE_ENGINE_SET_PROPERTY_PREEMPTION_TIMEOUT,
> > +			.base.name = XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
> > +			.property = XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT,
> >  			.value = 1000,
> >  		};
> >  		uint64_t ext = 0;
> >  
> >  		if (flags & CANCEL)
> >  			ext = to_user_pointer(&job_timeout);
> > -		else if (flags & ENGINE_RESET)
> > +		else if (flags & EXEC_QUEUE_RESET)
> >  			ext = to_user_pointer(&preempt_timeout);
> >  
> > -		engines[i] = xe_engine_create(fd, vm, eci, ext);
> > +		exec_queues[i] = xe_exec_queue_create(fd, vm, eci, ext);
> >  		syncobjs[i] = syncobj_create(fd, 0);
> >  	};
> >  
> > @@ -421,7 +421,7 @@ test_legacy_mode(int fd, struct drm_xe_engine_class_instance *eci,
> >  		uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
> >  		uint64_t sdi_addr = base_addr + sdi_offset;
> >  		uint64_t exec_addr;
> > -		int e = i % n_engines;
> > +		int e = i % n_exec_queues;
> >  
> >  		if (!i) {
> >  			xe_spin_init(&data[i].spin, spin_addr, false);
> > @@ -442,7 +442,7 @@ test_legacy_mode(int fd, struct drm_xe_engine_class_instance *eci,
> >  		sync[1].flags |= DRM_XE_SYNC_SIGNAL;
> >  		sync[1].handle = syncobjs[e];
> >  
> > -		exec.engine_id = engines[e];
> > +		exec.exec_queue_id = exec_queues[e];
> >  		exec.address = exec_addr;
> >  		if (e != i)
> >  			 syncobj_reset(fd, &syncobjs[e], 1);
> > @@ -453,9 +453,9 @@ test_legacy_mode(int fd, struct drm_xe_engine_class_instance *eci,
> >  		xe_force_gt_reset(fd, eci->gt_id);
> >  
> >  	if (flags & CLOSE_FD) {
> > -		if (flags & CLOSE_ENGINES) {
> > -			for (i = 0; i < n_engines; i++)
> > -				xe_engine_destroy(fd, engines[i]);
> > +		if (flags & CLOSE_EXEC_QUEUES) {
> > +			for (i = 0; i < n_exec_queues; i++)
> > +				xe_exec_queue_destroy(fd, exec_queues[i]);
> >  		}
> >  		drm_close_driver(fd);
> >  		/* FIXME: wait for idle */
> > @@ -463,7 +463,7 @@ test_legacy_mode(int fd, struct drm_xe_engine_class_instance *eci,
> >  		return;
> >  	}
> >  
> > -	for (i = 0; i < n_engines && n_execs; i++)
> > +	for (i = 0; i < n_exec_queues && n_execs; i++)
> >  		igt_assert(syncobj_wait(fd, &syncobjs[i], 1, INT64_MAX, 0,
> >  					NULL));
> >  	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
> > @@ -476,9 +476,9 @@ test_legacy_mode(int fd, struct drm_xe_engine_class_instance *eci,
> >  		igt_assert_eq(data[i].data, 0xc0ffee);
> >  
> >  	syncobj_destroy(fd, sync[0].handle);
> > -	for (i = 0; i < n_engines; i++) {
> > +	for (i = 0; i < n_exec_queues; i++) {
> >  		syncobj_destroy(fd, syncobjs[i]);
> > -		xe_engine_destroy(fd, engines[i]);
> > +		xe_exec_queue_destroy(fd, exec_queues[i]);
> >  	}
> >  
> >  	munmap(data, bo_size);
> > @@ -487,8 +487,8 @@ test_legacy_mode(int fd, struct drm_xe_engine_class_instance *eci,
> >  }
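
To spell out the reset recipe these subtests share: the first submission is
an xe_spin batch that never completes on its own, and the short preemption
timeout set at create time is what forces the exec_queue reset; the
remaining stores then land and are checked for 0xc0ffee. Roughly ('hang' is
whichever queue is meant to die; the tests use index 0 or n_exec_queues / 2):

	/* queue created with a short PREEMPTION_TIMEOUT, as above */
	xe_spin_init(&data[0].spin, spin_addr, false);
	exec.exec_queue_id = exec_queues[hang];
	exec.address = spin_addr;
	xe_exec(fd, &exec);
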
> >  
> >  /**
> > - * SUBTEST: cm-engine-reset
> > - * Description: Test compute mode engine reset
> > + * SUBTEST: cm-execqueue-reset
> > + * Description: Test compute mode exec_queue reset
> >   * Run type: FULL
> >   *
> >   * SUBTEST: cm-cat-error
> > @@ -507,14 +507,14 @@ test_legacy_mode(int fd, struct drm_xe_engine_class_instance *eci,
> >   * Description: Test compute mode close fd
> >   * Run type: FULL
> >   *
> > - * SUBTEST: cm-close-engines-close-fd
> > - * Description: Test compute mode close engines close fd
> > + * SUBTEST: cm-close-execqueues-close-fd
> > + * Description: Test compute mode close exec_queues close fd
> >   * Run type: FULL
> >   */
> >  
> >  static void
> >  test_compute_mode(int fd, struct drm_xe_engine_class_instance *eci,
> > -		  int n_engines, int n_execs, unsigned int flags)
> > +		  int n_exec_queues, int n_execs, unsigned int flags)
> >  {
> >  	uint32_t vm;
> >  	uint64_t addr = 0x1a0000;
> > @@ -528,7 +528,7 @@ test_compute_mode(int fd, struct drm_xe_engine_class_instance *eci,
> >  		.num_syncs = 1,
> >  		.syncs = to_user_pointer(sync),
> >  	};
> > -	uint32_t engines[MAX_N_ENGINES];
> > +	uint32_t exec_queues[MAX_N_EXECQUEUES];
> >  	size_t bo_size;
> >  	uint32_t bo = 0;
> >  	struct {
> > @@ -541,7 +541,7 @@ test_compute_mode(int fd, struct drm_xe_engine_class_instance *eci,
> >  	} *data;
> >  	int i, b;
> >  
> > -	igt_assert(n_engines <= MAX_N_ENGINES);
> > +	igt_assert(n_exec_queues <= MAX_N_EXECQUEUES);
> >  
> >  	if (flags & CLOSE_FD)
> >  		fd = drm_open_driver(DRIVER_XE);
> > @@ -557,27 +557,27 @@ test_compute_mode(int fd, struct drm_xe_engine_class_instance *eci,
> >  	data = xe_bo_map(fd, bo, bo_size);
> >  	memset(data, 0, bo_size);
> >  
> > -	for (i = 0; i < n_engines; i++) {
> > -		struct drm_xe_ext_engine_set_property compute = {
> > +	for (i = 0; i < n_exec_queues; i++) {
> > +		struct drm_xe_ext_exec_queue_set_property compute = {
> >  			.base.next_extension = 0,
> > -			.base.name = XE_ENGINE_EXTENSION_SET_PROPERTY,
> > -			.property = XE_ENGINE_SET_PROPERTY_COMPUTE_MODE,
> > +			.base.name = XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
> > +			.property = XE_EXEC_QUEUE_SET_PROPERTY_COMPUTE_MODE,
> >  			.value = 1,
> >  		};
> > -		struct drm_xe_ext_engine_set_property preempt_timeout = {
> > +		struct drm_xe_ext_exec_queue_set_property preempt_timeout = {
> >  			.base.next_extension = to_user_pointer(&compute),
> > -			.base.name = XE_ENGINE_EXTENSION_SET_PROPERTY,
> > -			.property = XE_ENGINE_SET_PROPERTY_PREEMPTION_TIMEOUT,
> > +			.base.name = XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
> > +			.property = XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT,
> >  			.value = 1000,
> >  		};
> >  		uint64_t ext = 0;
> >  
> > -		if (flags & ENGINE_RESET)
> > +		if (flags & EXEC_QUEUE_RESET)
> >  			ext = to_user_pointer(&preempt_timeout);
> >  		else
> >  			ext = to_user_pointer(&compute);
> >  
> > -		engines[i] = xe_engine_create(fd, vm, eci, ext);
> > +		exec_queues[i] = xe_exec_queue_create(fd, vm, eci, ext);
> >  	};
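
One detail that is easy to miss in the compute-mode loop above: the two
set-property extensions are chained through base.next_extension, so a single
create applies both. The head of the chain goes into the last argument of
xe_exec_queue_create():

	/* chain: preempt_timeout -> compute -> 0 (end of chain) */
	compute.base.next_extension = 0;
	preempt_timeout.base.next_extension = to_user_pointer(&compute);

	exec_queues[i] = xe_exec_queue_create(fd, vm, eci,
					      to_user_pointer(&preempt_timeout));
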
> >  
> >  	sync[0].addr = to_user_pointer(&data[0].vm_sync);
> > @@ -597,7 +597,7 @@ test_compute_mode(int fd, struct drm_xe_engine_class_instance *eci,
> >  		uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
> >  		uint64_t sdi_addr = base_addr + sdi_offset;
> >  		uint64_t exec_addr;
> > -		int e = i % n_engines;
> > +		int e = i % n_exec_queues;
> >  
> >  		if (!i) {
> >  			xe_spin_init(&data[i].spin, spin_addr, false);
> > @@ -617,7 +617,7 @@ test_compute_mode(int fd, struct drm_xe_engine_class_instance *eci,
> >  		sync[0].addr = base_addr +
> >  			(char *)&data[i].exec_sync - (char *)data;
> >  
> > -		exec.engine_id = engines[e];
> > +		exec.exec_queue_id = exec_queues[e];
> >  		exec.address = exec_addr;
> >  		xe_exec(fd, &exec);
> >  	}
> > @@ -626,9 +626,9 @@ test_compute_mode(int fd, struct drm_xe_engine_class_instance *eci,
> >  		xe_force_gt_reset(fd, eci->gt_id);
> >  
> >  	if (flags & CLOSE_FD) {
> > -		if (flags & CLOSE_ENGINES) {
> > -			for (i = 0; i < n_engines; i++)
> > -				xe_engine_destroy(fd, engines[i]);
> > +		if (flags & CLOSE_EXEC_QUEUES) {
> > +			for (i = 0; i < n_exec_queues; i++)
> > +				xe_exec_queue_destroy(fd, exec_queues[i]);
> >  		}
> >  		drm_close_driver(fd);
> >  		/* FIXME: wait for idle */
> > @@ -647,8 +647,8 @@ test_compute_mode(int fd, struct drm_xe_engine_class_instance *eci,
> >  	for (i = 1; i < n_execs; i++)
> >  		igt_assert_eq(data[i].data, 0xc0ffee);
> >  
> > -	for (i = 0; i < n_engines; i++)
> > -		xe_engine_destroy(fd, engines[i]);
> > +	for (i = 0; i < n_exec_queues; i++)
> > +		xe_exec_queue_destroy(fd, exec_queues[i]);
> >  
> >  	munmap(data, bo_size);
> >  	gem_close(fd, bo);
> > @@ -697,7 +697,7 @@ static void submit_jobs(struct gt_thread_data *t)
> >  			.engine_instance = 0,
> >  			.gt_id = 0,
> >  		};
> > -		struct drm_xe_engine_create create = {
> > +		struct drm_xe_exec_queue_create create = {
> >  			.vm_id = vm,
> >  			.width = 1,
> >  			.num_placements = 1,
> > @@ -707,15 +707,15 @@ static void submit_jobs(struct gt_thread_data *t)
> >  		int ret;
> >  
> >  		/* GuC IDs can get exhausted */
> > -		ret = igt_ioctl(fd, DRM_IOCTL_XE_ENGINE_CREATE, &create);
> > +		ret = igt_ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
> >  		if (ret)
> >  			continue;
> >  
> > -		exec.engine_id = create.engine_id;
> > +		exec.exec_queue_id = create.exec_queue_id;
> >  		exec.address = addr;
> >  		exec.num_batch_buffer = 1;
> >  		xe_exec(fd, &exec);
> > -		xe_engine_destroy(fd, create.engine_id);
> > +		xe_exec_queue_destroy(fd, create.exec_queue_id);
> >  	}
> >  
> >  	munmap(data, bo_size);
> > @@ -817,9 +817,9 @@ igt_main
> >  		xe_for_each_hw_engine(fd, hwe)
> >  			test_legacy_mode(fd, hwe, 1, 1, CANCEL);
> >  
> > -	igt_subtest("engine-reset")
> > +	igt_subtest("execqueue-reset")
> >  		xe_for_each_hw_engine(fd, hwe)
> > -			test_legacy_mode(fd, hwe, 2, 2, ENGINE_RESET);
> > +			test_legacy_mode(fd, hwe, 2, 2, EXEC_QUEUE_RESET);
> >  
> >  	igt_subtest("cat-error")
> >  		xe_for_each_hw_engine(fd, hwe)
> > @@ -837,14 +837,14 @@ igt_main
> >  		xe_for_each_hw_engine(fd, hwe)
> >  			test_legacy_mode(-1, hwe, 16, 256, CLOSE_FD);
> >  
> > -	igt_subtest("close-engines-close-fd")
> > +	igt_subtest("close-execqueues-close-fd")
> >  		xe_for_each_hw_engine(fd, hwe)
> >  			test_legacy_mode(-1, hwe, 16, 256, CLOSE_FD |
> > -					 CLOSE_ENGINES);
> > +					 CLOSE_EXEC_QUEUES);
> >  
> > -	igt_subtest("cm-engine-reset")
> > +	igt_subtest("cm-execqueue-reset")
> >  		xe_for_each_hw_engine(fd, hwe)
> > -			test_compute_mode(fd, hwe, 2, 2, ENGINE_RESET);
> > +			test_compute_mode(fd, hwe, 2, 2, EXEC_QUEUE_RESET);
> >  
> >  	igt_subtest("cm-cat-error")
> >  		xe_for_each_hw_engine(fd, hwe)
> > @@ -862,10 +862,10 @@ igt_main
> >  		xe_for_each_hw_engine(fd, hwe)
> >  			test_compute_mode(-1, hwe, 16, 256, CLOSE_FD);
> >  
> > -	igt_subtest("cm-close-engines-close-fd")
> > +	igt_subtest("cm-close-execqueues-close-fd")
> >  		xe_for_each_hw_engine(fd, hwe)
> >  			test_compute_mode(-1, hwe, 16, 256, CLOSE_FD |
> > -					  CLOSE_ENGINES);
> > +					  CLOSE_EXEC_QUEUES);
> >  
> >  	for (const struct section *s = sections; s->name; s++) {
> >  		igt_subtest_f("%s-cancel", s->name)
> > @@ -874,12 +874,12 @@ igt_main
> >  					test_balancer(fd, gt, class, 1, 1,
> >  						      CANCEL | s->flags);
> >  
> > -		igt_subtest_f("%s-engine-reset", s->name)
> > +		igt_subtest_f("%s-execqueue-reset", s->name)
> >  			xe_for_each_gt(fd, gt)
> >  				xe_for_each_hw_engine_class(class)
> >  					test_balancer(fd, gt, class, MAX_INSTANCE + 1,
> >  						      MAX_INSTANCE + 1,
> > -						      ENGINE_RESET | s->flags);
> > +						      EXEC_QUEUE_RESET | s->flags);
> >  
> >  		igt_subtest_f("%s-cat-error", s->name)
> >  			xe_for_each_gt(fd, gt)
> > @@ -907,11 +907,11 @@ igt_main
> >  					test_balancer(-1, gt, class, 16, 256,
> >  						      CLOSE_FD | s->flags);
> >  
> > -		igt_subtest_f("%s-close-engines-close-fd", s->name)
> > +		igt_subtest_f("%s-close-execqueues-close-fd", s->name)
> >  			xe_for_each_gt(fd, gt)
> >  				xe_for_each_hw_engine_class(class)
> >  					test_balancer(-1, gt, class, 16, 256, CLOSE_FD |
> > -						      CLOSE_ENGINES | s->flags);
> > +						      CLOSE_EXEC_QUEUES | s->flags);
> >  	}
> >  
> >  	igt_subtest("gt-reset-stress")
> > diff --git a/tests/xe/xe_exec_store.c b/tests/xe/xe_exec_store.c
> > index fbce1aecc..bab5ea5bc 100644
> > --- a/tests/xe/xe_exec_store.c
> > +++ b/tests/xe/xe_exec_store.c
> > @@ -66,7 +66,7 @@ static void store(int fd)
> >  	struct data *data;
> >  	struct drm_xe_engine_class_instance *hw_engine;
> >  	uint32_t vm;
> > -	uint32_t engine;
> > +	uint32_t exec_queue;
> >  	uint32_t syncobj;
> >  	size_t bo_size;
> >  	int value = 0x123456;
> > @@ -89,8 +89,8 @@ static void store(int fd)
> >  	data = xe_bo_map(fd, bo, bo_size);
> >  	store_dword_batch(data, addr, value);
> >  
> > -	engine = xe_engine_create(fd, vm, hw_engine, 0);
> > -	exec.engine_id = engine;
> > +	exec_queue = xe_exec_queue_create(fd, vm, hw_engine, 0);
> > +	exec.exec_queue_id = exec_queue;
> >  	exec.address = data->addr;
> >  	sync.flags &= DRM_XE_SYNC_SIGNAL;
> >  	xe_exec(fd, &exec);
> > @@ -102,7 +102,7 @@ static void store(int fd)
> >  	munmap(data, bo_size);
> >  	gem_close(fd, bo);
> >  
> > -	xe_engine_destroy(fd, engine);
> > +	xe_exec_queue_destroy(fd, exec_queue);
> >  	xe_vm_destroy(fd, vm);
> >  }
> >  
> > @@ -125,7 +125,7 @@ static void store_all(int fd, int gt, int class)
> >  
> >  	struct data *data;
> >  	uint32_t syncobjs[MAX_INSTANCE];
> > -	uint32_t engines[MAX_INSTANCE];
> > +	uint32_t exec_queues[MAX_INSTANCE];
> >  	uint32_t vm;
> >  	size_t bo_size;
> >  	uint64_t addr = 0x100000;
> > @@ -152,16 +152,16 @@ static void store_all(int fd, int gt, int class)
> >  	igt_require(num_placements);
> >  
> >  	for (i = 0; i < num_placements; i++) {
> > -		struct drm_xe_engine_create create = {
> > +		struct drm_xe_exec_queue_create create = {
> >  			.vm_id = vm,
> >  			.width = 1,
> >  			.num_placements = num_placements,
> >  			.instances = to_user_pointer(eci),
> >  		};
> >  
> > -		igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_ENGINE_CREATE,
> > +		igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE,
> >  					&create), 0);
> > -		engines[i] = create.engine_id;
> > +		exec_queues[i] = create.exec_queue_id;
> >  		syncobjs[i] = syncobj_create(fd, 0);
> >  	};
> >  
> > @@ -175,7 +175,7 @@ static void store_all(int fd, int gt, int class)
> >  		sync[1].flags |= DRM_XE_SYNC_SIGNAL;
> >  		sync[1].handle = syncobjs[i];
> >  
> > -		exec.engine_id = engines[i];
> > +		exec.exec_queue_id = exec_queues[i];
> >  		exec.address = data->addr;
> >  		xe_exec(fd, &exec);
> >  
> > @@ -190,7 +190,7 @@ static void store_all(int fd, int gt, int class)
> >  
> >  	for (i = 0; i < num_placements; i++) {
> >  		syncobj_destroy(fd, syncobjs[i]);
> > -		xe_engine_destroy(fd, engines[i]);
> > +		xe_exec_queue_destroy(fd, exec_queues[i]);
> >  	}
> >  	xe_vm_destroy(fd, vm);
> >  }
> > diff --git a/tests/xe/xe_exec_threads.c b/tests/xe/xe_exec_threads.c
> > index 396398984..e64c1639a 100644
> > --- a/tests/xe/xe_exec_threads.c
> > +++ b/tests/xe/xe_exec_threads.c
> > @@ -23,7 +23,7 @@
> >  #include "xe/xe_spin.h"
> >  #include <string.h>
> >  
> > -#define MAX_N_ENGINES	16
> > +#define MAX_N_EXEC_QUEUES	16
> >  #define MAX_INSTANCE	9
> >  #define USERPTR		(0x1 << 0)
> >  #define REBIND		(0x1 << 1)
> > @@ -38,25 +38,25 @@
> >  #define VIRTUAL		(0x1 << 10)
> >  #define HANG		(0x1 << 11)
> >  #define REBIND_ERROR	(0x1 << 12)
> > -#define BIND_ENGINE	(0x1 << 13)
> > +#define BIND_EXEC_QUEUE	(0x1 << 13)
> >  
> >  pthread_barrier_t barrier;
> >  
> >  static void
> >  test_balancer(int fd, int gt, uint32_t vm, uint64_t addr, uint64_t userptr,
> > -	      int class, int n_engines, int n_execs, unsigned int flags)
> > +	      int class, int n_exec_queues, int n_execs, unsigned int flags)
> >  {
> >  	struct drm_xe_sync sync[2] = {
> >  		{ .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL, },
> >  		{ .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL, },
> >  	};
> > -	struct drm_xe_sync sync_all[MAX_N_ENGINES];
> > +	struct drm_xe_sync sync_all[MAX_N_EXEC_QUEUES];
> >  	struct drm_xe_exec exec = {
> >  		.num_syncs = 2,
> >  		.syncs = to_user_pointer(sync),
> >  	};
> > -	uint32_t engines[MAX_N_ENGINES];
> > -	uint32_t syncobjs[MAX_N_ENGINES];
> > +	uint32_t exec_queues[MAX_N_EXEC_QUEUES];
> > +	uint32_t syncobjs[MAX_N_EXEC_QUEUES];
> >  	size_t bo_size;
> >  	uint32_t bo = 0;
> >  	struct {
> > @@ -69,7 +69,7 @@ test_balancer(int fd, int gt, uint32_t vm, uint64_t addr, uint64_t userptr,
> >  	int i, j, b, num_placements = 0;
> >  	bool owns_vm = false, owns_fd = false;
> >  
> > -	igt_assert(n_engines <= MAX_N_ENGINES);
> > +	igt_assert(n_exec_queues <= MAX_N_EXEC_QUEUES);
> >  
> >  	if (!fd) {
> >  		fd = drm_open_driver(DRIVER_XE);
> > @@ -113,17 +113,17 @@ test_balancer(int fd, int gt, uint32_t vm, uint64_t addr, uint64_t userptr,
> >  	memset(data, 0, bo_size);
> >  
> >  	memset(sync_all, 0, sizeof(sync_all));
> > -	for (i = 0; i < n_engines; i++) {
> > -		struct drm_xe_engine_create create = {
> > +	for (i = 0; i < n_exec_queues; i++) {
> > +		struct drm_xe_exec_queue_create create = {
> >  			.vm_id = vm,
> >  			.width = flags & PARALLEL ? num_placements : 1,
> >  			.num_placements = flags & PARALLEL ? 1 : num_placements,
> >  			.instances = to_user_pointer(eci),
> >  		};
> >  
> > -		igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_ENGINE_CREATE,
> > +		igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE,
> >  					&create), 0);
> > -		engines[i] = create.engine_id;
> > +		exec_queues[i] = create.exec_queue_id;
> >  		syncobjs[i] = syncobj_create(fd, 0);
> >  		sync_all[i].flags = DRM_XE_SYNC_SYNCOBJ;
> >  		sync_all[i].handle = syncobjs[i];
> > @@ -145,7 +145,7 @@ test_balancer(int fd, int gt, uint32_t vm, uint64_t addr, uint64_t userptr,
> >  		uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
> >  		uint64_t sdi_addr = addr + sdi_offset;
> >  		uint64_t batches[MAX_INSTANCE];
> > -		int e = i % n_engines;
> > +		int e = i % n_exec_queues;
> >  
> >  		for (j = 0; j < num_placements && flags & PARALLEL; ++j)
> >  			batches[j] = batch_addr;
> > @@ -162,7 +162,7 @@ test_balancer(int fd, int gt, uint32_t vm, uint64_t addr, uint64_t userptr,
> >  		sync[1].flags |= DRM_XE_SYNC_SIGNAL;
> >  		sync[1].handle = syncobjs[e];
> >  
> > -		exec.engine_id = engines[e];
> > +		exec.exec_queue_id = exec_queues[e];
> >  		exec.address = flags & PARALLEL ?
> >  			to_user_pointer(batches) : batch_addr;
> >  		if (e != i)
> > @@ -171,7 +171,7 @@ test_balancer(int fd, int gt, uint32_t vm, uint64_t addr, uint64_t userptr,
> >  
> >  		if (flags & REBIND && i && !(i & 0x1f)) {
> >  			xe_vm_unbind_async(fd, vm, 0, 0, addr, bo_size,
> > -					   sync_all, n_engines);
> > +					   sync_all, n_exec_queues);
> >  
> >  			sync[0].flags |= DRM_XE_SYNC_SIGNAL;
> >  			addr += bo_size;
> > @@ -193,7 +193,7 @@ test_balancer(int fd, int gt, uint32_t vm, uint64_t addr, uint64_t userptr,
> >  				 * physical memory on next mmap call triggering
> >  				 * an invalidate.
> >  				 */
> > -				for (j = 0; j < n_engines; ++j)
> > +				for (j = 0; j < n_exec_queues; ++j)
> >  					igt_assert(syncobj_wait(fd,
> >  								&syncobjs[j], 1,
> >  								INT64_MAX, 0,
> > @@ -216,7 +216,7 @@ test_balancer(int fd, int gt, uint32_t vm, uint64_t addr, uint64_t userptr,
> >  		}
> >  	}
> >  
> > -	for (i = 0; i < n_engines; i++)
> > +	for (i = 0; i < n_exec_queues; i++)
> >  		igt_assert(syncobj_wait(fd, &syncobjs[i], 1, INT64_MAX, 0,
> >  					NULL));
> >  	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
> > @@ -230,9 +230,9 @@ test_balancer(int fd, int gt, uint32_t vm, uint64_t addr, uint64_t userptr,
> >  		igt_assert_eq(data[i].data, 0xc0ffee);
> >  
> >  	syncobj_destroy(fd, sync[0].handle);
> > -	for (i = 0; i < n_engines; i++) {
> > +	for (i = 0; i < n_exec_queues; i++) {
> >  		syncobj_destroy(fd, syncobjs[i]);
> > -		xe_engine_destroy(fd, engines[i]);
> > +		xe_exec_queue_destroy(fd, exec_queues[i]);
> >  	}
> >  
> >  	if (bo) {
> > @@ -250,7 +250,7 @@ test_balancer(int fd, int gt, uint32_t vm, uint64_t addr, uint64_t userptr,
> >  static void
> >  test_compute_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
> >  		  struct drm_xe_engine_class_instance *eci,
> > -		  int n_engines, int n_execs, unsigned int flags)
> > +		  int n_exec_queues, int n_execs, unsigned int flags)
> >  {
> >  #define USER_FENCE_VALUE	0xdeadbeefdeadbeefull
> >  	struct drm_xe_sync sync[1] = {
> > @@ -263,7 +263,7 @@ test_compute_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
> >  		.syncs = to_user_pointer(sync),
> >  	};
> >  	int64_t fence_timeout;
> > -	uint32_t engines[MAX_N_ENGINES];
> > +	uint32_t exec_queues[MAX_N_EXEC_QUEUES];
> >  	size_t bo_size;
> >  	uint32_t bo = 0;
> >  	struct {
> > @@ -277,7 +277,7 @@ test_compute_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
> >  	int map_fd = -1;
> >  	bool owns_vm = false, owns_fd = false;
> >  
> > -	igt_assert(n_engines <= MAX_N_ENGINES);
> > +	igt_assert(n_exec_queues <= MAX_N_EXEC_QUEUES);
> >  
> >  	if (!fd) {
> >  		fd = drm_open_driver(DRIVER_XE);
> > @@ -286,7 +286,7 @@ test_compute_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
> >  
> >  	if (!vm) {
> >  		vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS |
> > -				  XE_ENGINE_SET_PROPERTY_COMPUTE_MODE, 0);
> > +				  XE_EXEC_QUEUE_SET_PROPERTY_COMPUTE_MODE, 0);
> >  		owns_vm = true;
> >  	}
> >  
> > @@ -313,15 +313,15 @@ test_compute_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
> >  	}
> >  	memset(data, 0, bo_size);
> >  
> > -	for (i = 0; i < n_engines; i++) {
> > -		struct drm_xe_ext_engine_set_property ext = {
> > +	for (i = 0; i < n_exec_queues; i++) {
> > +		struct drm_xe_ext_exec_queue_set_property ext = {
> >  			.base.next_extension = 0,
> > -			.base.name = XE_ENGINE_EXTENSION_SET_PROPERTY,
> > -			.property = XE_ENGINE_SET_PROPERTY_COMPUTE_MODE,
> > +			.base.name = XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
> > +			.property = XE_EXEC_QUEUE_SET_PROPERTY_COMPUTE_MODE,
> >  			.value = 1,
> >  		};
> >  
> > -		engines[i] = xe_engine_create(fd, vm, eci,
> > +		exec_queues[i] = xe_exec_queue_create(fd, vm, eci,
> >  					      to_user_pointer(&ext));
> >  	};
> >  
> > @@ -346,7 +346,7 @@ test_compute_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
> >  		uint64_t batch_addr = addr + batch_offset;
> >  		uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
> >  		uint64_t sdi_addr = addr + sdi_offset;
> > -		int e = i % n_engines;
> > +		int e = i % n_exec_queues;
> >  
> >  		b = 0;
> >  		data[i].batch[b++] = MI_STORE_DWORD_IMM_GEN4;
> > @@ -358,7 +358,7 @@ test_compute_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
> >  
> >  		sync[0].addr = addr + (char *)&data[i].exec_sync - (char *)data;
> >  
> > -		exec.engine_id = engines[e];
> > +		exec.exec_queue_id = exec_queues[e];
> >  		exec.address = batch_addr;
> >  		xe_exec(fd, &exec);
> >  
> > @@ -442,8 +442,8 @@ test_compute_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
> >  	for (i = j; i < n_execs; i++)
> >  		igt_assert_eq(data[i].data, 0xc0ffee);
> >  
> > -	for (i = 0; i < n_engines; i++)
> > -		xe_engine_destroy(fd, engines[i]);
> > +	for (i = 0; i < n_exec_queues; i++)
> > +		xe_exec_queue_destroy(fd, exec_queues[i]);
> >  
> >  	if (bo) {
> >  		munmap(data, bo_size);
> > @@ -462,22 +462,22 @@ test_compute_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
> >  
> >  static void
> >  test_legacy_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
> > -		 struct drm_xe_engine_class_instance *eci, int n_engines,
> > +		 struct drm_xe_engine_class_instance *eci, int n_exec_queues,
> >  		 int n_execs, int rebind_error_inject, unsigned int flags)
> >  {
> >  	struct drm_xe_sync sync[2] = {
> >  		{ .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL, },
> >  		{ .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL, },
> >  	};
> > -	struct drm_xe_sync sync_all[MAX_N_ENGINES];
> > +	struct drm_xe_sync sync_all[MAX_N_EXEC_QUEUES];
> >  	struct drm_xe_exec exec = {
> >  		.num_batch_buffer = 1,
> >  		.num_syncs = 2,
> >  		.syncs = to_user_pointer(sync),
> >  	};
> > -	uint32_t engines[MAX_N_ENGINES];
> > -	uint32_t bind_engines[MAX_N_ENGINES];
> > -	uint32_t syncobjs[MAX_N_ENGINES];
> > +	uint32_t exec_queues[MAX_N_EXEC_QUEUES];
> > +	uint32_t bind_exec_queues[MAX_N_EXEC_QUEUES];
> > +	uint32_t syncobjs[MAX_N_EXEC_QUEUES];
> >  	size_t bo_size;
> >  	uint32_t bo = 0;
> >  	struct {
> > @@ -486,10 +486,10 @@ test_legacy_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
> >  		uint64_t pad;
> >  		uint32_t data;
> >  	} *data;
> > -	int i, j, b, hang_engine = n_engines / 2;
> > +	int i, j, b, hang_exec_queue = n_exec_queues / 2;
> >  	bool owns_vm = false, owns_fd = false;
> >  
> > -	igt_assert(n_engines <= MAX_N_ENGINES);
> > +	igt_assert(n_exec_queues <= MAX_N_EXEC_QUEUES);
> >  
> >  	if (!fd) {
> >  		fd = drm_open_driver(DRIVER_XE);
> > @@ -525,23 +525,23 @@ test_legacy_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
> >  	memset(data, 0, bo_size);
> >  
> >  	memset(sync_all, 0, sizeof(sync_all));
> > -	for (i = 0; i < n_engines; i++) {
> > -		struct drm_xe_ext_engine_set_property preempt_timeout = {
> > +	for (i = 0; i < n_exec_queues; i++) {
> > +		struct drm_xe_ext_exec_queue_set_property preempt_timeout = {
> >  			.base.next_extension = 0,
> > -			.base.name = XE_ENGINE_EXTENSION_SET_PROPERTY,
> > -			.property = XE_ENGINE_SET_PROPERTY_PREEMPTION_TIMEOUT,
> > +			.base.name = XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
> > +			.property = XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT,
> >  			.value = 1000,
> >  		};
> >  		uint64_t ext = to_user_pointer(&preempt_timeout);
> >  
> > -		if (flags & HANG && i == hang_engine)
> > -			engines[i] = xe_engine_create(fd, vm, eci, ext);
> > +		if (flags & HANG && i == hang_exec_queue)
> > +			exec_queues[i] = xe_exec_queue_create(fd, vm, eci, ext);
> >  		else
> > -			engines[i] = xe_engine_create(fd, vm, eci, 0);
> > -		if (flags & BIND_ENGINE)
> > -			bind_engines[i] = xe_bind_engine_create(fd, vm, 0);
> > +			exec_queues[i] = xe_exec_queue_create(fd, vm, eci, 0);
> > +		if (flags & BIND_EXEC_QUEUE)
> > +			bind_exec_queues[i] = xe_bind_exec_queue_create(fd, vm, 0);
> >  		else
> > -			bind_engines[i] = 0;
> > +			bind_exec_queues[i] = 0;
> >  		syncobjs[i] = syncobj_create(fd, 0);
> >  		sync_all[i].flags = DRM_XE_SYNC_SYNCOBJ;
> >  		sync_all[i].handle = syncobjs[i];
> > @@ -551,10 +551,10 @@ test_legacy_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
> >  
> >  	sync[0].handle = syncobj_create(fd, 0);
> >  	if (bo)
> > -		xe_vm_bind_async(fd, vm, bind_engines[0], bo, 0, addr,
> > +		xe_vm_bind_async(fd, vm, bind_exec_queues[0], bo, 0, addr,
> >  				 bo_size, sync, 1);
> >  	else
> > -		xe_vm_bind_userptr_async(fd, vm, bind_engines[0],
> > +		xe_vm_bind_userptr_async(fd, vm, bind_exec_queues[0],
> >  					 to_user_pointer(data), addr,
> >  					 bo_size, sync, 1);
> >  
> > @@ -566,9 +566,9 @@ test_legacy_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
> >  		uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
> >  		uint64_t sdi_addr = addr + sdi_offset;
> >  		uint64_t exec_addr;
> > -		int e = i % n_engines;
> > +		int e = i % n_exec_queues;
> >  
> > -		if (flags & HANG && e == hang_engine && i == e) {
> > +		if (flags & HANG && e == hang_exec_queue && i == e) {
> >  			xe_spin_init(&data[i].spin, spin_addr, false);
> >  			exec_addr = spin_addr;
> >  		} else {
> > @@ -587,11 +587,11 @@ test_legacy_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
> >  		sync[1].flags |= DRM_XE_SYNC_SIGNAL;
> >  		sync[1].handle = syncobjs[e];
> >  
> > -		exec.engine_id = engines[e];
> > +		exec.exec_queue_id = exec_queues[e];
> >  		exec.address = exec_addr;
> >  		if (e != i && !(flags & HANG))
> >  			 syncobj_reset(fd, &syncobjs[e], 1);
> > -		if ((flags & HANG && e == hang_engine) ||
> > +		if ((flags & HANG && e == hang_exec_queue) ||
> >  		    rebind_error_inject > 0) {
> >  			int err;
> >  
> > @@ -606,25 +606,25 @@ test_legacy_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
> >  		    (!(i & 0x1f) || rebind_error_inject == i)) {
> >  #define INJECT_ERROR	(0x1 << 31)
> >  			if (rebind_error_inject == i)
> > -				__xe_vm_bind_assert(fd, vm, bind_engines[e],
> > +				__xe_vm_bind_assert(fd, vm, bind_exec_queues[e],
> >  						    0, 0, addr, bo_size,
> >  						    XE_VM_BIND_OP_UNMAP |
> >  						    XE_VM_BIND_FLAG_ASYNC |
> >  						    INJECT_ERROR, sync_all,
> > -						    n_engines, 0, 0);
> > +						    n_exec_queues, 0, 0);
> >  			else
> > -				xe_vm_unbind_async(fd, vm, bind_engines[e],
> > +				xe_vm_unbind_async(fd, vm, bind_exec_queues[e],
> >  						   0, addr, bo_size,
> > -						   sync_all, n_engines);
> > +						   sync_all, n_exec_queues);
> >  
> >  			sync[0].flags |= DRM_XE_SYNC_SIGNAL;
> >  			addr += bo_size;
> >  			if (bo)
> > -				xe_vm_bind_async(fd, vm, bind_engines[e],
> > +				xe_vm_bind_async(fd, vm, bind_exec_queues[e],
> >  						 bo, 0, addr, bo_size, sync, 1);
> >  			else
> >  				xe_vm_bind_userptr_async(fd, vm,
> > -							 bind_engines[e],
> > +							 bind_exec_queues[e],
> >  							 to_user_pointer(data),
> >  							 addr, bo_size, sync,
> >  							 1);
> > @@ -638,12 +638,12 @@ test_legacy_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
> >  				 * physical memory on next mmap call triggering
> >  				 * an invalidate.
> >  				 */
> > -				for (j = 0; j < n_engines; ++j)
> > +				for (j = 0; j < n_exec_queues; ++j)
> >  					igt_assert(syncobj_wait(fd,
> >  								&syncobjs[j], 1,
> >  								INT64_MAX, 0,
> >  								NULL));
> > -				if (!(flags & HANG && e == hang_engine))
> > +				if (!(flags & HANG && e == hang_exec_queue))
> >  					igt_assert_eq(data[i].data, 0xc0ffee);
> >  			} else if (i * 2 != n_execs) {
> >  				/*
> > @@ -662,32 +662,32 @@ test_legacy_mode(int fd, uint32_t vm, uint64_t addr, uint64_t userptr,
> >  		}
> >  	}
> >  
> > -	for (i = 0; i < n_engines; i++)
> > +	for (i = 0; i < n_exec_queues; i++)
> >  		igt_assert(syncobj_wait(fd, &syncobjs[i], 1, INT64_MAX, 0,
> >  					NULL));
> >  	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
> >  
> >  	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
> > -	xe_vm_unbind_async(fd, vm, bind_engines[0], 0, addr,
> > +	xe_vm_unbind_async(fd, vm, bind_exec_queues[0], 0, addr,
> >  			   bo_size, sync, 1);
> >  	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
> >  
> >  	for (i = flags & INVALIDATE ? n_execs - 1 : 0;
> >  	     i < n_execs; i++) {
> > -		int e = i % n_engines;
> > +		int e = i % n_exec_queues;
> >  
> > -		if (flags & HANG && e == hang_engine)
> > +		if (flags & HANG && e == hang_exec_queue)
> >  			igt_assert_eq(data[i].data, 0x0);
> >  		else
> >  			igt_assert_eq(data[i].data, 0xc0ffee);
> >  	}
> >  
> >  	syncobj_destroy(fd, sync[0].handle);
> > -	for (i = 0; i < n_engines; i++) {
> > +	for (i = 0; i < n_exec_queues; i++) {
> >  		syncobj_destroy(fd, syncobjs[i]);
> > -		xe_engine_destroy(fd, engines[i]);
> > -		if (bind_engines[i])
> > -			xe_engine_destroy(fd, bind_engines[i]);
> > +		xe_exec_queue_destroy(fd, exec_queues[i]);
> > +		if (bind_exec_queues[i])
> > +			xe_exec_queue_destroy(fd, bind_exec_queues[i]);
> >  	}
> >  
> >  	if (bo) {
> > @@ -714,7 +714,7 @@ struct thread_data {
> >  	uint32_t vm_legacy_mode;
> >  	uint32_t vm_compute_mode;
> >  	struct drm_xe_engine_class_instance *eci;
> > -	int n_engine;
> > +	int n_exec_queue;
> >  	int n_exec;
> >  	int flags;
> >  	int rebind_error_inject;
> > @@ -732,15 +732,15 @@ static void *thread(void *data)
> >  
> >  	if (t->flags & PARALLEL || t->flags & VIRTUAL)
> >  		test_balancer(t->fd, t->gt, t->vm_legacy_mode, t->addr,
> > -			      t->userptr, t->class, t->n_engine, t->n_exec,
> > +			      t->userptr, t->class, t->n_exec_queue, t->n_exec,
> >  			      t->flags);
> >  	else if (t->flags & COMPUTE_MODE)
> >  		test_compute_mode(t->fd, t->vm_compute_mode, t->addr,
> > -				  t->userptr, t->eci, t->n_engine, t->n_exec,
> > +				  t->userptr, t->eci, t->n_exec_queue, t->n_exec,
> >  				  t->flags);
> >  	else
> >  		test_legacy_mode(t->fd, t->vm_legacy_mode, t->addr, t->userptr,
> > -				 t->eci, t->n_engine, t->n_exec,
> > +				 t->eci, t->n_exec_queue, t->n_exec,
> >  				 t->rebind_error_inject, t->flags);
> >  
> >  	return NULL;
> > @@ -818,8 +818,8 @@ static void *vm_async_ops_err_thread(void *data)
> >   *	userptr
> >   * @rebind:
> >   *	rebind
> > - * @rebind-bindengine:
> > - *	rebind bindengine
> > + * @rebind-bindexecqueue:
> > + *	rebind bindexecqueue
> >   * @userptr-rebind:
> >   *	userptr rebind
> >   * @userptr-invalidate:
> > @@ -830,8 +830,8 @@ static void *vm_async_ops_err_thread(void *data)
> >   *	shared vm userptr
> >   * @shared-vm-rebind:
> >   *	shared vm rebind
> > - * @shared-vm-rebind-bindengine:
> > - *	shared vm rebind bindengine
> > + * @shared-vm-rebind-bindexecqueue:
> > + *	shared vm rebind bindexecqueue
> >   * @shared-vm-userptr-rebind:
> >   *	shared vm userptr rebind
> >   * @shared-vm-rebind-err:
> > @@ -1077,7 +1077,7 @@ static void threads(int fd, int flags)
> >  					      to_user_pointer(&ext));
> >  		vm_compute_mode = xe_vm_create(fd,
> >  					       DRM_XE_VM_CREATE_ASYNC_BIND_OPS |
> > -					       XE_ENGINE_SET_PROPERTY_COMPUTE_MODE,
> > +					       XE_EXEC_QUEUE_SET_PROPERTY_COMPUTE_MODE,
> >  					       0);
> >  
> >  		vm_err_thread.capture = &capture;
> > @@ -1101,8 +1101,8 @@ static void threads(int fd, int flags)
> >  		threads_data[i].vm_legacy_mode = vm_legacy_mode;
> >  		threads_data[i].vm_compute_mode = vm_compute_mode;
> >  		threads_data[i].eci = hwe;
> > -#define N_ENGINE	16
> > -		threads_data[i].n_engine = N_ENGINE;
> > +#define N_EXEC_QUEUE	16
> > +		threads_data[i].n_exec_queue = N_EXEC_QUEUE;
> >  #define N_EXEC		1024
> >  		threads_data[i].n_exec = N_EXEC;
> >  		if (flags & REBIND_ERROR)
> > @@ -1154,7 +1154,7 @@ static void threads(int fd, int flags)
> >  					threads_data[i].vm_legacy_mode =
> >  						vm_legacy_mode;
> >  					threads_data[i].class = class;
> > -					threads_data[i].n_engine = N_ENGINE;
> > +					threads_data[i].n_exec_queue = N_EXEC_QUEUE;
> >  					threads_data[i].n_exec = N_EXEC;
> >  					threads_data[i].flags = flags;
> >  					threads_data[i].flags &= ~BALANCER;
> > @@ -1182,7 +1182,7 @@ static void threads(int fd, int flags)
> >  					threads_data[i].vm_legacy_mode =
> >  						vm_legacy_mode;
> >  					threads_data[i].class = class;
> > -					threads_data[i].n_engine = N_ENGINE;
> > +					threads_data[i].n_exec_queue = N_EXEC_QUEUE;
> >  					threads_data[i].n_exec = N_EXEC;
> >  					threads_data[i].flags = flags;
> >  					threads_data[i].flags &= ~BALANCER;
> > @@ -1226,15 +1226,15 @@ igt_main
> >  		{ "basic", 0 },
> >  		{ "userptr", USERPTR },
> >  		{ "rebind", REBIND },
> > -		{ "rebind-bindengine", REBIND | BIND_ENGINE },
> > +		{ "rebind-bindexecqueue", REBIND | BIND_EXEC_QUEUE },
> >  		{ "userptr-rebind", USERPTR | REBIND },
> >  		{ "userptr-invalidate", USERPTR | INVALIDATE },
> >  		{ "userptr-invalidate-race", USERPTR | INVALIDATE | RACE },
> >  		{ "shared-vm-basic", SHARED_VM },
> >  		{ "shared-vm-userptr", SHARED_VM | USERPTR },
> >  		{ "shared-vm-rebind", SHARED_VM | REBIND },
> > -		{ "shared-vm-rebind-bindengine", SHARED_VM | REBIND |
> > -			BIND_ENGINE },
> > +		{ "shared-vm-rebind-bindexecqueue", SHARED_VM | REBIND |
> > +			BIND_EXEC_QUEUE },
> >  		{ "shared-vm-userptr-rebind", SHARED_VM | USERPTR | REBIND },
> >  		{ "shared-vm-rebind-err", SHARED_VM | REBIND | REBIND_ERROR },
> >  		{ "shared-vm-userptr-rebind-err", SHARED_VM | USERPTR |
> > diff --git a/tests/xe/xe_exercise_blt.c b/tests/xe/xe_exercise_blt.c
> > index 2caed48ff..6d61889b3 100644
> > --- a/tests/xe/xe_exercise_blt.c
> > +++ b/tests/xe/xe_exercise_blt.c
> > @@ -273,15 +273,15 @@ static void fast_copy_test(int xe,
> >  
> >  		for_each_variation_r(regions, 2, set) {
> >  			uint32_t region1, region2;
> > -			uint32_t vm, engine;
> > +			uint32_t vm, exec_queue;
> >  			char *regtxt, *test_name;
> >  
> >  			region1 = igt_collection_get_value(regions, 0);
> >  			region2 = igt_collection_get_value(regions, 1);
> >  
> >  			vm = xe_vm_create(xe, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
> > -			engine = xe_engine_create(xe, vm, &inst, 0);
> > -			ctx = intel_ctx_xe(xe, vm, engine, 0, 0, 0);
> > +			exec_queue = xe_exec_queue_create(xe, vm, &inst, 0);
> > +			ctx = intel_ctx_xe(xe, vm, exec_queue, 0, 0, 0);
> >  
> >  			copy_func = (func == FAST_COPY) ? fast_copy : fast_copy_emit;
> >  			regtxt = xe_memregion_dynamic_subtest_name(xe, regions);
> > @@ -295,7 +295,7 @@ static void fast_copy_test(int xe,
> >  
> >  			free(regtxt);
> >  			free(test_name);
> > -			xe_engine_destroy(xe, engine);
> > +			xe_exec_queue_destroy(xe, exec_queue);
> >  			xe_vm_destroy(xe, vm);
> >  			free(ctx);
> >  		}
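worth noting for readers following the rename: intel_ctx_xe() appears to just
wrap the raw queue id, so teardown stays manual — the wrapper is plain-free()d
while the queue and vm need their own destroy calls, exactly as done here:

	/* wrapper owns nothing: destroy queue and vm, then free() it */
	xe_exec_queue_destroy(xe, exec_queue);
	xe_vm_destroy(xe, vm);
	free(ctx);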
> > diff --git a/tests/xe/xe_guc_pc.c b/tests/xe/xe_guc_pc.c
> > index 8cdd8ba74..032816921 100644
> > --- a/tests/xe/xe_guc_pc.c
> > +++ b/tests/xe/xe_guc_pc.c
> > @@ -23,7 +23,7 @@
> >  #include <string.h>
> >  #include <sys/time.h>
> >  
> > -#define MAX_N_ENGINES 16
> > +#define MAX_N_EXEC_QUEUES 16
> >  
> >  /*
> >   * Too many intermediate components and steps before freq is adjusted
> > @@ -32,7 +32,7 @@
> >  #define ACT_FREQ_LATENCY_US 100000
> >  
> >  static void exec_basic(int fd, struct drm_xe_engine_class_instance *eci,
> > -		       int n_engines, int n_execs)
> > +		       int n_exec_queues, int n_execs)
> >  {
> >  	uint32_t vm;
> >  	uint64_t addr = 0x1a0000;
> > @@ -45,9 +45,9 @@ static void exec_basic(int fd, struct drm_xe_engine_class_instance *eci,
> >  		.num_syncs = 2,
> >  		.syncs = to_user_pointer(sync),
> >  	};
> > -	uint32_t engines[MAX_N_ENGINES];
> > -	uint32_t bind_engines[MAX_N_ENGINES];
> > -	uint32_t syncobjs[MAX_N_ENGINES];
> > +	uint32_t exec_queues[MAX_N_EXEC_QUEUES];
> > +	uint32_t bind_exec_queues[MAX_N_EXEC_QUEUES];
> > +	uint32_t syncobjs[MAX_N_EXEC_QUEUES];
> >  	size_t bo_size;
> >  	uint32_t bo = 0;
> >  	struct {
> > @@ -57,7 +57,7 @@ static void exec_basic(int fd, struct drm_xe_engine_class_instance *eci,
> >  	} *data;
> >  	int i, b;
> >  
> > -	igt_assert(n_engines <= MAX_N_ENGINES);
> > +	igt_assert(n_exec_queues <= MAX_N_EXEC_QUEUES);
> >  	igt_assert(n_execs > 0);
> >  
> >  	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
> > @@ -69,15 +69,15 @@ static void exec_basic(int fd, struct drm_xe_engine_class_instance *eci,
> >  				visible_vram_if_possible(fd, eci->gt_id));
> >  	data = xe_bo_map(fd, bo, bo_size);
> >  
> > -	for (i = 0; i < n_engines; i++) {
> > -		engines[i] = xe_engine_create(fd, vm, eci, 0);
> > -		bind_engines[i] = 0;
> > +	for (i = 0; i < n_exec_queues; i++) {
> > +		exec_queues[i] = xe_exec_queue_create(fd, vm, eci, 0);
> > +		bind_exec_queues[i] = 0;
> >  		syncobjs[i] = syncobj_create(fd, 0);
> >  	};
> >  
> >  	sync[0].handle = syncobj_create(fd, 0);
> >  
> > -	xe_vm_bind_async(fd, vm, bind_engines[0], bo, 0, addr,
> > +	xe_vm_bind_async(fd, vm, bind_exec_queues[0], bo, 0, addr,
> >  			 bo_size, sync, 1);
> >  
> >  	for (i = 0; i < n_execs; i++) {
> > @@ -85,7 +85,7 @@ static void exec_basic(int fd, struct drm_xe_engine_class_instance *eci,
> >  		uint64_t batch_addr = addr + batch_offset;
> >  		uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
> >  		uint64_t sdi_addr = addr + sdi_offset;
> > -		int e = i % n_engines;
> > +		int e = i % n_exec_queues;
> >  
> >  		b = 0;
> >  		data[i].batch[b++] = MI_STORE_DWORD_IMM_GEN4;
> > @@ -99,7 +99,7 @@ static void exec_basic(int fd, struct drm_xe_engine_class_instance *eci,
> >  		sync[1].flags |= DRM_XE_SYNC_SIGNAL;
> >  		sync[1].handle = syncobjs[e];
> >  
> > -		exec.engine_id = engines[e];
> > +		exec.exec_queue_id = exec_queues[e];
> >  		exec.address = batch_addr;
> >  
> >  		if (e != i)
> > @@ -115,7 +115,7 @@ static void exec_basic(int fd, struct drm_xe_engine_class_instance *eci,
> >  	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
> >  
> >  	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
> > -	xe_vm_unbind_async(fd, vm, bind_engines[0], 0, addr,
> > +	xe_vm_unbind_async(fd, vm, bind_exec_queues[0], 0, addr,
> >  			   bo_size, sync, 1);
> >  	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
> >  
> > @@ -123,11 +123,11 @@ static void exec_basic(int fd, struct drm_xe_engine_class_instance *eci,
> >  		igt_assert_eq(data[i].data, 0xc0ffee);
> >  
> >  	syncobj_destroy(fd, sync[0].handle);
> > -	for (i = 0; i < n_engines; i++) {
> > +	for (i = 0; i < n_exec_queues; i++) {
> >  		syncobj_destroy(fd, syncobjs[i]);
> > -		xe_engine_destroy(fd, engines[i]);
> > -		if (bind_engines[i])
> > -			xe_engine_destroy(fd, bind_engines[i]);
> > +		xe_exec_queue_destroy(fd, exec_queues[i]);
> > +		if (bind_exec_queues[i])
> > +			xe_exec_queue_destroy(fd, bind_exec_queues[i]);
> >  	}
> >  
> >  	munmap(data, bo_size);
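one thing that stands out after the rename: exec_basic() keeps a
bind_exec_queues[] array plus the conditional destroy at the bottom, but every
entry stays 0, so all binds go through the default queue (id 0 passed to
xe_vm_bind_async()). Presumably cloned from the tests that optionally create
dedicated bind queues. If we ever want that coverage here too, a minimal
sketch — hypothetical, reusing xe_bind_exec_queue_create() from this same
series — would be:

	for (i = 0; i < n_exec_queues; i++) {
		exec_queues[i] = xe_exec_queue_create(fd, vm, eci, 0);
		/* hypothetical: dedicated bind queue per slot instead of 0 */
		bind_exec_queues[i] = xe_bind_exec_queue_create(fd, vm, 0);
		syncobjs[i] = syncobj_create(fd, 0);
	}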
> > @@ -212,11 +212,11 @@ static void test_freq_basic_api(int fd, int gt_id)
> >  
> >  /**
> >   * SUBTEST: freq_fixed_idle
> > - * Description: Test fixed frequency request with engine in idle state
> > + * Description: Test fixed frequency request with exec_queue in idle state
> >   * Run type: BAT
> >   *
> >   * SUBTEST: freq_fixed_exec
> > - * Description: Test fixed frequency request when engine is doing some work
> > + * Description: Test fixed frequency request when exec_queue is doing some work
> >   * Run type: FULL
> >   */
> >  
> > @@ -278,11 +278,11 @@ static void test_freq_fixed(int fd, int gt_id, bool gt_idle)
> >  
> >  /**
> >   * SUBTEST: freq_range_idle
> > - * Description: Test range frequency request with engine in idle state
> > + * Description: Test range frequency request with exec_queue in idle state
> >   * Run type: BAT
> >   *
> >   * SUBTEST: freq_range_exec
> > - * Description: Test range frequency request when engine is doing some work
> > + * Description: Test range frequency request when exec_queue is doing some work
> >   * Run type: FULL
> >   */
> >  
> > @@ -421,7 +421,7 @@ igt_main
> >  			xe_for_each_hw_engine(fd, hwe)
> >  				igt_fork(child, ncpus) {
> >  					igt_debug("Execution Started\n");
> > -					exec_basic(fd, hwe, MAX_N_ENGINES, 16);
> > +					exec_basic(fd, hwe, MAX_N_EXEC_QUEUES, 16);
> >  					igt_debug("Execution Finished\n");
> >  				}
> >  			/* While exec in threads above, let's check the freq */
> > @@ -442,7 +442,7 @@ igt_main
> >  			xe_for_each_hw_engine(fd, hwe)
> >  				igt_fork(child, ncpus) {
> >  					igt_debug("Execution Started\n");
> > -					exec_basic(fd, hwe, MAX_N_ENGINES, 16);
> > +					exec_basic(fd, hwe, MAX_N_EXEC_QUEUES, 16);
> >  					igt_debug("Execution Finished\n");
> >  				}
> >  			/* While exec in threads above, let's check the freq */
> > diff --git a/tests/xe/xe_huc_copy.c b/tests/xe/xe_huc_copy.c
> > index d4377f9a1..cbc30a8f6 100644
> > --- a/tests/xe/xe_huc_copy.c
> > +++ b/tests/xe/xe_huc_copy.c
> > @@ -107,7 +107,7 @@ gen12_create_batch_huc_copy(uint32_t *batch,
> >  static void
> >  test_huc_copy(int fd)
> >  {
> > -	uint32_t vm, engine;
> > +	uint32_t vm, exec_queue;
> >  	char *dinput;
> >  	struct drm_xe_sync sync = { 0 };
> >  
> > @@ -119,7 +119,7 @@ test_huc_copy(int fd)
> >  	};
> >  
> >  	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
> > -	engine = xe_engine_create_class(fd, vm, DRM_XE_ENGINE_CLASS_VIDEO_DECODE);
> > +	exec_queue = xe_exec_queue_create_class(fd, vm, DRM_XE_ENGINE_CLASS_VIDEO_DECODE);
> >  	sync.flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL;
> >  	sync.handle = syncobj_create(fd, 0);
> >  
> > @@ -136,7 +136,7 @@ test_huc_copy(int fd)
> >  	}
> >  	gen12_create_batch_huc_copy(bo_dict[2].data, bo_dict[0].addr, bo_dict[1].addr);
> >  
> > -	xe_exec_wait(fd, engine, ADDR_BATCH);
> > +	xe_exec_wait(fd, exec_queue, ADDR_BATCH);
> >  	for(int i = 0; i < SIZE_DATA; i++) {
> >  		igt_assert(((char*) bo_dict[1].data)[i] == ((char*) bo_dict[0].data)[i]);
> >  	}
> > @@ -148,7 +148,7 @@ test_huc_copy(int fd)
> >  	}
> >  
> >  	syncobj_destroy(fd, sync.handle);
> > -	xe_engine_destroy(fd, engine);
> > +	xe_exec_queue_destroy(fd, exec_queue);
> >  	xe_vm_destroy(fd, vm);
> >  }
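good example of the class-based helper: xe_exec_queue_create_class() picks any
instance of the given class, so tests that don't care which video/copy engine
they land on stay one-liners. Same shape as the copy-class use in xe_vm.c
later in this patch:

	exec_queue = xe_exec_queue_create_class(fd, vm, DRM_XE_ENGINE_CLASS_COPY);
	xe_exec_wait(fd, exec_queue, batch_addr);
	xe_exec_queue_destroy(fd, exec_queue);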
> >  
> > diff --git a/tests/xe/xe_intel_bb.c b/tests/xe/xe_intel_bb.c
> > index 539f44a0f..a5531cf80 100644
> > --- a/tests/xe/xe_intel_bb.c
> > +++ b/tests/xe/xe_intel_bb.c
> > @@ -195,7 +195,7 @@ static void simple_bb(struct buf_ops *bops, bool new_context)
> >  
> >  	if (new_context) {
> >  		vm = xe_vm_create(xe, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
> > -		ctx = xe_engine_create(xe, vm, xe_hw_engine(xe, 0), 0);
> > +		ctx = xe_exec_queue_create(xe, vm, xe_hw_engine(xe, 0), 0);
> >  		intel_bb_destroy(ibb);
> >  		ibb = intel_bb_create_with_context(xe, ctx, vm, NULL, PAGE_SIZE);
> >  		intel_bb_out(ibb, MI_BATCH_BUFFER_END);
> > @@ -203,7 +203,7 @@ static void simple_bb(struct buf_ops *bops, bool new_context)
> >  		intel_bb_exec(ibb, intel_bb_offset(ibb),
> >  			      I915_EXEC_DEFAULT | I915_EXEC_NO_RELOC,
> >  			      true);
> > -		xe_engine_destroy(xe, ctx);
> > +		xe_exec_queue_destroy(xe, ctx);
> >  		xe_vm_destroy(xe, vm);
> >  	}
> >  
> > diff --git a/tests/xe/xe_noexec_ping_pong.c b/tests/xe/xe_noexec_ping_pong.c
> > index 49cf11fa6..cc61a77c1 100644
> > --- a/tests/xe/xe_noexec_ping_pong.c
> > +++ b/tests/xe/xe_noexec_ping_pong.c
> > @@ -24,7 +24,7 @@
> >   *
> >   * SUBTEST:
> >   * Description:
> > - *	This test creates compute vms, binds a couple of bos and an engine each,
> > + *	This test creates compute vms, binds a couple of bos and an exec_queue each,
> >   *	thus redying it for execution. However, VRAM memory is over-
typo nit in the doc comment above (pre-existing): "redying" should be
"readying".
> >   *	committed and while there is still nothing to execute, an eviction
> >   *	will trigger the VM's rebind worker to rebind the evicted bo, which
> > @@ -34,7 +34,7 @@
> >   *	the rebind kworkers using a lot of CPU while the test idles.
> >   *
> >   *	The correct driver behaviour should be not to rebind anything unless
> > - *	there is worked queued on one of the VM's compute engines.
> > + *	there is work queued on one of the VM's compute exec_queues.
> >   * Run type: FULL
> >   */
> >  static void test_ping_pong(int fd, struct drm_xe_engine_class_instance *eci)
> > @@ -44,7 +44,7 @@ static void test_ping_pong(int fd, struct drm_xe_engine_class_instance *eci)
> >  	size_t bo_size = vram_size / NUM_VMS / NUM_BOS;
> >  	uint32_t vm[NUM_VMS];
> >  	uint32_t bo[NUM_VMS][NUM_BOS];
> > -	uint32_t engines[NUM_VMS];
> > +	uint32_t exec_queues[NUM_VMS];
> >  	unsigned int i, j;
> >  
> >  	igt_skip_on(!bo_size);
> > @@ -58,10 +58,10 @@ static void test_ping_pong(int fd, struct drm_xe_engine_class_instance *eci)
> >  	 * stats.
> >  	 */
> >  	for (i = 0; i < NUM_VMS; ++i) {
> > -		struct drm_xe_ext_engine_set_property ext = {
> > +		struct drm_xe_ext_exec_queue_set_property ext = {
> >  			.base.next_extension = 0,
> > -			.base.name = XE_ENGINE_EXTENSION_SET_PROPERTY,
> > -			.property = XE_ENGINE_SET_PROPERTY_COMPUTE_MODE,
> > +			.base.name = XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
> > +			.property = XE_EXEC_QUEUE_SET_PROPERTY_COMPUTE_MODE,
> >  			.value = 1,
> >  		};
> >  
> > @@ -76,7 +76,7 @@ static void test_ping_pong(int fd, struct drm_xe_engine_class_instance *eci)
> >  			xe_vm_bind(fd, vm[i], bo[i][j], 0, 0x40000 + j*bo_size,
> >  				   bo_size, NULL, 0);
> >  		}
> > -		engines[i] = xe_engine_create(fd, vm[i], eci,
> > +		exec_queues[i] = xe_exec_queue_create(fd, vm[i], eci,
> >  					      to_user_pointer(&ext));
> >  	}
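the new name is longer, so the continuation line above no longer lines up with
the opening parenthesis. Worth re-flowing while touching it, e.g.:

		exec_queues[i] = xe_exec_queue_create(fd, vm[i], eci,
						      to_user_pointer(&ext));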
> >  
> > @@ -85,7 +85,7 @@ static void test_ping_pong(int fd, struct drm_xe_engine_class_instance *eci)
> >  	sleep(SECONDS_TO_WAIT);
> >  
> >  	for (i = 0; i < NUM_VMS; ++i) {
> > -		xe_engine_destroy(fd, engines[i]);
> > +		xe_exec_queue_destroy(fd, exec_queues[i]);
> >  		for (j = 0; j < NUM_BOS; ++j)
> >  			gem_close(fd, bo[i][j]);
> >  		xe_vm_destroy(fd, vm[i]);
> > diff --git a/tests/xe/xe_pm.c b/tests/xe/xe_pm.c
> > index 559eccdeb..16af38883 100644
> > --- a/tests/xe/xe_pm.c
> > +++ b/tests/xe/xe_pm.c
> > @@ -25,7 +25,7 @@
> >  #include "xe/xe_ioctl.h"
> >  #include "xe/xe_query.h"
> >  
> > -#define MAX_N_ENGINES 16
> > +#define MAX_N_EXEC_QUEUES 16
> >  #define NO_SUSPEND -1
> >  #define NO_RPM -1
> >  
> > @@ -208,7 +208,7 @@ static bool out_of_d3(device_t device, enum igt_acpi_d_state state)
> >  
> >  static void
> >  test_exec(device_t device, struct drm_xe_engine_class_instance *eci,
> > -	  int n_engines, int n_execs, enum igt_suspend_state s_state,
> > +	  int n_exec_queues, int n_execs, enum igt_suspend_state s_state,
> >  	  enum igt_acpi_d_state d_state)
> >  {
> >  	uint32_t vm;
> > @@ -222,9 +222,9 @@ test_exec(device_t device, struct drm_xe_engine_class_instance *eci,
> >  		.num_syncs = 2,
> >  		.syncs = to_user_pointer(sync),
> >  	};
> > -	uint32_t engines[MAX_N_ENGINES];
> > -	uint32_t bind_engines[MAX_N_ENGINES];
> > -	uint32_t syncobjs[MAX_N_ENGINES];
> > +	uint32_t exec_queues[MAX_N_EXEC_QUEUES];
> > +	uint32_t bind_exec_queues[MAX_N_EXEC_QUEUES];
> > +	uint32_t syncobjs[MAX_N_EXEC_QUEUES];
> >  	size_t bo_size;
> >  	uint32_t bo = 0;
> >  	struct {
> > @@ -236,7 +236,7 @@ test_exec(device_t device, struct drm_xe_engine_class_instance *eci,
> >  	bool check_rpm = (d_state == IGT_ACPI_D3Hot ||
> >  			  d_state == IGT_ACPI_D3Cold);
> >  
> > -	igt_assert(n_engines <= MAX_N_ENGINES);
> > +	igt_assert(n_exec_queues <= MAX_N_EXEC_QUEUES);
> >  	igt_assert(n_execs > 0);
> >  
> >  	if (check_rpm)
> > @@ -258,15 +258,15 @@ test_exec(device_t device, struct drm_xe_engine_class_instance *eci,
> >  				visible_vram_if_possible(device.fd_xe, eci->gt_id));
> >  	data = xe_bo_map(device.fd_xe, bo, bo_size);
> >  
> > -	for (i = 0; i < n_engines; i++) {
> > -		engines[i] = xe_engine_create(device.fd_xe, vm, eci, 0);
> > -		bind_engines[i] = 0;
> > +	for (i = 0; i < n_exec_queues; i++) {
> > +		exec_queues[i] = xe_exec_queue_create(device.fd_xe, vm, eci, 0);
> > +		bind_exec_queues[i] = 0;
> >  		syncobjs[i] = syncobj_create(device.fd_xe, 0);
> >  	};
> >  
> >  	sync[0].handle = syncobj_create(device.fd_xe, 0);
> >  
> > -	xe_vm_bind_async(device.fd_xe, vm, bind_engines[0], bo, 0, addr,
> > +	xe_vm_bind_async(device.fd_xe, vm, bind_exec_queues[0], bo, 0, addr,
> >  			 bo_size, sync, 1);
> >  
> >  	if (check_rpm && runtime_usage_available(device.pci_xe))
> > @@ -277,7 +277,7 @@ test_exec(device_t device, struct drm_xe_engine_class_instance *eci,
> >  		uint64_t batch_addr = addr + batch_offset;
> >  		uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
> >  		uint64_t sdi_addr = addr + sdi_offset;
> > -		int e = i % n_engines;
> > +		int e = i % n_exec_queues;
> >  
> >  		b = 0;
> >  		data[i].batch[b++] = MI_STORE_DWORD_IMM_GEN4;
> > @@ -291,7 +291,7 @@ test_exec(device_t device, struct drm_xe_engine_class_instance *eci,
> >  		sync[1].flags |= DRM_XE_SYNC_SIGNAL;
> >  		sync[1].handle = syncobjs[e];
> >  
> > -		exec.engine_id = engines[e];
> > +		exec.exec_queue_id = exec_queues[e];
> >  		exec.address = batch_addr;
> >  
> >  		if (e != i)
> > @@ -315,7 +315,7 @@ test_exec(device_t device, struct drm_xe_engine_class_instance *eci,
> >  		rpm_usage = igt_pm_get_runtime_usage(device.pci_xe);
> >  
> >  	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
> > -	xe_vm_unbind_async(device.fd_xe, vm, bind_engines[0], 0, addr,
> > +	xe_vm_unbind_async(device.fd_xe, vm, bind_exec_queues[0], 0, addr,
> >  			   bo_size, sync, 1);
> >  	igt_assert(syncobj_wait(device.fd_xe, &sync[0].handle, 1, INT64_MAX, 0,
> >  NULL));
> > @@ -324,11 +324,11 @@ NULL));
> >  		igt_assert_eq(data[i].data, 0xc0ffee);
> >  
> >  	syncobj_destroy(device.fd_xe, sync[0].handle);
> > -	for (i = 0; i < n_engines; i++) {
> > +	for (i = 0; i < n_exec_queues; i++) {
> >  		syncobj_destroy(device.fd_xe, syncobjs[i]);
> > -		xe_engine_destroy(device.fd_xe, engines[i]);
> > -		if (bind_engines[i])
> > -			xe_engine_destroy(device.fd_xe, bind_engines[i]);
> > +		xe_exec_queue_destroy(device.fd_xe, exec_queues[i]);
> > +		if (bind_exec_queues[i])
> > +			xe_exec_queue_destroy(device.fd_xe, bind_exec_queues[i]);
> >  	}
> >  
> >  	munmap(data, bo_size);
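pre-existing, but since the loop is being rewritten anyway: the create loops
here and in xe_guc_pc.c close with a stray `};` — plain `}` would do:

	for (i = 0; i < n_exec_queues; i++) {
		exec_queues[i] = xe_exec_queue_create(device.fd_xe, vm, eci, 0);
		bind_exec_queues[i] = 0;
		syncobjs[i] = syncobj_create(device.fd_xe, 0);
	}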
> > diff --git a/tests/xe/xe_spin_batch.c b/tests/xe/xe_spin_batch.c
> > index 56581f760..26f9daf36 100644
> > --- a/tests/xe/xe_spin_batch.c
> > +++ b/tests/xe/xe_spin_batch.c
> > @@ -41,18 +41,18 @@ static void spin_basic(int fd)
> >  static void spin(int fd, struct drm_xe_engine_class_instance *hwe)
> >  {
> >  	uint64_t ahnd;
> > -	unsigned int engine;
> > +	unsigned int exec_queue;
> >  	uint32_t vm;
> >  	igt_spin_t *spin;
> >  
> >  	vm = xe_vm_create(fd, 0, 0);
> > -	engine = xe_engine_create(fd, vm, hwe, 0);
> > +	exec_queue = xe_exec_queue_create(fd, vm, hwe, 0);
> >  	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_RELOC);
> >  
> > -	spin = igt_spin_new(fd, .ahnd = ahnd, .engine = engine, .vm = vm);
> > +	spin = igt_spin_new(fd, .ahnd = ahnd, .engine = exec_queue, .vm = vm);
> >  
> >  	igt_spin_free(fd, spin);
> > -	xe_engine_destroy(fd, engine);
> > +	xe_exec_queue_destroy(fd, exec_queue);
> >  	xe_vm_destroy(fd, vm);
> >  
> >  	put_ahnd(ahnd);
> > @@ -98,7 +98,7 @@ static void spin_basic_all(int fd)
> >  static void spin_all(int fd, int gt, int class)
> >  {
> >  	uint64_t ahnd;
> > -	uint32_t engines[MAX_INSTANCE], vm;
> > +	uint32_t exec_queues[MAX_INSTANCE], vm;
> >  	int i, num_placements = 0;
> >  	struct drm_xe_engine_class_instance eci[MAX_INSTANCE];
> >  	igt_spin_t *spin[MAX_INSTANCE];
> > @@ -116,22 +116,22 @@ static void spin_all(int fd, int gt, int class)
> >  	vm = xe_vm_create(fd, 0, 0);
> >  
> >  	for (i = 0; i < num_placements; i++) {
> > -		struct drm_xe_engine_create create = {
> > +		struct drm_xe_exec_queue_create create = {
> >  			.vm_id = vm,
> >  			.width = 1,
> >  			.num_placements = num_placements,
> >  			.instances = to_user_pointer(eci),
> >  		};
> >  
> > -		igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_ENGINE_CREATE,
> > +		igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE,
> >  					&create), 0);
> > -		engines[i] = create.engine_id;
> > -		spin[i] = igt_spin_new(fd, .ahnd = ahnd, .engine = engines[i], .vm = vm);
> > +		exec_queues[i] = create.exec_queue_id;
> > +		spin[i] = igt_spin_new(fd, .ahnd = ahnd, .engine = exec_queues[i], .vm = vm);
> >  	}
> >  
> >  	for (i = 0; i < num_placements; i++) {
> >  		igt_spin_free(fd, spin[i]);
> > -		xe_engine_destroy(fd, engines[i]);
> > +		xe_exec_queue_destroy(fd, exec_queues[i]);
> >  	}
> >  
> >  	put_ahnd(ahnd);
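spin_all() is the one caller that keeps the raw ioctl, and for good reason: it
wants a multi-placement queue. If I'm reading the uAPI right, width is the
parallel-submission width and num_placements lets the scheduler place the
queue on any of the gathered instances, which the single-eci
xe_exec_queue_create() helper can't express:

	struct drm_xe_exec_queue_create create = {
		.vm_id = vm,
		.width = 1,			  /* one batch per exec */
		.num_placements = num_placements, /* balance across instances */
		.instances = to_user_pointer(eci),
	};

	igt_assert_eq(igt_ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create), 0);
	/* on success the new queue id comes back in create.exec_queue_id */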
> > diff --git a/tests/xe/xe_vm.c b/tests/xe/xe_vm.c
> > index 5703538cc..e42c04e33 100644
> > --- a/tests/xe/xe_vm.c
> > +++ b/tests/xe/xe_vm.c
> > @@ -45,7 +45,7 @@ hash_addr(uint64_t addr)
> >  static void
> >  write_dwords(int fd, uint32_t vm, int n_dwords, uint64_t *addrs)
> >  {
> > -	uint32_t batch_size, batch_bo, *batch_map, engine;
> > +	uint32_t batch_size, batch_bo, *batch_map, exec_queue;
> >  	uint64_t batch_addr = 0x1a0000;
> >  	int i, b = 0;
> >  
> > @@ -72,12 +72,12 @@ write_dwords(int fd, uint32_t vm, int n_dwords, uint64_t *addrs)
> >  	munmap(batch_map, batch_size);
> >  
> >  	xe_vm_bind_sync(fd, vm, batch_bo, 0, batch_addr, batch_size);
> > -	engine = xe_engine_create_class(fd, vm, DRM_XE_ENGINE_CLASS_COPY);
> > -	xe_exec_wait(fd, engine, batch_addr);
> > +	exec_queue = xe_exec_queue_create_class(fd, vm, DRM_XE_ENGINE_CLASS_COPY);
> > +	xe_exec_wait(fd, exec_queue, batch_addr);
> >  	xe_vm_unbind_sync(fd, vm, 0, batch_addr, batch_size);
> >  
> >  	gem_close(fd, batch_bo);
> > -	xe_engine_destroy(fd, engine);
> > +	xe_exec_queue_destroy(fd, exec_queue);
> >  }
> >  
> >  /**
> > @@ -515,7 +515,7 @@ struct shared_pte_page_data {
> >  	uint32_t data;
> >  };
> >  
> > -#define MAX_N_ENGINES 4
> > +#define MAX_N_EXEC_QUEUES 4
> >  
> >  static void
> >  shared_pte_page(int fd, struct drm_xe_engine_class_instance *eci, int n_bo,
> > @@ -527,21 +527,21 @@ shared_pte_page(int fd, struct drm_xe_engine_class_instance *eci, int n_bo,
> >  		{ .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL, },
> >  		{ .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL, },
> >  	};
> > -	struct drm_xe_sync sync_all[MAX_N_ENGINES + 1];
> > +	struct drm_xe_sync sync_all[MAX_N_EXEC_QUEUES + 1];
> >  	struct drm_xe_exec exec = {
> >  		.num_batch_buffer = 1,
> >  		.num_syncs = 2,
> >  		.syncs = to_user_pointer(sync),
> >  	};
> > -	uint32_t engines[MAX_N_ENGINES];
> > -	uint32_t syncobjs[MAX_N_ENGINES];
> > +	uint32_t exec_queues[MAX_N_EXEC_QUEUES];
> > +	uint32_t syncobjs[MAX_N_EXEC_QUEUES];
> >  	size_t bo_size;
> >  	uint32_t *bo;
> >  	struct shared_pte_page_data **data;
> > -	int n_engines = n_bo, n_execs = n_bo;
> > +	int n_exec_queues = n_bo, n_execs = n_bo;
> >  	int i, b;
> >  
> > -	igt_assert(n_engines <= MAX_N_ENGINES);
> > +	igt_assert(n_exec_queues <= MAX_N_EXEC_QUEUES);
> >  
> >  	bo = malloc(sizeof(*bo) * n_bo);
> >  	igt_assert(bo);
> > @@ -561,8 +561,8 @@ shared_pte_page(int fd, struct drm_xe_engine_class_instance *eci, int n_bo,
> >  	}
> >  
> >  	memset(sync_all, 0, sizeof(sync_all));
> > -	for (i = 0; i < n_engines; i++) {
> > -		engines[i] = xe_engine_create(fd, vm, eci, 0);
> > +	for (i = 0; i < n_exec_queues; i++) {
> > +		exec_queues[i] = xe_exec_queue_create(fd, vm, eci, 0);
> >  		syncobjs[i] = syncobj_create(fd, 0);
> >  		sync_all[i].flags = DRM_XE_SYNC_SYNCOBJ;
> >  		sync_all[i].handle = syncobjs[i];
> > @@ -579,7 +579,7 @@ shared_pte_page(int fd, struct drm_xe_engine_class_instance *eci, int n_bo,
> >  		uint64_t batch_addr = addr + i * addr_stride + batch_offset;
> >  		uint64_t sdi_offset = (char *)&data[i]->data - (char *)data[i];
> >  		uint64_t sdi_addr = addr + i * addr_stride + sdi_offset;
> > -		int e = i % n_engines;
> > +		int e = i % n_exec_queues;
> >  
> >  		b = 0;
> >  		data[i]->batch[b++] = MI_STORE_DWORD_IMM_GEN4;
> > @@ -593,7 +593,7 @@ shared_pte_page(int fd, struct drm_xe_engine_class_instance *eci, int n_bo,
> >  		sync[1].flags |= DRM_XE_SYNC_SIGNAL;
> >  		sync[1].handle = syncobjs[e];
> >  
> > -		exec.engine_id = engines[e];
> > +		exec.exec_queue_id = exec_queues[e];
> >  		exec.address = batch_addr;
> >  		xe_exec(fd, &exec);
> >  	}
> > @@ -624,7 +624,7 @@ shared_pte_page(int fd, struct drm_xe_engine_class_instance *eci, int n_bo,
> >  		uint64_t batch_addr = addr + i * addr_stride + batch_offset;
> >  		uint64_t sdi_offset = (char *)&data[i]->data - (char *)data[i];
> >  		uint64_t sdi_addr = addr + i * addr_stride + sdi_offset;
> > -		int e = i % n_engines;
> > +		int e = i % n_exec_queues;
> >  
> >  		if (!(i % 2))
> >  			continue;
> > @@ -642,7 +642,7 @@ shared_pte_page(int fd, struct drm_xe_engine_class_instance *eci, int n_bo,
> >  		sync[1].flags |= DRM_XE_SYNC_SIGNAL;
> >  		sync[1].handle = syncobjs[e];
> >  
> > -		exec.engine_id = engines[e];
> > +		exec.exec_queue_id = exec_queues[e];
> >  		exec.address = batch_addr;
> >  		syncobj_reset(fd, &syncobjs[e], 1);
> >  		xe_exec(fd, &exec);
> > @@ -672,9 +672,9 @@ shared_pte_page(int fd, struct drm_xe_engine_class_instance *eci, int n_bo,
> >  		igt_assert_eq(data[i]->data, 0xc0ffee);
> >  
> >  	syncobj_destroy(fd, sync[0].handle);
> > -	for (i = 0; i < n_engines; i++) {
> > +	for (i = 0; i < n_exec_queues; i++) {
> >  		syncobj_destroy(fd, syncobjs[i]);
> > -		xe_engine_destroy(fd, engines[i]);
> > +		xe_exec_queue_destroy(fd, exec_queues[i]);
> >  	}
> >  
> >  	for (i = 0; i < n_bo; ++i) {
> > @@ -687,21 +687,21 @@ shared_pte_page(int fd, struct drm_xe_engine_class_instance *eci, int n_bo,
> >  
> >  
> >  /**
> > - * SUBTEST: bind-engines-independent
> > - * Description: Test independent bind engines
> > - * Functionality: bind engines
> > + * SUBTEST: bind-execqueues-independent
> > + * Description: Test independent bind exec_queues
> > + * Functionality: bind exec_queues
> >   * Run type: BAT
> >   *
> > - * SUBTEST: bind-engines-conflict
> > - * Description: Test conflict bind engines
> > - * Functionality: bind engines
> > + * SUBTEST: bind-execqueues-conflict
> > + * Description: Test conflict bind exec_queues
> > + * Functionality: bind exec_queues
> >   * Run type: BAT
> >   */
> >  
> >  #define CONFLICT	(0x1 << 0)
> >  
> >  static void
> > -test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci,
> > +test_bind_execqueues_independent(int fd, struct drm_xe_engine_class_instance *eci,
> >  			      unsigned int flags)
> >  {
> >  	uint32_t vm;
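same indentation fallout as in xe_noexec_ping_pong.c: `unsigned int flags)` on
the continuation line was aligned to the old, shorter function name. Suggest
re-flowing the signature, e.g.:

static void
test_bind_execqueues_independent(int fd,
				 struct drm_xe_engine_class_instance *eci,
				 unsigned int flags)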
> > @@ -715,10 +715,10 @@ test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci,
> >  		.num_syncs = 2,
> >  		.syncs = to_user_pointer(sync),
> >  	};
> > -#define N_ENGINES	2
> > -	uint32_t engines[N_ENGINES];
> > -	uint32_t bind_engines[N_ENGINES];
> > -	uint32_t syncobjs[N_ENGINES + 1];
> > +#define N_EXEC_QUEUES	2
> > +	uint32_t exec_queues[N_EXEC_QUEUES];
> > +	uint32_t bind_exec_queues[N_EXEC_QUEUES];
> > +	uint32_t syncobjs[N_EXEC_QUEUES + 1];
> >  	size_t bo_size;
> >  	uint32_t bo = 0;
> >  	struct {
> > @@ -730,26 +730,26 @@ test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci,
> >  	int i, b;
> >  
> >  	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
> > -	bo_size = sizeof(*data) * N_ENGINES;
> > +	bo_size = sizeof(*data) * N_EXEC_QUEUES;
> >  	bo_size = ALIGN(bo_size + xe_cs_prefetch_size(fd),
> >  			xe_get_default_alignment(fd));
> >  	bo = xe_bo_create_flags(fd, vm, bo_size,
> >  				visible_vram_if_possible(fd, eci->gt_id));
> >  	data = xe_bo_map(fd, bo, bo_size);
> >  
> > -	for (i = 0; i < N_ENGINES; i++) {
> > -		engines[i] = xe_engine_create(fd, vm, eci, 0);
> > -		bind_engines[i] = xe_bind_engine_create(fd, vm, 0);
> > +	for (i = 0; i < N_EXEC_QUEUES; i++) {
> > +		exec_queues[i] = xe_exec_queue_create(fd, vm, eci, 0);
> > +		bind_exec_queues[i] = xe_bind_exec_queue_create(fd, vm, 0);
> >  		syncobjs[i] = syncobj_create(fd, 0);
> >  	}
> > -	syncobjs[N_ENGINES] = syncobj_create(fd, 0);
> > +	syncobjs[N_EXEC_QUEUES] = syncobj_create(fd, 0);
> >  
> >  	/* Initial bind, needed for spinner */
> >  	sync[0].handle = syncobj_create(fd, 0);
> > -	xe_vm_bind_async(fd, vm, bind_engines[0], bo, 0, addr, bo_size,
> > +	xe_vm_bind_async(fd, vm, bind_exec_queues[0], bo, 0, addr, bo_size,
> >  			 sync, 1);
> >  
> > -	for (i = 0; i < N_ENGINES; i++) {
> > +	for (i = 0; i < N_EXEC_QUEUES; i++) {
> >  		uint64_t batch_offset = (char *)&data[i].batch - (char *)data;
> >  		uint64_t batch_addr = addr + batch_offset;
> >  		uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
> > @@ -759,9 +759,9 @@ test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci,
> >  		int e = i;
> >  
> >  		if (i == 0) {
> > -			/* Cork 1st engine with a spinner */
> > +			/* Cork 1st exec_queue with a spinner */
> >  			xe_spin_init(&data[i].spin, spin_addr, true);
> > -			exec.engine_id = engines[e];
> > +			exec.exec_queue_id = exec_queues[e];
> >  			exec.address = spin_addr;
> >  			sync[0].flags &= ~DRM_XE_SYNC_SIGNAL;
> >  			sync[1].flags |= DRM_XE_SYNC_SIGNAL;
> > @@ -769,22 +769,22 @@ test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci,
> >  			xe_exec(fd, &exec);
> >  			xe_spin_wait_started(&data[i].spin);
> >  
> > -			/* Do bind to 1st engine blocked on cork */
> > +			/* Do bind to 1st exec_queue blocked on cork */
> >  			addr += (flags & CONFLICT) ? (0x1 << 21) : bo_size;
> >  			sync[1].flags &= ~DRM_XE_SYNC_SIGNAL;
> >  			sync[1].handle = syncobjs[e];
> > -			xe_vm_bind_async(fd, vm, bind_engines[e], bo, 0, addr,
> > +			xe_vm_bind_async(fd, vm, bind_exec_queues[e], bo, 0, addr,
> >  					 bo_size, sync + 1, 1);
> >  			addr += bo_size;
> >  		} else {
> > -			/* Do bind to 2nd engine which blocks write below */
> > +			/* Do bind to 2nd exec_queue which blocks write below */
> >  			sync[0].flags |= DRM_XE_SYNC_SIGNAL;
> > -			xe_vm_bind_async(fd, vm, bind_engines[e], bo, 0, addr,
> > +			xe_vm_bind_async(fd, vm, bind_exec_queues[e], bo, 0, addr,
> >  					 bo_size, sync, 1);
> >  		}
> >  
> >  		/*
> > -		 * Write to either engine, 1st blocked on spinner + bind, 2nd
> > +		 * Write to either exec_queue, 1st blocked on spinner + bind, 2nd
> >  		 * just blocked on bind. The 2nd should make independent
> >  		 * progress.
> >  		 */
> > @@ -798,16 +798,16 @@ test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci,
> >  
> >  		sync[0].flags &= ~DRM_XE_SYNC_SIGNAL;
> >  		sync[1].flags |= DRM_XE_SYNC_SIGNAL;
> > -		sync[1].handle = syncobjs[!i ? N_ENGINES : e];
> > +		sync[1].handle = syncobjs[!i ? N_EXEC_QUEUES : e];
> >  
> >  		exec.num_syncs = 2;
> > -		exec.engine_id = engines[e];
> > +		exec.exec_queue_id = exec_queues[e];
> >  		exec.address = batch_addr;
> >  		xe_exec(fd, &exec);
> >  	}
> >  
> >  	if (!(flags & CONFLICT)) {
> > -		/* Verify initial bind, bind + write to 2nd engine done */
> > +		/* Verify initial bind, bind + write to 2nd exec_queue done */
> >  		igt_assert(syncobj_wait(fd, &syncobjs[1], 1, INT64_MAX, 0,
> >  					NULL));
> >  		igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0,
> > @@ -816,24 +816,24 @@ test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci,
> >  	} else {
> >  		/* Let jobs run for a bit */
> >  		usleep(100000);
> > -		/* bind + write to 2nd engine waiting */
> > +		/* bind + write to 2nd exec_queue waiting */
> >  		igt_assert(!syncobj_wait(fd, &syncobjs[1], 1, 1, 0, NULL));
> >  		igt_assert(!syncobj_wait(fd, &sync[0].handle, 1, 0, 0, NULL));
> >  	}
> >  
> > -	/* Verify bind + write to 1st engine still inflight */
> > +	/* Verify bind + write to 1st exec_queue still inflight */
> >  	igt_assert(!syncobj_wait(fd, &syncobjs[0], 1, 1, 0, NULL));
> > -	igt_assert(!syncobj_wait(fd, &syncobjs[N_ENGINES], 1, 1, 0, NULL));
> > +	igt_assert(!syncobj_wait(fd, &syncobjs[N_EXEC_QUEUES], 1, 1, 0, NULL));
> >  
> > -	/* Verify bind + write to 1st engine done after ending spinner */
> > +	/* Verify bind + write to 1st exec_queue done after ending spinner */
> >  	xe_spin_end(&data[0].spin);
> >  	igt_assert(syncobj_wait(fd, &syncobjs[0], 1, INT64_MAX, 0, NULL));
> > -	igt_assert(syncobj_wait(fd, &syncobjs[N_ENGINES], 1, INT64_MAX, 0,
> > +	igt_assert(syncobj_wait(fd, &syncobjs[N_EXEC_QUEUES], 1, INT64_MAX, 0,
> >  				NULL));
> >  	igt_assert_eq(data[0].data, 0xc0ffee);
> >  
> >  	if (flags & CONFLICT) {
> > -		/* Verify bind + write to 2nd engine done */
> > +		/* Verify bind + write to 2nd exec_queue done */
> >  		igt_assert(syncobj_wait(fd, &syncobjs[1], 1, INT64_MAX, 0,
> >  					NULL));
> >  		igt_assert_eq(data[1].data, 0xc0ffee);
> > @@ -846,10 +846,10 @@ test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci,
> >  	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
> >  
> >  	syncobj_destroy(fd, sync[0].handle);
> > -	for (i = 0; i < N_ENGINES; i++) {
> > +	for (i = 0; i < N_EXEC_QUEUES; i++) {
> >  		syncobj_destroy(fd, syncobjs[i]);
> > -		xe_engine_destroy(fd, engines[i]);
> > -		xe_engine_destroy(fd, bind_engines[i]);
> > +		xe_exec_queue_destroy(fd, exec_queues[i]);
> > +		xe_exec_queue_destroy(fd, bind_exec_queues[i]);
> >  	}
> >  
> >  	munmap(data, bo_size);
> > @@ -857,28 +857,28 @@ test_bind_engines_independent(int fd, struct drm_xe_engine_class_instance *eci,
> >  	xe_vm_destroy(fd, vm);
> >  }
> >  
> > -#define BIND_ARRAY_BIND_ENGINE_FLAG	(0x1 << 0)
> > +#define BIND_ARRAY_BIND_EXEC_QUEUE_FLAG	(0x1 << 0)
> >  
> >  
> >  /**
> >   * SUBTEST: bind-array-twice
> >   * Description: Test bind array twice
> > - * Functionality: bind engines
> > + * Functionality: bind exec_queues
> >   * Run type: FULL
> >   *
> >   * SUBTEST: bind-array-many
> >   * Description: Test bind array many times
> > - * Functionality: bind engines
> > + * Functionality: bind exec_queues
> >   * Run type: FULL
> >   *
> > - * SUBTEST: bind-array-engine-twice
> > - * Description: Test bind array engine twice
> > - * Functionality: bind engines
> > + * SUBTEST: bind-array-exec_queue-twice
> > + * Description: Test bind array exec_queue twice
> > + * Functionality: bind exec_queues
> >   * Run type: FULL
> >   *
> > - * SUBTEST: bind-array-engine-many
> > - * Description: Test bind array engine many times
> > - * Functionality: bind engines
> > + * SUBTEST: bind-array-exec_queue-many
> > + * Description: Test bind array exec_queue many times
> > + * Functionality: bind exec_queues
> >   * Run type: FULL
> >   */
> >  static void
> > @@ -895,7 +895,7 @@ test_bind_array(int fd, struct drm_xe_engine_class_instance *eci, int n_execs,
> >  		.num_batch_buffer = 1,
> >  		.syncs = to_user_pointer(sync),
> >  	};
> > -	uint32_t engine, bind_engine = 0;
> > +	uint32_t exec_queue, bind_exec_queue = 0;
> >  #define BIND_ARRAY_MAX_N_EXEC	16
> >  	struct drm_xe_vm_bind_op bind_ops[BIND_ARRAY_MAX_N_EXEC] = { };
> >  	size_t bo_size;
> > @@ -918,9 +918,9 @@ test_bind_array(int fd, struct drm_xe_engine_class_instance *eci, int n_execs,
> >  				visible_vram_if_possible(fd, eci->gt_id));
> >  	data = xe_bo_map(fd, bo, bo_size);
> >  
> > -	if (flags & BIND_ARRAY_BIND_ENGINE_FLAG)
> > -		bind_engine = xe_bind_engine_create(fd, vm, 0);
> > -	engine = xe_engine_create(fd, vm, eci, 0);
> > +	if (flags & BIND_ARRAY_BIND_EXEC_QUEUE_FLAG)
> > +		bind_exec_queue = xe_bind_exec_queue_create(fd, vm, 0);
> > +	exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
> >  
> >  	for (i = 0; i < n_execs; ++i) {
> >  		bind_ops[i].obj = bo;
> > @@ -937,7 +937,7 @@ test_bind_array(int fd, struct drm_xe_engine_class_instance *eci, int n_execs,
> >  	}
> >  
> >  	sync[0].handle = syncobj_create(fd, 0);
> > -	xe_vm_bind_array(fd, vm, bind_engine, bind_ops, n_execs, sync, 1);
> > +	xe_vm_bind_array(fd, vm, bind_exec_queue, bind_ops, n_execs, sync, 1);
> >  
> >  	addr = base_addr;
> >  	for (i = 0; i < n_execs; i++) {
> > @@ -963,7 +963,7 @@ test_bind_array(int fd, struct drm_xe_engine_class_instance *eci, int n_execs,
> >  			exec.num_syncs = 1;
> >  		}
> >  
> > -		exec.engine_id = engine;
> > +		exec.exec_queue_id = exec_queue;
> >  		exec.address = batch_addr;
> >  		xe_exec(fd, &exec);
> >  
> > @@ -978,7 +978,7 @@ test_bind_array(int fd, struct drm_xe_engine_class_instance *eci, int n_execs,
> >  	syncobj_reset(fd, &sync[0].handle, 1);
> >  	sync[0].flags |= DRM_XE_SYNC_SIGNAL;
> >  	sync[1].flags &= ~DRM_XE_SYNC_SIGNAL;
> > -	xe_vm_bind_array(fd, vm, bind_engine, bind_ops, n_execs, sync, 2);
> > +	xe_vm_bind_array(fd, vm, bind_exec_queue, bind_ops, n_execs, sync, 2);
> >  
> >  	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
> >  	igt_assert(syncobj_wait(fd, &sync[1].handle, 1, INT64_MAX, 0, NULL));
> > @@ -988,9 +988,9 @@ test_bind_array(int fd, struct drm_xe_engine_class_instance *eci, int n_execs,
> >  
> >  	syncobj_destroy(fd, sync[0].handle);
> >  	syncobj_destroy(fd, sync[1].handle);
> > -	xe_engine_destroy(fd, engine);
> > -	if (bind_engine)
> > -		xe_engine_destroy(fd, bind_engine);
> > +	xe_exec_queue_destroy(fd, exec_queue);
> > +	if (bind_exec_queue)
> > +		xe_exec_queue_destroy(fd, bind_exec_queue);
> >  
> >  	munmap(data, bo_size);
> >  	gem_close(fd, bo);
> > @@ -1070,7 +1070,7 @@ test_bind_array(int fd, struct drm_xe_engine_class_instance *eci, int n_execs,
> >  
> >  static void
> >  test_large_binds(int fd, struct drm_xe_engine_class_instance *eci,
> > -		 int n_engines, int n_execs, size_t bo_size,
> > +		 int n_exec_queues, int n_execs, size_t bo_size,
> >  		 unsigned int flags)
> >  {
> >  	struct drm_xe_sync sync[2] = {
> > @@ -1084,8 +1084,8 @@ test_large_binds(int fd, struct drm_xe_engine_class_instance *eci,
> >  	};
> >  	uint64_t addr = 0x1ull << 30, base_addr = 0x1ull << 30;
> >  	uint32_t vm;
> > -	uint32_t engines[MAX_N_ENGINES];
> > -	uint32_t syncobjs[MAX_N_ENGINES];
> > +	uint32_t exec_queues[MAX_N_EXEC_QUEUES];
> > +	uint32_t syncobjs[MAX_N_EXEC_QUEUES];
> >  	uint32_t bo = 0;
> >  	void *map;
> >  	struct {
> > @@ -1100,7 +1100,7 @@ test_large_binds(int fd, struct drm_xe_engine_class_instance *eci,
> >  		base_addr -= xe_get_default_alignment(fd);
> >  	}
> >  
> > -	igt_assert(n_engines <= MAX_N_ENGINES);
> > +	igt_assert(n_exec_queues <= MAX_N_EXEC_QUEUES);
> >  	vm = xe_vm_create(fd, DRM_XE_VM_CREATE_ASYNC_BIND_OPS, 0);
> >  
> >  	if (flags & LARGE_BIND_FLAG_USERPTR) {
> > @@ -1115,8 +1115,8 @@ test_large_binds(int fd, struct drm_xe_engine_class_instance *eci,
> >  		map = xe_bo_map(fd, bo, bo_size);
> >  	}
> >  
> > -	for (i = 0; i < n_engines; i++) {
> > -		engines[i] = xe_engine_create(fd, vm, eci, 0);
> > +	for (i = 0; i < n_exec_queues; i++) {
> > +		exec_queues[i] = xe_exec_queue_create(fd, vm, eci, 0);
> >  		syncobjs[i] = syncobj_create(fd, 0);
> >  	};
> >  
> > @@ -1147,7 +1147,7 @@ test_large_binds(int fd, struct drm_xe_engine_class_instance *eci,
> >  		uint64_t batch_addr = addr + batch_offset;
> >  		uint64_t sdi_offset = (char *)&data[i].data - (char *)data;
> >  		uint64_t sdi_addr = addr + sdi_offset;
> > -		int e = i % n_engines;
> > +		int e = i % n_exec_queues;
> >  
> >  		data = map + (addr - base_addr);
> >  		b = 0;
> > @@ -1165,7 +1165,7 @@ test_large_binds(int fd, struct drm_xe_engine_class_instance *eci,
> >  		if (i != e)
> >  			syncobj_reset(fd, &sync[1].handle, 1);
> >  
> > -		exec.engine_id = engines[e];
> > +		exec.exec_queue_id = exec_queues[e];
> >  		exec.address = batch_addr;
> >  		xe_exec(fd, &exec);
> >  
> > @@ -1175,7 +1175,7 @@ test_large_binds(int fd, struct drm_xe_engine_class_instance *eci,
> >  			addr = base_addr + bo_size - 0x1000;
> >  	}
> >  
> > -	for (i = 0; i < n_engines; i++)
> > +	for (i = 0; i < n_exec_queues; i++)
> >  		igt_assert(syncobj_wait(fd, &syncobjs[i], 1, INT64_MAX, 0,
> >  					NULL));
> >  	igt_assert(syncobj_wait(fd, &sync[0].handle, 1, INT64_MAX, 0, NULL));
> > @@ -1205,9 +1205,9 @@ test_large_binds(int fd, struct drm_xe_engine_class_instance *eci,
> >  	}
> >  
> >  	syncobj_destroy(fd, sync[0].handle);
> > -	for (i = 0; i < n_engines; i++) {
> > +	for (i = 0; i < n_exec_queues; i++) {
> >  		syncobj_destroy(fd, syncobjs[i]);
> > -		xe_engine_destroy(fd, engines[i]);
> > +		xe_exec_queue_destroy(fd, exec_queues[i]);
> >  	}
> >  
> >  	if (bo) {
> > @@ -1246,7 +1246,7 @@ static void *hammer_thread(void *tdata)
> >  		uint64_t pad;
> >  		uint32_t data;
> >  	} *data = t->map;
> > -	uint32_t engine = xe_engine_create(t->fd, t->vm, t->eci, 0);
> > +	uint32_t exec_queue = xe_exec_queue_create(t->fd, t->vm, t->eci, 0);
> >  	int b;
> >  	int i = 0;
> >  
> > @@ -1267,7 +1267,7 @@ static void *hammer_thread(void *tdata)
> >  		data->batch[b++] = MI_BATCH_BUFFER_END;
> >  		igt_assert(b <= ARRAY_SIZE(data->batch));
> >  
> > -		exec.engine_id = engine;
> > +		exec.exec_queue_id = exec_queue;
> >  		exec.address = batch_addr;
> >  		if (i % 32) {
> >  			exec.num_syncs = 0;
> > @@ -1283,7 +1283,7 @@ static void *hammer_thread(void *tdata)
> >  	}
> >  
> >  	syncobj_destroy(t->fd, sync[0].handle);
> > -	xe_engine_destroy(t->fd, engine);
> > +	xe_exec_queue_destroy(t->fd, exec_queue);
> >  
> >  	return NULL;
> >  }
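aside: hammer_thread() attaches its syncobj only on every 32nd submission
(exec.num_syncs = 0 whenever i % 32 is nonzero), so most execs on the renamed
queue run unfenced — presumably exactly the churn wanted behind the
munmap-style unbind tests below.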
> > @@ -1368,7 +1368,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
> >  	};
> >  	uint64_t addr = 0x1a00000, base_addr = 0x1a00000;
> >  	uint32_t vm;
> > -	uint32_t engine;
> > +	uint32_t exec_queue;
> >  	size_t bo_size;
> >  	uint32_t bo = 0;
> >  	uint64_t bind_size;
> > @@ -1408,7 +1408,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
> >  	}
> >  	memset(map, 0, bo_size);
> >  
> > -	engine = xe_engine_create(fd, vm, eci, 0);
> > +	exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
> >  
> >  	sync[0].handle = syncobj_create(fd, 0);
> >  	sync[1].handle = syncobj_create(fd, 0);
> > @@ -1466,7 +1466,7 @@ test_munmap_style_unbind(int fd, struct drm_xe_engine_class_instance *eci,
> >  			syncobj_reset(fd, &sync[1].handle, 1);
> >  		sync[1].flags |= DRM_XE_SYNC_SIGNAL;
> >  
> > -		exec.engine_id = engine;
> > +		exec.exec_queue_id = exec_queue;
> >  		exec.address = batch_addr;
> >  		xe_exec(fd, &exec);
> >  
> > @@ -1522,7 +1522,7 @@ try_again_after_invalidate:
> >  			syncobj_reset(fd, &sync[1].handle, 1);
> >  			sync[1].flags |= DRM_XE_SYNC_SIGNAL;
> >  
> > -			exec.engine_id = engine;
> > +			exec.exec_queue_id = exec_queue;
> >  			exec.address = batch_addr;
> >  			xe_exec(fd, &exec);
> >  		}
> > @@ -1593,7 +1593,7 @@ try_again_after_invalidate:
> >  		syncobj_reset(fd, &sync[1].handle, 1);
> >  		sync[1].flags |= DRM_XE_SYNC_SIGNAL;
> >  
> > -		exec.engine_id = engine;
> > +		exec.exec_queue_id = exec_queue;
> >  		exec.address = batch_addr;
> >  		xe_exec(fd, &exec);
> >  
> > @@ -1618,7 +1618,7 @@ try_again_after_invalidate:
> >  
> >  	syncobj_destroy(fd, sync[0].handle);
> >  	syncobj_destroy(fd, sync[1].handle);
> > -	xe_engine_destroy(fd, engine);
> > +	xe_exec_queue_destroy(fd, exec_queue);
> >  	munmap(map, bo_size);
> >  	if (bo)
> >  		gem_close(fd, bo);
> > @@ -1669,7 +1669,7 @@ test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
> >  	};
> >  	uint64_t addr = 0x1a00000, base_addr = 0x1a00000;
> >  	uint32_t vm;
> > -	uint32_t engine;
> > +	uint32_t exec_queue;
> >  	size_t bo_size;
> >  	uint32_t bo0 = 0, bo1 = 0;
> >  	uint64_t bind_size;
> > @@ -1714,7 +1714,7 @@ test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
> >  	memset(map0, 0, bo_size);
> >  	memset(map1, 0, bo_size);
> >  
> > -	engine = xe_engine_create(fd, vm, eci, 0);
> > +	exec_queue = xe_exec_queue_create(fd, vm, eci, 0);
> >  
> >  	sync[0].handle = syncobj_create(fd, 0);
> >  	sync[1].handle = syncobj_create(fd, 0);
> > @@ -1772,7 +1772,7 @@ test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
> >  			syncobj_reset(fd, &sync[1].handle, 1);
> >  		sync[1].flags |= DRM_XE_SYNC_SIGNAL;
> >  
> > -		exec.engine_id = engine;
> > +		exec.exec_queue_id = exec_queue;
> >  		exec.address = batch_addr;
> >  		xe_exec(fd, &exec);
> >  
> > @@ -1840,7 +1840,7 @@ test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
> >  			syncobj_reset(fd, &sync[1].handle, 1);
> >  		sync[1].flags |= DRM_XE_SYNC_SIGNAL;
> >  
> > -		exec.engine_id = engine;
> > +		exec.exec_queue_id = exec_queue;
> >  		exec.address = batch_addr;
> >  		xe_exec(fd, &exec);
> >  
> > @@ -1872,7 +1872,7 @@ test_mmap_style_bind(int fd, struct drm_xe_engine_class_instance *eci,
> >  
> >  	syncobj_destroy(fd, sync[0].handle);
> >  	syncobj_destroy(fd, sync[1].handle);
> > -	xe_engine_destroy(fd, engine);
> > +	xe_exec_queue_destroy(fd, exec_queue);
> >  	munmap(map0, bo_size);
> >  	munmap(map1, bo_size);
> >  	if (bo0)
> > @@ -2033,13 +2033,13 @@ igt_main
> >  		xe_for_each_hw_engine(fd, hwe)
> >  			shared_pte_page(fd, hwe, 4, 0x1000ul * 512 * 512 * 512);
> >  
> > -	igt_subtest("bind-engines-independent")
> > +	igt_subtest("bind-execqueues-independent")
> >  		xe_for_each_hw_engine(fd, hwe)
> > -			test_bind_engines_independent(fd, hwe, 0);
> > +			test_bind_execqueues_independent(fd, hwe, 0);
> >  
> > -	igt_subtest("bind-engines-conflict")
> > +	igt_subtest("bind-execqueues-conflict")
> >  		xe_for_each_hw_engine(fd, hwe)
> > -			test_bind_engines_independent(fd, hwe, CONFLICT);
> > +			test_bind_execqueues_independent(fd, hwe, CONFLICT);
> >  
> >  	igt_subtest("bind-array-twice")
> >  		xe_for_each_hw_engine(fd, hwe)
> > @@ -2049,15 +2049,15 @@ igt_main
> >  		xe_for_each_hw_engine(fd, hwe)
> >  			test_bind_array(fd, hwe, 16, 0);
> >  
> > -	igt_subtest("bind-array-engine-twice")
> > +	igt_subtest("bind-array-exec_queue-twice")
> >  		xe_for_each_hw_engine(fd, hwe)
> >  			test_bind_array(fd, hwe, 2,
> > -					BIND_ARRAY_BIND_ENGINE_FLAG);
> > +					BIND_ARRAY_BIND_EXEC_QUEUE_FLAG);
> >  
> > -	igt_subtest("bind-array-engine-many")
> > +	igt_subtest("bind-array-exec_queue-many")
> >  		xe_for_each_hw_engine(fd, hwe)
> >  			test_bind_array(fd, hwe, 16,
> > -					BIND_ARRAY_BIND_ENGINE_FLAG);
> > +					BIND_ARRAY_BIND_EXEC_QUEUE_FLAG);
> >  
> >  	for (bind_size = 0x1ull << 21; bind_size <= 0x1ull << 31;
> >  	     bind_size = bind_size << 1) {
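bikeshed on the new subtest names: this series now ships three spellings —
bind-execqueues-independent here, rebind-bindexecqueue in xe_exec_threads, and
bind-array-exec_queue-twice above. Settling on one form (execqueue vs
exec_queue) would keep greps and testlist updates simpler.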
> > -- 
> > 2.34.1
> > 

