[Intel-xe] [RFC PATCH 1/1] drm/xe: Rename engine to exec_queue
Francois Dugast
francois.dugast at intel.com
Mon Jul 31 19:19:43 UTC 2023
On Mon, Jul 31, 2023 at 02:03:43PM -0400, Rodrigo Vivi wrote:
> On Mon, Jul 31, 2023 at 04:08:49PM +0000, Francois Dugast wrote:
> > Engine was inappropriately used to refer to execution queues, and
>
> exec_queue is much better than engine here indeed.
Thanks for the fast feedback.
>
> > it also created some confusion with hardware engines. Where it applies,
> > the exec_queue variable name is changed to q and comments are also
> > updated.
>
> my only bikeshed on this is on the 'q'... could we make that 'eq'
> instead?
That would be fine with me, but I based this choice on the discussion in the
GitLab issue, where we seemed to have a consensus on going with 'q'.
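
For illustration, here is roughly how a call site reads with the short 'q'
name, using the lookup/put helpers this patch renames (the wrapper function
itself is made up for the example):

static int example_exec_queue_use(struct xe_file *xef, u32 id)
{
	struct xe_exec_queue *q = xe_exec_queue_lookup(xef, id);

	if (!q)
		return -ENOENT;

	/* ... submit work against q ... */

	/* drop the reference taken by xe_exec_queue_lookup() */
	xe_exec_queue_put(q);
	return 0;
}
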
>
> it will be very hard to run a detailed review, but based on what
> I have seen so far everything looks correct. So we should probably
> move fast with this after passing CI. Anyone else conflicting
> with this would have to rebase and resubmit.
>
> Acked-by: Rodrigo Vivi <rodrigo.vivi at intel.com>
Great. I would like at least another pair of eyes to take a look, and I
already found some minor fixes, so I will have to send a new version anyway.
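
Worth noting for anyone tracking userspace: the uAPI is renamed as well (see
the include/uapi/drm/xe_drm.h entry in the diffstat), so a create call would
move to something like the sketch below. The exact macro and struct names are
assumed from the rename pattern, since the uAPI hunk is not quoted in this
excerpt; drm_xe_engine_class_instance presumably keeps "engine" in its name
because it refers to hardware engines:

	struct drm_xe_engine_class_instance instance = {};
	struct drm_xe_exec_queue_create create = {
		.width = 1,
		.num_placements = 1,
		.vm_id = vm_id,
		.instances = (uintptr_t)&instance,
	};

	/* was DRM_IOCTL_XE_ENGINE_CREATE */
	if (ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create))
		return -errno;
	/* the returned id field is renamed engine_id -> exec_queue_id */
	exec_queue_id = create.exec_queue_id;
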
>
> >
> > Link: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/162
> > Signed-off-by: Francois Dugast <francois.dugast at intel.com>
> > ---
> > drivers/gpu/drm/xe/Makefile | 2 +-
> > drivers/gpu/drm/xe/tests/xe_migrate.c | 18 +-
> > drivers/gpu/drm/xe/xe_bb.c | 26 +-
> > drivers/gpu/drm/xe/xe_bb.h | 8 +-
> > drivers/gpu/drm/xe/xe_devcoredump.c | 40 +-
> > drivers/gpu/drm/xe/xe_devcoredump.h | 6 +-
> > drivers/gpu/drm/xe/xe_devcoredump_types.h | 2 +-
> > drivers/gpu/drm/xe/xe_device.c | 62 +-
> > drivers/gpu/drm/xe/xe_device.h | 8 +-
> > drivers/gpu/drm/xe/xe_device_types.h | 2 +-
> > drivers/gpu/drm/xe/xe_engine.c | 850 -------------
> > drivers/gpu/drm/xe/xe_engine.h | 63 -
> > drivers/gpu/drm/xe/xe_exec.c | 62 +-
> > drivers/gpu/drm/xe/xe_exec_queue.c | 850 +++++++++++++
> > drivers/gpu/drm/xe/xe_exec_queue.h | 63 +
> > ...e_engine_types.h => xe_exec_queue_types.h} | 110 +-
> > drivers/gpu/drm/xe/xe_execlist.c | 126 +-
> > drivers/gpu/drm/xe/xe_execlist_types.h | 8 +-
> > drivers/gpu/drm/xe/xe_gt.c | 72 +-
> > drivers/gpu/drm/xe/xe_gt_types.h | 6 +-
> > drivers/gpu/drm/xe/xe_guc_ads.c | 2 +-
> > drivers/gpu/drm/xe/xe_guc_ct.c | 10 +-
> > ...gine_types.h => xe_guc_exec_queue_types.h} | 10 +-
> > drivers/gpu/drm/xe/xe_guc_fwif.h | 6 +-
> > drivers/gpu/drm/xe/xe_guc_submit.c | 1074 ++++++++---------
> > drivers/gpu/drm/xe/xe_guc_submit.h | 20 +-
> > drivers/gpu/drm/xe/xe_guc_submit_types.h | 20 +-
> > drivers/gpu/drm/xe/xe_guc_types.h | 4 +-
> > drivers/gpu/drm/xe/xe_lrc.c | 10 +-
> > drivers/gpu/drm/xe/xe_lrc.h | 4 +-
> > drivers/gpu/drm/xe/xe_migrate.c | 62 +-
> > drivers/gpu/drm/xe/xe_migrate.h | 6 +-
> > drivers/gpu/drm/xe/xe_mocs.c | 2 +-
> > drivers/gpu/drm/xe/xe_mocs.h | 2 +-
> > drivers/gpu/drm/xe/xe_preempt_fence.c | 32 +-
> > drivers/gpu/drm/xe/xe_preempt_fence.h | 4 +-
> > drivers/gpu/drm/xe/xe_preempt_fence_types.h | 5 +-
> > drivers/gpu/drm/xe/xe_pt.c | 18 +-
> > drivers/gpu/drm/xe/xe_pt.h | 6 +-
> > drivers/gpu/drm/xe/xe_query.c | 4 +-
> > drivers/gpu/drm/xe/xe_ring_ops.c | 38 +-
> > drivers/gpu/drm/xe/xe_sched_job.c | 76 +-
> > drivers/gpu/drm/xe/xe_sched_job.h | 4 +-
> > drivers/gpu/drm/xe/xe_sched_job_types.h | 6 +-
> > drivers/gpu/drm/xe/xe_trace.h | 140 +--
> > drivers/gpu/drm/xe/xe_vm.c | 178 +--
> > drivers/gpu/drm/xe/xe_vm.h | 4 +-
> > drivers/gpu/drm/xe/xe_vm_types.h | 6 +-
> > include/uapi/drm/xe_drm.h | 86 +-
> > 49 files changed, 2111 insertions(+), 2112 deletions(-)
> > delete mode 100644 drivers/gpu/drm/xe/xe_engine.c
> > delete mode 100644 drivers/gpu/drm/xe/xe_engine.h
> > create mode 100644 drivers/gpu/drm/xe/xe_exec_queue.c
> > create mode 100644 drivers/gpu/drm/xe/xe_exec_queue.h
> > rename drivers/gpu/drm/xe/{xe_engine_types.h => xe_exec_queue_types.h} (52%)
> > rename drivers/gpu/drm/xe/{xe_guc_engine_types.h => xe_guc_exec_queue_types.h} (88%)
> >
> > diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
> > index 4ea9e3150c20..e79624ab2cb3 100644
> > --- a/drivers/gpu/drm/xe/Makefile
> > +++ b/drivers/gpu/drm/xe/Makefile
> > @@ -51,9 +51,9 @@ xe-y += xe_bb.o \
> > xe_device.o \
> > xe_device_sysfs.o \
> > xe_dma_buf.o \
> > - xe_engine.o \
> > xe_exec.o \
> > xe_execlist.o \
> > + xe_exec_queue.o \
> > xe_force_wake.o \
> > xe_ggtt.o \
> > xe_gt.o \
> > diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
> > index 9e9b228fe315..5c8d5e78d9bc 100644
> > --- a/drivers/gpu/drm/xe/tests/xe_migrate.c
> > +++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
> > @@ -38,7 +38,7 @@ static int run_sanity_job(struct xe_migrate *m, struct xe_device *xe,
> > struct kunit *test)
> > {
> > u64 batch_base = xe_migrate_batch_base(m, xe->info.supports_usm);
> > - struct xe_sched_job *job = xe_bb_create_migration_job(m->eng, bb,
> > + struct xe_sched_job *job = xe_bb_create_migration_job(m->q, bb,
> > batch_base,
> > second_idx);
> > struct dma_fence *fence;
> > @@ -215,7 +215,7 @@ static void test_pt_update(struct xe_migrate *m, struct xe_bo *pt,
> > xe_map_memset(xe, &pt->vmap, 0, (u8)expected, pt->size);
> >
> > then = ktime_get();
> > - fence = xe_migrate_update_pgtables(m, NULL, NULL, m->eng, &update, 1,
> > + fence = xe_migrate_update_pgtables(m, NULL, NULL, m->q, &update, 1,
> > NULL, 0, &pt_update);
> > now = ktime_get();
> > if (sanity_fence_failed(xe, fence, "Migration pagetable update", test))
> > @@ -257,7 +257,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
> > return;
> > }
> >
> > - big = xe_bo_create_pin_map(xe, tile, m->eng->vm, SZ_4M,
> > + big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
> > ttm_bo_type_kernel,
> > XE_BO_CREATE_VRAM_IF_DGFX(tile) |
> > XE_BO_CREATE_PINNED_BIT);
> > @@ -266,7 +266,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
> > goto vunmap;
> > }
> >
> > - pt = xe_bo_create_pin_map(xe, tile, m->eng->vm, XE_PAGE_SIZE,
> > + pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
> > ttm_bo_type_kernel,
> > XE_BO_CREATE_VRAM_IF_DGFX(tile) |
> > XE_BO_CREATE_PINNED_BIT);
> > @@ -276,7 +276,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
> > goto free_big;
> > }
> >
> > - tiny = xe_bo_create_pin_map(xe, tile, m->eng->vm,
> > + tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
> > 2 * SZ_4K,
> > ttm_bo_type_kernel,
> > XE_BO_CREATE_VRAM_IF_DGFX(tile) |
> > @@ -295,14 +295,14 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
> > }
> >
> > kunit_info(test, "Starting tests, top level PT addr: %lx, special pagetable base addr: %lx\n",
> > - (unsigned long)xe_bo_main_addr(m->eng->vm->pt_root[id]->bo, XE_PAGE_SIZE),
> > + (unsigned long)xe_bo_main_addr(m->q->vm->pt_root[id]->bo, XE_PAGE_SIZE),
> > (unsigned long)xe_bo_main_addr(m->pt_bo, XE_PAGE_SIZE));
> >
> > /* First part of the test, are we updating our pagetable bo with a new entry? */
> > xe_map_wr(xe, &bo->vmap, XE_PAGE_SIZE * (NUM_KERNEL_PDE - 1), u64,
> > 0xdeaddeadbeefbeef);
> > expected = xe_pte_encode(pt, 0, XE_CACHE_WB, 0);
> > - if (m->eng->vm->flags & XE_VM_FLAG_64K)
> > + if (m->q->vm->flags & XE_VM_FLAG_64K)
> > expected |= XE_PTE_PS64;
> > if (xe_bo_is_vram(pt))
> > xe_res_first(pt->ttm.resource, 0, pt->size, &src_it);
> > @@ -399,11 +399,11 @@ static int migrate_test_run_device(struct xe_device *xe)
> > struct ww_acquire_ctx ww;
> >
> > kunit_info(test, "Testing tile id %d.\n", id);
> > - xe_vm_lock(m->eng->vm, &ww, 0, true);
> > + xe_vm_lock(m->q->vm, &ww, 0, true);
> > xe_device_mem_access_get(xe);
> > xe_migrate_sanity_test(m, test);
> > xe_device_mem_access_put(xe);
> > - xe_vm_unlock(m->eng->vm, &ww);
> > + xe_vm_unlock(m->q->vm, &ww);
> > }
> >
> > return 0;
> > diff --git a/drivers/gpu/drm/xe/xe_bb.c b/drivers/gpu/drm/xe/xe_bb.c
> > index b15a7cb7db4c..38f4ce83a207 100644
> > --- a/drivers/gpu/drm/xe/xe_bb.c
> > +++ b/drivers/gpu/drm/xe/xe_bb.c
> > @@ -7,7 +7,7 @@
> >
> > #include "regs/xe_gpu_commands.h"
> > #include "xe_device.h"
> > -#include "xe_engine_types.h"
> > +#include "xe_exec_queue_types.h"
> > #include "xe_gt.h"
> > #include "xe_hw_fence.h"
> > #include "xe_sa.h"
> > @@ -60,30 +60,30 @@ struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 dwords, bool usm)
> > }
> >
> > static struct xe_sched_job *
> > -__xe_bb_create_job(struct xe_engine *kernel_eng, struct xe_bb *bb, u64 *addr)
> > +__xe_bb_create_job(struct xe_exec_queue *q, struct xe_bb *bb, u64 *addr)
> > {
> > u32 size = drm_suballoc_size(bb->bo);
> >
> > bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
> >
> > - WARN_ON(bb->len * 4 + bb_prefetch(kernel_eng->gt) > size);
> > + WARN_ON(bb->len * 4 + bb_prefetch(q->gt) > size);
> >
> > xe_sa_bo_flush_write(bb->bo);
> >
> > - return xe_sched_job_create(kernel_eng, addr);
> > + return xe_sched_job_create(q, addr);
> > }
> >
> > -struct xe_sched_job *xe_bb_create_wa_job(struct xe_engine *wa_eng,
> > +struct xe_sched_job *xe_bb_create_wa_job(struct xe_exec_queue *q,
> > struct xe_bb *bb, u64 batch_base_ofs)
> > {
> > u64 addr = batch_base_ofs + drm_suballoc_soffset(bb->bo);
> >
> > - XE_WARN_ON(!(wa_eng->vm->flags & XE_VM_FLAG_MIGRATION));
> > + XE_WARN_ON(!(q->vm->flags & XE_VM_FLAG_MIGRATION));
> >
> > - return __xe_bb_create_job(wa_eng, bb, &addr);
> > + return __xe_bb_create_job(q, bb, &addr);
> > }
> >
> > -struct xe_sched_job *xe_bb_create_migration_job(struct xe_engine *kernel_eng,
> > +struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
> > struct xe_bb *bb,
> > u64 batch_base_ofs,
> > u32 second_idx)
> > @@ -95,18 +95,18 @@ struct xe_sched_job *xe_bb_create_migration_job(struct xe_engine *kernel_eng,
> > };
> >
> > XE_WARN_ON(second_idx > bb->len);
> > - XE_WARN_ON(!(kernel_eng->vm->flags & XE_VM_FLAG_MIGRATION));
> > + XE_WARN_ON(!(q->vm->flags & XE_VM_FLAG_MIGRATION));
> >
> > - return __xe_bb_create_job(kernel_eng, bb, addr);
> > + return __xe_bb_create_job(q, bb, addr);
> > }
> >
> > -struct xe_sched_job *xe_bb_create_job(struct xe_engine *kernel_eng,
> > +struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q,
> > struct xe_bb *bb)
> > {
> > u64 addr = xe_sa_bo_gpu_addr(bb->bo);
> >
> > - XE_WARN_ON(kernel_eng->vm && kernel_eng->vm->flags & XE_VM_FLAG_MIGRATION);
> > - return __xe_bb_create_job(kernel_eng, bb, &addr);
> > + XE_WARN_ON(q->vm && q->vm->flags & XE_VM_FLAG_MIGRATION);
> > + return __xe_bb_create_job(q, bb, &addr);
> > }
> >
> > void xe_bb_free(struct xe_bb *bb, struct dma_fence *fence)
> > diff --git a/drivers/gpu/drm/xe/xe_bb.h b/drivers/gpu/drm/xe/xe_bb.h
> > index 0cc9260c9634..c5ae0770bab5 100644
> > --- a/drivers/gpu/drm/xe/xe_bb.h
> > +++ b/drivers/gpu/drm/xe/xe_bb.h
> > @@ -11,16 +11,16 @@
> > struct dma_fence;
> >
> > struct xe_gt;
> > -struct xe_engine;
> > +struct xe_exec_queue;
> > struct xe_sched_job;
> >
> > struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 size, bool usm);
> > -struct xe_sched_job *xe_bb_create_job(struct xe_engine *kernel_eng,
> > +struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q,
> > struct xe_bb *bb);
> > -struct xe_sched_job *xe_bb_create_migration_job(struct xe_engine *kernel_eng,
> > +struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
> > struct xe_bb *bb, u64 batch_ofs,
> > u32 second_idx);
> > -struct xe_sched_job *xe_bb_create_wa_job(struct xe_engine *wa_eng,
> > +struct xe_sched_job *xe_bb_create_wa_job(struct xe_exec_queue *q,
> > struct xe_bb *bb, u64 batch_ofs);
> > void xe_bb_free(struct xe_bb *bb, struct dma_fence *fence);
> >
> > diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
> > index f53f4b51233a..68abc0b195be 100644
> > --- a/drivers/gpu/drm/xe/xe_devcoredump.c
> > +++ b/drivers/gpu/drm/xe/xe_devcoredump.c
> > @@ -10,7 +10,7 @@
> > #include <generated/utsrelease.h>
> >
> > #include "xe_device.h"
> > -#include "xe_engine.h"
> > +#include "xe_exec_queue.h"
> > #include "xe_force_wake.h"
> > #include "xe_gt.h"
> > #include "xe_guc_ct.h"
> > @@ -53,9 +53,9 @@ static struct xe_device *coredump_to_xe(const struct xe_devcoredump *coredump)
> > return container_of(coredump, struct xe_device, devcoredump);
> > }
> >
> > -static struct xe_guc *engine_to_guc(struct xe_engine *e)
> > +static struct xe_guc *exec_queue_to_guc(struct xe_exec_queue *q)
> > {
> > - return &e->gt->uc.guc;
> > + return &q->gt->uc.guc;
> > }
> >
> > static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
> > @@ -91,7 +91,7 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
> >
> > drm_printf(&p, "\n**** GuC CT ****\n");
> > xe_guc_ct_snapshot_print(coredump->snapshot.ct, &p);
> > - xe_guc_engine_snapshot_print(coredump->snapshot.ge, &p);
> > + xe_guc_exec_queue_snapshot_print(coredump->snapshot.ge, &p);
> >
> > drm_printf(&p, "\n**** HW Engines ****\n");
> > for (i = 0; i < XE_NUM_HW_ENGINES; i++)
> > @@ -112,7 +112,7 @@ static void xe_devcoredump_free(void *data)
> > return;
> >
> > xe_guc_ct_snapshot_free(coredump->snapshot.ct);
> > - xe_guc_engine_snapshot_free(coredump->snapshot.ge);
> > + xe_guc_exec_queue_snapshot_free(coredump->snapshot.ge);
> > for (i = 0; i < XE_NUM_HW_ENGINES; i++)
> > if (coredump->snapshot.hwe[i])
> > xe_hw_engine_snapshot_free(coredump->snapshot.hwe[i]);
> > @@ -123,14 +123,14 @@ static void xe_devcoredump_free(void *data)
> > }
> >
> > static void devcoredump_snapshot(struct xe_devcoredump *coredump,
> > - struct xe_engine *e)
> > + struct xe_exec_queue *q)
> > {
> > struct xe_devcoredump_snapshot *ss = &coredump->snapshot;
> > - struct xe_guc *guc = engine_to_guc(e);
> > + struct xe_guc *guc = exec_queue_to_guc(q);
> > struct xe_hw_engine *hwe;
> > enum xe_hw_engine_id id;
> > - u32 adj_logical_mask = e->logical_mask;
> > - u32 width_mask = (0x1 << e->width) - 1;
> > + u32 adj_logical_mask = q->logical_mask;
> > + u32 width_mask = (0x1 << q->width) - 1;
> > int i;
> > bool cookie;
> >
> > @@ -138,22 +138,22 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
> > ss->boot_time = ktime_get_boottime();
> >
> > cookie = dma_fence_begin_signalling();
> > - for (i = 0; e->width > 1 && i < XE_HW_ENGINE_MAX_INSTANCE;) {
> > + for (i = 0; q->width > 1 && i < XE_HW_ENGINE_MAX_INSTANCE;) {
> > if (adj_logical_mask & BIT(i)) {
> > adj_logical_mask |= width_mask << i;
> > - i += e->width;
> > + i += q->width;
> > } else {
> > ++i;
> > }
> > }
> >
> > - xe_force_wake_get(gt_to_fw(e->gt), XE_FORCEWAKE_ALL);
> > + xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
> >
> > coredump->snapshot.ct = xe_guc_ct_snapshot_capture(&guc->ct, true);
> > - coredump->snapshot.ge = xe_guc_engine_snapshot_capture(e);
> > + coredump->snapshot.ge = xe_guc_exec_queue_snapshot_capture(q);
> >
> > - for_each_hw_engine(hwe, e->gt, id) {
> > - if (hwe->class != e->hwe->class ||
> > + for_each_hw_engine(hwe, q->gt, id) {
> > + if (hwe->class != q->hwe->class ||
> > !(BIT(hwe->logical_instance) & adj_logical_mask)) {
> > coredump->snapshot.hwe[id] = NULL;
> > continue;
> > @@ -161,21 +161,21 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
> > coredump->snapshot.hwe[id] = xe_hw_engine_snapshot_capture(hwe);
> > }
> >
> > - xe_force_wake_put(gt_to_fw(e->gt), XE_FORCEWAKE_ALL);
> > + xe_force_wake_put(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
> > dma_fence_end_signalling(cookie);
> > }
> >
> > /**
> > * xe_devcoredump - Take the required snapshots and initialize coredump device.
> > - * @e: The faulty xe_engine, where the issue was detected.
> > + * @q: The faulty xe_exec_queue, where the issue was detected.
> > *
> > * This function should be called at the crash time within the serialized
> > * gt_reset. It is skipped if we still have the core dump device available
> > * with the information of the 'first' snapshot.
> > */
> > -void xe_devcoredump(struct xe_engine *e)
> > +void xe_devcoredump(struct xe_exec_queue *q)
> > {
> > - struct xe_device *xe = gt_to_xe(e->gt);
> > + struct xe_device *xe = gt_to_xe(q->gt);
> > struct xe_devcoredump *coredump = &xe->devcoredump;
> >
> > if (coredump->captured) {
> > @@ -184,7 +184,7 @@ void xe_devcoredump(struct xe_engine *e)
> > }
> >
> > coredump->captured = true;
> > - devcoredump_snapshot(coredump, e);
> > + devcoredump_snapshot(coredump, q);
> >
> > drm_info(&xe->drm, "Xe device coredump has been created\n");
> > drm_info(&xe->drm, "Check your /sys/class/drm/card%d/device/devcoredump/data\n",
> > diff --git a/drivers/gpu/drm/xe/xe_devcoredump.h b/drivers/gpu/drm/xe/xe_devcoredump.h
> > index 854882129227..6ac218a5c194 100644
> > --- a/drivers/gpu/drm/xe/xe_devcoredump.h
> > +++ b/drivers/gpu/drm/xe/xe_devcoredump.h
> > @@ -7,12 +7,12 @@
> > #define _XE_DEVCOREDUMP_H_
> >
> > struct xe_device;
> > -struct xe_engine;
> > +struct xe_exec_queue;
> >
> > #ifdef CONFIG_DEV_COREDUMP
> > -void xe_devcoredump(struct xe_engine *e);
> > +void xe_devcoredump(struct xe_exec_queue *q);
> > #else
> > -static inline void xe_devcoredump(struct xe_engine *e)
> > +static inline void xe_devcoredump(struct xe_exec_queue *q)
> > {
> > }
> > #endif
> > diff --git a/drivers/gpu/drm/xe/xe_devcoredump_types.h b/drivers/gpu/drm/xe/xe_devcoredump_types.h
> > index c0d711eb6ab3..7fdad9c3d3dd 100644
> > --- a/drivers/gpu/drm/xe/xe_devcoredump_types.h
> > +++ b/drivers/gpu/drm/xe/xe_devcoredump_types.h
> > @@ -30,7 +30,7 @@ struct xe_devcoredump_snapshot {
> > /** @ct: GuC CT snapshot */
> > struct xe_guc_ct_snapshot *ct;
> > /** @ge: Guc Engine snapshot */
> > - struct xe_guc_submit_engine_snapshot *ge;
> > + struct xe_guc_submit_exec_queue_snapshot *ge;
> > /** @hwe: HW Engine snapshot array */
> > struct xe_hw_engine_snapshot *hwe[XE_NUM_HW_ENGINES];
> > };
> > diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
> > index 5409cf7895d3..766df07de979 100644
> > --- a/drivers/gpu/drm/xe/xe_device.c
> > +++ b/drivers/gpu/drm/xe/xe_device.c
> > @@ -19,7 +19,7 @@
> > #include "xe_display.h"
> > #include "xe_dma_buf.h"
> > #include "xe_drv.h"
> > -#include "xe_engine.h"
> > +#include "xe_exec_queue.h"
> > #include "xe_exec.h"
> > #include "xe_gt.h"
> > #include "xe_irq.h"
> > @@ -54,33 +54,33 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
> > mutex_init(&xef->vm.lock);
> > xa_init_flags(&xef->vm.xa, XA_FLAGS_ALLOC1);
> >
> > - mutex_init(&xef->engine.lock);
> > - xa_init_flags(&xef->engine.xa, XA_FLAGS_ALLOC1);
> > + mutex_init(&xef->exec_queue.lock);
> > + xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1);
> >
> > file->driver_priv = xef;
> > return 0;
> > }
> >
> > -static void device_kill_persistent_engines(struct xe_device *xe,
> > - struct xe_file *xef);
> > +static void device_kill_persistent_exec_queues(struct xe_device *xe,
> > + struct xe_file *xef);
> >
> > static void xe_file_close(struct drm_device *dev, struct drm_file *file)
> > {
> > struct xe_device *xe = to_xe_device(dev);
> > struct xe_file *xef = file->driver_priv;
> > struct xe_vm *vm;
> > - struct xe_engine *e;
> > + struct xe_exec_queue *q;
> > unsigned long idx;
> >
> > - mutex_lock(&xef->engine.lock);
> > - xa_for_each(&xef->engine.xa, idx, e) {
> > - xe_engine_kill(e);
> > - xe_engine_put(e);
> > + mutex_lock(&xef->exec_queue.lock);
> > + xa_for_each(&xef->exec_queue.xa, idx, q) {
> > + xe_exec_queue_kill(q);
> > + xe_exec_queue_put(q);
> > }
> > - mutex_unlock(&xef->engine.lock);
> > - xa_destroy(&xef->engine.xa);
> > - mutex_destroy(&xef->engine.lock);
> > - device_kill_persistent_engines(xe, xef);
> > + mutex_unlock(&xef->exec_queue.lock);
> > + xa_destroy(&xef->exec_queue.xa);
> > + mutex_destroy(&xef->exec_queue.lock);
> > + device_kill_persistent_exec_queues(xe, xef);
> >
> > mutex_lock(&xef->vm.lock);
> > xa_for_each(&xef->vm.xa, idx, vm)
> > @@ -100,15 +100,15 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
> > DRM_IOCTL_DEF_DRV(XE_VM_CREATE, xe_vm_create_ioctl, DRM_RENDER_ALLOW),
> > DRM_IOCTL_DEF_DRV(XE_VM_DESTROY, xe_vm_destroy_ioctl, DRM_RENDER_ALLOW),
> > DRM_IOCTL_DEF_DRV(XE_VM_BIND, xe_vm_bind_ioctl, DRM_RENDER_ALLOW),
> > - DRM_IOCTL_DEF_DRV(XE_ENGINE_CREATE, xe_engine_create_ioctl,
> > + DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_CREATE, xe_exec_queue_create_ioctl,
> > DRM_RENDER_ALLOW),
> > - DRM_IOCTL_DEF_DRV(XE_ENGINE_GET_PROPERTY, xe_engine_get_property_ioctl,
> > + DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_GET_PROPERTY, xe_exec_queue_get_property_ioctl,
> > DRM_RENDER_ALLOW),
> > - DRM_IOCTL_DEF_DRV(XE_ENGINE_DESTROY, xe_engine_destroy_ioctl,
> > + DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_DESTROY, xe_exec_queue_destroy_ioctl,
> > DRM_RENDER_ALLOW),
> > DRM_IOCTL_DEF_DRV(XE_EXEC, xe_exec_ioctl, DRM_RENDER_ALLOW),
> > DRM_IOCTL_DEF_DRV(XE_MMIO, xe_mmio_ioctl, DRM_RENDER_ALLOW),
> > - DRM_IOCTL_DEF_DRV(XE_ENGINE_SET_PROPERTY, xe_engine_set_property_ioctl,
> > + DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_SET_PROPERTY, xe_exec_queue_set_property_ioctl,
> > DRM_RENDER_ALLOW),
> > DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
> > DRM_RENDER_ALLOW),
> > @@ -372,33 +372,33 @@ void xe_device_shutdown(struct xe_device *xe)
> > {
> > }
> >
> > -void xe_device_add_persistent_engines(struct xe_device *xe, struct xe_engine *e)
> > +void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q)
> > {
> > mutex_lock(&xe->persistent_engines.lock);
> > - list_add_tail(&e->persistent.link, &xe->persistent_engines.list);
> > + list_add_tail(&q->persistent.link, &xe->persistent_engines.list);
> > mutex_unlock(&xe->persistent_engines.lock);
> > }
> >
> > -void xe_device_remove_persistent_engines(struct xe_device *xe,
> > - struct xe_engine *e)
> > +void xe_device_remove_persistent_exec_queues(struct xe_device *xe,
> > + struct xe_exec_queue *q)
> > {
> > mutex_lock(&xe->persistent_engines.lock);
> > - if (!list_empty(&e->persistent.link))
> > - list_del(&e->persistent.link);
> > + if (!list_empty(&q->persistent.link))
> > + list_del(&q->persistent.link);
> > mutex_unlock(&xe->persistent_engines.lock);
> > }
> >
> > -static void device_kill_persistent_engines(struct xe_device *xe,
> > - struct xe_file *xef)
> > +static void device_kill_persistent_exec_queues(struct xe_device *xe,
> > + struct xe_file *xef)
> > {
> > - struct xe_engine *e, *next;
> > + struct xe_exec_queue *q, *next;
> >
> > mutex_lock(&xe->persistent_engines.lock);
> > - list_for_each_entry_safe(e, next, &xe->persistent_engines.list,
> > + list_for_each_entry_safe(q, next, &xe->persistent_engines.list,
> > persistent.link)
> > - if (e->persistent.xef == xef) {
> > - xe_engine_kill(e);
> > - list_del_init(&e->persistent.link);
> > + if (q->persistent.xef == xef) {
> > + xe_exec_queue_kill(q);
> > + list_del_init(&q->persistent.link);
> > }
> > mutex_unlock(&xe->persistent_engines.lock);
> > }
> > diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
> > index 61a5cf1f7300..71582094834c 100644
> > --- a/drivers/gpu/drm/xe/xe_device.h
> > +++ b/drivers/gpu/drm/xe/xe_device.h
> > @@ -6,7 +6,7 @@
> > #ifndef _XE_DEVICE_H_
> > #define _XE_DEVICE_H_
> >
> > -struct xe_engine;
> > +struct xe_exec_queue;
> > struct xe_file;
> >
> > #include <drm/drm_util.h>
> > @@ -41,9 +41,9 @@ int xe_device_probe(struct xe_device *xe);
> > void xe_device_remove(struct xe_device *xe);
> > void xe_device_shutdown(struct xe_device *xe);
> >
> > -void xe_device_add_persistent_engines(struct xe_device *xe, struct xe_engine *e);
> > -void xe_device_remove_persistent_engines(struct xe_device *xe,
> > - struct xe_engine *e);
> > +void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q);
> > +void xe_device_remove_persistent_exec_queues(struct xe_device *xe,
> > + struct xe_exec_queue *q);
> >
> > void xe_device_wmb(struct xe_device *xe);
> >
> > diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
> > index b156f69d7320..f6665cd16a66 100644
> > --- a/drivers/gpu/drm/xe/xe_device_types.h
> > +++ b/drivers/gpu/drm/xe/xe_device_types.h
> > @@ -494,7 +494,7 @@ struct xe_file {
> > struct xarray xa;
> > /** @lock: protects file engine state */
> > struct mutex lock;
> > - } engine;
> > + } exec_queue;
> > };
> >
> > #endif
> > diff --git a/drivers/gpu/drm/xe/xe_engine.c b/drivers/gpu/drm/xe/xe_engine.c
> > deleted file mode 100644
> > index c30810a687b1..000000000000
> > --- a/drivers/gpu/drm/xe/xe_engine.c
> > +++ /dev/null
> > @@ -1,850 +0,0 @@
> > -// SPDX-License-Identifier: MIT
> > -/*
> > - * Copyright © 2021 Intel Corporation
> > - */
> > -
> > -#include "xe_engine.h"
> > -
> > -#include <linux/nospec.h>
> > -
> > -#include <drm/drm_device.h>
> > -#include <drm/drm_file.h>
> > -#include <drm/xe_drm.h>
> > -
> > -#include "xe_device.h"
> > -#include "xe_gt.h"
> > -#include "xe_hw_fence.h"
> > -#include "xe_lrc.h"
> > -#include "xe_macros.h"
> > -#include "xe_migrate.h"
> > -#include "xe_pm.h"
> > -#include "xe_ring_ops_types.h"
> > -#include "xe_trace.h"
> > -#include "xe_vm.h"
> > -
> > -static struct xe_engine *__xe_engine_create(struct xe_device *xe,
> > - struct xe_vm *vm,
> > - u32 logical_mask,
> > - u16 width, struct xe_hw_engine *hwe,
> > - u32 flags)
> > -{
> > - struct xe_engine *e;
> > - struct xe_gt *gt = hwe->gt;
> > - int err;
> > - int i;
> > -
> > - e = kzalloc(sizeof(*e) + sizeof(struct xe_lrc) * width, GFP_KERNEL);
> > - if (!e)
> > - return ERR_PTR(-ENOMEM);
> > -
> > - kref_init(&e->refcount);
> > - e->flags = flags;
> > - e->hwe = hwe;
> > - e->gt = gt;
> > - if (vm)
> > - e->vm = xe_vm_get(vm);
> > - e->class = hwe->class;
> > - e->width = width;
> > - e->logical_mask = logical_mask;
> > - e->fence_irq = >->fence_irq[hwe->class];
> > - e->ring_ops = gt->ring_ops[hwe->class];
> > - e->ops = gt->engine_ops;
> > - INIT_LIST_HEAD(&e->persistent.link);
> > - INIT_LIST_HEAD(&e->compute.link);
> > - INIT_LIST_HEAD(&e->multi_gt_link);
> > -
> > - /* FIXME: Wire up to configurable default value */
> > - e->sched_props.timeslice_us = 1 * 1000;
> > - e->sched_props.preempt_timeout_us = 640 * 1000;
> > -
> > - if (xe_engine_is_parallel(e)) {
> > - e->parallel.composite_fence_ctx = dma_fence_context_alloc(1);
> > - e->parallel.composite_fence_seqno = XE_FENCE_INITIAL_SEQNO;
> > - }
> > - if (e->flags & ENGINE_FLAG_VM) {
> > - e->bind.fence_ctx = dma_fence_context_alloc(1);
> > - e->bind.fence_seqno = XE_FENCE_INITIAL_SEQNO;
> > - }
> > -
> > - for (i = 0; i < width; ++i) {
> > - err = xe_lrc_init(e->lrc + i, hwe, e, vm, SZ_16K);
> > - if (err)
> > - goto err_lrc;
> > - }
> > -
> > - err = e->ops->init(e);
> > - if (err)
> > - goto err_lrc;
> > -
> > - /*
> > - * Normally the user vm holds an rpm ref to keep the device
> > - * awake, and the context holds a ref for the vm, however for
> > - * some engines we use the kernels migrate vm underneath which
> > - * offers no such rpm ref. Make sure we keep a ref here, so we
> > - * can perform GuC CT actions when needed. Caller is expected to
> > - * have already grabbed the rpm ref outside any sensitive locks.
> > - */
> > - if (e->flags & ENGINE_FLAG_VM)
> > - drm_WARN_ON(&xe->drm, !xe_device_mem_access_get_if_ongoing(xe));
> > -
> > - return e;
> > -
> > -err_lrc:
> > - for (i = i - 1; i >= 0; --i)
> > - xe_lrc_finish(e->lrc + i);
> > - kfree(e);
> > - return ERR_PTR(err);
> > -}
> > -
> > -struct xe_engine *xe_engine_create(struct xe_device *xe, struct xe_vm *vm,
> > - u32 logical_mask, u16 width,
> > - struct xe_hw_engine *hwe, u32 flags)
> > -{
> > - struct ww_acquire_ctx ww;
> > - struct xe_engine *e;
> > - int err;
> > -
> > - if (vm) {
> > - err = xe_vm_lock(vm, &ww, 0, true);
> > - if (err)
> > - return ERR_PTR(err);
> > - }
> > - e = __xe_engine_create(xe, vm, logical_mask, width, hwe, flags);
> > - if (vm)
> > - xe_vm_unlock(vm, &ww);
> > -
> > - return e;
> > -}
> > -
> > -struct xe_engine *xe_engine_create_class(struct xe_device *xe, struct xe_gt *gt,
> > - struct xe_vm *vm,
> > - enum xe_engine_class class, u32 flags)
> > -{
> > - struct xe_hw_engine *hwe, *hwe0 = NULL;
> > - enum xe_hw_engine_id id;
> > - u32 logical_mask = 0;
> > -
> > - for_each_hw_engine(hwe, gt, id) {
> > - if (xe_hw_engine_is_reserved(hwe))
> > - continue;
> > -
> > - if (hwe->class == class) {
> > - logical_mask |= BIT(hwe->logical_instance);
> > - if (!hwe0)
> > - hwe0 = hwe;
> > - }
> > - }
> > -
> > - if (!logical_mask)
> > - return ERR_PTR(-ENODEV);
> > -
> > - return xe_engine_create(xe, vm, logical_mask, 1, hwe0, flags);
> > -}
> > -
> > -void xe_engine_destroy(struct kref *ref)
> > -{
> > - struct xe_engine *e = container_of(ref, struct xe_engine, refcount);
> > - struct xe_engine *engine, *next;
> > -
> > - if (!(e->flags & ENGINE_FLAG_BIND_ENGINE_CHILD)) {
> > - list_for_each_entry_safe(engine, next, &e->multi_gt_list,
> > - multi_gt_link)
> > - xe_engine_put(engine);
> > - }
> > -
> > - e->ops->fini(e);
> > -}
> > -
> > -void xe_engine_fini(struct xe_engine *e)
> > -{
> > - int i;
> > -
> > - for (i = 0; i < e->width; ++i)
> > - xe_lrc_finish(e->lrc + i);
> > - if (e->vm)
> > - xe_vm_put(e->vm);
> > - if (e->flags & ENGINE_FLAG_VM)
> > - xe_device_mem_access_put(gt_to_xe(e->gt));
> > -
> > - kfree(e);
> > -}
> > -
> > -struct xe_engine *xe_engine_lookup(struct xe_file *xef, u32 id)
> > -{
> > - struct xe_engine *e;
> > -
> > - mutex_lock(&xef->engine.lock);
> > - e = xa_load(&xef->engine.xa, id);
> > - if (e)
> > - xe_engine_get(e);
> > - mutex_unlock(&xef->engine.lock);
> > -
> > - return e;
> > -}
> > -
> > -enum drm_sched_priority
> > -xe_engine_device_get_max_priority(struct xe_device *xe)
> > -{
> > - return capable(CAP_SYS_NICE) ? DRM_SCHED_PRIORITY_HIGH :
> > - DRM_SCHED_PRIORITY_NORMAL;
> > -}
> > -
> > -static int engine_set_priority(struct xe_device *xe, struct xe_engine *e,
> > - u64 value, bool create)
> > -{
> > - if (XE_IOCTL_DBG(xe, value > DRM_SCHED_PRIORITY_HIGH))
> > - return -EINVAL;
> > -
> > - if (XE_IOCTL_DBG(xe, value > xe_engine_device_get_max_priority(xe)))
> > - return -EPERM;
> > -
> > - return e->ops->set_priority(e, value);
> > -}
> > -
> > -static int engine_set_timeslice(struct xe_device *xe, struct xe_engine *e,
> > - u64 value, bool create)
> > -{
> > - if (!capable(CAP_SYS_NICE))
> > - return -EPERM;
> > -
> > - return e->ops->set_timeslice(e, value);
> > -}
> > -
> > -static int engine_set_preemption_timeout(struct xe_device *xe,
> > - struct xe_engine *e, u64 value,
> > - bool create)
> > -{
> > - if (!capable(CAP_SYS_NICE))
> > - return -EPERM;
> > -
> > - return e->ops->set_preempt_timeout(e, value);
> > -}
> > -
> > -static int engine_set_compute_mode(struct xe_device *xe, struct xe_engine *e,
> > - u64 value, bool create)
> > -{
> > - if (XE_IOCTL_DBG(xe, !create))
> > - return -EINVAL;
> > -
> > - if (XE_IOCTL_DBG(xe, e->flags & ENGINE_FLAG_COMPUTE_MODE))
> > - return -EINVAL;
> > -
> > - if (XE_IOCTL_DBG(xe, e->flags & ENGINE_FLAG_VM))
> > - return -EINVAL;
> > -
> > - if (value) {
> > - struct xe_vm *vm = e->vm;
> > - int err;
> > -
> > - if (XE_IOCTL_DBG(xe, xe_vm_in_fault_mode(vm)))
> > - return -EOPNOTSUPP;
> > -
> > - if (XE_IOCTL_DBG(xe, !xe_vm_in_compute_mode(vm)))
> > - return -EOPNOTSUPP;
> > -
> > - if (XE_IOCTL_DBG(xe, e->width != 1))
> > - return -EINVAL;
> > -
> > - e->compute.context = dma_fence_context_alloc(1);
> > - spin_lock_init(&e->compute.lock);
> > -
> > - err = xe_vm_add_compute_engine(vm, e);
> > - if (XE_IOCTL_DBG(xe, err))
> > - return err;
> > -
> > - e->flags |= ENGINE_FLAG_COMPUTE_MODE;
> > - e->flags &= ~ENGINE_FLAG_PERSISTENT;
> > - }
> > -
> > - return 0;
> > -}
> > -
> > -static int engine_set_persistence(struct xe_device *xe, struct xe_engine *e,
> > - u64 value, bool create)
> > -{
> > - if (XE_IOCTL_DBG(xe, !create))
> > - return -EINVAL;
> > -
> > - if (XE_IOCTL_DBG(xe, e->flags & ENGINE_FLAG_COMPUTE_MODE))
> > - return -EINVAL;
> > -
> > - if (value)
> > - e->flags |= ENGINE_FLAG_PERSISTENT;
> > - else
> > - e->flags &= ~ENGINE_FLAG_PERSISTENT;
> > -
> > - return 0;
> > -}
> > -
> > -static int engine_set_job_timeout(struct xe_device *xe, struct xe_engine *e,
> > - u64 value, bool create)
> > -{
> > - if (XE_IOCTL_DBG(xe, !create))
> > - return -EINVAL;
> > -
> > - if (!capable(CAP_SYS_NICE))
> > - return -EPERM;
> > -
> > - return e->ops->set_job_timeout(e, value);
> > -}
> > -
> > -static int engine_set_acc_trigger(struct xe_device *xe, struct xe_engine *e,
> > - u64 value, bool create)
> > -{
> > - if (XE_IOCTL_DBG(xe, !create))
> > - return -EINVAL;
> > -
> > - if (XE_IOCTL_DBG(xe, !xe->info.supports_usm))
> > - return -EINVAL;
> > -
> > - e->usm.acc_trigger = value;
> > -
> > - return 0;
> > -}
> > -
> > -static int engine_set_acc_notify(struct xe_device *xe, struct xe_engine *e,
> > - u64 value, bool create)
> > -{
> > - if (XE_IOCTL_DBG(xe, !create))
> > - return -EINVAL;
> > -
> > - if (XE_IOCTL_DBG(xe, !xe->info.supports_usm))
> > - return -EINVAL;
> > -
> > - e->usm.acc_notify = value;
> > -
> > - return 0;
> > -}
> > -
> > -static int engine_set_acc_granularity(struct xe_device *xe, struct xe_engine *e,
> > - u64 value, bool create)
> > -{
> > - if (XE_IOCTL_DBG(xe, !create))
> > - return -EINVAL;
> > -
> > - if (XE_IOCTL_DBG(xe, !xe->info.supports_usm))
> > - return -EINVAL;
> > -
> > - e->usm.acc_granularity = value;
> > -
> > - return 0;
> > -}
> > -
> > -typedef int (*xe_engine_set_property_fn)(struct xe_device *xe,
> > - struct xe_engine *e,
> > - u64 value, bool create);
> > -
> > -static const xe_engine_set_property_fn engine_set_property_funcs[] = {
> > - [XE_ENGINE_SET_PROPERTY_PRIORITY] = engine_set_priority,
> > - [XE_ENGINE_SET_PROPERTY_TIMESLICE] = engine_set_timeslice,
> > - [XE_ENGINE_SET_PROPERTY_PREEMPTION_TIMEOUT] = engine_set_preemption_timeout,
> > - [XE_ENGINE_SET_PROPERTY_COMPUTE_MODE] = engine_set_compute_mode,
> > - [XE_ENGINE_SET_PROPERTY_PERSISTENCE] = engine_set_persistence,
> > - [XE_ENGINE_SET_PROPERTY_JOB_TIMEOUT] = engine_set_job_timeout,
> > - [XE_ENGINE_SET_PROPERTY_ACC_TRIGGER] = engine_set_acc_trigger,
> > - [XE_ENGINE_SET_PROPERTY_ACC_NOTIFY] = engine_set_acc_notify,
> > - [XE_ENGINE_SET_PROPERTY_ACC_GRANULARITY] = engine_set_acc_granularity,
> > -};
> > -
> > -static int engine_user_ext_set_property(struct xe_device *xe,
> > - struct xe_engine *e,
> > - u64 extension,
> > - bool create)
> > -{
> > - u64 __user *address = u64_to_user_ptr(extension);
> > - struct drm_xe_ext_engine_set_property ext;
> > - int err;
> > - u32 idx;
> > -
> > - err = __copy_from_user(&ext, address, sizeof(ext));
> > - if (XE_IOCTL_DBG(xe, err))
> > - return -EFAULT;
> > -
> > - if (XE_IOCTL_DBG(xe, ext.property >=
> > - ARRAY_SIZE(engine_set_property_funcs)) ||
> > - XE_IOCTL_DBG(xe, ext.pad))
> > - return -EINVAL;
> > -
> > - idx = array_index_nospec(ext.property, ARRAY_SIZE(engine_set_property_funcs));
> > - return engine_set_property_funcs[idx](xe, e, ext.value, create);
> > -}
> > -
> > -typedef int (*xe_engine_user_extension_fn)(struct xe_device *xe,
> > - struct xe_engine *e,
> > - u64 extension,
> > - bool create);
> > -
> > -static const xe_engine_set_property_fn engine_user_extension_funcs[] = {
> > - [XE_ENGINE_EXTENSION_SET_PROPERTY] = engine_user_ext_set_property,
> > -};
> > -
> > -#define MAX_USER_EXTENSIONS 16
> > -static int engine_user_extensions(struct xe_device *xe, struct xe_engine *e,
> > - u64 extensions, int ext_number, bool create)
> > -{
> > - u64 __user *address = u64_to_user_ptr(extensions);
> > - struct xe_user_extension ext;
> > - int err;
> > - u32 idx;
> > -
> > - if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
> > - return -E2BIG;
> > -
> > - err = __copy_from_user(&ext, address, sizeof(ext));
> > - if (XE_IOCTL_DBG(xe, err))
> > - return -EFAULT;
> > -
> > - if (XE_IOCTL_DBG(xe, ext.pad) ||
> > - XE_IOCTL_DBG(xe, ext.name >=
> > - ARRAY_SIZE(engine_user_extension_funcs)))
> > - return -EINVAL;
> > -
> > - idx = array_index_nospec(ext.name,
> > - ARRAY_SIZE(engine_user_extension_funcs));
> > - err = engine_user_extension_funcs[idx](xe, e, extensions, create);
> > - if (XE_IOCTL_DBG(xe, err))
> > - return err;
> > -
> > - if (ext.next_extension)
> > - return engine_user_extensions(xe, e, ext.next_extension,
> > - ++ext_number, create);
> > -
> > - return 0;
> > -}
> > -
> > -static const enum xe_engine_class user_to_xe_engine_class[] = {
> > - [DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
> > - [DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
> > - [DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
> > - [DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
> > - [DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
> > -};
> > -
> > -static struct xe_hw_engine *
> > -find_hw_engine(struct xe_device *xe,
> > - struct drm_xe_engine_class_instance eci)
> > -{
> > - u32 idx;
> > -
> > - if (eci.engine_class > ARRAY_SIZE(user_to_xe_engine_class))
> > - return NULL;
> > -
> > - if (eci.gt_id >= xe->info.gt_count)
> > - return NULL;
> > -
> > - idx = array_index_nospec(eci.engine_class,
> > - ARRAY_SIZE(user_to_xe_engine_class));
> > -
> > - return xe_gt_hw_engine(xe_device_get_gt(xe, eci.gt_id),
> > - user_to_xe_engine_class[idx],
> > - eci.engine_instance, true);
> > -}
> > -
> > -static u32 bind_engine_logical_mask(struct xe_device *xe, struct xe_gt *gt,
> > - struct drm_xe_engine_class_instance *eci,
> > - u16 width, u16 num_placements)
> > -{
> > - struct xe_hw_engine *hwe;
> > - enum xe_hw_engine_id id;
> > - u32 logical_mask = 0;
> > -
> > - if (XE_IOCTL_DBG(xe, width != 1))
> > - return 0;
> > - if (XE_IOCTL_DBG(xe, num_placements != 1))
> > - return 0;
> > - if (XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
> > - return 0;
> > -
> > - eci[0].engine_class = DRM_XE_ENGINE_CLASS_COPY;
> > -
> > - for_each_hw_engine(hwe, gt, id) {
> > - if (xe_hw_engine_is_reserved(hwe))
> > - continue;
> > -
> > - if (hwe->class ==
> > - user_to_xe_engine_class[DRM_XE_ENGINE_CLASS_COPY])
> > - logical_mask |= BIT(hwe->logical_instance);
> > - }
> > -
> > - return logical_mask;
> > -}
> > -
> > -static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
> > - struct drm_xe_engine_class_instance *eci,
> > - u16 width, u16 num_placements)
> > -{
> > - int len = width * num_placements;
> > - int i, j, n;
> > - u16 class;
> > - u16 gt_id;
> > - u32 return_mask = 0, prev_mask;
> > -
> > - if (XE_IOCTL_DBG(xe, !xe_device_guc_submission_enabled(xe) &&
> > - len > 1))
> > - return 0;
> > -
> > - for (i = 0; i < width; ++i) {
> > - u32 current_mask = 0;
> > -
> > - for (j = 0; j < num_placements; ++j) {
> > - struct xe_hw_engine *hwe;
> > -
> > - n = j * width + i;
> > -
> > - hwe = find_hw_engine(xe, eci[n]);
> > - if (XE_IOCTL_DBG(xe, !hwe))
> > - return 0;
> > -
> > - if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe)))
> > - return 0;
> > -
> > - if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) ||
> > - XE_IOCTL_DBG(xe, n && eci[n].engine_class != class))
> > - return 0;
> > -
> > - class = eci[n].engine_class;
> > - gt_id = eci[n].gt_id;
> > -
> > - if (width == 1 || !i)
> > - return_mask |= BIT(eci[n].engine_instance);
> > - current_mask |= BIT(eci[n].engine_instance);
> > - }
> > -
> > - /* Parallel submissions must be logically contiguous */
> > - if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1))
> > - return 0;
> > -
> > - prev_mask = current_mask;
> > - }
> > -
> > - return return_mask;
> > -}
> > -
> > -int xe_engine_create_ioctl(struct drm_device *dev, void *data,
> > - struct drm_file *file)
> > -{
> > - struct xe_device *xe = to_xe_device(dev);
> > - struct xe_file *xef = to_xe_file(file);
> > - struct drm_xe_engine_create *args = data;
> > - struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
> > - struct drm_xe_engine_class_instance __user *user_eci =
> > - u64_to_user_ptr(args->instances);
> > - struct xe_hw_engine *hwe;
> > - struct xe_vm *vm, *migrate_vm;
> > - struct xe_gt *gt;
> > - struct xe_engine *e = NULL;
> > - u32 logical_mask;
> > - u32 id;
> > - u32 len;
> > - int err;
> > -
> > - if (XE_IOCTL_DBG(xe, args->flags) ||
> > - XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
> > - return -EINVAL;
> > -
> > - len = args->width * args->num_placements;
> > - if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
> > - return -EINVAL;
> > -
> > - err = __copy_from_user(eci, user_eci,
> > - sizeof(struct drm_xe_engine_class_instance) *
> > - len);
> > - if (XE_IOCTL_DBG(xe, err))
> > - return -EFAULT;
> > -
> > - if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
> > - return -EINVAL;
> > -
> > - if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
> > - for_each_gt(gt, xe, id) {
> > - struct xe_engine *new;
> > -
> > - if (xe_gt_is_media_type(gt))
> > - continue;
> > -
> > - eci[0].gt_id = gt->info.id;
> > - logical_mask = bind_engine_logical_mask(xe, gt, eci,
> > - args->width,
> > - args->num_placements);
> > - if (XE_IOCTL_DBG(xe, !logical_mask))
> > - return -EINVAL;
> > -
> > - hwe = find_hw_engine(xe, eci[0]);
> > - if (XE_IOCTL_DBG(xe, !hwe))
> > - return -EINVAL;
> > -
> > - /* The migration vm doesn't hold rpm ref */
> > - xe_device_mem_access_get(xe);
> > -
> > - migrate_vm = xe_migrate_get_vm(gt_to_tile(gt)->migrate);
> > - new = xe_engine_create(xe, migrate_vm, logical_mask,
> > - args->width, hwe,
> > - ENGINE_FLAG_PERSISTENT |
> > - ENGINE_FLAG_VM |
> > - (id ?
> > - ENGINE_FLAG_BIND_ENGINE_CHILD :
> > - 0));
> > -
> > - xe_device_mem_access_put(xe); /* now held by engine */
> > -
> > - xe_vm_put(migrate_vm);
> > - if (IS_ERR(new)) {
> > - err = PTR_ERR(new);
> > - if (e)
> > - goto put_engine;
> > - return err;
> > - }
> > - if (id == 0)
> > - e = new;
> > - else
> > - list_add_tail(&new->multi_gt_list,
> > - &e->multi_gt_link);
> > - }
> > - } else {
> > - gt = xe_device_get_gt(xe, eci[0].gt_id);
> > - logical_mask = calc_validate_logical_mask(xe, gt, eci,
> > - args->width,
> > - args->num_placements);
> > - if (XE_IOCTL_DBG(xe, !logical_mask))
> > - return -EINVAL;
> > -
> > - hwe = find_hw_engine(xe, eci[0]);
> > - if (XE_IOCTL_DBG(xe, !hwe))
> > - return -EINVAL;
> > -
> > - vm = xe_vm_lookup(xef, args->vm_id);
> > - if (XE_IOCTL_DBG(xe, !vm))
> > - return -ENOENT;
> > -
> > - err = down_read_interruptible(&vm->lock);
> > - if (err) {
> > - xe_vm_put(vm);
> > - return err;
> > - }
> > -
> > - if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
> > - up_read(&vm->lock);
> > - xe_vm_put(vm);
> > - return -ENOENT;
> > - }
> > -
> > - e = xe_engine_create(xe, vm, logical_mask,
> > - args->width, hwe,
> > - xe_vm_no_dma_fences(vm) ? 0 :
> > - ENGINE_FLAG_PERSISTENT);
> > - up_read(&vm->lock);
> > - xe_vm_put(vm);
> > - if (IS_ERR(e))
> > - return PTR_ERR(e);
> > - }
> > -
> > - if (args->extensions) {
> > - err = engine_user_extensions(xe, e, args->extensions, 0, true);
> > - if (XE_IOCTL_DBG(xe, err))
> > - goto put_engine;
> > - }
> > -
> > - if (XE_IOCTL_DBG(xe, e->vm && xe_vm_in_compute_mode(e->vm) !=
> > - !!(e->flags & ENGINE_FLAG_COMPUTE_MODE))) {
> > - err = -EOPNOTSUPP;
> > - goto put_engine;
> > - }
> > -
> > - e->persistent.xef = xef;
> > -
> > - mutex_lock(&xef->engine.lock);
> > - err = xa_alloc(&xef->engine.xa, &id, e, xa_limit_32b, GFP_KERNEL);
> > - mutex_unlock(&xef->engine.lock);
> > - if (err)
> > - goto put_engine;
> > -
> > - args->engine_id = id;
> > -
> > - return 0;
> > -
> > -put_engine:
> > - xe_engine_kill(e);
> > - xe_engine_put(e);
> > - return err;
> > -}
> > -
> > -int xe_engine_get_property_ioctl(struct drm_device *dev, void *data,
> > - struct drm_file *file)
> > -{
> > - struct xe_device *xe = to_xe_device(dev);
> > - struct xe_file *xef = to_xe_file(file);
> > - struct drm_xe_engine_get_property *args = data;
> > - struct xe_engine *e;
> > - int ret;
> > -
> > - if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
> > - return -EINVAL;
> > -
> > - e = xe_engine_lookup(xef, args->engine_id);
> > - if (XE_IOCTL_DBG(xe, !e))
> > - return -ENOENT;
> > -
> > - switch (args->property) {
> > - case XE_ENGINE_GET_PROPERTY_BAN:
> > - args->value = !!(e->flags & ENGINE_FLAG_BANNED);
> > - ret = 0;
> > - break;
> > - default:
> > - ret = -EINVAL;
> > - }
> > -
> > - xe_engine_put(e);
> > -
> > - return ret;
> > -}
> > -
> > -static void engine_kill_compute(struct xe_engine *e)
> > -{
> > - if (!xe_vm_in_compute_mode(e->vm))
> > - return;
> > -
> > - down_write(&e->vm->lock);
> > - list_del(&e->compute.link);
> > - --e->vm->preempt.num_engines;
> > - if (e->compute.pfence) {
> > - dma_fence_enable_sw_signaling(e->compute.pfence);
> > - dma_fence_put(e->compute.pfence);
> > - e->compute.pfence = NULL;
> > - }
> > - up_write(&e->vm->lock);
> > -}
> > -
> > -/**
> > - * xe_engine_is_lr() - Whether an engine is long-running
> > - * @e: The engine
> > - *
> > - * Return: True if the engine is long-running, false otherwise.
> > - */
> > -bool xe_engine_is_lr(struct xe_engine *e)
> > -{
> > - return e->vm && xe_vm_no_dma_fences(e->vm) &&
> > - !(e->flags & ENGINE_FLAG_VM);
> > -}
> > -
> > -static s32 xe_engine_num_job_inflight(struct xe_engine *e)
> > -{
> > - return e->lrc->fence_ctx.next_seqno - xe_lrc_seqno(e->lrc) - 1;
> > -}
> > -
> > -/**
> > - * xe_engine_ring_full() - Whether an engine's ring is full
> > - * @e: The engine
> > - *
> > - * Return: True if the engine's ring is full, false otherwise.
> > - */
> > -bool xe_engine_ring_full(struct xe_engine *e)
> > -{
> > - struct xe_lrc *lrc = e->lrc;
> > - s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;
> > -
> > - return xe_engine_num_job_inflight(e) >= max_job;
> > -}
> > -
> > -/**
> > - * xe_engine_is_idle() - Whether an engine is idle.
> > - * @engine: The engine
> > - *
> > - * FIXME: Need to determine what to use as the short-lived
> > - * timeline lock for the engines, so that the return value
> > - * of this function becomes more than just an advisory
> > - * snapshot in time. The timeline lock must protect the
> > - * seqno from racing submissions on the same engine.
> > - * Typically vm->resv, but user-created timeline locks use the migrate vm
> > - * and never grabs the migrate vm->resv so we have a race there.
> > - *
> > - * Return: True if the engine is idle, false otherwise.
> > - */
> > -bool xe_engine_is_idle(struct xe_engine *engine)
> > -{
> > - if (XE_WARN_ON(xe_engine_is_parallel(engine)))
> > - return false;
> > -
> > - return xe_lrc_seqno(&engine->lrc[0]) ==
> > - engine->lrc[0].fence_ctx.next_seqno - 1;
> > -}
> > -
> > -void xe_engine_kill(struct xe_engine *e)
> > -{
> > - struct xe_engine *engine = e, *next;
> > -
> > - list_for_each_entry_safe(engine, next, &engine->multi_gt_list,
> > - multi_gt_link) {
> > - e->ops->kill(engine);
> > - engine_kill_compute(engine);
> > - }
> > -
> > - e->ops->kill(e);
> > - engine_kill_compute(e);
> > -}
> > -
> > -int xe_engine_destroy_ioctl(struct drm_device *dev, void *data,
> > - struct drm_file *file)
> > -{
> > - struct xe_device *xe = to_xe_device(dev);
> > - struct xe_file *xef = to_xe_file(file);
> > - struct drm_xe_engine_destroy *args = data;
> > - struct xe_engine *e;
> > -
> > - if (XE_IOCTL_DBG(xe, args->pad) ||
> > - XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
> > - return -EINVAL;
> > -
> > - mutex_lock(&xef->engine.lock);
> > - e = xa_erase(&xef->engine.xa, args->engine_id);
> > - mutex_unlock(&xef->engine.lock);
> > - if (XE_IOCTL_DBG(xe, !e))
> > - return -ENOENT;
> > -
> > - if (!(e->flags & ENGINE_FLAG_PERSISTENT))
> > - xe_engine_kill(e);
> > - else
> > - xe_device_add_persistent_engines(xe, e);
> > -
> > - trace_xe_engine_close(e);
> > - xe_engine_put(e);
> > -
> > - return 0;
> > -}
> > -
> > -int xe_engine_set_property_ioctl(struct drm_device *dev, void *data,
> > - struct drm_file *file)
> > -{
> > - struct xe_device *xe = to_xe_device(dev);
> > - struct xe_file *xef = to_xe_file(file);
> > - struct drm_xe_engine_set_property *args = data;
> > - struct xe_engine *e;
> > - int ret;
> > - u32 idx;
> > -
> > - if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
> > - return -EINVAL;
> > -
> > - e = xe_engine_lookup(xef, args->engine_id);
> > - if (XE_IOCTL_DBG(xe, !e))
> > - return -ENOENT;
> > -
> > - if (XE_IOCTL_DBG(xe, args->property >=
> > - ARRAY_SIZE(engine_set_property_funcs))) {
> > - ret = -EINVAL;
> > - goto out;
> > - }
> > -
> > - idx = array_index_nospec(args->property,
> > - ARRAY_SIZE(engine_set_property_funcs));
> > - ret = engine_set_property_funcs[idx](xe, e, args->value, false);
> > - if (XE_IOCTL_DBG(xe, ret))
> > - goto out;
> > -
> > - if (args->extensions)
> > - ret = engine_user_extensions(xe, e, args->extensions, 0,
> > - false);
> > -out:
> > - xe_engine_put(e);
> > -
> > - return ret;
> > -}
> > diff --git a/drivers/gpu/drm/xe/xe_engine.h b/drivers/gpu/drm/xe/xe_engine.h
> > deleted file mode 100644
> > index 2e60f6d90226..000000000000
> > --- a/drivers/gpu/drm/xe/xe_engine.h
> > +++ /dev/null
> > @@ -1,63 +0,0 @@
> > -/* SPDX-License-Identifier: MIT */
> > -/*
> > - * Copyright © 2021 Intel Corporation
> > - */
> > -
> > -#ifndef _XE_ENGINE_H_
> > -#define _XE_ENGINE_H_
> > -
> > -#include "xe_engine_types.h"
> > -#include "xe_vm_types.h"
> > -
> > -struct drm_device;
> > -struct drm_file;
> > -struct xe_device;
> > -struct xe_file;
> > -
> > -struct xe_engine *xe_engine_create(struct xe_device *xe, struct xe_vm *vm,
> > - u32 logical_mask, u16 width,
> > - struct xe_hw_engine *hw_engine, u32 flags);
> > -struct xe_engine *xe_engine_create_class(struct xe_device *xe, struct xe_gt *gt,
> > - struct xe_vm *vm,
> > - enum xe_engine_class class, u32 flags);
> > -
> > -void xe_engine_fini(struct xe_engine *e);
> > -void xe_engine_destroy(struct kref *ref);
> > -
> > -struct xe_engine *xe_engine_lookup(struct xe_file *xef, u32 id);
> > -
> > -static inline struct xe_engine *xe_engine_get(struct xe_engine *engine)
> > -{
> > - kref_get(&engine->refcount);
> > - return engine;
> > -}
> > -
> > -static inline void xe_engine_put(struct xe_engine *engine)
> > -{
> > - kref_put(&engine->refcount, xe_engine_destroy);
> > -}
> > -
> > -static inline bool xe_engine_is_parallel(struct xe_engine *engine)
> > -{
> > - return engine->width > 1;
> > -}
> > -
> > -bool xe_engine_is_lr(struct xe_engine *e);
> > -
> > -bool xe_engine_ring_full(struct xe_engine *e);
> > -
> > -bool xe_engine_is_idle(struct xe_engine *engine);
> > -
> > -void xe_engine_kill(struct xe_engine *e);
> > -
> > -int xe_engine_create_ioctl(struct drm_device *dev, void *data,
> > - struct drm_file *file);
> > -int xe_engine_destroy_ioctl(struct drm_device *dev, void *data,
> > - struct drm_file *file);
> > -int xe_engine_set_property_ioctl(struct drm_device *dev, void *data,
> > - struct drm_file *file);
> > -int xe_engine_get_property_ioctl(struct drm_device *dev, void *data,
> > - struct drm_file *file);
> > -enum drm_sched_priority xe_engine_device_get_max_priority(struct xe_device *xe);
> > -
> > -#endif
> > diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
> > index fff4a9d9d12a..8a5b614df090 100644
> > --- a/drivers/gpu/drm/xe/xe_exec.c
> > +++ b/drivers/gpu/drm/xe/xe_exec.c
> > @@ -12,7 +12,7 @@
> >
> > #include "xe_bo.h"
> > #include "xe_device.h"
> > -#include "xe_engine.h"
> > +#include "xe_exec_queue.h"
> > #include "xe_macros.h"
> > #include "xe_ring_ops_types.h"
> > #include "xe_sched_job.h"
> > @@ -95,19 +95,19 @@
> >
> > #define XE_EXEC_BIND_RETRY_TIMEOUT_MS 1000
> >
> > -static int xe_exec_begin(struct xe_engine *e, struct ww_acquire_ctx *ww,
> > +static int xe_exec_begin(struct xe_exec_queue *q, struct ww_acquire_ctx *ww,
> > struct ttm_validate_buffer tv_onstack[],
> > struct ttm_validate_buffer **tv,
> > struct list_head *objs)
> > {
> > - struct xe_vm *vm = e->vm;
> > + struct xe_vm *vm = q->vm;
> > struct xe_vma *vma;
> > LIST_HEAD(dups);
> > ktime_t end = 0;
> > int err = 0;
> >
> > *tv = NULL;
> > - if (xe_vm_no_dma_fences(e->vm))
> > + if (xe_vm_no_dma_fences(q->vm))
> > return 0;
> >
> > retry:
> > @@ -153,14 +153,14 @@ static int xe_exec_begin(struct xe_engine *e, struct ww_acquire_ctx *ww,
> > return err;
> > }
> >
> > -static void xe_exec_end(struct xe_engine *e,
> > +static void xe_exec_end(struct xe_exec_queue *q,
> > struct ttm_validate_buffer *tv_onstack,
> > struct ttm_validate_buffer *tv,
> > struct ww_acquire_ctx *ww,
> > struct list_head *objs)
> > {
> > - if (!xe_vm_no_dma_fences(e->vm))
> > - xe_vm_unlock_dma_resv(e->vm, tv_onstack, tv, ww, objs);
> > + if (!xe_vm_no_dma_fences(q->vm))
> > + xe_vm_unlock_dma_resv(q->vm, tv_onstack, tv, ww, objs);
> > }
> >
> > int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> > @@ -170,7 +170,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> > struct drm_xe_exec *args = data;
> > struct drm_xe_sync __user *syncs_user = u64_to_user_ptr(args->syncs);
> > u64 __user *addresses_user = u64_to_user_ptr(args->address);
> > - struct xe_engine *engine;
> > + struct xe_exec_queue *q;
> > struct xe_sync_entry *syncs = NULL;
> > u64 addresses[XE_HW_ENGINE_MAX_INSTANCE];
> > struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
> > @@ -189,30 +189,30 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> > XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
> > return -EINVAL;
> >
> > - engine = xe_engine_lookup(xef, args->engine_id);
> > - if (XE_IOCTL_DBG(xe, !engine))
> > + q = xe_exec_queue_lookup(xef, args->exec_queue_id);
> > + if (XE_IOCTL_DBG(xe, !q))
> > return -ENOENT;
> >
> > - if (XE_IOCTL_DBG(xe, engine->flags & ENGINE_FLAG_VM))
> > + if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM))
> > return -EINVAL;
> >
> > - if (XE_IOCTL_DBG(xe, engine->width != args->num_batch_buffer))
> > + if (XE_IOCTL_DBG(xe, q->width != args->num_batch_buffer))
> > return -EINVAL;
> >
> > - if (XE_IOCTL_DBG(xe, engine->flags & ENGINE_FLAG_BANNED)) {
> > + if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_BANNED)) {
> > err = -ECANCELED;
> > - goto err_engine;
> > + goto err_exec_queue;
> > }
> >
> > if (args->num_syncs) {
> > syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
> > if (!syncs) {
> > err = -ENOMEM;
> > - goto err_engine;
> > + goto err_exec_queue;
> > }
> > }
> >
> > - vm = engine->vm;
> > + vm = q->vm;
> >
> > for (i = 0; i < args->num_syncs; i++) {
> > err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs++],
> > @@ -222,9 +222,9 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> > goto err_syncs;
> > }
> >
> > - if (xe_engine_is_parallel(engine)) {
> > + if (xe_exec_queue_is_parallel(q)) {
> > err = __copy_from_user(addresses, addresses_user, sizeof(u64) *
> > - engine->width);
> > + q->width);
> > if (err) {
> > err = -EFAULT;
> > goto err_syncs;
> > @@ -294,26 +294,26 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> > goto err_unlock_list;
> > }
> >
> > - err = xe_exec_begin(engine, &ww, tv_onstack, &tv, &objs);
> > + err = xe_exec_begin(q, &ww, tv_onstack, &tv, &objs);
> > if (err)
> > goto err_unlock_list;
> >
> > - if (xe_vm_is_closed_or_banned(engine->vm)) {
> > + if (xe_vm_is_closed_or_banned(q->vm)) {
> > drm_warn(&xe->drm, "Trying to schedule after vm is closed or banned\n");
> > err = -ECANCELED;
> > - goto err_engine_end;
> > + goto err_exec_queue_end;
> > }
> >
> > - if (xe_engine_is_lr(engine) && xe_engine_ring_full(engine)) {
> > + if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) {
> > err = -EWOULDBLOCK;
> > - goto err_engine_end;
> > + goto err_exec_queue_end;
> > }
> >
> > - job = xe_sched_job_create(engine, xe_engine_is_parallel(engine) ?
> > + job = xe_sched_job_create(q, xe_exec_queue_is_parallel(q) ?
> > addresses : &args->address);
> > if (IS_ERR(job)) {
> > err = PTR_ERR(job);
> > - goto err_engine_end;
> > + goto err_exec_queue_end;
> > }
> >
> > /*
> > @@ -395,8 +395,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> > xe_sync_entry_signal(&syncs[i], job,
> > &job->drm.s_fence->finished);
> >
> > - if (xe_engine_is_lr(engine))
> > - engine->ring_ops->emit_job(job);
> > + if (xe_exec_queue_is_lr(q))
> > + q->ring_ops->emit_job(job);
> > xe_sched_job_push(job);
> > xe_vm_reactivate_rebind(vm);
> >
> > @@ -412,8 +412,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> > err_put_job:
> > if (err)
> > xe_sched_job_put(job);
> > -err_engine_end:
> > - xe_exec_end(engine, tv_onstack, tv, &ww, &objs);
> > +err_exec_queue_end:
> > + xe_exec_end(q, tv_onstack, tv, &ww, &objs);
> > err_unlock_list:
> > if (write_locked)
> > up_write(&vm->lock);
> > @@ -425,8 +425,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> > for (i = 0; i < num_syncs; i++)
> > xe_sync_entry_cleanup(&syncs[i]);
> > kfree(syncs);
> > -err_engine:
> > - xe_engine_put(engine);
> > +err_exec_queue:
> > + xe_exec_queue_put(q);
> >
> > return err;
> > }
> > diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
> > new file mode 100644
> > index 000000000000..49cff0dd2bc7
> > --- /dev/null
> > +++ b/drivers/gpu/drm/xe/xe_exec_queue.c
> > @@ -0,0 +1,850 @@
> > +// SPDX-License-Identifier: MIT
> > +/*
> > + * Copyright © 2021 Intel Corporation
> > + */
> > +
> > +#include "xe_exec_queue.h"
> > +
> > +#include <linux/nospec.h>
> > +
> > +#include <drm/drm_device.h>
> > +#include <drm/drm_file.h>
> > +#include <drm/xe_drm.h>
> > +
> > +#include "xe_device.h"
> > +#include "xe_gt.h"
> > +#include "xe_hw_fence.h"
> > +#include "xe_lrc.h"
> > +#include "xe_macros.h"
> > +#include "xe_migrate.h"
> > +#include "xe_pm.h"
> > +#include "xe_ring_ops_types.h"
> > +#include "xe_trace.h"
> > +#include "xe_vm.h"
> > +
> > +static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
> > + struct xe_vm *vm,
> > + u32 logical_mask,
> > + u16 width, struct xe_hw_engine *hwe,
> > + u32 flags)
> > +{
> > + struct xe_exec_queue *q;
> > + struct xe_gt *gt = hwe->gt;
> > + int err;
> > + int i;
> > +
> > + q = kzalloc(sizeof(*q) + sizeof(struct xe_lrc) * width, GFP_KERNEL);
> > + if (!q)
> > + return ERR_PTR(-ENOMEM);
> > +
> > + kref_init(&q->refcount);
> > + q->flags = flags;
> > + q->hwe = hwe;
> > + q->gt = gt;
> > + if (vm)
> > + q->vm = xe_vm_get(vm);
> > + q->class = hwe->class;
> > + q->width = width;
> > + q->logical_mask = logical_mask;
> > + q->fence_irq = &gt->fence_irq[hwe->class];
> > + q->ring_ops = gt->ring_ops[hwe->class];
> > + q->ops = gt->exec_queue_ops;
> > + INIT_LIST_HEAD(&q->persistent.link);
> > + INIT_LIST_HEAD(&q->compute.link);
> > + INIT_LIST_HEAD(&q->multi_gt_link);
> > +
> > + /* FIXME: Wire up to configurable default value */
> > + q->sched_props.timeslice_us = 1 * 1000;
> > + q->sched_props.preempt_timeout_us = 640 * 1000;
> > +
> > + if (xe_exec_queue_is_parallel(q)) {
> > + q->parallel.composite_fence_ctx = dma_fence_context_alloc(1);
> > + q->parallel.composite_fence_seqno = XE_FENCE_INITIAL_SEQNO;
> > + }
> > + if (q->flags & EXEC_QUEUE_FLAG_VM) {
> > + q->bind.fence_ctx = dma_fence_context_alloc(1);
> > + q->bind.fence_seqno = XE_FENCE_INITIAL_SEQNO;
> > + }
> > +
> > + for (i = 0; i < width; ++i) {
> > + err = xe_lrc_init(q->lrc + i, hwe, q, vm, SZ_16K);
> > + if (err)
> > + goto err_lrc;
> > + }
> > +
> > + err = q->ops->init(q);
> > + if (err)
> > + goto err_lrc;
> > +
> > + /*
> > + * Normally the user vm holds an rpm ref to keep the device
> > + * awake, and the context holds a ref for the vm, however for
> > + * some exec queues we use the kernel's migrate vm underneath which
> > + * offers no such rpm ref. Make sure we keep a ref here, so we
> > + * can perform GuC CT actions when needed. Caller is expected to
> > + * have already grabbed the rpm ref outside any sensitive locks.
> > + */
> > + if (q->flags & EXEC_QUEUE_FLAG_VM)
> > + drm_WARN_ON(&xe->drm, !xe_device_mem_access_get_if_ongoing(xe));
> > +
> > + return q;
> > +
> > +err_lrc:
> > + for (i = i - 1; i >= 0; --i)
> > + xe_lrc_finish(q->lrc + i);
> > + kfree(q);
> > + return ERR_PTR(err);
> > +}
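
The rpm comment above is load-bearing: for EXEC_QUEUE_FLAG_VM queues the
caller takes the device reference, __xe_exec_queue_create() converts it
into one owned by the queue via xe_device_mem_access_get_if_ongoing(),
and xe_exec_queue_fini() drops it again. The create ioctl further down
follows exactly this shape; a trimmed sketch:

    xe_device_mem_access_get(xe);           /* caller-held rpm ref */
    q = xe_exec_queue_create(xe, migrate_vm, logical_mask, 1, hwe,
                             EXEC_QUEUE_FLAG_VM);
    xe_device_mem_access_put(xe);           /* queue now holds its own ref */
    if (IS_ERR(q))
            return PTR_ERR(q);
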
> > +
> > +struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
> > + u32 logical_mask, u16 width,
> > + struct xe_hw_engine *hwe, u32 flags)
> > +{
> > + struct ww_acquire_ctx ww;
> > + struct xe_exec_queue *q;
> > + int err;
> > +
> > + if (vm) {
> > + err = xe_vm_lock(vm, &ww, 0, true);
> > + if (err)
> > + return ERR_PTR(err);
> > + }
> > + q = __xe_exec_queue_create(xe, vm, logical_mask, width, hwe, flags);
> > + if (vm)
> > + xe_vm_unlock(vm, &ww);
> > +
> > + return q;
> > +}
> > +
> > +struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
> > + struct xe_vm *vm,
> > + enum xe_engine_class class, u32 flags)
> > +{
> > + struct xe_hw_engine *hwe, *hwe0 = NULL;
> > + enum xe_hw_engine_id id;
> > + u32 logical_mask = 0;
> > +
> > + for_each_hw_engine(hwe, gt, id) {
> > + if (xe_hw_engine_is_reserved(hwe))
> > + continue;
> > +
> > + if (hwe->class == class) {
> > + logical_mask |= BIT(hwe->logical_instance);
> > + if (!hwe0)
> > + hwe0 = hwe;
> > + }
> > + }
> > +
> > + if (!logical_mask)
> > + return ERR_PTR(-ENODEV);
> > +
> > + return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags);
> > +}
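
Kernel users go through one of the two creators above. A minimal sketch
of the lifecycle from the caller's side (hypothetical caller, error
handling trimmed):

    struct xe_exec_queue *q;

    q = xe_exec_queue_create_class(xe, gt, vm, XE_ENGINE_CLASS_COPY, 0);
    if (IS_ERR(q))
            return PTR_ERR(q);

    /* ... build and push jobs with xe_sched_job_create(q, ...) ... */

    xe_exec_queue_put(q);   /* final put ends up in xe_exec_queue_destroy() */
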
> > +
> > +void xe_exec_queue_destroy(struct kref *ref)
> > +{
> > + struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
> > + struct xe_exec_queue *eq, *next;
> > +
> > + if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
> > + list_for_each_entry_safe(eq, next, &q->multi_gt_list,
> > + multi_gt_link)
> > + xe_exec_queue_put(eq);
> > + }
> > +
> > + q->ops->fini(q);
> > +}
> > +
> > +void xe_exec_queue_fini(struct xe_exec_queue *q)
> > +{
> > + int i;
> > +
> > + for (i = 0; i < q->width; ++i)
> > + xe_lrc_finish(q->lrc + i);
> > + if (q->vm)
> > + xe_vm_put(q->vm);
> > + if (q->flags & EXEC_QUEUE_FLAG_VM)
> > + xe_device_mem_access_put(gt_to_xe(q->gt));
> > +
> > + kfree(q);
> > +}
> > +
> > +struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
> > +{
> > + struct xe_exec_queue *q;
> > +
> > + mutex_lock(&xef->exec_queue.lock);
> > + q = xa_load(&xef->exec_queue.xa, id);
> > + if (q)
> > + xe_exec_queue_get(q);
> > + mutex_unlock(&xef->exec_queue.lock);
> > +
> > + return q;
> > +}
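
Worth spelling out the lookup contract that every ioctl below relies on:
xe_exec_queue_lookup() returns with an extra reference held (or NULL for
a stale id), so each successful lookup must be paired with
xe_exec_queue_put(). The usual shape:

    struct xe_exec_queue *q;

    q = xe_exec_queue_lookup(xef, id);
    if (!q)
            return -ENOENT;

    /* q stays valid here even if the id is concurrently erased
     * by the destroy ioctl */

    xe_exec_queue_put(q);
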
> > +
> > +enum drm_sched_priority
> > +xe_exec_queue_device_get_max_priority(struct xe_device *xe)
> > +{
> > + return capable(CAP_SYS_NICE) ? DRM_SCHED_PRIORITY_HIGH :
> > + DRM_SCHED_PRIORITY_NORMAL;
> > +}
> > +
> > +static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
> > + u64 value, bool create)
> > +{
> > + if (XE_IOCTL_DBG(xe, value > DRM_SCHED_PRIORITY_HIGH))
> > + return -EINVAL;
> > +
> > + if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
> > + return -EPERM;
> > +
> > + return q->ops->set_priority(q, value);
> > +}
> > +
> > +static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
> > + u64 value, bool create)
> > +{
> > + if (!capable(CAP_SYS_NICE))
> > + return -EPERM;
> > +
> > + return q->ops->set_timeslice(q, value);
> > +}
> > +
> > +static int exec_queue_set_preemption_timeout(struct xe_device *xe,
> > + struct xe_exec_queue *q, u64 value,
> > + bool create)
> > +{
> > + if (!capable(CAP_SYS_NICE))
> > + return -EPERM;
> > +
> > + return q->ops->set_preempt_timeout(q, value);
> > +}
> > +
> > +static int exec_queue_set_compute_mode(struct xe_device *xe, struct xe_exec_queue *q,
> > + u64 value, bool create)
> > +{
> > + if (XE_IOCTL_DBG(xe, !create))
> > + return -EINVAL;
> > +
> > + if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_COMPUTE_MODE))
> > + return -EINVAL;
> > +
> > + if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM))
> > + return -EINVAL;
> > +
> > + if (value) {
> > + struct xe_vm *vm = q->vm;
> > + int err;
> > +
> > + if (XE_IOCTL_DBG(xe, xe_vm_in_fault_mode(vm)))
> > + return -EOPNOTSUPP;
> > +
> > + if (XE_IOCTL_DBG(xe, !xe_vm_in_compute_mode(vm)))
> > + return -EOPNOTSUPP;
> > +
> > + if (XE_IOCTL_DBG(xe, q->width != 1))
> > + return -EINVAL;
> > +
> > + q->compute.context = dma_fence_context_alloc(1);
> > + spin_lock_init(&q->compute.lock);
> > +
> > + err = xe_vm_add_compute_exec_queue(vm, q);
> > + if (XE_IOCTL_DBG(xe, err))
> > + return err;
> > +
> > + q->flags |= EXEC_QUEUE_FLAG_COMPUTE_MODE;
> > + q->flags &= ~EXEC_QUEUE_FLAG_PERSISTENT;
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +static int exec_queue_set_persistence(struct xe_device *xe, struct xe_exec_queue *q,
> > + u64 value, bool create)
> > +{
> > + if (XE_IOCTL_DBG(xe, !create))
> > + return -EINVAL;
> > +
> > + if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_COMPUTE_MODE))
> > + return -EINVAL;
> > +
> > + if (value)
> > + q->flags |= EXEC_QUEUE_FLAG_PERSISTENT;
> > + else
> > + q->flags &= ~EXEC_QUEUE_FLAG_PERSISTENT;
> > +
> > + return 0;
> > +}
> > +
> > +static int exec_queue_set_job_timeout(struct xe_device *xe, struct xe_exec_queue *q,
> > + u64 value, bool create)
> > +{
> > + if (XE_IOCTL_DBG(xe, !create))
> > + return -EINVAL;
> > +
> > + if (!capable(CAP_SYS_NICE))
> > + return -EPERM;
> > +
> > + return q->ops->set_job_timeout(q, value);
> > +}
> > +
> > +static int exec_queue_set_acc_trigger(struct xe_device *xe, struct xe_exec_queue *q,
> > + u64 value, bool create)
> > +{
> > + if (XE_IOCTL_DBG(xe, !create))
> > + return -EINVAL;
> > +
> > + if (XE_IOCTL_DBG(xe, !xe->info.supports_usm))
> > + return -EINVAL;
> > +
> > + q->usm.acc_trigger = value;
> > +
> > + return 0;
> > +}
> > +
> > +static int exec_queue_set_acc_notify(struct xe_device *xe, struct xe_exec_queue *q,
> > + u64 value, bool create)
> > +{
> > + if (XE_IOCTL_DBG(xe, !create))
> > + return -EINVAL;
> > +
> > + if (XE_IOCTL_DBG(xe, !xe->info.supports_usm))
> > + return -EINVAL;
> > +
> > + q->usm.acc_notify = value;
> > +
> > + return 0;
> > +}
> > +
> > +static int exec_queue_set_acc_granularity(struct xe_device *xe, struct xe_exec_queue *q,
> > + u64 value, bool create)
> > +{
> > + if (XE_IOCTL_DBG(xe, !create))
> > + return -EINVAL;
> > +
> > + if (XE_IOCTL_DBG(xe, !xe->info.supports_usm))
> > + return -EINVAL;
> > +
> > + q->usm.acc_granularity = value;
> > +
> > + return 0;
> > +}
> > +
> > +typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
> > + struct xe_exec_queue *q,
> > + u64 value, bool create);
> > +
> > +static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
> > + [XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
> > + [XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
> > + [XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT] = exec_queue_set_preemption_timeout,
> > + [XE_EXEC_QUEUE_SET_PROPERTY_COMPUTE_MODE] = exec_queue_set_compute_mode,
> > + [XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE] = exec_queue_set_persistence,
> > + [XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT] = exec_queue_set_job_timeout,
> > + [XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER] = exec_queue_set_acc_trigger,
> > + [XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY] = exec_queue_set_acc_notify,
> > + [XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY] = exec_queue_set_acc_granularity,
> > +};
> > +
> > +static int exec_queue_user_ext_set_property(struct xe_device *xe,
> > + struct xe_exec_queue *q,
> > + u64 extension,
> > + bool create)
> > +{
> > + u64 __user *address = u64_to_user_ptr(extension);
> > + struct drm_xe_ext_exec_queue_set_property ext;
> > + int err;
> > + u32 idx;
> > +
> > + err = __copy_from_user(&ext, address, sizeof(ext));
> > + if (XE_IOCTL_DBG(xe, err))
> > + return -EFAULT;
> > +
> > + if (XE_IOCTL_DBG(xe, ext.property >=
> > + ARRAY_SIZE(exec_queue_set_property_funcs)) ||
> > + XE_IOCTL_DBG(xe, ext.pad))
> > + return -EINVAL;
> > +
> > + idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
> > + return exec_queue_set_property_funcs[idx](xe, q, ext.value, create);
> > +}
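
Both dispatch sites here use the standard Spectre-v1 hardening pattern:
reject the untrusted index first, then sanitize it with
array_index_nospec() before it indexes the function table. Boiled down:

    u32 idx;

    /* property comes from userspace and is untrusted */
    if (property >= ARRAY_SIZE(exec_queue_set_property_funcs))
            return -EINVAL;

    idx = array_index_nospec(property,
                             ARRAY_SIZE(exec_queue_set_property_funcs));
    err = exec_queue_set_property_funcs[idx](xe, q, value, create);
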
> > +
> > +typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
> > + struct xe_exec_queue *q,
> > + u64 extension,
> > + bool create);
> > +
> > +static const xe_exec_queue_user_extension_fn exec_queue_user_extension_funcs[] = {
> > + [XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
> > +};
> > +
> > +#define MAX_USER_EXTENSIONS 16
> > +static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
> > + u64 extensions, int ext_number, bool create)
> > +{
> > + u64 __user *address = u64_to_user_ptr(extensions);
> > + struct xe_user_extension ext;
> > + int err;
> > + u32 idx;
> > +
> > + if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
> > + return -E2BIG;
> > +
> > + err = __copy_from_user(&ext, address, sizeof(ext));
> > + if (XE_IOCTL_DBG(xe, err))
> > + return -EFAULT;
> > +
> > + if (XE_IOCTL_DBG(xe, ext.pad) ||
> > + XE_IOCTL_DBG(xe, ext.name >=
> > + ARRAY_SIZE(exec_queue_user_extension_funcs)))
> > + return -EINVAL;
> > +
> > + idx = array_index_nospec(ext.name,
> > + ARRAY_SIZE(exec_queue_user_extension_funcs));
> > + err = exec_queue_user_extension_funcs[idx](xe, q, extensions, create);
> > + if (XE_IOCTL_DBG(xe, err))
> > + return err;
> > +
> > + if (ext.next_extension)
> > + return exec_queue_user_extensions(xe, q, ext.next_extension,
> > + ++ext_number, create);
> > +
> > + return 0;
> > +}
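
Seen from userspace, extensions are a singly linked chain: each one
embeds a struct xe_user_extension whose next_extension points at the
next, and the kernel walks the chain recursively, capped at
MAX_USER_EXTENSIONS. A sketch of chaining two set-property extensions at
create time (assuming the embedded header member is named 'base', as in
the other xe uapi extension structs):

    struct drm_xe_ext_exec_queue_set_property timeslice = {
            .base.name = XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
            .property = XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE,
            .value = 2000,
    };
    struct drm_xe_ext_exec_queue_set_property prio = {
            .base.name = XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
            .base.next_extension = (uintptr_t)&timeslice,
            .property = XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
            .value = 1,
    };

    create.extensions = (uintptr_t)&prio;   /* head of the chain */
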
> > +
> > +static const enum xe_engine_class user_to_xe_engine_class[] = {
> > + [DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
> > + [DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
> > + [DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
> > + [DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
> > + [DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
> > +};
> > +
> > +static struct xe_hw_engine *
> > +find_hw_engine(struct xe_device *xe,
> > + struct drm_xe_engine_class_instance eci)
> > +{
> > + u32 idx;
> > +
> > + if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
> > + return NULL;
> > +
> > + if (eci.gt_id >= xe->info.gt_count)
> > + return NULL;
> > +
> > + idx = array_index_nospec(eci.engine_class,
> > + ARRAY_SIZE(user_to_xe_engine_class));
> > +
> > + return xe_gt_hw_engine(xe_device_get_gt(xe, eci.gt_id),
> > + user_to_xe_engine_class[idx],
> > + eci.engine_instance, true);
> > +}
> > +
> > +static u32 bind_exec_queue_logical_mask(struct xe_device *xe, struct xe_gt *gt,
> > + struct drm_xe_engine_class_instance *eci,
> > + u16 width, u16 num_placements)
> > +{
> > + struct xe_hw_engine *hwe;
> > + enum xe_hw_engine_id id;
> > + u32 logical_mask = 0;
> > +
> > + if (XE_IOCTL_DBG(xe, width != 1))
> > + return 0;
> > + if (XE_IOCTL_DBG(xe, num_placements != 1))
> > + return 0;
> > + if (XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
> > + return 0;
> > +
> > + eci[0].engine_class = DRM_XE_ENGINE_CLASS_COPY;
> > +
> > + for_each_hw_engine(hwe, gt, id) {
> > + if (xe_hw_engine_is_reserved(hwe))
> > + continue;
> > +
> > + if (hwe->class ==
> > + user_to_xe_engine_class[DRM_XE_ENGINE_CLASS_COPY])
> > + logical_mask |= BIT(hwe->logical_instance);
> > + }
> > +
> > + return logical_mask;
> > +}
> > +
> > +static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
> > + struct drm_xe_engine_class_instance *eci,
> > + u16 width, u16 num_placements)
> > +{
> > + int len = width * num_placements;
> > + int i, j, n;
> > + u16 class;
> > + u16 gt_id;
> > + u32 return_mask = 0, prev_mask;
> > +
> > + if (XE_IOCTL_DBG(xe, !xe_device_guc_submission_enabled(xe) &&
> > + len > 1))
> > + return 0;
> > +
> > + for (i = 0; i < width; ++i) {
> > + u32 current_mask = 0;
> > +
> > + for (j = 0; j < num_placements; ++j) {
> > + struct xe_hw_engine *hwe;
> > +
> > + n = j * width + i;
> > +
> > + hwe = find_hw_engine(xe, eci[n]);
> > + if (XE_IOCTL_DBG(xe, !hwe))
> > + return 0;
> > +
> > + if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe)))
> > + return 0;
> > +
> > + if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) ||
> > + XE_IOCTL_DBG(xe, n && eci[n].engine_class != class))
> > + return 0;
> > +
> > + class = eci[n].engine_class;
> > + gt_id = eci[n].gt_id;
> > +
> > + if (width == 1 || !i)
> > + return_mask |= BIT(eci[n].engine_instance);
> > + current_mask |= BIT(eci[n].engine_instance);
> > + }
> > +
> > + /* Parallel submissions must be logically contiguous */
> > + if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1))
> > + return 0;
> > +
> > + prev_mask = current_mask;
> > + }
> > +
> > + return return_mask;
> > +}
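
A worked example of the indexing above, since n = j * width + i trips
people up: userspace passes eci[] as num_placements rows of width
entries. With width = 2 and num_placements = 2, eci[] = {0, 1, 2, 3}
describes placements (0,1) and (2,3); position 0 collects instances 0
and 2 into mask 0b0101, position 1 collects 1 and 3 into 0b1010, which
equals 0b0101 << 1, so it validates. Placements (0,2) and (1,3) instead
yield position masks 0b0011 and 0b1100, fail the shift check, and are
rejected as not logically contiguous.
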
> > +
> > +int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
> > + struct drm_file *file)
> > +{
> > + struct xe_device *xe = to_xe_device(dev);
> > + struct xe_file *xef = to_xe_file(file);
> > + struct drm_xe_exec_queue_create *args = data;
> > + struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
> > + struct drm_xe_engine_class_instance __user *user_eci =
> > + u64_to_user_ptr(args->instances);
> > + struct xe_hw_engine *hwe;
> > + struct xe_vm *vm, *migrate_vm;
> > + struct xe_gt *gt;
> > + struct xe_exec_queue *q = NULL;
> > + u32 logical_mask;
> > + u32 id;
> > + u32 len;
> > + int err;
> > +
> > + if (XE_IOCTL_DBG(xe, args->flags) ||
> > + XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
> > + return -EINVAL;
> > +
> > + len = args->width * args->num_placements;
> > + if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
> > + return -EINVAL;
> > +
> > + err = __copy_from_user(eci, user_eci,
> > + sizeof(struct drm_xe_engine_class_instance) *
> > + len);
> > + if (XE_IOCTL_DBG(xe, err))
> > + return -EFAULT;
> > +
> > + if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
> > + return -EINVAL;
> > +
> > + if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
> > + for_each_gt(gt, xe, id) {
> > + struct xe_exec_queue *new;
> > +
> > + if (xe_gt_is_media_type(gt))
> > + continue;
> > +
> > + eci[0].gt_id = gt->info.id;
> > + logical_mask = bind_exec_queue_logical_mask(xe, gt, eci,
> > + args->width,
> > + args->num_placements);
> > + if (XE_IOCTL_DBG(xe, !logical_mask))
> > + return -EINVAL;
> > +
> > + hwe = find_hw_engine(xe, eci[0]);
> > + if (XE_IOCTL_DBG(xe, !hwe))
> > + return -EINVAL;
> > +
> > + /* The migration vm doesn't hold rpm ref */
> > + xe_device_mem_access_get(xe);
> > +
> > + migrate_vm = xe_migrate_get_vm(gt_to_tile(gt)->migrate);
> > + new = xe_exec_queue_create(xe, migrate_vm, logical_mask,
> > + args->width, hwe,
> > + EXEC_QUEUE_FLAG_PERSISTENT |
> > + EXEC_QUEUE_FLAG_VM |
> > + (id ?
> > + EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD :
> > + 0));
> > +
> > + xe_device_mem_access_put(xe); /* now held by exec queue */
> > +
> > + xe_vm_put(migrate_vm);
> > + if (IS_ERR(new)) {
> > + err = PTR_ERR(new);
> > + if (q)
> > + goto put_exec_queue;
> > + return err;
> > + }
> > + if (id == 0)
> > + q = new;
> > + else
> > + list_add_tail(&new->multi_gt_list,
> > + &q->multi_gt_link);
> > + }
> > + } else {
> > + gt = xe_device_get_gt(xe, eci[0].gt_id);
> > + logical_mask = calc_validate_logical_mask(xe, gt, eci,
> > + args->width,
> > + args->num_placements);
> > + if (XE_IOCTL_DBG(xe, !logical_mask))
> > + return -EINVAL;
> > +
> > + hwe = find_hw_engine(xe, eci[0]);
> > + if (XE_IOCTL_DBG(xe, !hwe))
> > + return -EINVAL;
> > +
> > + vm = xe_vm_lookup(xef, args->vm_id);
> > + if (XE_IOCTL_DBG(xe, !vm))
> > + return -ENOENT;
> > +
> > + err = down_read_interruptible(&vm->lock);
> > + if (err) {
> > + xe_vm_put(vm);
> > + return err;
> > + }
> > +
> > + if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
> > + up_read(&vm->lock);
> > + xe_vm_put(vm);
> > + return -ENOENT;
> > + }
> > +
> > + q = xe_exec_queue_create(xe, vm, logical_mask,
> > + args->width, hwe,
> > + xe_vm_no_dma_fences(vm) ? 0 :
> > + EXEC_QUEUE_FLAG_PERSISTENT);
> > + up_read(&vm->lock);
> > + xe_vm_put(vm);
> > + if (IS_ERR(q))
> > + return PTR_ERR(q);
> > + }
> > +
> > + if (args->extensions) {
> > + err = exec_queue_user_extensions(xe, q, args->extensions, 0, true);
> > + if (XE_IOCTL_DBG(xe, err))
> > + goto put_exec_queue;
> > + }
> > +
> > + if (XE_IOCTL_DBG(xe, q->vm && xe_vm_in_compute_mode(q->vm) !=
> > + !!(q->flags & EXEC_QUEUE_FLAG_COMPUTE_MODE))) {
> > + err = -EOPNOTSUPP;
> > + goto put_exec_queue;
> > + }
> > +
> > + q->persistent.xef = xef;
> > +
> > + mutex_lock(&xef->exec_queue.lock);
> > + err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
> > + mutex_unlock(&xef->exec_queue.lock);
> > + if (err)
> > + goto put_exec_queue;
> > +
> > + args->exec_queue_id = id;
> > +
> > + return 0;
> > +
> > +put_exec_queue:
> > + xe_exec_queue_kill(q);
> > + xe_exec_queue_put(q);
> > + return err;
> > +}
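
For completeness, the userspace view after the rename. A minimal sketch,
assuming the uapi ioctl define is renamed the same way in this series
(DRM_IOCTL_XE_EXEC_QUEUE_CREATE):

    struct drm_xe_engine_class_instance eci = {
            .engine_class = DRM_XE_ENGINE_CLASS_COPY,
            .engine_instance = 0,
            .gt_id = 0,
    };
    struct drm_xe_exec_queue_create create = {
            .width = 1,
            .num_placements = 1,
            .vm_id = vm_id,
            .instances = (uintptr_t)&eci,
    };

    if (ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create))
            return -errno;
    /* create.exec_queue_id now names the queue for exec/destroy */
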
> > +
> > +int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
> > + struct drm_file *file)
> > +{
> > + struct xe_device *xe = to_xe_device(dev);
> > + struct xe_file *xef = to_xe_file(file);
> > + struct drm_xe_exec_queue_get_property *args = data;
> > + struct xe_exec_queue *q;
> > + int ret;
> > +
> > + if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
> > + return -EINVAL;
> > +
> > + q = xe_exec_queue_lookup(xef, args->exec_queue_id);
> > + if (XE_IOCTL_DBG(xe, !q))
> > + return -ENOENT;
> > +
> > + switch (args->property) {
> > + case XE_EXEC_QUEUE_GET_PROPERTY_BAN:
> > + args->value = !!(q->flags & EXEC_QUEUE_FLAG_BANNED);
> > + ret = 0;
> > + break;
> > + default:
> > + ret = -EINVAL;
> > + }
> > +
> > + xe_exec_queue_put(q);
> > +
> > + return ret;
> > +}
> > +
> > +static void exec_queue_kill_compute(struct xe_exec_queue *q)
> > +{
> > + if (!xe_vm_in_compute_mode(q->vm))
> > + return;
> > +
> > + down_write(&q->vm->lock);
> > + list_del(&q->compute.link);
> > + --q->vm->preempt.num_engines;
> > + if (q->compute.pfence) {
> > + dma_fence_enable_sw_signaling(q->compute.pfence);
> > + dma_fence_put(q->compute.pfence);
> > + q->compute.pfence = NULL;
> > + }
> > + up_write(&q->vm->lock);
> > +}
> > +
> > +/**
> > + * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
> > + * @q: The exec_queue
> > + *
> > + * Return: True if the exec_queue is long-running, false otherwise.
> > + */
> > +bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
> > +{
> > + return q->vm && xe_vm_no_dma_fences(q->vm) &&
> > + !(q->flags & EXEC_QUEUE_FLAG_VM);
> > +}
> > +
> > +static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q)
> > +{
> > + return q->lrc->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc) - 1;
> > +}
> > +
> > +/**
> > + * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
> > + * @q: The exec_queue
> > + *
> > + * Return: True if the exec_queue's ring is full, false otherwise.
> > + */
> > +bool xe_exec_queue_ring_full(struct xe_exec_queue *q)
> > +{
> > + struct xe_lrc *lrc = q->lrc;
> > + s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;
> > +
> > + return xe_exec_queue_num_job_inflight(q) >= max_job;
> > +}
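
The inflight arithmetic reads tersely: fence_ctx.next_seqno is the seqno
the *next* job would get, xe_lrc_seqno() is the last seqno the hardware
signalled, and the extra -1 accounts for next_seqno being one ahead of
the newest submitted job. For example, with next_seqno = 12 and a
signalled seqno of 8, jobs 9..11 are still inflight: 12 - 8 - 1 = 3. The
ring counts as full once that number reaches
ring.size / MAX_JOB_SIZE_BYTES, i.e. once space for a worst-case job is
no longer guaranteed.
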
> > +
> > +/**
> > + * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
> > + * @q: The exec_queue
> > + *
> > + * FIXME: Need to determine what to use as the short-lived
> > + * timeline lock for the exec_queues, so that the return value
> > + * of this function becomes more than just an advisory
> > + * snapshot in time. The timeline lock must protect the
> > + * seqno from racing submissions on the same exec_queue.
> > + * Typically vm->resv, but user-created timeline locks use the migrate vm
> > + * and never grab the migrate vm->resv, so we have a race there.
> > + *
> > + * Return: True if the exec_queue is idle, false otherwise.
> > + */
> > +bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
> > +{
> > + if (XE_WARN_ON(xe_exec_queue_is_parallel(q)))
> > + return false;
> > +
> > + return xe_lrc_seqno(&q->lrc[0]) ==
> > + q->lrc[0].fence_ctx.next_seqno - 1;
> > +}
> > +
> > +void xe_exec_queue_kill(struct xe_exec_queue *q)
> > +{
> > + struct xe_exec_queue *eq, *next;
> > +
> > + list_for_each_entry_safe(eq, next, &q->multi_gt_list,
> > + multi_gt_link) {
> > + q->ops->kill(eq);
> > + exec_queue_kill_compute(eq);
> > + }
> > +
> > + q->ops->kill(q);
> > + exec_queue_kill_compute(q);
> > +}
> > +
> > +int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
> > + struct drm_file *file)
> > +{
> > + struct xe_device *xe = to_xe_device(dev);
> > + struct xe_file *xef = to_xe_file(file);
> > + struct drm_xe_exec_queue_destroy *args = data;
> > + struct xe_exec_queue *q;
> > +
> > + if (XE_IOCTL_DBG(xe, args->pad) ||
> > + XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
> > + return -EINVAL;
> > +
> > + mutex_lock(&xef->exec_queue.lock);
> > + q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
> > + mutex_unlock(&xef->exec_queue.lock);
> > + if (XE_IOCTL_DBG(xe, !q))
> > + return -ENOENT;
> > +
> > + if (!(q->flags & EXEC_QUEUE_FLAG_PERSISTENT))
> > + xe_exec_queue_kill(q);
> > + else
> > + xe_device_add_persistent_exec_queues(xe, q);
> > +
> > + trace_xe_exec_queue_close(q);
> > + xe_exec_queue_put(q);
> > +
> > + return 0;
> > +}
> > +
> > +int xe_exec_queue_set_property_ioctl(struct drm_device *dev, void *data,
> > + struct drm_file *file)
> > +{
> > + struct xe_device *xe = to_xe_device(dev);
> > + struct xe_file *xef = to_xe_file(file);
> > + struct drm_xe_exec_queue_set_property *args = data;
> > + struct xe_exec_queue *q;
> > + int ret;
> > + u32 idx;
> > +
> > + if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
> > + return -EINVAL;
> > +
> > + q = xe_exec_queue_lookup(xef, args->exec_queue_id);
> > + if (XE_IOCTL_DBG(xe, !q))
> > + return -ENOENT;
> > +
> > + if (XE_IOCTL_DBG(xe, args->property >=
> > + ARRAY_SIZE(exec_queue_set_property_funcs))) {
> > + ret = -EINVAL;
> > + goto out;
> > + }
> > +
> > + idx = array_index_nospec(args->property,
> > + ARRAY_SIZE(exec_queue_set_property_funcs));
> > + ret = exec_queue_set_property_funcs[idx](xe, q, args->value, false);
> > + if (XE_IOCTL_DBG(xe, ret))
> > + goto out;
> > +
> > + if (args->extensions)
> > + ret = exec_queue_user_extensions(xe, q, args->extensions, 0,
> > + false);
> > +out:
> > + xe_exec_queue_put(q);
> > +
> > + return ret;
> > +}
> > diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h
> > new file mode 100644
> > index 000000000000..024bc26a7006
> > --- /dev/null
> > +++ b/drivers/gpu/drm/xe/xe_exec_queue.h
> > @@ -0,0 +1,63 @@
> > +/* SPDX-License-Identifier: MIT */
> > +/*
> > + * Copyright © 2021 Intel Corporation
> > + */
> > +
> > +#ifndef _XE_EXEC_QUEUE_H_
> > +#define _XE_EXEC_QUEUE_H_
> > +
> > +#include "xe_exec_queue_types.h"
> > +#include "xe_vm_types.h"
> > +
> > +struct drm_device;
> > +struct drm_file;
> > +struct xe_device;
> > +struct xe_file;
> > +
> > +struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
> > + u32 logical_mask, u16 width,
> > + struct xe_hw_engine *hw_engine, u32 flags);
> > +struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
> > + struct xe_vm *vm,
> > + enum xe_engine_class class, u32 flags);
> > +
> > +void xe_exec_queue_fini(struct xe_exec_queue *q);
> > +void xe_exec_queue_destroy(struct kref *ref);
> > +
> > +struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id);
> > +
> > +static inline struct xe_exec_queue *xe_exec_queue_get(struct xe_exec_queue *q)
> > +{
> > + kref_get(&q->refcount);
> > + return q;
> > +}
> > +
> > +static inline void xe_exec_queue_put(struct xe_exec_queue *q)
> > +{
> > + kref_put(&q->refcount, xe_exec_queue_destroy);
> > +}
> > +
> > +static inline bool xe_exec_queue_is_parallel(struct xe_exec_queue *q)
> > +{
> > + return q->width > 1;
> > +}
> > +
> > +bool xe_exec_queue_is_lr(struct xe_exec_queue *q);
> > +
> > +bool xe_exec_queue_ring_full(struct xe_exec_queue *q);
> > +
> > +bool xe_exec_queue_is_idle(struct xe_exec_queue *q);
> > +
> > +void xe_exec_queue_kill(struct xe_exec_queue *q);
> > +
> > +int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
> > + struct drm_file *file);
> > +int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
> > + struct drm_file *file);
> > +int xe_exec_queue_set_property_ioctl(struct drm_device *dev, void *data,
> > + struct drm_file *file);
> > +int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
> > + struct drm_file *file);
> > +enum drm_sched_priority xe_exec_queue_device_get_max_priority(struct xe_device *xe);
> > +
> > +#endif
> > diff --git a/drivers/gpu/drm/xe/xe_engine_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
> > similarity index 52%
> > rename from drivers/gpu/drm/xe/xe_engine_types.h
> > rename to drivers/gpu/drm/xe/xe_exec_queue_types.h
> > index 7aa5d9ef7896..e3ae11b63e4a 100644
> > --- a/drivers/gpu/drm/xe/xe_engine_types.h
> > +++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
> > @@ -3,8 +3,8 @@
> > * Copyright © 2022 Intel Corporation
> > */
> >
> > -#ifndef _XE_ENGINE_TYPES_H_
> > -#define _XE_ENGINE_TYPES_H_
> > +#ifndef _XE_EXEC_QUEUE_TYPES_H_
> > +#define _XE_EXEC_QUEUE_TYPES_H_
> >
> > #include <linux/kref.h>
> >
> > @@ -14,20 +14,20 @@
> > #include "xe_hw_fence_types.h"
> > #include "xe_lrc_types.h"
> >
> > -struct xe_execlist_engine;
> > +struct xe_execlist_exec_queue;
> > struct xe_gt;
> > -struct xe_guc_engine;
> > +struct xe_guc_exec_queue;
> > struct xe_hw_engine;
> > struct xe_vm;
> >
> > /**
> > - * struct xe_engine - Submission engine
> > + * struct xe_exec_queue - Execution queue
> > *
> > * Contains all state necessary for submissions. Can either be a user object or
> > * a kernel object.
> > */
> > -struct xe_engine {
> > - /** @gt: graphics tile this engine can submit to */
> > +struct xe_exec_queue {
> > + /** @gt: graphics tile this exec queue can submit to */
> > struct xe_gt *gt;
> > /**
> > - * @hwe: A hardware of the same class. May (physical engine) or may not
> > + * @hwe: A hardware engine of the same class. May (physical engine) or may not
> > @@ -35,34 +35,34 @@ struct xe_engine {
> > * really be used for submissions.
> > */
> > struct xe_hw_engine *hwe;
> > - /** @refcount: ref count of this engine */
> > + /** @refcount: ref count of this exec queue */
> > struct kref refcount;
> > - /** @vm: VM (address space) for this engine */
> > + /** @vm: VM (address space) for this exec queue */
> > struct xe_vm *vm;
> > - /** @class: class of this engine */
> > + /** @class: class of this exec queue */
> > enum xe_engine_class class;
> > /**
> > - * @logical_mask: logical mask of where job submitted to engine can run
> > + * @logical_mask: logical mask of where job submitted to exec queue can run
> > */
> > u32 logical_mask;
> > - /** @name: name of this engine */
> > + /** @name: name of this exec queue */
> > char name[MAX_FENCE_NAME_LEN];
> > - /** @width: width (number BB submitted per exec) of this engine */
> > + /** @width: width (number of BBs submitted per exec) of this exec queue */
> > u16 width;
> > /** @fence_irq: fence IRQ used to signal job completion */
> > struct xe_hw_fence_irq *fence_irq;
> >
> > -#define ENGINE_FLAG_BANNED BIT(0)
> > -#define ENGINE_FLAG_KERNEL BIT(1)
> > -#define ENGINE_FLAG_PERSISTENT BIT(2)
> > -#define ENGINE_FLAG_COMPUTE_MODE BIT(3)
> > -/* Caller needs to hold rpm ref when creating engine with ENGINE_FLAG_VM */
> > -#define ENGINE_FLAG_VM BIT(4)
> > -#define ENGINE_FLAG_BIND_ENGINE_CHILD BIT(5)
> > -#define ENGINE_FLAG_WA BIT(6)
> > +#define EXEC_QUEUE_FLAG_BANNED BIT(0)
> > +#define EXEC_QUEUE_FLAG_KERNEL BIT(1)
> > +#define EXEC_QUEUE_FLAG_PERSISTENT BIT(2)
> > +#define EXEC_QUEUE_FLAG_COMPUTE_MODE BIT(3)
> > +/* Caller needs to hold rpm ref when creating exec queue with EXEC_QUEUE_FLAG_VM */
> > +#define EXEC_QUEUE_FLAG_VM BIT(4)
> > +#define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD BIT(5)
> > +#define EXEC_QUEUE_FLAG_WA BIT(6)
> >
> > /**
> > - * @flags: flags for this engine, should statically setup aside from ban
> > + * @flags: flags for this exec queue, should be statically set up aside from ban
> > * bit
> > */
> > unsigned long flags;
> > @@ -75,19 +75,19 @@ struct xe_engine {
> > };
> >
> > union {
> > - /** @execlist: execlist backend specific state for engine */
> > - struct xe_execlist_engine *execlist;
> > - /** @guc: GuC backend specific state for engine */
> > - struct xe_guc_engine *guc;
> > + /** @execlist: execlist backend specific state for exec queue */
> > + struct xe_execlist_exec_queue *execlist;
> > + /** @guc: GuC backend specific state for exec queue */
> > + struct xe_guc_exec_queue *guc;
> > };
> >
> > /**
> > - * @persistent: persistent engine state
> > + * @persistent: persistent exec queue state
> > */
> > struct {
> > - /** @xef: file which this engine belongs to */
> > + /** @xef: file which this exec queue belongs to */
> > struct xe_file *xef;
> > - /** @link: link in list of persistent engines */
> > + /** @link: link in list of persistent exec queues */
> > struct list_head link;
> > } persistent;
> >
> > @@ -144,53 +144,53 @@ struct xe_engine {
> > u32 acc_granularity;
> > } usm;
> >
> > - /** @ops: submission backend engine operations */
> > - const struct xe_engine_ops *ops;
> > + /** @ops: submission backend exec queue operations */
> > + const struct xe_exec_queue_ops *ops;
> >
> > - /** @ring_ops: ring operations for this engine */
> > + /** @ring_ops: ring operations for this exec queue */
> > const struct xe_ring_ops *ring_ops;
> > - /** @entity: DRM sched entity for this engine (1 to 1 relationship) */
> > + /** @entity: DRM sched entity for this exec queue (1 to 1 relationship) */
> > struct drm_sched_entity *entity;
> > - /** @lrc: logical ring context for this engine */
> > + /** @lrc: logical ring context for this exec queue */
> > struct xe_lrc lrc[];
> > };
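
One detail the rename keeps intact: lrc[] is a flexible array, sized at
allocation time by the kzalloc() in __xe_exec_queue_create(). The
overflow-safe helper from linux/overflow.h expresses the same
allocation, should that line ever be reworked (a suggestion, not part of
this patch):

    q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL);
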
> >
> > /**
> > - * struct xe_engine_ops - Submission backend engine operations
> > + * struct xe_exec_queue_ops - Submission backend exec queue operations
> > */
> > -struct xe_engine_ops {
> > - /** @init: Initialize engine for submission backend */
> > - int (*init)(struct xe_engine *e);
> > +struct xe_exec_queue_ops {
> > + /** @init: Initialize exec queue for submission backend */
> > + int (*init)(struct xe_exec_queue *q);
> > /** @kill: Kill inflight submissions for backend */
> > - void (*kill)(struct xe_engine *e);
> > - /** @fini: Fini engine for submission backend */
> > - void (*fini)(struct xe_engine *e);
> > - /** @set_priority: Set priority for engine */
> > - int (*set_priority)(struct xe_engine *e,
> > + void (*kill)(struct xe_exec_queue *q);
> > + /** @fini: Fini exec queue for submission backend */
> > + void (*fini)(struct xe_exec_queue *q);
> > + /** @set_priority: Set priority for exec queue */
> > + int (*set_priority)(struct xe_exec_queue *q,
> > enum drm_sched_priority priority);
> > - /** @set_timeslice: Set timeslice for engine */
> > - int (*set_timeslice)(struct xe_engine *e, u32 timeslice_us);
> > - /** @set_preempt_timeout: Set preemption timeout for engine */
> > - int (*set_preempt_timeout)(struct xe_engine *e, u32 preempt_timeout_us);
> > - /** @set_job_timeout: Set job timeout for engine */
> > - int (*set_job_timeout)(struct xe_engine *e, u32 job_timeout_ms);
> > + /** @set_timeslice: Set timeslice for exec queue */
> > + int (*set_timeslice)(struct xe_exec_queue *q, u32 timeslice_us);
> > + /** @set_preempt_timeout: Set preemption timeout for exec queue */
> > + int (*set_preempt_timeout)(struct xe_exec_queue *q, u32 preempt_timeout_us);
> > + /** @set_job_timeout: Set job timeout for exec queue */
> > + int (*set_job_timeout)(struct xe_exec_queue *q, u32 job_timeout_ms);
> > /**
> > - * @suspend: Suspend engine from executing, allowed to be called
> > + * @suspend: Suspend exec queue from executing, allowed to be called
> > - * multiple times in a row before resume with the caveat that
> > - * suspend_wait returns before calling suspend again.
> > + * multiple times in a row before resume, with the caveat that
> > + * suspend_wait must return before suspend is called again.
> > */
> > - int (*suspend)(struct xe_engine *e);
> > + int (*suspend)(struct xe_exec_queue *q);
> > /**
> > - * @suspend_wait: Wait for an engine to suspend executing, should be
> > + * @suspend_wait: Wait for an exec queue to suspend executing, should be
> > - * call after suspend.
> > + * called after suspend.
> > */
> > - void (*suspend_wait)(struct xe_engine *e);
> > + void (*suspend_wait)(struct xe_exec_queue *q);
> > /**
> > - * @resume: Resume engine execution, engine must be in a suspended
> > + * @resume: Resume exec queue execution, exec queue must be in a suspended
> > * state and dma fence returned from most recent suspend call must be
> > * signalled when this function is called.
> > */
> > - void (*resume)(struct xe_engine *e);
> > + void (*resume)(struct xe_exec_queue *q);
> > };
> >
> > #endif
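
To make the vtable concrete: a backend fills this in and assigns it to
gt->exec_queue_ops, as the execlist code below and the GuC code in
xe_guc_submit.c do. A deliberately minimal, hypothetical backend; stubs
are legitimate, as the execlist NIY entries show:

    static int nop_exec_queue_init(struct xe_exec_queue *q)
    {
            /* set up scheduler state, assign q->entity, name the queue */
            return 0;
    }

    static void nop_exec_queue_kill(struct xe_exec_queue *q)
    {
    }

    static void nop_exec_queue_fini(struct xe_exec_queue *q)
    {
            /* a real backend must eventually call this to free the queue */
            xe_exec_queue_fini(q);
    }

    static const struct xe_exec_queue_ops nop_exec_queue_ops = {
            .init = nop_exec_queue_init,
            .kill = nop_exec_queue_kill,
            .fini = nop_exec_queue_fini,
            /* remaining ops elided for brevity */
    };
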
> > diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
> > index 0f9d919405b0..8ef98c7fd8e2 100644
> > --- a/drivers/gpu/drm/xe/xe_execlist.c
> > +++ b/drivers/gpu/drm/xe/xe_execlist.c
> > @@ -14,7 +14,7 @@
> > #include "regs/xe_regs.h"
> > #include "xe_bo.h"
> > #include "xe_device.h"
> > -#include "xe_engine.h"
> > +#include "xe_exec_queue.h"
> > #include "xe_gt.h"
> > #include "xe_hw_fence.h"
> > #include "xe_lrc.h"
> > @@ -91,7 +91,7 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc,
> > }
> >
> > static void __xe_execlist_port_start(struct xe_execlist_port *port,
> > - struct xe_execlist_engine *exl)
> > + struct xe_execlist_exec_queue *exl)
> > {
> > struct xe_device *xe = gt_to_xe(port->hwe->gt);
> > int max_ctx = FIELD_MAX(GEN11_SW_CTX_ID);
> > @@ -109,7 +109,7 @@ static void __xe_execlist_port_start(struct xe_execlist_port *port,
> > port->last_ctx_id = 1;
> > }
> >
> > - __start_lrc(port->hwe, exl->engine->lrc, port->last_ctx_id);
> > + __start_lrc(port->hwe, exl->q->lrc, port->last_ctx_id);
> > port->running_exl = exl;
> > exl->has_run = true;
> > }
> > @@ -128,16 +128,16 @@ static void __xe_execlist_port_idle(struct xe_execlist_port *port)
> > port->running_exl = NULL;
> > }
> >
> > -static bool xe_execlist_is_idle(struct xe_execlist_engine *exl)
> > +static bool xe_execlist_is_idle(struct xe_execlist_exec_queue *exl)
> > {
> > - struct xe_lrc *lrc = exl->engine->lrc;
> > + struct xe_lrc *lrc = exl->q->lrc;
> >
> > return lrc->ring.tail == lrc->ring.old_tail;
> > }
> >
> > static void __xe_execlist_port_start_next_active(struct xe_execlist_port *port)
> > {
> > - struct xe_execlist_engine *exl = NULL;
> > + struct xe_execlist_exec_queue *exl = NULL;
> > int i;
> >
> > xe_execlist_port_assert_held(port);
> > @@ -145,7 +145,7 @@ static void __xe_execlist_port_start_next_active(struct xe_execlist_port *port)
> > for (i = ARRAY_SIZE(port->active) - 1; i >= 0; i--) {
> > while (!list_empty(&port->active[i])) {
> > exl = list_first_entry(&port->active[i],
> > - struct xe_execlist_engine,
> > + struct xe_execlist_exec_queue,
> > active_link);
> > list_del(&exl->active_link);
> >
> > @@ -208,7 +208,7 @@ static void xe_execlist_port_wake_locked(struct xe_execlist_port *port,
> > __xe_execlist_port_start_next_active(port);
> > }
> >
> > -static void xe_execlist_make_active(struct xe_execlist_engine *exl)
> > +static void xe_execlist_make_active(struct xe_execlist_exec_queue *exl)
> > {
> > struct xe_execlist_port *port = exl->port;
> > enum drm_sched_priority priority = exl->entity.priority;
> > @@ -293,10 +293,10 @@ static struct dma_fence *
> > execlist_run_job(struct drm_sched_job *drm_job)
> > {
> > struct xe_sched_job *job = to_xe_sched_job(drm_job);
> > - struct xe_engine *e = job->engine;
> > - struct xe_execlist_engine *exl = job->engine->execlist;
> > + struct xe_exec_queue *q = job->q;
> > + struct xe_execlist_exec_queue *exl = job->q->execlist;
> >
> > - e->ring_ops->emit_job(job);
> > + q->ring_ops->emit_job(job);
> > xe_execlist_make_active(exl);
> >
> > return dma_fence_get(job->fence);
> > @@ -314,11 +314,11 @@ static const struct drm_sched_backend_ops drm_sched_ops = {
> > .free_job = execlist_job_free,
> > };
> >
> > -static int execlist_engine_init(struct xe_engine *e)
> > +static int execlist_exec_queue_init(struct xe_exec_queue *q)
> > {
> > struct drm_gpu_scheduler *sched;
> > - struct xe_execlist_engine *exl;
> > - struct xe_device *xe = gt_to_xe(e->gt);
> > + struct xe_execlist_exec_queue *exl;
> > + struct xe_device *xe = gt_to_xe(q->gt);
> > int err;
> >
> > XE_WARN_ON(xe_device_guc_submission_enabled(xe));
> > @@ -329,14 +329,14 @@ static int execlist_engine_init(struct xe_engine *e)
> > if (!exl)
> > return -ENOMEM;
> >
> > - exl->engine = e;
> > + exl->q = q;
> >
> > err = drm_sched_init(&exl->sched, &drm_sched_ops, NULL,
> > - e->lrc[0].ring.size / MAX_JOB_SIZE_BYTES,
> > + q->lrc[0].ring.size / MAX_JOB_SIZE_BYTES,
> > XE_SCHED_HANG_LIMIT, XE_SCHED_JOB_TIMEOUT,
> > - NULL, NULL, e->hwe->name,
> > + NULL, NULL, q->hwe->name,
> > DRM_SCHED_POLICY_SINGLE_ENTITY,
> > - gt_to_xe(e->gt)->drm.dev);
> > + gt_to_xe(q->gt)->drm.dev);
> > if (err)
> > goto err_free;
> >
> > @@ -346,30 +346,30 @@ static int execlist_engine_init(struct xe_engine *e)
> > if (err)
> > goto err_sched;
> >
> > - exl->port = e->hwe->exl_port;
> > + exl->port = q->hwe->exl_port;
> > exl->has_run = false;
> > exl->active_priority = DRM_SCHED_PRIORITY_UNSET;
> > - e->execlist = exl;
> > - e->entity = &exl->entity;
> > + q->execlist = exl;
> > + q->entity = &exl->entity;
> >
> > - switch (e->class) {
> > + switch (q->class) {
> > case XE_ENGINE_CLASS_RENDER:
> > - sprintf(e->name, "rcs%d", ffs(e->logical_mask) - 1);
> > + sprintf(q->name, "rcs%d", ffs(q->logical_mask) - 1);
> > break;
> > case XE_ENGINE_CLASS_VIDEO_DECODE:
> > - sprintf(e->name, "vcs%d", ffs(e->logical_mask) - 1);
> > + sprintf(q->name, "vcs%d", ffs(q->logical_mask) - 1);
> > break;
> > case XE_ENGINE_CLASS_VIDEO_ENHANCE:
> > - sprintf(e->name, "vecs%d", ffs(e->logical_mask) - 1);
> > + sprintf(q->name, "vecs%d", ffs(q->logical_mask) - 1);
> > break;
> > case XE_ENGINE_CLASS_COPY:
> > - sprintf(e->name, "bcs%d", ffs(e->logical_mask) - 1);
> > + sprintf(q->name, "bcs%d", ffs(q->logical_mask) - 1);
> > break;
> > case XE_ENGINE_CLASS_COMPUTE:
> > - sprintf(e->name, "ccs%d", ffs(e->logical_mask) - 1);
> > + sprintf(q->name, "ccs%d", ffs(q->logical_mask) - 1);
> > break;
> > default:
> > - XE_WARN_ON(e->class);
> > + XE_WARN_ON(q->class);
> > }
> >
> > return 0;
> > @@ -381,96 +381,96 @@ static int execlist_engine_init(struct xe_engine *e)
> > return err;
> > }
> >
> > -static void execlist_engine_fini_async(struct work_struct *w)
> > +static void execlist_exec_queue_fini_async(struct work_struct *w)
> > {
> > - struct xe_execlist_engine *ee =
> > - container_of(w, struct xe_execlist_engine, fini_async);
> > - struct xe_engine *e = ee->engine;
> > - struct xe_execlist_engine *exl = e->execlist;
> > + struct xe_execlist_exec_queue *ee =
> > + container_of(w, struct xe_execlist_exec_queue, fini_async);
> > + struct xe_exec_queue *q = ee->q;
> > + struct xe_execlist_exec_queue *exl = q->execlist;
> > unsigned long flags;
> >
> > - XE_WARN_ON(xe_device_guc_submission_enabled(gt_to_xe(e->gt)));
> > + XE_WARN_ON(xe_device_guc_submission_enabled(gt_to_xe(q->gt)));
> >
> > spin_lock_irqsave(&exl->port->lock, flags);
> > if (WARN_ON(exl->active_priority != DRM_SCHED_PRIORITY_UNSET))
> > list_del(&exl->active_link);
> > spin_unlock_irqrestore(&exl->port->lock, flags);
> >
> > - if (e->flags & ENGINE_FLAG_PERSISTENT)
> > - xe_device_remove_persistent_engines(gt_to_xe(e->gt), e);
> > + if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT)
> > + xe_device_remove_persistent_exec_queues(gt_to_xe(q->gt), q);
> > drm_sched_entity_fini(&exl->entity);
> > drm_sched_fini(&exl->sched);
> > kfree(exl);
> >
> > - xe_engine_fini(e);
> > + xe_exec_queue_fini(q);
> > }
> >
> > -static void execlist_engine_kill(struct xe_engine *e)
> > +static void execlist_exec_queue_kill(struct xe_exec_queue *q)
> > {
> > /* NIY */
> > }
> >
> > -static void execlist_engine_fini(struct xe_engine *e)
> > +static void execlist_exec_queue_fini(struct xe_exec_queue *q)
> > {
> > - INIT_WORK(&e->execlist->fini_async, execlist_engine_fini_async);
> > - queue_work(system_unbound_wq, &e->execlist->fini_async);
> > + INIT_WORK(&q->execlist->fini_async, execlist_exec_queue_fini_async);
> > + queue_work(system_unbound_wq, &q->execlist->fini_async);
> > }
> >
> > -static int execlist_engine_set_priority(struct xe_engine *e,
> > - enum drm_sched_priority priority)
> > +static int execlist_exec_queue_set_priority(struct xe_exec_queue *q,
> > + enum drm_sched_priority priority)
> > {
> > /* NIY */
> > return 0;
> > }
> >
> > -static int execlist_engine_set_timeslice(struct xe_engine *e, u32 timeslice_us)
> > +static int execlist_exec_queue_set_timeslice(struct xe_exec_queue *q, u32 timeslice_us)
> > {
> > /* NIY */
> > return 0;
> > }
> >
> > -static int execlist_engine_set_preempt_timeout(struct xe_engine *e,
> > - u32 preempt_timeout_us)
> > +static int execlist_exec_queue_set_preempt_timeout(struct xe_exec_queue *q,
> > + u32 preempt_timeout_us)
> > {
> > /* NIY */
> > return 0;
> > }
> >
> > -static int execlist_engine_set_job_timeout(struct xe_engine *e,
> > - u32 job_timeout_ms)
> > +static int execlist_exec_queue_set_job_timeout(struct xe_exec_queue *q,
> > + u32 job_timeout_ms)
> > {
> > /* NIY */
> > return 0;
> > }
> >
> > -static int execlist_engine_suspend(struct xe_engine *e)
> > +static int execlist_exec_queue_suspend(struct xe_exec_queue *q)
> > {
> > /* NIY */
> > return 0;
> > }
> >
> > -static void execlist_engine_suspend_wait(struct xe_engine *e)
> > +static void execlist_exec_queue_suspend_wait(struct xe_exec_queue *q)
> >
> > {
> > /* NIY */
> > }
> >
> > -static void execlist_engine_resume(struct xe_engine *e)
> > +static void execlist_exec_queue_resume(struct xe_exec_queue *q)
> > {
> > /* NIY */
> > }
> >
> > -static const struct xe_engine_ops execlist_engine_ops = {
> > - .init = execlist_engine_init,
> > - .kill = execlist_engine_kill,
> > - .fini = execlist_engine_fini,
> > - .set_priority = execlist_engine_set_priority,
> > - .set_timeslice = execlist_engine_set_timeslice,
> > - .set_preempt_timeout = execlist_engine_set_preempt_timeout,
> > - .set_job_timeout = execlist_engine_set_job_timeout,
> > - .suspend = execlist_engine_suspend,
> > - .suspend_wait = execlist_engine_suspend_wait,
> > - .resume = execlist_engine_resume,
> > +static const struct xe_exec_queue_ops execlist_exec_queue_ops = {
> > + .init = execlist_exec_queue_init,
> > + .kill = execlist_exec_queue_kill,
> > + .fini = execlist_exec_queue_fini,
> > + .set_priority = execlist_exec_queue_set_priority,
> > + .set_timeslice = execlist_exec_queue_set_timeslice,
> > + .set_preempt_timeout = execlist_exec_queue_set_preempt_timeout,
> > + .set_job_timeout = execlist_exec_queue_set_job_timeout,
> > + .suspend = execlist_exec_queue_suspend,
> > + .suspend_wait = execlist_exec_queue_suspend_wait,
> > + .resume = execlist_exec_queue_resume,
> > };
> >
> > int xe_execlist_init(struct xe_gt *gt)
> > @@ -479,7 +479,7 @@ int xe_execlist_init(struct xe_gt *gt)
> > if (xe_device_guc_submission_enabled(gt_to_xe(gt)))
> > return 0;
> >
> > - gt->engine_ops = &execlist_engine_ops;
> > + gt->exec_queue_ops = &execlist_exec_queue_ops;
> >
> > return 0;
> > }
> > diff --git a/drivers/gpu/drm/xe/xe_execlist_types.h b/drivers/gpu/drm/xe/xe_execlist_types.h
> > index 05a620940209..ee1fccd4ee8b 100644
> > --- a/drivers/gpu/drm/xe/xe_execlist_types.h
> > +++ b/drivers/gpu/drm/xe/xe_execlist_types.h
> > @@ -13,7 +13,7 @@
> > #include <drm/gpu_scheduler.h>
> >
> > struct xe_hw_engine;
> > -struct xe_execlist_engine;
> > +struct xe_execlist_exec_queue;
> >
> > struct xe_execlist_port {
> > struct xe_hw_engine *hwe;
> > @@ -24,13 +24,13 @@ struct xe_execlist_port {
> >
> > u32 last_ctx_id;
> >
> > - struct xe_execlist_engine *running_exl;
> > + struct xe_execlist_exec_queue *running_exl;
> >
> > struct timer_list irq_fail;
> > };
> >
> > -struct xe_execlist_engine {
> > - struct xe_engine *engine;
> > +struct xe_execlist_exec_queue {
> > + struct xe_exec_queue *q;
> >
> > struct drm_gpu_scheduler sched;
> >
> > diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
> > index 28bf577c7bf2..3077faa1e792 100644
> > --- a/drivers/gpu/drm/xe/xe_gt.c
> > +++ b/drivers/gpu/drm/xe/xe_gt.c
> > @@ -14,7 +14,7 @@
> > #include "xe_bb.h"
> > #include "xe_bo.h"
> > #include "xe_device.h"
> > -#include "xe_engine.h"
> > +#include "xe_exec_queue.h"
> > #include "xe_execlist.h"
> > #include "xe_force_wake.h"
> > #include "xe_ggtt.h"
> > @@ -26,7 +26,7 @@
> > #include "xe_gt_sysfs.h"
> > #include "xe_gt_tlb_invalidation.h"
> > #include "xe_gt_topology.h"
> > -#include "xe_guc_engine_types.h"
> > +#include "xe_guc_exec_queue_types.h"
> > #include "xe_hw_fence.h"
> > #include "xe_irq.h"
> > #include "xe_lrc.h"
> > @@ -81,7 +81,7 @@ static void gt_fini(struct drm_device *drm, void *arg)
> >
> > static void gt_reset_worker(struct work_struct *w);
> >
> > -static int emit_nop_job(struct xe_gt *gt, struct xe_engine *e)
> > +static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
> > {
> > struct xe_sched_job *job;
> > struct xe_bb *bb;
> > @@ -94,7 +94,7 @@ static int emit_nop_job(struct xe_gt *gt, struct xe_engine *e)
> > return PTR_ERR(bb);
> >
> > batch_ofs = xe_bo_ggtt_addr(gt_to_tile(gt)->mem.kernel_bb_pool->bo);
> > - job = xe_bb_create_wa_job(e, bb, batch_ofs);
> > + job = xe_bb_create_wa_job(q, bb, batch_ofs);
> > if (IS_ERR(job)) {
> > xe_bb_free(bb, NULL);
> > return PTR_ERR(job);
> > @@ -115,9 +115,9 @@ static int emit_nop_job(struct xe_gt *gt, struct xe_engine *e)
> > return 0;
> > }
> >
> > -static int emit_wa_job(struct xe_gt *gt, struct xe_engine *e)
> > +static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
> > {
> > - struct xe_reg_sr *sr = &e->hwe->reg_lrc;
> > + struct xe_reg_sr *sr = &q->hwe->reg_lrc;
> > struct xe_reg_sr_entry *entry;
> > unsigned long reg;
> > struct xe_sched_job *job;
> > @@ -143,7 +143,7 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_engine *e)
> > }
> >
> > batch_ofs = xe_bo_ggtt_addr(gt_to_tile(gt)->mem.kernel_bb_pool->bo);
> > - job = xe_bb_create_wa_job(e, bb, batch_ofs);
> > + job = xe_bb_create_wa_job(q, bb, batch_ofs);
> > if (IS_ERR(job)) {
> > xe_bb_free(bb, NULL);
> > return PTR_ERR(job);
> > @@ -173,7 +173,7 @@ int xe_gt_record_default_lrcs(struct xe_gt *gt)
> > int err = 0;
> >
> > for_each_hw_engine(hwe, gt, id) {
> > - struct xe_engine *e, *nop_e;
> > + struct xe_exec_queue *q, *nop_q;
> > struct xe_vm *vm;
> > void *default_lrc;
> >
> > @@ -192,58 +192,58 @@ int xe_gt_record_default_lrcs(struct xe_gt *gt)
> > return -ENOMEM;
> >
> > vm = xe_migrate_get_vm(tile->migrate);
> > - e = xe_engine_create(xe, vm, BIT(hwe->logical_instance), 1,
> > - hwe, ENGINE_FLAG_WA);
> > - if (IS_ERR(e)) {
> > - err = PTR_ERR(e);
> > - xe_gt_err(gt, "hwe %s: xe_engine_create failed (%pe)\n",
> > - hwe->name, e);
> > + q = xe_exec_queue_create(xe, vm, BIT(hwe->logical_instance), 1,
> > + hwe, EXEC_QUEUE_FLAG_WA);
> > + if (IS_ERR(q)) {
> > + err = PTR_ERR(q);
> > + xe_gt_err(gt, "hwe %s: xe_exec_queue_create failed (%pe)\n",
> > + hwe->name, q);
> > goto put_vm;
> > }
> >
> > /* Prime golden LRC with known good state */
> > - err = emit_wa_job(gt, e);
> > + err = emit_wa_job(gt, q);
> > if (err) {
> > xe_gt_err(gt, "hwe %s: emit_wa_job failed (%pe) guc_id=%u\n",
> > - hwe->name, ERR_PTR(err), e->guc->id);
> > - goto put_engine;
> > + hwe->name, ERR_PTR(err), q->guc->id);
> > + goto put_exec_queue;
> > }
> >
> > - nop_e = xe_engine_create(xe, vm, BIT(hwe->logical_instance),
> > - 1, hwe, ENGINE_FLAG_WA);
> > - if (IS_ERR(nop_e)) {
> > - err = PTR_ERR(nop_e);
> > - xe_gt_err(gt, "hwe %s: nop xe_engine_create failed (%pe)\n",
> > - hwe->name, nop_e);
> > - goto put_engine;
> > + nop_q = xe_exec_queue_create(xe, vm, BIT(hwe->logical_instance),
> > + 1, hwe, EXEC_QUEUE_FLAG_WA);
> > + if (IS_ERR(nop_q)) {
> > + err = PTR_ERR(nop_q);
> > + xe_gt_err(gt, "hwe %s: nop xe_exec_queue_create failed (%pe)\n",
> > + hwe->name, nop_q);
> > + goto put_exec_queue;
> > }
> >
> > /* Switch to different LRC */
> > - err = emit_nop_job(gt, nop_e);
> > + err = emit_nop_job(gt, nop_q);
> > if (err) {
> > xe_gt_err(gt, "hwe %s: nop emit_nop_job failed (%pe) guc_id=%u\n",
> > - hwe->name, ERR_PTR(err), nop_e->guc->id);
> > - goto put_nop_e;
> > + hwe->name, ERR_PTR(err), nop_q->guc->id);
> > + goto put_nop_q;
> > }
> >
> > /* Reload golden LRC to record the effect of any indirect W/A */
> > - err = emit_nop_job(gt, e);
> > + err = emit_nop_job(gt, q);
> > if (err) {
> > xe_gt_err(gt, "hwe %s: emit_nop_job failed (%pe) guc_id=%u\n",
> > - hwe->name, ERR_PTR(err), e->guc->id);
> > - goto put_nop_e;
> > + hwe->name, ERR_PTR(err), q->guc->id);
> > + goto put_nop_q;
> > }
> >
> > xe_map_memcpy_from(xe, default_lrc,
> > - &e->lrc[0].bo->vmap,
> > - xe_lrc_pphwsp_offset(&e->lrc[0]),
> > + &q->lrc[0].bo->vmap,
> > + xe_lrc_pphwsp_offset(&q->lrc[0]),
> > xe_lrc_size(xe, hwe->class));
> >
> > gt->default_lrc[hwe->class] = default_lrc;
> > -put_nop_e:
> > - xe_engine_put(nop_e);
> > -put_engine:
> > - xe_engine_put(e);
> > +put_nop_q:
> > + xe_exec_queue_put(nop_q);
> > +put_exec_queue:
> > + xe_exec_queue_put(q);
> > put_vm:
> > xe_vm_put(vm);
> > if (err)
> > diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
> > index 78a9fe9f0bd3..c326932e53d7 100644
> > --- a/drivers/gpu/drm/xe/xe_gt_types.h
> > +++ b/drivers/gpu/drm/xe/xe_gt_types.h
> > @@ -14,7 +14,7 @@
> > #include "xe_sa_types.h"
> > #include "xe_uc_types.h"
> >
> > -struct xe_engine_ops;
> > +struct xe_exec_queue_ops;
> > struct xe_migrate;
> > struct xe_ring_ops;
> >
> > @@ -269,8 +269,8 @@ struct xe_gt {
> > /** @gtidle: idle properties of GT */
> > struct xe_gt_idle gtidle;
> >
> > - /** @engine_ops: submission backend engine operations */
> > - const struct xe_engine_ops *engine_ops;
> > + /** @exec_queue_ops: submission backend exec queue operations */
> > + const struct xe_exec_queue_ops *exec_queue_ops;
> >
> > /**
> > * @ring_ops: ring operations for this hw engine (1 per engine class)
> > diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
> > index a7da29be2e51..7d1244df959d 100644
> > --- a/drivers/gpu/drm/xe/xe_guc_ads.c
> > +++ b/drivers/gpu/drm/xe/xe_guc_ads.c
> > @@ -495,7 +495,7 @@ static void guc_mmio_reg_state_init(struct xe_guc_ads *ads)
> > u8 gc;
> >
> > /*
> > - * 1. Write all MMIO entries for this engine to the table. No
> > + * 1. Write all MMIO entries for this hw engine to the table. No
> > * need to worry about fused-off engines and when there are
> > * entries in the regset: the reg_state_list has been zero'ed
> > * by xe_guc_ads_populate()
> > diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
> > index 7fb2690425f8..3d705c64b85d 100644
> > --- a/drivers/gpu/drm/xe/xe_guc_ct.c
> > +++ b/drivers/gpu/drm/xe/xe_guc_ct.c
> > @@ -889,11 +889,11 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
> > ret = xe_guc_deregister_done_handler(guc, payload, adj_len);
> > break;
> > case XE_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
> > - ret = xe_guc_engine_reset_handler(guc, payload, adj_len);
> > + ret = xe_guc_exec_queue_reset_handler(guc, payload, adj_len);
> > break;
> > case XE_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
> > - ret = xe_guc_engine_reset_failure_handler(guc, payload,
> > - adj_len);
> > + ret = xe_guc_exec_queue_reset_failure_handler(guc, payload,
> > + adj_len);
> > break;
> > case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
> > /* Selftest only at the moment */
> > @@ -903,8 +903,8 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
> > /* FIXME: Handle this */
> > break;
> > case XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR:
> > - ret = xe_guc_engine_memory_cat_error_handler(guc, payload,
> > - adj_len);
> > + ret = xe_guc_exec_queue_memory_cat_error_handler(guc, payload,
> > + adj_len);
> > break;
> > case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
> > ret = xe_guc_pagefault_handler(guc, payload, adj_len);
> > diff --git a/drivers/gpu/drm/xe/xe_guc_engine_types.h b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
> > similarity index 88%
> > rename from drivers/gpu/drm/xe/xe_guc_engine_types.h
> > rename to drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
> > index 5d83132034a6..395ed3fba218 100644
> > --- a/drivers/gpu/drm/xe/xe_guc_engine_types.h
> > +++ b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
> > @@ -12,14 +12,14 @@
> > #include <drm/gpu_scheduler.h>
> >
> > struct dma_fence;
> > -struct xe_engine;
> > +struct xe_exec_queue;
> >
> > /**
> > - * struct xe_guc_engine - GuC specific state for an xe_engine
> > + * struct xe_guc_exec_queue - GuC specific state for an xe_exec_queue
> > */
> > -struct xe_guc_engine {
> > +struct xe_guc_exec_queue {
> > - /** @engine: Backpointer to parent xe_engine */
> > - struct xe_engine *engine;
> > + /** @q: Backpointer to parent xe_exec_queue */
> > + struct xe_exec_queue *q;
> > - /** @sched: GPU scheduler for this xe_engine */
> > + /** @sched: GPU scheduler for this exec queue */
> > struct drm_gpu_scheduler sched;
> > - /** @entity: Scheduler entity for this xe_engine */
> > + /** @entity: Scheduler entity for this exec queue */
> > @@ -43,7 +43,7 @@ struct xe_guc_engine {
> > u32 wqi_head;
> > /** @wqi_tail: work queue item tail */
> > u32 wqi_tail;
> > - /** @id: GuC id for this xe_engine */
> > + /** @id: GuC id for this exec_queue */
> > u16 id;
> > /** @suspend_wait: wait queue used to wait on pending suspends */
> > wait_queue_head_t suspend_wait;
> > diff --git a/drivers/gpu/drm/xe/xe_guc_fwif.h b/drivers/gpu/drm/xe/xe_guc_fwif.h
> > index 7515d7fbb723..4216a6d9e478 100644
> > --- a/drivers/gpu/drm/xe/xe_guc_fwif.h
> > +++ b/drivers/gpu/drm/xe/xe_guc_fwif.h
> > @@ -69,13 +69,13 @@ struct guc_klv_generic_dw_t {
> > } __packed;
> >
> > /* Format of the UPDATE_CONTEXT_POLICIES H2G data packet */
> > -struct guc_update_engine_policy_header {
> > +struct guc_update_exec_queue_policy_header {
> > u32 action;
> > u32 guc_id;
> > } __packed;
> >
> > -struct guc_update_engine_policy {
> > - struct guc_update_engine_policy_header header;
> > +struct guc_update_exec_queue_policy {
> > + struct guc_update_exec_queue_policy_header header;
> > struct guc_klv_generic_dw_t klv[GUC_CONTEXT_POLICIES_KLV_NUM_IDS];
> > } __packed;
> >
> > diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
> > index 8d2b50e03ed2..448075411191 100644
> > --- a/drivers/gpu/drm/xe/xe_guc_submit.c
> > +++ b/drivers/gpu/drm/xe/xe_guc_submit.c
> > @@ -16,12 +16,12 @@
> > #include "regs/xe_lrc_layout.h"
> > #include "xe_devcoredump.h"
> > #include "xe_device.h"
> > -#include "xe_engine.h"
> > +#include "xe_exec_queue.h"
> > #include "xe_force_wake.h"
> > #include "xe_gt.h"
> > #include "xe_guc.h"
> > #include "xe_guc_ct.h"
> > -#include "xe_guc_engine_types.h"
> > +#include "xe_guc_exec_queue_types.h"
> > #include "xe_guc_submit_types.h"
> > #include "xe_hw_engine.h"
> > #include "xe_hw_fence.h"
> > @@ -47,9 +47,9 @@ guc_to_xe(struct xe_guc *guc)
> > }
> >
> > static struct xe_guc *
> > -engine_to_guc(struct xe_engine *e)
> > +exec_queue_to_guc(struct xe_exec_queue *q)
> > {
> > - return &e->gt->uc.guc;
> > + return &q->gt->uc.guc;
> > }
> >
> > /*
> > @@ -57,140 +57,140 @@ engine_to_guc(struct xe_engine *e)
> > - * as the same time (e.g. a suspend can be happning at the same time as schedule
> > - * engine done being processed).
> > + * at the same time (e.g. a suspend can be happening at the same time as
> > + * a schedule exec_queue done message is being processed).
> > */
> > -#define ENGINE_STATE_REGISTERED (1 << 0)
> > -#define ENGINE_STATE_ENABLED (1 << 1)
> > -#define ENGINE_STATE_PENDING_ENABLE (1 << 2)
> > -#define ENGINE_STATE_PENDING_DISABLE (1 << 3)
> > -#define ENGINE_STATE_DESTROYED (1 << 4)
> > -#define ENGINE_STATE_SUSPENDED (1 << 5)
> > -#define ENGINE_STATE_RESET (1 << 6)
> > -#define ENGINE_STATE_KILLED (1 << 7)
> > +#define EXEC_QUEUE_STATE_REGISTERED (1 << 0)
> > +#define EXEC_QUEUE_STATE_ENABLED (1 << 1)
> > +#define EXEC_QUEUE_STATE_PENDING_ENABLE (1 << 2)
> > +#define EXEC_QUEUE_STATE_PENDING_DISABLE (1 << 3)
> > +#define EXEC_QUEUE_STATE_DESTROYED (1 << 4)
> > +#define EXEC_QUEUE_STATE_SUSPENDED (1 << 5)
> > +#define EXEC_QUEUE_STATE_RESET (1 << 6)
> > +#define EXEC_QUEUE_STATE_KILLED (1 << 7)
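
A note for reviewers while at it: these flags are only ever touched with
atomic read-modify-write ops, so the submission path, the G2H handlers and
the TDR can all flip their own bits concurrently without a lock. A toy
standalone version of the pattern, purely as an illustration (C11 atomics
standing in for the kernel's atomic_t, names simplified):

#include <stdatomic.h>
#include <stdio.h>

#define STATE_REGISTERED (1 << 0)
#define STATE_ENABLED    (1 << 1)

static atomic_int state;

static void set_registered(void)   { atomic_fetch_or(&state, STATE_REGISTERED); }
static void clear_registered(void) { atomic_fetch_and(&state, ~STATE_REGISTERED); }
static int  is_registered(void)    { return atomic_load(&state) & STATE_REGISTERED; }

int main(void)
{
        set_registered();
        /* another thread could set/clear STATE_ENABLED right here: each
         * transition owns its bit and the RMW ops keep the word coherent */
        printf("registered: %d\n", is_registered());
        clear_registered();
        printf("registered: %d\n", is_registered());
        return 0;
}
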
> >
> > -static bool engine_registered(struct xe_engine *e)
> > +static bool exec_queue_registered(struct xe_exec_queue *q)
> > {
> > - return atomic_read(&e->guc->state) & ENGINE_STATE_REGISTERED;
> > + return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_REGISTERED;
> > }
> >
> > -static void set_engine_registered(struct xe_engine *e)
> > +static void set_exec_queue_registered(struct xe_exec_queue *q)
> > {
> > - atomic_or(ENGINE_STATE_REGISTERED, &e->guc->state);
> > + atomic_or(EXEC_QUEUE_STATE_REGISTERED, &q->guc->state);
> > }
> >
> > -static void clear_engine_registered(struct xe_engine *e)
> > +static void clear_exec_queue_registered(struct xe_exec_queue *q)
> > {
> > - atomic_and(~ENGINE_STATE_REGISTERED, &e->guc->state);
> > + atomic_and(~EXEC_QUEUE_STATE_REGISTERED, &q->guc->state);
> > }
> >
> > -static bool engine_enabled(struct xe_engine *e)
> > +static bool exec_queue_enabled(struct xe_exec_queue *q)
> > {
> > - return atomic_read(&e->guc->state) & ENGINE_STATE_ENABLED;
> > + return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_ENABLED;
> > }
> >
> > -static void set_engine_enabled(struct xe_engine *e)
> > +static void set_exec_queue_enabled(struct xe_exec_queue *q)
> > {
> > - atomic_or(ENGINE_STATE_ENABLED, &e->guc->state);
> > + atomic_or(EXEC_QUEUE_STATE_ENABLED, &q->guc->state);
> > }
> >
> > -static void clear_engine_enabled(struct xe_engine *e)
> > +static void clear_exec_queue_enabled(struct xe_exec_queue *q)
> > {
> > - atomic_and(~ENGINE_STATE_ENABLED, &e->guc->state);
> > + atomic_and(~EXEC_QUEUE_STATE_ENABLED, &q->guc->state);
> > }
> >
> > -static bool engine_pending_enable(struct xe_engine *e)
> > +static bool exec_queue_pending_enable(struct xe_exec_queue *q)
> > {
> > - return atomic_read(&e->guc->state) & ENGINE_STATE_PENDING_ENABLE;
> > + return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_ENABLE;
> > }
> >
> > -static void set_engine_pending_enable(struct xe_engine *e)
> > +static void set_exec_queue_pending_enable(struct xe_exec_queue *q)
> > {
> > - atomic_or(ENGINE_STATE_PENDING_ENABLE, &e->guc->state);
> > + atomic_or(EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state);
> > }
> >
> > -static void clear_engine_pending_enable(struct xe_engine *e)
> > +static void clear_exec_queue_pending_enable(struct xe_exec_queue *q)
> > {
> > - atomic_and(~ENGINE_STATE_PENDING_ENABLE, &e->guc->state);
> > + atomic_and(~EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state);
> > }
> >
> > -static bool engine_pending_disable(struct xe_engine *e)
> > +static bool exec_queue_pending_disable(struct xe_exec_queue *q)
> > {
> > - return atomic_read(&e->guc->state) & ENGINE_STATE_PENDING_DISABLE;
> > + return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_DISABLE;
> > }
> >
> > -static void set_engine_pending_disable(struct xe_engine *e)
> > +static void set_exec_queue_pending_disable(struct xe_exec_queue *q)
> > {
> > - atomic_or(ENGINE_STATE_PENDING_DISABLE, &e->guc->state);
> > + atomic_or(EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state);
> > }
> >
> > -static void clear_engine_pending_disable(struct xe_engine *e)
> > +static void clear_exec_queue_pending_disable(struct xe_exec_queue *q)
> > {
> > - atomic_and(~ENGINE_STATE_PENDING_DISABLE, &e->guc->state);
> > + atomic_and(~EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state);
> > }
> >
> > -static bool engine_destroyed(struct xe_engine *e)
> > +static bool exec_queue_destroyed(struct xe_exec_queue *q)
> > {
> > - return atomic_read(&e->guc->state) & ENGINE_STATE_DESTROYED;
> > + return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_DESTROYED;
> > }
> >
> > -static void set_engine_destroyed(struct xe_engine *e)
> > +static void set_exec_queue_destroyed(struct xe_exec_queue *q)
> > {
> > - atomic_or(ENGINE_STATE_DESTROYED, &e->guc->state);
> > + atomic_or(EXEC_QUEUE_STATE_DESTROYED, &q->guc->state);
> > }
> >
> > -static bool engine_banned(struct xe_engine *e)
> > +static bool exec_queue_banned(struct xe_exec_queue *q)
> > {
> > - return (e->flags & ENGINE_FLAG_BANNED);
> > + return (q->flags & EXEC_QUEUE_FLAG_BANNED);
> > }
> >
> > -static void set_engine_banned(struct xe_engine *e)
> > +static void set_exec_queue_banned(struct xe_exec_queue *q)
> > {
> > - e->flags |= ENGINE_FLAG_BANNED;
> > + q->flags |= EXEC_QUEUE_FLAG_BANNED;
> > }
> >
> > -static bool engine_suspended(struct xe_engine *e)
> > +static bool exec_queue_suspended(struct xe_exec_queue *q)
> > {
> > - return atomic_read(&e->guc->state) & ENGINE_STATE_SUSPENDED;
> > + return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_SUSPENDED;
> > }
> >
> > -static void set_engine_suspended(struct xe_engine *e)
> > +static void set_exec_queue_suspended(struct xe_exec_queue *q)
> > {
> > - atomic_or(ENGINE_STATE_SUSPENDED, &e->guc->state);
> > + atomic_or(EXEC_QUEUE_STATE_SUSPENDED, &q->guc->state);
> > }
> >
> > -static void clear_engine_suspended(struct xe_engine *e)
> > +static void clear_exec_queue_suspended(struct xe_exec_queue *q)
> > {
> > - atomic_and(~ENGINE_STATE_SUSPENDED, &e->guc->state);
> > + atomic_and(~EXEC_QUEUE_STATE_SUSPENDED, &q->guc->state);
> > }
> >
> > -static bool engine_reset(struct xe_engine *e)
> > +static bool exec_queue_reset(struct xe_exec_queue *q)
> > {
> > - return atomic_read(&e->guc->state) & ENGINE_STATE_RESET;
> > + return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_RESET;
> > }
> >
> > -static void set_engine_reset(struct xe_engine *e)
> > +static void set_exec_queue_reset(struct xe_exec_queue *q)
> > {
> > - atomic_or(ENGINE_STATE_RESET, &e->guc->state);
> > + atomic_or(EXEC_QUEUE_STATE_RESET, &q->guc->state);
> > }
> >
> > -static bool engine_killed(struct xe_engine *e)
> > +static bool exec_queue_killed(struct xe_exec_queue *q)
> > {
> > - return atomic_read(&e->guc->state) & ENGINE_STATE_KILLED;
> > + return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_KILLED;
> > }
> >
> > -static void set_engine_killed(struct xe_engine *e)
> > +static void set_exec_queue_killed(struct xe_exec_queue *q)
> > {
> > - atomic_or(ENGINE_STATE_KILLED, &e->guc->state);
> > + atomic_or(EXEC_QUEUE_STATE_KILLED, &q->guc->state);
> > }
> >
> > -static bool engine_killed_or_banned(struct xe_engine *e)
> > +static bool exec_queue_killed_or_banned(struct xe_exec_queue *q)
> > {
> > - return engine_killed(e) || engine_banned(e);
> > + return exec_queue_killed(q) || exec_queue_banned(q);
> > }
> >
> > static void guc_submit_fini(struct drm_device *drm, void *arg)
> > {
> > struct xe_guc *guc = arg;
> >
> > - xa_destroy(&guc->submission_state.engine_lookup);
> > + xa_destroy(&guc->submission_state.exec_queue_lookup);
> > ida_destroy(&guc->submission_state.guc_ids);
> > bitmap_free(guc->submission_state.guc_ids_bitmap);
> > }
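
Aside: submission_state.exec_queue_lookup (destroyed above) is the
guc_id -> exec_queue map that lets the G2H handlers -- e.g. the renamed
reset handlers in xe_guc_ct.c earlier in this diff -- get from an id in a
GuC message back to the right queue. In spirit it is just this, with a
plain array standing in for the xarray (purely illustrative, the kernel
API differs):

#include <stdio.h>

#define N_IDS 16

static void *lookup[N_IDS]; /* guc_id -> queue */

int main(void)
{
        int queue = 42;                    /* stand-in for a struct xe_exec_queue */

        lookup[7] = &queue;                /* alloc_guc_id()           */
        printf("id 7 -> %p\n", lookup[7]); /* e.g. a reset G2H handler */
        lookup[7] = NULL;                  /* release_guc_id()         */
        return 0;
}
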
> > @@ -200,7 +200,7 @@ static void guc_submit_fini(struct drm_device *drm, void *arg)
> > #define GUC_ID_NUMBER_SLRC (GUC_ID_MAX - GUC_ID_NUMBER_MLRC)
> > #define GUC_ID_START_MLRC GUC_ID_NUMBER_SLRC
> >
> > -static const struct xe_engine_ops guc_engine_ops;
> > +static const struct xe_exec_queue_ops guc_exec_queue_ops;
> >
> > static void primelockdep(struct xe_guc *guc)
> > {
> > @@ -227,10 +227,10 @@ int xe_guc_submit_init(struct xe_guc *guc)
> > if (!guc->submission_state.guc_ids_bitmap)
> > return -ENOMEM;
> >
> > - gt->engine_ops = &guc_engine_ops;
> > + gt->exec_queue_ops = &guc_exec_queue_ops;
> >
> > mutex_init(&guc->submission_state.lock);
> > - xa_init(&guc->submission_state.engine_lookup);
> > + xa_init(&guc->submission_state.exec_queue_lookup);
> > ida_init(&guc->submission_state.guc_ids);
> >
> > spin_lock_init(&guc->submission_state.suspend.lock);
> > @@ -245,7 +245,7 @@ int xe_guc_submit_init(struct xe_guc *guc)
> > return 0;
> > }
> >
> > -static int alloc_guc_id(struct xe_guc *guc, struct xe_engine *e)
> > +static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
> > {
> > int ret;
> > void *ptr;
> > @@ -259,11 +259,11 @@ static int alloc_guc_id(struct xe_guc *guc, struct xe_engine *e)
> > */
> > lockdep_assert_held(&guc->submission_state.lock);
> >
> > - if (xe_engine_is_parallel(e)) {
> > + if (xe_exec_queue_is_parallel(q)) {
> > void *bitmap = guc->submission_state.guc_ids_bitmap;
> >
> > ret = bitmap_find_free_region(bitmap, GUC_ID_NUMBER_MLRC,
> > - order_base_2(e->width));
> > + order_base_2(q->width));
> > } else {
> > ret = ida_simple_get(&guc->submission_state.guc_ids, 0,
> > GUC_ID_NUMBER_SLRC, GFP_NOWAIT);
> > @@ -271,12 +271,12 @@ static int alloc_guc_id(struct xe_guc *guc, struct xe_engine *e)
> > if (ret < 0)
> > return ret;
> >
> > - e->guc->id = ret;
> > - if (xe_engine_is_parallel(e))
> > - e->guc->id += GUC_ID_START_MLRC;
> > + q->guc->id = ret;
> > + if (xe_exec_queue_is_parallel(q))
> > + q->guc->id += GUC_ID_START_MLRC;
> >
> > - ptr = xa_store(&guc->submission_state.engine_lookup,
> > - e->guc->id, e, GFP_NOWAIT);
> > + ptr = xa_store(&guc->submission_state.exec_queue_lookup,
> > + q->guc->id, q, GFP_NOWAIT);
> > if (IS_ERR(ptr)) {
> > ret = PTR_ERR(ptr);
> > goto err_release;
> > @@ -285,29 +285,29 @@ static int alloc_guc_id(struct xe_guc *guc, struct xe_engine *e)
> > return 0;
> >
> > err_release:
> > - ida_simple_remove(&guc->submission_state.guc_ids, e->guc->id);
> > + ida_simple_remove(&guc->submission_state.guc_ids, q->guc->id);
> > return ret;
> > }
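
The id-space split alloc_guc_id() implements may be easier to see with
numbers plugged in. The constants below are invented for the example
(the real values live in the defines above and the GuC headers); only
the carving-up scheme matters: single-LRC ids come from an IDA in
[0, SLRC), parallel (multi-LRC) queues grab a power-of-two region of the
bitmap, offset by GUC_ID_START_MLRC:

#include <stdio.h>

#define GUC_ID_MAX         65536 /* illustrative */
#define GUC_ID_NUMBER_MLRC 4096  /* illustrative */
#define GUC_ID_NUMBER_SLRC (GUC_ID_MAX - GUC_ID_NUMBER_MLRC)
#define GUC_ID_START_MLRC  GUC_ID_NUMBER_SLRC

int main(void)
{
        int width = 3;  /* q->width                                    */
        int region = 4; /* 1 << order_base_2(width)                    */
        int slot = 0;   /* what bitmap_find_free_region() might return */

        printf("SLRC ids: 0..%d\n", GUC_ID_NUMBER_SLRC - 1);
        printf("MLRC id:  %d (owns %d consecutive ids)\n",
               GUC_ID_START_MLRC + slot, region);
        return 0;
}
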
> >
> > -static void release_guc_id(struct xe_guc *guc, struct xe_engine *e)
> > +static void release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
> > {
> > mutex_lock(&guc->submission_state.lock);
> > - xa_erase(&guc->submission_state.engine_lookup, e->guc->id);
> > - if (xe_engine_is_parallel(e))
> > + xa_erase(&guc->submission_state.exec_queue_lookup, q->guc->id);
> > + if (xe_exec_queue_is_parallel(q))
> > bitmap_release_region(guc->submission_state.guc_ids_bitmap,
> > - e->guc->id - GUC_ID_START_MLRC,
> > - order_base_2(e->width));
> > + q->guc->id - GUC_ID_START_MLRC,
> > + order_base_2(q->width));
> > else
> > - ida_simple_remove(&guc->submission_state.guc_ids, e->guc->id);
> > + ida_simple_remove(&guc->submission_state.guc_ids, q->guc->id);
> > mutex_unlock(&guc->submission_state.lock);
> > }
> >
> > -struct engine_policy {
> > +struct exec_queue_policy {
> > u32 count;
> > - struct guc_update_engine_policy h2g;
> > + struct guc_update_exec_queue_policy h2g;
> > };
> >
> > -static u32 __guc_engine_policy_action_size(struct engine_policy *policy)
> > +static u32 __guc_exec_queue_policy_action_size(struct exec_queue_policy *policy)
> > {
> > size_t bytes = sizeof(policy->h2g.header) +
> > (sizeof(policy->h2g.klv[0]) * policy->count);
> > @@ -315,8 +315,8 @@ static u32 __guc_engine_policy_action_size(struct engine_policy *policy)
> > return bytes / sizeof(u32);
> > }
> >
> > -static void __guc_engine_policy_start_klv(struct engine_policy *policy,
> > - u16 guc_id)
> > +static void __guc_exec_queue_policy_start_klv(struct exec_queue_policy *policy,
> > + u16 guc_id)
> > {
> > policy->h2g.header.action =
> > XE_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES;
> > @@ -324,8 +324,8 @@ static void __guc_engine_policy_start_klv(struct engine_policy *policy,
> > policy->count = 0;
> > }
> >
> > -#define MAKE_ENGINE_POLICY_ADD(func, id) \
> > -static void __guc_engine_policy_add_##func(struct engine_policy *policy, \
> > +#define MAKE_EXEC_QUEUE_POLICY_ADD(func, id) \
> > +static void __guc_exec_queue_policy_add_##func(struct exec_queue_policy *policy, \
> > u32 data) \
> > { \
> > XE_WARN_ON(policy->count >= GUC_CONTEXT_POLICIES_KLV_NUM_IDS); \
> > @@ -338,10 +338,10 @@ static void __guc_engine_policy_add_##func(struct engine_policy *policy, \
> > policy->count++; \
> > }
> >
> > -MAKE_ENGINE_POLICY_ADD(execution_quantum, EXECUTION_QUANTUM)
> > -MAKE_ENGINE_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT)
> > -MAKE_ENGINE_POLICY_ADD(priority, SCHEDULING_PRIORITY)
> > -#undef MAKE_ENGINE_POLICY_ADD
> > +MAKE_EXEC_QUEUE_POLICY_ADD(execution_quantum, EXECUTION_QUANTUM)
> > +MAKE_EXEC_QUEUE_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT)
> > +MAKE_EXEC_QUEUE_POLICY_ADD(priority, SCHEDULING_PRIORITY)
> > +#undef MAKE_EXEC_QUEUE_POLICY_ADD
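
Since the macro soup above is a bit dense: after expansion each helper
appends one key/length + value pair to the H2G buffer, and init_policies()
below just chains three of them behind the header. A standalone model of
the same idea -- key in the upper 16 bits, length in dwords in the lower
16, as in the GuC KLV convention; the action and key numbers here are
placeholders, not the ABI values:

#include <stdint.h>
#include <stdio.h>

struct klv { uint32_t kl; uint32_t value; };

struct update_policy {
        uint32_t action; /* UPDATE_CONTEXT_POLICIES */
        uint32_t guc_id;
        struct klv klv[3];
};

static int add_klv(struct update_policy *p, int n, uint32_t key, uint32_t val)
{
        p->klv[n].kl = (key << 16) | 1; /* key | length (1 dword) */
        p->klv[n].value = val;
        return n + 1;
}

int main(void)
{
        struct update_policy p = { .action = 0x1234, .guc_id = 7 };
        int n = 0;

        n = add_klv(&p, n, 1, 5000);   /* execution quantum, us  */
        n = add_klv(&p, n, 2, 640000); /* preemption timeout, us */
        n = add_klv(&p, n, 3, 1);      /* scheduling priority    */
        printf("%d KLVs, %zu dwords total\n", n, sizeof(p) / sizeof(uint32_t));
        return 0;
}
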
> >
> > static const int drm_sched_prio_to_guc[] = {
> > [DRM_SCHED_PRIORITY_MIN] = GUC_CLIENT_PRIORITY_NORMAL,
> > @@ -350,33 +350,33 @@ static const int drm_sched_prio_to_guc[] = {
> > [DRM_SCHED_PRIORITY_KERNEL] = GUC_CLIENT_PRIORITY_KMD_HIGH,
> > };
> >
> > -static void init_policies(struct xe_guc *guc, struct xe_engine *e)
> > +static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q)
> > {
> > - struct engine_policy policy;
> > - enum drm_sched_priority prio = e->entity->priority;
> > - u32 timeslice_us = e->sched_props.timeslice_us;
> > - u32 preempt_timeout_us = e->sched_props.preempt_timeout_us;
> > + struct exec_queue_policy policy;
> > + enum drm_sched_priority prio = q->entity->priority;
> > + u32 timeslice_us = q->sched_props.timeslice_us;
> > + u32 preempt_timeout_us = q->sched_props.preempt_timeout_us;
> >
> > - XE_WARN_ON(!engine_registered(e));
> > + XE_WARN_ON(!exec_queue_registered(q));
> >
> > - __guc_engine_policy_start_klv(&policy, e->guc->id);
> > - __guc_engine_policy_add_priority(&policy, drm_sched_prio_to_guc[prio]);
> > - __guc_engine_policy_add_execution_quantum(&policy, timeslice_us);
> > - __guc_engine_policy_add_preemption_timeout(&policy, preempt_timeout_us);
> > + __guc_exec_queue_policy_start_klv(&policy, q->guc->id);
> > + __guc_exec_queue_policy_add_priority(&policy, drm_sched_prio_to_guc[prio]);
> > + __guc_exec_queue_policy_add_execution_quantum(&policy, timeslice_us);
> > + __guc_exec_queue_policy_add_preemption_timeout(&policy, preempt_timeout_us);
> >
> > xe_guc_ct_send(&guc->ct, (u32 *)&policy.h2g,
> > - __guc_engine_policy_action_size(&policy), 0, 0);
> > + __guc_exec_queue_policy_action_size(&policy), 0, 0);
> > }
> >
> > -static void set_min_preemption_timeout(struct xe_guc *guc, struct xe_engine *e)
> > +static void set_min_preemption_timeout(struct xe_guc *guc, struct xe_exec_queue *q)
> > {
> > - struct engine_policy policy;
> > + struct exec_queue_policy policy;
> >
> > - __guc_engine_policy_start_klv(&policy, e->guc->id);
> > - __guc_engine_policy_add_preemption_timeout(&policy, 1);
> > + __guc_exec_queue_policy_start_klv(&policy, q->guc->id);
> > + __guc_exec_queue_policy_add_preemption_timeout(&policy, 1);
> >
> > xe_guc_ct_send(&guc->ct, (u32 *)&policy.h2g,
> > - __guc_engine_policy_action_size(&policy), 0, 0);
> > + __guc_exec_queue_policy_action_size(&policy), 0, 0);
> > }
> >
> > #define parallel_read(xe_, map_, field_) \
> > @@ -387,7 +387,7 @@ static void set_min_preemption_timeout(struct xe_guc *guc, struct xe_engine *e)
> > field_, val_)
> >
> > static void __register_mlrc_engine(struct xe_guc *guc,
> > - struct xe_engine *e,
> > + struct xe_exec_queue *q,
> > struct guc_ctxt_registration_info *info)
> > {
> > #define MAX_MLRC_REG_SIZE (13 + XE_HW_ENGINE_MAX_INSTANCE * 2)
> > @@ -395,7 +395,7 @@ static void __register_mlrc_engine(struct xe_guc *guc,
> > int len = 0;
> > int i;
> >
> > - XE_WARN_ON(!xe_engine_is_parallel(e));
> > + XE_WARN_ON(!xe_exec_queue_is_parallel(q));
> >
> > action[len++] = XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
> > action[len++] = info->flags;
> > @@ -407,12 +407,12 @@ static void __register_mlrc_engine(struct xe_guc *guc,
> > action[len++] = info->wq_base_lo;
> > action[len++] = info->wq_base_hi;
> > action[len++] = info->wq_size;
> > - action[len++] = e->width;
> > + action[len++] = q->width;
> > action[len++] = info->hwlrca_lo;
> > action[len++] = info->hwlrca_hi;
> >
> > - for (i = 1; i < e->width; ++i) {
> > - struct xe_lrc *lrc = e->lrc + i;
> > + for (i = 1; i < q->width; ++i) {
> > + struct xe_lrc *lrc = q->lrc + i;
> >
> > action[len++] = lower_32_bits(xe_lrc_descriptor(lrc));
> > action[len++] = upper_32_bits(xe_lrc_descriptor(lrc));
> > @@ -445,24 +445,24 @@ static void __register_engine(struct xe_guc *guc,
> > xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
> > }
> >
> > -static void register_engine(struct xe_engine *e)
> > +static void register_engine(struct xe_exec_queue *q)
> > {
> > - struct xe_guc *guc = engine_to_guc(e);
> > + struct xe_guc *guc = exec_queue_to_guc(q);
> > struct xe_device *xe = guc_to_xe(guc);
> > - struct xe_lrc *lrc = e->lrc;
> > + struct xe_lrc *lrc = q->lrc;
> > struct guc_ctxt_registration_info info;
> >
> > - XE_WARN_ON(engine_registered(e));
> > + XE_WARN_ON(exec_queue_registered(q));
> >
> > memset(&info, 0, sizeof(info));
> > - info.context_idx = e->guc->id;
> > - info.engine_class = xe_engine_class_to_guc_class(e->class);
> > - info.engine_submit_mask = e->logical_mask;
> > + info.context_idx = q->guc->id;
> > + info.engine_class = xe_engine_class_to_guc_class(q->class);
> > + info.engine_submit_mask = q->logical_mask;
> > info.hwlrca_lo = lower_32_bits(xe_lrc_descriptor(lrc));
> > info.hwlrca_hi = upper_32_bits(xe_lrc_descriptor(lrc));
> > info.flags = CONTEXT_REGISTRATION_FLAG_KMD;
> >
> > - if (xe_engine_is_parallel(e)) {
> > + if (xe_exec_queue_is_parallel(q)) {
> > u32 ggtt_addr = xe_lrc_parallel_ggtt_addr(lrc);
> > struct iosys_map map = xe_lrc_parallel_map(lrc);
> >
> > @@ -476,8 +476,8 @@ static void register_engine(struct xe_engine *e)
> > offsetof(struct guc_submit_parallel_scratch, wq[0]));
> > info.wq_size = WQ_SIZE;
> >
> > - e->guc->wqi_head = 0;
> > - e->guc->wqi_tail = 0;
> > + q->guc->wqi_head = 0;
> > + q->guc->wqi_tail = 0;
> > xe_map_memset(xe, &map, 0, 0, PARALLEL_SCRATCH_SIZE - WQ_SIZE);
> > parallel_write(xe, map, wq_desc.wq_status, WQ_STATUS_ACTIVE);
> > }
> > @@ -487,38 +487,38 @@ static void register_engine(struct xe_engine *e)
> > * the GuC as jobs signal immediately and can't destroy an engine if the
> > * GuC has a reference to it.
> > */
> > - if (xe_engine_is_lr(e))
> > - xe_engine_get(e);
> > + if (xe_exec_queue_is_lr(q))
> > + xe_exec_queue_get(q);
> >
> > - set_engine_registered(e);
> > - trace_xe_engine_register(e);
> > - if (xe_engine_is_parallel(e))
> > - __register_mlrc_engine(guc, e, &info);
> > + set_exec_queue_registered(q);
> > + trace_xe_exec_queue_register(q);
> > + if (xe_exec_queue_is_parallel(q))
> > + __register_mlrc_engine(guc, q, &info);
> > else
> > __register_engine(guc, &info);
> > - init_policies(guc, e);
> > + init_policies(guc, q);
> > }
> >
> > -static u32 wq_space_until_wrap(struct xe_engine *e)
> > +static u32 wq_space_until_wrap(struct xe_exec_queue *q)
> > {
> > - return (WQ_SIZE - e->guc->wqi_tail);
> > + return (WQ_SIZE - q->guc->wqi_tail);
> > }
> >
> > -static int wq_wait_for_space(struct xe_engine *e, u32 wqi_size)
> > +static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size)
> > {
> > - struct xe_guc *guc = engine_to_guc(e);
> > + struct xe_guc *guc = exec_queue_to_guc(q);
> > struct xe_device *xe = guc_to_xe(guc);
> > - struct iosys_map map = xe_lrc_parallel_map(e->lrc);
> > + struct iosys_map map = xe_lrc_parallel_map(q->lrc);
> > unsigned int sleep_period_ms = 1;
> >
> > #define AVAILABLE_SPACE \
> > - CIRC_SPACE(e->guc->wqi_tail, e->guc->wqi_head, WQ_SIZE)
> > + CIRC_SPACE(q->guc->wqi_tail, q->guc->wqi_head, WQ_SIZE)
> > if (wqi_size > AVAILABLE_SPACE) {
> > try_again:
> > - e->guc->wqi_head = parallel_read(xe, map, wq_desc.head);
> > + q->guc->wqi_head = parallel_read(xe, map, wq_desc.head);
> > if (wqi_size > AVAILABLE_SPACE) {
> > if (sleep_period_ms == 1024) {
> > - xe_gt_reset_async(e->gt);
> > + xe_gt_reset_async(q->gt);
> > return -ENODEV;
> > }
> >
> > @@ -532,52 +532,52 @@ static int wq_wait_for_space(struct xe_engine *e, u32 wqi_size)
> > return 0;
> > }
> >
> > -static int wq_noop_append(struct xe_engine *e)
> > +static int wq_noop_append(struct xe_exec_queue *q)
> > {
> > - struct xe_guc *guc = engine_to_guc(e);
> > + struct xe_guc *guc = exec_queue_to_guc(q);
> > struct xe_device *xe = guc_to_xe(guc);
> > - struct iosys_map map = xe_lrc_parallel_map(e->lrc);
> > - u32 len_dw = wq_space_until_wrap(e) / sizeof(u32) - 1;
> > + struct iosys_map map = xe_lrc_parallel_map(q->lrc);
> > + u32 len_dw = wq_space_until_wrap(q) / sizeof(u32) - 1;
> >
> > - if (wq_wait_for_space(e, wq_space_until_wrap(e)))
> > + if (wq_wait_for_space(q, wq_space_until_wrap(q)))
> > return -ENODEV;
> >
> > XE_WARN_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw));
> >
> > - parallel_write(xe, map, wq[e->guc->wqi_tail / sizeof(u32)],
> > + parallel_write(xe, map, wq[q->guc->wqi_tail / sizeof(u32)],
> > FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
> > FIELD_PREP(WQ_LEN_MASK, len_dw));
> > - e->guc->wqi_tail = 0;
> > + q->guc->wqi_tail = 0;
> >
> > return 0;
> > }
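
The wrap handling can read a bit backwards at first: a work item is never
allowed to straddle the end of the ring, so when one wouldn't fit, a NOOP
item sized to eat the remaining space is written and the tail wraps to 0
before the real item goes in. Toy model of just that math (WQ_SIZE value
invented for the example):

#include <stdio.h>

#define WQ_SIZE 0x1000

static unsigned int tail;

static void append(unsigned int item_size)
{
        unsigned int to_wrap = WQ_SIZE - tail;

        if (item_size > to_wrap) {
                printf("noop of %u bytes, wrap\n", to_wrap);
                tail = 0;
        }
        printf("item at %u..%u\n", tail, tail + item_size);
        tail += item_size;
}

int main(void)
{
        tail = WQ_SIZE - 8;
        append(24); /* doesn't fit before the end -> NOOP + wrap */
        return 0;
}
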
> >
> > -static void wq_item_append(struct xe_engine *e)
> > +static void wq_item_append(struct xe_exec_queue *q)
> > {
> > - struct xe_guc *guc = engine_to_guc(e);
> > + struct xe_guc *guc = exec_queue_to_guc(q);
> > struct xe_device *xe = guc_to_xe(guc);
> > - struct iosys_map map = xe_lrc_parallel_map(e->lrc);
> > + struct iosys_map map = xe_lrc_parallel_map(q->lrc);
> > #define WQ_HEADER_SIZE 4 /* Includes 1 LRC address too */
> > u32 wqi[XE_HW_ENGINE_MAX_INSTANCE + (WQ_HEADER_SIZE - 1)];
> > - u32 wqi_size = (e->width + (WQ_HEADER_SIZE - 1)) * sizeof(u32);
> > + u32 wqi_size = (q->width + (WQ_HEADER_SIZE - 1)) * sizeof(u32);
> > u32 len_dw = (wqi_size / sizeof(u32)) - 1;
> > int i = 0, j;
> >
> > - if (wqi_size > wq_space_until_wrap(e)) {
> > - if (wq_noop_append(e))
> > + if (wqi_size > wq_space_until_wrap(q)) {
> > + if (wq_noop_append(q))
> > return;
> > }
> > - if (wq_wait_for_space(e, wqi_size))
> > + if (wq_wait_for_space(q, wqi_size))
> > return;
> >
> > wqi[i++] = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) |
> > FIELD_PREP(WQ_LEN_MASK, len_dw);
> > - wqi[i++] = xe_lrc_descriptor(e->lrc);
> > - wqi[i++] = FIELD_PREP(WQ_GUC_ID_MASK, e->guc->id) |
> > - FIELD_PREP(WQ_RING_TAIL_MASK, e->lrc->ring.tail / sizeof(u64));
> > + wqi[i++] = xe_lrc_descriptor(q->lrc);
> > + wqi[i++] = FIELD_PREP(WQ_GUC_ID_MASK, q->guc->id) |
> > + FIELD_PREP(WQ_RING_TAIL_MASK, q->lrc->ring.tail / sizeof(u64));
> > wqi[i++] = 0;
> > - for (j = 1; j < e->width; ++j) {
> > - struct xe_lrc *lrc = e->lrc + j;
> > + for (j = 1; j < q->width; ++j) {
> > + struct xe_lrc *lrc = q->lrc + j;
> >
> > wqi[i++] = lrc->ring.tail / sizeof(u64);
> > }
> > @@ -585,55 +585,55 @@ static void wq_item_append(struct xe_engine *e)
> > XE_WARN_ON(i != wqi_size / sizeof(u32));
> >
> > iosys_map_incr(&map, offsetof(struct guc_submit_parallel_scratch,
> > - wq[e->guc->wqi_tail / sizeof(u32)]));
> > + wq[q->guc->wqi_tail / sizeof(u32)]));
> > xe_map_memcpy_to(xe, &map, 0, wqi, wqi_size);
> > - e->guc->wqi_tail += wqi_size;
> > - XE_WARN_ON(e->guc->wqi_tail > WQ_SIZE);
> > + q->guc->wqi_tail += wqi_size;
> > + XE_WARN_ON(q->guc->wqi_tail > WQ_SIZE);
> >
> > xe_device_wmb(xe);
> >
> > - map = xe_lrc_parallel_map(e->lrc);
> > - parallel_write(xe, map, wq_desc.tail, e->guc->wqi_tail);
> > + map = xe_lrc_parallel_map(q->lrc);
> > + parallel_write(xe, map, wq_desc.tail, q->guc->wqi_tail);
> > }
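
And the resulting MULTI_LRC item layout, for anyone checking
wq_item_append() against the GuC docs: dword 0 is type|len, dword 1 the
lead LRC descriptor, dword 2 guc_id plus the lead ring tail, dword 3 zero,
then one ring tail per extra LRC. A quick standalone size check that
mirrors the arithmetic above (assumes 32-bit unsigned int):

#include <stdio.h>

#define WQ_HEADER_SIZE 4 /* includes the first LRC dword */

int main(void)
{
        int width = 3; /* three LRCs in the parallel queue */
        unsigned int wqi_size = (width + WQ_HEADER_SIZE - 1) * sizeof(unsigned int);

        /* dw0 type|len, dw1 lrc[0] descriptor, dw2 guc_id|tail[0],
         * dw3 zero, dw4..dw5 tails of lrc[1] and lrc[2] */
        printf("wqi: %u bytes (%u dwords)\n", wqi_size, wqi_size / 4);
        return 0;
}
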
> >
> > #define RESUME_PENDING ~0x0ull
> > -static void submit_engine(struct xe_engine *e)
> > +static void submit_exec_queue(struct xe_exec_queue *q)
> > {
> > - struct xe_guc *guc = engine_to_guc(e);
> > - struct xe_lrc *lrc = e->lrc;
> > + struct xe_guc *guc = exec_queue_to_guc(q);
> > + struct xe_lrc *lrc = q->lrc;
> > u32 action[3];
> > u32 g2h_len = 0;
> > u32 num_g2h = 0;
> > int len = 0;
> > bool extra_submit = false;
> >
> > - XE_WARN_ON(!engine_registered(e));
> > + XE_WARN_ON(!exec_queue_registered(q));
> >
> > - if (xe_engine_is_parallel(e))
> > - wq_item_append(e);
> > + if (xe_exec_queue_is_parallel(q))
> > + wq_item_append(q);
> > else
> > xe_lrc_write_ctx_reg(lrc, CTX_RING_TAIL, lrc->ring.tail);
> >
> > - if (engine_suspended(e) && !xe_engine_is_parallel(e))
> > + if (exec_queue_suspended(q) && !xe_exec_queue_is_parallel(q))
> > return;
> >
> > - if (!engine_enabled(e) && !engine_suspended(e)) {
> > + if (!exec_queue_enabled(q) && !exec_queue_suspended(q)) {
> > action[len++] = XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET;
> > - action[len++] = e->guc->id;
> > + action[len++] = q->guc->id;
> > action[len++] = GUC_CONTEXT_ENABLE;
> > g2h_len = G2H_LEN_DW_SCHED_CONTEXT_MODE_SET;
> > num_g2h = 1;
> > - if (xe_engine_is_parallel(e))
> > + if (xe_exec_queue_is_parallel(q))
> > extra_submit = true;
> >
> > - e->guc->resume_time = RESUME_PENDING;
> > - set_engine_pending_enable(e);
> > - set_engine_enabled(e);
> > - trace_xe_engine_scheduling_enable(e);
> > + q->guc->resume_time = RESUME_PENDING;
> > + set_exec_queue_pending_enable(q);
> > + set_exec_queue_enabled(q);
> > + trace_xe_exec_queue_scheduling_enable(q);
> > } else {
> > action[len++] = XE_GUC_ACTION_SCHED_CONTEXT;
> > - action[len++] = e->guc->id;
> > - trace_xe_engine_submit(e);
> > + action[len++] = q->guc->id;
> > + trace_xe_exec_queue_submit(q);
> > }
> >
> > xe_guc_ct_send(&guc->ct, action, len, g2h_len, num_g2h);
> > @@ -641,31 +641,31 @@ static void submit_engine(struct xe_engine *e)
> > if (extra_submit) {
> > len = 0;
> > action[len++] = XE_GUC_ACTION_SCHED_CONTEXT;
> > - action[len++] = e->guc->id;
> > - trace_xe_engine_submit(e);
> > + action[len++] = q->guc->id;
> > + trace_xe_exec_queue_submit(q);
> >
> > xe_guc_ct_send(&guc->ct, action, len, 0, 0);
> > }
> > }
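
One thing that helps when reading this function: the first submission on a
queue is also what turns scheduling on, and that variant expects a G2H
completion back (hence the g2h_len/num_g2h accounting), while later
submissions are fire-and-forget. A compile-and-run model of just that
branch -- the enum values are placeholders, not the GuC ABI numbers:

#include <stdio.h>

enum { SCHED_CONTEXT_MODE_SET = 1, SCHED_CONTEXT = 2, CONTEXT_ENABLE = 1 };

static void submit(int guc_id, int enabled)
{
        unsigned int action[3];
        int len = 0;

        if (!enabled) {
                /* first submission doubles as the scheduling enable and
                 * expects a G2H completion back */
                action[len++] = SCHED_CONTEXT_MODE_SET;
                action[len++] = guc_id;
                action[len++] = CONTEXT_ENABLE;
        } else {
                action[len++] = SCHED_CONTEXT;
                action[len++] = guc_id;
        }
        printf("send %d dwords, first 0x%x\n", len, action[0]);
}

int main(void)
{
        submit(7, 0); /* enable + submit */
        submit(7, 1); /* plain submit    */
        return 0;
}
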
> >
> > static struct dma_fence *
> > -guc_engine_run_job(struct drm_sched_job *drm_job)
> > +guc_exec_queue_run_job(struct drm_sched_job *drm_job)
> > {
> > struct xe_sched_job *job = to_xe_sched_job(drm_job);
> > - struct xe_engine *e = job->engine;
> > - bool lr = xe_engine_is_lr(e);
> > + struct xe_exec_queue *q = job->q;
> > + bool lr = xe_exec_queue_is_lr(q);
> >
> > - XE_WARN_ON((engine_destroyed(e) || engine_pending_disable(e)) &&
> > - !engine_banned(e) && !engine_suspended(e));
> > + XE_WARN_ON((exec_queue_destroyed(q) || exec_queue_pending_disable(q)) &&
> > + !exec_queue_banned(q) && !exec_queue_suspended(q));
> >
> > trace_xe_sched_job_run(job);
> >
> > - if (!engine_killed_or_banned(e) && !xe_sched_job_is_error(job)) {
> > - if (!engine_registered(e))
> > - register_engine(e);
> > + if (!exec_queue_killed_or_banned(q) && !xe_sched_job_is_error(job)) {
> > + if (!exec_queue_registered(q))
> > + register_engine(q);
> > if (!lr) /* LR jobs are emitted in the exec IOCTL */
> > - e->ring_ops->emit_job(job);
> > - submit_engine(e);
> > + q->ring_ops->emit_job(job);
> > + submit_exec_queue(q);
> > }
> >
> > if (lr) {
> > @@ -678,7 +678,7 @@ guc_engine_run_job(struct drm_sched_job *drm_job)
> > }
> > }
> >
> > -static void guc_engine_free_job(struct drm_sched_job *drm_job)
> > +static void guc_exec_queue_free_job(struct drm_sched_job *drm_job)
> > {
> > struct xe_sched_job *job = to_xe_sched_job(drm_job);
> >
> > @@ -691,38 +691,38 @@ static int guc_read_stopped(struct xe_guc *guc)
> > return atomic_read(&guc->submission_state.stopped);
> > }
> >
> > -#define MAKE_SCHED_CONTEXT_ACTION(e, enable_disable) \
> > +#define MAKE_SCHED_CONTEXT_ACTION(q, enable_disable) \
> > u32 action[] = { \
> > XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET, \
> > - e->guc->id, \
> > + q->guc->id, \
> > GUC_CONTEXT_##enable_disable, \
> > }
> > #define MIN_SCHED_TIMEOUT 1
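
For reference when reading the call sites below, MAKE_SCHED_CONTEXT_ACTION(q, DISABLE)
pastes out to exactly this (the trailing semicolon comes from the call site):

u32 action[] = {
        XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
        q->guc->id,
        GUC_CONTEXT_DISABLE,
};
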
> >
> > static void disable_scheduling_deregister(struct xe_guc *guc,
> > - struct xe_engine *e)
> > + struct xe_exec_queue *q)
> > {
> > - MAKE_SCHED_CONTEXT_ACTION(e, DISABLE);
> > + MAKE_SCHED_CONTEXT_ACTION(q, DISABLE);
> > int ret;
> >
> > - set_min_preemption_timeout(guc, e);
> > + set_min_preemption_timeout(guc, q);
> > smp_rmb();
> > - ret = wait_event_timeout(guc->ct.wq, !engine_pending_enable(e) ||
> > + ret = wait_event_timeout(guc->ct.wq, !exec_queue_pending_enable(q) ||
> > guc_read_stopped(guc), HZ * 5);
> > if (!ret) {
> > - struct drm_gpu_scheduler *sched = &e->guc->sched;
> > + struct drm_gpu_scheduler *sched = &q->guc->sched;
> >
> > XE_WARN_ON("Pending enable failed to respond");
> > sched->timeout = MIN_SCHED_TIMEOUT;
> > drm_sched_run_wq_start(sched);
> > - xe_gt_reset_async(e->gt);
> > + xe_gt_reset_async(q->gt);
> > return;
> > }
> >
> > - clear_engine_enabled(e);
> > - set_engine_pending_disable(e);
> > - set_engine_destroyed(e);
> > - trace_xe_engine_scheduling_disable(e);
> > + clear_exec_queue_enabled(q);
> > + set_exec_queue_pending_disable(q);
> > + set_exec_queue_destroyed(q);
> > + trace_xe_exec_queue_scheduling_disable(q);
> >
> > /*
> > * Reserve space for both G2H here as the 2nd G2H is sent from a G2H
> > @@ -733,27 +733,27 @@ static void disable_scheduling_deregister(struct xe_guc *guc,
> > G2H_LEN_DW_DEREGISTER_CONTEXT, 2);
> > }
> >
> > -static void guc_engine_print(struct xe_engine *e, struct drm_printer *p);
> > +static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p);
> >
> > #if IS_ENABLED(CONFIG_DRM_XE_SIMPLE_ERROR_CAPTURE)
> > -static void simple_error_capture(struct xe_engine *e)
> > +static void simple_error_capture(struct xe_exec_queue *q)
> > {
> > - struct xe_guc *guc = engine_to_guc(e);
> > + struct xe_guc *guc = exec_queue_to_guc(q);
> > struct drm_printer p = drm_err_printer("");
> > struct xe_hw_engine *hwe;
> > enum xe_hw_engine_id id;
> > - u32 adj_logical_mask = e->logical_mask;
> > - u32 width_mask = (0x1 << e->width) - 1;
> > + u32 adj_logical_mask = q->logical_mask;
> > + u32 width_mask = (0x1 << q->width) - 1;
> > int i;
> > bool cookie;
> >
> > - if (e->vm && !e->vm->error_capture.capture_once) {
> > - e->vm->error_capture.capture_once = true;
> > + if (q->vm && !q->vm->error_capture.capture_once) {
> > + q->vm->error_capture.capture_once = true;
> > cookie = dma_fence_begin_signalling();
> > - for (i = 0; e->width > 1 && i < XE_HW_ENGINE_MAX_INSTANCE;) {
> > + for (i = 0; q->width > 1 && i < XE_HW_ENGINE_MAX_INSTANCE;) {
> > if (adj_logical_mask & BIT(i)) {
> > adj_logical_mask |= width_mask << i;
> > - i += e->width;
> > + i += q->width;
> > } else {
> > ++i;
> > }
> > @@ -761,66 +761,66 @@ static void simple_error_capture(struct xe_engine *e)
> >
> > xe_force_wake_get(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
> > xe_guc_ct_print(&guc->ct, &p, true);
> > - guc_engine_print(e, &p);
> > + guc_exec_queue_print(q, &p);
> > for_each_hw_engine(hwe, guc_to_gt(guc), id) {
> > - if (hwe->class != e->hwe->class ||
> > + if (hwe->class != q->hwe->class ||
> > !(BIT(hwe->logical_instance) & adj_logical_mask))
> > continue;
> > xe_hw_engine_print(hwe, &p);
> > }
> > - xe_analyze_vm(&p, e->vm, e->gt->info.id);
> > + xe_analyze_vm(&p, q->vm, q->gt->info.id);
> > xe_force_wake_put(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
> > dma_fence_end_signalling(cookie);
> > }
> > }
> > #else
> > -static void simple_error_capture(struct xe_engine *e)
> > +static void simple_error_capture(struct xe_exec_queue *q)
> > {
> > }
> > #endif
> >
> > -static void xe_guc_engine_trigger_cleanup(struct xe_engine *e)
> > +static void xe_guc_exec_queue_trigger_cleanup(struct xe_exec_queue *q)
> > {
> > - struct xe_guc *guc = engine_to_guc(e);
> > + struct xe_guc *guc = exec_queue_to_guc(q);
> >
> > - if (xe_engine_is_lr(e))
> > - queue_work(guc_to_gt(guc)->ordered_wq, &e->guc->lr_tdr);
> > + if (xe_exec_queue_is_lr(q))
> > + queue_work(guc_to_gt(guc)->ordered_wq, &q->guc->lr_tdr);
> > else
> > - drm_sched_set_timeout(&e->guc->sched, MIN_SCHED_TIMEOUT);
> > + drm_sched_set_timeout(&q->guc->sched, MIN_SCHED_TIMEOUT);
> > }
> >
> > -static void xe_guc_engine_lr_cleanup(struct work_struct *w)
> > +static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
> > {
> > - struct xe_guc_engine *ge =
> > - container_of(w, struct xe_guc_engine, lr_tdr);
> > - struct xe_engine *e = ge->engine;
> > + struct xe_guc_exec_queue *ge =
> > + container_of(w, struct xe_guc_exec_queue, lr_tdr);
> > + struct xe_exec_queue *q = ge->q;
> > struct drm_gpu_scheduler *sched = &ge->sched;
> >
> > - XE_WARN_ON(!xe_engine_is_lr(e));
> > - trace_xe_engine_lr_cleanup(e);
> > + XE_WARN_ON(!xe_exec_queue_is_lr(q));
> > + trace_xe_exec_queue_lr_cleanup(q);
> >
> > /* Kill the run_job / process_msg entry points */
> > drm_sched_run_wq_stop(sched);
> >
> > /* Engine state now stable, disable scheduling / deregister if needed */
> > - if (engine_registered(e)) {
> > - struct xe_guc *guc = engine_to_guc(e);
> > + if (exec_queue_registered(q)) {
> > + struct xe_guc *guc = exec_queue_to_guc(q);
> > int ret;
> >
> > - set_engine_banned(e);
> > - disable_scheduling_deregister(guc, e);
> > + set_exec_queue_banned(q);
> > + disable_scheduling_deregister(guc, q);
> >
> > /*
> > * Must wait for scheduling to be disabled before signalling
> > * any fences, if GT broken the GT reset code should signal us.
> > */
> > ret = wait_event_timeout(guc->ct.wq,
> > - !engine_pending_disable(e) ||
> > + !exec_queue_pending_disable(q) ||
> > guc_read_stopped(guc), HZ * 5);
> > if (!ret) {
> > XE_WARN_ON("Schedule disable failed to respond");
> > drm_sched_run_wq_start(sched);
> > - xe_gt_reset_async(e->gt);
> > + xe_gt_reset_async(q->gt);
> > return;
> > }
> > }
> > @@ -829,27 +829,27 @@ static void xe_guc_engine_lr_cleanup(struct work_struct *w)
> > }
> >
> > static enum drm_gpu_sched_stat
> > -guc_engine_timedout_job(struct drm_sched_job *drm_job)
> > +guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
> > {
> > struct xe_sched_job *job = to_xe_sched_job(drm_job);
> > struct xe_sched_job *tmp_job;
> > - struct xe_engine *e = job->engine;
> > - struct drm_gpu_scheduler *sched = &e->guc->sched;
> > - struct xe_device *xe = guc_to_xe(engine_to_guc(e));
> > + struct xe_exec_queue *q = job->q;
> > + struct drm_gpu_scheduler *sched = &q->guc->sched;
> > + struct xe_device *xe = guc_to_xe(exec_queue_to_guc(q));
> > int err = -ETIME;
> > int i = 0;
> >
> > if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) {
> > - XE_WARN_ON(e->flags & ENGINE_FLAG_KERNEL);
> > - XE_WARN_ON(e->flags & ENGINE_FLAG_VM && !engine_killed(e));
> > + XE_WARN_ON(q->flags & EXEC_QUEUE_FLAG_KERNEL);
> > + XE_WARN_ON(q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q));
> >
> > drm_warn(&xe->drm, "Timedout job: seqno=%u, guc_id=%d, flags=0x%lx",
> > - xe_sched_job_seqno(job), e->guc->id, e->flags);
> > - simple_error_capture(e);
> > - xe_devcoredump(e);
> > + xe_sched_job_seqno(job), q->guc->id, q->flags);
> > + simple_error_capture(q);
> > + xe_devcoredump(q);
> > } else {
> > drm_dbg(&xe->drm, "Timedout signaled job: seqno=%u, guc_id=%d, flags=0x%lx",
> > - xe_sched_job_seqno(job), e->guc->id, e->flags);
> > + xe_sched_job_seqno(job), q->guc->id, q->flags);
> > }
> > trace_xe_sched_job_timedout(job);
> >
> > @@ -860,26 +860,26 @@ guc_engine_timedout_job(struct drm_sched_job *drm_job)
> > - * Kernel jobs should never fail, nor should VM jobs if they do
> > - * somethings has gone wrong and the GT needs a reset
> > + * Kernel jobs should never fail, nor should VM jobs; if they do,
> > + * something has gone wrong and the GT needs a reset.
> > */
> > - if (e->flags & ENGINE_FLAG_KERNEL ||
> > - (e->flags & ENGINE_FLAG_VM && !engine_killed(e))) {
> > + if (q->flags & EXEC_QUEUE_FLAG_KERNEL ||
> > + (q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q))) {
> > if (!drm_sched_invalidate_job(drm_job, 2)) {
> > list_add(&drm_job->list, &sched->pending_list);
> > drm_sched_run_wq_start(sched);
> > - xe_gt_reset_async(e->gt);
> > + xe_gt_reset_async(q->gt);
> > goto out;
> > }
> > }
> >
> > /* Engine state now stable, disable scheduling if needed */
> > - if (engine_enabled(e)) {
> > - struct xe_guc *guc = engine_to_guc(e);
> > + if (exec_queue_enabled(q)) {
> > + struct xe_guc *guc = exec_queue_to_guc(q);
> > int ret;
> >
> > - if (engine_reset(e))
> > + if (exec_queue_reset(q))
> > err = -EIO;
> > - set_engine_banned(e);
> > - xe_engine_get(e);
> > - disable_scheduling_deregister(guc, e);
> > + set_exec_queue_banned(q);
> > + xe_exec_queue_get(q);
> > + disable_scheduling_deregister(guc, q);
> >
> > /*
> > * Must wait for scheduling to be disabled before signalling
> > @@ -891,20 +891,20 @@ guc_engine_timedout_job(struct drm_sched_job *drm_job)
> > */
> > smp_rmb();
> > ret = wait_event_timeout(guc->ct.wq,
> > - !engine_pending_disable(e) ||
> > + !exec_queue_pending_disable(q) ||
> > guc_read_stopped(guc), HZ * 5);
> > if (!ret) {
> > XE_WARN_ON("Schedule disable failed to respond");
> > sched->timeout = MIN_SCHED_TIMEOUT;
> > list_add(&drm_job->list, &sched->pending_list);
> > drm_sched_run_wq_start(sched);
> > - xe_gt_reset_async(e->gt);
> > + xe_gt_reset_async(q->gt);
> > goto out;
> > }
> > }
> >
> > /* Stop fence signaling */
> > - xe_hw_fence_irq_stop(e->fence_irq);
> > + xe_hw_fence_irq_stop(q->fence_irq);
> >
> > /*
> > * Fence state now stable, stop / start scheduler which cleans up any
> > @@ -912,7 +912,7 @@ guc_engine_timedout_job(struct drm_sched_job *drm_job)
> > */
> > list_add(&drm_job->list, &sched->pending_list);
> > drm_sched_run_wq_start(sched);
> > - xe_guc_engine_trigger_cleanup(e);
> > + xe_guc_exec_queue_trigger_cleanup(q);
> >
> > /* Mark all outstanding jobs as bad, thus completing them */
> > spin_lock(&sched->job_list_lock);
> > @@ -921,53 +921,53 @@ guc_engine_timedout_job(struct drm_sched_job *drm_job)
> > spin_unlock(&sched->job_list_lock);
> >
> > /* Start fence signaling */
> > - xe_hw_fence_irq_start(e->fence_irq);
> > + xe_hw_fence_irq_start(q->fence_irq);
> >
> > out:
> > return DRM_GPU_SCHED_STAT_NOMINAL;
> > }
> >
> > -static void __guc_engine_fini_async(struct work_struct *w)
> > +static void __guc_exec_queue_fini_async(struct work_struct *w)
> > {
> > - struct xe_guc_engine *ge =
> > - container_of(w, struct xe_guc_engine, fini_async);
> > - struct xe_engine *e = ge->engine;
> > - struct xe_guc *guc = engine_to_guc(e);
> > + struct xe_guc_exec_queue *ge =
> > + container_of(w, struct xe_guc_exec_queue, fini_async);
> > + struct xe_exec_queue *q = ge->q;
> > + struct xe_guc *guc = exec_queue_to_guc(q);
> >
> > - trace_xe_engine_destroy(e);
> > + trace_xe_exec_queue_destroy(q);
> >
> > - if (xe_engine_is_lr(e))
> > + if (xe_exec_queue_is_lr(q))
> > cancel_work_sync(&ge->lr_tdr);
> > - if (e->flags & ENGINE_FLAG_PERSISTENT)
> > - xe_device_remove_persistent_engines(gt_to_xe(e->gt), e);
> > - release_guc_id(guc, e);
> > + if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT)
> > + xe_device_remove_persistent_exec_queues(gt_to_xe(q->gt), q);
> > + release_guc_id(guc, q);
> > drm_sched_entity_fini(&ge->entity);
> > drm_sched_fini(&ge->sched);
> >
> > - if (!(e->flags & ENGINE_FLAG_KERNEL)) {
> > + if (!(q->flags & EXEC_QUEUE_FLAG_KERNEL)) {
> > kfree(ge);
> > - xe_engine_fini(e);
> > + xe_exec_queue_fini(q);
> > }
> > }
> >
> > -static void guc_engine_fini_async(struct xe_engine *e)
> > +static void guc_exec_queue_fini_async(struct xe_exec_queue *q)
> > {
> > - bool kernel = e->flags & ENGINE_FLAG_KERNEL;
> > + bool kernel = q->flags & EXEC_QUEUE_FLAG_KERNEL;
> >
> > - INIT_WORK(&e->guc->fini_async, __guc_engine_fini_async);
> > - queue_work(system_wq, &e->guc->fini_async);
> > + INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async);
> > + queue_work(system_wq, &q->guc->fini_async);
> >
> > /* We must block on kernel engines so slabs are empty on driver unload */
> > if (kernel) {
> > - struct xe_guc_engine *ge = e->guc;
> > + struct xe_guc_exec_queue *ge = q->guc;
> >
> > flush_work(&ge->fini_async);
> > kfree(ge);
> > - xe_engine_fini(e);
> > + xe_exec_queue_fini(q);
> > }
> > }
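
The kernel/non-kernel split here mirrors a pattern worth spelling out:
teardown is punted to a worker because it can be reached from the
scheduler itself, and only kernel queues block so the driver-unload path
sees empty slabs. A userspace analogue, mainly to show why the flag is
sampled before queueing, exactly as the flags read above does (sketch
only; build with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct queue { int kernel; };

static void *fini_worker(void *arg)
{
        struct queue *q = arg;

        printf("fini (kernel=%d)\n", q->kernel);
        free(q);
        return NULL;
}

static void queue_fini(struct queue *q)
{
        /* sample the flag first: the worker may free q immediately */
        int kernel = q->kernel;
        pthread_t t;

        pthread_create(&t, NULL, fini_worker, q);
        if (kernel)
                pthread_join(t, NULL); /* unload must not race the free */
        else
                pthread_detach(t);
}

int main(void)
{
        struct queue *q = malloc(sizeof(*q));

        q->kernel = 1;
        queue_fini(q);
        return 0;
}
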
> >
> > -static void __guc_engine_fini(struct xe_guc *guc, struct xe_engine *e)
> > +static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
> > {
> > /*
> > * Might be done from within the GPU scheduler, need to do async as we
> > @@ -976,104 +976,104 @@ static void __guc_engine_fini(struct xe_guc *guc, struct xe_engine *e)
> > - * this we and don't really care when everything is fini'd, just that it
> > + * this and we don't really care when everything is fini'd, just that it
> > * is.
> > */
> > - guc_engine_fini_async(e);
> > + guc_exec_queue_fini_async(q);
> > }
> >
> > -static void __guc_engine_process_msg_cleanup(struct drm_sched_msg *msg)
> > +static void __guc_exec_queue_process_msg_cleanup(struct drm_sched_msg *msg)
> > {
> > - struct xe_engine *e = msg->private_data;
> > - struct xe_guc *guc = engine_to_guc(e);
> > + struct xe_exec_queue *q = msg->private_data;
> > + struct xe_guc *guc = exec_queue_to_guc(q);
> >
> > - XE_WARN_ON(e->flags & ENGINE_FLAG_KERNEL);
> > - trace_xe_engine_cleanup_entity(e);
> > + XE_WARN_ON(q->flags & EXEC_QUEUE_FLAG_KERNEL);
> > + trace_xe_exec_queue_cleanup_entity(q);
> >
> > - if (engine_registered(e))
> > - disable_scheduling_deregister(guc, e);
> > + if (exec_queue_registered(q))
> > + disable_scheduling_deregister(guc, q);
> > else
> > - __guc_engine_fini(guc, e);
> > + __guc_exec_queue_fini(guc, q);
> > }
> >
> > -static bool guc_engine_allowed_to_change_state(struct xe_engine *e)
> > +static bool guc_exec_queue_allowed_to_change_state(struct xe_exec_queue *q)
> > {
> > - return !engine_killed_or_banned(e) && engine_registered(e);
> > + return !exec_queue_killed_or_banned(q) && exec_queue_registered(q);
> > }
> >
> > -static void __guc_engine_process_msg_set_sched_props(struct drm_sched_msg *msg)
> > +static void __guc_exec_queue_process_msg_set_sched_props(struct drm_sched_msg *msg)
> > {
> > - struct xe_engine *e = msg->private_data;
> > - struct xe_guc *guc = engine_to_guc(e);
> > + struct xe_exec_queue *q = msg->private_data;
> > + struct xe_guc *guc = exec_queue_to_guc(q);
> >
> > - if (guc_engine_allowed_to_change_state(e))
> > - init_policies(guc, e);
> > + if (guc_exec_queue_allowed_to_change_state(q))
> > + init_policies(guc, q);
> > kfree(msg);
> > }
> >
> > -static void suspend_fence_signal(struct xe_engine *e)
> > +static void suspend_fence_signal(struct xe_exec_queue *q)
> > {
> > - struct xe_guc *guc = engine_to_guc(e);
> > + struct xe_guc *guc = exec_queue_to_guc(q);
> >
> > - XE_WARN_ON(!engine_suspended(e) && !engine_killed(e) &&
> > + XE_WARN_ON(!exec_queue_suspended(q) && !exec_queue_killed(q) &&
> > !guc_read_stopped(guc));
> > - XE_WARN_ON(!e->guc->suspend_pending);
> > + XE_WARN_ON(!q->guc->suspend_pending);
> >
> > - e->guc->suspend_pending = false;
> > + q->guc->suspend_pending = false;
> > smp_wmb();
> > - wake_up(&e->guc->suspend_wait);
> > + wake_up(&q->guc->suspend_wait);
> > }
> >
> > -static void __guc_engine_process_msg_suspend(struct drm_sched_msg *msg)
> > +static void __guc_exec_queue_process_msg_suspend(struct drm_sched_msg *msg)
> > {
> > - struct xe_engine *e = msg->private_data;
> > - struct xe_guc *guc = engine_to_guc(e);
> > + struct xe_exec_queue *q = msg->private_data;
> > + struct xe_guc *guc = exec_queue_to_guc(q);
> >
> > - if (guc_engine_allowed_to_change_state(e) && !engine_suspended(e) &&
> > - engine_enabled(e)) {
> > - wait_event(guc->ct.wq, e->guc->resume_time != RESUME_PENDING ||
> > + if (guc_exec_queue_allowed_to_change_state(q) && !exec_queue_suspended(q) &&
> > + exec_queue_enabled(q)) {
> > + wait_event(guc->ct.wq, q->guc->resume_time != RESUME_PENDING ||
> > guc_read_stopped(guc));
> >
> > if (!guc_read_stopped(guc)) {
> > - MAKE_SCHED_CONTEXT_ACTION(e, DISABLE);
> > + MAKE_SCHED_CONTEXT_ACTION(q, DISABLE);
> > s64 since_resume_ms =
> > ktime_ms_delta(ktime_get(),
> > - e->guc->resume_time);
> > - s64 wait_ms = e->vm->preempt.min_run_period_ms -
> > + q->guc->resume_time);
> > + s64 wait_ms = q->vm->preempt.min_run_period_ms -
> > since_resume_ms;
> >
> > - if (wait_ms > 0 && e->guc->resume_time)
> > + if (wait_ms > 0 && q->guc->resume_time)
> > msleep(wait_ms);
> >
> > - set_engine_suspended(e);
> > - clear_engine_enabled(e);
> > - set_engine_pending_disable(e);
> > - trace_xe_engine_scheduling_disable(e);
> > + set_exec_queue_suspended(q);
> > + clear_exec_queue_enabled(q);
> > + set_exec_queue_pending_disable(q);
> > + trace_xe_exec_queue_scheduling_disable(q);
> >
> > xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
> > G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, 1);
> > }
> > - } else if (e->guc->suspend_pending) {
> > - set_engine_suspended(e);
> > - suspend_fence_signal(e);
> > + } else if (q->guc->suspend_pending) {
> > + set_exec_queue_suspended(q);
> > + suspend_fence_signal(q);
> > }
> > }
> >
> > -static void __guc_engine_process_msg_resume(struct drm_sched_msg *msg)
> > +static void __guc_exec_queue_process_msg_resume(struct drm_sched_msg *msg)
> > {
> > - struct xe_engine *e = msg->private_data;
> > - struct xe_guc *guc = engine_to_guc(e);
> > + struct xe_exec_queue *q = msg->private_data;
> > + struct xe_guc *guc = exec_queue_to_guc(q);
> >
> > - if (guc_engine_allowed_to_change_state(e)) {
> > - MAKE_SCHED_CONTEXT_ACTION(e, ENABLE);
> > + if (guc_exec_queue_allowed_to_change_state(q)) {
> > + MAKE_SCHED_CONTEXT_ACTION(q, ENABLE);
> >
> > - e->guc->resume_time = RESUME_PENDING;
> > - clear_engine_suspended(e);
> > - set_engine_pending_enable(e);
> > - set_engine_enabled(e);
> > - trace_xe_engine_scheduling_enable(e);
> > + q->guc->resume_time = RESUME_PENDING;
> > + clear_exec_queue_suspended(q);
> > + set_exec_queue_pending_enable(q);
> > + set_exec_queue_enabled(q);
> > + trace_xe_exec_queue_scheduling_enable(q);
> >
> > xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
> > G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, 1);
> > } else {
> > - clear_engine_suspended(e);
> > + clear_exec_queue_suspended(q);
> > }
> > }
> >
> > @@ -1082,22 +1082,22 @@ static void __guc_engine_process_msg_resume(struct drm_sched_msg *msg)
> > #define SUSPEND 3
> > #define RESUME 4
> >
> > -static void guc_engine_process_msg(struct drm_sched_msg *msg)
> > +static void guc_exec_queue_process_msg(struct drm_sched_msg *msg)
> > {
> > trace_drm_sched_msg_recv(msg);
> >
> > switch (msg->opcode) {
> > case CLEANUP:
> > - __guc_engine_process_msg_cleanup(msg);
> > + __guc_exec_queue_process_msg_cleanup(msg);
> > break;
> > case SET_SCHED_PROPS:
> > - __guc_engine_process_msg_set_sched_props(msg);
> > + __guc_exec_queue_process_msg_set_sched_props(msg);
> > break;
> > case SUSPEND:
> > - __guc_engine_process_msg_suspend(msg);
> > + __guc_exec_queue_process_msg_suspend(msg);
> > break;
> > case RESUME:
> > - __guc_engine_process_msg_resume(msg);
> > + __guc_exec_queue_process_msg_resume(msg);
> > break;
> > default:
> > XE_WARN_ON("Unknown message type");
> > @@ -1105,17 +1105,17 @@ static void guc_engine_process_msg(struct drm_sched_msg *msg)
> > }
> >
> > static const struct drm_sched_backend_ops drm_sched_ops = {
> > - .run_job = guc_engine_run_job,
> > - .free_job = guc_engine_free_job,
> > - .timedout_job = guc_engine_timedout_job,
> > - .process_msg = guc_engine_process_msg,
> > + .run_job = guc_exec_queue_run_job,
> > + .free_job = guc_exec_queue_free_job,
> > + .timedout_job = guc_exec_queue_timedout_job,
> > + .process_msg = guc_exec_queue_process_msg,
> > };
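
Maybe obvious, but the reason none of the __guc_exec_queue_process_msg_*
handlers above take a lock: messages funnel through the scheduler's single
worker, so they are serialized against run_job by construction. Skeleton
of the dispatch, reduced to standalone C (simplified, not the drm_sched
API itself):

#include <stdio.h>

enum { CLEANUP, SET_SCHED_PROPS, SUSPEND, RESUME };

struct msg { int opcode; void *private_data; };

/* runs on the scheduler's one worker thread, never concurrently with
 * run_job, which is what makes the lockless handlers safe */
static void process_msg(struct msg *m)
{
        switch (m->opcode) {
        case SUSPEND: printf("suspend %p\n", m->private_data); break;
        case RESUME:  printf("resume %p\n",  m->private_data); break;
        default:      printf("opcode %d\n",  m->opcode);       break;
        }
}

int main(void)
{
        struct msg m = { .opcode = RESUME, .private_data = NULL };

        process_msg(&m);
        return 0;
}
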
> >
> > -static int guc_engine_init(struct xe_engine *e)
> > +static int guc_exec_queue_init(struct xe_exec_queue *q)
> > {
> > struct drm_gpu_scheduler *sched;
> > - struct xe_guc *guc = engine_to_guc(e);
> > - struct xe_guc_engine *ge;
> > + struct xe_guc *guc = exec_queue_to_guc(q);
> > + struct xe_guc_exec_queue *ge;
> > long timeout;
> > int err;
> >
> > @@ -1125,16 +1125,16 @@ static int guc_engine_init(struct xe_engine *e)
> > if (!ge)
> > return -ENOMEM;
> >
> > - e->guc = ge;
> > - ge->engine = e;
> > + q->guc = ge;
> > + ge->q = q;
> > init_waitqueue_head(&ge->suspend_wait);
> >
> > - timeout = xe_vm_no_dma_fences(e->vm) ? MAX_SCHEDULE_TIMEOUT : HZ * 5;
> > + timeout = xe_vm_no_dma_fences(q->vm) ? MAX_SCHEDULE_TIMEOUT : HZ * 5;
> > err = drm_sched_init(&ge->sched, &drm_sched_ops, NULL,
> > - e->lrc[0].ring.size / MAX_JOB_SIZE_BYTES,
> > + q->lrc[0].ring.size / MAX_JOB_SIZE_BYTES,
> > 64, timeout, guc_to_gt(guc)->ordered_wq, NULL,
> > - e->name, DRM_SCHED_POLICY_SINGLE_ENTITY,
> > - gt_to_xe(e->gt)->drm.dev);
> > + q->name, DRM_SCHED_POLICY_SINGLE_ENTITY,
> > + gt_to_xe(q->gt)->drm.dev);
> > if (err)
> > goto err_free;
> >
> > @@ -1144,43 +1144,43 @@ static int guc_engine_init(struct xe_engine *e)
> > if (err)
> > goto err_sched;
> >
> > - if (xe_engine_is_lr(e))
> > - INIT_WORK(&e->guc->lr_tdr, xe_guc_engine_lr_cleanup);
> > + if (xe_exec_queue_is_lr(q))
> > + INIT_WORK(&q->guc->lr_tdr, xe_guc_exec_queue_lr_cleanup);
> >
> > mutex_lock(&guc->submission_state.lock);
> >
> > - err = alloc_guc_id(guc, e);
> > + err = alloc_guc_id(guc, q);
> > if (err)
> > goto err_entity;
> >
> > - e->entity = &ge->entity;
> > + q->entity = &ge->entity;
> >
> > if (guc_read_stopped(guc))
> > drm_sched_stop(sched, NULL);
> >
> > mutex_unlock(&guc->submission_state.lock);
> >
> > - switch (e->class) {
> > + switch (q->class) {
> > case XE_ENGINE_CLASS_RENDER:
> > - sprintf(e->name, "rcs%d", e->guc->id);
> > + sprintf(q->name, "rcs%d", q->guc->id);
> > break;
> > case XE_ENGINE_CLASS_VIDEO_DECODE:
> > - sprintf(e->name, "vcs%d", e->guc->id);
> > + sprintf(q->name, "vcs%d", q->guc->id);
> > break;
> > case XE_ENGINE_CLASS_VIDEO_ENHANCE:
> > - sprintf(e->name, "vecs%d", e->guc->id);
> > + sprintf(q->name, "vecs%d", q->guc->id);
> > break;
> > case XE_ENGINE_CLASS_COPY:
> > - sprintf(e->name, "bcs%d", e->guc->id);
> > + sprintf(q->name, "bcs%d", q->guc->id);
> > break;
> > case XE_ENGINE_CLASS_COMPUTE:
> > - sprintf(e->name, "ccs%d", e->guc->id);
> > + sprintf(q->name, "ccs%d", q->guc->id);
> > break;
> > default:
> > - XE_WARN_ON(e->class);
> > + XE_WARN_ON(q->class);
> > }
> >
> > - trace_xe_engine_create(e);
> > + trace_xe_exec_queue_create(q);
> >
> > return 0;
> >
> > @@ -1194,132 +1194,132 @@ static int guc_engine_init(struct xe_engine *e)
> > return err;
> > }
> >
> > -static void guc_engine_kill(struct xe_engine *e)
> > +static void guc_exec_queue_kill(struct xe_exec_queue *q)
> > {
> > - trace_xe_engine_kill(e);
> > - set_engine_killed(e);
> > - xe_guc_engine_trigger_cleanup(e);
> > + trace_xe_exec_queue_kill(q);
> > + set_exec_queue_killed(q);
> > + xe_guc_exec_queue_trigger_cleanup(q);
> > }
> >
> > -static void guc_engine_add_msg(struct xe_engine *e, struct drm_sched_msg *msg,
> > - u32 opcode)
> > +static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct drm_sched_msg *msg,
> > + u32 opcode)
> > {
> > INIT_LIST_HEAD(&msg->link);
> > msg->opcode = opcode;
> > - msg->private_data = e;
> > + msg->private_data = q;
> >
> > trace_drm_sched_msg_add(msg);
> > - drm_sched_add_msg(&e->guc->sched, msg);
> > + drm_sched_add_msg(&q->guc->sched, msg);
> > }
> >
> > #define STATIC_MSG_CLEANUP 0
> > #define STATIC_MSG_SUSPEND 1
> > #define STATIC_MSG_RESUME 2
> > -static void guc_engine_fini(struct xe_engine *e)
> > +static void guc_exec_queue_fini(struct xe_exec_queue *q)
> > {
> > - struct drm_sched_msg *msg = e->guc->static_msgs + STATIC_MSG_CLEANUP;
> > + struct drm_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP;
> >
> > - if (!(e->flags & ENGINE_FLAG_KERNEL))
> > - guc_engine_add_msg(e, msg, CLEANUP);
> > + if (!(q->flags & EXEC_QUEUE_FLAG_KERNEL))
> > + guc_exec_queue_add_msg(q, msg, CLEANUP);
> > else
> > - __guc_engine_fini(engine_to_guc(e), e);
> > + __guc_exec_queue_fini(exec_queue_to_guc(q), q);
> > }
> >
> > -static int guc_engine_set_priority(struct xe_engine *e,
> > - enum drm_sched_priority priority)
> > +static int guc_exec_queue_set_priority(struct xe_exec_queue *q,
> > + enum drm_sched_priority priority)
> > {
> > struct drm_sched_msg *msg;
> >
> > - if (e->entity->priority == priority || engine_killed_or_banned(e))
> > + if (q->entity->priority == priority || exec_queue_killed_or_banned(q))
> > return 0;
> >
> > msg = kmalloc(sizeof(*msg), GFP_KERNEL);
> > if (!msg)
> > return -ENOMEM;
> >
> > - guc_engine_add_msg(e, msg, SET_SCHED_PROPS);
> > + guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
> >
> > return 0;
> > }
> >
> > -static int guc_engine_set_timeslice(struct xe_engine *e, u32 timeslice_us)
> > +static int guc_exec_queue_set_timeslice(struct xe_exec_queue *q, u32 timeslice_us)
> > {
> > struct drm_sched_msg *msg;
> >
> > - if (e->sched_props.timeslice_us == timeslice_us ||
> > - engine_killed_or_banned(e))
> > + if (q->sched_props.timeslice_us == timeslice_us ||
> > + exec_queue_killed_or_banned(q))
> > return 0;
> >
> > msg = kmalloc(sizeof(*msg), GFP_KERNEL);
> > if (!msg)
> > return -ENOMEM;
> >
> > - e->sched_props.timeslice_us = timeslice_us;
> > - guc_engine_add_msg(e, msg, SET_SCHED_PROPS);
> > + q->sched_props.timeslice_us = timeslice_us;
> > + guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
> >
> > return 0;
> > }
> >
> > -static int guc_engine_set_preempt_timeout(struct xe_engine *e,
> > - u32 preempt_timeout_us)
> > +static int guc_exec_queue_set_preempt_timeout(struct xe_exec_queue *q,
> > + u32 preempt_timeout_us)
> > {
> > struct drm_sched_msg *msg;
> >
> > - if (e->sched_props.preempt_timeout_us == preempt_timeout_us ||
> > - engine_killed_or_banned(e))
> > + if (q->sched_props.preempt_timeout_us == preempt_timeout_us ||
> > + exec_queue_killed_or_banned(q))
> > return 0;
> >
> > msg = kmalloc(sizeof(*msg), GFP_KERNEL);
> > if (!msg)
> > return -ENOMEM;
> >
> > - e->sched_props.preempt_timeout_us = preempt_timeout_us;
> > - guc_engine_add_msg(e, msg, SET_SCHED_PROPS);
> > + q->sched_props.preempt_timeout_us = preempt_timeout_us;
> > + guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
> >
> > return 0;
> > }
> >
> > -static int guc_engine_set_job_timeout(struct xe_engine *e, u32 job_timeout_ms)
> > +static int guc_exec_queue_set_job_timeout(struct xe_exec_queue *q, u32 job_timeout_ms)
> > {
> > - struct drm_gpu_scheduler *sched = &e->guc->sched;
> > + struct drm_gpu_scheduler *sched = &q->guc->sched;
> >
> > - XE_WARN_ON(engine_registered(e));
> > - XE_WARN_ON(engine_banned(e));
> > - XE_WARN_ON(engine_killed(e));
> > + XE_WARN_ON(exec_queue_registered(q));
> > + XE_WARN_ON(exec_queue_banned(q));
> > + XE_WARN_ON(exec_queue_killed(q));
> >
> > sched->timeout = job_timeout_ms;
> >
> > return 0;
> > }
> >
> > -static int guc_engine_suspend(struct xe_engine *e)
> > +static int guc_exec_queue_suspend(struct xe_exec_queue *q)
> > {
> > - struct drm_sched_msg *msg = e->guc->static_msgs + STATIC_MSG_SUSPEND;
> > + struct drm_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_SUSPEND;
> >
> > - if (engine_killed_or_banned(e) || e->guc->suspend_pending)
> > + if (exec_queue_killed_or_banned(q) || q->guc->suspend_pending)
> > return -EINVAL;
> >
> > - e->guc->suspend_pending = true;
> > - guc_engine_add_msg(e, msg, SUSPEND);
> > + q->guc->suspend_pending = true;
> > + guc_exec_queue_add_msg(q, msg, SUSPEND);
> >
> > return 0;
> > }
> >
> > -static void guc_engine_suspend_wait(struct xe_engine *e)
> > +static void guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
> > {
> > - struct xe_guc *guc = engine_to_guc(e);
> > + struct xe_guc *guc = exec_queue_to_guc(q);
> >
> > - wait_event(e->guc->suspend_wait, !e->guc->suspend_pending ||
> > + wait_event(q->guc->suspend_wait, !q->guc->suspend_pending ||
> > guc_read_stopped(guc));
> > }
> >
> > -static void guc_engine_resume(struct xe_engine *e)
> > +static void guc_exec_queue_resume(struct xe_exec_queue *q)
> > {
> > - struct drm_sched_msg *msg = e->guc->static_msgs + STATIC_MSG_RESUME;
> > + struct drm_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_RESUME;
> >
> > - XE_WARN_ON(e->guc->suspend_pending);
> > + XE_WARN_ON(q->guc->suspend_pending);
> >
> > - guc_engine_add_msg(e, msg, RESUME);
> > + guc_exec_queue_add_msg(q, msg, RESUME);
> > }
> >
> > /*
> > @@ -1328,49 +1328,49 @@ static void guc_engine_resume(struct xe_engine *e)
> > * really shouldn't do much other than trap into the DRM scheduler which
> > * synchronizes these operations.
> > */
> > -static const struct xe_engine_ops guc_engine_ops = {
> > - .init = guc_engine_init,
> > - .kill = guc_engine_kill,
> > - .fini = guc_engine_fini,
> > - .set_priority = guc_engine_set_priority,
> > - .set_timeslice = guc_engine_set_timeslice,
> > - .set_preempt_timeout = guc_engine_set_preempt_timeout,
> > - .set_job_timeout = guc_engine_set_job_timeout,
> > - .suspend = guc_engine_suspend,
> > - .suspend_wait = guc_engine_suspend_wait,
> > - .resume = guc_engine_resume,
> > +static const struct xe_exec_queue_ops guc_exec_queue_ops = {
> > + .init = guc_exec_queue_init,
> > + .kill = guc_exec_queue_kill,
> > + .fini = guc_exec_queue_fini,
> > + .set_priority = guc_exec_queue_set_priority,
> > + .set_timeslice = guc_exec_queue_set_timeslice,
> > + .set_preempt_timeout = guc_exec_queue_set_preempt_timeout,
> > + .set_job_timeout = guc_exec_queue_set_job_timeout,
> > + .suspend = guc_exec_queue_suspend,
> > + .suspend_wait = guc_exec_queue_suspend_wait,
> > + .resume = guc_exec_queue_resume,
> > };
> >
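
A note for reviewers working through the hunks below: every backend
operation goes through this vtable, so generic code only ever dispatches
via q->ops and never calls a guc_* function directly. Roughly (the
wrapper name here is made up for illustration, only q->ops comes from
this patch):

    /* backend-agnostic dispatch; GuC or execlist ops sit behind q->ops */
    static int exec_queue_set_timeslice(struct xe_exec_queue *q, u32 timeslice_us)
    {
            return q->ops->set_timeslice(q, timeslice_us);
    }

xe_preempt_fence.c further down relies on the same indirection through
q->ops->suspend() and q->ops->suspend_wait().
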
> > -static void guc_engine_stop(struct xe_guc *guc, struct xe_engine *e)
> > +static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
> > {
> > - struct drm_gpu_scheduler *sched = &e->guc->sched;
> > + struct drm_gpu_scheduler *sched = &q->guc->sched;
> >
> > /* Stop scheduling + flush any DRM scheduler operations */
> > drm_sched_run_wq_stop(sched);
> >
> > /* Clean up lost G2H + reset engine state */
> > - if (engine_registered(e)) {
> > - if ((engine_banned(e) && engine_destroyed(e)) ||
> > - xe_engine_is_lr(e))
> > - xe_engine_put(e);
> > - else if (engine_destroyed(e))
> > - __guc_engine_fini(guc, e);
> > + if (exec_queue_registered(q)) {
> > + if ((exec_queue_banned(q) && exec_queue_destroyed(q)) ||
> > + xe_exec_queue_is_lr(q))
> > + xe_exec_queue_put(q);
> > + else if (exec_queue_destroyed(q))
> > + __guc_exec_queue_fini(guc, q);
> > }
> > - if (e->guc->suspend_pending) {
> > - set_engine_suspended(e);
> > - suspend_fence_signal(e);
> > + if (q->guc->suspend_pending) {
> > + set_exec_queue_suspended(q);
> > + suspend_fence_signal(q);
> > }
> > - atomic_and(ENGINE_STATE_DESTROYED | ENGINE_STATE_SUSPENDED,
> > - &e->guc->state);
> > - e->guc->resume_time = 0;
> > - trace_xe_engine_stop(e);
> > + atomic_and(EXEC_QUEUE_STATE_DESTROYED | EXEC_QUEUE_STATE_SUSPENDED,
> > + &q->guc->state);
> > + q->guc->resume_time = 0;
> > + trace_xe_exec_queue_stop(q);
> >
> > /*
> > * Ban any engine (aside from kernel and engines used for VM ops) with a
> > * started but not complete job or if a job has gone through a GT reset
> > * more than twice.
> > */
> > - if (!(e->flags & (ENGINE_FLAG_KERNEL | ENGINE_FLAG_VM))) {
> > + if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM))) {
> > struct drm_sched_job *drm_job =
> > list_first_entry_or_null(&sched->pending_list,
> > struct drm_sched_job, list);
> > @@ -1383,7 +1383,7 @@ static void guc_engine_stop(struct xe_guc *guc, struct xe_engine *e)
> > drm_sched_invalidate_job(drm_job, 2)) {
> > trace_xe_sched_job_ban(job);
> > sched->timeout = MIN_SCHED_TIMEOUT;
> > - set_engine_banned(e);
> > + set_exec_queue_banned(q);
> > }
> > }
> > }
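
Worth noting on the atomic_and() above: the schedule state is a bitmask,
and the stop path deliberately clears everything except the destroyed
and suspended bits. An assertion of that invariant would look something
like this (a sketch, not part of the patch):

    u32 state = atomic_read(&q->guc->state);

    /* after guc_exec_queue_stop() only these two bits may survive */
    XE_WARN_ON(state & ~(EXEC_QUEUE_STATE_DESTROYED | EXEC_QUEUE_STATE_SUSPENDED));
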
> > @@ -1414,15 +1414,15 @@ void xe_guc_submit_reset_wait(struct xe_guc *guc)
> >
> > int xe_guc_submit_stop(struct xe_guc *guc)
> > {
> > - struct xe_engine *e;
> > + struct xe_exec_queue *q;
> > unsigned long index;
> >
> > XE_WARN_ON(guc_read_stopped(guc) != 1);
> >
> > mutex_lock(&guc->submission_state.lock);
> >
> > - xa_for_each(&guc->submission_state.engine_lookup, index, e)
> > - guc_engine_stop(guc, e);
> > + xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
> > + guc_exec_queue_stop(guc, q);
> >
> > mutex_unlock(&guc->submission_state.lock);
> >
> > @@ -1434,16 +1434,16 @@ int xe_guc_submit_stop(struct xe_guc *guc)
> > return 0;
> > }
> >
> > -static void guc_engine_start(struct xe_engine *e)
> > +static void guc_exec_queue_start(struct xe_exec_queue *q)
> > {
> > - struct drm_gpu_scheduler *sched = &e->guc->sched;
> > + struct drm_gpu_scheduler *sched = &q->guc->sched;
> >
> > - if (!engine_killed_or_banned(e)) {
> > + if (!exec_queue_killed_or_banned(q)) {
> > int i;
> >
> > - trace_xe_engine_resubmit(e);
> > - for (i = 0; i < e->width; ++i)
> > - xe_lrc_set_ring_head(e->lrc + i, e->lrc[i].ring.tail);
> > + trace_xe_exec_queue_resubmit(q);
> > + for (i = 0; i < q->width; ++i)
> > + xe_lrc_set_ring_head(q->lrc + i, q->lrc[i].ring.tail);
> > drm_sched_resubmit_jobs(sched);
> > }
> >
> > @@ -1453,15 +1453,15 @@ static void guc_engine_start(struct xe_engine *e)
> >
> > int xe_guc_submit_start(struct xe_guc *guc)
> > {
> > - struct xe_engine *e;
> > + struct xe_exec_queue *q;
> > unsigned long index;
> >
> > XE_WARN_ON(guc_read_stopped(guc) != 1);
> >
> > mutex_lock(&guc->submission_state.lock);
> > atomic_dec(&guc->submission_state.stopped);
> > - xa_for_each(&guc->submission_state.engine_lookup, index, e)
> > - guc_engine_start(e);
> > + xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
> > + guc_exec_queue_start(q);
> > mutex_unlock(&guc->submission_state.lock);
> >
> > wake_up_all(&guc->ct.wq);
> > @@ -1469,36 +1469,36 @@ int xe_guc_submit_start(struct xe_guc *guc)
> > return 0;
> > }
> >
> > -static struct xe_engine *
> > -g2h_engine_lookup(struct xe_guc *guc, u32 guc_id)
> > +static struct xe_exec_queue *
> > +g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id)
> > {
> > struct xe_device *xe = guc_to_xe(guc);
> > - struct xe_engine *e;
> > + struct xe_exec_queue *q;
> >
> > if (unlikely(guc_id >= GUC_ID_MAX)) {
> > drm_err(&xe->drm, "Invalid guc_id %u", guc_id);
> > return NULL;
> > }
> >
> > - e = xa_load(&guc->submission_state.engine_lookup, guc_id);
> > - if (unlikely(!e)) {
> > + q = xa_load(&guc->submission_state.exec_queue_lookup, guc_id);
> > + if (unlikely(!q)) {
> > drm_err(&xe->drm, "Not engine present for guc_id %u", guc_id);
> > return NULL;
> > }
> >
> > - XE_WARN_ON(e->guc->id != guc_id);
> > + XE_WARN_ON(q->guc->id != guc_id);
> >
> > - return e;
> > + return q;
> > }
> >
> > -static void deregister_engine(struct xe_guc *guc, struct xe_engine *e)
> > +static void deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q)
> > {
> > u32 action[] = {
> > XE_GUC_ACTION_DEREGISTER_CONTEXT,
> > - e->guc->id,
> > + q->guc->id,
> > };
> >
> > - trace_xe_engine_deregister(e);
> > + trace_xe_exec_queue_deregister(q);
> >
> > xe_guc_ct_send_g2h_handler(&guc->ct, action, ARRAY_SIZE(action));
> > }
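
All the G2H handlers below share the same shape: validate the message
length, resolve msg[0] (the guc_id) through g2h_exec_queue_lookup(),
then act on the queue. Schematically (the handler name is invented, the
helpers are the ones above):

    int xe_guc_some_g2h_handler(struct xe_guc *guc, u32 *msg, u32 len)
    {
            struct xe_exec_queue *q;

            if (unlikely(len < 1))
                    return -EPROTO;         /* malformed G2H message */

            q = g2h_exec_queue_lookup(guc, msg[0]);
            if (unlikely(!q))
                    return -EPROTO;         /* unknown or stale guc_id */

            /* ... update queue state, possibly trigger cleanup ... */
            return 0;
    }
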
> > @@ -1506,7 +1506,7 @@ static void deregister_engine(struct xe_guc *guc, struct xe_engine *e)
> > int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
> > {
> > struct xe_device *xe = guc_to_xe(guc);
> > - struct xe_engine *e;
> > + struct xe_exec_queue *q;
> > u32 guc_id = msg[0];
> >
> > if (unlikely(len < 2)) {
> > @@ -1514,34 +1514,34 @@ int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
> > return -EPROTO;
> > }
> >
> > - e = g2h_engine_lookup(guc, guc_id);
> > - if (unlikely(!e))
> > + q = g2h_exec_queue_lookup(guc, guc_id);
> > + if (unlikely(!q))
> > return -EPROTO;
> >
> > - if (unlikely(!engine_pending_enable(e) &&
> > - !engine_pending_disable(e))) {
> > + if (unlikely(!exec_queue_pending_enable(q) &&
> > + !exec_queue_pending_disable(q))) {
> > drm_err(&xe->drm, "Unexpected engine state 0x%04x",
> > - atomic_read(&e->guc->state));
> > + atomic_read(&q->guc->state));
> > return -EPROTO;
> > }
> >
> > - trace_xe_engine_scheduling_done(e);
> > + trace_xe_exec_queue_scheduling_done(q);
> >
> > - if (engine_pending_enable(e)) {
> > - e->guc->resume_time = ktime_get();
> > - clear_engine_pending_enable(e);
> > + if (exec_queue_pending_enable(q)) {
> > + q->guc->resume_time = ktime_get();
> > + clear_exec_queue_pending_enable(q);
> > smp_wmb();
> > wake_up_all(&guc->ct.wq);
> > } else {
> > - clear_engine_pending_disable(e);
> > - if (e->guc->suspend_pending) {
> > - suspend_fence_signal(e);
> > + clear_exec_queue_pending_disable(q);
> > + if (q->guc->suspend_pending) {
> > + suspend_fence_signal(q);
> > } else {
> > - if (engine_banned(e)) {
> > + if (exec_queue_banned(q)) {
> > smp_wmb();
> > wake_up_all(&guc->ct.wq);
> > }
> > - deregister_engine(guc, e);
> > + deregister_exec_queue(guc, q);
> > }
> > }
> >
> > @@ -1551,7 +1551,7 @@ int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
> > int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
> > {
> > struct xe_device *xe = guc_to_xe(guc);
> > - struct xe_engine *e;
> > + struct xe_exec_queue *q;
> > u32 guc_id = msg[0];
> >
> > if (unlikely(len < 1)) {
> > @@ -1559,33 +1559,33 @@ int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
> > return -EPROTO;
> > }
> >
> > - e = g2h_engine_lookup(guc, guc_id);
> > - if (unlikely(!e))
> > + q = g2h_exec_queue_lookup(guc, guc_id);
> > + if (unlikely(!q))
> > return -EPROTO;
> >
> > - if (!engine_destroyed(e) || engine_pending_disable(e) ||
> > - engine_pending_enable(e) || engine_enabled(e)) {
> > + if (!exec_queue_destroyed(q) || exec_queue_pending_disable(q) ||
> > + exec_queue_pending_enable(q) || exec_queue_enabled(q)) {
> > drm_err(&xe->drm, "Unexpected engine state 0x%04x",
> > - atomic_read(&e->guc->state));
> > + atomic_read(&q->guc->state));
> > return -EPROTO;
> > }
> >
> > - trace_xe_engine_deregister_done(e);
> > + trace_xe_exec_queue_deregister_done(q);
> >
> > - clear_engine_registered(e);
> > + clear_exec_queue_registered(q);
> >
> > - if (engine_banned(e) || xe_engine_is_lr(e))
> > - xe_engine_put(e);
> > + if (exec_queue_banned(q) || xe_exec_queue_is_lr(q))
> > + xe_exec_queue_put(q);
> > else
> > - __guc_engine_fini(guc, e);
> > + __guc_exec_queue_fini(guc, q);
> >
> > return 0;
> > }
> >
> > -int xe_guc_engine_reset_handler(struct xe_guc *guc, u32 *msg, u32 len)
> > +int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len)
> > {
> > struct xe_device *xe = guc_to_xe(guc);
> > - struct xe_engine *e;
> > + struct xe_exec_queue *q;
> > u32 guc_id = msg[0];
> >
> > if (unlikely(len < 1)) {
> > @@ -1593,34 +1593,34 @@ int xe_guc_engine_reset_handler(struct xe_guc *guc, u32 *msg, u32 len)
> > return -EPROTO;
> > }
> >
> > - e = g2h_engine_lookup(guc, guc_id);
> > - if (unlikely(!e))
> > + q = g2h_exec_queue_lookup(guc, guc_id);
> > + if (unlikely(!q))
> > return -EPROTO;
> >
> > drm_warn(&xe->drm, "Engine reset: guc_id=%d", guc_id);
> >
> > /* FIXME: Do error capture, most likely async */
> >
> > - trace_xe_engine_reset(e);
> > + trace_xe_exec_queue_reset(q);
> >
> > /*
> > * A banned engine is a NOP at this point (came from
> > - * guc_engine_timedout_job). Otherwise, kick drm scheduler to cancel
> > + * guc_exec_queue_timedout_job). Otherwise, kick drm scheduler to cancel
> > * jobs by setting timeout of the job to the minimum value kicking
> > - * guc_engine_timedout_job.
> > + * guc_exec_queue_timedout_job.
> > */
> > - set_engine_reset(e);
> > - if (!engine_banned(e))
> > - xe_guc_engine_trigger_cleanup(e);
> > + set_exec_queue_reset(q);
> > + if (!exec_queue_banned(q))
> > + xe_guc_exec_queue_trigger_cleanup(q);
> >
> > return 0;
> > }
> >
> > -int xe_guc_engine_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
> > - u32 len)
> > +int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
> > + u32 len)
> > {
> > struct xe_device *xe = guc_to_xe(guc);
> > - struct xe_engine *e;
> > + struct xe_exec_queue *q;
> > u32 guc_id = msg[0];
> >
> > if (unlikely(len < 1)) {
> > @@ -1628,22 +1628,22 @@ int xe_guc_engine_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
> > return -EPROTO;
> > }
> >
> > - e = g2h_engine_lookup(guc, guc_id);
> > - if (unlikely(!e))
> > + q = g2h_exec_queue_lookup(guc, guc_id);
> > + if (unlikely(!q))
> > return -EPROTO;
> >
> > drm_warn(&xe->drm, "Engine memory cat error: guc_id=%d", guc_id);
> > - trace_xe_engine_memory_cat_error(e);
> > + trace_xe_exec_queue_memory_cat_error(q);
> >
> > /* Treat the same as engine reset */
> > - set_engine_reset(e);
> > - if (!engine_banned(e))
> > - xe_guc_engine_trigger_cleanup(e);
> > + set_exec_queue_reset(q);
> > + if (!exec_queue_banned(q))
> > + xe_guc_exec_queue_trigger_cleanup(q);
> >
> > return 0;
> > }
> >
> > -int xe_guc_engine_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len)
> > +int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len)
> > {
> > struct xe_device *xe = guc_to_xe(guc);
> > u8 guc_class, instance;
> > @@ -1668,16 +1668,16 @@ int xe_guc_engine_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len)
> > }
> >
> > static void
> > -guc_engine_wq_snapshot_capture(struct xe_engine *e,
> > - struct xe_guc_submit_engine_snapshot *snapshot)
> > +guc_exec_queue_wq_snapshot_capture(struct xe_exec_queue *q,
> > + struct xe_guc_submit_exec_queue_snapshot *snapshot)
> > {
> > - struct xe_guc *guc = engine_to_guc(e);
> > + struct xe_guc *guc = exec_queue_to_guc(q);
> > struct xe_device *xe = guc_to_xe(guc);
> > - struct iosys_map map = xe_lrc_parallel_map(e->lrc);
> > + struct iosys_map map = xe_lrc_parallel_map(q->lrc);
> > int i;
> >
> > - snapshot->guc.wqi_head = e->guc->wqi_head;
> > - snapshot->guc.wqi_tail = e->guc->wqi_tail;
> > + snapshot->guc.wqi_head = q->guc->wqi_head;
> > + snapshot->guc.wqi_tail = q->guc->wqi_tail;
> > snapshot->parallel.wq_desc.head = parallel_read(xe, map, wq_desc.head);
> > snapshot->parallel.wq_desc.tail = parallel_read(xe, map, wq_desc.tail);
> > snapshot->parallel.wq_desc.status = parallel_read(xe, map,
> > @@ -1694,8 +1694,8 @@ guc_engine_wq_snapshot_capture(struct xe_engine *e,
> > }
> >
> > static void
> > -guc_engine_wq_snapshot_print(struct xe_guc_submit_engine_snapshot *snapshot,
> > - struct drm_printer *p)
> > +guc_exec_queue_wq_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snapshot,
> > + struct drm_printer *p)
> > {
> > int i;
> >
> > @@ -1716,23 +1716,23 @@ guc_engine_wq_snapshot_print(struct xe_guc_submit_engine_snapshot *snapshot,
> > }
> >
> > /**
> > - * xe_guc_engine_snapshot_capture - Take a quick snapshot of the GuC Engine.
> > + * xe_guc_exec_queue_snapshot_capture - Take a quick snapshot of the GuC Exec Queue.
> > - * @e: Xe Engine.
> > + * @q: Xe exec queue.
> > *
> > * This can be printed out in a later stage like during dev_coredump
> > * analysis.
> > *
> > * Returns: a GuC Submit Engine snapshot object that must be freed by the
> > - * caller, using `xe_guc_engine_snapshot_free`.
> > + * caller, using `xe_guc_exec_queue_snapshot_free`.
> > */
> > -struct xe_guc_submit_engine_snapshot *
> > -xe_guc_engine_snapshot_capture(struct xe_engine *e)
> > +struct xe_guc_submit_exec_queue_snapshot *
> > +xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q)
> > {
> > - struct xe_guc *guc = engine_to_guc(e);
> > + struct xe_guc *guc = exec_queue_to_guc(q);
> > struct xe_device *xe = guc_to_xe(guc);
> > - struct drm_gpu_scheduler *sched = &e->guc->sched;
> > + struct drm_gpu_scheduler *sched = &q->guc->sched;
> > struct xe_sched_job *job;
> > - struct xe_guc_submit_engine_snapshot *snapshot;
> > + struct xe_guc_submit_exec_queue_snapshot *snapshot;
> > int i;
> >
> > snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);
> > @@ -1742,25 +1742,25 @@ xe_guc_engine_snapshot_capture(struct xe_engine *e)
> > return NULL;
> > }
> >
> > - snapshot->guc.id = e->guc->id;
> > - memcpy(&snapshot->name, &e->name, sizeof(snapshot->name));
> > - snapshot->class = e->class;
> > - snapshot->logical_mask = e->logical_mask;
> > - snapshot->width = e->width;
> > - snapshot->refcount = kref_read(&e->refcount);
> > + snapshot->guc.id = q->guc->id;
> > + memcpy(&snapshot->name, &q->name, sizeof(snapshot->name));
> > + snapshot->class = q->class;
> > + snapshot->logical_mask = q->logical_mask;
> > + snapshot->width = q->width;
> > + snapshot->refcount = kref_read(&q->refcount);
> > snapshot->sched_timeout = sched->timeout;
> > - snapshot->sched_props.timeslice_us = e->sched_props.timeslice_us;
> > + snapshot->sched_props.timeslice_us = q->sched_props.timeslice_us;
> > snapshot->sched_props.preempt_timeout_us =
> > - e->sched_props.preempt_timeout_us;
> > + q->sched_props.preempt_timeout_us;
> >
> > - snapshot->lrc = kmalloc_array(e->width, sizeof(struct lrc_snapshot),
> > + snapshot->lrc = kmalloc_array(q->width, sizeof(struct lrc_snapshot),
> > GFP_ATOMIC);
> >
> > if (!snapshot->lrc) {
> > drm_err(&xe->drm, "Skipping GuC Engine LRC snapshot.\n");
> > } else {
> > - for (i = 0; i < e->width; ++i) {
> > - struct xe_lrc *lrc = e->lrc + i;
> > + for (i = 0; i < q->width; ++i) {
> > + struct xe_lrc *lrc = q->lrc + i;
> >
> > snapshot->lrc[i].context_desc =
> > lower_32_bits(xe_lrc_ggtt_addr(lrc));
> > @@ -1773,12 +1773,12 @@ xe_guc_engine_snapshot_capture(struct xe_engine *e)
> > }
> > }
> >
> > - snapshot->schedule_state = atomic_read(&e->guc->state);
> > - snapshot->engine_flags = e->flags;
> > + snapshot->schedule_state = atomic_read(&q->guc->state);
> > + snapshot->exec_queue_flags = q->flags;
> >
> > - snapshot->parallel_execution = xe_engine_is_parallel(e);
> > + snapshot->parallel_execution = xe_exec_queue_is_parallel(q);
> > if (snapshot->parallel_execution)
> > - guc_engine_wq_snapshot_capture(e, snapshot);
> > + guc_exec_queue_wq_snapshot_capture(q, snapshot);
> >
> > spin_lock(&sched->job_list_lock);
> > snapshot->pending_list_size = list_count_nodes(&sched->pending_list);
> > @@ -1808,15 +1808,15 @@ xe_guc_engine_snapshot_capture(struct xe_engine *e)
> > }
> >
> > /**
> > - * xe_guc_engine_snapshot_print - Print out a given GuC Engine snapshot.
> > + * xe_guc_exec_queue_snapshot_print - Print out a given GuC Exec Queue snapshot.
> > * @snapshot: GuC Submit Engine snapshot object.
> > * @p: drm_printer where it will be printed out.
> > *
> > * This function prints out a given GuC Submit Engine snapshot object.
> > */
> > void
> > -xe_guc_engine_snapshot_print(struct xe_guc_submit_engine_snapshot *snapshot,
> > - struct drm_printer *p)
> > +xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snapshot,
> > + struct drm_printer *p)
> > {
> > int i;
> >
> > @@ -1848,10 +1848,10 @@ xe_guc_engine_snapshot_print(struct xe_guc_submit_engine_snapshot *snapshot,
> > drm_printf(p, "\tSeqno: (memory) %d\n", snapshot->lrc[i].seqno);
> > }
> > drm_printf(p, "\tSchedule State: 0x%x\n", snapshot->schedule_state);
> > - drm_printf(p, "\tFlags: 0x%lx\n", snapshot->engine_flags);
> > + drm_printf(p, "\tFlags: 0x%lx\n", snapshot->exec_queue_flags);
> >
> > if (snapshot->parallel_execution)
> > - guc_engine_wq_snapshot_print(snapshot, p);
> > + guc_exec_queue_wq_snapshot_print(snapshot, p);
> >
> > for (i = 0; snapshot->pending_list && i < snapshot->pending_list_size;
> > i++)
> > @@ -1862,14 +1862,14 @@ xe_guc_engine_snapshot_print(struct xe_guc_submit_engine_snapshot *snapshot,
> > }
> >
> > /**
> > - * xe_guc_engine_snapshot_free - Free all allocated objects for a given
> > + * xe_guc_exec_queue_snapshot_free - Free all allocated objects for a given
> > * snapshot.
> > * @snapshot: GuC Submit Engine snapshot object.
> > *
> > * This function free all the memory that needed to be allocated at capture
> > * time.
> > */
> > -void xe_guc_engine_snapshot_free(struct xe_guc_submit_engine_snapshot *snapshot)
> > +void xe_guc_exec_queue_snapshot_free(struct xe_guc_submit_exec_queue_snapshot *snapshot)
> > {
> > if (!snapshot)
> > return;
> > @@ -1879,13 +1879,13 @@ void xe_guc_engine_snapshot_free(struct xe_guc_submit_engine_snapshot *snapshot)
> > kfree(snapshot);
> > }
> >
> > -static void guc_engine_print(struct xe_engine *e, struct drm_printer *p)
> > +static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p)
> > {
> > - struct xe_guc_submit_engine_snapshot *snapshot;
> > + struct xe_guc_submit_exec_queue_snapshot *snapshot;
> >
> > - snapshot = xe_guc_engine_snapshot_capture(e);
> > - xe_guc_engine_snapshot_print(snapshot, p);
> > - xe_guc_engine_snapshot_free(snapshot);
> > + snapshot = xe_guc_exec_queue_snapshot_capture(q);
> > + xe_guc_exec_queue_snapshot_print(snapshot, p);
> > + xe_guc_exec_queue_snapshot_free(snapshot);
> > }
> >
> > /**
> > @@ -1897,14 +1897,14 @@ static void guc_engine_print(struct xe_engine *e, struct drm_printer *p)
> > */
> > void xe_guc_submit_print(struct xe_guc *guc, struct drm_printer *p)
> > {
> > - struct xe_engine *e;
> > + struct xe_exec_queue *q;
> > unsigned long index;
> >
> > if (!xe_device_guc_submission_enabled(guc_to_xe(guc)))
> > return;
> >
> > mutex_lock(&guc->submission_state.lock);
> > - xa_for_each(&guc->submission_state.engine_lookup, index, e)
> > - guc_engine_print(e, p);
> > + xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
> > + guc_exec_queue_print(q, p);
> > mutex_unlock(&guc->submission_state.lock);
> > }
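
The snapshot API keeps its capture/print/free triple, only the names
change. Typical usage is exactly what guc_exec_queue_print() above does:

    struct xe_guc_submit_exec_queue_snapshot *snapshot;

    snapshot = xe_guc_exec_queue_snapshot_capture(q); /* GFP_ATOMIC, NULL on failure */
    xe_guc_exec_queue_snapshot_print(snapshot, p);
    xe_guc_exec_queue_snapshot_free(snapshot);        /* NULL-safe */
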
> > diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h
> > index 4153c2d22013..fc97869c5b86 100644
> > --- a/drivers/gpu/drm/xe/xe_guc_submit.h
> > +++ b/drivers/gpu/drm/xe/xe_guc_submit.h
> > @@ -9,7 +9,7 @@
> > #include <linux/types.h>
> >
> > struct drm_printer;
> > -struct xe_engine;
> > +struct xe_exec_queue;
> > struct xe_guc;
> >
> > int xe_guc_submit_init(struct xe_guc *guc);
> > @@ -21,18 +21,18 @@ int xe_guc_submit_start(struct xe_guc *guc);
> >
> > int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
> > int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
> > -int xe_guc_engine_reset_handler(struct xe_guc *guc, u32 *msg, u32 len);
> > -int xe_guc_engine_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
> > - u32 len);
> > -int xe_guc_engine_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len);
> > +int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len);
> > +int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
> > + u32 len);
> > +int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len);
> >
> > -struct xe_guc_submit_engine_snapshot *
> > -xe_guc_engine_snapshot_capture(struct xe_engine *e);
> > +struct xe_guc_submit_exec_queue_snapshot *
> > +xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q);
> > void
> > -xe_guc_engine_snapshot_print(struct xe_guc_submit_engine_snapshot *snapshot,
> > - struct drm_printer *p);
> > +xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snapshot,
> > + struct drm_printer *p);
> > void
> > -xe_guc_engine_snapshot_free(struct xe_guc_submit_engine_snapshot *snapshot);
> > +xe_guc_exec_queue_snapshot_free(struct xe_guc_submit_exec_queue_snapshot *snapshot);
> > void xe_guc_submit_print(struct xe_guc *guc, struct drm_printer *p);
> >
> > #endif
> > diff --git a/drivers/gpu/drm/xe/xe_guc_submit_types.h b/drivers/gpu/drm/xe/xe_guc_submit_types.h
> > index 6765b2c6eab1..649b0a852692 100644
> > --- a/drivers/gpu/drm/xe/xe_guc_submit_types.h
> > +++ b/drivers/gpu/drm/xe/xe_guc_submit_types.h
> > @@ -79,20 +79,20 @@ struct pending_list_snapshot {
> > };
> >
> > /**
> > - * struct xe_guc_submit_engine_snapshot - Snapshot for devcoredump
> > + * struct xe_guc_submit_exec_queue_snapshot - Snapshot for devcoredump
> > */
> > -struct xe_guc_submit_engine_snapshot {
> > - /** @name: name of this engine */
> > +struct xe_guc_submit_exec_queue_snapshot {
> > + /** @name: name of this exec queue */
> > char name[MAX_FENCE_NAME_LEN];
> > - /** @class: class of this engine */
> > + /** @class: class of this exec queue */
> > enum xe_engine_class class;
> > /**
> > - * @logical_mask: logical mask of where job submitted to engine can run
> > + * @logical_mask: logical mask of where job submitted to exec queue can run
> > */
> > u32 logical_mask;
> > - /** @width: width (number BB submitted per exec) of this engine */
> > + /** @width: width (number BB submitted per exec) of this exec queue */
> > u16 width;
> > - /** @refcount: ref count of this engine */
> > + /** @refcount: ref count of this exec queue */
> > u32 refcount;
> > /**
> > * @sched_timeout: the time after which a job is removed from the
> > @@ -113,8 +113,8 @@ struct xe_guc_submit_engine_snapshot {
> >
> > /** @schedule_state: Schedule State at the moment of Crash */
> > u32 schedule_state;
> > - /** @engine_flags: Flags of the faulty engine */
> > - unsigned long engine_flags;
> > + /** @exec_queue_flags: Flags of the faulty exec_queue */
> > + unsigned long exec_queue_flags;
> >
> > /** @guc: GuC Engine Snapshot */
> > struct {
> > @@ -122,7 +122,7 @@ struct xe_guc_submit_engine_snapshot {
> > u32 wqi_head;
> > /** @wqi_tail: work queue item tail */
> > u32 wqi_tail;
> > - /** @id: GuC id for this xe_engine */
> > + /** @id: GuC id for this exec_queue */
> > u16 id;
> > } guc;
> >
> > diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
> > index a304dce4e9f4..a5e58917a499 100644
> > --- a/drivers/gpu/drm/xe/xe_guc_types.h
> > +++ b/drivers/gpu/drm/xe/xe_guc_types.h
> > @@ -33,8 +33,8 @@ struct xe_guc {
> > struct xe_guc_pc pc;
> > /** @submission_state: GuC submission state */
> > struct {
> > - /** @engine_lookup: Lookup an xe_engine from guc_id */
> > - struct xarray engine_lookup;
> > + /** @exec_queue_lookup: Lookup an exec queue from guc_id */
> > + struct xarray exec_queue_lookup;
> > /** @guc_ids: used to allocate new guc_ids, single-lrc */
> > struct ida guc_ids;
> > /** @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc */
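
The lookup xarray stays indexed by guc_id, so only the field name
changes here. For reference, the registration half (it lives in
xe_guc_submit.c and is not visible in this mail) pairs with the
xa_load() in g2h_exec_queue_lookup() roughly like this:

    /* sketch of the registration side; the error label is invented */
    ret = xa_err(xa_store(&guc->submission_state.exec_queue_lookup,
                          q->guc->id, q, GFP_KERNEL));
    if (ret)
            goto err_free_id;
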
> > diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
> > index 05f3d8d68379..09db8da261a3 100644
> > --- a/drivers/gpu/drm/xe/xe_lrc.c
> > +++ b/drivers/gpu/drm/xe/xe_lrc.c
> > @@ -12,7 +12,7 @@
> > #include "regs/xe_regs.h"
> > #include "xe_bo.h"
> > #include "xe_device.h"
> > -#include "xe_engine_types.h"
> > +#include "xe_exec_queue_types.h"
> > #include "xe_gt.h"
> > #include "xe_hw_fence.h"
> > #include "xe_map.h"
> > @@ -604,7 +604,7 @@ static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)
> > #define ACC_NOTIFY_S 16
> >
> > int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
> > - struct xe_engine *e, struct xe_vm *vm, u32 ring_size)
> > + struct xe_exec_queue *q, struct xe_vm *vm, u32 ring_size)
> > {
> > struct xe_gt *gt = hwe->gt;
> > struct xe_tile *tile = gt_to_tile(gt);
> > @@ -669,12 +669,12 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
> > RING_CTL_SIZE(lrc->ring.size) | RING_VALID);
> > if (xe->info.has_asid && vm)
> > xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID,
> > - (e->usm.acc_granularity <<
> > + (q->usm.acc_granularity <<
> > ACC_GRANULARITY_S) | vm->usm.asid);
> > if (xe->info.supports_usm && vm)
> > xe_lrc_write_ctx_reg(lrc, PVC_CTX_ACC_CTR_THOLD,
> > - (e->usm.acc_notify << ACC_NOTIFY_S) |
> > - e->usm.acc_trigger);
> > + (q->usm.acc_notify << ACC_NOTIFY_S) |
> > + q->usm.acc_trigger);
> >
> > lrc->desc = GEN8_CTX_VALID;
> > lrc->desc |= INTEL_LEGACY_64B_CONTEXT << GEN8_CTX_ADDRESSING_MODE_SHIFT;
> > diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h
> > index e37f89e75ef8..3a6e8fc5a837 100644
> > --- a/drivers/gpu/drm/xe/xe_lrc.h
> > +++ b/drivers/gpu/drm/xe/xe_lrc.h
> > @@ -8,7 +8,7 @@
> > #include "xe_lrc_types.h"
> >
> > struct xe_device;
> > -struct xe_engine;
> > +struct xe_exec_queue;
> > enum xe_engine_class;
> > struct xe_hw_engine;
> > struct xe_vm;
> > @@ -16,7 +16,7 @@ struct xe_vm;
> > #define LRC_PPHWSP_SCRATCH_ADDR (0x34 * 4)
> >
> > int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
> > - struct xe_engine *e, struct xe_vm *vm, u32 ring_size);
> > + struct xe_exec_queue *q, struct xe_vm *vm, u32 ring_size);
> > void xe_lrc_finish(struct xe_lrc *lrc);
> >
> > size_t xe_lrc_size(struct xe_device *xe, enum xe_engine_class class);
> > diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
> > index 414f93cddb28..7af2d74833c0 100644
> > --- a/drivers/gpu/drm/xe/xe_migrate.c
> > +++ b/drivers/gpu/drm/xe/xe_migrate.c
> > @@ -16,7 +16,7 @@
> > #include "tests/xe_test.h"
> > #include "xe_bb.h"
> > #include "xe_bo.h"
> > -#include "xe_engine.h"
> > +#include "xe_exec_queue.h"
> > #include "xe_ggtt.h"
> > #include "xe_gt.h"
> > #include "xe_hw_engine.h"
> > @@ -35,7 +35,7 @@
> > */
> > struct xe_migrate {
> > - /** @eng: Default engine used for migration */
> > + /** @q: Default exec queue used for migration */
> > - struct xe_engine *eng;
> > + struct xe_exec_queue *q;
> > /** @tile: Backpointer to the tile this struct xe_migrate belongs to. */
> > struct xe_tile *tile;
> > - /** @job_mutex: Timeline mutex for @eng. */
> > + /** @job_mutex: Timeline mutex for @q. */
> > @@ -78,9 +78,9 @@ struct xe_migrate {
> > *
> > * Return: The default migrate engine
> > */
> > -struct xe_engine *xe_tile_migrate_engine(struct xe_tile *tile)
> > +struct xe_exec_queue *xe_tile_migrate_engine(struct xe_tile *tile)
> > {
> > - return tile->migrate->eng;
> > + return tile->migrate->q;
> > }
> >
> > static void xe_migrate_fini(struct drm_device *dev, void *arg)
> > @@ -88,11 +88,11 @@ static void xe_migrate_fini(struct drm_device *dev, void *arg)
> > struct xe_migrate *m = arg;
> > struct ww_acquire_ctx ww;
> >
> > - xe_vm_lock(m->eng->vm, &ww, 0, false);
> > + xe_vm_lock(m->q->vm, &ww, 0, false);
> > xe_bo_unpin(m->pt_bo);
> > if (m->cleared_bo)
> > xe_bo_unpin(m->cleared_bo);
> > - xe_vm_unlock(m->eng->vm, &ww);
> > + xe_vm_unlock(m->q->vm, &ww);
> >
> > dma_fence_put(m->fence);
> > if (m->cleared_bo)
> > @@ -100,8 +100,8 @@ static void xe_migrate_fini(struct drm_device *dev, void *arg)
> > xe_bo_put(m->pt_bo);
> > drm_suballoc_manager_fini(&m->vm_update_sa);
> > mutex_destroy(&m->job_mutex);
> > - xe_vm_close_and_put(m->eng->vm);
> > - xe_engine_put(m->eng);
> > + xe_vm_close_and_put(m->q->vm);
> > + xe_exec_queue_put(m->q);
> > }
> >
> > static u64 xe_migrate_vm_addr(u64 slot, u32 level)
> > @@ -341,20 +341,20 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
> > if (!hwe)
> > return ERR_PTR(-EINVAL);
> >
> > - m->eng = xe_engine_create(xe, vm,
> > - BIT(hwe->logical_instance), 1,
> > - hwe, ENGINE_FLAG_KERNEL);
> > + m->q = xe_exec_queue_create(xe, vm,
> > + BIT(hwe->logical_instance), 1,
> > + hwe, EXEC_QUEUE_FLAG_KERNEL);
> > } else {
> > - m->eng = xe_engine_create_class(xe, primary_gt, vm,
> > - XE_ENGINE_CLASS_COPY,
> > - ENGINE_FLAG_KERNEL);
> > + m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
> > + XE_ENGINE_CLASS_COPY,
> > + EXEC_QUEUE_FLAG_KERNEL);
> > }
> > - if (IS_ERR(m->eng)) {
> > + if (IS_ERR(m->q)) {
> > xe_vm_close_and_put(vm);
> > - return ERR_CAST(m->eng);
> > + return ERR_CAST(m->q);
> > }
> > if (xe->info.supports_usm)
> > - m->eng->entity->priority = DRM_SCHED_PRIORITY_KERNEL;
> > + m->q->entity->priority = DRM_SCHED_PRIORITY_KERNEL;
> >
> > mutex_init(&m->job_mutex);
> >
> > @@ -456,7 +456,7 @@ static void emit_pte(struct xe_migrate *m,
> > addr = xe_res_dma(cur) & PAGE_MASK;
> > if (is_vram) {
> > /* Is this a 64K PTE entry? */
> > - if ((m->eng->vm->flags & XE_VM_FLAG_64K) &&
> > + if ((m->q->vm->flags & XE_VM_FLAG_64K) &&
> > !(cur_ofs & (16 * 8 - 1))) {
> > XE_WARN_ON(!IS_ALIGNED(addr, SZ_64K));
> > addr |= XE_PTE_PS64;
> > @@ -714,7 +714,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
> > src_L0, ccs_ofs, copy_ccs);
> >
> > mutex_lock(&m->job_mutex);
> > - job = xe_bb_create_migration_job(m->eng, bb,
> > + job = xe_bb_create_migration_job(m->q, bb,
> > xe_migrate_batch_base(m, usm),
> > update_idx);
> > if (IS_ERR(job)) {
> > @@ -938,7 +938,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
> > }
> >
> > mutex_lock(&m->job_mutex);
> > - job = xe_bb_create_migration_job(m->eng, bb,
> > + job = xe_bb_create_migration_job(m->q, bb,
> > xe_migrate_batch_base(m, usm),
> > update_idx);
> > if (IS_ERR(job)) {
> > @@ -1024,7 +1024,7 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
> >
> > struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m)
> > {
> > - return xe_vm_get(m->eng->vm);
> > + return xe_vm_get(m->q->vm);
> > }
> >
> > #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
> > @@ -1132,7 +1132,7 @@ struct dma_fence *
> > xe_migrate_update_pgtables(struct xe_migrate *m,
> > struct xe_vm *vm,
> > struct xe_bo *bo,
> > - struct xe_engine *eng,
> > + struct xe_exec_queue *q,
> > const struct xe_vm_pgtable_update *updates,
> > u32 num_updates,
> > struct xe_sync_entry *syncs, u32 num_syncs,
> > @@ -1150,13 +1150,13 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
> > u32 i, batch_size, ppgtt_ofs, update_idx, page_ofs = 0;
> > u64 addr;
> > int err = 0;
> > - bool usm = !eng && xe->info.supports_usm;
> > + bool usm = !q && xe->info.supports_usm;
> > bool first_munmap_rebind = vma &&
> > vma->gpuva.flags & XE_VMA_FIRST_REBIND;
> > - struct xe_engine *eng_override = !eng ? m->eng : eng;
> > + struct xe_exec_queue *q_override = !q ? m->q : q;
> >
> > /* Use the CPU if no in syncs and engine is idle */
> > - if (no_in_syncs(syncs, num_syncs) && xe_engine_is_idle(eng_override)) {
> > + if (no_in_syncs(syncs, num_syncs) && xe_exec_queue_is_idle(q_override)) {
> > fence = xe_migrate_update_pgtables_cpu(m, vm, bo, updates,
> > num_updates,
> > first_munmap_rebind,
> > @@ -1186,14 +1186,14 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
> > */
> > XE_WARN_ON(batch_size >= SZ_128K);
> >
> > - bb = xe_bb_new(gt, batch_size, !eng && xe->info.supports_usm);
> > + bb = xe_bb_new(gt, batch_size, !q && xe->info.supports_usm);
> > if (IS_ERR(bb))
> > return ERR_CAST(bb);
> >
> > /* For sysmem PTE's, need to map them in our hole.. */
> > if (!IS_DGFX(xe)) {
> > ppgtt_ofs = NUM_KERNEL_PDE - 1;
> > - if (eng) {
> > + if (q) {
> > XE_WARN_ON(num_updates > NUM_VMUSA_WRITES_PER_UNIT);
> >
> > sa_bo = drm_suballoc_new(&m->vm_update_sa, 1,
> > @@ -1249,10 +1249,10 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
> > write_pgtable(tile, bb, 0, &updates[i], pt_update);
> > }
> >
> > - if (!eng)
> > + if (!q)
> > mutex_lock(&m->job_mutex);
> >
> > - job = xe_bb_create_migration_job(eng ?: m->eng, bb,
> > + job = xe_bb_create_migration_job(q ?: m->q, bb,
> > xe_migrate_batch_base(m, usm),
> > update_idx);
> > if (IS_ERR(job)) {
> > @@ -1295,7 +1295,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
> > fence = dma_fence_get(&job->drm.s_fence->finished);
> > xe_sched_job_push(job);
> >
> > - if (!eng)
> > + if (!q)
> > mutex_unlock(&m->job_mutex);
> >
> > xe_bb_free(bb, fence);
> > @@ -1306,7 +1306,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
> > err_job:
> > xe_sched_job_put(job);
> > err_bb:
> > - if (!eng)
> > + if (!q)
> > mutex_unlock(&m->job_mutex);
> > xe_bb_free(bb, NULL);
> > err:
> > diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h
> > index 0d62aff6421c..c729241776ad 100644
> > --- a/drivers/gpu/drm/xe/xe_migrate.h
> > +++ b/drivers/gpu/drm/xe/xe_migrate.h
> > @@ -14,7 +14,7 @@ struct ttm_resource;
> >
> > struct xe_bo;
> > struct xe_gt;
> > -struct xe_engine;
> > +struct xe_exec_queue;
> > struct xe_migrate;
> > struct xe_migrate_pt_update;
> > struct xe_sync_entry;
> > @@ -97,7 +97,7 @@ struct dma_fence *
> > xe_migrate_update_pgtables(struct xe_migrate *m,
> > struct xe_vm *vm,
> > struct xe_bo *bo,
> > - struct xe_engine *eng,
> > + struct xe_exec_queue *q,
> > const struct xe_vm_pgtable_update *updates,
> > u32 num_updates,
> > struct xe_sync_entry *syncs, u32 num_syncs,
> > @@ -105,5 +105,5 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
> >
> > void xe_migrate_wait(struct xe_migrate *m);
> >
> > -struct xe_engine *xe_tile_migrate_engine(struct xe_tile *tile);
> > +struct xe_exec_queue *xe_tile_migrate_engine(struct xe_tile *tile);
> > #endif
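
One pattern worth calling out in xe_migrate.c above: whenever the caller
passes no queue, the migrate object's own queue is used and its timeline
mutex is taken, because that default queue is shared. Condensed from the
hunks above:

    struct xe_exec_queue *q_override = !q ? m->q : q;

    if (!q)
            mutex_lock(&m->job_mutex);      /* default queue shares one timeline */
    job = xe_bb_create_migration_job(q_override, bb,
                                     xe_migrate_batch_base(m, usm), update_idx);
    /* ... push the job ... */
    if (!q)
            mutex_unlock(&m->job_mutex);
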
> > diff --git a/drivers/gpu/drm/xe/xe_mocs.c b/drivers/gpu/drm/xe/xe_mocs.c
> > index ccc852500eda..c9653978fc9f 100644
> > --- a/drivers/gpu/drm/xe/xe_mocs.c
> > +++ b/drivers/gpu/drm/xe/xe_mocs.c
> > @@ -8,7 +8,7 @@
> > #include "regs/xe_gt_regs.h"
> > #include "xe_bo.h"
> > #include "xe_device.h"
> > -#include "xe_engine.h"
> > +#include "xe_exec_queue.h"
> > #include "xe_gt.h"
> > #include "xe_mmio.h"
> > #include "xe_platform_types.h"
> > diff --git a/drivers/gpu/drm/xe/xe_mocs.h b/drivers/gpu/drm/xe/xe_mocs.h
> > index 25f7b35a76da..d0f1ec4b0336 100644
> > --- a/drivers/gpu/drm/xe/xe_mocs.h
> > +++ b/drivers/gpu/drm/xe/xe_mocs.h
> > @@ -8,7 +8,7 @@
> >
> > #include <linux/types.h>
> >
> > -struct xe_engine;
> > +struct xe_exec_queue;
> > struct xe_gt;
> >
> > void xe_mocs_init_early(struct xe_gt *gt);
> > diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c
> > index 219eefeb90ff..e0ce1588a734 100644
> > --- a/drivers/gpu/drm/xe/xe_preempt_fence.c
> > +++ b/drivers/gpu/drm/xe/xe_preempt_fence.c
> > @@ -7,7 +7,7 @@
> >
> > #include <linux/slab.h>
> >
> > -#include "xe_engine.h"
> > +#include "xe_exec_queue.h"
> > #include "xe_vm.h"
> >
> > static void preempt_fence_work_func(struct work_struct *w)
> > @@ -15,19 +15,19 @@ static void preempt_fence_work_func(struct work_struct *w)
> > bool cookie = dma_fence_begin_signalling();
> > struct xe_preempt_fence *pfence =
> > container_of(w, typeof(*pfence), preempt_work);
> > - struct xe_engine *e = pfence->engine;
> > + struct xe_exec_queue *q = pfence->q;
> >
> > if (pfence->error)
> > dma_fence_set_error(&pfence->base, pfence->error);
> > else
> > - e->ops->suspend_wait(e);
> > + q->ops->suspend_wait(q);
> >
> > dma_fence_signal(&pfence->base);
> > dma_fence_end_signalling(cookie);
> >
> > - xe_vm_queue_rebind_worker(e->vm);
> > + xe_vm_queue_rebind_worker(q->vm);
> >
> > - xe_engine_put(e);
> > + xe_exec_queue_put(q);
> > }
> >
> > static const char *
> > @@ -46,9 +46,9 @@ static bool preempt_fence_enable_signaling(struct dma_fence *fence)
> > {
> > struct xe_preempt_fence *pfence =
> > container_of(fence, typeof(*pfence), base);
> > - struct xe_engine *e = pfence->engine;
> > + struct xe_exec_queue *q = pfence->q;
> >
> > - pfence->error = e->ops->suspend(e);
> > + pfence->error = q->ops->suspend(q);
> > queue_work(system_unbound_wq, &pfence->preempt_work);
> > return true;
> > }
> > @@ -104,43 +104,43 @@ void xe_preempt_fence_free(struct xe_preempt_fence *pfence)
> > * xe_preempt_fence_alloc().
> > * @pfence: The struct xe_preempt_fence pointer returned from
> > * xe_preempt_fence_alloc().
> > - * @e: The struct xe_engine used for arming.
> > + * @q: The struct xe_exec_queue used for arming.
> > * @context: The dma-fence context used for arming.
> > * @seqno: The dma-fence seqno used for arming.
> > *
> > * Inserts the preempt fence into @context's timeline, takes @link off any
> > - * list, and registers the struct xe_engine as the xe_engine to be preempted.
> > + * list, and registers the struct xe_exec_queue as the exec queue to be preempted.
> > *
> > * Return: A pointer to a struct dma_fence embedded into the preempt fence.
> > * This function doesn't error.
> > */
> > struct dma_fence *
> > -xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_engine *e,
> > +xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_exec_queue *q,
> > u64 context, u32 seqno)
> > {
> > list_del_init(&pfence->link);
> > - pfence->engine = xe_engine_get(e);
> > + pfence->q = xe_exec_queue_get(q);
> > dma_fence_init(&pfence->base, &preempt_fence_ops,
> > - &e->compute.lock, context, seqno);
> > + &q->compute.lock, context, seqno);
> >
> > return &pfence->base;
> > }
> >
> > /**
> > * xe_preempt_fence_create() - Helper to create and arm a preempt fence.
> > - * @e: The struct xe_engine used for arming.
> > + * @q: The struct xe_exec_queue used for arming.
> > * @context: The dma-fence context used for arming.
> > * @seqno: The dma-fence seqno used for arming.
> > *
> > * Allocates and inserts the preempt fence into @context's timeline,
> > - * and registers @e as the struct xe_engine to be preempted.
> > + * and registers @q as the struct xe_exec_queue to be preempted.
> > *
> > * Return: A pointer to the resulting struct dma_fence on success. An error
> > * pointer on error. In particular if allocation fails it returns
> > * ERR_PTR(-ENOMEM);
> > */
> > struct dma_fence *
> > -xe_preempt_fence_create(struct xe_engine *e,
> > +xe_preempt_fence_create(struct xe_exec_queue *q,
> > u64 context, u32 seqno)
> > {
> > struct xe_preempt_fence *pfence;
> > @@ -149,7 +149,7 @@ xe_preempt_fence_create(struct xe_engine *e,
> > if (IS_ERR(pfence))
> > return ERR_CAST(pfence);
> >
> > - return xe_preempt_fence_arm(pfence, e, context, seqno);
> > + return xe_preempt_fence_arm(pfence, q, context, seqno);
> > }
> >
> > bool xe_fence_is_xe_preempt(const struct dma_fence *fence)
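
End to end, the preempt fence flow after the rename is: enable_signaling()
calls q->ops->suspend(q) and queues the worker; the worker then does
q->ops->suspend_wait(q), signals the fence and kicks the VM rebind
worker. Creating one from the VM side stays as simple as:

    struct dma_fence *fence;

    fence = xe_preempt_fence_create(q, context, seqno);
    if (IS_ERR(fence))
            return PTR_ERR(fence);  /* -ENOMEM if allocation fails */
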
> > diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.h b/drivers/gpu/drm/xe/xe_preempt_fence.h
> > index 4f3966103203..9406c6fea525 100644
> > --- a/drivers/gpu/drm/xe/xe_preempt_fence.h
> > +++ b/drivers/gpu/drm/xe/xe_preempt_fence.h
> > @@ -11,7 +11,7 @@
> > struct list_head;
> >
> > struct dma_fence *
> > -xe_preempt_fence_create(struct xe_engine *e,
> > +xe_preempt_fence_create(struct xe_exec_queue *q,
> > u64 context, u32 seqno);
> >
> > struct xe_preempt_fence *xe_preempt_fence_alloc(void);
> > @@ -19,7 +19,7 @@ struct xe_preempt_fence *xe_preempt_fence_alloc(void);
> > void xe_preempt_fence_free(struct xe_preempt_fence *pfence);
> >
> > struct dma_fence *
> > -xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_engine *e,
> > +xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_exec_queue *q,
> > u64 context, u32 seqno);
> >
> > static inline struct xe_preempt_fence *
> > diff --git a/drivers/gpu/drm/xe/xe_preempt_fence_types.h b/drivers/gpu/drm/xe/xe_preempt_fence_types.h
> > index 9d9efd8ff0ed..176ea1e9e204 100644
> > --- a/drivers/gpu/drm/xe/xe_preempt_fence_types.h
> > +++ b/drivers/gpu/drm/xe/xe_preempt_fence_types.h
> > @@ -9,12 +9,11 @@
> > #include <linux/dma-fence.h>
> > #include <linux/workqueue.h>
> >
> > -struct xe_engine;
> > +struct xe_exec_queue;
> >
> > /**
> > * struct xe_preempt_fence - XE preempt fence
> > *
> > - * A preemption fence which suspends the execution of an xe_engine on the
> > - * hardware and triggers a callback once the xe_engine is complete.
> > + * A preemption fence which suspends the execution of an xe_exec_queue on the
> > + * hardware and triggers a callback once the xe_exec_queue is complete.
> > */
> > struct xe_preempt_fence {
> > @@ -23,7 +22,7 @@ struct xe_preempt_fence {
> > /** @link: link into list of pending preempt fences */
> > struct list_head link;
> > - /** @engine: xe engine for this preempt fence */
> > + /** @q: exec queue for this preempt fence */
> > - struct xe_engine *engine;
> > + struct xe_exec_queue *q;
> > /** @preempt_work: work struct which issues preemption */
> > struct work_struct preempt_work;
> > /** @error: preempt fence is in error state */
> > diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
> > index 9b034272d82f..5709518e314b 100644
> > --- a/drivers/gpu/drm/xe/xe_pt.c
> > +++ b/drivers/gpu/drm/xe/xe_pt.c
> > @@ -1307,7 +1307,7 @@ static void xe_pt_calc_rfence_interval(struct xe_vma *vma,
> > * address range.
> > * @tile: The tile to bind for.
> > * @vma: The vma to bind.
> > - * @e: The engine with which to do pipelined page-table updates.
> > + * @q: The exec_queue with which to do pipelined page-table updates.
> > * @syncs: Entries to sync on before binding the built tree to the live vm tree.
> > * @num_syncs: Number of @sync entries.
> > * @rebind: Whether we're rebinding this vma to the same address range without
> > @@ -1325,7 +1325,7 @@ static void xe_pt_calc_rfence_interval(struct xe_vma *vma,
> > * on success, an error pointer on error.
> > */
> > struct dma_fence *
> > -__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e,
> > +__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
> > struct xe_sync_entry *syncs, u32 num_syncs,
> > bool rebind)
> > {
> > @@ -1351,7 +1351,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e,
> >
> > vm_dbg(&xe_vma_vm(vma)->xe->drm,
> > "Preparing bind, with range [%llx...%llx) engine %p.\n",
> > - xe_vma_start(vma), xe_vma_end(vma) - 1, e);
> > + xe_vma_start(vma), xe_vma_end(vma) - 1, q);
> >
> > err = xe_pt_prepare_bind(tile, vma, entries, &num_entries, rebind);
> > if (err)
> > @@ -1388,7 +1388,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e,
> > }
> >
> > fence = xe_migrate_update_pgtables(tile->migrate,
> > - vm, xe_vma_bo(vma), e,
> > + vm, xe_vma_bo(vma), q,
> > entries, num_entries,
> > syncs, num_syncs,
> > &bind_pt_update.base);
> > @@ -1663,7 +1663,7 @@ static const struct xe_migrate_pt_update_ops userptr_unbind_ops = {
> > * address range.
> > * @tile: The tile to unbind for.
> > * @vma: The vma to unbind.
> > - * @e: The engine with which to do pipelined page-table updates.
> > + * @q: The exec_queue with which to do pipelined page-table updates.
> > * @syncs: Entries to sync on before disconnecting the tree to be destroyed.
> > * @num_syncs: Number of @sync entries.
> > *
> > @@ -1679,7 +1679,7 @@ static const struct xe_migrate_pt_update_ops userptr_unbind_ops = {
> > * on success, an error pointer on error.
> > */
> > struct dma_fence *
> > -__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e,
> > +__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
> > struct xe_sync_entry *syncs, u32 num_syncs)
> > {
> > struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
> > @@ -1704,7 +1704,7 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e
> >
> > vm_dbg(&xe_vma_vm(vma)->xe->drm,
> > "Preparing unbind, with range [%llx...%llx) engine %p.\n",
> > - xe_vma_start(vma), xe_vma_end(vma) - 1, e);
> > + xe_vma_start(vma), xe_vma_end(vma) - 1, q);
> >
> > num_entries = xe_pt_stage_unbind(tile, vma, entries);
> > XE_WARN_ON(num_entries > ARRAY_SIZE(entries));
> > @@ -1729,8 +1729,8 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e
> > * lower level, because it needs to be more conservative.
> > */
> > fence = xe_migrate_update_pgtables(tile->migrate,
> > - vm, NULL, e ? e :
> > - vm->eng[tile->id],
> > + vm, NULL, q ? q :
> > + vm->q[tile->id],
> > entries, num_entries,
> > syncs, num_syncs,
> > &unbind_pt_update.base);
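
(The vm->eng[] array becomes vm->q[] in this hunk: the VM keeps one
default bind queue per tile, used as a fallback when the caller does not
supply one.)

    /* fallback selection as used above */
    struct xe_exec_queue *bind_q = q ? q : vm->q[tile->id];
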
> > diff --git a/drivers/gpu/drm/xe/xe_pt.h b/drivers/gpu/drm/xe/xe_pt.h
> > index bbb00d6461ff..01be7ab08f87 100644
> > --- a/drivers/gpu/drm/xe/xe_pt.h
> > +++ b/drivers/gpu/drm/xe/xe_pt.h
> > @@ -12,7 +12,7 @@
> > struct dma_fence;
> > struct xe_bo;
> > struct xe_device;
> > -struct xe_engine;
> > +struct xe_exec_queue;
> > struct xe_sync_entry;
> > struct xe_tile;
> > struct xe_vm;
> > @@ -35,12 +35,12 @@ void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
> > void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred);
> >
> > struct dma_fence *
> > -__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e,
> > +__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
> > struct xe_sync_entry *syncs, u32 num_syncs,
> > bool rebind);
> >
> > struct dma_fence *
> > -__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e,
> > +__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
> > struct xe_sync_entry *syncs, u32 num_syncs);
> >
> > bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma);
> > diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
> > index 6ba7baf7c777..7ea235c71385 100644
> > --- a/drivers/gpu/drm/xe/xe_query.c
> > +++ b/drivers/gpu/drm/xe/xe_query.c
> > @@ -12,7 +12,7 @@
> >
> > #include "xe_bo.h"
> > #include "xe_device.h"
> > -#include "xe_engine.h"
> > +#include "xe_exec_queue.h"
> > #include "xe_ggtt.h"
> > #include "xe_gt.h"
> > #include "xe_guc_hwconfig.h"
> > @@ -203,7 +203,7 @@ static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
> > config->info[XE_QUERY_CONFIG_MEM_REGION_COUNT] =
> > hweight_long(xe->info.mem_region_mask);
> > config->info[XE_QUERY_CONFIG_MAX_ENGINE_PRIORITY] =
> > - xe_engine_device_get_max_priority(xe);
> > + xe_exec_queue_device_get_max_priority(xe);
> >
> > if (copy_to_user(query_ptr, config, size)) {
> > kfree(config);
> > diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
> > index c9ef44e63772..4a474ab04f96 100644
> > --- a/drivers/gpu/drm/xe/xe_ring_ops.c
> > +++ b/drivers/gpu/drm/xe/xe_ring_ops.c
> > @@ -10,7 +10,7 @@
> > #include "regs/xe_gt_regs.h"
> > #include "regs/xe_lrc_layout.h"
> > #include "regs/xe_regs.h"
> > -#include "xe_engine_types.h"
> > +#include "xe_exec_queue_types.h"
> > #include "xe_gt.h"
> > #include "xe_lrc.h"
> > #include "xe_macros.h"
> > @@ -156,7 +156,7 @@ static int emit_store_imm_ppgtt_posted(u64 addr, u64 value,
> >
> > static int emit_render_cache_flush(struct xe_sched_job *job, u32 *dw, int i)
> > {
> > - struct xe_gt *gt = job->engine->gt;
> > + struct xe_gt *gt = job->q->gt;
> > bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
> > u32 flags;
> >
> > @@ -172,7 +172,7 @@ static int emit_render_cache_flush(struct xe_sched_job *job, u32 *dw, int i)
> >
> > if (lacks_render)
> > flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
> > - else if (job->engine->class == XE_ENGINE_CLASS_COMPUTE)
> > + else if (job->q->class == XE_ENGINE_CLASS_COMPUTE)
> > flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
> >
> > dw[i++] = GFX_OP_PIPE_CONTROL(6) | PIPE_CONTROL0_HDC_PIPELINE_FLUSH;
> > @@ -202,7 +202,7 @@ static int emit_pipe_imm_ggtt(u32 addr, u32 value, bool stall_only, u32 *dw,
> >
> > static u32 get_ppgtt_flag(struct xe_sched_job *job)
> > {
> > - return !(job->engine->flags & ENGINE_FLAG_WA) ? BIT(8) : 0;
> > + return !(job->q->flags & EXEC_QUEUE_FLAG_WA) ? BIT(8) : 0;
> > }
> >
> > static void __emit_job_gen12_copy(struct xe_sched_job *job, struct xe_lrc *lrc,
> > @@ -210,7 +210,7 @@ static void __emit_job_gen12_copy(struct xe_sched_job *job, struct xe_lrc *lrc,
> > {
> > u32 dw[MAX_JOB_SIZE_DW], i = 0;
> > u32 ppgtt_flag = get_ppgtt_flag(job);
> > - struct xe_vm *vm = job->engine->vm;
> > + struct xe_vm *vm = job->q->vm;
> >
> > if (vm->batch_invalidate_tlb) {
> > dw[i++] = preparser_disable(true);
> > @@ -255,10 +255,10 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
> > {
> > u32 dw[MAX_JOB_SIZE_DW], i = 0;
> > u32 ppgtt_flag = get_ppgtt_flag(job);
> > - struct xe_gt *gt = job->engine->gt;
> > + struct xe_gt *gt = job->q->gt;
> > struct xe_device *xe = gt_to_xe(gt);
> > - bool decode = job->engine->class == XE_ENGINE_CLASS_VIDEO_DECODE;
> > - struct xe_vm *vm = job->engine->vm;
> > + bool decode = job->q->class == XE_ENGINE_CLASS_VIDEO_DECODE;
> > + struct xe_vm *vm = job->q->vm;
> >
> > dw[i++] = preparser_disable(true);
> >
> > @@ -302,16 +302,16 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
> > {
> > u32 dw[MAX_JOB_SIZE_DW], i = 0;
> > u32 ppgtt_flag = get_ppgtt_flag(job);
> > - struct xe_gt *gt = job->engine->gt;
> > + struct xe_gt *gt = job->q->gt;
> > struct xe_device *xe = gt_to_xe(gt);
> > bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
> > - struct xe_vm *vm = job->engine->vm;
> > + struct xe_vm *vm = job->q->vm;
> > u32 mask_flags = 0;
> >
> > dw[i++] = preparser_disable(true);
> > if (lacks_render)
> > mask_flags = PIPE_CONTROL_3D_ARCH_FLAGS;
> > - else if (job->engine->class == XE_ENGINE_CLASS_COMPUTE)
> > + else if (job->q->class == XE_ENGINE_CLASS_COMPUTE)
> > mask_flags = PIPE_CONTROL_3D_ENGINE_FLAGS;
> >
> > /* See __xe_pt_bind_vma() for a discussion on TLB invalidations. */
> > @@ -378,14 +378,14 @@ static void emit_job_gen12_copy(struct xe_sched_job *job)
> > {
> > int i;
> >
> > - if (xe_sched_job_is_migration(job->engine)) {
> > - emit_migration_job_gen12(job, job->engine->lrc,
> > + if (xe_sched_job_is_migration(job->q)) {
> > + emit_migration_job_gen12(job, job->q->lrc,
> > xe_sched_job_seqno(job));
> > return;
> > }
> >
> > - for (i = 0; i < job->engine->width; ++i)
> > - __emit_job_gen12_copy(job, job->engine->lrc + i,
> > + for (i = 0; i < job->q->width; ++i)
> > + __emit_job_gen12_copy(job, job->q->lrc + i,
> > job->batch_addr[i],
> > xe_sched_job_seqno(job));
> > }
> > @@ -395,8 +395,8 @@ static void emit_job_gen12_video(struct xe_sched_job *job)
> > int i;
> >
> > /* FIXME: Not doing parallel handshake for now */
> > - for (i = 0; i < job->engine->width; ++i)
> > - __emit_job_gen12_video(job, job->engine->lrc + i,
> > + for (i = 0; i < job->q->width; ++i)
> > + __emit_job_gen12_video(job, job->q->lrc + i,
> > job->batch_addr[i],
> > xe_sched_job_seqno(job));
> > }
> > @@ -405,8 +405,8 @@ static void emit_job_gen12_render_compute(struct xe_sched_job *job)
> > {
> > int i;
> >
> > - for (i = 0; i < job->engine->width; ++i)
> > - __emit_job_gen12_render_compute(job, job->engine->lrc + i,
> > + for (i = 0; i < job->q->width; ++i)
> > + __emit_job_gen12_render_compute(job, job->q->lrc + i,
> > job->batch_addr[i],
> > xe_sched_job_seqno(job));
> > }
> > diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
> > index b5bf14b828f9..753aab0cc6f4 100644
> > --- a/drivers/gpu/drm/xe/xe_sched_job.c
> > +++ b/drivers/gpu/drm/xe/xe_sched_job.c
> > @@ -9,7 +9,7 @@
> > #include <linux/slab.h>
> >
> > #include "xe_device.h"
> > -#include "xe_engine.h"
> > +#include "xe_exec_queue.h"
> > #include "xe_gt.h"
> > #include "xe_hw_engine_types.h"
> > #include "xe_hw_fence.h"
> > @@ -57,58 +57,58 @@ static struct xe_sched_job *job_alloc(bool parallel)
> > xe_sched_job_slab, GFP_KERNEL);
> > }
> >
> > -bool xe_sched_job_is_migration(struct xe_engine *e)
> > +bool xe_sched_job_is_migration(struct xe_exec_queue *q)
> > {
> > - return e->vm && (e->vm->flags & XE_VM_FLAG_MIGRATION) &&
> > - !(e->flags & ENGINE_FLAG_WA);
> > + return q->vm && (q->vm->flags & XE_VM_FLAG_MIGRATION) &&
> > + !(q->flags & EXEC_QUEUE_FLAG_WA);
> > }
> >
> > static void job_free(struct xe_sched_job *job)
> > {
> > - struct xe_engine *e = job->engine;
> > - bool is_migration = xe_sched_job_is_migration(e);
> > + struct xe_exec_queue *q = job->q;
> > + bool is_migration = xe_sched_job_is_migration(q);
> >
> > - kmem_cache_free(xe_engine_is_parallel(job->engine) || is_migration ?
> > + kmem_cache_free(xe_exec_queue_is_parallel(job->q) || is_migration ?
> > xe_sched_job_parallel_slab : xe_sched_job_slab, job);
> > }
> >
> > static struct xe_device *job_to_xe(struct xe_sched_job *job)
> > {
> > - return gt_to_xe(job->engine->gt);
> > + return gt_to_xe(job->q->gt);
> > }
> >
> > -struct xe_sched_job *xe_sched_job_create(struct xe_engine *e,
> > +struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
> > u64 *batch_addr)
> > {
> > struct xe_sched_job *job;
> > struct dma_fence **fences;
> > - bool is_migration = xe_sched_job_is_migration(e);
> > + bool is_migration = xe_sched_job_is_migration(q);
> > int err;
> > int i, j;
> > u32 width;
> >
> > /* Migration and kernel engines have their own locking */
> > - if (!(e->flags & (ENGINE_FLAG_KERNEL | ENGINE_FLAG_VM |
> > - ENGINE_FLAG_WA))) {
> > - lockdep_assert_held(&e->vm->lock);
> > - if (!xe_vm_no_dma_fences(e->vm))
> > - xe_vm_assert_held(e->vm);
> > + if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM |
> > + EXEC_QUEUE_FLAG_WA))) {
> > + lockdep_assert_held(&q->vm->lock);
> > + if (!xe_vm_no_dma_fences(q->vm))
> > + xe_vm_assert_held(q->vm);
> > }
> >
> > - job = job_alloc(xe_engine_is_parallel(e) || is_migration);
> > + job = job_alloc(xe_exec_queue_is_parallel(q) || is_migration);
> > if (!job)
> > return ERR_PTR(-ENOMEM);
> >
> > - job->engine = e;
> > + job->q = q;
> > kref_init(&job->refcount);
> > - xe_engine_get(job->engine);
> > + xe_exec_queue_get(job->q);
> >
> > - err = drm_sched_job_init(&job->drm, e->entity, NULL);
> > + err = drm_sched_job_init(&job->drm, q->entity, NULL);
> > if (err)
> > goto err_free;
> >
> > - if (!xe_engine_is_parallel(e)) {
> > - job->fence = xe_lrc_create_seqno_fence(e->lrc);
> > + if (!xe_exec_queue_is_parallel(q)) {
> > + job->fence = xe_lrc_create_seqno_fence(q->lrc);
> > if (IS_ERR(job->fence)) {
> > err = PTR_ERR(job->fence);
> > goto err_sched_job;
> > @@ -116,38 +116,38 @@ struct xe_sched_job *xe_sched_job_create(struct xe_engine *e,
> > } else {
> > struct dma_fence_array *cf;
> >
> > - fences = kmalloc_array(e->width, sizeof(*fences), GFP_KERNEL);
> > + fences = kmalloc_array(q->width, sizeof(*fences), GFP_KERNEL);
> > if (!fences) {
> > err = -ENOMEM;
> > goto err_sched_job;
> > }
> >
> > - for (j = 0; j < e->width; ++j) {
> > - fences[j] = xe_lrc_create_seqno_fence(e->lrc + j);
> > + for (j = 0; j < q->width; ++j) {
> > + fences[j] = xe_lrc_create_seqno_fence(q->lrc + j);
> > if (IS_ERR(fences[j])) {
> > err = PTR_ERR(fences[j]);
> > goto err_fences;
> > }
> > }
> >
> > - cf = dma_fence_array_create(e->width, fences,
> > - e->parallel.composite_fence_ctx,
> > - e->parallel.composite_fence_seqno++,
> > + cf = dma_fence_array_create(q->width, fences,
> > + q->parallel.composite_fence_ctx,
> > + q->parallel.composite_fence_seqno++,
> > false);
> > if (!cf) {
> > - --e->parallel.composite_fence_seqno;
> > + --q->parallel.composite_fence_seqno;
> > err = -ENOMEM;
> > goto err_fences;
> > }
> >
> > /* Sanity check */
> > - for (j = 0; j < e->width; ++j)
> > + for (j = 0; j < q->width; ++j)
> > XE_WARN_ON(cf->base.seqno != fences[j]->seqno);
> >
> > job->fence = &cf->base;
> > }
> >
> > - width = e->width;
> > + width = q->width;
> > if (is_migration)
> > width = 2;
> >
> > @@ -155,7 +155,7 @@ struct xe_sched_job *xe_sched_job_create(struct xe_engine *e,
> > job->batch_addr[i] = batch_addr[i];
> >
> > /* All other jobs require a VM to be open which has a ref */
> > - if (unlikely(e->flags & ENGINE_FLAG_KERNEL))
> > + if (unlikely(q->flags & EXEC_QUEUE_FLAG_KERNEL))
> > xe_device_mem_access_get(job_to_xe(job));
> > xe_device_assert_mem_access(job_to_xe(job));
> >
> > @@ -164,14 +164,14 @@ struct xe_sched_job *xe_sched_job_create(struct xe_engine *e,
> >
> > err_fences:
> > for (j = j - 1; j >= 0; --j) {
> > - --e->lrc[j].fence_ctx.next_seqno;
> > + --q->lrc[j].fence_ctx.next_seqno;
> > dma_fence_put(fences[j]);
> > }
> > kfree(fences);
> > err_sched_job:
> > drm_sched_job_cleanup(&job->drm);
> > err_free:
> > - xe_engine_put(e);
> > + xe_exec_queue_put(q);
> > job_free(job);
> > return ERR_PTR(err);
> > }
> > @@ -188,9 +188,9 @@ void xe_sched_job_destroy(struct kref *ref)
> > struct xe_sched_job *job =
> > container_of(ref, struct xe_sched_job, refcount);
> >
> > - if (unlikely(job->engine->flags & ENGINE_FLAG_KERNEL))
> > + if (unlikely(job->q->flags & EXEC_QUEUE_FLAG_KERNEL))
> > xe_device_mem_access_put(job_to_xe(job));
> > - xe_engine_put(job->engine);
> > + xe_exec_queue_put(job->q);
> > dma_fence_put(job->fence);
> > drm_sched_job_cleanup(&job->drm);
> > job_free(job);
> > @@ -222,12 +222,12 @@ void xe_sched_job_set_error(struct xe_sched_job *job, int error)
> > trace_xe_sched_job_set_error(job);
> >
> > dma_fence_enable_sw_signaling(job->fence);
> > - xe_hw_fence_irq_run(job->engine->fence_irq);
> > + xe_hw_fence_irq_run(job->q->fence_irq);
> > }
> >
> > bool xe_sched_job_started(struct xe_sched_job *job)
> > {
> > - struct xe_lrc *lrc = job->engine->lrc;
> > + struct xe_lrc *lrc = job->q->lrc;
> >
> > return !__dma_fence_is_later(xe_sched_job_seqno(job),
> > xe_lrc_start_seqno(lrc),
> > @@ -236,7 +236,7 @@ bool xe_sched_job_started(struct xe_sched_job *job)
> >
> > bool xe_sched_job_completed(struct xe_sched_job *job)
> > {
> > - struct xe_lrc *lrc = job->engine->lrc;
> > + struct xe_lrc *lrc = job->q->lrc;
> >
> > /*
> > * Can safely check just LRC[0] seqno as that is last seqno written when
> > diff --git a/drivers/gpu/drm/xe/xe_sched_job.h b/drivers/gpu/drm/xe/xe_sched_job.h
> > index 5315ad8656c2..6ca1d426c036 100644
> > --- a/drivers/gpu/drm/xe/xe_sched_job.h
> > +++ b/drivers/gpu/drm/xe/xe_sched_job.h
> > @@ -14,7 +14,7 @@
> > int xe_sched_job_module_init(void);
> > void xe_sched_job_module_exit(void);
> >
> > -struct xe_sched_job *xe_sched_job_create(struct xe_engine *e,
> > +struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
> > u64 *batch_addr);
> > void xe_sched_job_destroy(struct kref *ref);
> >
> > @@ -71,6 +71,6 @@ xe_sched_job_add_migrate_flush(struct xe_sched_job *job, u32 flags)
> > job->migrate_flush_flags = flags;
> > }
> >
> > -bool xe_sched_job_is_migration(struct xe_engine *e);
> > +bool xe_sched_job_is_migration(struct xe_exec_queue *q);
> >
> > #endif
> > diff --git a/drivers/gpu/drm/xe/xe_sched_job_types.h b/drivers/gpu/drm/xe/xe_sched_job_types.h
> > index 5534bfacaa16..08db0db7814b 100644
> > --- a/drivers/gpu/drm/xe/xe_sched_job_types.h
> > +++ b/drivers/gpu/drm/xe/xe_sched_job_types.h
> > @@ -10,7 +10,7 @@
> >
> > #include <drm/gpu_scheduler.h>
> >
> > -struct xe_engine;
> > +struct xe_exec_queue;
> >
> > /**
> > * struct xe_sched_job - XE schedule job (batch buffer tracking)
> > @@ -18,8 +18,8 @@ struct xe_engine;
> > struct xe_sched_job {
> > /** @drm: base DRM scheduler job */
> > struct drm_sched_job drm;
> > - /** @engine: XE submission engine */
> > - struct xe_engine *engine;
> > +	/** @q: Exec queue */
> > + struct xe_exec_queue *q;
> > /** @refcount: ref count of this job */
> > struct kref refcount;
> > /**
> > diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
> > index 463152528281..e32f1cad51d9 100644
> > --- a/drivers/gpu/drm/xe/xe_trace.h
> > +++ b/drivers/gpu/drm/xe/xe_trace.h
> > @@ -13,10 +13,10 @@
> > #include <linux/types.h>
> >
> > #include "xe_bo_types.h"
> > -#include "xe_engine_types.h"
> > +#include "xe_exec_queue_types.h"
> > #include "xe_gt_tlb_invalidation_types.h"
> > #include "xe_gt_types.h"
> > -#include "xe_guc_engine_types.h"
> > +#include "xe_guc_exec_queue_types.h"
> > #include "xe_sched_job.h"
> > #include "xe_vm.h"
> >
> > @@ -104,9 +104,9 @@ DEFINE_EVENT(xe_bo, xe_bo_move,
> > TP_ARGS(bo)
> > );
> >
> > -DECLARE_EVENT_CLASS(xe_engine,
> > - TP_PROTO(struct xe_engine *e),
> > - TP_ARGS(e),
> > +DECLARE_EVENT_CLASS(xe_exec_queue,
> > + TP_PROTO(struct xe_exec_queue *q),
> > + TP_ARGS(q),
> >
> > TP_STRUCT__entry(
> > __field(enum xe_engine_class, class)
> > @@ -119,13 +119,13 @@ DECLARE_EVENT_CLASS(xe_engine,
> > ),
> >
> > TP_fast_assign(
> > - __entry->class = e->class;
> > - __entry->logical_mask = e->logical_mask;
> > - __entry->gt_id = e->gt->info.id;
> > - __entry->width = e->width;
> > - __entry->guc_id = e->guc->id;
> > - __entry->guc_state = atomic_read(&e->guc->state);
> > - __entry->flags = e->flags;
> > + __entry->class = q->class;
> > + __entry->logical_mask = q->logical_mask;
> > + __entry->gt_id = q->gt->info.id;
> > + __entry->width = q->width;
> > + __entry->guc_id = q->guc->id;
> > + __entry->guc_state = atomic_read(&q->guc->state);
> > + __entry->flags = q->flags;
> > ),
> >
> > TP_printk("%d:0x%x, gt=%d, width=%d, guc_id=%d, guc_state=0x%x, flags=0x%x",
> > @@ -134,94 +134,94 @@ DECLARE_EVENT_CLASS(xe_engine,
> > __entry->guc_state, __entry->flags)
> > );
> >
> > -DEFINE_EVENT(xe_engine, xe_engine_create,
> > - TP_PROTO(struct xe_engine *e),
> > - TP_ARGS(e)
> > +DEFINE_EVENT(xe_exec_queue, xe_exec_queue_create,
> > + TP_PROTO(struct xe_exec_queue *q),
> > + TP_ARGS(q)
> > );
> >
> > -DEFINE_EVENT(xe_engine, xe_engine_supress_resume,
> > - TP_PROTO(struct xe_engine *e),
> > - TP_ARGS(e)
> > +DEFINE_EVENT(xe_exec_queue, xe_exec_queue_supress_resume,
> > + TP_PROTO(struct xe_exec_queue *q),
> > + TP_ARGS(q)
> > );
> >
> > -DEFINE_EVENT(xe_engine, xe_engine_submit,
> > - TP_PROTO(struct xe_engine *e),
> > - TP_ARGS(e)
> > +DEFINE_EVENT(xe_exec_queue, xe_exec_queue_submit,
> > + TP_PROTO(struct xe_exec_queue *q),
> > + TP_ARGS(q)
> > );
> >
> > -DEFINE_EVENT(xe_engine, xe_engine_scheduling_enable,
> > - TP_PROTO(struct xe_engine *e),
> > - TP_ARGS(e)
> > +DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_enable,
> > + TP_PROTO(struct xe_exec_queue *q),
> > + TP_ARGS(q)
> > );
> >
> > -DEFINE_EVENT(xe_engine, xe_engine_scheduling_disable,
> > - TP_PROTO(struct xe_engine *e),
> > - TP_ARGS(e)
> > +DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_disable,
> > + TP_PROTO(struct xe_exec_queue *q),
> > + TP_ARGS(q)
> > );
> >
> > -DEFINE_EVENT(xe_engine, xe_engine_scheduling_done,
> > - TP_PROTO(struct xe_engine *e),
> > - TP_ARGS(e)
> > +DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_done,
> > + TP_PROTO(struct xe_exec_queue *q),
> > + TP_ARGS(q)
> > );
> >
> > -DEFINE_EVENT(xe_engine, xe_engine_register,
> > - TP_PROTO(struct xe_engine *e),
> > - TP_ARGS(e)
> > +DEFINE_EVENT(xe_exec_queue, xe_exec_queue_register,
> > + TP_PROTO(struct xe_exec_queue *q),
> > + TP_ARGS(q)
> > );
> >
> > -DEFINE_EVENT(xe_engine, xe_engine_deregister,
> > - TP_PROTO(struct xe_engine *e),
> > - TP_ARGS(e)
> > +DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister,
> > + TP_PROTO(struct xe_exec_queue *q),
> > + TP_ARGS(q)
> > );
> >
> > -DEFINE_EVENT(xe_engine, xe_engine_deregister_done,
> > - TP_PROTO(struct xe_engine *e),
> > - TP_ARGS(e)
> > +DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister_done,
> > + TP_PROTO(struct xe_exec_queue *q),
> > + TP_ARGS(q)
> > );
> >
> > -DEFINE_EVENT(xe_engine, xe_engine_close,
> > - TP_PROTO(struct xe_engine *e),
> > - TP_ARGS(e)
> > +DEFINE_EVENT(xe_exec_queue, xe_exec_queue_close,
> > + TP_PROTO(struct xe_exec_queue *q),
> > + TP_ARGS(q)
> > );
> >
> > -DEFINE_EVENT(xe_engine, xe_engine_kill,
> > - TP_PROTO(struct xe_engine *e),
> > - TP_ARGS(e)
> > +DEFINE_EVENT(xe_exec_queue, xe_exec_queue_kill,
> > + TP_PROTO(struct xe_exec_queue *q),
> > + TP_ARGS(q)
> > );
> >
> > -DEFINE_EVENT(xe_engine, xe_engine_cleanup_entity,
> > - TP_PROTO(struct xe_engine *e),
> > - TP_ARGS(e)
> > +DEFINE_EVENT(xe_exec_queue, xe_exec_queue_cleanup_entity,
> > + TP_PROTO(struct xe_exec_queue *q),
> > + TP_ARGS(q)
> > );
> >
> > -DEFINE_EVENT(xe_engine, xe_engine_destroy,
> > - TP_PROTO(struct xe_engine *e),
> > - TP_ARGS(e)
> > +DEFINE_EVENT(xe_exec_queue, xe_exec_queue_destroy,
> > + TP_PROTO(struct xe_exec_queue *q),
> > + TP_ARGS(q)
> > );
> >
> > -DEFINE_EVENT(xe_engine, xe_engine_reset,
> > - TP_PROTO(struct xe_engine *e),
> > - TP_ARGS(e)
> > +DEFINE_EVENT(xe_exec_queue, xe_exec_queue_reset,
> > + TP_PROTO(struct xe_exec_queue *q),
> > + TP_ARGS(q)
> > );
> >
> > -DEFINE_EVENT(xe_engine, xe_engine_memory_cat_error,
> > - TP_PROTO(struct xe_engine *e),
> > - TP_ARGS(e)
> > +DEFINE_EVENT(xe_exec_queue, xe_exec_queue_memory_cat_error,
> > + TP_PROTO(struct xe_exec_queue *q),
> > + TP_ARGS(q)
> > );
> >
> > -DEFINE_EVENT(xe_engine, xe_engine_stop,
> > - TP_PROTO(struct xe_engine *e),
> > - TP_ARGS(e)
> > +DEFINE_EVENT(xe_exec_queue, xe_exec_queue_stop,
> > + TP_PROTO(struct xe_exec_queue *q),
> > + TP_ARGS(q)
> > );
> >
> > -DEFINE_EVENT(xe_engine, xe_engine_resubmit,
> > - TP_PROTO(struct xe_engine *e),
> > - TP_ARGS(e)
> > +DEFINE_EVENT(xe_exec_queue, xe_exec_queue_resubmit,
> > + TP_PROTO(struct xe_exec_queue *q),
> > + TP_ARGS(q)
> > );
> >
> > -DEFINE_EVENT(xe_engine, xe_engine_lr_cleanup,
> > - TP_PROTO(struct xe_engine *e),
> > - TP_ARGS(e)
> > +DEFINE_EVENT(xe_exec_queue, xe_exec_queue_lr_cleanup,
> > + TP_PROTO(struct xe_exec_queue *q),
> > + TP_ARGS(q)
> > );
> >
> > DECLARE_EVENT_CLASS(xe_sched_job,
> > @@ -240,10 +240,10 @@ DECLARE_EVENT_CLASS(xe_sched_job,
> >
> > TP_fast_assign(
> > __entry->seqno = xe_sched_job_seqno(job);
> > - __entry->guc_id = job->engine->guc->id;
> > + __entry->guc_id = job->q->guc->id;
> > __entry->guc_state =
> > - atomic_read(&job->engine->guc->state);
> > - __entry->flags = job->engine->flags;
> > + atomic_read(&job->q->guc->state);
> > + __entry->flags = job->q->flags;
> > __entry->error = job->fence->error;
> > __entry->fence = (unsigned long)job->fence;
> > __entry->batch_addr = (u64)job->batch_addr[0];
> > @@ -302,7 +302,7 @@ DECLARE_EVENT_CLASS(drm_sched_msg,
> > TP_fast_assign(
> > __entry->opcode = msg->opcode;
> > __entry->guc_id =
> > - ((struct xe_engine *)msg->private_data)->guc->id;
> > + ((struct xe_exec_queue *)msg->private_data)->guc->id;
> > ),
> >
> > TP_printk("guc_id=%d, opcode=%u", __entry->guc_id,
> > diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
> > index 44df7b9d801e..cb28dbc2bdbb 100644
> > --- a/drivers/gpu/drm/xe/xe_vm.c
> > +++ b/drivers/gpu/drm/xe/xe_vm.c
> > @@ -18,7 +18,7 @@
> >
> > #include "xe_bo.h"
> > #include "xe_device.h"
> > -#include "xe_engine.h"
> > +#include "xe_exec_queue.h"
> > #include "xe_gt.h"
> > #include "xe_gt_pagefault.h"
> > #include "xe_gt_tlb_invalidation.h"
> > @@ -165,15 +165,15 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
> >
> > static bool preempt_fences_waiting(struct xe_vm *vm)
> > {
> > - struct xe_engine *e;
> > + struct xe_exec_queue *q;
> >
> > lockdep_assert_held(&vm->lock);
> > xe_vm_assert_held(vm);
> >
> > - list_for_each_entry(e, &vm->preempt.engines, compute.link) {
> > - if (!e->compute.pfence || (e->compute.pfence &&
> > - test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
> > - &e->compute.pfence->flags))) {
> > + list_for_each_entry(q, &vm->preempt.engines, compute.link) {
> > + if (!q->compute.pfence ||
> > + (q->compute.pfence && test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
> > + &q->compute.pfence->flags))) {
> > return true;
> > }
> > }
> > @@ -212,18 +212,18 @@ static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
> >
> > static int wait_for_existing_preempt_fences(struct xe_vm *vm)
> > {
> > - struct xe_engine *e;
> > + struct xe_exec_queue *q;
> >
> > xe_vm_assert_held(vm);
> >
> > - list_for_each_entry(e, &vm->preempt.engines, compute.link) {
> > - if (e->compute.pfence) {
> > - long timeout = dma_fence_wait(e->compute.pfence, false);
> > + list_for_each_entry(q, &vm->preempt.engines, compute.link) {
> > + if (q->compute.pfence) {
> > + long timeout = dma_fence_wait(q->compute.pfence, false);
> >
> > if (timeout < 0)
> > return -ETIME;
> > - dma_fence_put(e->compute.pfence);
> > - e->compute.pfence = NULL;
> > + dma_fence_put(q->compute.pfence);
> > + q->compute.pfence = NULL;
> > }
> > }
> >
> > @@ -232,11 +232,11 @@ static int wait_for_existing_preempt_fences(struct xe_vm *vm)
> >
> > static bool xe_vm_is_idle(struct xe_vm *vm)
> > {
> > - struct xe_engine *e;
> > + struct xe_exec_queue *q;
> >
> > xe_vm_assert_held(vm);
> > - list_for_each_entry(e, &vm->preempt.engines, compute.link) {
> > - if (!xe_engine_is_idle(e))
> > + list_for_each_entry(q, &vm->preempt.engines, compute.link) {
> > + if (!xe_exec_queue_is_idle(q))
> > return false;
> > }
> >
> > @@ -246,25 +246,25 @@ static bool xe_vm_is_idle(struct xe_vm *vm)
> > static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
> > {
> > struct list_head *link;
> > - struct xe_engine *e;
> > + struct xe_exec_queue *q;
> >
> > - list_for_each_entry(e, &vm->preempt.engines, compute.link) {
> > + list_for_each_entry(q, &vm->preempt.engines, compute.link) {
> > struct dma_fence *fence;
> >
> > link = list->next;
> > XE_WARN_ON(link == list);
> >
> > fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
> > - e, e->compute.context,
> > - ++e->compute.seqno);
> > - dma_fence_put(e->compute.pfence);
> > - e->compute.pfence = fence;
> > + q, q->compute.context,
> > + ++q->compute.seqno);
> > + dma_fence_put(q->compute.pfence);
> > + q->compute.pfence = fence;
> > }
> > }
> >
> > static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
> > {
> > - struct xe_engine *e;
> > + struct xe_exec_queue *q;
> > struct ww_acquire_ctx ww;
> > int err;
> >
> > @@ -272,10 +272,10 @@ static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
> > if (err)
> > return err;
> >
> > - list_for_each_entry(e, &vm->preempt.engines, compute.link)
> > - if (e->compute.pfence) {
> > + list_for_each_entry(q, &vm->preempt.engines, compute.link)
> > + if (q->compute.pfence) {
> > dma_resv_add_fence(bo->ttm.base.resv,
> > - e->compute.pfence,
> > + q->compute.pfence,
> > DMA_RESV_USAGE_BOOKKEEP);
> > }
> >
> > @@ -304,22 +304,22 @@ void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
> >
> > static void resume_and_reinstall_preempt_fences(struct xe_vm *vm)
> > {
> > - struct xe_engine *e;
> > + struct xe_exec_queue *q;
> >
> > lockdep_assert_held(&vm->lock);
> > xe_vm_assert_held(vm);
> >
> > - list_for_each_entry(e, &vm->preempt.engines, compute.link) {
> > - e->ops->resume(e);
> > + list_for_each_entry(q, &vm->preempt.engines, compute.link) {
> > + q->ops->resume(q);
> >
> > - dma_resv_add_fence(&vm->resv, e->compute.pfence,
> > + dma_resv_add_fence(&vm->resv, q->compute.pfence,
> > DMA_RESV_USAGE_BOOKKEEP);
> > - xe_vm_fence_all_extobjs(vm, e->compute.pfence,
> > + xe_vm_fence_all_extobjs(vm, q->compute.pfence,
> > DMA_RESV_USAGE_BOOKKEEP);
> > }
> > }
> >
> > -int xe_vm_add_compute_engine(struct xe_vm *vm, struct xe_engine *e)
> > +int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
> > {
> > struct ttm_validate_buffer tv_onstack[XE_ONSTACK_TV];
> > struct ttm_validate_buffer *tv;
> > @@ -337,16 +337,16 @@ int xe_vm_add_compute_engine(struct xe_vm *vm, struct xe_engine *e)
> > if (err)
> > goto out_unlock_outer;
> >
> > - pfence = xe_preempt_fence_create(e, e->compute.context,
> > - ++e->compute.seqno);
> > + pfence = xe_preempt_fence_create(q, q->compute.context,
> > + ++q->compute.seqno);
> > if (!pfence) {
> > err = -ENOMEM;
> > goto out_unlock;
> > }
> >
> > - list_add(&e->compute.link, &vm->preempt.engines);
> > + list_add(&q->compute.link, &vm->preempt.engines);
> > ++vm->preempt.num_engines;
> > - e->compute.pfence = pfence;
> > + q->compute.pfence = pfence;
> >
> > down_read(&vm->userptr.notifier_lock);
> >
> > @@ -518,7 +518,7 @@ void xe_vm_unlock_dma_resv(struct xe_vm *vm,
> > static void xe_vm_kill(struct xe_vm *vm)
> > {
> > struct ww_acquire_ctx ww;
> > - struct xe_engine *e;
> > + struct xe_exec_queue *q;
> >
> > lockdep_assert_held(&vm->lock);
> >
> > @@ -526,8 +526,8 @@ static void xe_vm_kill(struct xe_vm *vm)
> > vm->flags |= XE_VM_FLAG_BANNED;
> > trace_xe_vm_kill(vm);
> >
> > - list_for_each_entry(e, &vm->preempt.engines, compute.link)
> > - e->ops->kill(e);
> > + list_for_each_entry(q, &vm->preempt.engines, compute.link)
> > + q->ops->kill(q);
> > xe_vm_unlock(vm, &ww);
> >
> > /* TODO: Inform user the VM is banned */
> > @@ -833,7 +833,7 @@ int xe_vm_userptr_check_repin(struct xe_vm *vm)
> > }
> >
> > static struct dma_fence *
> > -xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
> > +xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
> > struct xe_sync_entry *syncs, u32 num_syncs,
> > bool first_op, bool last_op);
> >
> > @@ -1297,21 +1297,21 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
> > for_each_tile(tile, xe, id) {
> > struct xe_gt *gt = tile->primary_gt;
> > struct xe_vm *migrate_vm;
> > - struct xe_engine *eng;
> > + struct xe_exec_queue *q;
> >
> > if (!vm->pt_root[id])
> > continue;
> >
> > migrate_vm = xe_migrate_get_vm(tile->migrate);
> > - eng = xe_engine_create_class(xe, gt, migrate_vm,
> > - XE_ENGINE_CLASS_COPY,
> > - ENGINE_FLAG_VM);
> > + q = xe_exec_queue_create_class(xe, gt, migrate_vm,
> > + XE_ENGINE_CLASS_COPY,
> > + EXEC_QUEUE_FLAG_VM);
> > xe_vm_put(migrate_vm);
> > - if (IS_ERR(eng)) {
> > + if (IS_ERR(q)) {
> > xe_vm_close_and_put(vm);
> > - return ERR_CAST(eng);
> > + return ERR_CAST(q);
> > }
> > - vm->eng[id] = eng;
> > + vm->q[id] = q;
> > number_tiles++;
> > }
> > }
> > @@ -1422,10 +1422,10 @@ void xe_vm_close_and_put(struct xe_vm *vm)
> > flush_work(&vm->preempt.rebind_work);
> >
> > for_each_tile(tile, xe, id) {
> > - if (vm->eng[id]) {
> > - xe_engine_kill(vm->eng[id]);
> > - xe_engine_put(vm->eng[id]);
> > - vm->eng[id] = NULL;
> > + if (vm->q[id]) {
> > + xe_exec_queue_kill(vm->q[id]);
> > + xe_exec_queue_put(vm->q[id]);
> > + vm->q[id] = NULL;
> > }
> > }
> >
> > @@ -1576,7 +1576,7 @@ u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
> > }
> >
> > static struct dma_fence *
> > -xe_vm_unbind_vma(struct xe_vma *vma, struct xe_engine *e,
> > +xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
> > struct xe_sync_entry *syncs, u32 num_syncs,
> > bool first_op, bool last_op)
> > {
> > @@ -1603,7 +1603,7 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_engine *e,
> > if (!(vma->tile_present & BIT(id)))
> > goto next;
> >
> > - fence = __xe_pt_unbind_vma(tile, vma, e, first_op ? syncs : NULL,
> > + fence = __xe_pt_unbind_vma(tile, vma, q, first_op ? syncs : NULL,
> > first_op ? num_syncs : 0);
> > if (IS_ERR(fence)) {
> > err = PTR_ERR(fence);
> > @@ -1614,8 +1614,8 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_engine *e,
> > fences[cur_fence++] = fence;
> >
> > next:
> > - if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
> > - e = list_next_entry(e, multi_gt_list);
> > + if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
> > + q = list_next_entry(q, multi_gt_list);
> > }
> >
> > if (fences) {
> > @@ -1651,7 +1651,7 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_engine *e,
> > }
> >
> > static struct dma_fence *
> > -xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
> > +xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
> > struct xe_sync_entry *syncs, u32 num_syncs,
> > bool first_op, bool last_op)
> > {
> > @@ -1678,7 +1678,7 @@ xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
> > if (!(vma->tile_mask & BIT(id)))
> > goto next;
> >
> > - fence = __xe_pt_bind_vma(tile, vma, e ? e : vm->eng[id],
> > + fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
> > first_op ? syncs : NULL,
> > first_op ? num_syncs : 0,
> > vma->tile_present & BIT(id));
> > @@ -1691,8 +1691,8 @@ xe_vm_bind_vma(struct xe_vma *vma, struct xe_engine *e,
> > fences[cur_fence++] = fence;
> >
> > next:
> > - if (e && vm->pt_root[id] && !list_empty(&e->multi_gt_list))
> > - e = list_next_entry(e, multi_gt_list);
> > + if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
> > + q = list_next_entry(q, multi_gt_list);
> > }
> >
> > if (fences) {
> > @@ -1808,7 +1808,7 @@ int xe_vm_async_fence_wait_start(struct dma_fence *fence)
> > }
> >
> > static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
> > - struct xe_engine *e, struct xe_sync_entry *syncs,
> > + struct xe_exec_queue *q, struct xe_sync_entry *syncs,
> > u32 num_syncs, struct async_op_fence *afence,
> > bool immediate, bool first_op, bool last_op)
> > {
> > @@ -1817,7 +1817,7 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
> > xe_vm_assert_held(vm);
> >
> > if (immediate) {
> > - fence = xe_vm_bind_vma(vma, e, syncs, num_syncs, first_op,
> > + fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
> > last_op);
> > if (IS_ERR(fence))
> > return PTR_ERR(fence);
> > @@ -1839,7 +1839,7 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
> > return 0;
> > }
> >
> > -static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_engine *e,
> > +static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
> > struct xe_bo *bo, struct xe_sync_entry *syncs,
> > u32 num_syncs, struct async_op_fence *afence,
> > bool immediate, bool first_op, bool last_op)
> > @@ -1855,12 +1855,12 @@ static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_engine *e,
> > return err;
> > }
> >
> > - return __xe_vm_bind(vm, vma, e, syncs, num_syncs, afence, immediate,
> > + return __xe_vm_bind(vm, vma, q, syncs, num_syncs, afence, immediate,
> > first_op, last_op);
> > }
> >
> > static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
> > - struct xe_engine *e, struct xe_sync_entry *syncs,
> > + struct xe_exec_queue *q, struct xe_sync_entry *syncs,
> > u32 num_syncs, struct async_op_fence *afence,
> > bool first_op, bool last_op)
> > {
> > @@ -1869,7 +1869,7 @@ static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
> > xe_vm_assert_held(vm);
> > xe_bo_assert_held(xe_vma_bo(vma));
> >
> > - fence = xe_vm_unbind_vma(vma, e, syncs, num_syncs, first_op, last_op);
> > + fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
> > if (IS_ERR(fence))
> > return PTR_ERR(fence);
> > if (afence)
> > @@ -2096,7 +2096,7 @@ static const u32 region_to_mem_type[] = {
> > };
> >
> > static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
> > - struct xe_engine *e, u32 region,
> > + struct xe_exec_queue *q, u32 region,
> > struct xe_sync_entry *syncs, u32 num_syncs,
> > struct async_op_fence *afence, bool first_op,
> > bool last_op)
> > @@ -2112,7 +2112,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
> > }
> >
> > if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
> > - return xe_vm_bind(vm, vma, e, xe_vma_bo(vma), syncs, num_syncs,
> > + return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
> > afence, true, first_op, last_op);
> > } else {
> > int i;
> > @@ -2410,7 +2410,7 @@ static u64 xe_vma_max_pte_size(struct xe_vma *vma)
> > * Parse operations list and create any resources needed for the operations
> > * prior to fully committing to the operations. This setup can fail.
> > */
> > -static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_engine *e,
> > +static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
> > struct drm_gpuva_ops **ops, int num_ops_list,
> > struct xe_sync_entry *syncs, u32 num_syncs,
> > struct list_head *ops_list, bool async)
> > @@ -2430,9 +2430,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_engine *e,
> > if (!fence)
> > return -ENOMEM;
> >
> > - seqno = e ? ++e->bind.fence_seqno : ++vm->async_ops.fence.seqno;
> > + seqno = q ? ++q->bind.fence_seqno : ++vm->async_ops.fence.seqno;
> > dma_fence_init(&fence->fence, &async_op_fence_ops,
> > - &vm->async_ops.lock, e ? e->bind.fence_ctx :
> > + &vm->async_ops.lock, q ? q->bind.fence_ctx :
> > vm->async_ops.fence.context, seqno);
> >
> > if (!xe_vm_no_dma_fences(vm)) {
> > @@ -2463,7 +2463,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_engine *e,
> > op->syncs = syncs;
> > }
> >
> > - op->engine = e;
> > + op->q = q;
> >
> > switch (op->base.op) {
> > case DRM_GPUVA_OP_MAP:
> > @@ -2673,7 +2673,7 @@ static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
> >
> > switch (op->base.op) {
> > case DRM_GPUVA_OP_MAP:
> > - err = xe_vm_bind(vm, vma, op->engine, xe_vma_bo(vma),
> > + err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
> > op->syncs, op->num_syncs, op->fence,
> > op->map.immediate || !xe_vm_in_fault_mode(vm),
> > op->flags & XE_VMA_OP_FIRST,
> > @@ -2689,7 +2689,7 @@ static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
> > vm->async_ops.munmap_rebind_inflight = true;
> > vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
> > }
> > - err = xe_vm_unbind(vm, vma, op->engine, op->syncs,
> > + err = xe_vm_unbind(vm, vma, op->q, op->syncs,
> > op->num_syncs,
> > !prev && !next ? op->fence : NULL,
> > op->flags & XE_VMA_OP_FIRST,
> > @@ -2702,7 +2702,7 @@ static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
> >
> > if (prev) {
> > op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
> > - err = xe_vm_bind(vm, op->remap.prev, op->engine,
> > + err = xe_vm_bind(vm, op->remap.prev, op->q,
> > xe_vma_bo(op->remap.prev), op->syncs,
> > op->num_syncs,
> > !next ? op->fence : NULL, true, false,
> > @@ -2715,7 +2715,7 @@ static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
> >
> > if (next) {
> > op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
> > - err = xe_vm_bind(vm, op->remap.next, op->engine,
> > + err = xe_vm_bind(vm, op->remap.next, op->q,
> > xe_vma_bo(op->remap.next),
> > op->syncs, op->num_syncs,
> > op->fence, true, false,
> > @@ -2730,13 +2730,13 @@ static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
> > break;
> > }
> > case DRM_GPUVA_OP_UNMAP:
> > - err = xe_vm_unbind(vm, vma, op->engine, op->syncs,
> > + err = xe_vm_unbind(vm, vma, op->q, op->syncs,
> > op->num_syncs, op->fence,
> > op->flags & XE_VMA_OP_FIRST,
> > op->flags & XE_VMA_OP_LAST);
> > break;
> > case DRM_GPUVA_OP_PREFETCH:
> > - err = xe_vm_prefetch(vm, vma, op->engine, op->prefetch.region,
> > + err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region,
> > op->syncs, op->num_syncs, op->fence,
> > op->flags & XE_VMA_OP_FIRST,
> > op->flags & XE_VMA_OP_LAST);
> > @@ -2815,8 +2815,8 @@ static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op)
> > while (op->num_syncs--)
> > xe_sync_entry_cleanup(&op->syncs[op->num_syncs]);
> > kfree(op->syncs);
> > - if (op->engine)
> > - xe_engine_put(op->engine);
> > + if (op->q)
> > + xe_exec_queue_put(op->q);
> > if (op->fence)
> > dma_fence_put(&op->fence->fence);
> > }
> > @@ -3170,7 +3170,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> > struct xe_bo **bos = NULL;
> > struct drm_gpuva_ops **ops = NULL;
> > struct xe_vm *vm;
> > - struct xe_engine *e = NULL;
> > + struct xe_exec_queue *q = NULL;
> > u32 num_syncs;
> > struct xe_sync_entry *syncs = NULL;
> > struct drm_xe_vm_bind_op *bind_ops;
> > @@ -3183,23 +3183,23 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> > if (err)
> > return err;
> >
> > - if (args->engine_id) {
> > - e = xe_engine_lookup(xef, args->engine_id);
> > - if (XE_IOCTL_DBG(xe, !e)) {
> > + if (args->exec_queue_id) {
> > + q = xe_exec_queue_lookup(xef, args->exec_queue_id);
> > + if (XE_IOCTL_DBG(xe, !q)) {
> > err = -ENOENT;
> > goto free_objs;
> > }
> >
> > - if (XE_IOCTL_DBG(xe, !(e->flags & ENGINE_FLAG_VM))) {
> > + if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
> > err = -EINVAL;
> > - goto put_engine;
> > + goto put_exec_queue;
> > }
> > }
> >
> > vm = xe_vm_lookup(xef, args->vm_id);
> > if (XE_IOCTL_DBG(xe, !vm)) {
> > err = -EINVAL;
> > - goto put_engine;
> > + goto put_exec_queue;
> > }
> >
> > err = down_write_killable(&vm->lock);
> > @@ -3353,7 +3353,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> > }
> > }
> >
> > - err = vm_bind_ioctl_ops_parse(vm, e, ops, args->num_binds,
> > + err = vm_bind_ioctl_ops_parse(vm, q, ops, args->num_binds,
> > syncs, num_syncs, &ops_list, async);
> > if (err)
> > goto unwind_ops;
> > @@ -3387,9 +3387,9 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> > up_write(&vm->lock);
> > put_vm:
> > xe_vm_put(vm);
> > -put_engine:
> > - if (e)
> > - xe_engine_put(e);
> > +put_exec_queue:
> > + if (q)
> > + xe_exec_queue_put(q);
> > free_objs:
> > kfree(bos);
> > kfree(ops);
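
A note for reviewers on the xe_vm.c hunks above: when userspace does not
supply an exec queue, xe_vm_bind_vma() falls back to the per-tile default
queue created in xe_vm_create() (the "q ? q : vm->q[id]" above). On the
uapi side that default is selected by passing exec_queue_id == 0. A minimal
sketch of that path, not part of the patch (drmIoctl() is libdrm's ioctl
wrapper; the single bind-op member of struct drm_xe_vm_bind is elided
because it is not visible in these hunks):

#include <xf86drm.h>
#include <drm/xe_drm.h>

/* Sketch: select the default VM bind queue by passing exec_queue_id == 0. */
static int bind_with_default_queue(int fd, __u32 vm_id)
{
	struct drm_xe_vm_bind bind = {
		.vm_id = vm_id,
		.exec_queue_id = 0,	/* 0 => default per-tile queue, vm->q[id] */
		.num_binds = 1,
		/* bind-op description elided: not shown in these hunks */
	};

	return drmIoctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
}
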
> > diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
> > index 4db777d7e375..6de6e3edb24a 100644
> > --- a/drivers/gpu/drm/xe/xe_vm.h
> > +++ b/drivers/gpu/drm/xe/xe_vm.h
> > @@ -18,7 +18,7 @@ struct drm_file;
> > struct ttm_buffer_object;
> > struct ttm_validate_buffer;
> >
> > -struct xe_engine;
> > +struct xe_exec_queue;
> > struct xe_file;
> > struct xe_sync_entry;
> >
> > @@ -167,7 +167,7 @@ static inline bool xe_vm_no_dma_fences(struct xe_vm *vm)
> > return xe_vm_in_compute_mode(vm) || xe_vm_in_fault_mode(vm);
> > }
> >
> > -int xe_vm_add_compute_engine(struct xe_vm *vm, struct xe_engine *e);
> > +int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
> >
> > int xe_vm_userptr_pin(struct xe_vm *vm);
> >
> > diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
> > index 261a4b0e8570..3681a5ff588b 100644
> > --- a/drivers/gpu/drm/xe/xe_vm_types.h
> > +++ b/drivers/gpu/drm/xe/xe_vm_types.h
> > @@ -142,8 +142,8 @@ struct xe_vm {
> >
> > struct kref refcount;
> >
> > - /* engine used for (un)binding vma's */
> > - struct xe_engine *eng[XE_MAX_TILES_PER_DEVICE];
> > + /* exec queue used for (un)binding vma's */
> > + struct xe_exec_queue *q[XE_MAX_TILES_PER_DEVICE];
> >
> > /** Protects @rebind_list and the page-table structures */
> > struct dma_resv resv;
> > @@ -394,7 +394,7 @@ struct xe_vma_op {
> > */
> > struct drm_gpuva_ops *ops;
> > -	/** @engine: engine for this operation */
> > -	struct xe_engine *engine;
> > +	/** @q: exec queue for this operation */
> > +	struct xe_exec_queue *q;
> > /**
> > * @syncs: syncs for this operation, only used on first and last
> > * operation
> > diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
> > index 3d09e9e9267b..86f16d50e9cc 100644
> > --- a/include/uapi/drm/xe_drm.h
> > +++ b/include/uapi/drm/xe_drm.h
> > @@ -103,14 +103,14 @@ struct xe_user_extension {
> > #define DRM_XE_VM_CREATE 0x03
> > #define DRM_XE_VM_DESTROY 0x04
> > #define DRM_XE_VM_BIND 0x05
> > -#define DRM_XE_ENGINE_CREATE 0x06
> > -#define DRM_XE_ENGINE_DESTROY 0x07
> > +#define DRM_XE_EXEC_QUEUE_CREATE 0x06
> > +#define DRM_XE_EXEC_QUEUE_DESTROY 0x07
> > #define DRM_XE_EXEC 0x08
> > #define DRM_XE_MMIO 0x09
> > -#define DRM_XE_ENGINE_SET_PROPERTY 0x0a
> > +#define DRM_XE_EXEC_QUEUE_SET_PROPERTY 0x0a
> > #define DRM_XE_WAIT_USER_FENCE 0x0b
> > #define DRM_XE_VM_MADVISE 0x0c
> > -#define DRM_XE_ENGINE_GET_PROPERTY 0x0d
> > +#define DRM_XE_EXEC_QUEUE_GET_PROPERTY 0x0d
> >
> > /* Must be kept compact -- no holes */
> > #define DRM_IOCTL_XE_DEVICE_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
> > @@ -119,12 +119,12 @@ struct xe_user_extension {
> > #define DRM_IOCTL_XE_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create)
> > #define DRM_IOCTL_XE_VM_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy)
> > #define DRM_IOCTL_XE_VM_BIND DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind)
> > -#define DRM_IOCTL_XE_ENGINE_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_ENGINE_CREATE, struct drm_xe_engine_create)
> > -#define DRM_IOCTL_XE_ENGINE_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_ENGINE_GET_PROPERTY, struct drm_xe_engine_get_property)
> > -#define DRM_IOCTL_XE_ENGINE_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_ENGINE_DESTROY, struct drm_xe_engine_destroy)
> > +#define DRM_IOCTL_XE_EXEC_QUEUE_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create)
> > +#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
> > +#define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy)
> > #define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
> > #define DRM_IOCTL_XE_MMIO DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_MMIO, struct drm_xe_mmio)
> > -#define DRM_IOCTL_XE_ENGINE_SET_PROPERTY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_ENGINE_SET_PROPERTY, struct drm_xe_engine_set_property)
> > +#define DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_SET_PROPERTY, struct drm_xe_exec_queue_set_property)
> > #define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
> > #define DRM_IOCTL_XE_VM_MADVISE DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_MADVISE, struct drm_xe_vm_madvise)
> >
> > @@ -649,11 +649,11 @@ struct drm_xe_vm_bind {
> > __u32 vm_id;
> >
> > /**
> > - * @engine_id: engine_id, must be of class DRM_XE_ENGINE_CLASS_VM_BIND
> > - * and engine must have same vm_id. If zero, the default VM bind engine
> > +	 * @exec_queue_id: exec_queue_id, must be of class DRM_XE_ENGINE_CLASS_VM_BIND
> > +	 * and the exec queue must have the same vm_id. If zero, the default VM bind exec queue
> > * is used.
> > */
> > - __u32 engine_id;
> > + __u32 exec_queue_id;
> >
> > /** @num_binds: number of binds in this IOCTL */
> > __u32 num_binds;
> > @@ -685,8 +685,8 @@ struct drm_xe_vm_bind {
> > __u64 reserved[2];
> > };
> >
> > -/** struct drm_xe_ext_engine_set_property - engine set property extension */
> > -struct drm_xe_ext_engine_set_property {
> > +/** struct drm_xe_ext_exec_queue_set_property - exec queue set property extension */
> > +struct drm_xe_ext_exec_queue_set_property {
> > /** @base: base user extension */
> > struct xe_user_extension base;
> >
> > @@ -701,32 +701,32 @@ struct drm_xe_ext_engine_set_property {
> > };
> >
> > /**
> > - * struct drm_xe_engine_set_property - engine set property
> > + * struct drm_xe_exec_queue_set_property - exec queue set property
> > *
> > - * Same namespace for extensions as drm_xe_engine_create
> > + * Same namespace for extensions as drm_xe_exec_queue_create
> > */
> > -struct drm_xe_engine_set_property {
> > +struct drm_xe_exec_queue_set_property {
> > /** @extensions: Pointer to the first extension struct, if any */
> > __u64 extensions;
> >
> > - /** @engine_id: Engine ID */
> > - __u32 engine_id;
> > + /** @exec_queue_id: Exec queue ID */
> > + __u32 exec_queue_id;
> >
> > -#define XE_ENGINE_SET_PROPERTY_PRIORITY 0
> > -#define XE_ENGINE_SET_PROPERTY_TIMESLICE 1
> > -#define XE_ENGINE_SET_PROPERTY_PREEMPTION_TIMEOUT 2
> > +#define XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
> > +#define XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
> > +#define XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT 2
> > /*
> > * Long running or ULLS engine mode. DMA fences not allowed in this
> > * mode. Must match the value of DRM_XE_VM_CREATE_COMPUTE_MODE, serves
> > * as a sanity check the UMD knows what it is doing. Can only be set at
> > * engine create time.
> > */
> > -#define XE_ENGINE_SET_PROPERTY_COMPUTE_MODE 3
> > -#define XE_ENGINE_SET_PROPERTY_PERSISTENCE 4
> > -#define XE_ENGINE_SET_PROPERTY_JOB_TIMEOUT 5
> > -#define XE_ENGINE_SET_PROPERTY_ACC_TRIGGER 6
> > -#define XE_ENGINE_SET_PROPERTY_ACC_NOTIFY 7
> > -#define XE_ENGINE_SET_PROPERTY_ACC_GRANULARITY 8
> > +#define XE_EXEC_QUEUE_SET_PROPERTY_COMPUTE_MODE 3
> > +#define XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE 4
> > +#define XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT 5
> > +#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER 6
> > +#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY 7
> > +#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY 8
> > /** @property: property to set */
> > __u32 property;
> >
> > @@ -755,25 +755,25 @@ struct drm_xe_engine_class_instance {
> > __u16 gt_id;
> > };
> >
> > -struct drm_xe_engine_create {
> > -#define XE_ENGINE_EXTENSION_SET_PROPERTY 0
> > +struct drm_xe_exec_queue_create {
> > +#define XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
> > /** @extensions: Pointer to the first extension struct, if any */
> > __u64 extensions;
> >
> > - /** @width: submission width (number BB per exec) for this engine */
> > + /** @width: submission width (number BB per exec) for this exec queue */
> > __u16 width;
> >
> > - /** @num_placements: number of valid placements for this engine */
> > + /** @num_placements: number of valid placements for this exec queue */
> > __u16 num_placements;
> >
> > - /** @vm_id: VM to use for this engine */
> > + /** @vm_id: VM to use for this exec queue */
> > __u32 vm_id;
> >
> > /** @flags: MBZ */
> > __u32 flags;
> >
> > - /** @engine_id: Returned engine ID */
> > - __u32 engine_id;
> > + /** @exec_queue_id: Returned exec queue ID */
> > + __u32 exec_queue_id;
> >
> > /**
> > * @instances: user pointer to a 2-d array of struct
> > @@ -788,14 +788,14 @@ struct drm_xe_engine_create {
> > __u64 reserved[2];
> > };
> >
> > -struct drm_xe_engine_get_property {
> > +struct drm_xe_exec_queue_get_property {
> > /** @extensions: Pointer to the first extension struct, if any */
> > __u64 extensions;
> >
> > - /** @engine_id: Engine ID */
> > - __u32 engine_id;
> > + /** @exec_queue_id: Exec queue ID */
> > + __u32 exec_queue_id;
> >
> > -#define XE_ENGINE_GET_PROPERTY_BAN 0
> > +#define XE_EXEC_QUEUE_GET_PROPERTY_BAN 0
> > /** @property: property to get */
> > __u32 property;
> >
> > @@ -806,9 +806,9 @@ struct drm_xe_engine_get_property {
> > __u64 reserved[2];
> > };
> >
> > -struct drm_xe_engine_destroy {
> > - /** @engine_id: Engine ID */
> > - __u32 engine_id;
> > +struct drm_xe_exec_queue_destroy {
> > + /** @exec_queue_id: Exec queue ID */
> > + __u32 exec_queue_id;
> >
> > /** @pad: MBZ */
> > __u32 pad;
> > @@ -855,8 +855,8 @@ struct drm_xe_exec {
> > /** @extensions: Pointer to the first extension struct, if any */
> > __u64 extensions;
> >
> > - /** @engine_id: Engine ID for the batch buffer */
> > - __u32 engine_id;
> > + /** @exec_queue_id: Exec queue ID for the batch buffer */
> > + __u32 exec_queue_id;
> >
> > /** @num_syncs: Amount of struct drm_xe_sync in array. */
> > __u32 num_syncs;
> > --
> > 2.34.1
> >
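
For UMD developers following this rename: the uapi structs keep their
layout, only identifiers change (engine_id becomes exec_queue_id,
drm_xe_engine_create becomes drm_xe_exec_queue_create, and so on), and
the hardware engine class constants are intentionally untouched. A
minimal sketch of the create path after the rename, assuming libdrm's
drmIoctl() and the DRM_XE_ENGINE_CLASS_* constants from xe_drm.h (the
engine_class field name of struct drm_xe_engine_class_instance is not
visible in the hunks above, so treat it as an assumption; error handling
trimmed):

#include <stdint.h>
#include <xf86drm.h>
#include <drm/xe_drm.h>

static __u32 create_copy_exec_queue(int fd, __u32 vm_id)
{
	/* Hardware engine class: naming unchanged by this patch. */
	struct drm_xe_engine_class_instance instance = {
		.engine_class = DRM_XE_ENGINE_CLASS_COPY,
	};
	struct drm_xe_exec_queue_create create = {
		.width = 1,		/* one batch buffer per exec */
		.num_placements = 1,	/* a single valid placement */
		.vm_id = vm_id,
		.instances = (uintptr_t)&instance, /* width x num_placements array */
	};

	if (drmIoctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create))
		return 0;	/* sketch only: real code would report errno */

	return create.exec_queue_id;	/* was engine_id before this patch */
}
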