[Intel-xe] [PATCH 3/3] drm/xe: Kill execlist support.
Francois Dugast
francois.dugast at intel.com
Mon Jul 24 08:02:24 UTC 2023
On Fri, Jul 21, 2023 at 08:40:01PM +0000, Souza, Jose wrote:
> On Fri, 2023-07-21 at 16:19 -0400, Rodrigo Vivi wrote:
> > This submission backend is incomplete and unsupported.
> > It was left in place only for some initial bring-up, comparison,
> > and experiments. But most of the Xe driver and its features rely
> > entirely on GuC submission.
> >
> > If this code is not entirely broken yet, it is likely just a matter
> > of time.
>
> My 2 cents here: I think it is worth keeping even if broken, to make sure Xe keeps a layered backend architecture and doesn't end up like i915 +
> execlist.
> Also, for new platform bring-up this might come in handy once fixed (if the need arises, someone will put in the hours required to make it functional
> again)...
>
My 2 cents: I agree execlist has the potential to be useful, but only if it is
fixed and maintained in the long run, and (quoting the cover letter) "untested,
unvalidated, un-ci, unsupported" are all addressed.
Otherwise I think it is better to remove it and keep it somewhere else as a
patch if needed in the future, because it is odd to keep code we know is not
functional.
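That said, removing it would not make it hard to bring back. Assuming the
removal lands as a single commit (<removal-commit> below is a placeholder for
its id), git can regenerate the backend at any time:

    # Recreate the deleted backend as a new commit on top of the tree
    git revert <removal-commit>

    # Or restore only the deleted files into the working tree
    git checkout <removal-commit>^ -- drivers/gpu/drm/xe/xe_execlist.c \
        drivers/gpu/drm/xe/xe_execlist.h \
        drivers/gpu/drm/xe/xe_execlist_types.h

So the code stays one command away without carrying dead code in the tree.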
Francois
>
> >
> > Signed-off-by: Rodrigo Vivi <rodrigo.vivi at intel.com>
> > ---
> > drivers/gpu/drm/xe/Makefile | 1 -
> > drivers/gpu/drm/xe/xe_debugfs.c | 1 -
> > drivers/gpu/drm/xe/xe_device.c | 1 -
> > drivers/gpu/drm/xe/xe_device.h | 5 -
> > drivers/gpu/drm/xe/xe_device_types.h | 2 -
> > drivers/gpu/drm/xe/xe_engine.c | 5 -
> > drivers/gpu/drm/xe/xe_engine_types.h | 9 +-
> > drivers/gpu/drm/xe/xe_execlist.c | 485 ------------------------
> > drivers/gpu/drm/xe/xe_execlist.h | 21 -
> > drivers/gpu/drm/xe/xe_execlist_types.h | 49 ---
> > drivers/gpu/drm/xe/xe_ggtt.c | 2 +-
> > drivers/gpu/drm/xe/xe_gt.c | 12 -
> > drivers/gpu/drm/xe/xe_guc_pc.c | 2 -
> > drivers/gpu/drm/xe/xe_guc_submit.c | 5 -
> > drivers/gpu/drm/xe/xe_hw_engine.c | 16 +-
> > drivers/gpu/drm/xe/xe_hw_engine_types.h | 2 -
> > drivers/gpu/drm/xe/xe_irq.c | 11 +-
> > drivers/gpu/drm/xe/xe_module.c | 4 -
> > drivers/gpu/drm/xe/xe_module.h | 1 -
> > drivers/gpu/drm/xe/xe_uc.c | 32 --
> > 20 files changed, 6 insertions(+), 660 deletions(-)
> > delete mode 100644 drivers/gpu/drm/xe/xe_execlist.c
> > delete mode 100644 drivers/gpu/drm/xe/xe_execlist.h
> > delete mode 100644 drivers/gpu/drm/xe/xe_execlist_types.h
> >
> > diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
> > index 4ea9e3150c20..06d806c8a68d 100644
> > --- a/drivers/gpu/drm/xe/Makefile
> > +++ b/drivers/gpu/drm/xe/Makefile
> > @@ -53,7 +53,6 @@ xe-y += xe_bb.o \
> > xe_dma_buf.o \
> > xe_engine.o \
> > xe_exec.o \
> > - xe_execlist.o \
> > xe_force_wake.o \
> > xe_ggtt.o \
> > xe_gt.o \
> > diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c
> > index 491506a1e12e..313b666c1c6e 100644
> > --- a/drivers/gpu/drm/xe/xe_debugfs.c
> > +++ b/drivers/gpu/drm/xe/xe_debugfs.c
> > @@ -47,7 +47,6 @@ static int info(struct seq_file *m, void *data)
> > drm_printf(&p, "revid %d\n", xe->info.revid);
> > drm_printf(&p, "tile_count %d\n", xe->info.tile_count);
> > drm_printf(&p, "vm_max_level %d\n", xe->info.vm_max_level);
> > - drm_printf(&p, "force_execlist %s\n", str_yes_no(xe->info.force_execlist));
> > drm_printf(&p, "supports_usm %s\n", str_yes_no(xe->info.supports_usm));
> > drm_printf(&p, "has_flat_ccs %s\n", str_yes_no(xe->info.has_flat_ccs));
> > for_each_gt(gt, xe, id) {
> > diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
> > index 7221806c1693..7f7198a3dc6d 100644
> > --- a/drivers/gpu/drm/xe/xe_device.c
> > +++ b/drivers/gpu/drm/xe/xe_device.c
> > @@ -203,7 +203,6 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
> >
> > xe->info.devid = pdev->device;
> > xe->info.revid = pdev->revision;
> > - xe->info.force_execlist = force_execlist;
> >
> > spin_lock_init(&xe->irq.lock);
> >
> > diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
> > index 0ee44856ad08..327453781f91 100644
> > --- a/drivers/gpu/drm/xe/xe_device.h
> > +++ b/drivers/gpu/drm/xe/xe_device.h
> > @@ -110,11 +110,6 @@ static inline struct xe_gt *xe_root_mmio_gt(struct xe_device *xe)
> > return xe_device_get_root_tile(xe)->primary_gt;
> > }
> >
> > -static inline bool xe_device_guc_submission_enabled(struct xe_device *xe)
> > -{
> > - return !xe->info.force_execlist;
> > -}
> > -
> > #define for_each_tile(tile__, xe__, id__) \
> > for ((id__) = 0; (id__) < (xe__)->info.tile_count; (id__)++) \
> > for_each_if((tile__) = &(xe__)->tiles[(id__)])
> > diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
> > index b156f69d7320..0432ef995a03 100644
> > --- a/drivers/gpu/drm/xe/xe_device_types.h
> > +++ b/drivers/gpu/drm/xe/xe_device_types.h
> > @@ -217,8 +217,6 @@ struct xe_device {
> > u8 supports_usm:1;
> > /** @has_asid: Has address space ID */
> > u8 has_asid:1;
> > - /** @force_execlist: Forced execlist submission */
> > - u8 force_execlist:1;
> > /** @has_flat_ccs: Whether flat CCS metadata is used */
> > u8 has_flat_ccs:1;
> > /** @has_4tile: Whether tile-4 tiling is supported */
> > diff --git a/drivers/gpu/drm/xe/xe_engine.c b/drivers/gpu/drm/xe/xe_engine.c
> > index 59e0a9e085ba..b0218b09ded7 100644
> > --- a/drivers/gpu/drm/xe/xe_engine.c
> > +++ b/drivers/gpu/drm/xe/xe_engine.c
> > @@ -460,16 +460,11 @@ static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
> > struct drm_xe_engine_class_instance *eci,
> > u16 width, u16 num_placements)
> > {
> > - int len = width * num_placements;
> > int i, j, n;
> > u16 class;
> > u16 gt_id;
> > u32 return_mask = 0, prev_mask;
> >
> > - if (XE_IOCTL_DBG(xe, !xe_device_guc_submission_enabled(xe) &&
> > - len > 1))
> > - return 0;
> > -
> > for (i = 0; i < width; ++i) {
> > u32 current_mask = 0;
> >
> > diff --git a/drivers/gpu/drm/xe/xe_engine_types.h b/drivers/gpu/drm/xe/xe_engine_types.h
> > index 36bfaeec23f4..24cabeb4021b 100644
> > --- a/drivers/gpu/drm/xe/xe_engine_types.h
> > +++ b/drivers/gpu/drm/xe/xe_engine_types.h
> > @@ -14,7 +14,6 @@
> > #include "xe_hw_fence_types.h"
> > #include "xe_lrc_types.h"
> >
> > -struct xe_execlist_engine;
> > struct xe_gt;
> > struct xe_guc_engine;
> > struct xe_hw_engine;
> > @@ -73,12 +72,8 @@ struct xe_engine {
> > struct list_head multi_gt_link;
> > };
> >
> > - union {
> > - /** @execlist: execlist backend specific state for engine */
> > - struct xe_execlist_engine *execlist;
> > - /** @guc: GuC backend specific state for engine */
> > - struct xe_guc_engine *guc;
> > - };
> > + /** @guc: GuC backend specific state for engine */
> > + struct xe_guc_engine *guc;
> >
> > /**
> > * @persistent: persistent engine state
> > diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
> > deleted file mode 100644
> > index 541f82387bb5..000000000000
> > --- a/drivers/gpu/drm/xe/xe_execlist.c
> > +++ /dev/null
> > @@ -1,485 +0,0 @@
> > -// SPDX-License-Identifier: MIT
> > -/*
> > - * Copyright © 2021 Intel Corporation
> > - */
> > -
> > -#include "xe_execlist.h"
> > -
> > -#include <drm/drm_managed.h>
> > -
> > -#include "regs/xe_engine_regs.h"
> > -#include "regs/xe_gpu_commands.h"
> > -#include "regs/xe_gt_regs.h"
> > -#include "regs/xe_lrc_layout.h"
> > -#include "regs/xe_regs.h"
> > -#include "xe_bo.h"
> > -#include "xe_device.h"
> > -#include "xe_engine.h"
> > -#include "xe_gt.h"
> > -#include "xe_hw_fence.h"
> > -#include "xe_lrc.h"
> > -#include "xe_macros.h"
> > -#include "xe_mmio.h"
> > -#include "xe_mocs.h"
> > -#include "xe_ring_ops_types.h"
> > -#include "xe_sched_job.h"
> > -
> > -#define XE_EXECLIST_HANG_LIMIT 1
> > -
> > -#define GEN11_SW_CTX_ID_SHIFT 37
> > -#define GEN11_SW_CTX_ID_WIDTH 11
> > -#define XEHP_SW_CTX_ID_SHIFT 39
> > -#define XEHP_SW_CTX_ID_WIDTH 16
> > -
> > -#define GEN11_SW_CTX_ID \
> > - GENMASK_ULL(GEN11_SW_CTX_ID_WIDTH + GEN11_SW_CTX_ID_SHIFT - 1, \
> > - GEN11_SW_CTX_ID_SHIFT)
> > -
> > -#define XEHP_SW_CTX_ID \
> > - GENMASK_ULL(XEHP_SW_CTX_ID_WIDTH + XEHP_SW_CTX_ID_SHIFT - 1, \
> > - XEHP_SW_CTX_ID_SHIFT)
> > -
> > -
> > -static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc,
> > - u32 ctx_id)
> > -{
> > - struct xe_gt *gt = hwe->gt;
> > - struct xe_device *xe = gt_to_xe(gt);
> > - u64 lrc_desc;
> > -
> > - lrc_desc = xe_lrc_descriptor(lrc);
> > -
> > - if (GRAPHICS_VERx100(xe) >= 1250) {
> > - XE_BUG_ON(!FIELD_FIT(XEHP_SW_CTX_ID, ctx_id));
> > - lrc_desc |= FIELD_PREP(XEHP_SW_CTX_ID, ctx_id);
> > - } else {
> > - XE_BUG_ON(!FIELD_FIT(GEN11_SW_CTX_ID, ctx_id));
> > - lrc_desc |= FIELD_PREP(GEN11_SW_CTX_ID, ctx_id);
> > - }
> > -
> > - if (hwe->class == XE_ENGINE_CLASS_COMPUTE)
> > - xe_mmio_write32(hwe->gt, RCU_MODE,
> > - _MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));
> > -
> > - xe_lrc_write_ctx_reg(lrc, CTX_RING_TAIL, lrc->ring.tail);
> > - lrc->ring.old_tail = lrc->ring.tail;
> > -
> > - /*
> > - * Make sure the context image is complete before we submit it to HW.
> > - *
> > - * Ostensibly, writes (including the WCB) should be flushed prior to
> > - * an uncached write such as our mmio register access, the empirical
> > - * evidence (esp. on Braswell) suggests that the WC write into memory
> > - * may not be visible to the HW prior to the completion of the UC
> > - * register write and that we may begin execution from the context
> > - * before its image is complete leading to invalid PD chasing.
> > - */
> > - wmb();
> > -
> > - xe_mmio_write32(gt, RING_HWS_PGA(hwe->mmio_base),
> > - xe_bo_ggtt_addr(hwe->hwsp));
> > - xe_mmio_read32(gt, RING_HWS_PGA(hwe->mmio_base));
> > - xe_mmio_write32(gt, RING_MODE(hwe->mmio_base),
> > - _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE));
> > -
> > - xe_mmio_write32(gt, RING_EXECLIST_SQ_CONTENTS_LO(hwe->mmio_base),
> > - lower_32_bits(lrc_desc));
> > - xe_mmio_write32(gt, RING_EXECLIST_SQ_CONTENTS_HI(hwe->mmio_base),
> > - upper_32_bits(lrc_desc));
> > - xe_mmio_write32(gt, RING_EXECLIST_CONTROL(hwe->mmio_base),
> > - EL_CTRL_LOAD);
> > -}
> > -
> > -static void __xe_execlist_port_start(struct xe_execlist_port *port,
> > - struct xe_execlist_engine *exl)
> > -{
> > - struct xe_device *xe = gt_to_xe(port->hwe->gt);
> > - int max_ctx = FIELD_MAX(GEN11_SW_CTX_ID);
> > -
> > - if (GRAPHICS_VERx100(xe) >= 1250)
> > - max_ctx = FIELD_MAX(XEHP_SW_CTX_ID);
> > -
> > - xe_execlist_port_assert_held(port);
> > -
> > - if (port->running_exl != exl || !exl->has_run) {
> > - port->last_ctx_id++;
> > -
> > - /* 0 is reserved for the kernel context */
> > - if (port->last_ctx_id > max_ctx)
> > - port->last_ctx_id = 1;
> > - }
> > -
> > - __start_lrc(port->hwe, exl->engine->lrc, port->last_ctx_id);
> > - port->running_exl = exl;
> > - exl->has_run = true;
> > -}
> > -
> > -static void __xe_execlist_port_idle(struct xe_execlist_port *port)
> > -{
> > - u32 noop[2] = { MI_NOOP, MI_NOOP };
> > -
> > - xe_execlist_port_assert_held(port);
> > -
> > - if (!port->running_exl)
> > - return;
> > -
> > - xe_lrc_write_ring(&port->hwe->kernel_lrc, noop, sizeof(noop));
> > - __start_lrc(port->hwe, &port->hwe->kernel_lrc, 0);
> > - port->running_exl = NULL;
> > -}
> > -
> > -static bool xe_execlist_is_idle(struct xe_execlist_engine *exl)
> > -{
> > - struct xe_lrc *lrc = exl->engine->lrc;
> > -
> > - return lrc->ring.tail == lrc->ring.old_tail;
> > -}
> > -
> > -static void __xe_execlist_port_start_next_active(struct xe_execlist_port *port)
> > -{
> > - struct xe_execlist_engine *exl = NULL;
> > - int i;
> > -
> > - xe_execlist_port_assert_held(port);
> > -
> > - for (i = ARRAY_SIZE(port->active) - 1; i >= 0; i--) {
> > - while (!list_empty(&port->active[i])) {
> > - exl = list_first_entry(&port->active[i],
> > - struct xe_execlist_engine,
> > - active_link);
> > - list_del(&exl->active_link);
> > -
> > - if (xe_execlist_is_idle(exl)) {
> > - exl->active_priority = DRM_SCHED_PRIORITY_UNSET;
> > - continue;
> > - }
> > -
> > - list_add_tail(&exl->active_link, &port->active[i]);
> > - __xe_execlist_port_start(port, exl);
> > - return;
> > - }
> > - }
> > -
> > - __xe_execlist_port_idle(port);
> > -}
> > -
> > -static u64 read_execlist_status(struct xe_hw_engine *hwe)
> > -{
> > - struct xe_gt *gt = hwe->gt;
> > - u32 hi, lo;
> > -
> > - lo = xe_mmio_read32(gt, RING_EXECLIST_STATUS_LO(hwe->mmio_base));
> > - hi = xe_mmio_read32(gt, RING_EXECLIST_STATUS_HI(hwe->mmio_base));
> > -
> > - return lo | (u64)hi << 32;
> > -}
> > -
> > -static void xe_execlist_port_irq_handler_locked(struct xe_execlist_port *port)
> > -{
> > - u64 status;
> > -
> > - xe_execlist_port_assert_held(port);
> > -
> > - status = read_execlist_status(port->hwe);
> > - if (status & BIT(7))
> > - return;
> > -
> > - __xe_execlist_port_start_next_active(port);
> > -}
> > -
> > -static void xe_execlist_port_irq_handler(struct xe_hw_engine *hwe,
> > - u16 intr_vec)
> > -{
> > - struct xe_execlist_port *port = hwe->exl_port;
> > -
> > - spin_lock(&port->lock);
> > - xe_execlist_port_irq_handler_locked(port);
> > - spin_unlock(&port->lock);
> > -}
> > -
> > -static void xe_execlist_port_wake_locked(struct xe_execlist_port *port,
> > - enum drm_sched_priority priority)
> > -{
> > - xe_execlist_port_assert_held(port);
> > -
> > - if (port->running_exl && port->running_exl->active_priority >= priority)
> > - return;
> > -
> > - __xe_execlist_port_start_next_active(port);
> > -}
> > -
> > -static void xe_execlist_make_active(struct xe_execlist_engine *exl)
> > -{
> > - struct xe_execlist_port *port = exl->port;
> > - enum drm_sched_priority priority = exl->entity.priority;
> > -
> > - XE_BUG_ON(priority == DRM_SCHED_PRIORITY_UNSET);
> > - XE_BUG_ON(priority < 0);
> > - XE_BUG_ON(priority >= ARRAY_SIZE(exl->port->active));
> > -
> > - spin_lock_irq(&port->lock);
> > -
> > - if (exl->active_priority != priority &&
> > - exl->active_priority != DRM_SCHED_PRIORITY_UNSET) {
> > - /* Priority changed, move it to the right list */
> > - list_del(&exl->active_link);
> > - exl->active_priority = DRM_SCHED_PRIORITY_UNSET;
> > - }
> > -
> > - if (exl->active_priority == DRM_SCHED_PRIORITY_UNSET) {
> > - exl->active_priority = priority;
> > - list_add_tail(&exl->active_link, &port->active[priority]);
> > - }
> > -
> > - xe_execlist_port_wake_locked(exl->port, priority);
> > -
> > - spin_unlock_irq(&port->lock);
> > -}
> > -
> > -static void xe_execlist_port_irq_fail_timer(struct timer_list *timer)
> > -{
> > - struct xe_execlist_port *port =
> > - container_of(timer, struct xe_execlist_port, irq_fail);
> > -
> > - spin_lock_irq(&port->lock);
> > - xe_execlist_port_irq_handler_locked(port);
> > - spin_unlock_irq(&port->lock);
> > -
> > - port->irq_fail.expires = jiffies + msecs_to_jiffies(1000);
> > - add_timer(&port->irq_fail);
> > -}
> > -
> > -struct xe_execlist_port *xe_execlist_port_create(struct xe_device *xe,
> > - struct xe_hw_engine *hwe)
> > -{
> > - struct drm_device *drm = &xe->drm;
> > - struct xe_execlist_port *port;
> > - int i;
> > -
> > - port = drmm_kzalloc(drm, sizeof(*port), GFP_KERNEL);
> > - if (!port)
> > - return ERR_PTR(-ENOMEM);
> > -
> > - port->hwe = hwe;
> > -
> > - spin_lock_init(&port->lock);
> > - for (i = 0; i < ARRAY_SIZE(port->active); i++)
> > - INIT_LIST_HEAD(&port->active[i]);
> > -
> > - port->last_ctx_id = 1;
> > - port->running_exl = NULL;
> > -
> > - hwe->irq_handler = xe_execlist_port_irq_handler;
> > -
> > - /* TODO: Fix the interrupt code so it doesn't race like mad */
> > - timer_setup(&port->irq_fail, xe_execlist_port_irq_fail_timer, 0);
> > - port->irq_fail.expires = jiffies + msecs_to_jiffies(1000);
> > - add_timer(&port->irq_fail);
> > -
> > - return port;
> > -}
> > -
> > -void xe_execlist_port_destroy(struct xe_execlist_port *port)
> > -{
> > - del_timer(&port->irq_fail);
> > -
> > - /* Prevent an interrupt while we're destroying */
> > - spin_lock_irq(&gt_to_xe(port->hwe->gt)->irq.lock);
> > - port->hwe->irq_handler = NULL;
> > - spin_unlock_irq(&gt_to_xe(port->hwe->gt)->irq.lock);
> > -}
> > -
> > -static struct dma_fence *
> > -execlist_run_job(struct drm_sched_job *drm_job)
> > -{
> > - struct xe_sched_job *job = to_xe_sched_job(drm_job);
> > - struct xe_engine *e = job->engine;
> > - struct xe_execlist_engine *exl = job->engine->execlist;
> > -
> > - e->ring_ops->emit_job(job);
> > - xe_execlist_make_active(exl);
> > -
> > - return dma_fence_get(job->fence);
> > -}
> > -
> > -static void execlist_job_free(struct drm_sched_job *drm_job)
> > -{
> > - struct xe_sched_job *job = to_xe_sched_job(drm_job);
> > -
> > - xe_sched_job_put(job);
> > -}
> > -
> > -static const struct drm_sched_backend_ops drm_sched_ops = {
> > - .run_job = execlist_run_job,
> > - .free_job = execlist_job_free,
> > -};
> > -
> > -static int execlist_engine_init(struct xe_engine *e)
> > -{
> > - struct drm_gpu_scheduler *sched;
> > - struct xe_execlist_engine *exl;
> > - struct xe_device *xe = gt_to_xe(e->gt);
> > - int err;
> > -
> > - XE_BUG_ON(xe_device_guc_submission_enabled(xe));
> > -
> > - drm_info(&xe->drm, "Enabling execlist submission (GuC submission disabled)\n");
> > -
> > - exl = kzalloc(sizeof(*exl), GFP_KERNEL);
> > - if (!exl)
> > - return -ENOMEM;
> > -
> > - exl->engine = e;
> > -
> > - err = drm_sched_init(&exl->sched, &drm_sched_ops, NULL,
> > - e->lrc[0].ring.size / MAX_JOB_SIZE_BYTES,
> > - XE_SCHED_HANG_LIMIT, XE_SCHED_JOB_TIMEOUT,
> > - NULL, NULL, e->hwe->name,
> > - DRM_SCHED_POLICY_SINGLE_ENTITY,
> > - gt_to_xe(e->gt)->drm.dev);
> > - if (err)
> > - goto err_free;
> > -
> > - sched = &exl->sched;
> > - err = drm_sched_entity_init(&exl->entity, DRM_SCHED_PRIORITY_NORMAL,
> > - &sched, 1, NULL);
> > - if (err)
> > - goto err_sched;
> > -
> > - exl->port = e->hwe->exl_port;
> > - exl->has_run = false;
> > - exl->active_priority = DRM_SCHED_PRIORITY_UNSET;
> > - e->execlist = exl;
> > - e->entity = &exl->entity;
> > -
> > - switch (e->class) {
> > - case XE_ENGINE_CLASS_RENDER:
> > - sprintf(e->name, "rcs%d", ffs(e->logical_mask) - 1);
> > - break;
> > - case XE_ENGINE_CLASS_VIDEO_DECODE:
> > - sprintf(e->name, "vcs%d", ffs(e->logical_mask) - 1);
> > - break;
> > - case XE_ENGINE_CLASS_VIDEO_ENHANCE:
> > - sprintf(e->name, "vecs%d", ffs(e->logical_mask) - 1);
> > - break;
> > - case XE_ENGINE_CLASS_COPY:
> > - sprintf(e->name, "bcs%d", ffs(e->logical_mask) - 1);
> > - break;
> > - case XE_ENGINE_CLASS_COMPUTE:
> > - sprintf(e->name, "ccs%d", ffs(e->logical_mask) - 1);
> > - break;
> > - default:
> > - XE_WARN_ON(e->class);
> > - }
> > -
> > - return 0;
> > -
> > -err_sched:
> > - drm_sched_fini(&exl->sched);
> > -err_free:
> > - kfree(exl);
> > - return err;
> > -}
> > -
> > -static void execlist_engine_fini_async(struct work_struct *w)
> > -{
> > - struct xe_execlist_engine *ee =
> > - container_of(w, struct xe_execlist_engine, fini_async);
> > - struct xe_engine *e = ee->engine;
> > - struct xe_execlist_engine *exl = e->execlist;
> > - unsigned long flags;
> > -
> > - XE_BUG_ON(xe_device_guc_submission_enabled(gt_to_xe(e->gt)));
> > -
> > - spin_lock_irqsave(&exl->port->lock, flags);
> > - if (WARN_ON(exl->active_priority != DRM_SCHED_PRIORITY_UNSET))
> > - list_del(&exl->active_link);
> > - spin_unlock_irqrestore(&exl->port->lock, flags);
> > -
> > - if (e->flags & ENGINE_FLAG_PERSISTENT)
> > - xe_device_remove_persistent_engines(gt_to_xe(e->gt), e);
> > - drm_sched_entity_fini(&exl->entity);
> > - drm_sched_fini(&exl->sched);
> > - kfree(exl);
> > -
> > - xe_engine_fini(e);
> > -}
> > -
> > -static void execlist_engine_kill(struct xe_engine *e)
> > -{
> > - /* NIY */
> > -}
> > -
> > -static void execlist_engine_fini(struct xe_engine *e)
> > -{
> > - INIT_WORK(&e->execlist->fini_async, execlist_engine_fini_async);
> > - queue_work(system_unbound_wq, &e->execlist->fini_async);
> > -}
> > -
> > -static int execlist_engine_set_priority(struct xe_engine *e,
> > - enum drm_sched_priority priority)
> > -{
> > - /* NIY */
> > - return 0;
> > -}
> > -
> > -static int execlist_engine_set_timeslice(struct xe_engine *e, u32 timeslice_us)
> > -{
> > - /* NIY */
> > - return 0;
> > -}
> > -
> > -static int execlist_engine_set_preempt_timeout(struct xe_engine *e,
> > - u32 preempt_timeout_us)
> > -{
> > - /* NIY */
> > - return 0;
> > -}
> > -
> > -static int execlist_engine_set_job_timeout(struct xe_engine *e,
> > - u32 job_timeout_ms)
> > -{
> > - /* NIY */
> > - return 0;
> > -}
> > -
> > -static int execlist_engine_suspend(struct xe_engine *e)
> > -{
> > - /* NIY */
> > - return 0;
> > -}
> > -
> > -static void execlist_engine_suspend_wait(struct xe_engine *e)
> > -
> > -{
> > - /* NIY */
> > -}
> > -
> > -static void execlist_engine_resume(struct xe_engine *e)
> > -{
> > - /* NIY */
> > -}
> > -
> > -static const struct xe_engine_ops execlist_engine_ops = {
> > - .init = execlist_engine_init,
> > - .kill = execlist_engine_kill,
> > - .fini = execlist_engine_fini,
> > - .set_priority = execlist_engine_set_priority,
> > - .set_timeslice = execlist_engine_set_timeslice,
> > - .set_preempt_timeout = execlist_engine_set_preempt_timeout,
> > - .set_job_timeout = execlist_engine_set_job_timeout,
> > - .suspend = execlist_engine_suspend,
> > - .suspend_wait = execlist_engine_suspend_wait,
> > - .resume = execlist_engine_resume,
> > -};
> > -
> > -int xe_execlist_init(struct xe_gt *gt)
> > -{
> > - /* GuC submission enabled, nothing to do */
> > - if (xe_device_guc_submission_enabled(gt_to_xe(gt)))
> > - return 0;
> > -
> > - gt->engine_ops = &execlist_engine_ops;
> > -
> > - return 0;
> > -}
> > diff --git a/drivers/gpu/drm/xe/xe_execlist.h b/drivers/gpu/drm/xe/xe_execlist.h
> > deleted file mode 100644
> > index 26f600ac8552..000000000000
> > --- a/drivers/gpu/drm/xe/xe_execlist.h
> > +++ /dev/null
> > @@ -1,21 +0,0 @@
> > -/* SPDX-License-Identifier: MIT */
> > -/*
> > - * Copyright © 2021 Intel Corporation
> > - */
> > -
> > -#ifndef _XE_EXECLIST_H_
> > -#define _XE_EXECLIST_H_
> > -
> > -#include "xe_execlist_types.h"
> > -
> > -struct xe_device;
> > -struct xe_gt;
> > -
> > -#define xe_execlist_port_assert_held(port) lockdep_assert_held(&(port)->lock)
> > -
> > -int xe_execlist_init(struct xe_gt *gt);
> > -struct xe_execlist_port *xe_execlist_port_create(struct xe_device *xe,
> > - struct xe_hw_engine *hwe);
> > -void xe_execlist_port_destroy(struct xe_execlist_port *port);
> > -
> > -#endif
> > diff --git a/drivers/gpu/drm/xe/xe_execlist_types.h b/drivers/gpu/drm/xe/xe_execlist_types.h
> > deleted file mode 100644
> > index 05a620940209..000000000000
> > --- a/drivers/gpu/drm/xe/xe_execlist_types.h
> > +++ /dev/null
> > @@ -1,49 +0,0 @@
> > -/* SPDX-License-Identifier: MIT */
> > -/*
> > - * Copyright © 2022 Intel Corporation
> > - */
> > -
> > -#ifndef _XE_EXECLIST_TYPES_H_
> > -#define _XE_EXECLIST_TYPES_H_
> > -
> > -#include <linux/list.h>
> > -#include <linux/spinlock.h>
> > -#include <linux/workqueue.h>
> > -
> > -#include <drm/gpu_scheduler.h>
> > -
> > -struct xe_hw_engine;
> > -struct xe_execlist_engine;
> > -
> > -struct xe_execlist_port {
> > - struct xe_hw_engine *hwe;
> > -
> > - spinlock_t lock;
> > -
> > - struct list_head active[DRM_SCHED_PRIORITY_COUNT];
> > -
> > - u32 last_ctx_id;
> > -
> > - struct xe_execlist_engine *running_exl;
> > -
> > - struct timer_list irq_fail;
> > -};
> > -
> > -struct xe_execlist_engine {
> > - struct xe_engine *engine;
> > -
> > - struct drm_gpu_scheduler sched;
> > -
> > - struct drm_sched_entity entity;
> > -
> > - struct xe_execlist_port *port;
> > -
> > - bool has_run;
> > -
> > - struct work_struct fini_async;
> > -
> > - enum drm_sched_priority active_priority;
> > - struct list_head active_link;
> > -};
> > -
> > -#endif
> > diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
> > index e1b84bc25375..01164eb25a10 100644
> > --- a/drivers/gpu/drm/xe/xe_ggtt.c
> > +++ b/drivers/gpu/drm/xe/xe_ggtt.c
> > @@ -234,7 +234,7 @@ static void ggtt_invalidate_gt_tlb(struct xe_gt *gt)
> > XE_WARN_ON(seqno <= 0);
> > if (seqno > 0)
> > xe_gt_tlb_invalidation_wait(gt, seqno);
> > - } else if (xe_device_guc_submission_enabled(gt_to_xe(gt))) {
> > + } else {
> > struct xe_device *xe = gt_to_xe(gt);
> >
> > if (xe->info.platform == XE_PVC) {
> > diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
> > index 3e32d38aeeea..1b54344fe78f 100644
> > --- a/drivers/gpu/drm/xe/xe_gt.c
> > +++ b/drivers/gpu/drm/xe/xe_gt.c
> > @@ -14,7 +14,6 @@
> > #include "xe_bo.h"
> > #include "xe_device.h"
> > #include "xe_engine.h"
> > -#include "xe_execlist.h"
> > #include "xe_force_wake.h"
> > #include "xe_ggtt.h"
> > #include "xe_gt_clock.h"
> > @@ -355,9 +354,6 @@ static int all_fw_domain_init(struct xe_gt *gt)
> > goto err_force_wake;
> >
> > xe_mocs_init(gt);
> > - err = xe_execlist_init(gt);
> > - if (err)
> > - goto err_force_wake;
> >
> > err = xe_hw_engines_init(gt);
> > if (err)
> > @@ -504,10 +500,6 @@ static int gt_reset(struct xe_gt *gt)
> > {
> > int err;
> >
> > - /* We only support GT resets with GuC submission */
> > - if (!xe_device_guc_submission_enabled(gt_to_xe(gt)))
> > - return -ENODEV;
> > -
> > xe_gt_info(gt, "reset started\n");
> >
> > xe_gt_sanitize(gt);
> > @@ -587,10 +579,6 @@ int xe_gt_suspend(struct xe_gt *gt)
> > {
> > int err;
> >
> > - /* For now suspend/resume is only allowed with GuC */
> > - if (!xe_device_guc_submission_enabled(gt_to_xe(gt)))
> > - return -ENODEV;
> > -
> > xe_gt_sanitize(gt);
> >
> > xe_device_mem_access_get(gt_to_xe(gt));
> > diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
> > index 91a3967fd799..3e442b1619df 100644
> > --- a/drivers/gpu/drm/xe/xe_guc_pc.c
> > +++ b/drivers/gpu/drm/xe/xe_guc_pc.c
> > @@ -814,8 +814,6 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
> > u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
> > int ret;
> >
> > - XE_WARN_ON(!xe_device_guc_submission_enabled(xe));
> > -
> > xe_device_mem_access_get(pc_to_xe(pc));
> >
> > memset(pc->bo->vmap.vaddr, 0, size);
> > diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
> > index 911d4965c27c..390a29b2fa70 100644
> > --- a/drivers/gpu/drm/xe/xe_guc_submit.c
> > +++ b/drivers/gpu/drm/xe/xe_guc_submit.c
> > @@ -1118,8 +1118,6 @@ static int guc_engine_init(struct xe_engine *e)
> > long timeout;
> > int err;
> >
> > - XE_BUG_ON(!xe_device_guc_submission_enabled(guc_to_xe(guc)));
> > -
> > ge = kzalloc(sizeof(*ge), GFP_KERNEL);
> > if (!ge)
> > return -ENOMEM;
> > @@ -1899,9 +1897,6 @@ void xe_guc_submit_print(struct xe_guc *guc, struct drm_printer *p)
> > struct xe_engine *e;
> > unsigned long index;
> >
> > - if (!xe_device_guc_submission_enabled(guc_to_xe(guc)))
> > - return;
> > -
> > mutex_lock(&guc->submission_state.lock);
> > xa_for_each(&guc->submission_state.engine_lookup, index, e)
> > guc_engine_print(e, p);
> > diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
> > index 1af5cccd1142..0c7bf79ed767 100644
> > --- a/drivers/gpu/drm/xe/xe_hw_engine.c
> > +++ b/drivers/gpu/drm/xe/xe_hw_engine.c
> > @@ -12,7 +12,6 @@
> > #include "regs/xe_regs.h"
> > #include "xe_bo.h"
> > #include "xe_device.h"
> > -#include "xe_execlist.h"
> > #include "xe_force_wake.h"
> > #include "xe_gt.h"
> > #include "xe_gt_topology.h"
> > @@ -225,8 +224,6 @@ static void hw_engine_fini(struct drm_device *drm, void *arg)
> > {
> > struct xe_hw_engine *hwe = arg;
> >
> > - if (hwe->exl_port)
> > - xe_execlist_port_destroy(hwe->exl_port);
> > xe_lrc_finish(&hwe->kernel_lrc);
> >
> > xe_bo_unpin_map_no_vm(hwe->hwsp);
> > @@ -395,16 +392,7 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
> > if (err)
> > goto err_hwsp;
> >
> > - if (!xe_device_guc_submission_enabled(xe)) {
> > - hwe->exl_port = xe_execlist_port_create(xe, hwe);
> > - if (IS_ERR(hwe->exl_port)) {
> > - err = PTR_ERR(hwe->exl_port);
> > - goto err_kernel_lrc;
> > - }
> > - }
> > -
> > - if (xe_device_guc_submission_enabled(xe))
> > - xe_hw_engine_enable_ring(hwe);
> > + xe_hw_engine_enable_ring(hwe);
> >
> > /* We reserve the highest BCS instance for USM */
> > if (xe->info.supports_usm && hwe->class == XE_ENGINE_CLASS_COPY)
> > @@ -416,8 +404,6 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
> >
> > return 0;
> >
> > -err_kernel_lrc:
> > - xe_lrc_finish(&hwe->kernel_lrc);
> > err_hwsp:
> > xe_bo_unpin_map_no_vm(hwe->hwsp);
> > err_name:
> > diff --git a/drivers/gpu/drm/xe/xe_hw_engine_types.h b/drivers/gpu/drm/xe/xe_hw_engine_types.h
> > index 803d557cf5aa..0ebbe5ae2419 100644
> > --- a/drivers/gpu/drm/xe/xe_hw_engine_types.h
> > +++ b/drivers/gpu/drm/xe/xe_hw_engine_types.h
> > @@ -99,8 +99,6 @@ struct xe_hw_engine {
> > struct xe_bo *hwsp;
> > /** @kernel_lrc: Kernel LRC (should be replaced /w an xe_engine) */
> > struct xe_lrc kernel_lrc;
> > - /** @exl_port: execlists port */
> > - struct xe_execlist_port *exl_port;
> > /** @fence_irq: fence IRQ to run when a hw engine IRQ is received */
> > struct xe_hw_fence_irq *fence_irq;
> > /** @irq_handler: IRQ handler to run when hw engine IRQ is received */
> > diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
> > index eae190cb0969..5b00dd9ea31e 100644
> > --- a/drivers/gpu/drm/xe/xe_irq.c
> > +++ b/drivers/gpu/drm/xe/xe_irq.c
> > @@ -129,15 +129,8 @@ void xe_irq_enable_hwe(struct xe_gt *gt)
> > u32 ccs_mask, bcs_mask;
> > u32 irqs, dmask, smask;
> >
> > - if (xe_device_guc_submission_enabled(xe)) {
> > - irqs = GT_RENDER_USER_INTERRUPT |
> > - GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
> > - } else {
> > - irqs = GT_RENDER_USER_INTERRUPT |
> > - GT_CS_MASTER_ERROR_INTERRUPT |
> > - GT_CONTEXT_SWITCH_INTERRUPT |
> > - GT_WAIT_SEMAPHORE_INTERRUPT;
> > - }
> > + irqs = GT_RENDER_USER_INTERRUPT |
> > + GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
> >
> > ccs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COMPUTE);
> > bcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COPY);
> > diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c
> > index de85494e2280..81f2379935db 100644
> > --- a/drivers/gpu/drm/xe/xe_module.c
> > +++ b/drivers/gpu/drm/xe/xe_module.c
> > @@ -14,10 +14,6 @@
> > #include "xe_pci.h"
> > #include "xe_sched_job.h"
> >
> > -bool force_execlist = false;
> > -module_param_named_unsafe(force_execlist, force_execlist, bool, 0444);
> > -MODULE_PARM_DESC(force_execlist, "Force Execlist submission");
> > -
> > bool enable_display = true;
> > module_param_named(enable_display, enable_display, bool, 0444);
> > MODULE_PARM_DESC(enable_display, "Enable display");
> > diff --git a/drivers/gpu/drm/xe/xe_module.h b/drivers/gpu/drm/xe/xe_module.h
> > index 2c1f9199f909..33ee4add5e87 100644
> > --- a/drivers/gpu/drm/xe/xe_module.h
> > +++ b/drivers/gpu/drm/xe/xe_module.h
> > @@ -6,7 +6,6 @@
> > #include <linux/types.h>
> >
> > /* Module modprobe variables */
> > -extern bool force_execlist;
> > extern bool enable_display;
> > extern u32 xe_force_vram_bar_size;
> > extern int xe_guc_log_level;
> > diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c
> > index addd6f2681b9..f27059d96602 100644
> > --- a/drivers/gpu/drm/xe/xe_uc.c
> > +++ b/drivers/gpu/drm/xe/xe_uc.c
> > @@ -31,10 +31,6 @@ int xe_uc_init(struct xe_uc *uc)
> > {
> > int ret;
> >
> > - /* GuC submission not enabled, nothing to do */
> > - if (!xe_device_guc_submission_enabled(uc_to_xe(uc)))
> > - return 0;
> > -
> > ret = xe_guc_init(&uc->guc);
> > if (ret)
> > goto err;
> > @@ -65,10 +61,6 @@ int xe_uc_init(struct xe_uc *uc)
> > */
> > int xe_uc_init_post_hwconfig(struct xe_uc *uc)
> > {
> > - /* GuC submission not enabled, nothing to do */
> > - if (!xe_device_guc_submission_enabled(uc_to_xe(uc)))
> > - return 0;
> > -
> > return xe_guc_init_post_hwconfig(&uc->guc);
> > }
> >
> > @@ -109,10 +101,6 @@ int xe_uc_init_hwconfig(struct xe_uc *uc)
> > {
> > int ret;
> >
> > - /* GuC submission not enabled, nothing to do */
> > - if (!xe_device_guc_submission_enabled(uc_to_xe(uc)))
> > - return 0;
> > -
> > ret = xe_guc_min_load_for_hwconfig(&uc->guc);
> > if (ret)
> > return ret;
> > @@ -128,10 +116,6 @@ int xe_uc_init_hw(struct xe_uc *uc)
> > {
> > int ret;
> >
> > - /* GuC submission not enabled, nothing to do */
> > - if (!xe_device_guc_submission_enabled(uc_to_xe(uc)))
> > - return 0;
> > -
> > ret = xe_uc_sanitize_reset(uc);
> > if (ret)
> > return ret;
> > @@ -169,10 +153,6 @@ int xe_uc_init_hw(struct xe_uc *uc)
> >
> > int xe_uc_reset_prepare(struct xe_uc *uc)
> > {
> > - /* GuC submission not enabled, nothing to do */
> > - if (!xe_device_guc_submission_enabled(uc_to_xe(uc)))
> > - return 0;
> > -
> > return xe_guc_reset_prepare(&uc->guc);
> > }
> >
> > @@ -188,19 +168,11 @@ void xe_uc_stop_prepare(struct xe_uc *uc)
> >
> > int xe_uc_stop(struct xe_uc *uc)
> > {
> > - /* GuC submission not enabled, nothing to do */
> > - if (!xe_device_guc_submission_enabled(uc_to_xe(uc)))
> > - return 0;
> > -
> > return xe_guc_stop(&uc->guc);
> > }
> >
> > int xe_uc_start(struct xe_uc *uc)
> > {
> > - /* GuC submission not enabled, nothing to do */
> > - if (!xe_device_guc_submission_enabled(uc_to_xe(uc)))
> > - return 0;
> > -
> > return xe_guc_start(&uc->guc);
> > }
> >
> > @@ -220,10 +192,6 @@ int xe_uc_suspend(struct xe_uc *uc)
> > {
> > int ret;
> >
> > - /* GuC submission not enabled, nothing to do */
> > - if (!xe_device_guc_submission_enabled(uc_to_xe(uc)))
> > - return 0;
> > -
> > uc_reset_wait(uc);
> >
> > ret = xe_uc_stop(uc);
>