[Intel-xe] [PATCH 4/8] drm/xe/irq: Drop unnecessary GEN11_ and GEN12_ register prefixes
Lucas De Marchi
lucas.demarchi at intel.com
Fri Mar 31 22:20:12 UTC 2023
On Thu, Mar 30, 2023 at 11:24:01AM -0700, Matt Roper wrote:
>Any interrupt registers that were introduced by platforms i915
>considered to be "gen11" or "gen12" are present on all platforms that
>the Xe driver supports; drop the unnecessary prefixes.
>
>While working in the area, also convert a few open-coded bit
>manipulations over to REG_BIT and REG_FIELD_GET notation.
>
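As an aside for anyone less familiar with the register helpers: the
REG_BIT()/REG_FIELD_GET() conversion mentioned above is purely
cosmetic. A minimal user-space sketch of the equivalence (the real
kernel helpers are built on BIT()/FIELD_GET() and add compile-time
mask validation; the simplified macros below are only stand-ins so the
sketch is self-contained):

  /* toy stand-ins, not the kernel definitions */
  #include <assert.h>
  #include <stdint.h>

  #define GENMASK(h, l)    ((~0u << (l)) & (~0u >> (31 - (h))))
  #define REG_BIT(n)       (1u << (n))
  /* FIELD_GET-style extraction: mask, then shift down to bit 0 */
  #define REG_FIELD_GET(mask, val) (((val) & (mask)) / ((mask) & -(mask)))

  int main(void)
  {
          uint32_t ident = 0x80341234; /* made-up identity value */

          /* old open-coded form vs. new helper, identical results */
          assert(((ident & GENMASK(25, 20)) >> 20) ==
                 REG_FIELD_GET(GENMASK(25, 20), ident));
          assert((ident & 0xffff) == REG_FIELD_GET(GENMASK(15, 0), ident));
          /* REG_BIT(31) also avoids the old signed 1 << 31 */
          assert((ident & (1u << 31)) == (ident & REG_BIT(31)));
          return 0;
  }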
>Signed-off-by: Matt Roper <matthew.d.roper at intel.com>
>---
> drivers/gpu/drm/xe/regs/xe_gt_regs.h | 42 +++++------
> drivers/gpu/drm/xe/regs/xe_regs.h | 12 +--
I was thinking more of an approach that cleans up the entire driver:
take all the defines in regs/* that have a GEN*_ prefix or _GEN*
suffix and drop it.
I wrote it some time ago, but was hitting issues with display/ failing
to compile because of that (some of the ifdefs inside i915 use our
register defines).
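A purely hypothetical sketch of one way around that (header name and
placement made up for illustration): keep the old GEN-prefixed names
as temporary aliases of the renamed defines, so display/ keeps
compiling until the i915 side stops spelling out the old names:

  /* regs/xe_regs_compat.h -- hypothetical transitional header */
  #ifndef _XE_REGS_COMPAT_H_
  #define _XE_REGS_COMPAT_H_

  #include "regs/xe_gt_regs.h"
  #include "regs/xe_regs.h"

  /* old name on the left, renamed define on the right */
  #define GEN11_GUC_SG_INTR_ENABLE  GUC_SG_INTR_ENABLE
  #define GEN11_GFX_MSTR_IRQ        GFX_MSTR_IRQ
  #define GEN11_GU_MISC_GSE         GU_MISC_GSE
  /* ...and so on, one alias per renamed register... */

  #endif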
Since this one is limited to irq, with more contained changes, I won't
oppose it though. It's going in the right direction.
Checked the diff with --color-words and it does what it says.
Reviewed-by: Lucas De Marchi <lucas.demarchi at intel.com>
thanks
Lucas De Marchi
> drivers/gpu/drm/xe/xe_guc.c | 6 +-
> drivers/gpu/drm/xe/xe_irq.c | 109 +++++++++++++--------------
> 4 files changed, 84 insertions(+), 85 deletions(-)
>
>diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
>index f45251df5715..a8a37e6a45a3 100644
>--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
>+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
>@@ -348,34 +348,34 @@
> #define GFX_FLSH_CNTL_GEN6 _MMIO(0x101008)
> #define GFX_FLSH_CNTL_EN (1 << 0)
>
>-#define GEN11_GT_INTR_DW(x) _MMIO(0x190018 + ((x) * 4))
>+#define GT_INTR_DW(x) _MMIO(0x190018 + ((x) * 4))
>
>-#define GEN11_GUC_SG_INTR_ENABLE _MMIO(0x190038)
>+#define GUC_SG_INTR_ENABLE _MMIO(0x190038)
> #define ENGINE1_MASK REG_GENMASK(31, 16)
> #define ENGINE0_MASK REG_GENMASK(15, 0)
>
>-#define GEN11_GPM_WGBOXPERF_INTR_ENABLE _MMIO(0x19003c)
>+#define GPM_WGBOXPERF_INTR_ENABLE _MMIO(0x19003c)
>
>-#define GEN11_INTR_IDENTITY_REG(x) _MMIO(0x190060 + ((x) * 4))
>-#define GEN11_INTR_DATA_VALID (1 << 31)
>-#define GEN11_INTR_ENGINE_INSTANCE(x) (((x) & GENMASK(25, 20)) >> 20)
>-#define GEN11_INTR_ENGINE_CLASS(x) (((x) & GENMASK(18, 16)) >> 16)
>-#define GEN11_INTR_ENGINE_INTR(x) ((x) & 0xffff)
>+#define INTR_IDENTITY_REG(x) _MMIO(0x190060 + ((x) * 4))
>+#define INTR_DATA_VALID REG_BIT(31)
>+#define INTR_ENGINE_INSTANCE(x) REG_FIELD_GET(GENMASK(25, 20), x)
>+#define INTR_ENGINE_CLASS(x) REG_FIELD_GET(GENMASK(18, 16), x)
>+#define INTR_ENGINE_INTR(x) REG_FIELD_GET(GENMASK(15, 0), x)
> #define OTHER_GUC_INSTANCE 0
>
>-#define GEN11_RENDER_COPY_INTR_ENABLE _MMIO(0x190030)
>-#define GEN11_VCS_VECS_INTR_ENABLE _MMIO(0x190034)
>-#define GEN12_CCS_RSVD_INTR_ENABLE _MMIO(0x190048)
>-#define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + ((x) * 4))
>-#define GEN11_RCS0_RSVD_INTR_MASK _MMIO(0x190090)
>-#define GEN11_BCS_RSVD_INTR_MASK _MMIO(0x1900a0)
>-#define GEN11_VCS0_VCS1_INTR_MASK _MMIO(0x1900a8)
>-#define GEN11_VCS2_VCS3_INTR_MASK _MMIO(0x1900ac)
>-#define GEN11_VECS0_VECS1_INTR_MASK _MMIO(0x1900d0)
>-#define GEN11_GUC_SG_INTR_MASK _MMIO(0x1900e8)
>-#define GEN11_GPM_WGBOXPERF_INTR_MASK _MMIO(0x1900ec)
>-#define GEN12_CCS0_CCS1_INTR_MASK _MMIO(0x190100)
>-#define GEN12_CCS2_CCS3_INTR_MASK _MMIO(0x190104)
>+#define RENDER_COPY_INTR_ENABLE _MMIO(0x190030)
>+#define VCS_VECS_INTR_ENABLE _MMIO(0x190034)
>+#define CCS_RSVD_INTR_ENABLE _MMIO(0x190048)
>+#define IIR_REG_SELECTOR(x) _MMIO(0x190070 + ((x) * 4))
>+#define RCS0_RSVD_INTR_MASK _MMIO(0x190090)
>+#define BCS_RSVD_INTR_MASK _MMIO(0x1900a0)
>+#define VCS0_VCS1_INTR_MASK _MMIO(0x1900a8)
>+#define VCS2_VCS3_INTR_MASK _MMIO(0x1900ac)
>+#define VECS0_VECS1_INTR_MASK _MMIO(0x1900d0)
>+#define GUC_SG_INTR_MASK _MMIO(0x1900e8)
>+#define GPM_WGBOXPERF_INTR_MASK _MMIO(0x1900ec)
>+#define CCS0_CCS1_INTR_MASK _MMIO(0x190100)
>+#define CCS2_CCS3_INTR_MASK _MMIO(0x190104)
> #define XEHPC_BCS1_BCS2_INTR_MASK _MMIO(0x190110)
> #define XEHPC_BCS3_BCS4_INTR_MASK _MMIO(0x190114)
> #define XEHPC_BCS5_BCS6_INTR_MASK _MMIO(0x190118)
>diff --git a/drivers/gpu/drm/xe/regs/xe_regs.h b/drivers/gpu/drm/xe/regs/xe_regs.h
>index ffe5d726e196..34f12eacd432 100644
>--- a/drivers/gpu/drm/xe/regs/xe_regs.h
>+++ b/drivers/gpu/drm/xe/regs/xe_regs.h
>@@ -78,13 +78,13 @@
>
> #define PCU_IRQ_REGS 0x444e0
> #define GU_MISC_IRQ_REGS 0x444f0
>-#define GEN11_GU_MISC_GSE (1 << 27)
>+#define GU_MISC_GSE REG_BIT(27)
>
>-#define GEN11_GFX_MSTR_IRQ _MMIO(0x190010)
>-#define GEN11_MASTER_IRQ (1 << 31)
>-#define GEN11_GU_MISC_IRQ (1 << 29)
>-#define GEN11_DISPLAY_IRQ (1 << 16)
>-#define GEN11_GT_DW_IRQ(x) (1 << (x))
>+#define GFX_MSTR_IRQ _MMIO(0x190010)
>+#define MASTER_IRQ REG_BIT(31)
>+#define GU_MISC_IRQ REG_BIT(29)
>+#define DISPLAY_IRQ REG_BIT(16)
>+#define GT_DW_IRQ(x) REG_BIT(x)
>
> #define DG1_MSTR_TILE_INTR _MMIO(0x190008)
> #define DG1_MSTR_IRQ REG_BIT(31)
>diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
>index 58b9841616e4..ee71b969bcbf 100644
>--- a/drivers/gpu/drm/xe/xe_guc.c
>+++ b/drivers/gpu/drm/xe/xe_guc.c
>@@ -561,12 +561,12 @@ static void guc_enable_irq(struct xe_guc *guc)
> REG_FIELD_PREP(ENGINE0_MASK, GUC_INTR_GUC2HOST) :
> REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
>
>- xe_mmio_write32(gt, GEN11_GUC_SG_INTR_ENABLE.reg,
>+ xe_mmio_write32(gt, GUC_SG_INTR_ENABLE.reg,
> REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST));
> if (xe_gt_is_media_type(gt))
>- xe_mmio_rmw32(gt, GEN11_GUC_SG_INTR_MASK.reg, events, 0);
>+ xe_mmio_rmw32(gt, GUC_SG_INTR_MASK.reg, events, 0);
> else
>- xe_mmio_write32(gt, GEN11_GUC_SG_INTR_MASK.reg, ~events);
>+ xe_mmio_write32(gt, GUC_SG_INTR_MASK.reg, ~events);
> }
>
> int xe_guc_enable_communication(struct xe_guc *guc)
>diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
>index bf097ba6a10b..b1b94ba43b3f 100644
>--- a/drivers/gpu/drm/xe/xe_irq.c
>+++ b/drivers/gpu/drm/xe/xe_irq.c
>@@ -72,7 +72,7 @@ static void mask_and_disable(struct xe_gt *gt, u32 irqregs)
>
> static u32 gen11_intr_disable(struct xe_gt *gt)
> {
>- xe_mmio_write32(gt, GEN11_GFX_MSTR_IRQ.reg, 0);
>+ xe_mmio_write32(gt, GFX_MSTR_IRQ.reg, 0);
>
> /*
> * Now with master disabled, get a sample of level indications
>@@ -80,7 +80,7 @@ static u32 gen11_intr_disable(struct xe_gt *gt)
> * New indications can and will light up during processing,
> * and will generate new interrupt after enabling master.
> */
>- return xe_mmio_read32(gt, GEN11_GFX_MSTR_IRQ.reg);
>+ return xe_mmio_read32(gt, GFX_MSTR_IRQ.reg);
> }
>
> static u32
>@@ -88,7 +88,7 @@ gen11_gu_misc_irq_ack(struct xe_gt *gt, const u32 master_ctl)
> {
> u32 iir;
>
>- if (!(master_ctl & GEN11_GU_MISC_IRQ))
>+ if (!(master_ctl & GU_MISC_IRQ))
> return 0;
>
> iir = xe_mmio_read32(gt, IIR(GU_MISC_IRQ_REGS).reg);
>@@ -100,9 +100,9 @@ gen11_gu_misc_irq_ack(struct xe_gt *gt, const u32 master_ctl)
>
> static inline void gen11_intr_enable(struct xe_gt *gt, bool stall)
> {
>- xe_mmio_write32(gt, GEN11_GFX_MSTR_IRQ.reg, GEN11_MASTER_IRQ);
>+ xe_mmio_write32(gt, GFX_MSTR_IRQ.reg, MASTER_IRQ);
> if (stall)
>- xe_mmio_read32(gt, GEN11_GFX_MSTR_IRQ.reg);
>+ xe_mmio_read32(gt, GFX_MSTR_IRQ.reg);
> }
>
> static void gen11_gt_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
>@@ -125,14 +125,14 @@ static void gen11_gt_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
> smask = irqs << 16;
>
> /* Enable RCS, BCS, VCS and VECS class interrupts. */
>- xe_mmio_write32(gt, GEN11_RENDER_COPY_INTR_ENABLE.reg, dmask);
>- xe_mmio_write32(gt, GEN11_VCS_VECS_INTR_ENABLE.reg, dmask);
>+ xe_mmio_write32(gt, RENDER_COPY_INTR_ENABLE.reg, dmask);
>+ xe_mmio_write32(gt, VCS_VECS_INTR_ENABLE.reg, dmask);
> if (ccs_mask)
>- xe_mmio_write32(gt, GEN12_CCS_RSVD_INTR_ENABLE.reg, smask);
>+ xe_mmio_write32(gt, CCS_RSVD_INTR_ENABLE.reg, smask);
>
> /* Unmask irqs on RCS, BCS, VCS and VECS engines. */
>- xe_mmio_write32(gt, GEN11_RCS0_RSVD_INTR_MASK.reg, ~smask);
>- xe_mmio_write32(gt, GEN11_BCS_RSVD_INTR_MASK.reg, ~smask);
>+ xe_mmio_write32(gt, RCS0_RSVD_INTR_MASK.reg, ~smask);
>+ xe_mmio_write32(gt, BCS_RSVD_INTR_MASK.reg, ~smask);
> if (bcs_mask & (BIT(1)|BIT(2)))
> xe_mmio_write32(gt, XEHPC_BCS1_BCS2_INTR_MASK.reg, ~dmask);
> if (bcs_mask & (BIT(3)|BIT(4)))
>@@ -141,31 +141,31 @@ static void gen11_gt_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
> xe_mmio_write32(gt, XEHPC_BCS5_BCS6_INTR_MASK.reg, ~dmask);
> if (bcs_mask & (BIT(7)|BIT(8)))
> xe_mmio_write32(gt, XEHPC_BCS7_BCS8_INTR_MASK.reg, ~dmask);
>- xe_mmio_write32(gt, GEN11_VCS0_VCS1_INTR_MASK.reg, ~dmask);
>- xe_mmio_write32(gt, GEN11_VCS2_VCS3_INTR_MASK.reg, ~dmask);
>+ xe_mmio_write32(gt, VCS0_VCS1_INTR_MASK.reg, ~dmask);
>+ xe_mmio_write32(gt, VCS2_VCS3_INTR_MASK.reg, ~dmask);
> //if (HAS_ENGINE(gt, VCS4) || HAS_ENGINE(gt, VCS5))
>- // intel_uncore_write(uncore, GEN12_VCS4_VCS5_INTR_MASK, ~dmask);
>+ // intel_uncore_write(uncore, VCS4_VCS5_INTR_MASK, ~dmask);
> //if (HAS_ENGINE(gt, VCS6) || HAS_ENGINE(gt, VCS7))
>- // intel_uncore_write(uncore, GEN12_VCS6_VCS7_INTR_MASK, ~dmask);
>- xe_mmio_write32(gt, GEN11_VECS0_VECS1_INTR_MASK.reg, ~dmask);
>+ // intel_uncore_write(uncore, VCS6_VCS7_INTR_MASK, ~dmask);
>+ xe_mmio_write32(gt, VECS0_VECS1_INTR_MASK.reg, ~dmask);
> //if (HAS_ENGINE(gt, VECS2) || HAS_ENGINE(gt, VECS3))
>- // intel_uncore_write(uncore, GEN12_VECS2_VECS3_INTR_MASK, ~dmask);
>+ // intel_uncore_write(uncore, VECS2_VECS3_INTR_MASK, ~dmask);
> if (ccs_mask & (BIT(0)|BIT(1)))
>- xe_mmio_write32(gt, GEN12_CCS0_CCS1_INTR_MASK.reg, ~dmask);
>+ xe_mmio_write32(gt, CCS0_CCS1_INTR_MASK.reg, ~dmask);
> if (ccs_mask & (BIT(2)|BIT(3)))
>- xe_mmio_write32(gt, GEN12_CCS2_CCS3_INTR_MASK.reg, ~dmask);
>+ xe_mmio_write32(gt, CCS2_CCS3_INTR_MASK.reg, ~dmask);
>
> /*
> * RPS interrupts will get enabled/disabled on demand when RPS itself
> * is enabled/disabled.
> */
> /* TODO: gt->pm_ier, gt->pm_imr */
>- xe_mmio_write32(gt, GEN11_GPM_WGBOXPERF_INTR_ENABLE.reg, 0);
>- xe_mmio_write32(gt, GEN11_GPM_WGBOXPERF_INTR_MASK.reg, ~0);
>+ xe_mmio_write32(gt, GPM_WGBOXPERF_INTR_ENABLE.reg, 0);
>+ xe_mmio_write32(gt, GPM_WGBOXPERF_INTR_MASK.reg, ~0);
>
> /* Same thing for GuC interrupts */
>- xe_mmio_write32(gt, GEN11_GUC_SG_INTR_ENABLE.reg, 0);
>- xe_mmio_write32(gt, GEN11_GUC_SG_INTR_MASK.reg, ~0);
>+ xe_mmio_write32(gt, GUC_SG_INTR_ENABLE.reg, 0);
>+ xe_mmio_write32(gt, GUC_SG_INTR_MASK.reg, ~0);
> }
>
> static void gen11_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
>@@ -174,7 +174,7 @@ static void gen11_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
>
> gen11_gt_irq_postinstall(xe, gt);
>
>- unmask_and_enable(gt, GU_MISC_IRQ_REGS, GEN11_GU_MISC_GSE);
>+ unmask_and_enable(gt, GU_MISC_IRQ_REGS, GU_MISC_GSE);
>
> gen11_intr_enable(gt, true);
> }
>@@ -190,7 +190,7 @@ gen11_gt_engine_identity(struct xe_device *xe,
>
> lockdep_assert_held(&xe->irq.lock);
>
>- xe_mmio_write32(gt, GEN11_IIR_REG_SELECTOR(bank).reg, BIT(bit));
>+ xe_mmio_write32(gt, IIR_REG_SELECTOR(bank).reg, BIT(bit));
>
> /*
> * NB: Specs do not specify how long to spin wait,
>@@ -198,18 +198,17 @@ gen11_gt_engine_identity(struct xe_device *xe,
> */
> timeout_ts = (local_clock() >> 10) + 100;
> do {
>- ident = xe_mmio_read32(gt, GEN11_INTR_IDENTITY_REG(bank).reg);
>- } while (!(ident & GEN11_INTR_DATA_VALID) &&
>+ ident = xe_mmio_read32(gt, INTR_IDENTITY_REG(bank).reg);
>+ } while (!(ident & INTR_DATA_VALID) &&
> !time_after32(local_clock() >> 10, timeout_ts));
>
>- if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
>+ if (unlikely(!(ident & INTR_DATA_VALID))) {
> drm_err(&xe->drm, "INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
> bank, bit, ident);
> return 0;
> }
>
>- xe_mmio_write32(gt, GEN11_INTR_IDENTITY_REG(bank).reg,
>- GEN11_INTR_DATA_VALID);
>+ xe_mmio_write32(gt, INTR_IDENTITY_REG(bank).reg, INTR_DATA_VALID);
>
> return ident;
> }
>@@ -243,24 +242,24 @@ static void gen11_gt_irq_handler(struct xe_device *xe, struct xe_gt *gt,
> spin_lock(&xe->irq.lock);
>
> for (bank = 0; bank < 2; bank++) {
>- if (!(master_ctl & GEN11_GT_DW_IRQ(bank)))
>+ if (!(master_ctl & GT_DW_IRQ(bank)))
> continue;
>
> if (!xe_gt_is_media_type(gt)) {
> intr_dw[bank] =
>- xe_mmio_read32(gt, GEN11_GT_INTR_DW(bank).reg);
>+ xe_mmio_read32(gt, GT_INTR_DW(bank).reg);
> for_each_set_bit(bit, intr_dw + bank, 32)
> identity[bit] = gen11_gt_engine_identity(xe, gt,
> bank,
> bit);
>- xe_mmio_write32(gt, GEN11_GT_INTR_DW(bank).reg,
>+ xe_mmio_write32(gt, GT_INTR_DW(bank).reg,
> intr_dw[bank]);
> }
>
> for_each_set_bit(bit, intr_dw + bank, 32) {
>- class = GEN11_INTR_ENGINE_CLASS(identity[bit]);
>- instance = GEN11_INTR_ENGINE_INSTANCE(identity[bit]);
>- intr_vec = GEN11_INTR_ENGINE_INTR(identity[bit]);
>+ class = INTR_ENGINE_CLASS(identity[bit]);
>+ instance = INTR_ENGINE_INSTANCE(identity[bit]);
>+ intr_vec = INTR_ENGINE_INTR(identity[bit]);
>
> if (class == XE_ENGINE_CLASS_OTHER) {
> gen11_gt_other_irq_handler(gt, instance,
>@@ -337,7 +336,7 @@ static void dg1_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
> {
> gen11_gt_irq_postinstall(xe, gt);
>
>- unmask_and_enable(gt, GU_MISC_IRQ_REGS, GEN11_GU_MISC_GSE);
>+ unmask_and_enable(gt, GU_MISC_IRQ_REGS, GU_MISC_GSE);
>
> if (gt->info.id == XE_GT0)
> dg1_intr_enable(xe, true);
>@@ -365,7 +364,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
> continue;
>
> if (!xe_gt_is_media_type(gt))
>- master_ctl = xe_mmio_read32(gt, GEN11_GFX_MSTR_IRQ.reg);
>+ master_ctl = xe_mmio_read32(gt, GFX_MSTR_IRQ.reg);
>
> /*
> * We might be in irq handler just when PCIe DPC is initiated
>@@ -379,7 +378,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
> }
>
> if (!xe_gt_is_media_type(gt))
>- xe_mmio_write32(gt, GEN11_GFX_MSTR_IRQ.reg, master_ctl);
>+ xe_mmio_write32(gt, GFX_MSTR_IRQ.reg, master_ctl);
> gen11_gt_irq_handler(xe, gt, master_ctl, intr_dw, identity);
> }
>
>@@ -400,14 +399,14 @@ static void gen11_gt_irq_reset(struct xe_gt *gt)
> u32 bcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COPY);
>
> /* Disable RCS, BCS, VCS and VECS class engines. */
>- xe_mmio_write32(gt, GEN11_RENDER_COPY_INTR_ENABLE.reg, 0);
>- xe_mmio_write32(gt, GEN11_VCS_VECS_INTR_ENABLE.reg, 0);
>+ xe_mmio_write32(gt, RENDER_COPY_INTR_ENABLE.reg, 0);
>+ xe_mmio_write32(gt, VCS_VECS_INTR_ENABLE.reg, 0);
> if (ccs_mask)
>- xe_mmio_write32(gt, GEN12_CCS_RSVD_INTR_ENABLE.reg, 0);
>+ xe_mmio_write32(gt, CCS_RSVD_INTR_ENABLE.reg, 0);
>
> /* Restore masks irqs on RCS, BCS, VCS and VECS engines. */
>- xe_mmio_write32(gt, GEN11_RCS0_RSVD_INTR_MASK.reg, ~0);
>- xe_mmio_write32(gt, GEN11_BCS_RSVD_INTR_MASK.reg, ~0);
>+ xe_mmio_write32(gt, RCS0_RSVD_INTR_MASK.reg, ~0);
>+ xe_mmio_write32(gt, BCS_RSVD_INTR_MASK.reg, ~0);
> if (bcs_mask & (BIT(1)|BIT(2)))
> xe_mmio_write32(gt, XEHPC_BCS1_BCS2_INTR_MASK.reg, ~0);
> if (bcs_mask & (BIT(3)|BIT(4)))
>@@ -416,24 +415,24 @@ static void gen11_gt_irq_reset(struct xe_gt *gt)
> xe_mmio_write32(gt, XEHPC_BCS5_BCS6_INTR_MASK.reg, ~0);
> if (bcs_mask & (BIT(7)|BIT(8)))
> xe_mmio_write32(gt, XEHPC_BCS7_BCS8_INTR_MASK.reg, ~0);
>- xe_mmio_write32(gt, GEN11_VCS0_VCS1_INTR_MASK.reg, ~0);
>- xe_mmio_write32(gt, GEN11_VCS2_VCS3_INTR_MASK.reg, ~0);
>+ xe_mmio_write32(gt, VCS0_VCS1_INTR_MASK.reg, ~0);
>+ xe_mmio_write32(gt, VCS2_VCS3_INTR_MASK.reg, ~0);
> // if (HAS_ENGINE(gt, VCS4) || HAS_ENGINE(gt, VCS5))
>-// xe_mmio_write32(xe, GEN12_VCS4_VCS5_INTR_MASK.reg, ~0);
>+// xe_mmio_write32(xe, VCS4_VCS5_INTR_MASK.reg, ~0);
> // if (HAS_ENGINE(gt, VCS6) || HAS_ENGINE(gt, VCS7))
>-// xe_mmio_write32(xe, GEN12_VCS6_VCS7_INTR_MASK.reg, ~0);
>- xe_mmio_write32(gt, GEN11_VECS0_VECS1_INTR_MASK.reg, ~0);
>+// xe_mmio_write32(xe, VCS6_VCS7_INTR_MASK.reg, ~0);
>+ xe_mmio_write32(gt, VECS0_VECS1_INTR_MASK.reg, ~0);
> // if (HAS_ENGINE(gt, VECS2) || HAS_ENGINE(gt, VECS3))
>-// xe_mmio_write32(xe, GEN12_VECS2_VECS3_INTR_MASK.reg, ~0);
>+// xe_mmio_write32(xe, VECS2_VECS3_INTR_MASK.reg, ~0);
> if (ccs_mask & (BIT(0)|BIT(1)))
>- xe_mmio_write32(gt, GEN12_CCS0_CCS1_INTR_MASK.reg, ~0);
>+ xe_mmio_write32(gt, CCS0_CCS1_INTR_MASK.reg, ~0);
> if (ccs_mask & (BIT(2)|BIT(3)))
>- xe_mmio_write32(gt, GEN12_CCS2_CCS3_INTR_MASK.reg, ~0);
>+ xe_mmio_write32(gt, CCS2_CCS3_INTR_MASK.reg, ~0);
>
>- xe_mmio_write32(gt, GEN11_GPM_WGBOXPERF_INTR_ENABLE.reg, 0);
>- xe_mmio_write32(gt, GEN11_GPM_WGBOXPERF_INTR_MASK.reg, ~0);
>- xe_mmio_write32(gt, GEN11_GUC_SG_INTR_ENABLE.reg, 0);
>- xe_mmio_write32(gt, GEN11_GUC_SG_INTR_MASK.reg, ~0);
>+ xe_mmio_write32(gt, GPM_WGBOXPERF_INTR_ENABLE.reg, 0);
>+ xe_mmio_write32(gt, GPM_WGBOXPERF_INTR_MASK.reg, ~0);
>+ xe_mmio_write32(gt, GUC_SG_INTR_ENABLE.reg, 0);
>+ xe_mmio_write32(gt, GUC_SG_INTR_MASK.reg, ~0);
> }
>
> static void gen11_irq_reset(struct xe_gt *gt)
>--
>2.39.2
>