[PATCH 09/43] drm/xe: Switch MMIO interface to take xe_mmio instead of xe_gt
Matt Roper
matthew.d.roper at intel.com
Fri Sep 6 22:44:36 UTC 2024
On Thu, Sep 05, 2024 at 10:44:53PM -0500, Lucas De Marchi wrote:
> On Tue, Sep 03, 2024 at 05:21:10PM GMT, Matt Roper wrote:
> > Since much of the MMIO register access done by the driver is to non-GT
> > registers, use of 'xe_gt' in these interfaces has been a long-standing
> > design flaw that's been hard to disentangle.
> >
> > To avoid a flag day across the whole driver, munge the function names
> > and add temporary compatibility macros with the original function names
> > that can accept either the new xe_mmio or the old xe_gt structure as a
> > parameter. This will allow us to slowly convert parts of the driver
> > over to the new interface independently.
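
Just to illustrate the transition scheme: with the compatibility macros
in place, both the old and the converted call forms compile and resolve
to the same implementation (illustrative calls, not taken from the patch):

	xe_mmio_read32(gt, reg);          /* legacy caller, struct xe_gt * */
	xe_mmio_read32(&gt->mmio, reg);   /* converted caller, struct xe_mmio * */

The _Generic-based __to_xe_mmio() below maps an xe_gt pointer to
&gt->mmio and passes an xe_mmio pointer through unchanged.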
> >
> > Signed-off-by: Matt Roper <matthew.d.roper at intel.com>
> > ---
> > drivers/gpu/drm/xe/xe_mmio.c | 131 ++++++++++++++++------------------
> > drivers/gpu/drm/xe/xe_mmio.h | 76 +++++++++++++++-----
> > drivers/gpu/drm/xe/xe_trace.h | 6 +-
> > 3 files changed, 125 insertions(+), 88 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
> > index 8068663e7d28..92f0bda0ae30 100644
> > --- a/drivers/gpu/drm/xe/xe_mmio.c
> > +++ b/drivers/gpu/drm/xe/xe_mmio.c
> > @@ -67,16 +67,16 @@ static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
> >
> > /* Possibly override number of tile based on configuration register */
> > if (!xe->info.skip_mtcfg) {
> > - struct xe_gt *gt = xe_root_mmio_gt(xe);
> > + struct xe_mmio *mmio = xe_root_tile_mmio(xe);
> > u8 tile_count;
> > u32 mtcfg;
> >
> > /*
> > * Although the per-tile mmio regs are not yet initialized, this
> > - * is fine as it's going to the root gt, that's guaranteed to be
> > - * initialized earlier in xe_mmio_init()
> > + * is fine as it's going to the root tile's mmio, that's
> > + * guaranteed to be initialized earlier in xe_mmio_init()
> > */
> > - mtcfg = xe_mmio_read64_2x32(gt, XEHP_MTCFG_ADDR);
> > + mtcfg = xe_mmio_read64_2x32(mmio, XEHP_MTCFG_ADDR);
> > tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;
> >
> > if (tile_count < xe->info.tile_count) {
> > @@ -187,116 +187,111 @@ int xe_mmio_init(struct xe_device *xe)
> > return devm_add_action_or_reset(xe->drm.dev, mmio_fini, xe);
> > }
> >
> > -static void mmio_flush_pending_writes(struct xe_gt *gt)
> > +static void mmio_flush_pending_writes(struct xe_mmio *mmio)
> > {
> > #define DUMMY_REG_OFFSET 0x130030
> > - struct xe_tile *tile = gt_to_tile(gt);
> > int i;
> >
> > - if (tile->xe->info.platform != XE_LUNARLAKE)
> > + if (mmio->xe->info.platform != XE_LUNARLAKE)
> > return;
> >
> > /* 4 dummy writes */
> > for (i = 0; i < 4; i++)
> > - writel(0, tile->mmio.regs + DUMMY_REG_OFFSET);
> > + writel(0, mmio->regs + DUMMY_REG_OFFSET);
> > }
> >
> > -u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
> > +u8 __xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg)
> > {
> > - struct xe_tile *tile = gt_to_tile(gt);
> > - u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
> > + u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
> > u8 val;
> >
> > /* Wa_15015404425 */
> > - mmio_flush_pending_writes(gt);
> > + mmio_flush_pending_writes(mmio);
> >
> > - val = readb((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
>
> not sure I understand the plan to replace this. This previously worked
> by checking the type of register we had declared and redirecting the
> access to the other region.
>
> Do you think we'd rather use the separate mmio object in the caller like
> below?
>
> xe_mmio_read8(tile->mmio_ext, A_REG_WITH_EXT_MMIO_SET)
Yes, now that mmio_ext is a separate MMIO target (which will probably
use either a different base pointer into the tile's existing iomap or a
completely separate iomap), there's no need to detect "extended"
registers at this point anymore in regular, non-debug builds. Accessors
just take the base pointer from the appropriate xe_mmio structure and
add the register offset from the xe_reg.
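
In each accessor the access path then collapses to roughly this
(matching the code in this patch):

	/* no reg.ext branch; the caller already picked the target */
	addr = xe_mmio_adjusted_addr(mmio, reg.addr);
	val = readl(mmio->regs + addr);

and callers select the target explicitly, along the lines of your
example (register names here are just placeholders):

	xe_mmio_read32(&tile->mmio, SOME_REG);         /* regular registers */
	xe_mmio_read32(&tile->mmio_ext, SOME_EXT_REG); /* separate ext region */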
As mentioned elsewhere, there will be a follow-up series that adds
extra sanity checks to debug builds to catch accidental misuse. One of
those checks will add a flag to the mmio_ext structure ensuring that
reads/writes only happen for registers that also have the corresponding
flag set. So debug builds will warn if they see
xe_mmio_read32(tile->mmio_ext, REG_WITHOUT_EXT_FLAG);
or
xe_mmio_read32(tile->mmio, REG_WITH_EXT_FLAG);
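
A rough sketch of what that debug-only check could look like (the flag
on struct xe_mmio doesn't exist yet, so "ext" here is illustrative):

	#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
		/* hypothetical: catch a register used with the wrong target */
		xe_assert(mmio->xe, mmio->ext == reg.ext);
	#endif

dropped into the __xe_mmio_read*()/__xe_mmio_write*() helpers.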
BTW, we'll probably need to kill off the "ext" naming too.
tile->mmio_ext should probably become something like "tile->foo_mmio"
where "foo" accurately reflects the specific unit that the map is for.
And there can potentially be other "tile->bar_mmio" or "xe->baz_mmio"
targets as well if this infrastructure needs to be used for multiple
register regions that fall outside our traditional gttmmadr registers.
Matt
>
> ?
>
> Then maybe we could have a field in struct xe_mmio that we could
> xe_assert against the type of the register being passed...?
>
> This does look like a more elegant solution. Since we don't have any
> users for that yet, this is entirely deferred until the users show up.
>
> Cc Koby
>
> Lucas De Marchi
>
> > - trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
> > + val = readb(mmio->regs + addr);
> > + trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));
> >
> > return val;
> > }
> >
> > -u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg)
> > +u16 __xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg)
> > {
> > - struct xe_tile *tile = gt_to_tile(gt);
> > - u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
> > + u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
> > u16 val;
> >
> > /* Wa_15015404425 */
> > - mmio_flush_pending_writes(gt);
> > + mmio_flush_pending_writes(mmio);
> >
> > - val = readw((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
> > - trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
> > + val = readw(mmio->regs + addr);
> > + trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));
> >
> > return val;
> > }
> >
> > -void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
> > +void __xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val)
> > {
> > - struct xe_tile *tile = gt_to_tile(gt);
> > - u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
> > + u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
> >
> > - trace_xe_reg_rw(gt, true, addr, val, sizeof(val));
> > + trace_xe_reg_rw(mmio, true, addr, val, sizeof(val));
> >
> > - if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt)))
> > - xe_gt_sriov_vf_write32(gt->mmio.sriov_vf, reg, val);
> > + if (!reg.vf && mmio->sriov_vf)
> > + xe_gt_sriov_vf_write32(mmio->sriov_vf, reg, val);
> > else
> > - writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
> > + writel(val, mmio->regs + addr);
> > }
> >
> > -u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
> > +u32 __xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg)
> > {
> > - struct xe_tile *tile = gt_to_tile(gt);
> > - u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
> > + u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
> > u32 val;
> >
> > /* Wa_15015404425 */
> > - mmio_flush_pending_writes(gt);
> > + mmio_flush_pending_writes(mmio);
> >
> > - if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt)))
> > - val = xe_gt_sriov_vf_read32(gt->mmio.sriov_vf, reg);
> > + if (!reg.vf && mmio->sriov_vf)
> > + val = xe_gt_sriov_vf_read32(mmio->sriov_vf, reg);
> > else
> > - val = readl((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
> > + val = readl(mmio->regs + addr);
> >
> > - trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
> > + trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));
> >
> > return val;
> > }
> >
> > -u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set)
> > +u32 __xe_mmio_rmw32(struct xe_mmio *mmio, struct xe_reg reg, u32 clr, u32 set)
> > {
> > u32 old, reg_val;
> >
> > - old = xe_mmio_read32(gt, reg);
> > + old = xe_mmio_read32(mmio, reg);
> > reg_val = (old & ~clr) | set;
> > - xe_mmio_write32(gt, reg, reg_val);
> > + xe_mmio_write32(mmio, reg, reg_val);
> >
> > return old;
> > }
> >
> > -int xe_mmio_write32_and_verify(struct xe_gt *gt,
> > - struct xe_reg reg, u32 val, u32 mask, u32 eval)
> > +int __xe_mmio_write32_and_verify(struct xe_mmio *mmio,
> > + struct xe_reg reg, u32 val, u32 mask, u32 eval)
> > {
> > u32 reg_val;
> >
> > - xe_mmio_write32(gt, reg, val);
> > - reg_val = xe_mmio_read32(gt, reg);
> > + xe_mmio_write32(mmio, reg, val);
> > + reg_val = xe_mmio_read32(mmio, reg);
> >
> > return (reg_val & mask) != eval ? -EINVAL : 0;
> > }
> >
> > -bool xe_mmio_in_range(const struct xe_gt *gt,
> > - const struct xe_mmio_range *range,
> > - struct xe_reg reg)
> > +bool __xe_mmio_in_range(const struct xe_mmio *mmio,
> > + const struct xe_mmio_range *range,
> > + struct xe_reg reg)
> > {
> > - u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
> > + u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
> >
> > return range && addr >= range->start && addr <= range->end;
> > }
> >
> > /**
> > * xe_mmio_read64_2x32() - Read a 64-bit register as two 32-bit reads
> > - * @gt: MMIO target GT
> > + * @mmio: MMIO target
> > * @reg: register to read value from
> > *
> > * Although Intel GPUs have some 64-bit registers, the hardware officially
> > @@ -316,21 +311,21 @@ bool xe_mmio_in_range(const struct xe_gt *gt,
> > *
> > * Returns the value of the 64-bit register.
> > */
> > -u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg)
> > +u64 __xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg)
> > {
> > struct xe_reg reg_udw = { .addr = reg.addr + 0x4 };
> > u32 ldw, udw, oldudw, retries;
> >
> > - reg.addr = xe_mmio_adjusted_addr(gt, reg.addr);
> > - reg_udw.addr = xe_mmio_adjusted_addr(gt, reg_udw.addr);
> > + reg.addr = xe_mmio_adjusted_addr(mmio, reg.addr);
> > + reg_udw.addr = xe_mmio_adjusted_addr(mmio, reg_udw.addr);
> >
> > /* we shouldn't adjust just one register address */
> > - xe_gt_assert(gt, reg_udw.addr == reg.addr + 0x4);
> > + xe_assert(mmio->xe, reg_udw.addr == reg.addr + 0x4);
> >
> > - oldudw = xe_mmio_read32(gt, reg_udw);
> > + oldudw = xe_mmio_read32(mmio, reg_udw);
> > for (retries = 5; retries; --retries) {
> > - ldw = xe_mmio_read32(gt, reg);
> > - udw = xe_mmio_read32(gt, reg_udw);
> > + ldw = xe_mmio_read32(mmio, reg);
> > + udw = xe_mmio_read32(mmio, reg_udw);
> >
> > if (udw == oldudw)
> > break;
> > @@ -338,14 +333,14 @@ u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg)
> > oldudw = udw;
> > }
> >
> > - xe_gt_WARN(gt, retries == 0,
> > - "64-bit read of %#x did not stabilize\n", reg.addr);
> > + drm_WARN(&mmio->xe->drm, retries == 0,
> > + "64-bit read of %#x did not stabilize\n", reg.addr);
> >
> > return (u64)udw << 32 | ldw;
> > }
> >
> > -static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
> > - u32 *out_val, bool atomic, bool expect_match)
> > +static int ____xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
> > + u32 *out_val, bool atomic, bool expect_match)
> > {
> > ktime_t cur = ktime_get_raw();
> > const ktime_t end = ktime_add_us(cur, timeout_us);
> > @@ -355,7 +350,7 @@ static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 v
> > bool check;
> >
> > for (;;) {
> > - read = xe_mmio_read32(gt, reg);
> > + read = xe_mmio_read32(mmio, reg);
> >
> > check = (read & mask) == val;
> > if (!expect_match)
> > @@ -381,7 +376,7 @@ static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 v
> > }
> >
> > if (ret != 0) {
> > - read = xe_mmio_read32(gt, reg);
> > + read = xe_mmio_read32(mmio, reg);
> >
> > check = (read & mask) == val;
> > if (!expect_match)
> > @@ -399,7 +394,7 @@ static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 v
> >
> > /**
> > * xe_mmio_wait32() - Wait for a register to match the desired masked value
> > - * @gt: MMIO target GT
> > + * @mmio: MMIO target
> > * @reg: register to read value from
> > * @mask: mask to be applied to the value read from the register
> > * @val: desired value after applying the mask
> > @@ -416,15 +411,15 @@ static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 v
> > * @timeout_us for different reasons, specially in non-atomic contexts. Thus,
> > * it is possible that this function succeeds even after @timeout_us has passed.
> > */
> > -int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
> > - u32 *out_val, bool atomic)
> > +int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
> > + u32 *out_val, bool atomic)
> > {
> > - return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, true);
> > + return ____xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, true);
> > }
> >
> > /**
> > * xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value
> > - * @gt: MMIO target GT
> > + * @mmio: MMIO target
> > * @reg: register to read value from
> > * @mask: mask to be applied to the value read from the register
> > * @val: value not to be matched after applying the mask
> > @@ -435,8 +430,8 @@ int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 t
> > * This function works exactly like xe_mmio_wait32() with the exception that
> > * @val is expected not to be matched.
> > */
> > -int xe_mmio_wait32_not(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
> > - u32 *out_val, bool atomic)
> > +int __xe_mmio_wait32_not(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
> > + u32 *out_val, bool atomic)
> > {
> > - return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, false);
> > + return ____xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, false);
> > }
> > diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h
> > index 26551410ecc8..ac6846447c52 100644
> > --- a/drivers/gpu/drm/xe/xe_mmio.h
> > +++ b/drivers/gpu/drm/xe/xe_mmio.h
> > @@ -14,25 +14,67 @@ struct xe_reg;
> > int xe_mmio_init(struct xe_device *xe);
> > int xe_mmio_probe_tiles(struct xe_device *xe);
> >
> > -u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg);
> > -u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg);
> > -void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val);
> > -u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg);
> > -u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set);
> > -int xe_mmio_write32_and_verify(struct xe_gt *gt, struct xe_reg reg, u32 val, u32 mask, u32 eval);
> > -bool xe_mmio_in_range(const struct xe_gt *gt, const struct xe_mmio_range *range, struct xe_reg reg);
> > -
> > -u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg);
> > -int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
> > - u32 *out_val, bool atomic);
> > -int xe_mmio_wait32_not(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
> > - u32 *out_val, bool atomic);
> > -
> > -static inline u32 xe_mmio_adjusted_addr(const struct xe_gt *gt, u32 addr)
> > +/*
> > + * Temporary transition helper for xe_gt -> xe_mmio conversion. Allows
> > + * continued usage of xe_gt as a parameter to MMIO operations which now
> > + * take an xe_mmio structure instead. Will be removed once the driver-wide
> > + * conversion is complete.
> > + */
> > +#define __to_xe_mmio(ptr) \
> > + _Generic(ptr, \
> > + const struct xe_gt *: (&((const struct xe_gt *)(ptr))->mmio), \
> > + struct xe_gt *: (&((struct xe_gt *)(ptr))->mmio), \
> > + const struct xe_mmio *: (ptr), \
> > + struct xe_mmio *: (ptr))
> > +
> > +u8 __xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg);
> > +#define xe_mmio_read8(p, reg) __xe_mmio_read8(__to_xe_mmio(p), reg)
> > +
> > +u16 __xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg);
> > +#define xe_mmio_read16(p, reg) __xe_mmio_read16(__to_xe_mmio(p), reg)
> > +
> > +void __xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val);
> > +#define xe_mmio_write32(p, reg, val) __xe_mmio_write32(__to_xe_mmio(p), reg, val)
> > +
> > +u32 __xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg);
> > +#define xe_mmio_read32(p, reg) __xe_mmio_read32(__to_xe_mmio(p), reg)
> > +
> > +u32 __xe_mmio_rmw32(struct xe_mmio *mmio, struct xe_reg reg, u32 clr, u32 set);
> > +#define xe_mmio_rmw32(p, reg, clr, set) __xe_mmio_rmw32(__to_xe_mmio(p), reg, clr, set)
> > +
> > +int __xe_mmio_write32_and_verify(struct xe_mmio *mmio, struct xe_reg reg,
> > + u32 val, u32 mask, u32 eval);
> > +#define xe_mmio_write32_and_verify(p, reg, val, mask, eval) \
> > + __xe_mmio_write32_and_verify(__to_xe_mmio(p), reg, val, mask, eval)
> > +
> > +bool __xe_mmio_in_range(const struct xe_mmio *mmio,
> > + const struct xe_mmio_range *range, struct xe_reg reg);
> > +#define xe_mmio_in_range(p, range, reg) __xe_mmio_in_range(__to_xe_mmio(p), range, reg)
> > +
> > +u64 __xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg);
> > +#define xe_mmio_read64_2x32(p, reg) __xe_mmio_read64_2x32(__to_xe_mmio(p), reg)
> > +
> > +int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val,
> > + u32 timeout_us, u32 *out_val, bool atomic);
> > +#define xe_mmio_wait32(p, reg, mask, val, timeout_us, out_val, atomic) \
> > + __xe_mmio_wait32(__to_xe_mmio(p), reg, mask, val, timeout_us, out_val, atomic)
> > +
> > +int __xe_mmio_wait32_not(struct xe_mmio *mmio, struct xe_reg reg, u32 mask,
> > + u32 val, u32 timeout_us, u32 *out_val, bool atomic);
> > +#define xe_mmio_wait32_not(p, reg, mask, val, timeout_us, out_val, atomic) \
> > + __xe_mmio_wait32_not(__to_xe_mmio(p), reg, mask, val, timeout_us, out_val, atomic)
> > +
> > +static inline u32 __xe_mmio_adjusted_addr(const struct xe_mmio *mmio, u32 addr)
> > {
> > - if (addr < gt->mmio.adj_limit)
> > - addr += gt->mmio.adj_offset;
> > + if (addr < mmio->adj_limit)
> > + addr += mmio->adj_offset;
> > return addr;
> > }
> > +#define xe_mmio_adjusted_addr(p, addr) __xe_mmio_adjusted_addr(__to_xe_mmio(p), addr)
> > +
> > +static inline struct xe_mmio *xe_root_tile_mmio(struct xe_device *xe)
> > +{
> > + return &xe->tiles[0].mmio;
> > +}
> >
> > #endif
> > diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
> > index 8573d7a87d84..d323873dda20 100644
> > --- a/drivers/gpu/drm/xe/xe_trace.h
> > +++ b/drivers/gpu/drm/xe/xe_trace.h
> > @@ -342,12 +342,12 @@ DEFINE_EVENT(xe_hw_fence, xe_hw_fence_try_signal,
> > );
> >
> > TRACE_EVENT(xe_reg_rw,
> > - TP_PROTO(struct xe_gt *gt, bool write, u32 reg, u64 val, int len),
> > + TP_PROTO(struct xe_mmio *mmio, bool write, u32 reg, u64 val, int len),
> >
> > - TP_ARGS(gt, write, reg, val, len),
> > + TP_ARGS(mmio, write, reg, val, len),
> >
> > TP_STRUCT__entry(
> > - __string(dev, __dev_name_gt(gt))
> > + __string(dev, __dev_name_xe(mmio->xe))
> > __field(u64, val)
> > __field(u32, reg)
> > __field(u16, write)
> > --
> > 2.45.2
> >
--
Matt Roper
Graphics Software Engineer
Linux GPU Platform Enablement
Intel Corporation