[PATCH 14/43] drm/xe/compat-i915: Convert register access to use xe_mmio
Matt Roper
matthew.d.roper at intel.com
Fri Sep 6 23:17:01 UTC 2024
On Fri, Sep 06, 2024 at 04:51:45PM -0500, Lucas De Marchi wrote:
> On Fri, Sep 06, 2024 at 12:02:09PM GMT, Jani Nikula wrote:
> > On Tue, 03 Sep 2024, Matt Roper <matthew.d.roper at intel.com> wrote:
> > > Stop using GT pointers for register access.
> > >
> > > Since display (via compat-i915) was the only part of the driver doing
> > > 8-bit and 16-bit register reads, this also allows us to drop the
> > > _Generic wrapper macro on these two functions.
> > >
> > > Signed-off-by: Matt Roper <matthew.d.roper at intel.com>
> > > ---
> > > .../drm/xe/compat-i915-headers/intel_uncore.h | 36 +++++++++----------
> > > drivers/gpu/drm/xe/xe_mmio.c | 4 +--
> > > drivers/gpu/drm/xe/xe_mmio.h | 7 ++--
> > > 3 files changed, 22 insertions(+), 25 deletions(-)
> > >
> > > diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
> > > index eb5b5f0e4bd9..ee3469d4ae73 100644
> > > --- a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
> > > +++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
> > > @@ -10,11 +10,11 @@
> > > #include "xe_device_types.h"
> > > #include "xe_mmio.h"
> > >
> > > -static inline struct xe_gt *__compat_uncore_to_gt(struct intel_uncore *uncore)
> > > +static inline struct xe_mmio *__compat_uncore_to_mmio(struct intel_uncore *uncore)
> > > {
> > > struct xe_device *xe = container_of(uncore, struct xe_device, uncore);
> > >
> > > - return xe_root_mmio_gt(xe);
> > > + return xe_root_tile_mmio(xe);
> > > }
> >
> > Side note, I'd be really interested in ideas for getting from the
> > intel_de.h struct intel_display based interfaces to both the i915
> > struct intel_uncore and xe struct xe_mmio based interfaces.
>
> shouldn't we have an equivalent of xe_mmio for the display engine?
> intel_de is kind of that already, but without the protection around the
> range it's writing/reading to/from.
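Right -- something with a sanity check on the offset range, along these
lines (a purely illustrative sketch; the names and bounds below are made
up, not actual display register limits):

	#define EXAMPLE_DISPLAY_MMIO_BASE	0x40000
	#define EXAMPLE_DISPLAY_MMIO_SIZE	0x80000

	static inline u32 example_de_read32(void __iomem *regs, u32 offset)
	{
		/* refuse offsets outside the (hypothetical) display range */
		WARN_ON(offset < EXAMPLE_DISPLAY_MMIO_BASE ||
			offset >= EXAMPLE_DISPLAY_MMIO_BASE +
				  EXAMPLE_DISPLAY_MMIO_SIZE);

		return readl(regs + offset);
	}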
At one point I was thinking maybe intel_display's register functions
should just do their own readq's and such, either through display's own
iomap or through an iomap that the core driver hands it. But recently
I've seen a couple of workarounds come by of the form "do XXX after
every YYY accesses to the GTTMMADR," which do require some per-PCI-device
tracking (rough sketch below), so I backed off the idea of trying
anything like that right now. Also, the device-wide "uncore lock" is
probably still important on some of the older platforms i915 supports
(although nobody seems to know the specific platform(s) that needed that
synchronization).
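Something like this is the shape those workarounds force -- placeholder
names throughout; the actual interval and the "XXX" action are
workaround-specific and deliberately not spelled out:

	struct example_mmio {
		void __iomem *regs;
		atomic_t access_count;	/* must live with the PCI device */
	};

	static inline u32 example_tracked_read32(struct example_mmio *m,
						 u32 offset)
	{
		u32 val = readl(m->regs + offset);

		/* the "after every YYY accesses" part needs device-wide state */
		if (atomic_inc_return(&m->access_count) % EXAMPLE_INTERVAL == 0)
			example_do_xxx(m);	/* placeholder for "do XXX" */

		return val;
	}

The counter has to be shared by every consumer of the BAR, which is what
makes a display-private iomap awkward.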
Matt
>
> Lucas De Marchi
>
> >
> > Is the easiest common denominator actually a struct drm_device based
> > interface? At the call sites, we shouldn't use or have any idea about
> > i915 or xe specifics, like struct drm_i915_private, struct xe_device,
> > struct intel_uncore, struct xe_mmio, struct intel_gt or struct xe_gt,
> > etc.
> >
> > BR,
> > Jani.
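One purely hypothetical way to get there would be a tiny ops table that
struct intel_display (or whoever) carries, so the intel_de_*() layer
never names a driver-specific type -- none of these types or callbacks
exist today:

	struct example_display_mmio_ops {
		u32 (*read32)(struct drm_device *drm, u32 offset);
		void (*write32)(struct drm_device *drm, u32 offset, u32 val);
	};

	static inline u32 example_de_read(struct drm_device *drm,
					  const struct example_display_mmio_ops *ops,
					  u32 offset)
	{
		/* i915 would back this with intel_uncore, xe with xe_mmio */
		return ops->read32(drm, offset);
	}

Whether an indirect call on every register access is acceptable is a
separate question.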
> >
> >
> > >
> > > static inline struct xe_tile *__compat_uncore_to_tile(struct intel_uncore *uncore)
> > > @@ -29,7 +29,7 @@ static inline u32 intel_uncore_read(struct intel_uncore *uncore,
> > > {
> > > struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
> > >
> > > - return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
> > > + return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
> > > }
> > >
> > > static inline u8 intel_uncore_read8(struct intel_uncore *uncore,
> > > @@ -37,7 +37,7 @@ static inline u8 intel_uncore_read8(struct intel_uncore *uncore,
> > > {
> > > struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
> > >
> > > - return xe_mmio_read8(__compat_uncore_to_gt(uncore), reg);
> > > + return xe_mmio_read8(__compat_uncore_to_mmio(uncore), reg);
> > > }
> > >
> > > static inline u16 intel_uncore_read16(struct intel_uncore *uncore,
> > > @@ -45,7 +45,7 @@ static inline u16 intel_uncore_read16(struct intel_uncore *uncore,
> > > {
> > > struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
> > >
> > > - return xe_mmio_read16(__compat_uncore_to_gt(uncore), reg);
> > > + return xe_mmio_read16(__compat_uncore_to_mmio(uncore), reg);
> > > }
> > >
> > > static inline u64
> > > @@ -57,11 +57,11 @@ intel_uncore_read64_2x32(struct intel_uncore *uncore,
> > > u32 upper, lower, old_upper;
> > > int loop = 0;
> > >
> > > - upper = xe_mmio_read32(__compat_uncore_to_gt(uncore), upper_reg);
> > > + upper = xe_mmio_read32(__compat_uncore_to_mmio(uncore), upper_reg);
> > > do {
> > > old_upper = upper;
> > > - lower = xe_mmio_read32(__compat_uncore_to_gt(uncore), lower_reg);
> > > - upper = xe_mmio_read32(__compat_uncore_to_gt(uncore), upper_reg);
> > > + lower = xe_mmio_read32(__compat_uncore_to_mmio(uncore), lower_reg);
> > > + upper = xe_mmio_read32(__compat_uncore_to_mmio(uncore), upper_reg);
> > > } while (upper != old_upper && loop++ < 2);
> > >
> > > return (u64)upper << 32 | lower;
> > > @@ -72,7 +72,7 @@ static inline void intel_uncore_posting_read(struct intel_uncore *uncore,
> > > {
> > > struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
> > >
> > > - xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
> > > + xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
> > > }
> > >
> > > static inline void intel_uncore_write(struct intel_uncore *uncore,
> > > @@ -80,7 +80,7 @@ static inline void intel_uncore_write(struct intel_uncore *uncore,
> > > {
> > > struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
> > >
> > > - xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
> > > + xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val);
> > > }
> > >
> > > static inline u32 intel_uncore_rmw(struct intel_uncore *uncore,
> > > @@ -88,7 +88,7 @@ static inline u32 intel_uncore_rmw(struct intel_uncore *uncore,
> > > {
> > > struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
> > >
> > > - return xe_mmio_rmw32(__compat_uncore_to_gt(uncore), reg, clear, set);
> > > + return xe_mmio_rmw32(__compat_uncore_to_mmio(uncore), reg, clear, set);
> > > }
> > >
> > > static inline int intel_wait_for_register(struct intel_uncore *uncore,
> > > @@ -97,7 +97,7 @@ static inline int intel_wait_for_register(struct intel_uncore *uncore,
> > > {
> > > struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
> > >
> > > - return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
> > > + return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
> > > timeout * USEC_PER_MSEC, NULL, false);
> > > }
> > >
> > > @@ -107,7 +107,7 @@ static inline int intel_wait_for_register_fw(struct intel_uncore *uncore,
> > > {
> > > struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
> > >
> > > - return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
> > > + return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
> > > timeout * USEC_PER_MSEC, NULL, false);
> > > }
> > >
> > > @@ -118,7 +118,7 @@ __intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t i915_reg,
> > > {
> > > struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
> > >
> > > - return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
> > > + return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
> > > fast_timeout_us + 1000 * slow_timeout_ms,
> > > out_value, false);
> > > }
> > > @@ -128,7 +128,7 @@ static inline u32 intel_uncore_read_fw(struct intel_uncore *uncore,
> > > {
> > > struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
> > >
> > > - return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
> > > + return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
> > > }
> > >
> > > static inline void intel_uncore_write_fw(struct intel_uncore *uncore,
> > > @@ -136,7 +136,7 @@ static inline void intel_uncore_write_fw(struct intel_uncore *uncore,
> > > {
> > > struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
> > >
> > > - xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
> > > + xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val);
> > > }
> > >
> > > static inline u32 intel_uncore_read_notrace(struct intel_uncore *uncore,
> > > @@ -144,7 +144,7 @@ static inline u32 intel_uncore_read_notrace(struct intel_uncore *uncore,
> > > {
> > > struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
> > >
> > > - return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
> > > + return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
> > > }
> > >
> > > static inline void intel_uncore_write_notrace(struct intel_uncore *uncore,
> > > @@ -152,7 +152,7 @@ static inline void intel_uncore_write_notrace(struct intel_uncore *uncore,
> > > {
> > > struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
> > >
> > > - xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
> > > + xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val);
> > > }
> > >
> > > static inline void __iomem *intel_uncore_regs(struct intel_uncore *uncore)
> > > diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
> > > index 92f0bda0ae30..dbede6056bee 100644
> > > --- a/drivers/gpu/drm/xe/xe_mmio.c
> > > +++ b/drivers/gpu/drm/xe/xe_mmio.c
> > > @@ -200,7 +200,7 @@ static void mmio_flush_pending_writes(struct xe_mmio *mmio)
> > > writel(0, mmio->regs + DUMMY_REG_OFFSET);
> > > }
> > >
> > > -u8 __xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg)
> > > +u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg)
> > > {
> > > u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
> > > u8 val;
> > > @@ -214,7 +214,7 @@ u8 __xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg)
> > > return val;
> > > }
> > >
> > > -u16 __xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg)
> > > +u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg)
> > > {
> > > u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
> > > u16 val;
> > > diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h
> > > index ac6846447c52..99e3b58c9bb2 100644
> > > --- a/drivers/gpu/drm/xe/xe_mmio.h
> > > +++ b/drivers/gpu/drm/xe/xe_mmio.h
> > > @@ -27,11 +27,8 @@ int xe_mmio_probe_tiles(struct xe_device *xe);
> > > const struct xe_mmio *: (ptr), \
> > > struct xe_mmio *: (ptr))
> > >
> > > -u8 __xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg);
> > > -#define xe_mmio_read8(p, reg) __xe_mmio_read8(__to_xe_mmio(p), reg)
> > > -
> > > -u16 __xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg);
> > > -#define xe_mmio_read16(p, reg) __xe_mmio_read16(__to_xe_mmio(p), reg)
> > > +u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg);
> > > +u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg);
> > >
> > > void __xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val);
> > > #define xe_mmio_write32(p, reg, val) __xe_mmio_write32(__to_xe_mmio(p), reg, val)
> >
> > --
> > Jani Nikula, Intel
--
Matt Roper
Graphics Software Engineer
Linux GPU Platform Enablement
Intel Corporation