[Intel-xe] [PATCH v2 13/17] drm/xe: Introduce xe_reg/xe_reg_mcr
Lucas De Marchi
lucas.demarchi at intel.com
Fri Apr 21 22:32:54 UTC 2023
Stop using i915 types for registers. Use our own types. Differently from
i915, this will keep under the register definition the knowledge for the
different types of registers. For now, the "flags" are mcr and masked,
although only the former is being used.
Additionally, MCR registers have their own type. The only place that
should really look inside a struct xe_reg_mcr is the code dealing with
the steering, since using other APIs when the register is MCR has been a
source of problems in the past.
Most of the driver is agnostic to the register differences since they
either use the definition from the header or already call the correct
MCR_REG()/_MMIO() macros. By embedding the struct xe_reg inside the
struct it's also possible to guarantee the compiler will break if
using RANDOM_MCR_REG.reg is attempted, since now the u32 is inside the
inner struct.
v2:
- Keep a dedicated type for MCR registers to avoid misuse
(Matt Roper, Jani)
- Drop the typedef and just use a struct since it's not an opaque type
(Jani)
- Add more kernel-doc
Signed-off-by: Lucas De Marchi <lucas.demarchi at intel.com>
---
drivers/gpu/drm/xe/regs/xe_reg_defs.h | 93 +++++++++++++++++++++++++++
drivers/gpu/drm/xe/xe_gt_mcr.c | 36 +++++++----
drivers/gpu/drm/xe/xe_gt_mcr.h | 11 ++--
drivers/gpu/drm/xe/xe_irq.c | 2 +-
drivers/gpu/drm/xe/xe_mmio.c | 2 +-
5 files changed, 125 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/drm/xe/regs/xe_reg_defs.h b/drivers/gpu/drm/xe/regs/xe_reg_defs.h
index b5c25e31b889..c0a091206f27 100644
--- a/drivers/gpu/drm/xe/regs/xe_reg_defs.h
+++ b/drivers/gpu/drm/xe/regs/xe_reg_defs.h
@@ -8,4 +8,97 @@
#include "compat-i915-headers/i915_reg_defs.h"
+/**
+ * struct xe_reg - Register definition
+ *
+ * Register definition to be used by the individual register. Although the same
+ * definition is used for xe_reg and xe_reg_mcr, they use different internal
+ * APIs for accesses.
+ */
+struct xe_reg {
+ union {
+ struct {
+ /** @reg: address */
+ u32 reg:30;
+ /**
+ * @masked: register is "masked", with upper 16bits used
+ * to identify the bits that are updated on the lower
+ * bits
+ */
+ u32 masked:1;
+ /**
+ * @mcr: register is multicast/replicated in the
+ * hardware and needs special handling. Any register
+ * with this set should also use a type of xe_reg_mcr_t.
+ * It's only here so the few places that deal with MCR
+ * registers specially (xe_sr.c) and tests using the raw
+ * value can inspect it.
+ */
+ u32 mcr:1;
+ };
+ /** @raw: Raw value with both address and options */
+ u32 raw;
+ };
+};
+
+/**
+ * struct xe_reg_mcr - MCR register is the same as a regular register, but uses
+ * another type since the internal API used for accessing them is different:
+ * it's never correct to use regular MMIO access.
+ */
+struct xe_reg_mcr {
+ /** @__reg: the register */
+ struct xe_reg __reg;
+};
+
+
+/**
+ * XE_REG_OPTION_MASKED - Register is "masked", with upper 16 bits marking the
+ * read/written bits on the lower 16 bits.
+ *
+ * To be used with XE_REG(), XE_REG_MCR() and XE_REG_INITIALIZER()
+ */
+#define XE_REG_OPTION_MASKED .masked = 1
+
+/**
+ * XE_REG_INITIALIZER - Initializer for struct xe_reg.
+ * @r_: Register offset
+ * @...: Additional options like access mode. See struct xe_reg for available
+ * options.
+ *
+ * Register field is mandatory, and additional options may be passed as
+ * arguments. Usually ``XE_REG()`` should be preferred since it creates an
+ * object of the right type. However when initializing static const storage,
+ * where a compound statement is not allowed, this can be used instead.
+ */
+#define XE_REG_INITIALIZER(r_, ...) { .reg = r_, __VA_ARGS__ }
+
+
+/**
+ * XE_REG - Create a struct xe_reg from offset and additional flags
+ * @r_: Register offset
+ * @...: Additional options like access mode. See struct xe_reg for available
+ * options.
+ */
+#define XE_REG(r_, ...) ((const struct xe_reg)XE_REG_INITIALIZER(r_, ##__VA_ARGS__))
+
+/**
+ * XE_REG_MCR - Create a struct xe_reg_mcr from offset and additional flags
+ * @r_: Register offset
+ * @...: Additional options like access mode. See struct xe_reg for available
+ * options.
+ */
+#define XE_REG_MCR(r_, ...) ((const struct xe_reg_mcr){ \
+ .__reg = XE_REG_INITIALIZER(r_, ##__VA_ARGS__, .mcr = 1) \
+ })
+
+/*
+ * TODO: remove these once the register declarations are not using them anymore
+ */
+#undef _MMIO
+#undef MCR_REG
+#define _MMIO(r_) ((const struct xe_reg){ .reg = r_ })
+#define MCR_REG(r_) ((const struct xe_reg_mcr){ .__reg.reg = r_, \
+ .__reg.mcr = 1 })
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index aa04ba5a6dbe..f5bd453849f4 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -40,6 +40,11 @@
* non-terminated instance.
*/
+static inline struct xe_reg to_xe_reg(struct xe_reg_mcr reg_mcr)
+{
+ return reg_mcr.__reg;
+}
+
enum {
MCR_OP_READ,
MCR_OP_WRITE
@@ -360,9 +365,10 @@ void xe_gt_mcr_set_implicit_defaults(struct xe_gt *gt)
* returned. Returns false if the caller need not perform any steering
*/
static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
- i915_mcr_reg_t reg,
+ struct xe_reg_mcr reg_mcr,
u8 *group, u8 *instance)
{
+ const struct xe_reg reg = to_xe_reg(reg_mcr);
const struct xe_mmio_range *implicit_ranges;
for (int type = 0; type < IMPLICIT_STEERING; type++) {
@@ -436,9 +442,10 @@ static void mcr_unlock(struct xe_gt *gt) {
*
* Caller needs to make sure the relevant forcewake wells are up.
*/
-static u32 rw_with_mcr_steering(struct xe_gt *gt, i915_mcr_reg_t reg, u8 rw_flag,
- int group, int instance, u32 value)
+static u32 rw_with_mcr_steering(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
+ u8 rw_flag, int group, int instance, u32 value)
{
+ const struct xe_reg reg = to_xe_reg(reg_mcr);
u32 steer_reg, steer_val, val = 0;
lockdep_assert_held(>->mcr_lock);
@@ -494,17 +501,19 @@ static u32 rw_with_mcr_steering(struct xe_gt *gt, i915_mcr_reg_t reg, u8 rw_flag
*
* Returns the value from a non-terminated instance of @reg.
*/
-u32 xe_gt_mcr_unicast_read_any(struct xe_gt *gt, i915_mcr_reg_t reg)
+u32 xe_gt_mcr_unicast_read_any(struct xe_gt *gt, struct xe_reg_mcr reg_mcr)
{
+ const struct xe_reg reg = to_xe_reg(reg_mcr);
u8 group, instance;
u32 val;
bool steer;
- steer = xe_gt_mcr_get_nonterminated_steering(gt, reg, &group, &instance);
+ steer = xe_gt_mcr_get_nonterminated_steering(gt, reg_mcr,
+ &group, &instance);
if (steer) {
mcr_lock(gt);
- val = rw_with_mcr_steering(gt, reg, MCR_OP_READ,
+ val = rw_with_mcr_steering(gt, reg_mcr, MCR_OP_READ,
group, instance, 0);
mcr_unlock(gt);
} else {
@@ -525,13 +534,13 @@ u32 xe_gt_mcr_unicast_read_any(struct xe_gt *gt, i915_mcr_reg_t reg)
* group/instance.
*/
u32 xe_gt_mcr_unicast_read(struct xe_gt *gt,
- i915_mcr_reg_t reg,
+ struct xe_reg_mcr reg_mcr,
int group, int instance)
{
u32 val;
mcr_lock(gt);
- val = rw_with_mcr_steering(gt, reg, MCR_OP_READ, group, instance, 0);
+ val = rw_with_mcr_steering(gt, reg_mcr, MCR_OP_READ, group, instance, 0);
mcr_unlock(gt);
return val;
@@ -548,11 +557,11 @@ u32 xe_gt_mcr_unicast_read(struct xe_gt *gt,
* Write an MCR register in unicast mode after steering toward a specific
* group/instance.
*/
-void xe_gt_mcr_unicast_write(struct xe_gt *gt, i915_mcr_reg_t reg, u32 value,
- int group, int instance)
+void xe_gt_mcr_unicast_write(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
+ u32 value, int group, int instance)
{
mcr_lock(gt);
- rw_with_mcr_steering(gt, reg, MCR_OP_WRITE, group, instance, value);
+ rw_with_mcr_steering(gt, reg_mcr, MCR_OP_WRITE, group, instance, value);
mcr_unlock(gt);
}
@@ -564,8 +573,11 @@ void xe_gt_mcr_unicast_write(struct xe_gt *gt, i915_mcr_reg_t reg, u32 value,
*
* Write an MCR register in multicast mode to update all instances.
*/
-void xe_gt_mcr_multicast_write(struct xe_gt *gt, i915_mcr_reg_t reg, u32 value)
+void xe_gt_mcr_multicast_write(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
+ u32 value)
{
+ struct xe_reg reg = to_xe_reg(reg_mcr);
+
/*
* Synchronize with any unicast operations. Once we have exclusive
* access, the MULTICAST bit should already be set, so there's no need
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.h b/drivers/gpu/drm/xe/xe_gt_mcr.h
index 2a6cd38c8cb7..27ca1bc880a0 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.h
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.h
@@ -15,13 +15,14 @@ void xe_gt_mcr_init(struct xe_gt *gt);
void xe_gt_mcr_set_implicit_defaults(struct xe_gt *gt);
-u32 xe_gt_mcr_unicast_read(struct xe_gt *gt, i915_mcr_reg_t reg,
+u32 xe_gt_mcr_unicast_read(struct xe_gt *gt, struct xe_reg_mcr mcr_reg,
int group, int instance);
-u32 xe_gt_mcr_unicast_read_any(struct xe_gt *gt, i915_mcr_reg_t reg);
+u32 xe_gt_mcr_unicast_read_any(struct xe_gt *gt, struct xe_reg_mcr mcr_reg);
-void xe_gt_mcr_unicast_write(struct xe_gt *gt, i915_mcr_reg_t reg, u32 value,
- int group, int instance);
-void xe_gt_mcr_multicast_write(struct xe_gt *gt, i915_mcr_reg_t reg, u32 value);
+void xe_gt_mcr_unicast_write(struct xe_gt *gt, struct xe_reg_mcr mcr_reg,
+ u32 value, int group, int instance);
+void xe_gt_mcr_multicast_write(struct xe_gt *gt, struct xe_reg_mcr mcr_reg,
+ u32 value);
void xe_gt_mcr_steering_dump(struct xe_gt *gt, struct drm_printer *p);
diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
index 13f9f220bca0..4409490024a6 100644
--- a/drivers/gpu/drm/xe/xe_irq.c
+++ b/drivers/gpu/drm/xe/xe_irq.c
@@ -27,7 +27,7 @@
#define IIR(offset) _MMIO(offset + 0x8)
#define IER(offset) _MMIO(offset + 0xc)
-static void assert_iir_is_zero(struct xe_gt *gt, i915_reg_t reg)
+static void assert_iir_is_zero(struct xe_gt *gt, struct xe_reg reg)
{
u32 val = xe_mmio_read32(gt, reg.reg);
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index a93838e23b7b..0bb83ff18599 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -397,7 +397,7 @@ int xe_mmio_init(struct xe_device *xe)
DRM_XE_MMIO_READ |\
DRM_XE_MMIO_WRITE)
-static const i915_reg_t mmio_read_whitelist[] = {
+static const struct xe_reg mmio_read_whitelist[] = {
RING_TIMESTAMP(RENDER_RING_BASE),
};
--
2.39.0
More information about the Intel-xe
mailing list