[Intel-xe] [RESEND 3/5] fixup! drm/xe/display: Implement display support

Jani Nikula <jani.nikula@intel.com>
Fri Jun 30 11:50:17 UTC 2023


We'll have code that uses struct intel_uncore by name, so add the name
back instead of fake_uncore.

Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
---
 .../drm/xe/compat-i915-headers/intel_pcode.h  | 16 ++---
 .../drm/xe/compat-i915-headers/intel_uncore.h | 58 +++++++++----------
 drivers/gpu/drm/xe/xe_device_types.h          |  2 +-
 3 files changed, 38 insertions(+), 38 deletions(-)
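
For context, a minimal sketch (not part of the patch) of how display code
holding a struct intel_uncore pointer ends up in xe_mmio through these
wrappers. read_status(), HYPOTHETICAL_REG, and its offset are made up for
illustration; the includes assume the compat header layout patched below:

#include "xe_device_types.h"
#include "intel_uncore.h"	/* the compat header patched below */

#define HYPOTHETICAL_REG _MMIO(0x44400)	/* made-up register offset */

static u32 read_status(struct xe_device *xe)
{
	/*
	 * &xe->uncore is the struct intel_uncore embedded in struct
	 * xe_device; __compat_uncore_to_gt() uses container_of() to get
	 * back to the xe_device and resolve the root GT that
	 * xe_mmio_read32() needs.
	 */
	return intel_uncore_read(&xe->uncore, HYPOTHETICAL_REG);
}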

diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h
index 71f1fd1de2fd..0c47661bdc6a 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h
@@ -10,32 +10,32 @@
 #include "xe_pcode.h"
 
 static inline int
-snb_pcode_write_timeout(struct fake_uncore *uncore, u32 mbox, u32 val,
+snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
 			int fast_timeout_us, int slow_timeout_ms)
 {
-	return xe_pcode_write_timeout(__fake_uncore_to_gt(uncore), mbox, val,
+	return xe_pcode_write_timeout(__compat_uncore_to_gt(uncore), mbox, val,
 				      slow_timeout_ms ?: 1);
 }
 
 static inline int
-snb_pcode_write(struct fake_uncore *uncore, u32 mbox, u32 val)
+snb_pcode_write(struct intel_uncore *uncore, u32 mbox, u32 val)
 {
 
-	return xe_pcode_write(__fake_uncore_to_gt(uncore), mbox, val);
+	return xe_pcode_write(__compat_uncore_to_gt(uncore), mbox, val);
 }
 
 static inline int
-snb_pcode_read(struct fake_uncore *uncore, u32 mbox, u32 *val, u32 *val1)
+snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1)
 {
-	return xe_pcode_read(__fake_uncore_to_gt(uncore), mbox, val, val1);
+	return xe_pcode_read(__compat_uncore_to_gt(uncore), mbox, val, val1);
 }
 
 static inline int
-skl_pcode_request(struct fake_uncore *uncore, u32 mbox,
+skl_pcode_request(struct intel_uncore *uncore, u32 mbox,
 		  u32 request, u32 reply_mask, u32 reply,
 		  int timeout_base_ms)
 {
-	return xe_pcode_request(__fake_uncore_to_gt(uncore), mbox, request, reply_mask, reply,
+	return xe_pcode_request(__compat_uncore_to_gt(uncore), mbox, request, reply_mask, reply,
 				timeout_base_ms);
 }
 
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
index fae6213d26f1..652654b5481d 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
@@ -10,31 +10,31 @@
 #include "xe_device_types.h"
 #include "xe_mmio.h"
 
-static inline struct xe_gt *__fake_uncore_to_gt(struct fake_uncore *uncore)
+static inline struct xe_gt *__compat_uncore_to_gt(struct intel_uncore *uncore)
 {
 	struct xe_device *xe = container_of(uncore, struct xe_device, uncore);
 
 	return xe_root_mmio_gt(xe);
 }
 
-static inline u32 intel_uncore_read(struct fake_uncore *uncore,
+static inline u32 intel_uncore_read(struct intel_uncore *uncore,
 				    i915_reg_t i915_reg)
 {
 	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
 
-	return xe_mmio_read32(__fake_uncore_to_gt(uncore), reg);
+	return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
 }
 
-static inline u32 intel_uncore_read8(struct fake_uncore *uncore,
+static inline u32 intel_uncore_read8(struct intel_uncore *uncore,
 				     i915_reg_t i915_reg)
 {
 	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
 
-	return xe_mmio_read8(__fake_uncore_to_gt(uncore), reg);
+	return xe_mmio_read8(__compat_uncore_to_gt(uncore), reg);
 }
 
 static inline u64
-intel_uncore_read64_2x32(struct fake_uncore *uncore,
+intel_uncore_read64_2x32(struct intel_uncore *uncore,
 			 i915_reg_t i915_lower_reg, i915_reg_t i915_upper_reg)
 {
 	struct xe_reg lower_reg = XE_REG(i915_mmio_reg_offset(i915_lower_reg));
@@ -42,102 +42,102 @@ intel_uncore_read64_2x32(struct fake_uncore *uncore,
 	u32 upper, lower, old_upper;
 	int loop = 0;
 
-	upper = xe_mmio_read32(__fake_uncore_to_gt(uncore), upper_reg);
+	upper = xe_mmio_read32(__compat_uncore_to_gt(uncore), upper_reg);
 	do {
 		old_upper = upper;
-		lower = xe_mmio_read32(__fake_uncore_to_gt(uncore), lower_reg);
-		upper = xe_mmio_read32(__fake_uncore_to_gt(uncore), upper_reg);
+		lower = xe_mmio_read32(__compat_uncore_to_gt(uncore), lower_reg);
+		upper = xe_mmio_read32(__compat_uncore_to_gt(uncore), upper_reg);
 	} while (upper != old_upper && loop++ < 2);
 
 	return (u64)upper << 32 | lower;
 }
 
-static inline void intel_uncore_posting_read(struct fake_uncore *uncore,
+static inline void intel_uncore_posting_read(struct intel_uncore *uncore,
 					     i915_reg_t i915_reg)
 {
 	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
 
-	xe_mmio_read32(__fake_uncore_to_gt(uncore), reg);
+	xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
 }
 
-static inline void intel_uncore_write(struct fake_uncore *uncore,
+static inline void intel_uncore_write(struct intel_uncore *uncore,
 				      i915_reg_t i915_reg, u32 val)
 {
 	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
 
-	xe_mmio_write32(__fake_uncore_to_gt(uncore), reg, val);
+	xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
 }
 
-static inline u32 intel_uncore_rmw(struct fake_uncore *uncore,
+static inline u32 intel_uncore_rmw(struct intel_uncore *uncore,
 				   i915_reg_t i915_reg, u32 clear, u32 set)
 {
 	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
 
-	return xe_mmio_rmw32(__fake_uncore_to_gt(uncore), reg, clear, set);
+	return xe_mmio_rmw32(__compat_uncore_to_gt(uncore), reg, clear, set);
 }
 
-static inline int intel_wait_for_register(struct fake_uncore *uncore,
+static inline int intel_wait_for_register(struct intel_uncore *uncore,
 					  i915_reg_t i915_reg, u32 mask,
 					  u32 value, unsigned int timeout)
 {
 	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
 
-	return xe_mmio_wait32(__fake_uncore_to_gt(uncore), reg, value, mask,
+	return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, value, mask,
 			      timeout * USEC_PER_MSEC, NULL, false);
 }
 
-static inline int intel_wait_for_register_fw(struct fake_uncore *uncore,
+static inline int intel_wait_for_register_fw(struct intel_uncore *uncore,
 					     i915_reg_t i915_reg, u32 mask,
 					     u32 value, unsigned int timeout)
 {
 	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
 
-	return xe_mmio_wait32(__fake_uncore_to_gt(uncore), reg, value, mask,
+	return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, value, mask,
 			      timeout * USEC_PER_MSEC, NULL, false);
 }
 
 static inline int
-__intel_wait_for_register(struct fake_uncore *uncore, i915_reg_t i915_reg,
+__intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t i915_reg,
 			  u32 mask, u32 value, unsigned int fast_timeout_us,
 			  unsigned int slow_timeout_ms, u32 *out_value)
 {
 	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
 
-	return xe_mmio_wait32(__fake_uncore_to_gt(uncore), reg, value, mask,
+	return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, value, mask,
 			      fast_timeout_us + 1000 * slow_timeout_ms,
 			      out_value, false);
 }
 
-static inline u32 intel_uncore_read_fw(struct fake_uncore *uncore,
+static inline u32 intel_uncore_read_fw(struct intel_uncore *uncore,
 				       i915_reg_t i915_reg)
 {
 	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
 
-	return xe_mmio_read32(__fake_uncore_to_gt(uncore), reg);
+	return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
 }
 
-static inline void intel_uncore_write_fw(struct fake_uncore *uncore,
+static inline void intel_uncore_write_fw(struct intel_uncore *uncore,
 					 i915_reg_t i915_reg, u32 val)
 {
 	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
 
-	xe_mmio_write32(__fake_uncore_to_gt(uncore), reg, val);
+	xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
 }
 
-static inline u32 intel_uncore_read_notrace(struct fake_uncore *uncore,
+static inline u32 intel_uncore_read_notrace(struct intel_uncore *uncore,
 					    i915_reg_t i915_reg)
 {
 	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
 
-	return xe_mmio_read32(__fake_uncore_to_gt(uncore), reg);
+	return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
 }
 
-static inline void intel_uncore_write_notrace(struct fake_uncore *uncore,
+static inline void intel_uncore_write_notrace(struct intel_uncore *uncore,
 					      i915_reg_t i915_reg, u32 val)
 {
 	struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
 
-	xe_mmio_write32(__fake_uncore_to_gt(uncore), reg, val);
+	xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
 }
 
 #endif /* __INTEL_UNCORE_H__ */
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 6afe37c8704e..ed7860645e8c 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -397,7 +397,7 @@ struct xe_device {
 	bool display_irqs_enabled;
 	u32 enabled_irq_mask;
 
-	struct fake_uncore {
+	struct intel_uncore {
 		spinlock_t lock;
 	} uncore;
 
-- 
2.39.2


