[PATCH 5/9] c10 msgbus fixes

Mika Kahola mika.kahola at intel.com
Wed Apr 5 14:20:20 UTC 2023


Signed-off-by: Mika Kahola <mika.kahola at intel.com>
---
 drivers/gpu/drm/i915/display/intel_cx0_phy.c  | 544 +++++++++++-------
 drivers/gpu/drm/i915/display/intel_cx0_phy.h  |  11 +-
 .../gpu/drm/i915/display/intel_cx0_phy_regs.h |  11 +-
 .../drm/i915/display/intel_display_power.c    |   3 +-
 .../i915/display/intel_display_power_well.c   |   2 +-
 .../drm/i915/display/intel_display_types.h    |   2 +
 6 files changed, 348 insertions(+), 225 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index ced8c8aa6c82..b1b359bdf3ae 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -10,8 +10,31 @@
 #include "intel_display_types.h"
 #include "intel_dp.h"
 #include "intel_panel.h"
+#include "intel_psr.h"
 #include "intel_tc.h"
 
+#define MB_WRITE_COMMITTED      true
+#define MB_WRITE_UNCOMMITTED    false
+
+#define XELPDP_LANE_POWERDOWN_UPDATE(lane)	((lane) & INTEL_CX0_LANE0 ? \
+							  XELPDP_LANE0_POWERDOWN_UPDATE : \
+							  XELPDP_LANE1_POWERDOWN_UPDATE)
+#define XELPDP_LANE_POWERDOWN_NEW_STATE(lane, state)	((lane) & INTEL_CX0_LANE0 ? \
+							 XELPDP_LANE0_POWERDOWN_NEW_STATE(state) : \
+							 XELPDP_LANE1_POWERDOWN_NEW_STATE(state))
+#define XELPDP_LANE_PCLK_REFCLK_REQUEST(lane)	((lane) & INTEL_CX0_LANE0 ? \
+						 XELPDP_LANE0_PCLK_REFCLK_REQUEST : \
+						 XELPDP_LANE1_PCLK_REFCLK_REQUEST)
+#define XELPDP_LANE_PCLK_REFCLK_ACK(lane)	((lane) & INTEL_CX0_LANE0 ? \
+						 XELPDP_LANE0_PCLK_REFCLK_ACK : \
+						 XELPDP_LANE1_PCLK_REFCLK_ACK)
+#define XELPDP_LANE_PCLK_PLL_REQUEST(lane)	((lane) & INTEL_CX0_LANE0 ? \
+						 XELPDP_LANE0_PCLK_PLL_REQUEST : \
+						 XELPDP_LANE1_PCLK_PLL_REQUEST)
+#define XELPDP_LANE_PCLK_PLL_ACK(lane)		((lane) & INTEL_CX0_LANE0 ? \
+						 XELPDP_LANE0_PCLK_PLL_ACK : \
+						 XELPDP_LANE1_PCLK_PLL_ACK)
+
 bool intel_is_c10phy(struct drm_i915_private *dev_priv, enum phy phy)
 {
 	if (IS_METEORLAKE(dev_priv) && (phy < PHY_C))
@@ -20,31 +43,84 @@ bool intel_is_c10phy(struct drm_i915_private *dev_priv, enum phy phy)
 	return false;
 }
 
+static int lane_mask_to_lane(u8 lane_mask)
+{
+	if (lane_mask & INTEL_CX0_LANE0)
+		return 0;
+	else if (lane_mask & INTEL_CX0_LANE1)
+		return 1;
+
+	return -EINVAL;
+}
+
+static void
+assert_dc_off(struct drm_i915_private *i915)
+{
+	bool enabled;
+
+	enabled = intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF);
+	drm_WARN_ON(&i915->drm, !enabled);
+}
+
+/*
+ * Prepare HW for CX0 phy transactions.
+ *
+ * It is required that PSR and DC5/6 are disabled before any CX0 message
+ * bus transaction is executed.
+ */
+static intel_wakeref_t intel_cx0_phy_transaction_begin(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+	intel_psr_pause(intel_dp);
+	return intel_display_power_get(i915, POWER_DOMAIN_DC_OFF);
+}
+
+static void intel_cx0_phy_transaction_end(struct intel_encoder *encoder, intel_wakeref_t wakeref)
+{
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+	intel_psr_resume(intel_dp);
+	intel_display_power_put(i915, POWER_DOMAIN_DC_OFF, wakeref);
+}
+
+static void intel_clear_response_ready_flag(struct drm_i915_private *i915,
+					    enum port port, int lane)
+{
+	intel_de_rmw(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
+		     XELPDP_PORT_P2M_RESPONSE_READY, 0);
+	intel_de_rmw(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
+		     XELPDP_PORT_P2M_ERROR_SET, 0);
+}
+
 static void intel_cx0_bus_reset(struct drm_i915_private *i915, enum port port, int lane)
 {
 	enum phy phy = intel_port_to_phy(i915, port);
 
 	/* Bring the phy to idle. */
-	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane - 1),
+	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
 		       XELPDP_PORT_M2P_TRANSACTION_RESET);
 
 	/* Wait for Idle Clear. */
-	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane - 1),
+	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
 				    XELPDP_PORT_M2P_TRANSACTION_RESET,
 				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
-		drm_dbg_kms(&i915->drm, "Failed to bring PHY %c to idle.\n", phy_name(phy));
+		drm_err_once(&i915->drm, "Failed to bring PHY %c to idle.\n", phy_name(phy));
 		return;
 	}
 
-	intel_de_write(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane - 1), ~0);
+	intel_clear_response_ready_flag(i915, port, lane);
 }
 
-static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port, int lane, u32 *val)
+static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port,
+				  int command, int lane, u32 *val)
 {
 	enum phy phy = intel_port_to_phy(i915, port);
 
 	if (__intel_de_wait_for_register(i915,
-					 XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane - 1),
+					 XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
 					 XELPDP_PORT_P2M_RESPONSE_READY,
 					 XELPDP_PORT_P2M_RESPONSE_READY,
 					 XELPDP_MSGBUS_TIMEOUT_FAST_US,
@@ -53,17 +129,34 @@ static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port,
 		return -ETIMEDOUT;
 	}
 
+	/* Check for error. */
+	if (*val & XELPDP_PORT_P2M_ERROR_SET) {
+		drm_dbg_kms(&i915->drm, "PHY %c Error occurred during %s command. Status: 0x%x\n",
+			    phy_name(phy), command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? "read" : "write", *val);
+		intel_cx0_bus_reset(i915, port, lane);
+		return -EINVAL;
+	}
+
+	/* Check for Read/Write Ack. */
+	if (REG_FIELD_GET(XELPDP_PORT_P2M_COMMAND_TYPE_MASK, *val) != command) {
+		drm_dbg_kms(&i915->drm, "PHY %c Not a %s response. MSGBUS Status: 0x%x.\n",
+			    phy_name(phy), command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? "read" : "write", *val);
+		intel_cx0_bus_reset(i915, port, lane);
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
-static int __intel_cx0_read(struct drm_i915_private *i915, enum port port,
-			   int lane, u16 addr, u32 *val)
+static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port,
+				 int lane, u16 addr)
 {
 	enum phy phy = intel_port_to_phy(i915, port);
 	int ack;
+	u32 val;
 
 	/* Wait for pending transactions.*/
-	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane - 1),
+	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
 				    XELPDP_PORT_M2P_TRANSACTION_PENDING,
 				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
 		drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for previous transaction to complete. Reset the bus and retry.\n", phy_name(phy));
@@ -72,118 +165,107 @@ static int __intel_cx0_read(struct drm_i915_private *i915, enum port port,
 	}
 
 	/* Issue the read command. */
-	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane - 1),
+	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
 		       XELPDP_PORT_M2P_TRANSACTION_PENDING |
 		       XELPDP_PORT_M2P_COMMAND_READ |
 		       XELPDP_PORT_M2P_ADDRESS(addr));
 
 	/* Wait for response ready. And read response.*/
-	ack = intel_cx0_wait_for_ack(i915, port, lane, val);
+	ack = intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_READ_ACK, lane, &val);
 	if (ack < 0) {
 		intel_cx0_bus_reset(i915, port, lane);
 		return ack;
 	}
 
-	/* Check for error. */
-	if (*val & XELPDP_PORT_P2M_ERROR_SET) {
-		drm_dbg_kms(&i915->drm, "PHY %c Error occurred during read command. Status: 0x%x\n", phy_name(phy), *val);
-		intel_cx0_bus_reset(i915, port, lane);
-		return -EINVAL;
-	}
-
-	/* Check for Read Ack. */
-	if (REG_FIELD_GET(XELPDP_PORT_P2M_COMMAND_TYPE_MASK, *val) !=
-			  XELPDP_PORT_P2M_COMMAND_READ_ACK) {
-		drm_dbg_kms(&i915->drm, "PHY %c Not a Read response. MSGBUS Status: 0x%x.\n", phy_name(phy), *val);
-		intel_cx0_bus_reset(i915, port, lane);
-		return -EINVAL;
-	}
-
 	/* Clear Response Ready flag.*/
-	intel_de_write(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane - 1), ~0);
+	intel_clear_response_ready_flag(i915, port, lane);
 
-	return REG_FIELD_GET(XELPDP_PORT_P2M_DATA_MASK, *val);
+	return REG_FIELD_GET(XELPDP_PORT_P2M_DATA_MASK, val);
 }
 
-static u8 intel_cx0_read(struct drm_i915_private *i915, enum port port,
-			 int lane, u16 addr)
+static u8 __intel_cx0_read(struct drm_i915_private *i915, enum port port,
+			   int lane, u16 addr)
 {
 	enum phy phy = intel_port_to_phy(i915, port);
-	int i, status = 0;
-	u32 val;
+	int i, status;
+
+	assert_dc_off(i915);
 
+	/* 3 tries is assumed to be enough to read successfully */
 	for (i = 0; i < 3; i++) {
-		status = __intel_cx0_read(i915, port, lane, addr, &val);
+		status = __intel_cx0_read_once(i915, port, lane, addr);
 
 		if (status >= 0)
-			break;
+			return status;
 	}
 
-	if (i == 3) {
+	if (i == 3)
 		drm_err_once(&i915->drm, "PHY %c Read %04x failed after %d retries.\n", phy_name(phy), addr, i);
-		return 0;
-	}
 
-	return status;
+	return 0;
 }
 
-static int intel_cx0_wait_cwrite_ack(struct drm_i915_private *i915,
-				      enum port port, int lane)
+static u8 intel_cx0_read(struct drm_i915_private *i915, enum port port,
+			 u8 lane_mask, u16 addr)
 {
 	enum phy phy = intel_port_to_phy(i915, port);
-	int ack;
-	u32 val = 0;
-
-	/* Check for write ack. */
-	ack = intel_cx0_wait_for_ack(i915, port, lane, &val);
-	if (ack < 0)
-		return ack;
+	int lane = lane_mask_to_lane(lane_mask);
 
-	if ((REG_FIELD_GET(XELPDP_PORT_P2M_COMMAND_TYPE_MASK, val) !=
-	     XELPDP_PORT_P2M_COMMAND_WRITE_ACK) || val & XELPDP_PORT_P2M_ERROR_SET) {
-		drm_dbg_kms(&i915->drm, "PHY %c Unexpected ACK received. MSGBUS STATUS: 0x%x.\n", phy_name(phy), val);
-		return -EINVAL;
+	if (lane < 0) {
+		drm_err_once(&i915->drm, "Incorrect lane for PHY %c\n", phy_name(phy));
+		return 0;
 	}
 
-	return 0;
+	return __intel_cx0_read(i915, port, lane, addr);
 }
 
 static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
 				  int lane, u16 addr, u8 data, bool committed)
 {
 	enum phy phy = intel_port_to_phy(i915, port);
+	u32 val;
 
-	/* Wait for pending transactions.*/
-	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane - 1),
+	/* Wait for pending transactions */
+	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
 				    XELPDP_PORT_M2P_TRANSACTION_PENDING,
 				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
-		drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for previous transaction to complete. Reset the bus and retry.\n", phy_name(phy));
+		drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for previous transaction to complete. Resetting the bus.\n", phy_name(phy));
 		intel_cx0_bus_reset(i915, port, lane);
 		return -ETIMEDOUT;
 	}
 
 	/* Issue the write command. */
-	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane - 1),
+	intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
 		       XELPDP_PORT_M2P_TRANSACTION_PENDING |
 		       (committed ? XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED :
-		       XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED) |
+				    XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED) |
 		       XELPDP_PORT_M2P_DATA(data) |
 		       XELPDP_PORT_M2P_ADDRESS(addr));
 
+	/* Wait for pending transactions.*/
+	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+				    XELPDP_PORT_M2P_TRANSACTION_PENDING,
+				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+		drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for previous transaction to complete. Resetting the bus.\n", phy_name(phy));
+		intel_cx0_bus_reset(i915, port, lane);
+		return -ETIMEDOUT;
+	}
+
 	/* Check for error. */
 	if (committed) {
-		if (intel_cx0_wait_cwrite_ack(i915, port, lane) < 0) {
+		if (intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val) < 0) {
 			intel_cx0_bus_reset(i915, port, lane);
 			return -EINVAL;
 		}
-	} else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane - 1)) &
+	} else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane)) &
 			    XELPDP_PORT_P2M_ERROR_SET)) {
 		drm_dbg_kms(&i915->drm, "PHY %c Error occurred during write command.\n", phy_name(phy));
 		intel_cx0_bus_reset(i915, port, lane);
 		return -EINVAL;
 	}
 
-	intel_de_write(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane - 1), ~0);
+	/* Clear Response Ready flag.*/
+	intel_clear_response_ready_flag(i915, port, lane);
 
 	return 0;
 }
@@ -194,28 +276,27 @@ static void __intel_cx0_write(struct drm_i915_private *i915, enum port port,
 	enum phy phy = intel_port_to_phy(i915, port);
 	int i, status;
 
+	assert_dc_off(i915);
+
+	/* 3 tries is assumed to be enough to write successfully */
 	for (i = 0; i < 3; i++) {
 		status = __intel_cx0_write_once(i915, port, lane, addr, data, committed);
 
 		if (status == 0)
-			break;
+			return;
 	}
 
-	if (i == 3) {
+	if (i == 3)
 		drm_err_once(&i915->drm, "PHY %c Write %04x failed after %d retries.\n", phy_name(phy), addr, i);
-		return;
-	}
 }
 
 static void intel_cx0_write(struct drm_i915_private *i915, enum port port,
-			    int lane, u16 addr, u8 data, bool committed)
+			    u8 lane_mask, u16 addr, u8 data, bool committed)
 {
-	if (lane == INTEL_CX0_BOTH_LANES) {
-		__intel_cx0_write(i915, port, INTEL_CX0_LANE0, addr, data, committed);
-		__intel_cx0_write(i915, port, INTEL_CX0_LANE1, addr, data, committed);
-	} else {
+	int lane;
+
+	for_each_cx0_lane_in_mask(lane_mask, lane)
 		__intel_cx0_write(i915, port, lane, addr, data, committed);
-	}
 }
 
 static void __intel_cx0_rmw(struct drm_i915_private *i915, enum port port,
@@ -223,22 +304,20 @@ static void __intel_cx0_rmw(struct drm_i915_private *i915, enum port port,
 {
 	u8 old, val;
 
-	old = intel_cx0_read(i915, port, lane, addr);
+	old = __intel_cx0_read(i915, port, lane, addr);
 	val = (old & ~clear) | set;
 
 	if (val != old)
-		intel_cx0_write(i915, port, lane, addr, val, committed);
+		__intel_cx0_write(i915, port, lane, addr, val, committed);
 }
 
 static void intel_cx0_rmw(struct drm_i915_private *i915, enum port port,
-			  int lane, u16 addr, u8 clear, u8 set, bool committed)
+			  u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed)
 {
-	if (lane == INTEL_CX0_BOTH_LANES) {
-		__intel_cx0_rmw(i915, port, INTEL_CX0_LANE0, addr, clear, set, committed);
-		__intel_cx0_rmw(i915, port, INTEL_CX0_LANE1, addr, clear, set, committed);
-	} else {
+	u8 lane;
+
+	for_each_cx0_lane_in_mask(lane_mask, lane)
 		__intel_cx0_rmw(i915, port, lane, addr, clear, set, committed);
-	}
 }
 
 /*
@@ -250,6 +329,8 @@ static void intel_cx0_rmw(struct drm_i915_private *i915, enum port port,
 
 static const struct intel_c10mpllb_state mtl_c10_dp_rbr = {
 	.clock = 162000,
+	.tx = 0x10,
+	.cmn = 0x21,
 	.pll[0] = 0xB4,
 	.pll[1] = 0,
 	.pll[2] = 0x30,
@@ -274,6 +355,8 @@ static const struct intel_c10mpllb_state mtl_c10_dp_rbr = {
 
 static const struct intel_c10mpllb_state mtl_c10_edp_r216 = {
 	.clock = 216000,
+	.tx = 0x10,
+	.cmn = 0x21,
 	.pll[0] = 0x4,
 	.pll[1] = 0,
 	.pll[2] = 0xA2,
@@ -298,6 +381,8 @@ static const struct intel_c10mpllb_state mtl_c10_edp_r216 = {
 
 static const struct intel_c10mpllb_state mtl_c10_edp_r243 = {
 	.clock = 243000,
+	.tx = 0x10,
+	.cmn = 0x21,
 	.pll[0] = 0x34,
 	.pll[1] = 0,
 	.pll[2] = 0xDA,
@@ -322,6 +407,8 @@ static const struct intel_c10mpllb_state mtl_c10_edp_r243 = {
 
 static const struct intel_c10mpllb_state mtl_c10_dp_hbr1 = {
 	.clock = 270000,
+	.tx = 0x10,
+	.cmn = 0x21,
 	.pll[0] = 0xF4,
 	.pll[1] = 0,
 	.pll[2] = 0xF8,
@@ -346,6 +433,8 @@ static const struct intel_c10mpllb_state mtl_c10_dp_hbr1 = {
 
 static const struct intel_c10mpllb_state mtl_c10_edp_r324 = {
 	.clock = 324000,
+	.tx = 0x10,
+	.cmn = 0x21,
 	.pll[0] = 0xB4,
 	.pll[1] = 0,
 	.pll[2] = 0x30,
@@ -370,6 +459,8 @@ static const struct intel_c10mpllb_state mtl_c10_edp_r324 = {
 
 static const struct intel_c10mpllb_state mtl_c10_edp_r432 = {
 	.clock = 432000,
+	.tx = 0x10,
+	.cmn = 0x21,
 	.pll[0] = 0x4,
 	.pll[1] = 0,
 	.pll[2] = 0xA2,
@@ -394,6 +485,8 @@ static const struct intel_c10mpllb_state mtl_c10_edp_r432 = {
 
 static const struct intel_c10mpllb_state mtl_c10_dp_hbr2 = {
 	.clock = 540000,
+	.tx = 0x10,
+	.cmn = 0x21,
 	.pll[0] = 0xF4,
 	.pll[1] = 0,
 	.pll[2] = 0xF8,
@@ -418,6 +511,8 @@ static const struct intel_c10mpllb_state mtl_c10_dp_hbr2 = {
 
 static const struct intel_c10mpllb_state mtl_c10_edp_r675 = {
 	.clock = 675000,
+	.tx = 0x10,
+	.cmn = 0x21,
 	.pll[0] = 0xB4,
 	.pll[1] = 0,
 	.pll[2] = 0x3E,
@@ -442,6 +537,8 @@ static const struct intel_c10mpllb_state mtl_c10_edp_r675 = {
 
 static const struct intel_c10mpllb_state mtl_c10_dp_hbr3 = {
 	.clock = 810000,
+	.tx = 0x10,
+	.cmn = 0x21,
 	.pll[0] = 0x34,
 	.pll[1] = 0,
 	.pll[2] = 0x84,
@@ -501,10 +598,36 @@ intel_c10_mpllb_tables_get(struct intel_crtc_state *crtc_state,
 	return NULL;
 }
 
+static void intel_c10mpllb_update_pll(struct intel_crtc_state *crtc_state,
+				      struct intel_encoder *encoder,
+				      struct intel_c10mpllb_state *pll_state)
+{
+	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	bool use_ssc = false;
+	int i;
+
+	if (intel_crtc_has_dp_encoder(crtc_state)) {
+		intel_dp = enc_to_intel_dp(encoder);
+		use_ssc = (intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
+			   DP_MAX_DOWNSPREAD_0_5);
+
+		if (!intel_panel_use_ssc(i915))
+			use_ssc = false;
+	}
+
+	/* If not using ssc pll[4] through pll[8] must be 0*/
+	for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++) {
+		if (!use_ssc && (i > 3 && i < 9))
+			pll_state->pll[i] = 0;
+	}
+}
+
 static int intel_c10mpllb_calc_state(struct intel_crtc_state *crtc_state,
 				     struct intel_encoder *encoder)
 {
 	const struct intel_c10mpllb_state * const *tables;
+	struct intel_c10mpllb_state *pll_state;
 	int i;
 
 	tables = intel_c10_mpllb_tables_get(crtc_state, encoder);
@@ -512,8 +635,11 @@ static int intel_c10mpllb_calc_state(struct intel_crtc_state *crtc_state,
 		return -EINVAL;
 
 	for (i = 0; tables[i]; i++) {
-		if (crtc_state->port_clock <= tables[i]->clock) {
+		if (crtc_state->port_clock == tables[i]->clock) {
 			crtc_state->c10mpllb_state = *tables[i];
+			pll_state = &crtc_state->c10mpllb_state;
+			intel_c10mpllb_update_pll(crtc_state, encoder,
+						  pll_state);
 			return 0;
 		}
 	}
@@ -541,8 +667,10 @@ void intel_c10mpllb_readout_hw_state(struct intel_encoder *encoder,
 	u8 lane = lane_reversal ? INTEL_CX0_LANE1 :
 				  INTEL_CX0_LANE0;
 	enum phy phy = intel_port_to_phy(i915, encoder->port);
+	intel_wakeref_t wakeref;
 	int i;
-	u8 cmn, tx0;
+
+	wakeref = intel_cx0_phy_transaction_begin(encoder);
 
 	/*
 	 * According to C10 VDR Register programming Sequence we need
@@ -555,12 +683,18 @@ void intel_c10mpllb_readout_hw_state(struct intel_encoder *encoder,
 		pll_state->pll[i] = intel_cx0_read(i915, encoder->port, lane,
 						   PHY_C10_VDR_PLL(i));
 
-	cmn = intel_cx0_read(i915, encoder->port, lane, PHY_C10_VDR_CMN(0));
-	tx0 = intel_cx0_read(i915, encoder->port, lane, PHY_C10_VDR_TX(0));
+	pll_state->cmn = intel_cx0_read(i915, encoder->port, lane, PHY_C10_VDR_CMN(0));
+	pll_state->tx = intel_cx0_read(i915, encoder->port, lane, PHY_C10_VDR_TX(0));
+
+	if (pll_state->tx != C10_TX0_TX_MPLLB_SEL ||
+	    pll_state->cmn != (C10_CMN0_REF_RANGE | C10_CMN0_REF_CLK_MPLLB_DIV))
+		drm_dbg_kms(&i915->drm, "Unexpected tx: %x or cmn: %x for phy: %c.\n",
+			    pll_state->tx, pll_state->cmn, phy_name(phy));
 
-	if (tx0 != C10_TX0_VAL || cmn != C10_CMN0_DP_VAL)
-		drm_warn(&i915->drm, "Unexpected tx: %x or cmn: %x for phy: %c.\n",
-			 tx0, cmn, phy_name(phy));
+	intel_cx0_rmw(i915, encoder->port, lane, PHY_C10_VDR_CONTROL(1),
+		      C10_VDR_CTRL_MSGBUS_ACCESS, 0, MB_WRITE_COMMITTED);
+
+	intel_cx0_phy_transaction_end(encoder, wakeref);
 }
 
 static void intel_c10_pll_program(struct drm_i915_private *i915,
@@ -570,46 +704,29 @@ static void intel_c10_pll_program(struct drm_i915_private *i915,
 	const struct intel_c10mpllb_state *pll_state = &crtc_state->c10mpllb_state;
 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
-	u8 master_lane = lane_reversal ? INTEL_CX0_LANE1 :
-					 INTEL_CX0_LANE0;
 	u8 follower_lane = lane_reversal ? INTEL_CX0_LANE0 :
 					   INTEL_CX0_LANE1;
 
 	int i;
-	struct intel_dp *intel_dp;
-	bool use_ssc = false;
-	u8 cmn0 = 0;
-
-	if (intel_crtc_has_dp_encoder(crtc_state)) {
-		intel_dp = enc_to_intel_dp(encoder);
-		use_ssc = (intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
-			  DP_MAX_DOWNSPREAD_0_5);
-
-		if (!intel_panel_use_ssc(i915))
-			use_ssc = false;
-
-		cmn0 = C10_CMN0_DP_VAL;
-	}
 
 	intel_cx0_write(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
 			C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED);
 	/* Custom width needs to be programmed to 0 for both the phy lanes */
 	intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES,
-		      PHY_C10_VDR_CUSTOM_WIDTH, 0x3, 0, MB_WRITE_COMMITTED);
+		      PHY_C10_VDR_CUSTOM_WIDTH, C10_VDR_CUSTOM_WIDTH, 0, MB_WRITE_COMMITTED);
 	intel_cx0_rmw(i915, encoder->port, follower_lane, PHY_C10_VDR_CONTROL(1),
 		      C10_VDR_CTRL_MASTER_LANE, C10_VDR_CTRL_UPDATE_CFG,
 		      MB_WRITE_COMMITTED);
 
 	/* Program the pll values only for the master lane */
 	for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++)
-		/* If not using ssc pll[4] through pll[8] must be 0*/
-		intel_cx0_write(i915, encoder->port, master_lane, PHY_C10_VDR_PLL(i),
-				(!use_ssc && (i > 3 && i < 9)) ? 0 : pll_state->pll[i],
+		intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_PLL(i),
+				pll_state->pll[i],
 				(i % 4) ? MB_WRITE_UNCOMMITTED : MB_WRITE_COMMITTED);
 
-	intel_cx0_write(i915, encoder->port, master_lane, PHY_C10_VDR_CMN(0), cmn0, MB_WRITE_COMMITTED);
-	intel_cx0_write(i915, encoder->port, master_lane, PHY_C10_VDR_TX(0), C10_TX0_VAL, MB_WRITE_COMMITTED);
-	intel_cx0_rmw(i915, encoder->port, master_lane, PHY_C10_VDR_CONTROL(1),
+	intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_CMN(0), pll_state->cmn, MB_WRITE_COMMITTED);
+	intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_TX(0), pll_state->tx, MB_WRITE_COMMITTED);
+	intel_cx0_rmw(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_CONTROL(1),
 		      C10_VDR_CTRL_MSGBUS_ACCESS, C10_VDR_CTRL_MASTER_LANE |
 		      C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED);
 }
@@ -641,6 +758,7 @@ void intel_c10mpllb_dump_hw_state(struct drm_i915_private *dev_priv,
 		    "multiplier: %u, tx_clk_div: %u.\n", multiplier, tx_clk_div);
 
 	drm_dbg_kms(&dev_priv->drm, "c10pll_rawhw_state:");
+	drm_dbg_kms(&dev_priv->drm, "tx: 0x%x, cmn: 0x%x\n", hw_state->tx, hw_state->cmn);
 
 	for (i = 0; i < ARRAY_SIZE(hw_state->pll); i = i + 4)
 		drm_dbg_kms(&dev_priv->drm, "pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x\n",
@@ -688,6 +806,7 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
 	val |= XELPDP_FORWARD_CLOCK_UNGATE;
 	val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
 
+	/* TODO: HDMI FRL */
 	if (intel_crtc_has_dp_encoder(crtc_state)) {
 		intel_dp = enc_to_intel_dp(encoder);
 		ssc_enabled = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
@@ -706,44 +825,53 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
 		     XELPDP_SSC_ENABLE_PLLB, val);
 }
 
-static u32 intel_cx0_get_powerdown_update(u8 lane)
+static u32 intel_cx0_get_powerdown_update(u8 lane_mask)
 {
-	if (lane == INTEL_CX0_LANE0)
-		return XELPDP_LANE0_POWERDOWN_UPDATE;
-	else if (lane == INTEL_CX0_LANE1)
-		return XELPDP_LANE1_POWERDOWN_UPDATE;
-	else
-		return XELPDP_LANE0_POWERDOWN_UPDATE |
-		       XELPDP_LANE1_POWERDOWN_UPDATE;
+	int val = 0;
+	int lane = 0;
+
+	for_each_cx0_lane_in_mask(lane_mask, lane)
+		val |= XELPDP_LANE_POWERDOWN_UPDATE(lane);
+
+	return val;
 }
 
-static u32 intel_cx0_get_powerdown_state(u8 lane, u8 state)
+static u32 intel_cx0_get_powerdown_state(u8 lane_mask, u8 state)
 {
-	if (lane == INTEL_CX0_LANE0)
-		return XELPDP_LANE0_POWERDOWN_NEW_STATE(state);
-	else if (lane == INTEL_CX0_LANE1)
-		return XELPDP_LANE1_POWERDOWN_NEW_STATE(state);
-	else
-		return XELPDP_LANE0_POWERDOWN_NEW_STATE(state) |
-		       XELPDP_LANE1_POWERDOWN_NEW_STATE(state);
+	int val = 0;
+	int lane = 0;
+
+	for_each_cx0_lane_in_mask(lane_mask, lane)
+		val |= XELPDP_LANE_POWERDOWN_NEW_STATE(lane, state);
+
+	return val;
 }
 
 static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
 						enum port port,
-						u8 lane, u8 state)
+						u8 lane_mask, u8 state)
 {
 	enum phy phy = intel_port_to_phy(i915, port);
 
 	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
 		     XELPDP_LANE0_POWERDOWN_NEW_STATE_MASK | XELPDP_LANE1_POWERDOWN_NEW_STATE_MASK,
-		     intel_cx0_get_powerdown_state(lane, state));
+		     intel_cx0_get_powerdown_state(lane_mask, state));
+
+	/* Wait for pending transactions.*/
+	if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane_mask_to_lane(lane_mask)),
+				    XELPDP_PORT_M2P_TRANSACTION_PENDING,
+				    XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+		drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for previous transaction to complete. Reset the bus.\n", phy_name(phy));
+		intel_cx0_bus_reset(i915, port, lane_mask_to_lane(lane_mask));
+	}
+
 	intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
 		     XELPDP_LANE0_POWERDOWN_UPDATE | XELPDP_LANE1_POWERDOWN_UPDATE,
-		     intel_cx0_get_powerdown_update(lane));
+		     intel_cx0_get_powerdown_update(lane_mask));
 
 	/* Update Timeout Value */
 	if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(port),
-					 intel_cx0_get_powerdown_update(lane), 0,
+					 intel_cx0_get_powerdown_update(lane_mask), 0,
 					 XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 0, NULL))
 		drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n",
 			 phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
@@ -761,26 +889,26 @@ static void intel_cx0_setup_powerdown(struct drm_i915_private *i915, enum port p
 		     XELPDP_PLL_LANE_STAGGERING_DELAY(0));
 }
 
-static u32 intel_cx0_get_pclk_refclk_request(u8 lane)
+static u32 intel_cx0_get_pclk_refclk_request(u8 lane_mask)
 {
-	if (lane == INTEL_CX0_LANE0)
-		return XELPDP_LANE0_PCLK_REFCLK_REQUEST;
-	else if (lane == INTEL_CX0_LANE1)
-		return XELPDP_LANE1_PCLK_REFCLK_REQUEST;
-	else
-		return XELPDP_LANE0_PCLK_REFCLK_REQUEST |
-		       XELPDP_LANE1_PCLK_REFCLK_REQUEST;
+	int val = 0;
+	int lane = 0;
+
+	for_each_cx0_lane_in_mask(lane_mask, lane)
+		val |= XELPDP_LANE_PCLK_REFCLK_REQUEST(lane);
+
+	return val;
 }
 
-static u32 intel_cx0_get_pclk_refclk_ack(u8 lane)
+static u32 intel_cx0_get_pclk_refclk_ack(u8 lane_mask)
 {
-	if (lane == INTEL_CX0_LANE0)
-		return XELPDP_LANE0_PCLK_REFCLK_ACK;
-	else if (lane == INTEL_CX0_LANE1)
-		return XELPDP_LANE1_PCLK_REFCLK_ACK;
-	else
-		return XELPDP_LANE0_PCLK_REFCLK_ACK |
-		       XELPDP_LANE1_PCLK_REFCLK_ACK;
+	int val = 0;
+	int lane = 0;
+
+	for_each_cx0_lane_in_mask(lane_mask, lane)
+		val |= XELPDP_LANE_PCLK_REFCLK_ACK(lane);
+
+	return val;
 }
 
 /* FIXME: Some Type-C cases need not reset both the lanes. Handle those cases. */
@@ -788,7 +916,7 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915, enum port po
 				     bool lane_reversal)
 {
 	enum phy phy = intel_port_to_phy(i915, port);
-	u8 lane = lane_reversal ? INTEL_CX0_LANE1 :
+	u8 lane_mask = lane_reversal ? INTEL_CX0_LANE1 :
 				  INTEL_CX0_LANE0;
 
 	if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL1(port),
@@ -811,11 +939,11 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915, enum port po
 
 	intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(port),
 		     intel_cx0_get_pclk_refclk_request(INTEL_CX0_BOTH_LANES),
-		     intel_cx0_get_pclk_refclk_request(lane));
+		     intel_cx0_get_pclk_refclk_request(lane_mask));
 
 	if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(port),
 					 intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES),
-					 intel_cx0_get_pclk_refclk_ack(lane),
+					 intel_cx0_get_pclk_refclk_ack(lane_mask),
 					 XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL))
 		drm_warn(&i915->drm, "PHY %c failed to request refclk after %dus.\n",
 			 phy_name(phy), XELPDP_REFCLK_ENABLE_TIMEOUT_US);
@@ -847,6 +975,7 @@ static void intel_c10_program_phy_lane(struct drm_i915_private *i915,
 		      C10_VDR_CTRL_MSGBUS_ACCESS, C10_VDR_CTRL_MSGBUS_ACCESS,
 		      MB_WRITE_COMMITTED);
 
+	/* TODO: DP-alt MFD case where only one PHY lane should be programmed. */
 	l0t1 = intel_cx0_read(i915, port, INTEL_CX0_LANE0, PHY_CX0_TX_CONTROL(1, 2));
 	l0t2 = intel_cx0_read(i915, port, INTEL_CX0_LANE0, PHY_CX0_TX_CONTROL(2, 2));
 	l1t1 = intel_cx0_read(i915, port, INTEL_CX0_LANE1, PHY_CX0_TX_CONTROL(1, 2));
@@ -855,84 +984,70 @@ static void intel_c10_program_phy_lane(struct drm_i915_private *i915,
 	if (lane_reversal) {
 		switch (lane_count) {
 		case 1:
-			/* Disable MLs 1(lane0), 2(lane0), 3(lane1) */
-			intel_cx0_write(i915, port, INTEL_CX0_LANE1, PHY_CX0_TX_CONTROL(1, 2),
-					l1t1 | CONTROL2_DISABLE_SINGLE_TX,
-					MB_WRITE_COMMITTED);
-			fallthrough;
+			l1t1 |= CONTROL2_DISABLE_SINGLE_TX;
+			fallthrough;
 		case 2:
-			/* Disable MLs 1(lane0), 2(lane0) */
-			intel_cx0_write(i915, port, INTEL_CX0_LANE0, PHY_CX0_TX_CONTROL(2, 2),
-					l0t2 | CONTROL2_DISABLE_SINGLE_TX,
-					MB_WRITE_COMMITTED);
-			fallthrough;
+			l0t2 |= CONTROL2_DISABLE_SINGLE_TX;
+			fallthrough;
 		case 3:
-			/* Disable MLs 1(lane0) */
-			intel_cx0_write(i915, port, INTEL_CX0_LANE0, PHY_CX0_TX_CONTROL(1, 2),
-					l0t1 | CONTROL2_DISABLE_SINGLE_TX,
-					MB_WRITE_COMMITTED);
+			l0t1 |= CONTROL2_DISABLE_SINGLE_TX;
 			break;
 		}
 	} else {
 		switch (lane_count) {
 		case 1:
-			if (dp_alt_mode) {
-				/* Disable MLs 1(lane0), 3(lane1), 4(lane1) */
-				intel_cx0_write(i915, port, INTEL_CX0_LANE0, PHY_CX0_TX_CONTROL(1, 2),
-						l0t1 | CONTROL2_DISABLE_SINGLE_TX,
-						MB_WRITE_COMMITTED);
-			} else {
-				/* Disable MLs 2(lane0), 3(lane1), 4(lane1) */
-				intel_cx0_write(i915, port, INTEL_CX0_LANE0, PHY_CX0_TX_CONTROL(2, 2),
-						l0t2 | CONTROL2_DISABLE_SINGLE_TX,
-						MB_WRITE_COMMITTED);
-			}
-			fallthrough;
+			if (dp_alt_mode)
+				l0t1 |= CONTROL2_DISABLE_SINGLE_TX;
+			else
+				l0t2 |= CONTROL2_DISABLE_SINGLE_TX;
+			fallthrough;
 		case 2:
-			/* Disable MLs 3(lane1), 4(lane1) */
-			intel_cx0_write(i915, port, INTEL_CX0_LANE1, PHY_CX0_TX_CONTROL(1, 2),
-					l1t1 | CONTROL2_DISABLE_SINGLE_TX,
-					MB_WRITE_COMMITTED);
-			fallthrough;
+			l1t1 |= CONTROL2_DISABLE_SINGLE_TX;
+			fallthrough;
 		case 3:
-			/* Disable MLs 4(lane1) */
-			intel_cx0_write(i915, port, INTEL_CX0_LANE1, PHY_CX0_TX_CONTROL(2, 2),
-					l1t2 | CONTROL2_DISABLE_SINGLE_TX,
-					MB_WRITE_COMMITTED);
+			l1t2 |= CONTROL2_DISABLE_SINGLE_TX;
 			break;
 		}
 	}
 
-	if (intel_is_c10phy(i915, intel_port_to_phy(i915, port))) {
-		intel_cx0_rmw(i915, port, INTEL_CX0_LANE1, PHY_C10_VDR_CONTROL(1),
-			      C10_VDR_CTRL_UPDATE_CFG | C10_VDR_CTRL_MSGBUS_ACCESS,
-			      C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED);
-		intel_cx0_rmw(i915, port, INTEL_CX0_LANE0, PHY_C10_VDR_CONTROL(1),
-			      C10_VDR_CTRL_UPDATE_CFG | C10_VDR_CTRL_MSGBUS_ACCESS,
-			      C10_VDR_CTRL_MASTER_LANE | C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED);
-	}
+	/* disable MLs */
+	intel_cx0_write(i915, port, INTEL_CX0_LANE0, PHY_CX0_TX_CONTROL(1, 2),
+			l0t1, MB_WRITE_COMMITTED);
+	intel_cx0_write(i915, port, INTEL_CX0_LANE0, PHY_CX0_TX_CONTROL(2, 2),
+			l0t2, MB_WRITE_COMMITTED);
+	intel_cx0_write(i915, port, INTEL_CX0_LANE1, PHY_CX0_TX_CONTROL(1, 2),
+			l1t1, MB_WRITE_COMMITTED);
+	intel_cx0_write(i915, port, INTEL_CX0_LANE1, PHY_CX0_TX_CONTROL(2, 2),
+			l1t2, MB_WRITE_COMMITTED);
+
+	intel_cx0_rmw(i915, port, INTEL_CX0_LANE1, PHY_C10_VDR_CONTROL(1),
+		      C10_VDR_CTRL_UPDATE_CFG | C10_VDR_CTRL_MSGBUS_ACCESS,
+		      C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED);
+	intel_cx0_rmw(i915, port, INTEL_CX0_LANE0, PHY_C10_VDR_CONTROL(1),
+		      C10_VDR_CTRL_UPDATE_CFG | C10_VDR_CTRL_MSGBUS_ACCESS,
+		      C10_VDR_CTRL_MASTER_LANE | C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED);
 }
 
-static u32 intel_cx0_get_pclk_pll_request(u8 lane)
+static u32 intel_cx0_get_pclk_pll_request(u8 lane_mask)
 {
-	if (lane == INTEL_CX0_LANE0)
-		return XELPDP_LANE0_PCLK_PLL_REQUEST;
-	else if (lane == INTEL_CX0_LANE1)
-		return XELPDP_LANE1_PCLK_PLL_REQUEST;
-	else
-		return XELPDP_LANE0_PCLK_PLL_REQUEST |
-		       XELPDP_LANE1_PCLK_PLL_REQUEST;
+	int val = 0;
+	int lane = 0;
+
+	for_each_cx0_lane_in_mask(lane_mask, lane)
+		val |= XELPDP_LANE_PCLK_PLL_REQUEST(lane);
+
+	return val;
 }
 
-static u32 intel_cx0_get_pclk_pll_ack(u8 lane)
+static u32 intel_cx0_get_pclk_pll_ack(u8 lane_mask)
 {
-	if (lane == INTEL_CX0_LANE0)
-		return XELPDP_LANE0_PCLK_PLL_ACK;
-	else if (lane == INTEL_CX0_LANE1)
-		return XELPDP_LANE1_PCLK_PLL_ACK;
-	else
-		return XELPDP_LANE0_PCLK_PLL_ACK |
-		       XELPDP_LANE1_PCLK_PLL_ACK;
+	int val = 0;
+	int lane = 0;
+
+	for_each_cx0_lane_in_mask(lane_mask, lane)
+		val |= XELPDP_LANE_PCLK_PLL_ACK(lane);
+
+	return val;
 }
 
 static void intel_c10pll_enable(struct intel_encoder *encoder,
@@ -1008,9 +1123,15 @@ void intel_cx0pll_enable(struct intel_encoder *encoder,
 {
 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
 	enum phy phy = intel_port_to_phy(i915, encoder->port);
+	intel_wakeref_t wakeref;
+
+	wakeref = intel_cx0_phy_transaction_begin(encoder);
 
 	drm_WARN_ON(&i915->drm, !intel_is_c10phy(i915, phy));
 	intel_c10pll_enable(encoder, crtc_state);
+
+	/* TODO: enable TBT-ALT mode */
+	intel_cx0_phy_transaction_end(encoder, wakeref);
 }
 
 static void intel_c10pll_disable(struct intel_encoder *encoder)
@@ -1063,9 +1184,13 @@ void intel_cx0pll_disable(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
 	enum phy phy = intel_port_to_phy(i915, encoder->port);
+	intel_wakeref_t wakeref;
+
+	wakeref = intel_cx0_phy_transaction_begin(encoder);
 
 	drm_WARN_ON(&i915->drm, !intel_is_c10phy(i915, phy));
 	intel_c10pll_disable(encoder);
+	intel_cx0_phy_transaction_end(encoder, wakeref);
 }
 
 void intel_c10mpllb_state_verify(struct intel_atomic_state *state,
@@ -1105,12 +1230,7 @@ void intel_c10mpllb_state_verify(struct intel_atomic_state *state,
 	intel_c10mpllb_readout_hw_state(encoder, &mpllb_hw_state);
 
 	for (i = 0; i < ARRAY_SIZE(mpllb_sw_state->pll); i++) {
-		u8 expected;
-
-		if (!use_ssc && i > 3 && i < 9)
-			expected = 0;
-		else
-			expected = mpllb_sw_state->pll[i];
+		u8 expected = mpllb_sw_state->pll[i];
 
 		I915_STATE_WARN(mpllb_hw_state.pll[i] != expected,
 				"[CRTC:%d:%s] mismatch in C10MPLLB: Register[%d] (expected 0x%02x, found 0x%02x)",
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
index 8cf340509097..30b1b11b2176 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
@@ -18,12 +18,13 @@ struct intel_encoder;
 struct intel_crtc_state;
 enum phy;
 
-#define INTEL_CX0_LANE0		0x1
-#define INTEL_CX0_LANE1		0x2
-#define INTEL_CX0_BOTH_LANES	0x3
+#define for_each_cx0_lane_in_mask(__lane, __lane_mask) \
+	for ((__lane) = 0; (__lane) < 2; (__lane)++) \
+		for_each_if((__lane_mask) & BIT(__lane))
 
-#define MB_WRITE_COMMITTED		1
-#define MB_WRITE_UNCOMMITTED		0
+#define INTEL_CX0_LANE0		BIT(0)
+#define INTEL_CX0_LANE1		BIT(1)
+#define INTEL_CX0_BOTH_LANES	(INTEL_CX0_LANE1 | INTEL_CX0_LANE0)
 
 bool intel_is_c10phy(struct drm_i915_private *dev_priv, enum phy phy);
 void intel_cx0pll_enable(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
index 15e249f46a64..ba7105dd37af 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
@@ -134,17 +134,18 @@
 #define   C10_PLL3_MULTIPLIERH_MASK	REG_GENMASK8(3, 0)
 #define   C10_PLL15_TXCLKDIV_MASK	REG_GENMASK8(2, 0)
 #define PHY_C10_VDR_CMN(idx)		(0xC20 + (idx))
-#define   C10_CMN0_DP_VAL		0x21
+#define   C10_CMN0_REF_RANGE		REG_FIELD_PREP(REG_GENMASK(4, 0), 1)
+#define   C10_CMN0_REF_CLK_MPLLB_DIV	REG_FIELD_PREP(REG_GENMASK(7, 5), 1)
 #define   C10_CMN3_TXVBOOST_MASK	REG_GENMASK8(7, 5)
 #define   C10_CMN3_TXVBOOST(val)	REG_FIELD_PREP8(C10_CMN3_TXVBOOST_MASK, val)
 #define PHY_C10_VDR_TX(idx)		(0xC30 + (idx))
-#define   C10_TX0_VAL			0x10
+#define   C10_TX0_TX_MPLLB_SEL		REG_BIT(4)
 #define PHY_C10_VDR_CONTROL(idx)	(0xC70 + (idx) - 1)
 #define   C10_VDR_CTRL_MSGBUS_ACCESS	REG_BIT8(2)
 #define   C10_VDR_CTRL_MASTER_LANE	REG_BIT8(1)
 #define   C10_VDR_CTRL_UPDATE_CFG	REG_BIT8(0)
 #define PHY_C10_VDR_CUSTOM_WIDTH	0xD02
-
+#define   C10_VDR_CUSTOM_WIDTH		REG_FIELD_PREP(REG_GENMASK(1, 0), 3)
 #define CX0_P0_STATE_ACTIVE             0x0
 #define CX0_P2_STATE_READY              0x2
 #define CX0_P2PG_STATE_DISABLE          0x9
@@ -155,7 +156,7 @@
 #define PLL_C10_MPLL_SSC_EN             REG_BIT8(0)
 
 /* PIPE SPEC Defined Registers */
-#define PHY_CX0_TX_CONTROL(tx, control) (0x400 + ((tx) - 1) * 0x200 + (control))
-#define CONTROL2_DISABLE_SINGLE_TX      REG_BIT(6)
+#define PHY_CX0_TX_CONTROL(tx, control)	(0x400 + ((tx) - 1) * 0x200 + (control))
+#define   CONTROL2_DISABLE_SINGLE_TX	REG_BIT(6)
 
 #endif /* __INTEL_CX0_REG_DEFS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 4e8210dfa523..7c9f4288329e 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -1618,8 +1618,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
 		return;
 
 	/* 2. Initialize all combo phys */
-	if (DISPLAY_VER(dev_priv) < 14)
-		intel_combo_phy_init(dev_priv);
+	intel_combo_phy_init(dev_priv);
 
 	/*
 	 * 3. Enable Power Well 1 (PG1).
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 828608f6f592..62bafcbc7937 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -973,7 +973,7 @@ void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
 	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
 		bxt_verify_ddi_phy_power_wells(dev_priv);
 
-	if (DISPLAY_VER(dev_priv) >= 11 && DISPLAY_VER(dev_priv) < 14)
+	if (DISPLAY_VER(dev_priv) >= 11)
 		/*
 		 * DMC retains HW context only for port A, the other combo
 		 * PHY's HW context for port B is lost after DC transitions,
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index d83e49b1aadf..7dd128ab9a49 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -982,6 +982,8 @@ struct intel_link_m_n {
 
 struct intel_c10mpllb_state {
 	u32 clock; /* in KHz */
+	u8 tx;
+	u8 cmn;
 	u8 pll[20];
 };
 
-- 
2.34.1



More information about the Intel-gfx-trybot mailing list