[PATCH 21/29] drm/i915/dp: Add support for DP tunnel BW allocation

Imre Deak imre.deak at intel.com
Wed Dec 20 00:54:00 UTC 2023


Add support to detect and enable DP tunnels with BW allocation mode.

Signed-off-by: Imre Deak <imre.deak at intel.com>
---
 drivers/gpu/drm/i915/Makefile                 |   1 +
 drivers/gpu/drm/i915/display/intel_atomic.c   |   2 +
 .../gpu/drm/i915/display/intel_display_core.h |   1 +
 .../drm/i915/display/intel_display_types.h    |  10 +
 .../gpu/drm/i915/display/intel_dp_tunnel.c    | 661 ++++++++++++++++++
 .../gpu/drm/i915/display/intel_dp_tunnel.h    |  65 ++
 6 files changed, 740 insertions(+)
 create mode 100644 drivers/gpu/drm/i915/display/intel_dp_tunnel.c
 create mode 100644 drivers/gpu/drm/i915/display/intel_dp_tunnel.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index e777686190ca2..8e7d1007eb24d 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -351,6 +351,7 @@ i915-y += \
 	display/intel_dp_hdcp.o \
 	display/intel_dp_link_training.o \
 	display/intel_dp_mst.o \
+	display/intel_dp_tunnel.o \
 	display/intel_dsi.o \
 	display/intel_dsi_dcs_backlight.o \
 	display/intel_dsi_vbt.o \
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index ec0d5168b5035..96ab37e158995 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -29,6 +29,7 @@
  * See intel_atomic_plane.c for the plane-specific atomic functionality.
  */
 
+#include <drm/display/drm_dp_tunnel.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_fourcc.h>
@@ -38,6 +39,7 @@
 #include "intel_atomic.h"
 #include "intel_cdclk.h"
 #include "intel_display_types.h"
+#include "intel_dp_tunnel.h"
 #include "intel_global_state.h"
 #include "intel_hdcp.h"
 #include "intel_psr.h"
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index 91662456f82e4..a33240192de4b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -514,6 +514,7 @@ struct intel_display {
 	} wq;
 
 	/* Grouping using named structs. Keep sorted. */
+	struct drm_dp_tunnel_mgr *dp_tunnel_mgr;
 	struct intel_audio audio;
 	struct intel_dpll dpll;
 	struct intel_fbc *fbc[I915_MAX_FBCS];
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index dd30f8a328617..a0521d867c95a 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -33,6 +33,7 @@
 
 #include <drm/display/drm_dp_dual_mode_helper.h>
 #include <drm/display/drm_dp_mst_helper.h>
+#include <drm/display/drm_dp_tunnel.h>
 #include <drm/display/drm_dsc.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_crtc.h>
@@ -678,6 +679,8 @@ struct intel_atomic_state {
 
 	struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
 
+	struct intel_dp_tunnel_inherited_state *dp_tunnel_state;
+
 	/*
 	 * Current watermarks can't be trusted during hardware readout, so
 	 * don't bother calculating intermediate watermarks.
@@ -1373,6 +1376,9 @@ struct intel_crtc_state {
 		struct drm_dsc_config config;
 	} dsc;
 
+	/* DP tunnel used for BW allocation. */
+	struct drm_dp_tunnel_ref dp_tunnel_ref;
+
 	/* HSW+ linetime watermarks */
 	u16 linetime;
 	u16 ips_linetime;
@@ -1774,6 +1780,10 @@ struct intel_dp {
 	/* connector directly attached - won't be use for modeset in mst world */
 	struct intel_connector *attached_connector;
 
+	struct drm_dp_tunnel *tunnel;
+	bool reset_tunnel:1;
+	bool tunnel_suspended:1;
+
 	/* mst connector list */
 	struct intel_dp_mst_encoder *mst_encoders[I915_MAX_PIPES];
 	struct drm_dp_mst_topology_mgr mst_mgr;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
new file mode 100644
index 0000000000000..fcd7ba5904124
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
@@ -0,0 +1,661 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "i915_drv.h"
+
+#include <drm/display/drm_dp_tunnel.h>
+
+#include "intel_atomic.h"
+#include "intel_display_limits.h"
+#include "intel_display_types.h"
+#include "intel_dp.h"
+#include "intel_dp_link_training.h"
+#include "intel_dp_mst.h"
+#include "intel_dp_tunnel.h"
+#include "intel_link_bw.h"
+
+struct intel_dp_tunnel_inherited_state {
+	int count;
+	struct {
+		const struct intel_crtc *crtc;
+		struct drm_dp_tunnel_ref tunnel_ref;
+	} tunnels[I915_MAX_PIPES];
+};
+
+void intel_dp_disconnect_tunnel(struct intel_dp *intel_dp)
+{
+	if (!intel_dp->tunnel)
+		return;
+
+	drm_dp_tunnel_destroy(intel_dp->tunnel);
+	intel_dp->tunnel = NULL;
+}
+
+void intel_dp_destroy_tunnel(struct intel_dp *intel_dp)
+{
+	if (intel_dp_has_bw_alloc_tunnel(intel_dp))
+		drm_dp_tunnel_disable_bw_alloc(intel_dp->tunnel);
+
+	intel_dp_disconnect_tunnel(intel_dp);
+}
+
+static int kbytes_to_mbits(int kbytes)
+{
+	return DIV_ROUND_UP(kbytes * 8, 1000);
+}
+
+static int allocate_initial_tunnel_bw(struct intel_dp *intel_dp,
+				      struct drm_modeset_acquire_ctx *ctx)
+{
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+	const struct intel_crtc *crtc;
+	int tunnel_bw = 0;
+	u8 pipe_mask;
+	int err;
+
+	err = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask);
+	if (err)
+		return err;
+
+	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
+		const struct intel_crtc_state *crtc_state =
+			to_intel_crtc_state(crtc->base.state);
+		int stream_bw = intel_dp_config_required_rate(crtc_state);
+
+		drm_dbg_kms(&i915->drm,
+			    "[DPTUN %s][ENCODER:%d:%s][CRTC:%d:%s] initial BW for stream %d: %d/%d Mb/s\n",
+			    drm_dp_tunnel_name(intel_dp->tunnel),
+			    encoder->base.base.id,
+			    encoder->base.name,
+			    crtc->base.base.id,
+			    crtc->base.name,
+			    crtc->pipe,
+			    kbytes_to_mbits(stream_bw),
+			    kbytes_to_mbits(tunnel_bw));
+
+		tunnel_bw += stream_bw;
+	}
+
+	err = drm_dp_tunnel_alloc_bw(intel_dp->tunnel, tunnel_bw);
+	if (err) {
+		drm_dbg_kms(&i915->drm,
+			    "[DPTUN %s][ENCODER:%d:%s] failed to allocate initial tunnel BW (err %pe)\n",
+			    drm_dp_tunnel_name(intel_dp->tunnel),
+			    encoder->base.base.id,
+			    encoder->base.name,
+			    ERR_PTR(err));
+
+		return err;
+	}
+
+	return drm_dp_tunnel_update_state(intel_dp->tunnel, true);
+}
+
+static int update_dp_tunnel_state(struct intel_dp *intel_dp,
+				  struct drm_dp_tunnel *tunnel)
+{
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+	int old_bw_below_dprx;
+	int new_bw_below_dprx;
+	bool reset_tunnel = intel_dp->reset_tunnel;
+	int old_bw;
+	int new_bw;
+	int ret;
+
+	old_bw = intel_dp_max_link_data_rate(intel_dp,
+					     intel_dp_max_common_rate(intel_dp),
+					     intel_dp_max_common_lane_count(intel_dp));
+
+	old_bw_below_dprx = old_bw <
+		drm_dp_max_dprx_data_rate(intel_dp_max_common_rate(intel_dp),
+					  intel_dp_max_common_lane_count(intel_dp));
+
+	intel_dp->reset_tunnel = false;
+	ret = drm_dp_tunnel_update_state(intel_dp->tunnel, reset_tunnel);
+	if (ret < 0) {
+		drm_dbg_kms(&i915->drm,
+			    "[DPTUN %s][ENCODER:%d:%s] state update failed (err %pe)\n",
+			    drm_dp_tunnel_name(intel_dp->tunnel),
+			    encoder->base.base.id,
+			    encoder->base.name,
+			    ERR_PTR(ret));
+
+		return ret;
+	}
+
+	if (ret == 0 ||
+	    !drm_dp_tunnel_bw_alloc_is_enabled(intel_dp->tunnel))
+		return 0;
+
+	intel_dp_update_sink_caps(intel_dp);
+
+	new_bw = intel_dp_max_link_data_rate(intel_dp,
+					     intel_dp_max_common_rate(intel_dp),
+					     intel_dp_max_common_lane_count(intel_dp));
+
+	new_bw_below_dprx = new_bw <
+		drm_dp_max_dprx_data_rate(intel_dp_max_common_rate(intel_dp),
+					  intel_dp_max_common_lane_count(intel_dp));
+
+	if (old_bw_below_dprx == new_bw_below_dprx &&
+	    !new_bw_below_dprx)
+		return 0;
+
+	drm_dbg_kms(&i915->drm,
+		    "[DPTUN %s][ENCODER:%d:%s] BW changed: %d -> %d\n",
+		    drm_dp_tunnel_name(intel_dp->tunnel),
+		    encoder->base.base.id,
+		    encoder->base.name,
+		    old_bw, new_bw);
+
+	return 1;
+}
+
+int intel_dp_detect_tunnel(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx)
+{
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+	struct drm_dp_tunnel *tunnel;
+	int ret;
+
+	if (intel_dp_is_edp(intel_dp))
+		return 0;
+
+	if (intel_dp->tunnel) {
+		ret = update_dp_tunnel_state(intel_dp, intel_dp->tunnel);
+		if (ret >= 0)
+			return ret;
+
+		/* An error or change that requires recreating the tunnel */
+		intel_dp_destroy_tunnel(intel_dp);
+	}
+
+	tunnel = drm_dp_tunnel_detect(i915->display.dp_tunnel_mgr,
+					&intel_dp->aux);
+	if (IS_ERR(tunnel))
+		return PTR_ERR(tunnel);
+
+	intel_dp->tunnel = tunnel;
+
+	ret = drm_dp_tunnel_enable_bw_alloc(intel_dp->tunnel);
+	if (ret) {
+		if (ret == -ENOTSUPP)
+			return 0;
+
+		drm_dbg_kms(&i915->drm,
+			    "[DPTUN %s][ENCODER:%d:%s]: failed to enable BW allocation (err %pe)\n",
+			    drm_dp_tunnel_name(intel_dp->tunnel),
+			    encoder->base.base.id,
+			    encoder->base.name,
+			    ERR_PTR(ret));
+
+		/* Keep the BW allocation mode disabled */
+		return ret;
+	}
+
+	ret = allocate_initial_tunnel_bw(intel_dp, ctx);
+	if (ret == -EDEADLK)
+		intel_dp_destroy_tunnel(intel_dp);
+
+	return ret;
+}
+
+static bool drm_dp_tunnel_has_bw_alloc(const struct drm_dp_tunnel *tunnel)
+{
+	return tunnel && drm_dp_tunnel_bw_alloc_is_enabled(tunnel);
+}
+
+bool intel_dp_has_bw_alloc_tunnel(struct intel_dp *intel_dp)
+{
+	return drm_dp_tunnel_has_bw_alloc(intel_dp->tunnel);
+}
+
+void intel_dp_suspend_tunnel(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+	struct intel_connector *connector = intel_dp->attached_connector;
+	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
+	if (!intel_dp_has_bw_alloc_tunnel(intel_dp))
+		return;
+
+	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] DPTUN: suspend\n",
+		    connector->base.base.id, connector->base.name,
+		    encoder->base.base.id, encoder->base.name);
+
+	drm_dp_tunnel_disable_bw_alloc(intel_dp->tunnel);
+
+	intel_dp->tunnel_suspended = true;
+}
+
+void intel_dp_resume_tunnel(struct intel_dp *intel_dp, bool dpcd_updated)
+{
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+	struct intel_connector *connector = intel_dp->attached_connector;
+	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+	u8 dpcd[DP_RECEIVER_CAP_SIZE];
+
+	if (!intel_dp->tunnel_suspended)
+		return;
+
+	intel_dp->tunnel_suspended = false;
+
+	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] DPTUN: resume\n",
+		    connector->base.base.id, connector->base.name,
+		    encoder->base.base.id, encoder->base.name);
+
+	/* DPRX caps read required by tunnel detection */
+	if ((!dpcd_updated && intel_dp_read_dprx_caps(intel_dp, dpcd) < 0) ||
+	    drm_dp_tunnel_enable_bw_alloc(intel_dp->tunnel) < 0)
+		intel_dp->reset_tunnel = true;
+}
+
+struct drm_dp_tunnel *
+intel_dp_tunnel_atomic_get_inherited_state(struct intel_atomic_state *state,
+					   const struct intel_crtc *crtc)
+{
+	int i;
+
+	if (!state->dp_tunnel_state)
+		return NULL;
+
+	for (i = 0; i < state->dp_tunnel_state->count; i++)
+		if (state->dp_tunnel_state->tunnels[i].crtc == crtc)
+			return state->dp_tunnel_state->tunnels[i].tunnel_ref.tunnel;
+
+	return NULL;
+}
+
+int intel_dp_tunnel_atomic_add_inherited_state(struct intel_atomic_state *state,
+					       struct drm_dp_tunnel *tunnel,
+					       const struct intel_crtc *crtc)
+{
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
+	struct drm_dp_tunnel *old_tunnel;
+
+	old_tunnel = intel_dp_tunnel_atomic_get_inherited_state(state, crtc);
+
+	if (old_tunnel) {
+		drm_WARN_ON(&i915->drm, old_tunnel != tunnel);
+		return 0;
+	}
+
+	if (!state->dp_tunnel_state) {
+		state->dp_tunnel_state = kzalloc(sizeof(*state->dp_tunnel_state), GFP_KERNEL);
+		if (!state->dp_tunnel_state)
+			return -ENOMEM;
+	}
+
+	if (drm_WARN_ON(&i915->drm,
+			state->dp_tunnel_state->count == ARRAY_SIZE(state->dp_tunnel_state->tunnels)))
+		return -EINVAL;
+
+	state->dp_tunnel_state->tunnels[state->dp_tunnel_state->count].crtc = crtc;
+	drm_dp_tunnel_ref_get(tunnel,
+				&state->dp_tunnel_state->tunnels[state->dp_tunnel_state->count].tunnel_ref);
+
+	state->dp_tunnel_state->count++;
+
+	return 0;
+}
+
+void intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *state)
+{
+	int i;
+
+	if (!state->dp_tunnel_state)
+		return;
+
+	for (i = 0; i < state->dp_tunnel_state->count; i++)
+		drm_dp_tunnel_ref_put(&state->dp_tunnel_state->tunnels[i].tunnel_ref);
+
+	kfree(state->dp_tunnel_state);
+	state->dp_tunnel_state = NULL;
+}
+
+static int intel_dp_tunnel_atomic_add_state_early(struct intel_atomic_state *state,
+						  struct drm_dp_tunnel *tunnel)
+{
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
+	u32 pipe_mask;
+	int err;
+
+	if (!tunnel)
+		return 0;
+
+	err = drm_dp_tunnel_atomic_get_group_streams_in_state(&state->base,
+							      tunnel, &pipe_mask);
+	if (err)
+		return err;
+
+	drm_WARN_ON(&i915->drm, pipe_mask & ~((1 << I915_MAX_PIPES) - 1));
+
+	return intel_modeset_pipes_in_mask_early(state, "dptun", pipe_mask);
+}
+
+int intel_dp_tunnel_atomic_add_state_for_crtc(struct intel_atomic_state *state,
+					      struct intel_crtc *crtc)
+{
+	const struct intel_crtc_state *new_crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	const struct drm_dp_tunnel_state *tunnel_state;
+	struct drm_dp_tunnel *tunnel = new_crtc_state->dp_tunnel_ref.tunnel;
+
+	if (!tunnel)
+		return 0;
+
+	tunnel_state = drm_dp_tunnel_atomic_get_state(&state->base, tunnel);
+	if (IS_ERR(tunnel_state))
+		return PTR_ERR(tunnel_state);
+
+	return 0;
+}
+
+static int intel_dp_tunnel_atomic_add_cleared_state(struct intel_atomic_state *state,
+						    struct intel_dp *intel_dp,
+						    const struct intel_connector *connector,
+						    const struct intel_crtc *crtc,
+						    struct drm_dp_tunnel *tunnel)
+{
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
+	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+	struct drm_dp_tunnel_state *tunnel_state;
+	int err;
+
+	drm_dbg_kms(&i915->drm,
+		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] adding cleared state for tunnel %px\n",
+		    drm_dp_tunnel_name(intel_dp->tunnel),
+		    connector->base.base.id,
+		    connector->base.name,
+		    encoder->base.base.id,
+		    encoder->base.name,
+		    crtc->base.base.id,
+		    crtc->base.name,
+		    intel_dp->tunnel);
+
+	err = intel_dp_tunnel_atomic_add_state_early(state, tunnel);
+	if (err)
+		return err;
+
+	tunnel_state = drm_dp_tunnel_atomic_get_state(&state->base, tunnel);
+	if (IS_ERR(tunnel_state))
+		return PTR_ERR(tunnel_state);
+
+	drm_dp_tunnel_atomic_clear_state(tunnel_state);
+
+	return 0;
+}
+
+int intel_dp_tunnel_atomic_check_tunnels(struct intel_atomic_state *state,
+					 struct intel_dp *intel_dp,
+					 struct intel_connector *connector)
+{
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
+	const struct drm_connector_state *_old_conn_state =
+		drm_atomic_get_old_connector_state(&state->base, &connector->base);
+	const struct drm_connector_state *_new_conn_state =
+		drm_atomic_get_new_connector_state(&state->base, &connector->base);
+	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+	const struct intel_crtc_state *old_crtc_state = NULL;
+	struct intel_crtc *old_crtc;
+	struct intel_crtc *new_crtc;
+	int err = 0;
+
+	if (_old_conn_state->crtc) {
+		old_crtc = to_intel_crtc(_old_conn_state->crtc);
+		old_crtc_state = intel_atomic_get_old_crtc_state(state, old_crtc);
+	}
+
+	if (old_crtc_state && old_crtc_state->dp_tunnel_ref.tunnel) {
+		err = intel_dp_tunnel_atomic_add_cleared_state(state,
+							       intel_dp,
+							       connector,
+							       old_crtc,
+							       old_crtc_state->dp_tunnel_ref.tunnel);
+		if (err)
+			return err;
+	}
+
+	if (!drm_dp_tunnel_has_bw_alloc(intel_dp->tunnel))
+		return 0;
+
+	if (_new_conn_state->crtc) {
+		new_crtc = to_intel_crtc(_new_conn_state->crtc);
+		err = intel_dp_tunnel_atomic_add_cleared_state(state,
+							       intel_dp,
+							       connector,
+							       new_crtc,
+							       intel_dp->tunnel);
+		if (err)
+			return err;
+	}
+
+	if (!old_crtc_state)
+		return 0;
+
+	/*
+	 * If a tunnel was detected only after the CRTC/connector on the tunnel
+	 * got enabled, the old CRTC state won't contain the tunnel's state.
+	 * Such a tunnel still has reserved BW, which needs to be released, so
+	 * add the state for such inherited tunnels separately.
+	 */
+	if (!old_crtc_state->hw.active ||
+	    old_crtc_state->dp_tunnel_ref.tunnel == intel_dp->tunnel)
+		return 0;
+
+	drm_dbg_kms(&i915->drm,
+		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] check inherited tunnel %px\n",
+		    drm_dp_tunnel_name(intel_dp->tunnel),
+		    connector->base.base.id,
+		    connector->base.name,
+		    encoder->base.base.id,
+		    encoder->base.name,
+		    old_crtc->base.base.id,
+		    old_crtc->base.name,
+		    intel_dp->tunnel);
+
+	return intel_dp_tunnel_atomic_add_inherited_state(state, intel_dp->tunnel, old_crtc);
+}
+
+void intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state,
+					      struct intel_dp *intel_dp,
+					      const struct intel_connector *connector,
+					      struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
+	const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	int required_rate = intel_dp_config_required_rate(crtc_state);
+
+	if (!intel_dp_has_bw_alloc_tunnel(intel_dp))
+		return;
+
+	drm_dbg_kms(&i915->drm,
+		    "[CONNECTOR:%d:%s][CRTC:%d:%s] DPTUN stream %d required BW %d Mb/s\n",
+		    connector->base.base.id,
+		    connector->base.name,
+		    crtc->base.base.id,
+		    crtc->base.name,
+		    crtc->pipe,
+		    required_rate * 8 / 1000);
+
+	drm_dp_tunnel_atomic_set_stream_bw(&state->base, intel_dp->tunnel,
+					   crtc->pipe, required_rate);
+
+	drm_dp_tunnel_ref_get(intel_dp->tunnel,
+			      &crtc_state->dp_tunnel_ref);
+}
+
+/**
+ * intel_dp_tunnel_atomic_check_link - Check the DP tunnel atomic state
+ * @state: intel atomic state
+ * @limits: link BW limits
+ *
+ * Check the link configuration for all DP tunnels in @state. If the
+ * configuration is invalid @limits will be updated if possible to
+ * reduce the total BW, after which the configuration for all CRTCs in
+ * @state must be recomputed with the updated @limits.
+ *
+ * Returns:
+ *   - 0 if the configuration is valid
+ *   - %-EAGAIN, if the configuration is invalid and @limits got updated
+ *     with fallback values with which the configuration of all CRTCs in
+ *     @state must be recomputed
+ *   - Other negative error, if the configuration is invalid without a
+ *     fallback possibility, or the check failed for another reason
+ */
+int intel_dp_tunnel_atomic_check_link(struct intel_atomic_state *state,
+				      struct intel_link_bw_limits *limits)
+{
+	u32 failed_stream_mask;
+	int err;
+
+	err = drm_dp_tunnel_atomic_check_stream_bws(&state->base,
+						    &failed_stream_mask);
+	if (err == -ENOSPC)
+		err = intel_link_bw_reduce_bpp(state, limits,
+					       failed_stream_mask,
+					       "DP tunnel link BW");
+
+	return err;
+}
+
+static void queue_modeset_retry_work(struct intel_atomic_state *state,
+				     struct intel_encoder *encoder,
+				     const struct intel_crtc_state *crtc_state,
+				     const struct drm_connector_state *conn_state)
+{
+	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+	struct intel_connector *connector;
+	struct intel_digital_connector_state *iter_conn_state;
+	struct intel_dp *intel_dp;
+	int i;
+
+	if (conn_state) {
+		intel_dp_queue_modeset_retry_work(to_intel_connector(conn_state->connector));
+		return;
+	}
+
+	if (drm_WARN_ON(&i915->drm,
+			!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
+		return;
+
+	intel_dp = enc_to_intel_dp(encoder);
+
+	for_each_new_intel_connector_in_state(state, connector, iter_conn_state, i) {
+		(void)iter_conn_state;
+
+		if (connector->mst_port != intel_dp)
+			continue;
+
+		intel_dp_queue_modeset_retry_work(connector);
+	}
+}
+
+void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state,
+				     struct intel_encoder *encoder,
+				     const struct intel_crtc_state *new_crtc_state,
+				     const struct drm_connector_state *new_conn_state)
+{
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
+	struct drm_dp_tunnel *tunnel = new_crtc_state->dp_tunnel_ref.tunnel;
+	const struct drm_dp_tunnel_state *new_tunnel_state;
+	int err;
+
+	if (!tunnel)
+		return;
+
+	new_tunnel_state = drm_dp_tunnel_atomic_get_new_state(&state->base, tunnel);
+
+	err = drm_dp_tunnel_alloc_bw(tunnel,
+				     drm_dp_tunnel_atomic_get_tunnel_bw(new_tunnel_state));
+	if (!err)
+		return;
+
+	if (!intel_digital_port_connected(encoder))
+		return;
+
+	drm_dbg_kms(&i915->drm,
+		    "[ENCODER:%d:%s] DP tunnel BW allocation failed on a connected sink.\n",
+		    encoder->base.base.id,
+		    encoder->base.name);
+
+	queue_modeset_retry_work(state, encoder, new_crtc_state, new_conn_state);
+}
+
+void intel_dp_tunnel_atomic_free_bw(struct intel_atomic_state *state,
+				    struct intel_encoder *encoder,
+				    const struct intel_crtc_state *old_crtc_state,
+				    const struct drm_connector_state *old_conn_state)
+{
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
+	struct intel_crtc *old_crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
+	struct drm_dp_tunnel *tunnel;
+	int err;
+
+	tunnel = intel_dp_tunnel_atomic_get_inherited_state(state, old_crtc);
+	if (tunnel) {
+		drm_dbg_kms(&i915->drm,
+			    "[DPTUN %s][ENCODER:%d:%s] free BW for inherited tunnel\n",
+			    drm_dp_tunnel_name(tunnel),
+			    encoder->base.base.id,
+			    encoder->base.name);
+
+		err = drm_dp_tunnel_alloc_bw(tunnel, 0);
+		if (err)
+			goto out;
+	}
+
+	if (!old_crtc_state->dp_tunnel_ref.tunnel)
+		return;
+
+	err = drm_dp_tunnel_alloc_bw(old_crtc_state->dp_tunnel_ref.tunnel, 0);
+	if (!err)
+		return;
+
+out:
+	if (!intel_digital_port_connected(encoder))
+		return;
+
+	drm_dbg_kms(&i915->drm,
+		    "[ENCODER:%d:%s] DP tunnel BW freeing failed on a connected sink. (err %d)\n",
+		    encoder->base.base.id,
+		    encoder->base.name,
+		    err);
+
+	queue_modeset_retry_work(state, encoder, old_crtc_state, old_conn_state);
+}
+
+int intel_dp_tunnel_mgr_init(struct drm_i915_private *i915)
+{
+	struct drm_dp_tunnel_mgr *tunnel_mgr;
+	struct drm_connector_list_iter connector_list_iter;
+	struct intel_connector *connector;
+	int dp_connectors = 0;
+
+	drm_connector_list_iter_begin(&i915->drm, &connector_list_iter);
+	for_each_intel_connector_iter(connector, &connector_list_iter) {
+		if (connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+			continue;
+
+		dp_connectors++;
+	}
+	drm_connector_list_iter_end(&connector_list_iter);
+
+	tunnel_mgr = drm_dp_tunnel_mgr_create(&i915->drm, dp_connectors);
+	if (IS_ERR(tunnel_mgr))
+		return PTR_ERR(tunnel_mgr);
+
+	i915->display.dp_tunnel_mgr = tunnel_mgr;
+
+	return 0;
+}
+
+void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915)
+{
+	drm_dp_tunnel_mgr_destroy(i915->display.dp_tunnel_mgr);
+	i915->display.dp_tunnel_mgr = NULL;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.h b/drivers/gpu/drm/i915/display/intel_dp_tunnel.h
new file mode 100644
index 0000000000000..374e65b0dde78
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_DP_TUNNEL_H__
+#define __INTEL_DP_TUNNEL_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct drm_connector_state;
+struct drm_dp_tunnel;
+struct drm_modeset_acquire_ctx;
+
+struct intel_atomic_state;
+struct intel_connector;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_dp;
+struct intel_encoder;
+struct intel_link_bw_limits;
+
+int intel_dp_detect_tunnel(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx);
+void intel_dp_disconnect_tunnel(struct intel_dp *intel_dp);
+void intel_dp_destroy_tunnel(struct intel_dp *intel_dp);
+void intel_dp_resume_tunnel(struct intel_dp *intel_dp, bool dpcd_updated);
+void intel_dp_suspend_tunnel(struct intel_dp *intel_dp);
+
+bool intel_dp_has_bw_alloc_tunnel(struct intel_dp *intel_dp);
+
+int intel_dp_tunnel_atomic_add_inherited_state(struct intel_atomic_state *state,
+					       struct drm_dp_tunnel *tunnel,
+					       const struct intel_crtc *crtc);
+struct drm_dp_tunnel *
+intel_dp_tunnel_atomic_get_inherited_state(struct intel_atomic_state *state,
+					   const struct intel_crtc *crtc);
+void
+intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *state);
+
+void intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state,
+					      struct intel_dp *intel_dp,
+					      const struct intel_connector *connector,
+					      struct intel_crtc_state *crtc_state);
+
+int intel_dp_tunnel_atomic_add_state_for_crtc(struct intel_atomic_state *state,
+					      struct intel_crtc *crtc);
+int intel_dp_tunnel_atomic_check_link(struct intel_atomic_state *state,
+				      struct intel_link_bw_limits *limits);
+int intel_dp_tunnel_atomic_check_tunnels(struct intel_atomic_state *state,
+					 struct intel_dp *intel_dp,
+					 struct intel_connector *connector);
+void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state,
+				     struct intel_encoder *encoder,
+				     const struct intel_crtc_state *new_crtc_state,
+				     const struct drm_connector_state *new_conn_state);
+void intel_dp_tunnel_atomic_free_bw(struct intel_atomic_state *state,
+				    struct intel_encoder *encoder,
+				    const struct intel_crtc_state *old_crtc_state,
+				    const struct drm_connector_state *old_conn_state);
+
+int intel_dp_tunnel_mgr_init(struct drm_i915_private *i915);
+void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915);
+
+#endif
-- 
2.39.2



More information about the Intel-gfx-trybot mailing list