[Intel-gfx] [PATCH v5 4/4] drm/i915: Correctly map DBUF slices to pipes

Stanislav Lisovskiy stanislav.lisovskiy at intel.com
Tue Nov 26 16:28:39 UTC 2019


Add proper DBuf slice mapping to the corresponding
pipes, depending on the pipe configuration, as
stated in BSpec.
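
As a rough illustration of the resulting allocation arithmetic
(the numbers are illustrative only, assuming two DBuf slices of
1024 blocks each and DBuf S2 being the second slice bit, as
implied by the offset helper below):

	slice_size     = 2048 / 2;                             /* 1024 */
	offset         = (ffs(DBUF_S2_BIT) - 1) * slice_size;  /* 1024 */
	ddb_range_size = hweight8(DBUF_S2_BIT) * slice_size;   /* 1024 */

so for a pipe that is only allowed DBuf S2, the width-proportional
split is now done inside [1024, 2048) instead of across the whole
DDB.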

Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy at intel.com>
---
 drivers/gpu/drm/i915/intel_pm.c | 250 ++++++++++++++++++++++++++++++--
 1 file changed, 236 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 4c30dddb943a..66657a207ed4 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3815,6 +3815,25 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state)
 	return true;
 }
 
+/*
+ * Calculate the initial DBuf slice offset, based on slice size
+ * and mask (i.e. if the slice size is 1024 and only the second
+ * slice is enabled, the offset would be 1024).
+ */
+static u32 icl_get_first_dbuf_slice_offset(u32 dbuf_slice_mask,
+					   u32 slice_size, u32 ddb_size)
+{
+	u32 offset = 0;
+
+	if (!dbuf_slice_mask)
+		return 0;
+
+	offset = (ffs(dbuf_slice_mask) - 1) * slice_size;
+
+	WARN_ON(offset >= ddb_size);
+	return offset;
+}
+
 static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
 			      const struct intel_crtc_state *crtc_state,
 			      const u64 total_data_rate,
@@ -3828,18 +3847,13 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
 	if (INTEL_GEN(dev_priv) < 11)
 		return ddb_size - 4; /* 4 blocks for bypass path allocation */
 
-	/*
-	 * FIXME: Enabled slices should be only
-	 * in according to BSpec and be determined
-	 * by num active pipes(BSpec 12716 and 49255).
-	 * For now set mask for 1st slice only.
-	 */
-	ddb->enabled_slices = DBUF_S1_BIT;
-	ddb_size /= 2;
-
 	return ddb_size;
 }
 
+static u32 i915_possible_dbuf_slices(struct drm_i915_private *dev_priv,
+				     int pipe, u32 active_pipes,
+				     const struct intel_crtc_state *crtc_state);
+
 static void
 skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 				   const struct intel_crtc_state *crtc_state,
@@ -3855,7 +3869,14 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 	u32 pipe_width = 0, total_width = 0, width_before_pipe = 0;
 	enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
 	u16 ddb_size;
+	u32 ddb_range_size;
 	u32 i;
+	u32 dbuf_slice_mask;
+	u32 active_pipes;
+	u32 offset;
+	u32 slice_size;
+	u32 total_slice_mask;
+	u32 start, end;
 
 	if (WARN_ON(!state) || !crtc_state->hw.active) {
 		alloc->start = 0;
@@ -3864,14 +3885,23 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 		return;
 	}
 
-	if (intel_state->active_pipe_changes)
+	if (intel_state->active_pipe_changes) {
 		*num_active = hweight8(intel_state->active_pipes);
-	else
+		active_pipes = intel_state->active_pipes;
+	} else {
 		*num_active = hweight8(dev_priv->active_pipes);
+		active_pipes = dev_priv->active_pipes;
+	}
 
 	ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate,
 				      *num_active, ddb);
 
+	DRM_DEBUG_KMS("Got total ddb size %d\n", ddb_size);
+
+	slice_size = ddb_size / INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+
+	DRM_DEBUG_KMS("Got DBuf slice size %d\n", slice_size);
+
 	/*
 	 * If the state doesn't change the active CRTC's or there is no
 	 * modeset request, then there's no need to recalculate;
@@ -3889,18 +3919,68 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 		return;
 	}
 
+	/*
+	 * Get the allowed DBuf slices for the corresponding pipe and platform.
+	 */
+	dbuf_slice_mask = i915_possible_dbuf_slices(dev_priv, for_pipe,
+						    active_pipes, crtc_state);
+
+	DRM_DEBUG_KMS("DBuf slice mask %x pipe %d active pipes %x\n",
+		      dbuf_slice_mask,
+		      for_pipe, active_pipes);
+
+	/*
+	 * Figure out at which DBuf slice we start, e.g. if we start at DBuf S2
+	 * and the slice size is 1024, the offset would be 1024.
+	 */
+	offset = icl_get_first_dbuf_slice_offset(dbuf_slice_mask,
+						 slice_size, ddb_size);
+
+	/*
+	 * Figure out the total size of the allowed DBuf slices, which is
+	 * basically the number of slices allowed for that pipe multiplied
+	 * by the slice size. Inside this range DDB entries are still
+	 * allocated in proportion to the display width.
+	 */
+	ddb_range_size = hweight8(dbuf_slice_mask) * slice_size;
+
 	/*
 	 * Watermark/ddb requirement highly depends upon width of the
 	 * framebuffer, So instead of allocating DDB equally among pipes
 	 * distribute DDB based on resolution/width of the display.
 	 */
+	total_slice_mask = dbuf_slice_mask;
 	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
 		const struct drm_display_mode *adjusted_mode =
 			&crtc_state->hw.adjusted_mode;
 		enum pipe pipe = crtc->pipe;
 		int hdisplay, vdisplay;
+		u32 pipe_dbuf_slice_mask =
+					i915_possible_dbuf_slices(dev_priv,
+								  pipe,
+								  active_pipes,
+								  crtc_state);
 
-		if (!crtc_state->hw.enable)
+		if (!crtc_state->hw.active)
+			continue;
+
+		/*
+		 * According to BSpec a pipe can share one DBuf slice with
+		 * other pipes, or use multiple DBuf slices; in both cases we
+		 * account for other pipes only if they have exactly the same
+		 * slice mask. However, we still need to track how many slices
+		 * should be enabled in total.
+		 */
+		total_slice_mask |= pipe_dbuf_slice_mask;
+
+		/*
+		 * Do not account for pipes using other slice sets. Luckily,
+		 * as of the current BSpec, slice sets do not partially
+		 * intersect (pipes share either the same single slice or the
+		 * same slice set, i.e. there is no partial intersection), so
+		 * it is enough to check for equality for now.
+		 */
+		if (dbuf_slice_mask != pipe_dbuf_slice_mask)
 			continue;
 
 		drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
@@ -3912,8 +3992,19 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 			pipe_width = hdisplay;
 	}
 
-	alloc->start = ddb_size * width_before_pipe / total_width;
-	alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width;
+	ddb->enabled_slices = total_slice_mask;
+
+	start = ddb_range_size * width_before_pipe / total_width;
+	end = ddb_range_size * (width_before_pipe + pipe_width) / total_width;
+
+	alloc->start = offset + start;
+	alloc->end = offset + end;
+
+	DRM_DEBUG_KMS("Pipe %d ddb %d-%d\n", for_pipe,
+		      alloc->start, alloc->end);
+	DRM_DEBUG_KMS("Enabled ddb slices mask %x num supported %d\n",
+		      ddb->enabled_slices,
+		      INTEL_INFO(dev_priv)->num_supported_dbuf_slices);
 }
 
 static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
@@ -4035,6 +4126,7 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
 			  struct skl_ddb_values *ddb /* out */)
 {
 	ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
+	DRM_DEBUG_KMS("Got hw dbuf slices mask %x\n", ddb->enabled_slices);
 }
 
 /*
@@ -4084,6 +4176,136 @@ skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
 	return mul_fixed16(downscale_w, downscale_h);
 }
 
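+/*
+ * struct dbuf_slice_conf_entry - DBuf slice masks to use for each pipe
+ * for a given combination of active pipes.
+ * @active_pipes: bitmask of active pipes this entry applies to
+ * @dbuf_mask: mask of allowed DBuf slices, indexed by pipe
+ */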
+struct dbuf_slice_conf_entry {
+	u32 active_pipes;
+	u32 dbuf_mask[I915_MAX_PIPES];
+};
+
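+/*
+ * Expand an active pipe mask and up to four per-pipe DBuf slice masks
+ * into a struct dbuf_slice_conf_entry initializer.
+ */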
+#define DBUF_TO_4_PIPES_MAP(active_pipes, dbuf1, dbuf2, dbuf3, dbuf4) \
+	{ active_pipes, { dbuf1, dbuf2, dbuf3, dbuf4 } }
+
+/*
+ * Table taken from BSpec 12716.
+ * Pipes do have some preferred DBuf slice affinity, plus there
+ * are some hardcoded requirements on how those should be
+ * distributed for multipipe scenarios. With more DBuf slices
+ * the algorithm would get even more messy and less readable, so
+ * it was decided to use a table almost as-is from BSpec itself -
+ * that way it is at least easier to compare, change and check.
+ */
+static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] = {
+	DBUF_TO_4_PIPES_MAP(BIT(PIPE_A), DBUF_S1_BIT, 0, 0, 0),
+	DBUF_TO_4_PIPES_MAP(BIT(PIPE_B), 0, DBUF_S1_BIT, 0, 0),
+	DBUF_TO_4_PIPES_MAP(BIT(PIPE_C), 0, 0, DBUF_S2_BIT, 0),
+	DBUF_TO_4_PIPES_MAP(BIT(PIPE_A) | BIT(PIPE_B),
+			     DBUF_S1_BIT, DBUF_S2_BIT, 0, 0),
+	DBUF_TO_4_PIPES_MAP(BIT(PIPE_A) | BIT(PIPE_C),
+			     DBUF_S1_BIT, 0, DBUF_S2_BIT, 0),
+	DBUF_TO_4_PIPES_MAP(BIT(PIPE_B) | BIT(PIPE_C), 0,
+			     DBUF_S1_BIT, DBUF_S2_BIT, 0),
+	DBUF_TO_4_PIPES_MAP(BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+			     DBUF_S1_BIT, DBUF_S1_BIT, DBUF_S2_BIT, 0)
+};
+
+/*
+ * Table taken from BSpec 49255.
+ * Pipes do have some preferred DBuf slice affinity, plus there
+ * are some hardcoded requirements on how those should be
+ * distributed for multipipe scenarios. With more DBuf slices
+ * the algorithm would get even more messy and less readable, so
+ * it was decided to use a table almost as-is from BSpec itself -
+ * that way it is at least easier to compare, change and check.
+ */
+static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] = {
+	DBUF_TO_4_PIPES_MAP(BIT(PIPE_A), DBUF_S1_BIT | DBUF_S2_BIT, 0, 0, 0),
+	DBUF_TO_4_PIPES_MAP(BIT(PIPE_B), 0, DBUF_S1_BIT | DBUF_S2_BIT, 0, 0),
+	DBUF_TO_4_PIPES_MAP(BIT(PIPE_C), 0, 0, DBUF_S1_BIT | DBUF_S2_BIT, 0),
+	DBUF_TO_4_PIPES_MAP(BIT(PIPE_D), 0, 0, 0, DBUF_S1_BIT | DBUF_S2_BIT),
+	DBUF_TO_4_PIPES_MAP(BIT(PIPE_A) | BIT(PIPE_B),
+			     DBUF_S2_BIT, DBUF_S1_BIT, 0, 0),
+	DBUF_TO_4_PIPES_MAP(BIT(PIPE_A) | BIT(PIPE_C),
+			     DBUF_S1_BIT, 0, DBUF_S2_BIT, 0),
+	DBUF_TO_4_PIPES_MAP(BIT(PIPE_A) | BIT(PIPE_D),
+			     DBUF_S1_BIT, 0, 0, DBUF_S2_BIT),
+	DBUF_TO_4_PIPES_MAP(BIT(PIPE_B) | BIT(PIPE_C),
+			     0, DBUF_S1_BIT, DBUF_S2_BIT, 0),
+	DBUF_TO_4_PIPES_MAP(BIT(PIPE_B) | BIT(PIPE_D),
+			     0, DBUF_S1_BIT, 0, DBUF_S2_BIT),
+	DBUF_TO_4_PIPES_MAP(BIT(PIPE_C) | BIT(PIPE_D),
+			     0, 0, DBUF_S1_BIT, DBUF_S2_BIT),
+	DBUF_TO_4_PIPES_MAP(BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+			     DBUF_S1_BIT, DBUF_S1_BIT, DBUF_S2_BIT, 0),
+	DBUF_TO_4_PIPES_MAP(BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
+			     DBUF_S1_BIT, DBUF_S1_BIT, 0, DBUF_S2_BIT),
+	DBUF_TO_4_PIPES_MAP(BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
+			     DBUF_S1_BIT, 0, DBUF_S2_BIT, DBUF_S2_BIT),
+	DBUF_TO_4_PIPES_MAP(BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+			     0, DBUF_S1_BIT, DBUF_S2_BIT, DBUF_S2_BIT)
+};
+
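+/*
+ * Find the entry in @dbuf_slices whose active_pipes mask matches
+ * @active_pipes exactly and return its DBuf slice mask for @pipe,
+ * or 0 if no matching entry exists.
+ */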
+static u32 i915_find_pipe_conf(int pipe,
+			       u32 active_pipes,
+			       const struct dbuf_slice_conf_entry *dbuf_slices,
+			       int size)
+{
+	int i;
+
+	for (i = 0; i < size; i++) {
+		if (dbuf_slices[i].active_pipes == active_pipes)
+			return dbuf_slices[i].dbuf_mask[pipe];
+	}
+	return 0;
+}
+
+/*
+ * This function finds an entry with the same enabled pipe configuration
+ * and returns the corresponding DBuf slice mask as stated in BSpec for
+ * the particular platform.
+ */
+static u32 icl_possible_dbuf_slices(int pipe,
+				    u32 active_pipes,
+				    const struct intel_crtc_state *crtc_state)
+{
+	return i915_find_pipe_conf(pipe, active_pipes,
+				   icl_allowed_dbufs,
+				   ARRAY_SIZE(icl_allowed_dbufs));
+}
+
+static u32 tgl_possible_dbuf_slices(int pipe,
+				    u32 active_pipes,
+				    const struct intel_crtc_state *crtc_state)
+{
+	return i915_find_pipe_conf(pipe, active_pipes,
+				   tgl_allowed_dbufs,
+				   ARRAY_SIZE(tgl_allowed_dbufs));
+}
+
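+/*
+ * Return the mask of DBuf slices allowed for the given pipe on the
+ * current platform, based on which pipes are currently active.
+ */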
+static u32 i915_possible_dbuf_slices(struct drm_i915_private *dev_priv,
+				     int pipe, u32 active_pipes,
+				     const struct intel_crtc_state *crtc_state)
+{
+	if (IS_GEN(dev_priv, 11))
+		return icl_possible_dbuf_slices(pipe,
+						active_pipes,
+						crtc_state);
+	else if (IS_GEN(dev_priv, 12))
+		return tgl_possible_dbuf_slices(pipe,
+						active_pipes,
+						crtc_state);
+	/*
+	 * For anything else just return one slice for now.
+	 * Should be extended for other platforms.
+	 */
+	return DBUF_S1_BIT;
+}
+
 static u64
 skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
 			     const struct intel_plane_state *plane_state,
-- 
2.17.1


