[PATCH v14 2/6] drm/i915: Introduce skl_plane_wm_level accessor.

Stanislav Lisovskiy stanislav.lisovskiy at intel.com
Tue Jan 28 16:14:57 UTC 2020


For the upcoming Gen12 SAGV implementation we need
to seamlessly alter the calculated wm levels, depending
on whether we are allowed to enable SAGV or not.

This accessor gives us the additional flexibility
to do that.

For now this accessor simply works as a "pass-through"
function. This will change in upcoming patches of this
series.

v2: - plane_id -> plane->id (Ville Syrjälä)
    - Moved wm_level var to have more local scope
      (Ville Syrjälä)
    - Renamed yuv to color_plane in skl_plane_wm_level
      (Ville Syrjälä)

Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy at intel.com>
---
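Note (illustrative only, not part of this patch): later patches in the
series are expected to make this accessor return SAGV-adjusted levels
when SAGV can be enabled. A minimal sketch of that direction, assuming a
hypothetical use_sagv_wm flag and sagv_wm0/sagv_uv_wm0 members which do
not exist at this point:

static const struct skl_wm_level *
skl_plane_wm_level(struct intel_plane *plane,
		   const struct intel_crtc_state *crtc_state,
		   int level,
		   int color_plane)
{
	const struct skl_plane_wm *wm =
		&crtc_state->wm.skl.optimal.planes[plane->id];

	/*
	 * Hypothetical future behaviour: when SAGV is allowed, level 0
	 * could be swapped for a SAGV-specific watermark. The names
	 * use_sagv_wm, sagv_wm0 and sagv_uv_wm0 are placeholders.
	 */
	if (level == 0 && crtc_state->wm.skl.use_sagv_wm)
		return color_plane ? &wm->sagv_uv_wm0 : &wm->sagv_wm0;

	return color_plane ? &wm->uv_wm[level] : &wm->wm[level];
}

Callers converted in this patch, such as skl_allocate_pipe_ddb() and
skl_write_plane_wm(), would then pick up the adjusted levels without any
further changes, which is the point of routing all level lookups through
the accessor.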
 drivers/gpu/drm/i915/intel_pm.c | 122 +++++++++++++++++++++-----------
 1 file changed, 82 insertions(+), 40 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 221fcad7cc4a..5109757ced6d 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4273,6 +4273,18 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
 	return total_data_rate;
 }
 
+static const struct skl_wm_level *
+skl_plane_wm_level(struct intel_plane *plane,
+		   const struct intel_crtc_state *crtc_state,
+		   int level,
+		   int color_plane)
+{
+	const struct skl_plane_wm *wm =
+		&crtc_state->wm.skl.optimal.planes[plane->id];
+
+	return color_plane ? &wm->uv_wm[level] : &wm->wm[level];
+}
+
 static int
 skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
 		      struct skl_ddb_allocation *ddb /* out */)
@@ -4286,7 +4298,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
 	u16 total[I915_MAX_PLANES] = {};
 	u16 uv_total[I915_MAX_PLANES] = {};
 	u64 total_data_rate;
-	enum plane_id plane_id;
+	struct intel_plane *plane;
 	int num_active;
 	u64 plane_data_rate[I915_MAX_PLANES] = {};
 	u64 uv_plane_data_rate[I915_MAX_PLANES] = {};
@@ -4338,22 +4350,28 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
 	 */
 	for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
 		blocks = 0;
-		for_each_plane_id_on_crtc(intel_crtc, plane_id) {
-			const struct skl_plane_wm *wm =
-				&crtc_state->wm.skl.optimal.planes[plane_id];
-
-			if (plane_id == PLANE_CURSOR) {
-				if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) {
-					drm_WARN_ON(&dev_priv->drm,
-						    wm->wm[level].min_ddb_alloc != U16_MAX);
+
+		for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
+			const struct skl_wm_level *wm_level;
+			const struct skl_wm_level *wm_uv_level;
+
+			wm_level = skl_plane_wm_level(plane, crtc_state,
+						      level, false);
+			wm_uv_level = skl_plane_wm_level(plane, crtc_state,
+							 level, true);
+
+			if (plane->id == PLANE_CURSOR) {
+				if (wm_level->min_ddb_alloc > total[PLANE_CURSOR]) {
+					drm_WARN_ON(&dev_priv->drm,
+						    wm_level->min_ddb_alloc != U16_MAX);
 					blocks = U32_MAX;
 					break;
 				}
 				continue;
 			}
 
-			blocks += wm->wm[level].min_ddb_alloc;
-			blocks += wm->uv_wm[level].min_ddb_alloc;
+			blocks += wm_level->min_ddb_alloc;
+			blocks += wm_uv_level->min_ddb_alloc;
 		}
 
 		if (blocks <= alloc_size) {
@@ -4375,13 +4393,18 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
 	 * watermark level, plus an extra share of the leftover blocks
 	 * proportional to its relative data rate.
 	 */
-	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
-		const struct skl_plane_wm *wm =
-			&crtc_state->wm.skl.optimal.planes[plane_id];
+	for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
+		const struct skl_wm_level *wm_level;
+		const struct skl_wm_level *wm_uv_level;
 		u64 rate;
 		u16 extra;
 
-		if (plane_id == PLANE_CURSOR)
+		wm_level = skl_plane_wm_level(plane, crtc_state,
+					      level, false);
+		wm_uv_level = skl_plane_wm_level(plane, crtc_state,
+						 level, true);
+
+		if (plane->id == PLANE_CURSOR)
 			continue;
 
 		/*
@@ -4391,22 +4414,22 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
 		if (total_data_rate == 0)
 			break;
 
-		rate = plane_data_rate[plane_id];
+		rate = plane_data_rate[plane->id];
 		extra = min_t(u16, alloc_size,
 			      DIV64_U64_ROUND_UP(alloc_size * rate,
 						 total_data_rate));
-		total[plane_id] = wm->wm[level].min_ddb_alloc + extra;
+		total[plane->id] = wm_level->min_ddb_alloc + extra;
 		alloc_size -= extra;
 		total_data_rate -= rate;
 
 		if (total_data_rate == 0)
 			break;
 
-		rate = uv_plane_data_rate[plane_id];
+		rate = uv_plane_data_rate[plane->id];
 		extra = min_t(u16, alloc_size,
 			      DIV64_U64_ROUND_UP(alloc_size * rate,
 						 total_data_rate));
-		uv_total[plane_id] = wm->uv_wm[level].min_ddb_alloc + extra;
+		uv_total[plane->id] = wm_uv_level->min_ddb_alloc + extra;
 		alloc_size -= extra;
 		total_data_rate -= rate;
 	}
@@ -4414,13 +4437,13 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
 
 	/* Set the actual DDB start/end points for each plane */
 	start = alloc->start;
-	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+	for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
 		struct skl_ddb_entry *plane_alloc =
-			&crtc_state->wm.skl.plane_ddb_y[plane_id];
+			&crtc_state->wm.skl.plane_ddb_y[plane->id];
 		struct skl_ddb_entry *uv_plane_alloc =
-			&crtc_state->wm.skl.plane_ddb_uv[plane_id];
+			&crtc_state->wm.skl.plane_ddb_uv[plane->id];
 
-		if (plane_id == PLANE_CURSOR)
+		if (plane->id == PLANE_CURSOR)
 			continue;
 
 		/* Gen11+ uses a separate plane for UV watermarks */
@@ -4428,15 +4451,15 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
 			    INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);
 
 		/* Leave disabled planes at (0,0) */
-		if (total[plane_id]) {
+		if (total[plane->id]) {
 			plane_alloc->start = start;
-			start += total[plane_id];
+			start += total[plane->id];
 			plane_alloc->end = start;
 		}
 
-		if (uv_total[plane_id]) {
+		if (uv_total[plane->id]) {
 			uv_plane_alloc->start = start;
-			start += uv_total[plane_id];
+			start += uv_total[plane->id];
 			uv_plane_alloc->end = start;
 		}
 	}
@@ -4448,9 +4471,16 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
 	 * that aren't actually possible.
 	 */
 	for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
-		for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+		for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
+			const struct skl_wm_level *wm_level;
+			const struct skl_wm_level *wm_uv_level;
 			struct skl_plane_wm *wm =
-				&crtc_state->wm.skl.optimal.planes[plane_id];
+				&crtc_state->wm.skl.optimal.planes[plane->id];
+
+			wm_level = skl_plane_wm_level(plane, crtc_state,
+						      level, false);
+			wm_uv_level = skl_plane_wm_level(plane, crtc_state,
+							 level, true);
 
 			/*
 			 * We only disable the watermarks for each plane if
@@ -4464,9 +4494,10 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
 			 *  planes must be enabled before the level will be used."
 			 * So this is actually safe to do.
 			 */
-			if (wm->wm[level].min_ddb_alloc > total[plane_id] ||
-			    wm->uv_wm[level].min_ddb_alloc > uv_total[plane_id])
-				memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
+			if (wm_level->min_ddb_alloc > total[plane->id] ||
+			    wm_uv_level->min_ddb_alloc > uv_total[plane->id])
+				memset(&wm->wm[level], 0,
+				       sizeof(struct skl_wm_level));
 
 			/*
 			 * Wa_1408961008:icl, ehl
@@ -4474,9 +4505,14 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
 			 */
 			if (IS_GEN(dev_priv, 11) &&
 			    level == 1 && wm->wm[0].plane_en) {
-				wm->wm[level].plane_res_b = wm->wm[0].plane_res_b;
-				wm->wm[level].plane_res_l = wm->wm[0].plane_res_l;
-				wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
+				wm_level = skl_plane_wm_level(plane, crtc_state,
+							      0, false);
+				wm->wm[level].plane_res_b =
+					wm_level->plane_res_b;
+				wm->wm[level].plane_res_l =
+					wm_level->plane_res_l;
+				wm->wm[level].ignore_lines =
+					wm_level->ignore_lines;
 			}
 		}
 	}
@@ -4485,11 +4521,11 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
 	 * Go back and disable the transition watermark if it turns out we
 	 * don't have enough DDB blocks for it.
 	 */
-	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+	for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
 		struct skl_plane_wm *wm =
-			&crtc_state->wm.skl.optimal.planes[plane_id];
+			&crtc_state->wm.skl.optimal.planes[plane->id];
 
-		if (wm->trans_wm.plane_res_b >= total[plane_id])
+		if (wm->trans_wm.plane_res_b >= total[plane->id])
 			memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
 	}
 
@@ -5099,10 +5135,13 @@ void skl_write_plane_wm(struct intel_plane *plane,
 		&crtc_state->wm.skl.plane_ddb_y[plane_id];
 	const struct skl_ddb_entry *ddb_uv =
 		&crtc_state->wm.skl.plane_ddb_uv[plane_id];
+	const struct skl_wm_level *wm_level;
 
 	for (level = 0; level <= max_level; level++) {
+		wm_level = skl_plane_wm_level(plane, crtc_state, level, false);
+
 		skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
-				   &wm->wm[level]);
+				   wm_level);
 	}
 	skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
 			   &wm->trans_wm);
@@ -5133,10 +5172,13 @@ void skl_write_cursor_wm(struct intel_plane *plane,
 		&crtc_state->wm.skl.optimal.planes[plane_id];
 	const struct skl_ddb_entry *ddb =
 		&crtc_state->wm.skl.plane_ddb_y[plane_id];
+	const struct skl_wm_level *wm_level;
 
 	for (level = 0; level <= max_level; level++) {
+		wm_level = skl_plane_wm_level(plane, crtc_state, level, false);
+
 		skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
-				   &wm->wm[level]);
+				   wm_level);
 	}
 	skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
 
-- 
2.24.1.485.gad05a3d8e5


