[Intel-gfx] [PATCH 13/14] drm/i915: Move ddb/wm programming into plane update/disable hooks on skl+
Ville Syrjala
ville.syrjala@linux.intel.com
Thu Nov 1 15:06:04 UTC 2018
From: Ville Syrjälä <ville.syrjala@linux.intel.com>
On SKL+ the plane WM/BUF_CFG registers are a proper part of each
plane's register set. That means accessing them will cancel any
pending plane update, and we would need a PLANE_SURF register write
to arm the wm/ddb change as well.
To avoid all the problems with that, let's just move the wm/ddb
programming into the plane update/disable hooks. Now all plane
registers get written in one (hopefully atomic) operation.
To make that feasible, we'll move the plane ddb tracking into
the crtc state. Watermarks were already tracked there.
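As a rough illustration (a sketch only, not code from this patch), the
register write ordering that skl_program_plane() ends up with after this
change looks like the following; surf_addr stands in for the surface
address computed earlier in the function:

  static void skl_program_plane_sketch(struct intel_plane *plane,
                                       const struct intel_crtc_state *crtc_state,
                                       u32 surf_addr)
  {
          struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
          enum plane_id plane_id = plane->id;
          enum pipe pipe = plane->pipe;
          unsigned long irqflags;

          spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

          /* PLANE_WM/PLANE_BUF_CFG now come from crtc_state->wm.skl and
           * are written together with the rest of the plane registers */
          skl_write_plane_wm(plane, crtc_state);

          /* ... PLANE_CTL, PLANE_STRIDE, PLANE_POS, etc. ... */

          /* the PLANE_SURF write arms everything written above */
          I915_WRITE_FW(PLANE_SURF(pipe, plane_id), surf_addr);

          spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  }

skl_disable_plane() gains the same skl_write_plane_wm() call before it
clears PLANE_CTL/PLANE_SURF, and the cursor path calls
skl_write_cursor_wm() from i9xx_update_cursor(), so all of the skl+
plane hooks follow this pattern.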
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
drivers/gpu/drm/i915/i915_debugfs.c | 21 +-
drivers/gpu/drm/i915/i915_drv.h | 3 -
drivers/gpu/drm/i915/intel_display.c | 16 +-
drivers/gpu/drm/i915/intel_display.h | 11 +-
drivers/gpu/drm/i915/intel_drv.h | 9 +
drivers/gpu/drm/i915/intel_pm.c | 450 ++++++++++++---------------
drivers/gpu/drm/i915/intel_sprite.c | 4 +
7 files changed, 250 insertions(+), 264 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 041319d48ca3..4fde18422839 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3436,31 +3436,32 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
- struct skl_ddb_allocation *ddb;
struct skl_ddb_entry *entry;
- enum pipe pipe;
- int plane;
+ struct intel_crtc *crtc;
if (INTEL_GEN(dev_priv) < 9)
return -ENODEV;
drm_modeset_lock_all(dev);
- ddb = &dev_priv->wm.skl_hw.ddb;
-
seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
- for_each_pipe(dev_priv, pipe) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
+
seq_printf(m, "Pipe %c\n", pipe_name(pipe));
- for_each_universal_plane(dev_priv, pipe, plane) {
- entry = &ddb->plane[pipe][plane];
- seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
entry->start, entry->end,
skl_ddb_entry_size(entry));
}
- entry = &ddb->plane[pipe][PLANE_CURSOR];
+ entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
entry->end, skl_ddb_entry_size(entry));
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c9e5bab6861b..4ed37ee23aac 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1231,9 +1231,6 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
}
struct skl_ddb_allocation {
- /* packed/y */
- struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
- struct skl_ddb_entry uv_plane[I915_MAX_PIPES][I915_MAX_PLANES];
u8 enabled_slices; /* GEN11 has configurable 2 slices */
};
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 6088ae554e56..6905a267a13f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -10072,6 +10072,10 @@ static void i9xx_update_cursor(struct intel_plane *plane,
* except when the plane is getting enabled at which time
* the CURCNTR write arms the update.
*/
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ skl_write_cursor_wm(plane, crtc_state);
+
if (plane->cursor.base != base ||
plane->cursor.size != fbc_ctl ||
plane->cursor.cntl != cntl) {
@@ -11853,6 +11857,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
struct skl_pipe_wm hw_wm, *sw_wm;
struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
+ struct skl_ddb_entry hw_ddb_y[I915_MAX_PLANES];
+ struct skl_ddb_entry hw_ddb_uv[I915_MAX_PLANES];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const enum pipe pipe = intel_crtc->pipe;
int plane, level, max_level = ilk_wm_max_level(dev_priv);
@@ -11863,6 +11869,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
skl_pipe_wm_get_hw_state(crtc, &hw_wm);
sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
+ skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv);
+
skl_ddb_get_hw_state(dev_priv, &hw_ddb);
sw_ddb = &dev_priv->wm.skl_hw.ddb;
@@ -11905,8 +11913,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
}
/* DDB */
- hw_ddb_entry = &hw_ddb.plane[pipe][plane];
- sw_ddb_entry = &sw_ddb->plane[pipe][plane];
+ hw_ddb_entry = &hw_ddb_y[plane];
+ sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
@@ -11955,8 +11963,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
}
/* DDB */
- hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
- sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+ hw_ddb_entry = &hw_ddb_y[PLANE_CURSOR];
+ sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
index df9e6ebb27de..078406dc65e5 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -319,7 +319,7 @@ struct intel_link_m_n {
&(dev)->mode_config.plane_list, \
base.head) \
for_each_if((plane_mask) & \
- drm_plane_mask(&intel_plane->base)))
+ drm_plane_mask(&intel_plane->base))
#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
list_for_each_entry(intel_plane, \
@@ -415,6 +415,15 @@ struct intel_link_m_n {
(__i)++) \
for_each_if(plane)
+#define for_each_oldnew_intel_crtc_in_state(__state, crtc, old_crtc_state, new_crtc_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_crtc && \
+ ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+ (old_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].old_state), \
+ (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
+ (__i)++) \
+ for_each_if(crtc)
+
void intel_link_compute_m_n(int bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index facd5cb0b540..8a93e0e8c89d 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -705,6 +705,8 @@ struct intel_crtc_wm_state {
/* gen9+ only needs 1-step wm programming */
struct skl_pipe_wm optimal;
struct skl_ddb_entry ddb;
+ struct skl_ddb_entry plane_ddb_y[I915_MAX_PLANES];
+ struct skl_ddb_entry plane_ddb_uv[I915_MAX_PLANES];
} skl;
struct {
@@ -2174,6 +2176,9 @@ void g4x_wm_get_hw_state(struct drm_device *dev);
void vlv_wm_get_hw_state(struct drm_device *dev);
void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev);
+void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
+ struct skl_ddb_entry *ddb_y,
+ struct skl_ddb_entry *ddb_uv);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
@@ -2188,6 +2193,10 @@ bool skl_wm_level_equals(const struct skl_wm_level *l1,
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
const struct skl_ddb_entry entries[],
int num_entries, int ignore_idx);
+void skl_write_plane_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
+void skl_write_cursor_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
bool ilk_disable_lp_wm(struct drm_device *dev);
int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
struct intel_crtc_state *cstate);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index b0720994fa0a..4c778bc153fa 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3911,68 +3911,70 @@ static void
skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
const enum pipe pipe,
const enum plane_id plane_id,
- struct skl_ddb_allocation *ddb /* out */)
+ struct skl_ddb_entry *ddb_y,
+ struct skl_ddb_entry *ddb_uv)
{
- u32 val, val2 = 0;
- int fourcc, pixel_format;
+ u32 val, val2;
+ u32 fourcc = 0;
/* Cursor doesn't support NV12/planar, so no extra calculation needed */
if (plane_id == PLANE_CURSOR) {
val = I915_READ(CUR_BUF_CFG(pipe));
- skl_ddb_entry_init_from_hw(dev_priv,
- &ddb->plane[pipe][plane_id], val);
+ skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
return;
}
val = I915_READ(PLANE_CTL(pipe, plane_id));
/* No DDB allocated for disabled planes */
- if (!(val & PLANE_CTL_ENABLE))
- return;
-
- pixel_format = val & PLANE_CTL_FORMAT_MASK;
- fourcc = skl_format_to_fourcc(pixel_format,
- val & PLANE_CTL_ORDER_RGBX,
- val & PLANE_CTL_ALPHA_MASK);
+ if (val & PLANE_CTL_ENABLE)
+ fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK,
+ val & PLANE_CTL_ORDER_RGBX,
+ val & PLANE_CTL_ALPHA_MASK);
- val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
- if (fourcc == DRM_FORMAT_NV12 && INTEL_GEN(dev_priv) < 11) {
+ if (INTEL_GEN(dev_priv) >= 11) {
+ val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
+ skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
+ } else {
+ val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
- skl_ddb_entry_init_from_hw(dev_priv,
- &ddb->plane[pipe][plane_id], val2);
- skl_ddb_entry_init_from_hw(dev_priv,
- &ddb->uv_plane[pipe][plane_id], val);
- } else {
- skl_ddb_entry_init_from_hw(dev_priv,
- &ddb->plane[pipe][plane_id], val);
+ if (fourcc == DRM_FORMAT_NV12)
+ swap(val, val2);
+
+ skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
+ skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2);
}
}
-void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
- struct skl_ddb_allocation *ddb /* out */)
+void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
+ struct skl_ddb_entry *ddb_y,
+ struct skl_ddb_entry *ddb_uv)
{
- struct intel_crtc *crtc;
-
- memset(ddb, 0, sizeof(*ddb));
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
- ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- enum intel_display_power_domain power_domain;
- enum plane_id plane_id;
- enum pipe pipe = crtc->pipe;
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ skl_ddb_get_hw_plane_state(dev_priv, pipe,
+ plane_id,
+ &ddb_y[plane_id],
+ &ddb_uv[plane_id]);
- power_domain = POWER_DOMAIN_PIPE(pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
- continue;
+ intel_display_power_put(dev_priv, power_domain);
+}
- for_each_plane_id_on_crtc(crtc, plane_id)
- skl_ddb_get_hw_plane_state(dev_priv, pipe,
- plane_id, ddb);
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
+ struct skl_ddb_allocation *ddb /* out */)
+{
+ memset(ddb, 0, sizeof(*ddb));
- intel_display_power_put(dev_priv, power_domain);
- }
+ ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
}
/*
@@ -4370,7 +4372,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct drm_crtc *crtc = cstate->base.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
uint16_t alloc_size, start;
uint16_t minimum[I915_MAX_PLANES] = {};
@@ -4383,8 +4384,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
uint16_t total_min_blocks = 0;
/* Clear the partitioning for disabled planes. */
- memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
- memset(ddb->uv_plane[pipe], 0, sizeof(ddb->uv_plane[pipe]));
+ memset(cstate->wm.skl.plane_ddb_y, 0, sizeof(cstate->wm.skl.plane_ddb_y));
+ memset(cstate->wm.skl.plane_ddb_uv, 0, sizeof(cstate->wm.skl.plane_ddb_uv));
if (WARN_ON(!state))
return 0;
@@ -4431,8 +4432,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
}
alloc_size -= total_min_blocks;
- ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
- ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
+ cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
+ cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
/*
* 2. Distribute the remaining space in proportion to the amount of
@@ -4463,8 +4464,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
/* Leave disabled planes at (0,0) */
if (data_rate) {
- ddb->plane[pipe][plane_id].start = start;
- ddb->plane[pipe][plane_id].end = start + plane_blocks;
+ cstate->wm.skl.plane_ddb_y[plane_id].start = start;
+ cstate->wm.skl.plane_ddb_y[plane_id].end = start + plane_blocks;
}
start += plane_blocks;
@@ -4479,8 +4480,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_plane_blocks);
if (uv_data_rate) {
- ddb->uv_plane[pipe][plane_id].start = start;
- ddb->uv_plane[pipe][plane_id].end =
+ cstate->wm.skl.plane_ddb_uv[plane_id].start = start;
+ cstate->wm.skl.plane_ddb_uv[plane_id].end =
start + uv_plane_blocks;
}
@@ -4590,9 +4591,6 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
to_intel_atomic_state(cstate->base.state);
bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
- if (!intel_wm_plane_visible(cstate, intel_pstate))
- return 0;
-
/* only NV12 format has two planes */
if (plane_id == 1 && fb->format->format != DRM_FORMAT_NV12) {
DRM_DEBUG_KMS("Non NV12 format have single plane\n");
@@ -4706,9 +4704,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
if (latency == 0)
return level == 0 ? -EINVAL : 0;
- if (!intel_wm_plane_visible(cstate, intel_pstate))
- return 0;
-
/* Display WA #1141: kbl,cfl */
if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0)) &&
@@ -4840,21 +4835,16 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
static int
skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
- struct skl_ddb_allocation *ddb,
const struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate,
uint16_t ddb_blocks,
const struct skl_wm_params *wm_params,
- struct skl_plane_wm *wm,
struct skl_wm_level *levels)
{
int level, max_level = ilk_wm_max_level(dev_priv);
struct skl_wm_level *result_prev = &levels[0];
int ret;
- if (WARN_ON(!intel_pstate->base.fb))
- return -EINVAL;
-
for (level = 0; level <= max_level; level++) {
struct skl_wm_level *result = &levels[level];
@@ -4872,9 +4862,6 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
result_prev = result;
}
- if (intel_pstate->base.fb->format->format == DRM_FORMAT_NV12)
- wm->is_planar = true;
-
return 0;
}
@@ -4902,10 +4889,9 @@ skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
}
static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
- struct skl_wm_params *wp,
- struct skl_wm_level *wm_l0,
- uint16_t ddb_allocation,
- struct skl_wm_level *trans_wm /* out */)
+ const struct skl_wm_params *wp,
+ struct skl_plane_wm *wm,
+ uint16_t ddb_allocation)
{
struct drm_device *dev = cstate->base.crtc->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
@@ -4913,9 +4899,6 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
const uint16_t trans_amount = 10; /* This is configurable amount */
uint16_t wm0_sel_res_b, trans_offset_b, res_blocks;
- if (!cstate->base.active)
- return;
-
/* Transition WM are not recommended by HW team for GEN9 */
if (INTEL_GEN(dev_priv) <= 9)
return;
@@ -4940,7 +4923,7 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
* Result Blocks is Result Blocks minus 1 and it should work for the
* current platforms.
*/
- wm0_sel_res_b = wm_l0->plane_res_b - 1;
+ wm0_sel_res_b = wm->wm[0].plane_res_b - 1;
if (wp->y_tiled) {
trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2,
@@ -4959,23 +4942,19 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
res_blocks += 1;
if (res_blocks < ddb_allocation) {
- trans_wm->plane_res_b = res_blocks;
- trans_wm->plane_en = true;
+ wm->trans_wm.plane_res_b = res_blocks;
+ wm->trans_wm.plane_en = true;
}
}
-static int __skl_build_plane_wm_single(struct skl_ddb_allocation *ddb,
- struct skl_pipe_wm *pipe_wm,
- enum plane_id plane_id,
- const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
- int color_plane)
+static int skl_build_plane_wm_single(struct intel_crtc_state *cstate,
+ const struct intel_plane_state *pstate,
+ enum plane_id plane_id, int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(pstate->base.plane->dev);
- struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
- enum pipe pipe = to_intel_plane(pstate->base.plane)->pipe;
+ struct skl_plane_wm *wm = &cstate->wm.skl.optimal.planes[plane_id];
+ u16 ddb_blocks = skl_ddb_entry_size(&cstate->wm.skl.plane_ddb_y[plane_id]);
struct skl_wm_params wm_params;
- uint16_t ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
int ret;
ret = skl_compute_plane_wm_params(dev_priv, cstate, pstate,
@@ -4983,79 +4962,105 @@ static int __skl_build_plane_wm_single(struct skl_ddb_allocation *ddb,
if (ret)
return ret;
- ret = skl_compute_wm_levels(dev_priv, ddb, cstate, pstate,
- ddb_blocks, &wm_params, wm, wm->wm);
-
+ ret = skl_compute_wm_levels(dev_priv, cstate, pstate,
+ ddb_blocks, &wm_params, wm->wm);
if (ret)
return ret;
- skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0],
- ddb_blocks, &wm->trans_wm);
+ skl_compute_transition_wm(cstate, &wm_params, wm, ddb_blocks);
return 0;
}
-static int skl_build_plane_wm_single(struct skl_ddb_allocation *ddb,
- struct skl_pipe_wm *pipe_wm,
- const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate)
+static int skl_build_plane_wm_uv(struct intel_crtc_state *cstate,
+ const struct intel_plane_state *pstate,
+ enum plane_id plane_id)
{
- enum plane_id plane_id = to_intel_plane(pstate->base.plane)->id;
+ struct drm_i915_private *dev_priv = to_i915(pstate->base.plane->dev);
+ struct skl_plane_wm *wm = &cstate->wm.skl.optimal.planes[plane_id];
+ u16 ddb_blocks = skl_ddb_entry_size(&cstate->wm.skl.plane_ddb_uv[plane_id]);
+ struct skl_wm_params wm_params;
+ int ret;
+
+ wm->is_planar = true;
- return __skl_build_plane_wm_single(ddb, pipe_wm, plane_id, cstate, pstate, 0);
+ /* uv plane watermarks must also be validated for NV12/Planar */
+ ret = skl_compute_plane_wm_params(dev_priv, cstate, pstate,
+ &wm_params, 1);
+ if (ret)
+ return ret;
+
+ ret = skl_compute_wm_levels(dev_priv, cstate, pstate,
+ ddb_blocks, &wm_params, wm->uv_wm);
+ if (ret)
+ return ret;
+
+ return 0;
}
-static int skl_build_plane_wm_planar(struct skl_ddb_allocation *ddb,
- struct skl_pipe_wm *pipe_wm,
- const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate)
+static int skl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
+ struct intel_crtc_state *cstate,
+ const struct intel_plane_state *pstate)
{
struct intel_plane *plane = to_intel_plane(pstate->base.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = pstate->base.fb;
enum plane_id plane_id = plane->id;
- struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
- struct skl_wm_params wm_params;
- enum pipe pipe = plane->pipe;
- uint16_t ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
int ret;
- ret = __skl_build_plane_wm_single(ddb, pipe_wm, plane_id, cstate, pstate, 0);
- if (ret)
- return ret;
-
- /* uv plane watermarks must also be validated for NV12/Planar */
- ddb_blocks = skl_ddb_entry_size(&ddb->uv_plane[pipe][plane_id]);
+ if (!intel_wm_plane_visible(cstate, pstate))
+ return 0;
- ret = skl_compute_plane_wm_params(dev_priv, cstate, pstate, &wm_params, 1);
+ ret = skl_build_plane_wm_single(cstate, pstate, plane_id, 0);
if (ret)
return ret;
- return skl_compute_wm_levels(dev_priv, ddb, cstate, pstate,
- ddb_blocks, &wm_params, wm, wm->uv_wm);
+ if (fb->format->is_yuv && fb->format->num_planes > 1) {
+ ret = skl_build_plane_wm_uv(cstate, pstate, plane_id);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
-static int icl_build_plane_wm_planar(struct skl_ddb_allocation *ddb,
- struct skl_pipe_wm *pipe_wm,
- const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate)
+static int icl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
+ struct intel_crtc_state *cstate,
+ const struct intel_plane_state *pstate)
{
+ enum plane_id plane_id = to_intel_plane(pstate->base.plane)->id;
int ret;
- enum plane_id y_plane_id = pstate->linked_plane->id;
- enum plane_id uv_plane_id = to_intel_plane(pstate->base.plane)->id;
- ret = __skl_build_plane_wm_single(ddb, pipe_wm, y_plane_id,
- cstate, pstate, 0);
- if (ret)
- return ret;
+ /* Watermarks calculated in master */
+ if (pstate->slave)
+ return 0;
+
+ if (pstate->linked_plane) {
+ const struct drm_framebuffer *fb = pstate->base.fb;
+ enum plane_id y_plane_id = pstate->linked_plane->id;
+
+ WARN_ON(!fb->format->is_yuv ||
+ fb->format->num_planes == 1);
+
+ ret = skl_build_plane_wm_single(cstate, pstate, y_plane_id, 0);
+ if (ret)
+ return ret;
- return __skl_build_plane_wm_single(ddb, pipe_wm, uv_plane_id,
- cstate, pstate, 1);
+ ret = skl_build_plane_wm_single(cstate, pstate, plane_id, 1);
+ if (ret)
+ return ret;
+ } else if (intel_wm_plane_visible(cstate, pstate)) {
+ ret = skl_build_plane_wm_single(cstate, pstate, plane_id, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
- struct skl_ddb_allocation *ddb,
struct skl_pipe_wm *pipe_wm)
{
+ struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
struct drm_crtc_state *crtc_state = &cstate->base;
struct drm_plane *plane;
const struct drm_plane_state *pstate;
@@ -5071,18 +5076,10 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate =
to_intel_plane_state(pstate);
- /* Watermarks calculated in master */
- if (intel_pstate->slave)
- continue;
-
- if (intel_pstate->linked_plane)
- ret = icl_build_plane_wm_planar(ddb, pipe_wm, cstate, intel_pstate);
- else if (intel_pstate->base.fb &&
- intel_pstate->base.fb->format->format == DRM_FORMAT_NV12)
- ret = skl_build_plane_wm_planar(ddb, pipe_wm, cstate, intel_pstate);
+ if (INTEL_GEN(dev_priv) >= 11)
+ ret = icl_build_plane_wm(pipe_wm, cstate, intel_pstate);
else
- ret = skl_build_plane_wm_single(ddb, pipe_wm, cstate, intel_pstate);
-
+ ret = skl_build_plane_wm(pipe_wm, cstate, intel_pstate);
if (ret)
return ret;
}
@@ -5097,9 +5094,9 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
const struct skl_ddb_entry *entry)
{
if (entry->end)
- I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
+ I915_WRITE_FW(reg, (entry->end - 1) << 16 | entry->start);
else
- I915_WRITE(reg, 0);
+ I915_WRITE_FW(reg, 0);
}
static void skl_write_wm_level(struct drm_i915_private *dev_priv,
@@ -5114,19 +5111,22 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
}
- I915_WRITE(reg, val);
+ I915_WRITE_FW(reg, val);
}
-static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
- const struct skl_plane_wm *wm,
- const struct skl_ddb_allocation *ddb,
- enum plane_id plane_id)
+void skl_write_plane_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = &intel_crtc->base;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
int level, max_level = ilk_wm_max_level(dev_priv);
- enum pipe pipe = intel_crtc->pipe;
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+ const struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ const struct skl_ddb_entry *ddb_uv =
+ &crtc_state->wm.skl.plane_ddb_uv[plane_id];
for (level = 0; level <= max_level; level++) {
skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
@@ -5135,29 +5135,32 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
&wm->trans_wm);
- if (wm->is_planar && INTEL_GEN(dev_priv) < 11) {
- skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
- &ddb->uv_plane[pipe][plane_id]);
+ if (INTEL_GEN(dev_priv) >= 11) {
skl_ddb_entry_write(dev_priv,
- PLANE_NV12_BUF_CFG(pipe, plane_id),
- &ddb->plane[pipe][plane_id]);
- } else {
- skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
- &ddb->plane[pipe][plane_id]);
- if (INTEL_GEN(dev_priv) < 11)
- I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0);
+ PLANE_BUF_CFG(pipe, plane_id), ddb_y);
+ return;
}
+
+ if (wm->is_planar)
+ swap(ddb_y, ddb_uv);
+
+ skl_ddb_entry_write(dev_priv,
+ PLANE_BUF_CFG(pipe, plane_id), ddb_y);
+ skl_ddb_entry_write(dev_priv,
+ PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_uv);
}
-static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
- const struct skl_plane_wm *wm,
- const struct skl_ddb_allocation *ddb)
+void skl_write_cursor_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = &intel_crtc->base;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
int level, max_level = ilk_wm_max_level(dev_priv);
- enum pipe pipe = intel_crtc->pipe;
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
for (level = 0; level <= max_level; level++) {
skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
@@ -5165,8 +5168,7 @@ static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
}
skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
- skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
- &ddb->plane[pipe][PLANE_CURSOR]);
+ skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
}
bool skl_wm_level_equals(const struct skl_wm_level *l1,
@@ -5207,13 +5209,12 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
const struct skl_pipe_wm *old_pipe_wm,
struct skl_pipe_wm *pipe_wm, /* out */
- struct skl_ddb_allocation *ddb, /* out */
bool *changed /* out */)
{
struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
int ret;
- ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
+ ret = skl_build_pipe_wm(intel_cstate, pipe_wm);
if (ret)
return ret;
@@ -5239,42 +5240,29 @@ pipes_modified(struct drm_atomic_state *state)
}
static int
-skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
+skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
{
- struct drm_atomic_state *state = cstate->base.state;
- struct drm_device *dev = state->dev;
- struct drm_crtc *crtc = cstate->base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
- struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
- struct drm_plane *plane;
- enum pipe pipe = intel_crtc->pipe;
+ struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->base.state);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_plane *plane;
- drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
- struct drm_plane_state *plane_state;
- struct intel_plane *linked;
- enum plane_id plane_id = to_intel_plane(plane)->id;
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ struct intel_plane_state *plane_state;
+ enum plane_id plane_id = plane->id;
- if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id],
- &new_ddb->plane[pipe][plane_id]) &&
- skl_ddb_entry_equal(&cur_ddb->uv_plane[pipe][plane_id],
- &new_ddb->uv_plane[pipe][plane_id]))
+ if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
+ &new_crtc_state->wm.skl.plane_ddb_y[plane_id]) &&
+ skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_uv[plane_id],
+ &new_crtc_state->wm.skl.plane_ddb_uv[plane_id]))
continue;
- plane_state = drm_atomic_get_plane_state(state, plane);
+ plane_state = intel_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state))
return PTR_ERR(plane_state);
- /* Make sure linked plane is updated too */
- linked = to_intel_plane_state(plane_state)->linked_plane;
- if (!linked)
- continue;
-
- plane_state = drm_atomic_get_plane_state(state, &linked->base);
- if (IS_ERR(plane_state))
- return PTR_ERR(plane_state);
+ new_crtc_state->update_planes |= BIT(plane_id);
}
return 0;
@@ -5286,18 +5274,21 @@ skl_compute_ddb(struct drm_atomic_state *state)
const struct drm_i915_private *dev_priv = to_i915(state->dev);
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
+ struct intel_crtc_state *old_crtc_state;
+ struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
- struct intel_crtc_state *cstate;
int ret, i;
memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
- for_each_new_intel_crtc_in_state(intel_state, crtc, cstate, i) {
- ret = skl_allocate_pipe_ddb(cstate, ddb);
+ for_each_oldnew_intel_crtc_in_state(intel_state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ ret = skl_allocate_pipe_ddb(new_crtc_state, ddb);
if (ret)
return ret;
- ret = skl_ddb_add_affected_planes(cstate);
+ ret = skl_ddb_add_affected_planes(old_crtc_state,
+ new_crtc_state);
if (ret)
return ret;
}
@@ -5306,36 +5297,29 @@ skl_compute_ddb(struct drm_atomic_state *state)
}
static void
-skl_print_wm_changes(const struct drm_atomic_state *state)
+skl_print_wm_changes(struct intel_atomic_state *state)
{
- const struct drm_device *dev = state->dev;
- const struct drm_i915_private *dev_priv = to_i915(dev);
- const struct intel_atomic_state *intel_state =
- to_intel_atomic_state(state);
- const struct drm_crtc *crtc;
- const struct drm_crtc_state *cstate;
- const struct intel_plane *intel_plane;
- const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
- const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
int i;
- for_each_new_crtc_in_state(state, crtc, cstate, i) {
- const struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
-
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- enum plane_id plane_id = intel_plane->id;
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ enum plane_id plane_id = plane->id;
const struct skl_ddb_entry *old, *new;
- old = &old_ddb->plane[pipe][plane_id];
- new = &new_ddb->plane[pipe][plane_id];
+ old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id];
+ new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id];
if (skl_ddb_entry_equal(old, new))
continue;
DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
- intel_plane->base.base.id,
- intel_plane->base.name,
+ plane->base.base.id, plane->base.name,
old->start, old->end,
new->start, new->end);
}
@@ -5471,8 +5455,7 @@ skl_compute_wm(struct drm_atomic_state *state)
&to_intel_crtc_state(crtc->state)->wm.skl.optimal;
pipe_wm = &intel_cstate->wm.skl.optimal;
- ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm,
- &results->ddb, &changed);
+ ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, &changed);
if (ret)
return ret;
@@ -5486,7 +5469,7 @@ skl_compute_wm(struct drm_atomic_state *state)
intel_cstate->update_wm_pre = true;
}
- skl_print_wm_changes(state);
+ skl_print_wm_changes(intel_state);
return 0;
}
@@ -5497,23 +5480,12 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
- const struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
enum pipe pipe = crtc->pipe;
- enum plane_id plane_id;
if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
return;
I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- if (plane_id != PLANE_CURSOR)
- skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id],
- ddb, plane_id);
- else
- skl_write_cursor_wm(crtc, &pipe_wm->planes[plane_id],
- ddb);
- }
}
static void skl_initial_wm(struct intel_atomic_state *state,
@@ -5523,8 +5495,6 @@ static void skl_initial_wm(struct intel_atomic_state *state,
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct skl_ddb_values *results = &state->wm_results;
- struct skl_ddb_values *hw_vals = &dev_priv->wm.skl_hw;
- enum pipe pipe = intel_crtc->pipe;
if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
return;
@@ -5534,11 +5504,6 @@ static void skl_initial_wm(struct intel_atomic_state *state,
if (cstate->base.active_changed)
skl_atomic_update_crtc_wm(state, cstate);
- memcpy(hw_vals->ddb.uv_plane[pipe], results->ddb.uv_plane[pipe],
- sizeof(hw_vals->ddb.uv_plane[pipe]));
- memcpy(hw_vals->ddb.plane[pipe], results->ddb.plane[pipe],
- sizeof(hw_vals->ddb.plane[pipe]));
-
mutex_unlock(&dev_priv->wm.wm_mutex);
}
@@ -5689,13 +5654,6 @@ void skl_wm_get_hw_state(struct drm_device *dev)
if (dev_priv->active_crtcs) {
/* Fully recompute DDB on first atomic commit */
dev_priv->wm.distrust_bios_wm = true;
- } else {
- /*
- * Easy/common case; just sanitize DDB now if everything off
- * Keep dbuf slice info intact
- */
- memset(ddb->plane, 0, sizeof(ddb->plane));
- memset(ddb->uv_plane, 0, sizeof(ddb->uv_plane));
}
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 2f97a298c24e..d0aa753012b5 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -436,6 +436,8 @@ skl_program_plane(struct intel_plane *plane,
I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
plane_state->color_ctl);
+ skl_write_plane_wm(plane, crtc_state);
+
I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), keymax);
I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), keymsk);
@@ -498,6 +500,8 @@ skl_disable_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ skl_write_plane_wm(plane, crtc_state);
+
I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
--
2.18.1