[Intel-gfx] [PATCH 12/13] drm/i915: Remove total[] and uv_total[] from ddb allocation
Ville Syrjala
ville.syrjala at linux.intel.com
Thu Nov 19 18:54:00 UTC 2020
From: Ville Syrjälä <ville.syrjala at linux.intel.com>
There's really no need to maintain these total[] arrays to
track the size of each plane's ddb allocation. We just stick
the results straight into the crtc_state ddb tracking structures.
The main annoyance with all this is the mismatch between
uv_wm vs. ddb_y on pre-icl. If only the hw was consistent in
what it considers the primary source of information we could
avoid some of the ugliness. But since that is not the case
we need a bit of special casing for planar formats.
Signed-off-by: Ville Syrjälä <ville.syrjala at linux.intel.com>
---
drivers/gpu/drm/i915/intel_pm.c | 105 ++++++++++++++++----------------
1 file changed, 51 insertions(+), 54 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a0ec7a102270..30f2de715398 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4627,13 +4627,12 @@ skl_plane_wm_level(const struct intel_crtc_state *crtc_state,
struct skl_plane_ddb_iter {
u64 data_rate;
- u16 total[I915_MAX_PLANES];
- u16 uv_total[I915_MAX_PLANES];
u16 start, size;
};
-static u16
+static void
skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
+ struct skl_ddb_entry *ddb,
const struct skl_wm_level *wm,
u64 data_rate)
{
@@ -4644,7 +4643,8 @@ skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
iter->size -= extra;
iter->data_rate -= data_rate;
- return wm->min_ddb_alloc + extra;
+ iter->start = skl_ddb_entry_init(ddb, iter->start,
+ iter->start + wm->min_ddb_alloc + extra);
}
static int
@@ -4655,8 +4655,9 @@ skl_allocate_pipe_ddb(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
- struct skl_plane_ddb_iter iter = {};
+ struct skl_plane_ddb_iter iter;
enum plane_id plane_id;
+ u16 cursor_size;
int num_active;
u32 blocks;
int level;
@@ -4701,15 +4702,16 @@ skl_allocate_pipe_ddb(struct intel_atomic_state *state,
if (ret)
return ret;
+ iter.start = alloc->start;
iter.size = skl_ddb_entry_size(alloc);
if (iter.size == 0)
return 0;
/* Allocate fixed number of blocks for cursor. */
- iter.total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active);
- iter.size -= iter.total[PLANE_CURSOR];
+ cursor_size = skl_cursor_allocation(crtc_state, num_active);
+ iter.size -= cursor_size;
skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
- alloc->end - iter.total[PLANE_CURSOR], alloc->end);
+ alloc->end - cursor_size, alloc->end);
iter.data_rate = skl_total_relative_data_rate(crtc_state);
if (iter.data_rate == 0)
@@ -4722,11 +4724,13 @@ skl_allocate_pipe_ddb(struct intel_atomic_state *state,
for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
blocks = 0;
for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
if (plane_id == PLANE_CURSOR) {
- if (wm->wm[level].min_ddb_alloc > iter.total[PLANE_CURSOR]) {
+ if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) {
drm_WARN_ON(&dev_priv->drm,
wm->wm[level].min_ddb_alloc != U16_MAX);
blocks = U32_MAX;
@@ -4759,6 +4763,10 @@ skl_allocate_pipe_ddb(struct intel_atomic_state *state,
* proportional to its relative data rate.
*/
for_each_plane_id_on_crtc(crtc, plane_id) {
+ struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+ struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
@@ -4774,51 +4782,17 @@ skl_allocate_pipe_ddb(struct intel_atomic_state *state,
if (INTEL_GEN(dev_priv) < 11 &&
crtc_state->nv12_planes & BIT(plane_id)) {
- iter.total[plane_id] =
- skl_allocate_plane_ddb(&iter, &wm->wm[level],
- crtc_state->rel_data_rate_y[plane_id]);
- iter.uv_total[plane_id] =
- skl_allocate_plane_ddb(&iter, &wm->uv_wm[level],
- crtc_state->rel_data_rate[plane_id]);
+ skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level],
+ crtc_state->rel_data_rate_y[plane_id]);
+ skl_allocate_plane_ddb(&iter, ddb, &wm->uv_wm[level],
+ crtc_state->rel_data_rate[plane_id]);
} else {
- iter.total[plane_id] =
- skl_allocate_plane_ddb(&iter, &wm->wm[level],
- crtc_state->rel_data_rate[plane_id]);
+ skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level],
+ crtc_state->rel_data_rate[plane_id]);
}
}
drm_WARN_ON(&dev_priv->drm, iter.size != 0 || iter.data_rate != 0);
- /* Set the actual DDB start/end points for each plane */
- iter.start = alloc->start;
- for_each_plane_id_on_crtc(crtc, plane_id) {
- struct skl_ddb_entry *ddb =
- &crtc_state->wm.skl.plane_ddb[plane_id];
- struct skl_ddb_entry *ddb_y =
- &crtc_state->wm.skl.plane_ddb_y[plane_id];
-
- if (plane_id == PLANE_CURSOR)
- continue;
-
- /* Gen11+ uses a separate plane for UV watermarks */
- drm_WARN_ON(&dev_priv->drm,
- INTEL_GEN(dev_priv) >= 11 && iter.uv_total[plane_id]);
-
- /* Leave disabled planes at (0,0) */
- if (INTEL_GEN(dev_priv) < 11 &&
- crtc_state->nv12_planes & BIT(plane_id)) {
- if (iter.total[plane_id])
- iter.start = skl_ddb_entry_init(ddb_y, iter.start,
- iter.start + iter.total[plane_id]);
- if (iter.uv_total[plane_id])
- iter.start = skl_ddb_entry_init(ddb, iter.start,
- iter.start + iter.uv_total[plane_id]);
- } else {
- if (iter.total[plane_id])
- iter.start = skl_ddb_entry_init(ddb, iter.start,
- iter.start + iter.total[plane_id]);
- }
- }
-
/*
* When we calculated watermark values we didn't know how high
* of a level we'd actually be able to hit, so we just marked
@@ -4827,6 +4801,10 @@ skl_allocate_pipe_ddb(struct intel_atomic_state *state,
*/
for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+ const struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
@@ -4842,9 +4820,15 @@ skl_allocate_pipe_ddb(struct intel_atomic_state *state,
* planes must be enabled before the level will be used."
* So this is actually safe to do.
*/
- if (wm->wm[level].min_ddb_alloc > iter.total[plane_id] ||
- wm->uv_wm[level].min_ddb_alloc > iter.uv_total[plane_id])
- memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
+ if (INTEL_GEN(dev_priv) < 11 &&
+ crtc_state->nv12_planes & BIT(plane_id)) {
+ if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb_y) ||
+ wm->uv_wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb))
+ memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
+ } else {
+ if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb))
+ memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
+ }
/*
* Wa_1408961008:icl, ehl
@@ -4864,11 +4848,24 @@ skl_allocate_pipe_ddb(struct intel_atomic_state *state,
* don't have enough DDB blocks for it.
*/
for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+ const struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
- if (wm->trans_wm.plane_res_b >= iter.total[plane_id])
- memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
+ if (INTEL_GEN(dev_priv) < 11 &&
+ crtc_state->nv12_planes & BIT(plane_id)) {
+ if (wm->trans_wm.plane_res_b >= skl_ddb_entry_size(ddb_y))
+ memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
+ } else {
+ WARN_ON(skl_ddb_entry_size(ddb_y));
+ WARN_ON(wm->uv_wm[level].min_ddb_alloc);
+
+ if (wm->trans_wm.plane_res_b >= skl_ddb_entry_size(ddb))
+ memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
+ }
}
return 0;
--
2.26.2
More information about the Intel-gfx
mailing list