[Intel-gfx] [PATCH v10 2/2] drm/i915: Restrict qgv points which don't have enough bandwidth.
Stanislav Lisovskiy
stanislav.lisovskiy at intel.com
Thu Nov 7 15:30:37 UTC 2019
According to BSpec 53998, we should restrict
qgv points which can't provide enough bandwidth
for the desired display configuration. Currently
we just compare against all of them and take the
minimum (worst case).
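In rough terms, the new per-point check in intel_bw_atomic_check() (see
the diff below) boils down to the following simplified sketch; variable
names loosely follow the patch, but this is an illustration rather than
the literal code:

    u32 allowed_points = 0;
    unsigned int max_bw = 0, max_bw_point = 0;

    for (i = 0; i < num_qgv_points; i++) {
            unsigned int point_bw = icl_max_bw(dev_priv, num_active_planes, i);

            /* remember the highest-bandwidth point as the no-SAGV fallback */
            if (point_bw > max_bw) {
                    max_bw = point_bw;
                    max_bw_point = i;
            }

            /* keep every point that can carry the required data rate */
            if (point_bw >= data_rate)
                    allowed_points |= 1 << i;
    }

    /* PCode takes the points to mask, i.e. the complement of the allowed set */
    state->qgv_points_mask = ~allowed_points & ((1 << num_qgv_points) - 1);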
v2: Fixed wrong PCode reply mask, removed hardcoded
values.
v3: Forbid simultaneous legacy SAGV PCode requests and
restricting qgv points. Put the actual restriction
into the commit function, added serialization (thanks to Ville)
to prevent commits being applied out of order in case of
nonblocking and/or nomodeset commits.
v4:
- Minor code refactoring, fixed a few typos (thanks to James Ausmus)
- Changed the naming of the qgv point
masking/unmasking functions (James Ausmus).
- Simplified the masking/unmasking operation itself,
as we don't need to mask only a single point per request (James Ausmus)
- Reject and stick to the highest bandwidth point if SAGV
can't be enabled (BSpec)
v5:
- Add new mailbox reply codes, which seem to happen during boot
time for TGL and indicate that the QGV setting is not yet available.
v6:
- Increase the number of supported QGV points to be in sync with BSpec.
v7: - Rebased and resolved a conflict to fix a build failure.
- Fix NUM_QGV_POINTS to 8 and move it to a header file (James Ausmus)
v8: - Don't report an error if we can't restrict qgv points, as SAGV
can be disabled by the BIOS, which is completely legal, so don't
make CI panic. Instead, if we detect that only 1 QGV point is
accessible, just check whether we can fit the required bandwidth,
with no need for restricting.
v9: - Fix a wrong QGV transition when we have 0 planes and no SAGV
simultaneously.
v10: - Fix CDCLK corruption caused by the global state getting serialized
without a modeset, which caused a non-calculated cdclk
to be copied to dev_priv (thanks to Ville for the hint).
Reviewed-by: James Ausmus <james.ausmus at intel.com>
Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy at intel.com>
Cc: Ville Syrjälä <ville.syrjala at intel.com>
Cc: James Ausmus <james.ausmus at intel.com>
---
drivers/gpu/drm/i915/display/intel_atomic.h | 3 +
drivers/gpu/drm/i915/display/intel_bw.c | 137 +++++++++++++++---
drivers/gpu/drm/i915/display/intel_bw.h | 2 +
drivers/gpu/drm/i915/display/intel_display.c | 104 ++++++++++++-
.../drm/i915/display/intel_display_types.h | 3 +
drivers/gpu/drm/i915/i915_drv.h | 4 +-
drivers/gpu/drm/i915/i915_reg.h | 8 +
drivers/gpu/drm/i915/intel_pm.c | 15 +-
drivers/gpu/drm/i915/intel_pm.h | 1 +
drivers/gpu/drm/i915/intel_sideband.c | 27 +++-
drivers/gpu/drm/i915/intel_sideband.h | 1 -
11 files changed, 264 insertions(+), 41 deletions(-)
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h
index 7b49623419ba..3ab6d7ec75ae 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic.h
@@ -7,6 +7,7 @@
#define __INTEL_ATOMIC_H__
#include <linux/types.h>
+#include "intel_display_types.h"
struct drm_atomic_state;
struct drm_connector;
@@ -41,6 +42,8 @@ void intel_crtc_copy_color_blobs(struct intel_crtc_state *crtc_state);
struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
void intel_atomic_state_clear(struct drm_atomic_state *state);
+int intel_atomic_serialize_global_state(struct intel_atomic_state *state);
+
struct intel_crtc_state *
intel_atomic_get_crtc_state(struct drm_atomic_state *state,
struct intel_crtc *crtc);
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 3f6e29f61323..1dde4e1574fb 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -8,6 +8,9 @@
#include "intel_bw.h"
#include "intel_display_types.h"
#include "intel_sideband.h"
+#include "intel_atomic.h"
+#include "intel_pm.h"
+
/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
@@ -15,7 +18,7 @@ struct intel_qgv_point {
};
struct intel_qgv_info {
- struct intel_qgv_point points[3];
+ struct intel_qgv_point points[NUM_SAGV_POINTS];
u8 num_points;
u8 num_channels;
u8 t_bl;
@@ -113,6 +116,27 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
return 0;
}
+int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
+ u32 points_mask)
+{
+ int ret;
+
+ /* bspec says to keep retrying for at least 1 ms */
+ ret = skl_pcode_request(dev_priv, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
+ points_mask,
+ GEN11_PCODE_POINTS_RESTRICTED_MASK,
+ GEN11_PCODE_POINTS_RESTRICTED,
+ 1);
+
+ if (ret < 0) {
+ DRM_ERROR("Failed to disable qgv points (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+
static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
struct intel_qgv_info *qi)
{
@@ -176,7 +200,7 @@ static const struct intel_sa_info tgl_sa_info = {
static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
- struct intel_qgv_info qi = {};
+ struct intel_qgv_info qi;
bool is_y_tile = true; /* assume y tile may be used */
int num_channels;
int deinterleave;
@@ -270,22 +294,6 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv)
icl_get_bw_info(dev_priv, &icl_sa_info);
}
-static unsigned int intel_max_data_rate(struct drm_i915_private *dev_priv,
- int num_planes)
-{
- if (INTEL_GEN(dev_priv) >= 11)
- /*
- * FIXME with SAGV disabled maybe we can assume
- * point 1 will always be used? Seems to match
- * the behaviour observed in the wild.
- */
- return min3(icl_max_bw(dev_priv, num_planes, 0),
- icl_max_bw(dev_priv, num_planes, 1),
- icl_max_bw(dev_priv, num_planes, 2));
- else
- return UINT_MAX;
-}
-
static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
{
/*
@@ -377,7 +385,12 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
unsigned int data_rate, max_data_rate;
unsigned int num_active_planes;
struct intel_crtc *crtc;
- int i;
+ int i, ret;
+ struct intel_qgv_info qi = {};
+ u32 allowed_points = 0;
+ unsigned int max_bw_point = 0, max_bw = 0;
+ unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points;
+ u32 mask = (1 << num_qgv_points) - 1;
/* FIXME earlier gens need some checks too */
if (INTEL_GEN(dev_priv) < 11)
@@ -421,16 +434,92 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
data_rate = intel_bw_data_rate(dev_priv, bw_state);
num_active_planes = intel_bw_num_active_planes(dev_priv, bw_state);
- max_data_rate = intel_max_data_rate(dev_priv, num_active_planes);
-
data_rate = DIV_ROUND_UP(data_rate, 1000);
- if (data_rate > max_data_rate) {
- DRM_DEBUG_KMS("Bandwidth %u MB/s exceeds max available %d MB/s (%d active planes)\n",
- data_rate, max_data_rate, num_active_planes);
+ for (i = 0; i < num_qgv_points; i++) {
+ max_data_rate = icl_max_bw(dev_priv, num_active_planes, i);
+ /*
+ * We need to know which qgv point gives us the
+ * maximum bandwidth in order to disable SAGV if
+ * we find that the watermarks exceed the SAGV
+ * block time. By this point we already have the
+ * watermarks, as they are calculated earlier in
+ * intel_atomic_check().
+ */
+ if (max_data_rate > max_bw) {
+ max_bw_point = i;
+ max_bw = max_data_rate;
+ }
+ if (max_data_rate >= data_rate)
+ allowed_points |= 1 << i;
+ DRM_DEBUG_KMS("QGV point %d: max bw %d required %d\n",
+ i, max_data_rate, data_rate);
+ }
+
+ /*
+ * BSpec states that we should always have at least one allowed point
+ * left, so if we don't, simply reject the configuration for obvious
+ * reasons.
+ */
+ if (allowed_points == 0) {
+ DRM_DEBUG_KMS("Could not find any suitable QGV points\n");
return -EINVAL;
}
+ /*
+ * If SAGV is disabled in the BIOS, we always get 1
+ * SAGV point, but we can't send PCode commands to restrict it
+ * as they would fail and be pointless anyway.
+ */
+ if (qi.num_points == 1)
+ dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
+ else
+ dev_priv->sagv_status = I915_SAGV_ENABLED;
+
+ /*
+ * Leave only the single point with the highest bandwidth if
+ * we can't enable SAGV, as per BSpec.
+ */
+ if (!intel_can_enable_sagv(state)) {
+
+ /*
+ * This is a borderline case: having 0 planes with SAGV
+ * not enabled means that we should keep the QGV point with
+ * the highest bandwidth, however the algorithm returns a wrong
+ * result for 0 planes and 0 data rate, so just stick to the
+ * last config then. Otherwise use the QGV point with the
+ * highest BW according to BSpec.
+ */
+ if (!data_rate && !num_active_planes) {
+ DRM_DEBUG_KMS("No SAGV, using old QGV mask\n");
+ allowed_points = (~dev_priv->qgv_points_mask) & mask;
+ } else {
+ allowed_points = 1 << max_bw_point;
+ DRM_DEBUG_KMS("No SAGV, using single QGV point %d\n",
+ max_bw_point);
+ }
+ }
+ /*
+ * We store the ones which need to be masked as that is what PCode
+ * actually accepts as a parameter.
+ */
+ state->qgv_points_mask = (~allowed_points) & mask;
+
+ DRM_DEBUG_KMS("New state %p qgv mask %x\n",
+ state, state->qgv_points_mask);
+
+ /*
+ * If the actual mask has changed we need to make sure that
+ * the commits are serialized (in case this is a nomodeset, nonblocking commit).
+ */
+ if (state->qgv_points_mask != dev_priv->qgv_points_mask) {
+ ret = intel_atomic_serialize_global_state(state);
+ if (ret) {
+ DRM_DEBUG_KMS("Could not serialize global state\n");
+ return ret;
+ }
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index 9db10af012f4..66bf9bc10b73 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -28,5 +28,7 @@ int intel_bw_init(struct drm_i915_private *dev_priv);
int intel_bw_atomic_check(struct intel_atomic_state *state);
void intel_bw_crtc_update(struct intel_bw_state *bw_state,
const struct intel_crtc_state *crtc_state);
+int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
+ u32 points_mask);
#endif /* __INTEL_BW_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 7ea1e7518ab6..71f6aaa1f6b9 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -14744,6 +14744,80 @@ static void intel_atomic_cleanup_work(struct work_struct *work)
intel_atomic_helper_free_state(i915);
}
+static void intel_qgv_points_mask(struct intel_atomic_state *state)
+{
+ struct drm_device *dev = state->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int ret;
+ u32 new_mask = dev_priv->qgv_points_mask | state->qgv_points_mask;
+ unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points;
+ unsigned int mask = (1 << num_qgv_points) - 1;
+
+ /*
+ * As we don't know the initial hardware state during the initial
+ * commit, we should not do anything until we actually figure out
+ * which qgv points to mask.
+ */
+ if (!new_mask)
+ return;
+
+ WARN_ON(new_mask == mask);
+
+ /*
+ * Just return if we can't control SAGV or don't have it.
+ */
+ if (!intel_has_sagv(dev_priv))
+ return;
+
+ /*
+ * Restrict required qgv points before updating the configuration.
+ * According to BSpec we can't mask and unmask qgv points at the same
+ * time. Also masking should be done before updating the configuration
+ * and unmasking afterwards.
+ */
+ ret = icl_pcode_restrict_qgv_points(dev_priv, new_mask);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Could not restrict required qgv points(%d)\n",
+ ret);
+ else
+ dev_priv->qgv_points_mask = new_mask;
+}
+
+static void intel_qgv_points_unmask(struct intel_atomic_state *state)
+{
+ struct drm_device *dev = state->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int ret;
+ u32 new_mask = dev_priv->qgv_points_mask & state->qgv_points_mask;
+
+ /*
+ * As we don't know the initial hardware state during the initial
+ * commit, we should not do anything until we actually figure out
+ * which qgv points to mask.
+ */
+ if (!new_mask)
+ return;
+
+ /*
+ * Just return if we can't control SAGV or don't have it.
+ */
+ if (!intel_has_sagv(dev_priv))
+ return;
+
+ /*
+ * Allow required qgv points after updating the configuration.
+ * According to BSpec we can't mask and unmask qgv points at the same
+ * time. Also masking should be done before updating the configuration
+ * and unmasking afterwards.
+ */
+ ret = icl_pcode_restrict_qgv_points(dev_priv, new_mask);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Could not restrict required qgv points(%d)\n",
+ ret);
+ else
+ dev_priv->qgv_points_mask = new_mask;
+}
+
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
struct drm_device *dev = state->base.dev;
@@ -14771,6 +14845,9 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
}
}
+ if (INTEL_GEN(dev_priv) >= 11)
+ intel_qgv_points_mask(state);
+
intel_commit_modeset_disables(state);
/* FIXME: Eventually get rid of our crtc->config pointer */
@@ -14789,8 +14866,9 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* SKL workaround: bspec recommends we disable the SAGV when we
* have more then one pipe enabled
*/
- if (!intel_can_enable_sagv(state))
- intel_disable_sagv(dev_priv);
+ if (INTEL_GEN(dev_priv) < 11)
+ if (!intel_can_enable_sagv(state))
+ intel_disable_sagv(dev_priv);
intel_modeset_verify_disabled(dev_priv, state);
}
@@ -14873,8 +14951,11 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
if (state->modeset)
intel_verify_planes(state);
- if (state->modeset && intel_can_enable_sagv(state))
- intel_enable_sagv(dev_priv);
+ if (INTEL_GEN(dev_priv) < 11) {
+ if (state->modeset && intel_can_enable_sagv(state))
+ intel_enable_sagv(dev_priv);
+ } else
+ intel_qgv_points_unmask(state);
drm_atomic_helper_commit_hw_done(&state->base);
@@ -14962,7 +15043,18 @@ static int intel_atomic_commit(struct drm_device *dev,
{
struct intel_atomic_state *state = to_intel_atomic_state(_state);
struct drm_i915_private *dev_priv = to_i915(dev);
- int ret = 0;
+ struct intel_crtc_state *new_crtc_state, *old_crtc_state;
+ int ret = 0, i;
+ bool any_ms = false;
+ struct intel_crtc *crtc;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (needs_modeset(new_crtc_state)) {
+ any_ms = true;
+ break;
+ }
+ }
state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
@@ -15021,7 +15113,7 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_shared_dpll_swap_state(state);
intel_atomic_track_fbs(state);
- if (state->global_state_changed) {
+ if (state->global_state_changed && any_ms) {
assert_global_state_locked(dev_priv);
memcpy(dev_priv->min_cdclk, state->min_cdclk,
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index fb274538af23..896b13bc4494 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -528,6 +528,9 @@ struct intel_atomic_state {
struct i915_sw_fence commit_ready;
struct llist_node freed;
+
+ /* Gen11+ only */
+ u32 qgv_points_mask;
};
struct intel_plane_state {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4f4e2e839513..9924390cb94b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1243,11 +1243,13 @@ struct drm_i915_private {
} dram_info;
struct intel_bw_info {
- unsigned int deratedbw[3]; /* for each QGV point */
+ unsigned int deratedbw[NUM_SAGV_POINTS]; /* for each QGV point */
u8 num_qgv_points;
u8 num_planes;
} max_bw[6];
+ u32 qgv_points_mask;
+
struct drm_private_obj bw_obj;
struct intel_runtime_pm runtime_pm;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index a607ea520829..6d6ecf1d9f6b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -8951,6 +8951,9 @@ enum {
#define VLV_RENDER_C0_COUNT _MMIO(0x138118)
#define VLV_MEDIA_C0_COUNT _MMIO(0x13811C)
+/* BSpec precisely defines this */
+#define NUM_SAGV_POINTS 8
+
#define GEN6_PCODE_MAILBOX _MMIO(0x138124)
#define GEN6_PCODE_READY (1 << 31)
#define GEN6_PCODE_ERROR_MASK 0xFF
@@ -8961,6 +8964,8 @@ enum {
#define GEN6_PCODE_UNIMPLEMENTED_CMD 0xFF
#define GEN7_PCODE_TIMEOUT 0x2
#define GEN7_PCODE_ILLEGAL_DATA 0x3
+#define GEN11_PCODE_MAIL_BOX_LOCKED 0x6
+#define GEN11_PCODE_REJECTED 0x11
#define GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x10
#define GEN6_PCODE_WRITE_RC6VIDS 0x4
#define GEN6_PCODE_READ_RC6VIDS 0x5
@@ -8982,6 +8987,7 @@ enum {
#define ICL_PCODE_MEM_SUBSYSYSTEM_INFO 0xd
#define ICL_PCODE_MEM_SS_READ_GLOBAL_INFO (0x0 << 8)
#define ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point) (((point) << 16) | (0x1 << 8))
+#define ICL_PCODE_SAGV_DE_MEM_SS_CONFIG 0xe
#define GEN6_PCODE_READ_D_COMP 0x10
#define GEN6_PCODE_WRITE_D_COMP 0x11
#define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17
@@ -8994,6 +9000,8 @@ enum {
#define GEN9_SAGV_IS_DISABLED 0x1
#define GEN9_SAGV_ENABLE 0x3
#define GEN12_PCODE_READ_SAGV_BLOCK_TIME_US 0x23
+#define GEN11_PCODE_POINTS_RESTRICTED 0x0
+#define GEN11_PCODE_POINTS_RESTRICTED_MASK 0x1
#define GEN6_PCODE_DATA _MMIO(0x138128)
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index c792dd168742..10816f3e29f0 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3617,13 +3617,9 @@ static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv)
return IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv);
}
-static bool
+bool
intel_has_sagv(struct drm_i915_private *dev_priv)
{
- /* HACK! */
- if (IS_GEN(dev_priv, 12))
- return false;
-
return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) &&
dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
}
@@ -3839,7 +3835,7 @@ static void icl_set_sagv_mask(struct intel_atomic_state *state)
if (flags & DRM_MODE_FLAG_INTERLACE)
continue;
- if (!new_crtc_state->base.active)
+ if (!new_crtc_state->hw.enable)
continue;
can_sagv = true;
@@ -3886,6 +3882,9 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state)
else
skl_set_sagv_mask(state);
+ DRM_DEBUG_KMS("Crtc sagv masks, state %x global state %x\n",
+ state->crtc_sagv_mask,
+ dev_priv->crtc_sagv_mask);
/*
* For SAGV we need to account all the pipes,
* not only the ones which are in state currently.
@@ -4340,7 +4339,7 @@ static int
tgl_check_pipe_fits_sagv_wm(struct intel_crtc_state *crtc_state,
struct skl_ddb_allocation *ddb /* out */)
{
- struct drm_crtc *crtc = crtc_state->base.crtc;
+ struct drm_crtc *crtc = crtc_state->uapi.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
@@ -4833,7 +4832,6 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
struct skl_wm_level *result /* out */)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- u32 latency = dev_priv->wm.skl_latency[level];
uint_fixed_16_16_t method1, method2;
uint_fixed_16_16_t selected_result;
u32 res_blocks, res_lines, min_ddb_alloc = 0;
@@ -5707,6 +5705,7 @@ static void tgl_set_sagv_mask(struct intel_atomic_state *state)
ret = tgl_check_pipe_fits_sagv_wm(new_crtc_state, ddb);
if (!ret) {
int pipe_bit = BIT(crtc->pipe);
+
state->crtc_sagv_mask |= pipe_bit;
}
}
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index b579c724b915..53275860731a 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -43,6 +43,7 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
bool intel_can_enable_sagv(struct intel_atomic_state *state);
+bool intel_has_sagv(struct drm_i915_private *dev_priv);
int intel_enable_sagv(struct drm_i915_private *dev_priv);
int intel_disable_sagv(struct drm_i915_private *dev_priv);
bool skl_wm_level_equals(const struct skl_wm_level *l1,
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index e06b35b844a0..ff9dbed094d8 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -371,6 +371,29 @@ static inline int gen7_check_mailbox_status(u32 mbox)
}
}
+static inline int gen11_check_mailbox_status(u32 mbox)
+{
+ switch (mbox & GEN6_PCODE_ERROR_MASK) {
+ case GEN6_PCODE_SUCCESS:
+ return 0;
+ case GEN6_PCODE_ILLEGAL_CMD:
+ return -ENXIO;
+ case GEN7_PCODE_TIMEOUT:
+ return -ETIMEDOUT;
+ case GEN7_PCODE_ILLEGAL_DATA:
+ return -EINVAL;
+ case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+ return -EOVERFLOW;
+ case GEN11_PCODE_MAIL_BOX_LOCKED:
+ return -EAGAIN;
+ case GEN11_PCODE_REJECTED:
+ return -EACCES;
+ default:
+ MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
+ return 0;
+ }
+}
+
static int __sandybridge_pcode_rw(struct drm_i915_private *i915,
u32 mbox, u32 *val, u32 *val1,
int fast_timeout_us,
@@ -408,7 +431,9 @@ static int __sandybridge_pcode_rw(struct drm_i915_private *i915,
if (is_read && val1)
*val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1);
- if (INTEL_GEN(i915) > 6)
+ if (INTEL_GEN(i915) >= 11)
+ return gen11_check_mailbox_status(mbox);
+ else if (INTEL_GEN(i915) > 6)
return gen7_check_mailbox_status(mbox);
else
return gen6_check_mailbox_status(mbox);
diff --git a/drivers/gpu/drm/i915/intel_sideband.h b/drivers/gpu/drm/i915/intel_sideband.h
index 7fb95745a444..14627ace99ae 100644
--- a/drivers/gpu/drm/i915/intel_sideband.h
+++ b/drivers/gpu/drm/i915/intel_sideband.h
@@ -137,5 +137,4 @@ int sandybridge_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox,
int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
u32 reply_mask, u32 reply, int timeout_base_ms);
-
#endif /* _INTEL_SIDEBAND_H */
--
2.17.1