[PATCH 2/3] drm/i915: Record whitelist workarounds and emit them via the request
Chris Wilson
chris at chris-wilson.co.uk
Thu Apr 12 10:36:26 UTC 2018
---
drivers/gpu/drm/i915/intel_workarounds.c | 148 +++++++++++++----------
1 file changed, 82 insertions(+), 66 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 16285b9ea5a3..77c86b04f868 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -475,205 +475,221 @@ int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv)
return 0;
}
-static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
-				 i915_reg_t reg)
+static int wa_ring_whitelist_reg(struct i915_request *rq, i915_reg_t reg)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
-	struct i915_workarounds *wa = &dev_priv->workarounds;
-	const unsigned int index = wa->hw_whitelist_count[engine->id];
+	struct i915_workarounds *wa = &rq->i915->workarounds;
+	const unsigned int index = wa->hw_whitelist_count[rq->engine->id];
 
 	if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
 		return -EINVAL;
 
-	I915_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
-		   i915_mmio_reg_offset(reg));
+	wa->reg[index] = i915_mmio_reg_offset(reg);
 
-	wa->hw_whitelist_count[engine->id]++;
+	wa->hw_whitelist_count[rq->engine->id]++;
 	return 0;
 }
-static int bdw_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static int bdw_whitelist_workarounds_emit(struct i915_request *rq)
{
return 0;
}
-static int chv_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static int chv_whitelist_workarounds_emit(struct i915_request *rq)
{
return 0;
}
-static int gen9_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static int gen9_whitelist_workarounds_emit(struct i915_request *rq)
{
int ret;
/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
- ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
+ ret = wa_ring_whitelist_reg(rq, GEN9_CTX_PREEMPT_REG);
if (ret)
return ret;
/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
- ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
+ ret = wa_ring_whitelist_reg(rq, GEN8_CS_CHICKEN1);
if (ret)
return ret;
/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
- ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
+ ret = wa_ring_whitelist_reg(rq, GEN8_HDC_CHICKEN1);
if (ret)
return ret;
return 0;
}
-static int skl_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static int skl_whitelist_workarounds_emit(struct i915_request *rq)
{
int ret;
-	ret = gen9_whitelist_workarounds_apply(engine);
+	ret = gen9_whitelist_workarounds_emit(rq);
if (ret)
return ret;
/* WaDisableLSQCROPERFforOCL:skl */
- ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
+ ret = wa_ring_whitelist_reg(rq, GEN8_L3SQCREG4);
if (ret)
return ret;
return 0;
}
-static int bxt_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static int bxt_whitelist_workarounds_emit(struct i915_request *rq)
{
int ret;
- ret = gen9_whitelist_workarounds_apply(engine);
+ ret = gen9_whitelist_workarounds_emit(rq);
if (ret)
return ret;
return 0;
}
-static int kbl_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static int kbl_whitelist_workarounds_emit(struct i915_request *rq)
{
int ret;
- ret = gen9_whitelist_workarounds_apply(engine);
+ ret = gen9_whitelist_workarounds_emit(rq);
if (ret)
return ret;
/* WaDisableLSQCROPERFforOCL:kbl */
- ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
+ ret = wa_ring_whitelist_reg(rq, GEN8_L3SQCREG4);
if (ret)
return ret;
return 0;
}
-static int glk_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static int glk_whitelist_workarounds_emit(struct i915_request *rq)
{
int ret;
- ret = gen9_whitelist_workarounds_apply(engine);
+ ret = gen9_whitelist_workarounds_emit(rq);
if (ret)
return ret;
/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
- ret = wa_ring_whitelist_reg(engine, GEN9_SLICE_COMMON_ECO_CHICKEN1);
+ ret = wa_ring_whitelist_reg(rq, GEN9_SLICE_COMMON_ECO_CHICKEN1);
if (ret)
return ret;
return 0;
}
-static int cfl_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static int cfl_whitelist_workarounds_emit(struct i915_request *rq)
{
int ret;
-	ret = gen9_whitelist_workarounds_apply(engine);
+	ret = gen9_whitelist_workarounds_emit(rq);
if (ret)
return ret;
return 0;
}
-static int cnl_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static int cnl_whitelist_workarounds_emit(struct i915_request *rq)
{
int ret;
/* WaEnablePreemptionGranularityControlByUMD:cnl */
- ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
+ ret = wa_ring_whitelist_reg(rq, GEN8_CS_CHICKEN1);
if (ret)
return ret;
return 0;
}
-int intel_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static int wa_ring_whitelist_emit(struct i915_request *rq)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
+	struct i915_workarounds *wa = &rq->i915->workarounds;
+	const unsigned int count = wa->hw_whitelist_count[rq->engine->id];
+	u32 base = rq->engine->mmio_base;
+	unsigned int i;
+	u32 *cs;
+
+	if (!count)
+		return 0;
+
+	cs = intel_ring_begin(rq, 2 * count + 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	*cs++ = MI_LOAD_REGISTER_IMM(count);
+	for (i = 0; i < count; i++) {
+		*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
+		*cs++ = wa->reg[i];
+	}
+	*cs++ = MI_NOOP;
+
+	intel_ring_advance(rq, cs);
+
+	return 0;
+}
+
+static int whitelist_workarounds_emit(struct i915_request *rq)
+{
+ struct drm_i915_private *i915 = rq->i915;
+ const struct intel_engine_cs *engine = rq->engine;
int err = 0;
- WARN_ON(engine->id != RCS);
+ GEM_BUG_ON(engine->id != RCS);
- dev_priv->workarounds.hw_whitelist_count[engine->id] = 0;
+ i915->workarounds.hw_whitelist_count[engine->id] = 0;
-	if (INTEL_GEN(dev_priv) < 8)
+	if (INTEL_GEN(i915) < 8)
 		err = 0;
-	else if (IS_BROADWELL(dev_priv))
-		err = bdw_whitelist_workarounds_apply(engine);
+	else if (IS_BROADWELL(i915))
+		err = bdw_whitelist_workarounds_emit(rq);
-	else if (IS_CHERRYVIEW(dev_priv))
-		err = chv_whitelist_workarounds_apply(engine);
+	else if (IS_CHERRYVIEW(i915))
+		err = chv_whitelist_workarounds_emit(rq);
-	else if (IS_SKYLAKE(dev_priv))
-		err = skl_whitelist_workarounds_apply(engine);
+	else if (IS_SKYLAKE(i915))
+		err = skl_whitelist_workarounds_emit(rq);
-	else if (IS_BROXTON(dev_priv))
-		err = bxt_whitelist_workarounds_apply(engine);
+	else if (IS_BROXTON(i915))
+		err = bxt_whitelist_workarounds_emit(rq);
-	else if (IS_KABYLAKE(dev_priv))
-		err = kbl_whitelist_workarounds_apply(engine);
+	else if (IS_KABYLAKE(i915))
+		err = kbl_whitelist_workarounds_emit(rq);
-	else if (IS_GEMINILAKE(dev_priv))
-		err = glk_whitelist_workarounds_apply(engine);
+	else if (IS_GEMINILAKE(i915))
+		err = glk_whitelist_workarounds_emit(rq);
-	else if (IS_COFFEELAKE(dev_priv))
-		err = cfl_whitelist_workarounds_apply(engine);
+	else if (IS_COFFEELAKE(i915))
+		err = cfl_whitelist_workarounds_emit(rq);
-	else if (IS_CANNONLAKE(dev_priv))
-		err = cnl_whitelist_workarounds_apply(engine);
+	else if (IS_CANNONLAKE(i915))
+		err = cnl_whitelist_workarounds_emit(rq);
 	else
-		MISSING_CASE(INTEL_GEN(dev_priv));
+		MISSING_CASE(INTEL_GEN(i915));
if (err)
return err;
- DRM_DEBUG_DRIVER("%s: Number of whitelist w/a: %d\n", engine->name,
- dev_priv->workarounds.hw_whitelist_count[engine->id]);
- return 0;
+	return wa_ring_whitelist_emit(rq);
}
int intel_ctx_workarounds_emit(struct i915_request *rq)
{
struct i915_workarounds *w = &rq->i915->workarounds;
- u32 *cs;
- int ret, i;
- if (w->count == 0)
- return 0;
+ if (w->count) {
+ u32 *cs;
+ int i;
- ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
- if (ret)
- return ret;
+ cs = intel_ring_begin(rq, (w->count * 2 + 2));
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- cs = intel_ring_begin(rq, (w->count * 2 + 2));
- if (IS_ERR(cs))
- return PTR_ERR(cs);
+ *cs++ = MI_LOAD_REGISTER_IMM(w->count);
+ for (i = 0; i < w->count; i++) {
+ *cs++ = i915_mmio_reg_offset(w->reg[i].addr);
+ *cs++ = w->reg[i].value;
+ }
+ *cs++ = MI_NOOP;
- *cs++ = MI_LOAD_REGISTER_IMM(w->count);
- for (i = 0; i < w->count; i++) {
- *cs++ = i915_mmio_reg_offset(w->reg[i].addr);
- *cs++ = w->reg[i].value;
+ intel_ring_advance(rq, cs);
}
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
- ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
- if (ret)
- return ret;
-
- return 0;
+ return whitelist_workarounds_emit(rq);
}
static void bdw_gt_workarounds_apply(struct drm_i915_private *dev_priv)
--
2.17.0
More information about the Intel-gfx-trybot
mailing list