[PATCH 3/3] drm/i915: Store whitelist registers in the default context image
Chris Wilson
chris at chris-wilson.co.uk
Thu Apr 12 10:36:27 UTC 2018
As the RING_NONPRIV (whitelist) registers are stored inside the context,
we need only emit them once when setting up the default context image
that is then copied into every user context.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
drivers/gpu/drm/i915/i915_debugfs.c | 14 +-
drivers/gpu/drm/i915/i915_drv.h | 1 -
drivers/gpu/drm/i915/intel_lrc.c | 8 --
drivers/gpu/drm/i915/intel_ringbuffer.c | 4 -
drivers/gpu/drm/i915/intel_workarounds.c | 164 +++++++----------------
drivers/gpu/drm/i915/intel_workarounds.h | 2 -
6 files changed, 53 insertions(+), 140 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 2e6652a9bb9e..e0274f41bc76 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3304,24 +3304,13 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
static int i915_wa_registers(struct seq_file *m, void *unused)
{
- int i;
- int ret;
- struct intel_engine_cs *engine;
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
struct i915_workarounds *workarounds = &dev_priv->workarounds;
- enum intel_engine_id id;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
+ int i;
intel_runtime_pm_get(dev_priv);
seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
- for_each_engine(engine, dev_priv, id)
- seq_printf(m, "HW whitelist count for %s: %d\n",
- engine->name, workarounds->hw_whitelist_count[id]);
for (i = 0; i < workarounds->count; ++i) {
i915_reg_t addr;
u32 mask, value, read;
@@ -3337,7 +3326,6 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
}
intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 649c0f2f3bae..15e1260be58e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1297,7 +1297,6 @@ struct i915_wa_reg {
struct i915_workarounds {
struct i915_wa_reg reg[I915_MAX_WA_REGS];
u32 count;
- u32 hw_whitelist_count[I915_NUM_ENGINES];
};
struct i915_virtual_gpu {
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index c7c85134a84a..e8e9b730aad6 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1744,10 +1744,6 @@ static int gen8_init_render_ring(struct intel_engine_cs *engine)
if (ret)
return ret;
- ret = intel_whitelist_workarounds_apply(engine);
- if (ret)
- return ret;
-
/* We need to disable the AsyncFlip performance optimisations in order
* to use MI_WAIT_FOR_EVENT within the CS. It should already be
* programmed to '1' on all products.
@@ -1769,10 +1765,6 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
if (ret)
return ret;
- ret = intel_whitelist_workarounds_apply(engine);
- if (ret)
- return ret;
-
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 757bb0990c07..d2ac4435c199 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -618,10 +618,6 @@ static int init_render_ring(struct intel_engine_cs *engine)
if (ret)
return ret;
- ret = intel_whitelist_workarounds_apply(engine);
- if (ret)
- return ret;
-
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
if (IS_GEN(dev_priv, 4, 6))
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 77c86b04f868..7bbd0e195029 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -475,157 +475,100 @@ int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv)
return 0;
}
-static int wa_ring_whitelist_reg(struct intel_engine_cs *engine, i915_reg_t reg)
+struct whitelist {
+ i915_reg_t reg[RING_MAX_NONPRIV_SLOTS];
+ unsigned int count;
+};
+
+static void __whitelist_reg(struct whitelist *wa, i915_reg_t reg)
{
- struct i915_workarounds *wa = &engine->i915->workarounds;
const unsigned int index = wa->count++;
- if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
- return -EINVAL;
-
- wa->reg[index] = i915_mmio_reg_offset(reg);
- wa->hw_whitelist_count[engine->id]++;
+ if (GEM_WARN_ON(index >= ARRAY_SIZE(wa->reg)))
+ return;
- return 0;
+ wa->reg[index] = reg;
}
-static int bdw_whitelist_workarounds_emit(struct i915_request *rq)
+static void bdw_whitelist_workarounds_apply(struct whitelist *wa)
{
- return 0;
}
-static int chv_whitelist_workarounds_emit(struct i915_request *rq)
+static void chv_whitelist_workarounds_apply(struct whitelist *wa)
{
- return 0;
}
-static int gen9_whitelist_workarounds_emit(struct i915_request *rq)
+static void gen9_whitelist_workarounds_apply(struct whitelist *wa)
{
- int ret;
-
/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
- ret = wa_ring_whitelist_reg(rq, GEN9_CTX_PREEMPT_REG);
- if (ret)
- return ret;
+ __whitelist_reg(wa, GEN9_CTX_PREEMPT_REG);
/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
- ret = wa_ring_whitelist_reg(rq, GEN8_CS_CHICKEN1);
- if (ret)
- return ret;
+ __whitelist_reg(wa, GEN8_CS_CHICKEN1);
/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
- ret = wa_ring_whitelist_reg(rq, GEN8_HDC_CHICKEN1);
- if (ret)
- return ret;
-
- return 0;
+ __whitelist_reg(wa, GEN8_HDC_CHICKEN1);
}
-static int skl_whitelist_workarounds_emit(struct i915_request *rq)
+static void skl_whitelist_workarounds_apply(struct whitelist *wa)
{
- int ret;
-
- ret = gen9_whitelist_workarounds_apply(rq);
- if (ret)
- return ret;
+ gen9_whitelist_workarounds_apply(wa);
/* WaDisableLSQCROPERFforOCL:skl */
- ret = wa_ring_whitelist_reg(rq, GEN8_L3SQCREG4);
- if (ret)
- return ret;
-
- return 0;
+ __whitelist_reg(wa, GEN8_L3SQCREG4);
}
-static int bxt_whitelist_workarounds_emit(struct i915_request *rq)
+static void bxt_whitelist_workarounds_apply(struct whitelist *wa)
{
- int ret;
-
- ret = gen9_whitelist_workarounds_emit(rq);
- if (ret)
- return ret;
-
- return 0;
+ gen9_whitelist_workarounds_apply(wa);
}
-static int kbl_whitelist_workarounds_emit(struct i915_request *rq)
+static void kbl_whitelist_workarounds_apply(struct whitelist *wa)
{
- int ret;
-
- ret = gen9_whitelist_workarounds_emit(rq);
- if (ret)
- return ret;
+ gen9_whitelist_workarounds_apply(wa);
/* WaDisableLSQCROPERFforOCL:kbl */
- ret = wa_ring_whitelist_reg(rq, GEN8_L3SQCREG4);
- if (ret)
- return ret;
-
- return 0;
+ __whitelist_reg(wa, GEN8_L3SQCREG4);
}
-static int glk_whitelist_workarounds_emit(struct i915_request *rq)
+static void glk_whitelist_workarounds_apply(struct whitelist *wa)
{
- int ret;
-
- ret = gen9_whitelist_workarounds_emit(rq);
- if (ret)
- return ret;
+ gen9_whitelist_workarounds_apply(wa);
/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
- ret = wa_ring_whitelist_reg(rq, GEN9_SLICE_COMMON_ECO_CHICKEN1);
- if (ret)
- return ret;
-
- return 0;
+ __whitelist_reg(wa, GEN9_SLICE_COMMON_ECO_CHICKEN1);
}
-static int cfl_whitelist_workarounds_emit(struct i915_request *rq)
+static void cfl_whitelist_workarounds_apply(struct whitelist *wa)
{
- int ret;
-
- ret = gen9_whitelist_workarounds_apply(rq);
- if (ret)
- return ret;
-
- return 0;
+ gen9_whitelist_workarounds_apply(wa);
}
-static int cnl_whitelist_workarounds_emit(struct i915_request *rq)
+static void cnl_whitelist_workarounds_apply(struct whitelist *wa)
{
- int ret;
-
/* WaEnablePreemptionGranularityControlByUMD:cnl */
- ret = wa_ring_whitelist_reg(rq, GEN8_CS_CHICKEN1);
- if (ret)
- return ret;
-
- return 0;
+ __whitelist_reg(wa, GEN8_CS_CHICKEN1);
}
-static int wa_ring_whitelist_emit(struct i915_request *rq)
+static int __whitelist_emit(struct i915_request *rq, const struct whitelist *wa)
{
- struct drm_i915_private *i915 = engine->i915;
- struct i915_workarounds *wa = &i915->workarounds;
- u32 base = rq->engine->mmio_base;
- int count;
+ const u32 base = rq->engine->mmio_base;
+ unsigned int i;
u32 *cs;
- count = wa->count;
- if (!count)
- return;
+ if (!wa->count)
+ return 0;
- cs = intel_ring_begin(rq, 2*count + 2);
+ cs = intel_ring_begin(rq, 2*wa->count + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
- *cs++ = MI_LOAD_REGISTER_IMM(count);
- for (i = 0; i < count; i++) {
+ *cs++ = MI_LOAD_REGISTER_IMM(wa->count);
+ for (i = 0; i < wa->count; i++) {
*cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
- *cs++ = i915_mmio_reg_offset(reg);
+ *cs++ = i915_mmio_reg_offset(wa->reg[i]);
}
*cs++ = MI_NOOP;
- wa->hw_whitelist_count[engine->id]++;
return 0;
}
@@ -633,37 +576,34 @@ static int wa_ring_whitelist_emit(struct i915_request *rq)
static int whitelist_workarounds_emit(struct i915_request *rq)
{
struct drm_i915_private *i915 = rq->i915;
- const struct intel_engine_cs *engine = rq->engine;
- int err = 0;
+ struct whitelist wa;
- GEM_BUG_ON(engine->id != RCS);
+ GEM_BUG_ON(rq->engine->id != RCS);
- i915->workarounds.hw_whitelist_count[engine->id] = 0;
+ wa.count = 0;
if (INTEL_GEN(i915) < 8)
- err = 0;
+ ;
else if (IS_BROADWELL(i915))
- err = bdw_whitelist_workarounds_apply(engine);
+ bdw_whitelist_workarounds_apply(&wa);
else if (IS_CHERRYVIEW(i915))
- err = chv_whitelist_workarounds_apply(engine);
+ chv_whitelist_workarounds_apply(&wa);
else if (IS_SKYLAKE(i915))
- err = skl_whitelist_workarounds_apply(engine);
+ skl_whitelist_workarounds_apply(&wa);
else if (IS_BROXTON(i915))
- err = bxt_whitelist_workarounds_apply(engine);
+ bxt_whitelist_workarounds_apply(&wa);
else if (IS_KABYLAKE(i915))
- err = kbl_whitelist_workarounds_apply(engine);
+ kbl_whitelist_workarounds_apply(&wa);
else if (IS_GEMINILAKE(i915))
- err = glk_whitelist_workarounds_apply(engine);
+ glk_whitelist_workarounds_apply(&wa);
else if (IS_COFFEELAKE(i915))
- err = cfl_whitelist_workarounds_apply(engine);
+ cfl_whitelist_workarounds_apply(&wa);
else if (IS_CANNONLAKE(i915))
- err = cnl_whitelist_workarounds_apply(engine);
+ cnl_whitelist_workarounds_apply(&wa);
else
MISSING_CASE(INTEL_GEN(i915));
- if (err)
- return err;
- return whitelist_workarounds_emit(rq);
+ return __whitelist_emit(rq, &wa);
}
int intel_ctx_workarounds_emit(struct i915_request *rq)
diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h
index d9b0cc5afb4a..90503ad0c226 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/intel_workarounds.h
@@ -12,6 +12,4 @@ int intel_ctx_workarounds_emit(struct i915_request *rq);
void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv);
-int intel_whitelist_workarounds_apply(struct intel_engine_cs *engine);
-
#endif
--
2.17.0
More information about the Intel-gfx-trybot mailing list