[PATCH 4/4] v2
Chris Wilson
chris at chris-wilson.co.uk
Wed Nov 28 13:20:42 UTC 2018
---
drivers/gpu/drm/i915/intel_lrc.c | 134 ++++++++++++++++---------------
1 file changed, 68 insertions(+), 66 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 50b871e922d3..d5953bb58afe 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1214,64 +1214,6 @@ execlists_context_pin(struct intel_engine_cs *engine,
return __execlists_context_pin(engine, ctx, ce);
}

-static int emit_pdps(struct i915_request *rq)
-{
- const struct intel_engine_cs * const engine = rq->engine;
- struct i915_hw_ppgtt * const ppgtt = rq->gem_context->ppgtt;
- const unsigned int num_rings = INTEL_INFO(rq->i915)->num_rings;
- struct intel_engine_cs *other;
- enum intel_engine_id id;
- u32 *cs;
- int i;
-
- if (!(ppgtt->pd_dirty_rings & intel_engine_flag(engine)))
- return 0;
-
- cs = intel_ring_begin(rq, 4 * (GEN8_3LVL_PDPES + num_rings) + 8);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /*
- * Force the GPU (not just the local engine/powerwell!) to remain awake,
- * or else we may kill the machine with "timed out waiting for
- * forcewake ack request".
- */
-
- *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
- for_each_engine(other, rq->i915, id) {
- *cs++ = i915_mmio_reg_offset(RING_PSMI_CTL(other->mmio_base));
- *cs++ = _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE);
- }
-
- *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES);
- for (i = GEN8_3LVL_PDPES; i--; ) {
- const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
-
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
- *cs++ = upper_32_bits(pd_daddr);
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
- *cs++ = lower_32_bits(pd_daddr);
- }
- *cs++ = MI_NOOP;
-
- /* Posting read to flush the mmio before letting the GPU sleep again */
- *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, 0));
- *cs++ = i915_ggtt_offset(engine->i915->gt.scratch);
- *cs++ = 0;
-
- *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
- for_each_engine(other, rq->i915, id) {
- *cs++ = i915_mmio_reg_offset(RING_PSMI_CTL(other->mmio_base));
- *cs++ = _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE);
- }
-
- intel_ring_advance(rq, cs);
-
- ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
- return 0;
-}
-
static int execlists_request_alloc(struct i915_request *request)
{
int ret;
@@ -1296,14 +1238,6 @@ static int execlists_request_alloc(struct i915_request *request)
* to cancel/unwind this request now.
*/

- if (!i915_vm_is_48bit(&request->gem_context->ppgtt->vm)) {
- GEM_BUG_ON(intel_vgpu_active(request->i915));
-
- ret = emit_pdps(request);
- if (ret)
- return ret;
- }
-
request->reserved_space -= EXECLISTS_REQUEST_SIZE;
return 0;
}
@@ -1884,11 +1818,79 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
atomic_read(&execlists->tasklet.count));
}
+static int emit_pdps(struct i915_request *rq)
+{
+ const struct intel_engine_cs * const engine = rq->engine;
+ struct i915_hw_ppgtt * const ppgtt = rq->gem_context->ppgtt;
+ const unsigned int num_rings = INTEL_INFO(rq->i915)->num_rings;
+ struct intel_engine_cs *other;
+ enum intel_engine_id id;
+ u32 *cs;
+ int i;
+
+ if (!(ppgtt->pd_dirty_rings & intel_engine_flag(engine)))
+ return 0;
+
+ cs = intel_ring_begin(rq, 4 * (GEN8_3LVL_PDPES + num_rings) + 8);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /*
+ * Force the GPU (not just the local engine/powerwell!) to remain awake,
+ * or else we may kill the machine with "timed out waiting for
+ * forcewake ack request".
+ */
+
+ *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
+ for_each_engine(other, rq->i915, id) {
+ *cs++ = i915_mmio_reg_offset(RING_PSMI_CTL(other->mmio_base));
+ *cs++ = _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE);
+ }
+
+ *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES);
+ for (i = GEN8_3LVL_PDPES; i--; ) {
+ const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
+ *cs++ = upper_32_bits(pd_daddr);
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
+ *cs++ = lower_32_bits(pd_daddr);
+ }
+ *cs++ = MI_NOOP;
+
+ /* Posting read to flush the mmio before letting the GPU sleep again */
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, 0));
+ *cs++ = i915_ggtt_offset(engine->i915->gt.scratch);
+ *cs++ = 0;
+
+ *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
+ for_each_engine(other, rq->i915, id) {
+ *cs++ = i915_mmio_reg_offset(RING_PSMI_CTL(other->mmio_base));
+ *cs++ = _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE);
+ }
+
+ intel_ring_advance(rq, cs);
+
+ ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
+ return 0;
+}
+
static int gen8_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
const unsigned int flags)
{
u32 *cs;
+ int ret;
+
+ /* XXX review request allocation vs vma pinning order */
+ if (!i915_vm_is_48bit(&rq->gem_context->ppgtt->vm)) {
+ GEM_BUG_ON(intel_vgpu_active(rq->i915));
+
+ ret = emit_pdps(rq);
+ if (ret)
+ return ret;
+ }

cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
--
2.20.0.rc1
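For reviewers counting ring space: emit_pdps() is moved verbatim, so the budget
it requests from intel_ring_begin() is unchanged by this patch. Below is a small
standalone sketch (not part of the diff) of how that budget of
4 * (GEN8_3LVL_PDPES + num_rings) + 8 dwords is spent. The helper name
pdps_dwords is invented for illustration only, and GEN8_3LVL_PDPES is taken to
be 4, its value in the i915 headers of this era.

/*
 * Reviewer's aid, not part of the patch: accounting for the dword
 * budget emit_pdps() requests from intel_ring_begin().
 */
#include <stdio.h>

#define GEN8_3LVL_PDPES 4	/* assumed value from the i915 headers */

static unsigned int pdps_dwords(unsigned int num_rings)
{
	unsigned int dw = 0;

	dw += 1 + 2 * num_rings;	/* LRI: set GEN6_PSMI_SLEEP_MSG_DISABLE on every engine */
	dw += 1 + 4 * GEN8_3LVL_PDPES;	/* LRI: UDW + LDW register/value pair per PDP */
	dw += 1;			/* MI_NOOP after the PDP writes */
	dw += 4;			/* MI_STORE_REGISTER_MEM_GEN8 posting read of PDP0 LDW */
	dw += 1 + 2 * num_rings;	/* LRI: clear GEN6_PSMI_SLEEP_MSG_DISABLE again */

	return dw;			/* equals 4 * (GEN8_3LVL_PDPES + num_rings) + 8 */
}

int main(void)
{
	/* e.g. 8 rings: 56 dwords, the same count intel_ring_begin() is asked for */
	printf("%u\n", pdps_dwords(8));
	return 0;
}

Note also that RING_PSMI_CTL is a masked register, so the _MASKED_BIT_ENABLE /
_MASKED_BIT_DISABLE writes in the two LRI brackets only touch the
GEN6_PSMI_SLEEP_MSG_DISABLE bit on each engine.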