[PATCH 05/19] drm/i915: Force PD reload on any PD update

Chris Wilson <chris at chris-wilson.co.uk>
Wed Aug 28 15:36:21 UTC 2019


Use a serial number to track address space updates, as this is more
robust than a set of dirty flags stored on the vm (which may in fact be
shared by multiple contexts). Bump the serial on any insertion into the
vm; then, during request construction, compare the serial stashed
inside the context against the current serial of the address space and
force a PD refresh as required.
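
In outline, the scheme reduces to the following (a simplified sketch
collapsing the execlists and legacy ring paths below into one):

	/* i915_vma_bind(): any insertion makes cached PDs stale. */
	vma->vm->serial++;

	/* Request construction / context switch: if the context last
	 * sampled an older serial, force a PD reload.
	 */
	if (ce->vm->serial != ce->vm_serial)
		hw_flags = MI_FORCE_RESTORE;

	/* After emitting the reload, the context's view is current. */
	ce->vm_serial = ce->vm->serial;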

The challenge with respect to future asynchronous PD updates is making
sure we flag the change in the address space, i.e. bump the serial,
prior to request construction.
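
That is, any such async path must bump the serial with the same
ordering as i915_vma_bind() below (sketch only; queue_async_pd_update()
is a hypothetical helper, not part of this series):

	queue_async_pd_update(vm);	/* hypothetical async PD write */
	vm->serial++;			/* before any request construction
					 * samples ce->vm_serial */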

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gem/i915_gem_context.c   |  1 +
 drivers/gpu/drm/i915/gt/intel_context_types.h |  1 +
 drivers/gpu/drm/i915/gt/intel_lrc.c           |  5 ++-
 drivers/gpu/drm/i915/gt/intel_ringbuffer.c    | 40 ++++++++-----------
 drivers/gpu/drm/i915/i915_gem_gtt.c           | 16 +-------
 drivers/gpu/drm/i915/i915_gem_gtt.h           |  2 +-
 drivers/gpu/drm/i915/i915_vma.c               |  3 ++
 7 files changed, 27 insertions(+), 41 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 1f735ca9b173..d7c17a8dadff 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -465,6 +465,7 @@ static void __apply_ppgtt(struct intel_context *ce, void *vm)
 {
 	i915_vm_put(ce->vm);
 	ce->vm = i915_vm_get(vm);
+	ce->vm_serial = 0;
 }
 
 static struct i915_address_space *
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index bf9cedfccbf0..0a610355affb 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -55,6 +55,7 @@ struct intel_context {
 
 	unsigned long flags;
 #define CONTEXT_ALLOC_BIT 0
+	unsigned int vm_serial;
 
 	u32 *lrc_reg_state;
 	u64 lrc_desc;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 7f33f245c2df..c0b5185879ca 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1873,9 +1873,10 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
 
 static int execlists_request_alloc(struct i915_request *request)
 {
+	struct intel_context *ce = request->hw_context;
 	int ret;
 
-	GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
+	GEM_BUG_ON(!intel_context_is_pinned(ce));
 
 	/*
 	 * Flush enough space to reduce the likelihood of waiting after
@@ -1897,6 +1898,8 @@ static int execlists_request_alloc(struct i915_request *request)
 	if (ret)
 		return ret;
 
+	ce->vm_serial = ce->vm->serial;
+
 	request->reserved_space -= EXECLISTS_REQUEST_SIZE;
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index fdddda75eb41..d6e2f5b3dd62 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -1742,28 +1742,23 @@ static int remap_l3(struct i915_request *rq)
 static int switch_context(struct i915_request *rq)
 {
 	struct intel_engine_cs *engine = rq->engine;
-	struct i915_address_space *vm = vm_alias(rq->hw_context);
-	unsigned int unwind_mm = 0;
+	struct intel_context *ce = rq->hw_context;
+	struct i915_address_space *vm = vm_alias(ce);
 	u32 hw_flags = 0;
 	int ret;
 
 	GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
 
 	if (vm) {
-		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
-
-		ret = load_pd_dir(rq, ppgtt);
+		ret = load_pd_dir(rq, i915_vm_to_ppgtt(vm));
 		if (ret)
 			return ret;
 
-		if (ppgtt->pd_dirty_engines & engine->mask) {
-			unwind_mm = engine->mask;
-			ppgtt->pd_dirty_engines &= ~unwind_mm;
+		if (vm->serial != ce->vm_serial)
 			hw_flags = MI_FORCE_RESTORE;
-		}
 	}
 
-	if (rq->hw_context->state) {
+	if (ce->state) {
 		GEM_BUG_ON(engine->id != RCS0);
 
 		/*
@@ -1778,17 +1773,21 @@ static int switch_context(struct i915_request *rq)
 
 		ret = mi_set_context(rq, hw_flags);
 		if (ret)
-			goto err_mm;
+			return ret;
 	}
 
+	ret = remap_l3(rq);
+	if (ret)
+		return ret;
+
 	if (vm) {
 		ret = engine->emit_flush(rq, EMIT_INVALIDATE);
 		if (ret)
-			goto err_mm;
+			return ret;
 
 		ret = flush_pd_dir(rq);
 		if (ret)
-			goto err_mm;
+			return ret;
 
 		/*
 		 * Not only do we need a full barrier (post-sync write) after
@@ -1800,23 +1799,16 @@ static int switch_context(struct i915_request *rq)
 		 */
 		ret = engine->emit_flush(rq, EMIT_INVALIDATE);
 		if (ret)
-			goto err_mm;
+			return ret;
 
 		ret = engine->emit_flush(rq, EMIT_FLUSH);
 		if (ret)
-			goto err_mm;
-	}
+			return ret;
 
-	ret = remap_l3(rq);
-	if (ret)
-		goto err_mm;
+		ce->vm_serial = vm->serial;
+	}
 
 	return 0;
-
-err_mm:
-	if (unwind_mm)
-		i915_vm_to_ppgtt(vm)->pd_dirty_engines |= unwind_mm;
-	return ret;
 }
 
 static int ring_request_alloc(struct i915_request *request)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index c94dfa562247..71db7459e598 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -823,17 +823,6 @@ release_pd_entry(struct i915_page_directory * const pd,
 	return free;
 }
 
-/*
- * PDE TLBs are a pain to invalidate on GEN8+. When we modify
- * the page table structures, we mark them dirty so that
- * context switching/execlist queuing code takes extra steps
- * to ensure that tlbs are flushed.
- */
-static void mark_tlbs_dirty(struct i915_ppgtt *ppgtt)
-{
-	ppgtt->pd_dirty_engines = ALL_ENGINES;
-}
-
 static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
 {
 	struct drm_i915_private *dev_priv = ppgtt->vm.i915;
@@ -1735,10 +1724,8 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 	}
 	spin_unlock(&pd->lock);
 
-	if (flush) {
-		mark_tlbs_dirty(&ppgtt->base);
+	if (flush)
 		gen6_ggtt_invalidate(vm->gt->ggtt);
-	}
 
 	goto out;
 
@@ -1833,7 +1820,6 @@ static int pd_vma_bind(struct i915_vma *vma,
 	gen6_for_all_pdes(pt, ppgtt->base.pd, pde)
 		gen6_write_pde(ppgtt, pde, pt);
 
-	mark_tlbs_dirty(&ppgtt->base);
 	gen6_ggtt_invalidate(ggtt);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index b97a47fc7a68..dcc3d4e88a45 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -305,6 +305,7 @@ struct i915_address_space {
 	u64 total;		/* size addr space maps (ex. 2GB for ggtt) */
 	u64 reserved;		/* size addr space reserved */
 
+	unsigned int serial;
 	bool closed;
 
 	struct mutex mutex; /* protects vma and our lists */
@@ -422,7 +423,6 @@ struct i915_ggtt {
 struct i915_ppgtt {
 	struct i915_address_space vm;
 
-	intel_engine_mask_t pd_dirty_engines;
 	struct i915_page_directory *pd;
 };
 
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index e0e677b2a3a9..ddb03cbcbf60 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -340,6 +340,9 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 	if (ret)
 		return ret;
 
+	/* Must be incremented prior to request construction */
+	vma->vm->serial++;
+
 	vma->flags |= bind_flags;
 	return 0;
 }
-- 
2.23.0