[PATCH 1/2] drm/i915/perf: Assert locking for i915_oa_init_reg_state()

Chris Wilson <chris@chris-wilson.co.uk>
Fri Aug 30 20:25:42 UTC 2019


We use the context->pin_mutex to serialise updates to the OA config and
the register values written into each new context. Document this
relationship and assert that we do hold the context->pin_mutex, as used
by gen8_configure_all_contexts() to serialise updates to the OA config
itself.

v2: Add a white-lie for when we call intel_gt_resume() from init.
v3: Lie while we have the context pinned inside atomic reset.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com> #v1
---
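[Not part of the commit message] The "white lie" mentioned in v2/v3 is the
usual lockdep annotation trick: the context is pinned, so nothing can race
our rewrite of its image, and instead of taking ce->pin_mutex for real we
only tell lockdep that we own it. A minimal sketch of the shape used in the
resume/reset paths below; reset_pinned_context() is an illustrative
placeholder, while the individual calls follow the hunks in this patch:

static void reset_pinned_context(struct intel_context *ce)
{
	/*
	 * Sketch only: pinning already guarantees exclusive access to the
	 * context image, so we annotate ownership of ce->pin_mutex for
	 * lockdep rather than acquiring the mutex itself.
	 */
	GEM_BUG_ON(!intel_context_is_pinned(ce));

	mutex_acquire(&ce->pin_mutex.dep_map, 0, 0, _THIS_IP_);
	ce->ops->reset(ce); /* rewrite the image under proclaimed exclusivity */
	mutex_release(&ce->pin_mutex.dep_map, 0, _THIS_IP_);
}
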
 drivers/gpu/drm/i915/gt/intel_gt_pm.c |  7 ++++++-
 drivers/gpu/drm/i915/gt/intel_lrc.c   | 10 ++++++++++
 drivers/gpu/drm/i915/i915_perf.c      |  3 +++
 3 files changed, 19 insertions(+), 1 deletion(-)
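
For the consumer side of the contract, the new lockdep_assert_held() in
i915_oa_init_reg_state() is the documentation-in-code: whoever writes the
per-context OA register state must either hold ce->pin_mutex for real (as
gen8_configure_all_contexts() does) or carry the annotation above. A minimal
sketch of that shape; write_oa_state() is an illustrative placeholder, not
an i915 symbol:

static void write_oa_state(struct intel_context *ce, u32 *regs)
{
	/*
	 * Sketch only: updates to the OA config and the register values
	 * stamped into each context image are serialised by ce->pin_mutex.
	 */
	lockdep_assert_held(&ce->pin_mutex);

	/* ... stamp the current OA config into regs[] ... */
}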

diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 1363e069ec83..aa6cf0152ce7 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -6,6 +6,7 @@
 
 #include "i915_drv.h"
 #include "i915_params.h"
+#include "intel_context.h"
 #include "intel_engine_pm.h"
 #include "intel_gt.h"
 #include "intel_gt_pm.h"
@@ -142,8 +143,12 @@ int intel_gt_resume(struct intel_gt *gt)
 		intel_engine_pm_get(engine);
 
 		ce = engine->kernel_context;
-		if (ce)
+		if (ce) {
+			GEM_BUG_ON(!intel_context_is_pinned(ce));
+			mutex_acquire(&ce->pin_mutex.dep_map, 0, 0, _THIS_IP_);
 			ce->ops->reset(ce);
+			mutex_release(&ce->pin_mutex.dep_map, 0, _THIS_IP_);
+		}
 
 		engine->serial++; /* kernel context lost */
 		err = engine->resume(engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 0e28263c7b87..87b7473a6dfb 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -2383,6 +2383,11 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	ce = rq->hw_context;
 	GEM_BUG_ON(i915_active_is_idle(&ce->active));
 	GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
+
+	/* Proclaim we have exclusive access to the context image! */
+	GEM_BUG_ON(!intel_context_is_pinned(ce));
+	mutex_acquire(&ce->pin_mutex.dep_map, 2, 0, _THIS_IP_);
+
 	rq = active_request(rq);
 	if (!rq) {
 		ce->ring->head = ce->ring->tail;
@@ -2442,6 +2447,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 		  engine->name, ce->ring->head, ce->ring->tail);
 	intel_ring_update_space(ce->ring);
 	__execlists_update_reg_state(ce, engine);
+	mutex_release(&ce->pin_mutex.dep_map, 0, _THIS_IP_);
 
 unwind:
 	/* Push back any incomplete requests for replay after the reset. */
@@ -3920,6 +3926,9 @@ void intel_lr_context_reset(struct intel_engine_cs *engine,
 			    u32 head,
 			    bool scrub)
 {
+	GEM_BUG_ON(!intel_context_is_pinned(ce));
+	mutex_acquire(&ce->pin_mutex.dep_map, 2, 0, _THIS_IP_);
+
 	/*
 	 * We want a simple context + ring to execute the breadcrumb update.
 	 * We cannot rely on the context being intact across the GPU hang,
@@ -3944,6 +3953,7 @@ void intel_lr_context_reset(struct intel_engine_cs *engine,
 	intel_ring_update_space(ce->ring);
 
 	__execlists_update_reg_state(ce, engine);
+	mutex_release(&ce->pin_mutex.dep_map, 0, _THIS_IP_);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 2c9f46e12622..c1b764233761 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -2305,6 +2305,9 @@ void i915_oa_init_reg_state(struct intel_engine_cs *engine,
 {
 	struct i915_perf_stream *stream;
 
+	/* perf.exclusive_stream serialised by gen8_configure_all_contexts() */
+	lockdep_assert_held(&ce->pin_mutex);
+
 	if (engine->class != RENDER_CLASS)
 		return;
 
-- 
2.23.0
