[Intel-gfx] [PATCH v5 21/25] drm/i915/guc: Drop pin count check trick between sched_disable and re-pin
Daniele Ceraolo Spurio
daniele.ceraolospurio at intel.com
Thu Sep 2 00:50:18 UTC 2021
From: Matthew Brost <matthew.brost at intel.com>
Drop the pin count check trick between a sched_disable and a re-pin;
instead, rely on ce->guc_state.lock and a counter of the number of
committed requests to determine whether scheduling should be disabled
on the context. A standalone sketch of the resulting locking pattern
follows the diffstat below.
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio at intel.com>
Signed-off-by: Matthew Brost <matthew.brost at intel.com>
---
drivers/gpu/drm/i915/gt/intel_context_types.h | 2 +
.../gpu/drm/i915/gt/uc/intel_guc_submission.c | 53 +++++++++++--------
2 files changed, 34 insertions(+), 21 deletions(-)
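
To make the new scheme concrete, here is a minimal, self-contained
sketch of the pattern the patch adopts. This is not the i915 code and
is not part of the patch; every name (toy_context, toy_request_alloc
and so on) is a hypothetical stand-in, with a userspace pthread mutex
standing in for ce->guc_state.lock:

/*
 * Minimal sketch (hypothetical, not the i915 code) of the pattern the
 * patch adopts: the committed-request count is only read and written
 * under one lock, so a scheduling-disable can never race with a
 * request that was committed concurrently.
 */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

struct toy_context {
	pthread_mutex_t lock;	/* stands in for ce->guc_state.lock */
	int committed;		/* stands in for number_committed_requests */
	bool enabled;		/* stands in for the context-enabled state bit */
};

/* Request creation commits the request to the context under the lock. */
static void toy_request_alloc(struct toy_context *ce)
{
	pthread_mutex_lock(&ce->lock);
	ce->committed++;	/* cf. incr_context_committed_requests() */
	pthread_mutex_unlock(&ce->lock);
}

/* Request retirement drops the commitment, again under the lock. */
static void toy_request_retire(struct toy_context *ce)
{
	pthread_mutex_lock(&ce->lock);
	ce->committed--;	/* cf. decr_context_committed_requests() */
	assert(ce->committed >= 0);	/* cf. the GEM_BUG_ON */
	pthread_mutex_unlock(&ce->lock);
}

/*
 * Disable scheduling only if nothing was committed in the meantime;
 * otherwise back off, mirroring the context_has_committed_requests()
 * check that replaces the old atomic_add_unless() pin-count trick.
 */
static bool toy_sched_disable(struct toy_context *ce)
{
	bool disable;

	pthread_mutex_lock(&ce->lock);
	disable = ce->enabled && !ce->committed;
	if (disable)
		ce->enabled = false;
	pthread_mutex_unlock(&ce->lock);
	return disable;
}

The design point is that correctness now comes from plain mutual
exclusion rather than from encoding extra state in the pin count,
which removes the magic two-reference trick from the sched_disable
path.
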
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index d2f798ef678c..3a5d98e908f4 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -169,6 +169,8 @@ struct intel_context {
struct list_head fences;
/* GuC context blocked fence */
struct i915_sw_fence blocked;
+ /* GuC committed requests */
+ int number_committed_requests;
} guc_state;
struct {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 987aec197658..440ddcaae627 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -249,6 +249,25 @@ static inline void decr_context_blocked(struct intel_context *ce)
ce->guc_state.sched_state -= SCHED_STATE_BLOCKED;
}
+static inline bool context_has_committed_requests(struct intel_context *ce)
+{
+ return !!ce->guc_state.number_committed_requests;
+}
+
+static inline void incr_context_committed_requests(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ++ce->guc_state.number_committed_requests;
+ GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
+}
+
+static inline void decr_context_committed_requests(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ --ce->guc_state.number_committed_requests;
+ GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
+}
+
static inline bool context_guc_id_invalid(struct intel_context *ce)
{
return ce->guc_id == GUC_INVALID_LRC_ID;
@@ -1749,24 +1768,18 @@ static void guc_context_sched_disable(struct intel_context *ce)
spin_lock_irqsave(&ce->guc_state.lock, flags);
/*
- * We have to check if the context has been disabled by another thread.
- * We also have to check if the context has been pinned again as another
- * pin operation is allowed to pass this function. Checking the pin
- * count, within ce->guc_state.lock, synchronizes this function with
- * guc_request_alloc ensuring a request doesn't slip through the
- * 'context_pending_disable' fence. Checking within the spin lock (can't
- * sleep) ensures another process doesn't pin this context and generate
- * a request before we set the 'context_pending_disable' flag here.
+ * We have to check if the context has been disabled by another thread,
+ * check if submission has been disabled to seal a race with reset, and
+ * finally check if any more requests have been committed to the
+ * context, ensuring that a request doesn't slip through the
+ * 'context_pending_disable' fence.
*/
- if (unlikely(!context_enabled(ce) || submission_disabled(guc))) {
+ if (unlikely(!context_enabled(ce) || submission_disabled(guc) ||
+ context_has_committed_requests(ce))) {
clr_context_enabled(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
goto unpin;
}
- if (unlikely(atomic_add_unless(&ce->pin_count, -2, 2))) {
- spin_unlock_irqrestore(&ce->guc_state.lock, flags);
- return;
- }
guc_id = prep_context_pending_disable(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
@@ -1796,6 +1809,7 @@ static void __guc_context_destroy(struct intel_context *ce)
ce->guc_prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
ce->guc_prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
ce->guc_prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
+ GEM_BUG_ON(ce->guc_state.number_committed_requests);
lrc_fini(ce);
intel_context_fini(ce);
@@ -2026,6 +2040,10 @@ static void remove_from_context(struct i915_request *rq)
spin_unlock_irq(&ce->guc_active.lock);
+ spin_lock_irq(&ce->guc_state.lock);
+ decr_context_committed_requests(ce);
+ spin_unlock_irq(&ce->guc_state.lock);
+
atomic_dec(&ce->guc_id_ref);
i915_request_notify_execute_cb_imm(rq);
}
@@ -2176,15 +2194,7 @@ static int guc_request_alloc(struct i915_request *rq)
* schedule enable or context registration if either G2H is pending
* respectively. Once a G2H returns, the fence that is blocking these
* requests is released (see guc_signal_context_fence).
- *
- * We can safely check the below fields outside of the lock as it isn't
- * possible for these fields to transition from being clear to set but
- * converse is possible, hence the need for the check within the lock.
*/
- if (likely(!context_wait_for_deregister_to_register(ce) &&
- !context_pending_disable(ce)))
- return 0;
-
spin_lock_irqsave(&ce->guc_state.lock, flags);
if (context_wait_for_deregister_to_register(ce) ||
context_pending_disable(ce)) {
@@ -2193,6 +2203,7 @@ static int guc_request_alloc(struct i915_request *rq)
list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
}
+ incr_context_committed_requests(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
return 0;
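
For completeness, the guc_request_alloc() side above can be sketched
in the same hypothetical toy model (reusing toy_context from the
earlier sketch): the pending-G2H check and the commit count are
handled under one lock, so no request can slip through the
'context_pending_disable' fence. None of these names exist in the
driver:

struct toy_request {
	struct toy_request *next;	/* cf. rq->guc_fence_link */
	bool blocked;			/* cf. the sw fence blocking the rq */
};

static void toy_request_alloc_fenced(struct toy_context *ce,
				     struct toy_request *rq,
				     bool pending_g2h,
				     struct toy_request **fences)
{
	pthread_mutex_lock(&ce->lock);
	if (pending_g2h) {
		/* Park the request until the G2H handler releases it. */
		rq->blocked = true;
		rq->next = *fences;
		*fences = rq;
	}
	ce->committed++;	/* cf. incr_context_committed_requests() */
	pthread_mutex_unlock(&ce->lock);
}
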
--
2.25.1