[PATCH v5 4/5] drm/i915: Throttle for ringspace prior to taking the timeline mutex
Andi Shyti
andi.shyti at linux.intel.com
Wed Apr 12 11:33:07 UTC 2023
From: Chris Wilson <chris at chris-wilson.co.uk>
Before taking exclusive ownership of the ring for emitting the request,
wait for space in the ring to become available. This allows others to
take the timeline->mutex and make forward progress while userspace is
blocked.
In particular, this allows regular clients to issue requests on the
kernel context, potentially filling the ring, while still allowing the
higher-priority heartbeats and pulses to be submitted without being
blocked by the less critical work.
Signed-off-by: Chris Wilson <chris.p.wilson at linux.intel.com>
Cc: Maciej Patelczyk <maciej.patelczyk at intel.com>
Cc: stable at vger.kernel.org
Signed-off-by: Andi Shyti <andi.shyti at linux.intel.com>
Reviewed-by: Andrzej Hajda <andrzej.hajda at intel.com>
---
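A quick standalone model of the space heuristic, for review only; it is not
driver code. The mock_ring/mock_request types, the ring_space() helper and
the array walk below are simplified stand-ins invented for illustration, and
ring_space() is a generic circular-buffer formula rather than the driver's
__intel_ring_space(). Only the shape of the loop (the SZ_1K fast path, the
newest-first walk over the timeline and the half-ring threshold) mirrors the
intel_context_throttle() added by this patch.

/* ring_throttle_sketch.c: illustration only, not i915 driver code. */
#include <stdbool.h>
#include <stdio.h>

struct mock_ring {
        unsigned int size;      /* ring size in bytes, power of two */
        unsigned int emit;      /* offset where the next request will be emitted */
        unsigned int space;     /* last known free space */
};

struct mock_request {
        unsigned int postfix;   /* ring offset released once this request retires */
        bool complete;
};

/* Generic circular-buffer free space with the read pointer at 'head'. */
static unsigned int ring_space(unsigned int head, unsigned int tail,
                               unsigned int size)
{
        return (head - tail - 1) & (size - 1);
}

/*
 * Index of the request to sleep on, or -1 when no throttling is needed.
 * reqs[] is ordered oldest to newest, so the loop runs backwards to mimic
 * list_for_each_entry_reverse() over the timeline's request list.
 */
static int throttle_point(const struct mock_ring *ring,
                          const struct mock_request *reqs, int nreq)
{
        if (ring->space >= 1024)        /* cf. the SZ_1K fast path */
                return -1;

        for (int i = nreq - 1; i >= 0; i--) {
                if (reqs[i].complete)   /* retirement is in order: older ones are done too */
                        break;

                /* Newest request whose retirement still leaves < half the ring free. */
                if (ring_space(reqs[i].postfix, ring->emit, ring->size) <
                    ring->size / 2)
                        return i;
        }

        return -1;
}

int main(void)
{
        struct mock_ring ring = { .size = 4096, .emit = 3900, .space = 128 };
        struct mock_request reqs[] = {
                { .postfix =  512, .complete = true  },
                { .postfix = 1024, .complete = false },
                { .postfix = 3584, .complete = false },
        };

        /* Prints 1: waiting for reqs[1] guarantees ~1.2KiB of the 4KiB ring. */
        printf("throttle on request %d\n", throttle_point(&ring, reqs, 3));
        return 0;
}

With the sample numbers above, the walk settles on reqs[1]; once that request
retires, at least ~1.2KiB of the 4KiB ring is free before the new request is
emitted.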
 drivers/gpu/drm/i915/gt/intel_context.c | 41 +++++++++++++++++++++++++
 drivers/gpu/drm/i915/gt/intel_context.h |  2 ++
 drivers/gpu/drm/i915/i915_request.c     |  3 ++
 3 files changed, 46 insertions(+)
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 2aa63ec521b89..59cd612a23561 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -626,6 +626,47 @@ bool intel_context_revoke(struct intel_context *ce)
 	return ret;
 }
 
+int intel_context_throttle(const struct intel_context *ce)
+{
+	const struct intel_ring *ring = ce->ring;
+	const struct intel_timeline *tl = ce->timeline;
+	struct i915_request *rq;
+	int err = 0;
+
+	if (READ_ONCE(ring->space) >= SZ_1K)
+		return 0;
+
+	rcu_read_lock();
+	list_for_each_entry_reverse(rq, &tl->requests, link) {
+		if (__i915_request_is_complete(rq))
+			break;
+
+		if (rq->ring != ring)
+			continue;
+
+		/* Wait until there will be enough space following that rq */
+		if (__intel_ring_space(rq->postfix,
+				       ring->emit,
+				       ring->size) < ring->size / 2) {
+			if (i915_request_get_rcu(rq)) {
+				rcu_read_unlock();
+
+				if (i915_request_wait(rq,
+						      I915_WAIT_INTERRUPTIBLE,
+						      MAX_SCHEDULE_TIMEOUT) < 0)
+					err = -EINTR;
+
+				rcu_read_lock();
+				i915_request_put(rq);
+			}
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	return err;
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftest_context.c"
 #endif
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index f2f79ff0dfd1d..c0db00ac6b950 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -233,6 +233,8 @@ static inline void intel_context_exit(struct intel_context *ce)
 	ce->ops->exit(ce);
 }
 
+int intel_context_throttle(const struct intel_context *ce);
+
 static inline struct intel_context *intel_context_get(struct intel_context *ce)
 {
 	kref_get(&ce->ref);
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 21032b3b9d330..0b7c6aede0c6b 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1057,6 +1057,9 @@ i915_request_create_locked(struct intel_context *ce)
 {
 	intel_context_assert_timeline_is_locked(ce->timeline);
 
+	if (intel_context_throttle(ce))
+		return ERR_PTR(-EINTR);
+
 	return __i915_request_create_locked(ce);
 }
--
2.39.2