[PATCH 73/73] drm/i915: Suspend and resume individual requests
Chris Wilson
chris@chris-wilson.co.uk
Tue Jan 12 13:58:16 UTC 2021
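
Allow an individual request to be suspended and later resumed.

A new I915_FENCE_FLAG_SUSPENDED flag marks a request that should not be
executed. i915_request_suspend() sets the flag and, if the request is
not yet active on HW, parks it on the engine's hold queue with
__intel_engine_suspend_request(); if the request is already active on
HW, an engine pulse is sent so that it is preempted and unsubmitted,
and __intel_engine_rewind_requests() then spots the flag and transfers
the request onto the hold queue instead of back into the priority
queue. i915_request_resume() clears the flag and, via
__intel_engine_resume_request(), returns the request, together with any
children held along with it, to the priority queue.

__intel_engine_suspend_request() and __intel_engine_resume_request()
now tolerate requests that are already on hold (suspend returns false)
or not on hold (resume becomes a no-op) instead of asserting, so that
flag-based suspension and explicit holds can overlap.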
---
drivers/gpu/drm/i915/i915_request.h | 20 +++++++
drivers/gpu/drm/i915/i915_scheduler.c | 78 +++++++++++++++++++++------
2 files changed, 83 insertions(+), 15 deletions(-)
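
A minimal, hypothetical usage sketch of the new interface (not part of
the patch; pause_and_fixup() is an illustrative caller and is assumed
to already hold a reference to the request):

static void pause_and_fixup(struct i915_request *rq)
{
	/* Park the request so it cannot (re)start on HW. */
	i915_request_suspend(rq);

	/* ... inspect or adjust state while the request is held ... */

	/* Return the request, and any children held with it, to the queue. */
	i915_request_resume(rq);
}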
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index a74d8204fca9..c1cc7c15e629 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -137,6 +137,11 @@ enum {
* the GPU. Here we track such boost requests on a per-request basis.
*/
I915_FENCE_FLAG_BOOST,
+
+ /*
+ * I915_FENCE_FLAG_SUSPENDED - this request should not be executed
+ */
+ I915_FENCE_FLAG_SUSPENDED,
};
/**
@@ -591,6 +596,21 @@ static inline void i915_request_clear_hold(struct i915_request *rq)
clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}
+static inline bool i915_request_is_suspended(const struct i915_request *rq)
+{
+ return unlikely(test_bit(I915_FENCE_FLAG_SUSPENDED, &rq->fence.flags));
+}
+
+static inline bool i915_request_set_suspended(struct i915_request *rq)
+{
+ return !test_and_set_bit(I915_FENCE_FLAG_SUSPENDED, &rq->fence.flags);
+}
+
+static inline bool i915_request_clear_suspended(struct i915_request *rq)
+{
+ return test_and_clear_bit(I915_FENCE_FLAG_SUSPENDED, &rq->fence.flags);
+}
+
static inline struct intel_timeline *
i915_request_timeline(const struct i915_request *rq)
{
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 076ae795d852..f285efb9c39f 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -8,6 +8,7 @@
#include <linux/mutex.h>
#include <linux/prandom.h>
+#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_ring.h"
#include "gt/intel_lrc_reg.h"
@@ -1019,6 +1020,44 @@ void i915_request_enqueue(struct i915_request *rq)
i915_sched_kick(se);
}
+void i915_request_suspend(struct i915_request *rq)
+{
+ struct intel_engine_cs *engine;
+ unsigned long flags;
+
+ if (i915_request_is_suspended(rq))
+ return;
+
+ engine = lock_engine_irqsave(rq, flags);
+ if (!i915_request_set_suspended(rq))
+ goto unlock;
+
+ if (!i915_request_is_active(rq))
+ __intel_engine_suspend_request(engine, rq);
+
+unlock:
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+
+ if (i915_request_is_active(rq))
+ intel_engine_pulse(engine);
+}
+
+void i915_request_resume(struct i915_request *rq)
+{
+ struct intel_engine_cs *engine;
+ unsigned long flags;
+
+ if (!i915_request_is_suspended(rq))
+ return;
+
+ engine = lock_engine_irqsave(rq, flags);
+
+ if (i915_request_clear_suspended(rq))
+ __intel_engine_resume_request(engine, rq);
+
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+}
+
struct i915_request *
__intel_engine_rewind_requests(struct intel_engine_cs *engine)
{
@@ -1038,22 +1077,26 @@ __intel_engine_rewind_requests(struct intel_engine_cs *engine)
__i915_request_unsubmit(rq);
- if (__i915_request_has_started(rq)) {
- rq->sched.deadline =
- min(rq_deadline(rq),
- next_virtual_deadline(rq_prio(rq)));
- }
- GEM_BUG_ON(rq_deadline(rq) == I915_DEADLINE_NEVER);
+ if (i915_request_is_suspended(rq)) {
+ __intel_engine_suspend_request(engine, rq);
+ } else {
+ if (__i915_request_has_started(rq)) {
+ rq->sched.deadline =
+ min(rq_deadline(rq),
+ next_virtual_deadline(rq_prio(rq)));
+ }
+ GEM_BUG_ON(rq_deadline(rq) == I915_DEADLINE_NEVER);
- if (rq_deadline(rq) != deadline) {
- deadline = rq_deadline(rq);
- pl = lookup_priolist(engine, deadline);
- }
- GEM_BUG_ON(i915_sched_is_idle(&engine->active));
+ if (rq_deadline(rq) != deadline) {
+ deadline = rq_deadline(rq);
+ pl = lookup_priolist(engine, deadline);
+ }
+ GEM_BUG_ON(i915_sched_is_idle(&engine->active));
- GEM_BUG_ON(i915_request_in_priority_queue(rq));
- list_move(&rq->sched.link, pl);
- set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ GEM_BUG_ON(i915_request_in_priority_queue(rq));
+ list_move(&rq->sched.link, pl);
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ }
/* Check in case we rollback so far we wrap [size/2] */
if (intel_ring_direction(rq->ring,
@@ -1073,12 +1116,14 @@ bool __intel_engine_suspend_request(struct intel_engine_cs *engine,
LIST_HEAD(list);
lockdep_assert_held(&engine->active.lock);
- GEM_BUG_ON(i915_request_on_hold(rq));
GEM_BUG_ON(rq->engine != engine);
if (__i915_request_is_complete(rq)) /* too late! */
return false;
+ if (i915_request_on_hold(rq))
+ return false;
+
/*
* Transfer this request onto the hold queue to prevent it
* being resubmitted to HW (and potentially completed) before we have
@@ -1150,6 +1195,9 @@ void __intel_engine_resume_request(struct intel_engine_cs *engine,
lockdep_assert_held(&engine->active.lock);
+ if (!i915_request_on_hold(rq))
+ return;
+
/*
* Move this request back to the priority queue, and all of its
* children and grandchildren that were suspended along with it.
--
2.20.1