[PATCH 70/73] active-request
Chris Wilson
chris at chris-wilson.co.uk
Tue Jan 12 13:58:13 UTC 2021
---
drivers/gpu/drm/i915/gt/intel_engine.h | 19 ----------
drivers/gpu/drm/i915/gt/intel_engine_cs.c | 37 +++----------------
.../drm/i915/gt/intel_execlists_submission.c | 24 ++++++++++++
.../gpu/drm/i915/gt/intel_ring_scheduler.c | 24 ++++++++++++
.../gpu/drm/i915/gt/intel_ring_submission.c | 21 +++++++++++
drivers/gpu/drm/i915/i915_scheduler_types.h | 2 +
6 files changed, 76 insertions(+), 51 deletions(-)
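For orientation (not part of the patch itself), here is a minimal standalone sketch of the dispatch pattern the diff below introduces: each submission backend installs an active_request() hook on its embedded i915_sched_engine, and the common helper i915_sched_engine_active_request() simply forwards to it. The struct layouts, the container_of() definition and the demo_active_request()/main() driver are simplified stand-ins for illustration only, not the i915 definitions:

/* Standalone illustration only -- simplified stand-ins, not the i915 structs. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct i915_request { int seqno; };

struct i915_sched_engine {
	/* backend-specific "find the active request" hook, as added below */
	struct i915_request *(*active_request)(struct i915_sched_engine *se);
};

struct intel_engine_cs {
	const char *name;
	struct i915_request last_submitted;	/* toy state for the demo */
	struct i915_sched_engine active;	/* embedded scheduler, as in the patch */
};

/* Common helper: forwards to whichever backend hook was installed. */
static struct i915_request *
i915_sched_engine_active_request(struct i915_sched_engine *se)
{
	if (!se->active_request)
		return NULL;

	return se->active_request(se);
}

/*
 * Example backend hook: recover the engine with container_of(), like the
 * execlists/ring callbacks in the diff, then report its "active" request.
 */
static struct i915_request *
demo_active_request(struct i915_sched_engine *se)
{
	struct intel_engine_cs *engine =
		container_of(se, struct intel_engine_cs, active);

	return &engine->last_submitted;
}

int main(void)
{
	struct intel_engine_cs engine = {
		.name = "rcs0",
		.last_submitted = { .seqno = 42 },
	};
	struct i915_request *rq;

	/* Backend setup installs its hook, cf. intel_execlists_submission_setup(). */
	engine.active.active_request = demo_active_request;

	rq = i915_sched_engine_active_request(&engine.active);
	printf("%s: active request seqno %d\n", engine.name, rq ? rq->seqno : -1);
	return 0;
}

With that shape in mind, the hunks below move the backend-specific search for the active request out of intel_engine_cs.c and behind this per-backend hook.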
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index fa257a305143..ce24ac6f58cb 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -103,25 +103,6 @@ execlists_num_ports(const struct intel_engine_execlists * const execlists)
return execlists->port_mask + 1;
}
-static inline struct i915_request *
-execlists_active(const struct intel_engine_execlists *execlists)
-{
- struct i915_request * const *cur, * const *old, *active;
-
- cur = READ_ONCE(execlists->active);
- smp_rmb(); /* pairs with overwrite protection in process_csb() */
- do {
- old = cur;
-
- active = READ_ONCE(*cur);
- cur = READ_ONCE(execlists->active);
-
- smp_rmb(); /* and complete the seqlock retry */
- } while (unlikely(cur != old));
-
- return active;
-}
-
static inline u32
intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index e25772d8b0cd..c21243106b0f 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1734,10 +1734,8 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
}
struct i915_request *
-intel_engine_find_active_request(struct intel_engine_cs *engine)
+i915_sched_engine_active_request(struct i915_sched_engine *se)
{
- struct i915_request *request, *active = NULL;
-
/*
* We are called by the error capture, reset and to dump engine
* state at random points in time. In particular, note that neither is
@@ -1749,36 +1747,11 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
* At all other times, we must assume the GPU is still running, but
* we only care about the snapshot of this moment.
*/
- lockdep_assert_held(&engine->active.lock);
+ lockdep_assert_held(&se->lock);
+ if (!se->active_request)
+ return NULL;
- rcu_read_lock();
- request = execlists_active(&engine->execlists);
- if (request) {
- struct intel_timeline *tl = request->context->timeline;
-
- list_for_each_entry_from_reverse(request, &tl->requests, link) {
- if (__i915_request_is_complete(request))
- break;
-
- active = request;
- }
- }
- rcu_read_unlock();
- if (active)
- return active;
-
- list_for_each_entry(request, &engine->active.requests, sched.link) {
- if (i915_request_completed(request))
- continue;
-
- if (!i915_request_started(request))
- break;
-
- active = request;
- break;
- }
-
- return active;
+ return se->active_request(se);
}
ktime_t intel_runtime_stats_get_busy_time(const struct intel_runtime_stats *st)
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 2387cde2426c..8ef6dd158423 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -2907,6 +2907,29 @@ static void rcs_submission_override(struct intel_engine_cs *engine)
}
}
+static struct i915_request *
+execlists_active_request(struct i915_sched_engine *se)
+{
+ struct intel_engine_cs *engine = container_of(se, typeof(*engine), active);
+ struct i915_request *rq, *active = NULL;
+
+ rcu_read_lock();
+ rq = *READ_ONCE(engine->execlists.active);
+ if (rq) {
+ struct intel_timeline *tl = rq->context->timeline;
+
+ list_for_each_entry_from_reverse(rq, &tl->requests, link) {
+ if (__i915_request_is_complete(rq))
+ break;
+
+ active = rq;
+ }
+ }
+ rcu_read_unlock();
+
+ return active;
+}
+
int intel_execlists_submission_setup(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const el = &engine->execlists;
@@ -2918,6 +2941,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
execlists_submission_tasklet, (unsigned long)engine);
timer_setup(&el->timer, execlists_timeslice, 0);
timer_setup(&el->preempt, execlists_preempt, 0);
+ engine->active.active_request = execlists_active_request;
logical_ring_default_vfuncs(engine);
logical_ring_default_irqs(engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
index 5080f106f913..21793a502866 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_scheduler.c
@@ -1144,6 +1144,29 @@ static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine)
return err;
}
+static struct i915_request *
+active_request(struct i915_sched_engine *se)
+{
+ struct intel_engine_cs *engine = container_of(se, typeof(*engine), active);
+ struct i915_request *rq, *active = NULL;
+
+ rcu_read_lock();
+ rq = *READ_ONCE(engine->execlists.active);
+ if (rq) {
+ struct intel_timeline *tl = rq->context->timeline;
+
+ list_for_each_entry_from_reverse(rq, &tl->requests, link) {
+ if (__i915_request_is_complete(rq))
+ break;
+
+ active = rq;
+ }
+ }
+ rcu_read_unlock();
+
+ return active;
+}
+
int intel_ring_scheduler_setup(struct intel_engine_cs *engine)
{
struct intel_ring *ring;
@@ -1153,6 +1176,7 @@ int intel_ring_scheduler_setup(struct intel_engine_cs *engine)
tasklet_init(&engine->active.tasklet,
submission_tasklet, (unsigned long)engine);
+ engine->active.active_request = active_request;
setup_common(engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 3875e006668c..6a3a8bf8be60 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -1043,6 +1043,25 @@ static void setup_irq(struct intel_engine_cs *engine)
}
}
+static struct i915_request *
+legacy_active_request(struct i915_sched_engine *se)
+{
+ struct intel_engine_cs *engine = container_of(se, typeof(*engine), active);
+ struct i915_request *rq;
+
+ list_for_each_entry(rq, &engine->active.requests, sched.link) {
+ if (__i915_request_is_complete(rq))
+ continue;
+
+ if (!__i915_request_has_started(rq))
+ break;
+
+ return rq;
+ }
+
+ return NULL;
+}
+
static void setup_common(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
@@ -1050,6 +1069,8 @@ static void setup_common(struct intel_engine_cs *engine)
/* gen8+ are only supported with execlists */
GEM_BUG_ON(INTEL_GEN(i915) >= 8);
+ engine->active.active_request = legacy_active_request;
+
setup_irq(engine);
engine->resume = intel_ring_submission_resume;
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index b8c484511185..a65354975074 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -130,6 +130,8 @@ struct i915_sched_engine {
* @tasklet: softirq tasklet for bottom half
*/
struct tasklet_struct tasklet;
+
+ struct i915_request *(*active_request)(struct i915_sched_engine *se);
};
struct i915_dependency {
--
2.20.1