[Intel-gfx] [PATCH] drm/i915: Only reschedule the submission tasklet if preemption is possible
Chris Wilson
chris at chris-wilson.co.uk
Mon Apr 29 12:52:45 UTC 2019
If we couple the scheduler more tightly with the execlists policy, we
can apply the preemption policy to the question of whether we need to
kick the tasklet at all for this priority bump: only reschedule the
submission tasklet if the new priority is sufficient to preempt the
request currently executing on the engine.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
---
drivers/gpu/drm/i915/gt/selftest_lrc.c | 5 +++++
drivers/gpu/drm/i915/i915_request.c | 2 --
drivers/gpu/drm/i915/i915_scheduler.c | 18 +++++++++++-------
3 files changed, 16 insertions(+), 9 deletions(-)
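
In case it helps review, here is a rough standalone model of the new
decision (illustrative only; the actual change below uses rq_prio(),
port_request() and __execlists_need_preempt() inside the driver): the
submission tasklet is only scheduled when the bumped priority is
strictly higher than the effective priority of the in-flight request,
and never when the engine ports are empty.

/*
 * Standalone sketch, not kernel code. NO_PREEMPTION and fake_request
 * are illustrative stand-ins for __NO_PREEMPTION and i915_request.
 */
#include <stdbool.h>
#include <stdio.h>

#define NO_PREEMPTION 0x1 /* illustrative stand-in for __NO_PREEMPTION */

struct fake_request {
	int priority;
};

/*
 * Effective priority of the in-flight request: the __NO_PREEMPTION-style
 * bit ensures an equal-priority bump never triggers preemption.
 */
static int effective_prio(const struct fake_request *rq)
{
	return rq->priority | NO_PREEMPTION;
}

/* Mirrors __execlists_need_preempt(): preempt only on strictly higher prio. */
static bool need_preempt(int prio, int active_prio)
{
	return prio > active_prio;
}

static bool kick_tasklet(const struct fake_request *inflight, int prio)
{
	if (!inflight)	/* nothing on the ports, nothing to preempt */
		return false;

	return need_preempt(prio, effective_prio(inflight));
}

int main(void)
{
	struct fake_request active = { .priority = 2 };

	/* Equal-priority bump: no preemption, so no tasklet kick. */
	printf("bump to 2: kick=%d\n", kick_tasklet(&active, 2));
	/* Strictly higher priority: worth kicking the tasklet. */
	printf("bump to 8: kick=%d\n", kick_tasklet(&active, 8));
	return 0;
}

Running that prints kick=0 for the equal-priority bump and kick=1 for
the higher one, which is the behaviour that lets us drop the
unconditional local_bh_disable()/local_bh_enable() kick around
i915_schedule_bump_priority() in i915_request_wait().
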
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 84538f69185b..37ed35459972 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -645,7 +645,12 @@ static struct i915_request *dummy_request(struct intel_engine_cs *engine)
static void dummy_request_free(struct i915_request *dummy)
{
+ /* We have to fake the CS interrupt to kick the next request */
+ tasklet_hi_schedule(&dummy->engine->execlists.tasklet);
+
i915_request_mark_complete(dummy);
+ dma_fence_signal(&dummy->fence);
+
i915_sched_node_fini(&dummy->sched);
i915_sw_fence_fini(&dummy->submit);
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index af8c9fa5e066..2e22da66a56c 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1358,9 +1358,7 @@ long i915_request_wait(struct i915_request *rq,
if (flags & I915_WAIT_PRIORITY) {
if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
gen6_rps_boost(rq);
- local_bh_disable(); /* suspend tasklets for reprioritisation */
i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
- local_bh_enable(); /* kick tasklets en masse */
}
wait.tsk = current;
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 39bc4f54e272..4913418387be 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -261,16 +261,20 @@ sched_lock_engine(const struct i915_sched_node *node,
return engine;
}
-static bool inflight(const struct i915_request *rq,
- const struct intel_engine_cs *engine)
+static inline int rq_prio(const struct i915_request *rq)
{
- const struct i915_request *active;
+ return rq->sched.attr.priority | __NO_PREEMPTION;
+}
+
+static bool kick_tasklet(const struct intel_engine_cs *engine, int prio)
+{
+ const struct i915_request *inflight =
+ port_request(engine->execlists.port);
- if (!i915_request_is_active(rq))
+ if (!inflight)
return false;
- active = port_request(engine->execlists.port);
- return active->hw_context == rq->hw_context;
+ return __execlists_need_preempt(prio, rq_prio(inflight));
}
static void __i915_schedule(struct i915_request *rq,
@@ -400,7 +404,7 @@ static void __i915_schedule(struct i915_request *rq,
* If we are already the currently executing context, don't
* bother evaluating if we should preempt ourselves.
*/
- if (inflight(node_to_request(node), engine))
+ if (!kick_tasklet(engine, prio))
continue;
/* Defer (tasklet) submission until after all of our updates. */
--
2.20.1