[Intel-gfx] [PATCH 11/41] drm/i915: Extract request submission from execlists
Tvrtko Ursulin
tvrtko.ursulin at linux.intel.com
Tue Jan 26 16:28:14 UTC 2021
On 25/01/2021 14:01, Chris Wilson wrote:
> In the process of preparing to reuse the request submission logic for
> other backends, lift it out of the execlists backend. It already
> operates on the common structs, so it is just a matter of moving and
> renaming.
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> ---
> .../drm/i915/gt/intel_execlists_submission.c | 55 +------------
> .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 30 +------
> drivers/gpu/drm/i915/i915_scheduler.c | 82 +++++++++++++++++++
> drivers/gpu/drm/i915/i915_scheduler.h | 2 +
> 4 files changed, 86 insertions(+), 83 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> index 309fb421ff5c..e6acdd8dc361 100644
> --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> @@ -2404,59 +2404,6 @@ static void execlists_preempt(struct timer_list *timer)
> execlists_kick(timer, preempt);
> }
>
> -static void queue_request(struct intel_engine_cs *engine,
> - struct i915_request *rq)
> -{
> - GEM_BUG_ON(!list_empty(&rq->sched.link));
> - list_add_tail(&rq->sched.link,
> - i915_sched_lookup_priolist(engine, rq_prio(rq)));
> - set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
> -}
> -
> -static bool submit_queue(struct intel_engine_cs *engine,
> - const struct i915_request *rq)
> -{
> - struct intel_engine_execlists *execlists = &engine->execlists;
> -
> - if (rq_prio(rq) <= execlists->queue_priority_hint)
> - return false;
> -
> - execlists->queue_priority_hint = rq_prio(rq);
> - return true;
> -}
> -
> -static bool ancestor_on_hold(const struct intel_engine_cs *engine,
> - const struct i915_request *rq)
> -{
> - GEM_BUG_ON(i915_request_on_hold(rq));
> - return !list_empty(&engine->active.hold) && hold_request(rq);
> -}
> -
> -static void execlists_submit_request(struct i915_request *request)
> -{
> - struct intel_engine_cs *engine = request->engine;
> - unsigned long flags;
> -
> - /* Will be called from irq-context when using foreign fences. */
> - spin_lock_irqsave(&engine->active.lock, flags);
> -
> - if (unlikely(ancestor_on_hold(engine, request))) {
> - RQ_TRACE(request, "ancestor on hold\n");
> - list_add_tail(&request->sched.link, &engine->active.hold);
> - i915_request_set_hold(request);
> - } else {
> - queue_request(engine, request);
> -
> - GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
> - GEM_BUG_ON(list_empty(&request->sched.link));
> -
> - if (submit_queue(engine, request))
> - __execlists_kick(&engine->execlists);
> - }
> -
> - spin_unlock_irqrestore(&engine->active.lock, flags);
> -}
> -
> static int execlists_context_pre_pin(struct intel_context *ce,
> struct i915_gem_ww_ctx *ww,
> void **vaddr)
> @@ -3072,7 +3019,7 @@ static bool can_preempt(struct intel_engine_cs *engine)
>
> static void execlists_set_default_submission(struct intel_engine_cs *engine)
> {
> - engine->submit_request = execlists_submit_request;
> + engine->submit_request = i915_request_enqueue;
> engine->execlists.tasklet.func = execlists_submission_tasklet;
>
> engine->reset.prepare = execlists_reset_prepare;
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> index 53cf68e240c3..4f1eee4fbfb2 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> @@ -510,34 +510,6 @@ static int guc_request_alloc(struct i915_request *request)
> return 0;
> }
>
> -static inline void queue_request(struct intel_engine_cs *engine,
> - struct i915_request *rq,
> - int prio)
> -{
> - GEM_BUG_ON(!list_empty(&rq->sched.link));
> - list_add_tail(&rq->sched.link,
> - i915_sched_lookup_priolist(engine, prio));
> - set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
> -}
> -
> -static void guc_submit_request(struct i915_request *rq)
> -{
> - struct intel_engine_cs *engine = rq->engine;
> - unsigned long flags;
> -
> - /* Will be called from irq-context when using foreign fences. */
> - spin_lock_irqsave(&engine->active.lock, flags);
> -
> - queue_request(engine, rq, rq_prio(rq));
> -
> - GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
> - GEM_BUG_ON(list_empty(&rq->sched.link));
> -
> - tasklet_hi_schedule(&engine->execlists.tasklet);
> -
> - spin_unlock_irqrestore(&engine->active.lock, flags);
> -}
> -
> static void sanitize_hwsp(struct intel_engine_cs *engine)
> {
> struct intel_timeline *tl;
> @@ -606,7 +578,7 @@ static int guc_resume(struct intel_engine_cs *engine)
>
> static void guc_set_default_submission(struct intel_engine_cs *engine)
> {
> - engine->submit_request = guc_submit_request;
> + engine->submit_request = i915_request_enqueue;
> engine->execlists.tasklet.func = guc_submission_tasklet;
>
> engine->reset.prepare = guc_reset_prepare;
> diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
> index 9139a91f0aa3..3f5fc03908dc 100644
> --- a/drivers/gpu/drm/i915/i915_scheduler.c
> +++ b/drivers/gpu/drm/i915/i915_scheduler.c
> @@ -448,6 +448,88 @@ void i915_request_set_priority(struct i915_request *rq, int prio)
> spin_unlock_irqrestore(&engine->active.lock, flags);
> }
>
> +static void queue_request(struct intel_engine_cs *engine,
> + struct i915_request *rq)
> +{
> + GEM_BUG_ON(!list_empty(&rq->sched.link));
> + list_add_tail(&rq->sched.link,
> + i915_sched_lookup_priolist(engine, rq_prio(rq)));
> + set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
> +}
> +
> +static bool submit_queue(struct intel_engine_cs *engine,
> + const struct i915_request *rq)
> +{
> + struct intel_engine_execlists *execlists = &engine->execlists;
> +
> + if (rq_prio(rq) <= execlists->queue_priority_hint)
> + return false;
> +
> + execlists->queue_priority_hint = rq_prio(rq);
> + return true;
> +}
> +
> +static bool hold_request(const struct i915_request *rq)
> +{
> + struct i915_dependency *p;
> + bool result = false;
> +
> + /*
> + * If one of our ancestors is on hold, we must also be put on hold,
> + * otherwise we will bypass it and execute before it.
> + */
> + rcu_read_lock();
> + for_each_signaler(p, rq) {
> + const struct i915_request *s =
> + container_of(p->signaler, typeof(*s), sched);
> +
> + if (s->engine != rq->engine)
> + continue;
> +
> + result = i915_request_on_hold(s);
> + if (result)
> + break;
> + }
> + rcu_read_unlock();
> +
> + return result;
> +}
> +
> +static bool ancestor_on_hold(const struct intel_engine_cs *engine,
> + const struct i915_request *rq)
> +{
> + GEM_BUG_ON(i915_request_on_hold(rq));
> + return unlikely(!list_empty(&engine->active.hold)) && hold_request(rq);
> +}
> +
> +void i915_request_enqueue(struct i915_request *rq)
> +{
> + struct intel_engine_cs *engine = rq->engine;
> + unsigned long flags;
> + bool kick = false;
> +
> + /* Will be called from irq-context when using foreign fences. */
> + spin_lock_irqsave(&engine->active.lock, flags);
> + GEM_BUG_ON(test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags));
> +
> + if (unlikely(ancestor_on_hold(engine, rq))) {
> + RQ_TRACE(rq, "ancestor on hold\n");
> + list_add_tail(&rq->sched.link, &engine->active.hold);
> + i915_request_set_hold(rq);
> + } else {
> + queue_request(engine, rq);
> +
> + GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
> +
> + kick = submit_queue(engine, rq);
> + }
> +
> + GEM_BUG_ON(list_empty(&rq->sched.link));
> + spin_unlock_irqrestore(&engine->active.lock, flags);
> + if (kick)
> + tasklet_hi_schedule(&engine->execlists.tasklet);
> +}
> +
> void i915_sched_node_init(struct i915_sched_node *node)
> {
> spin_lock_init(&node->lock);
> diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
> index 5be7f90e7896..c4c086d56f81 100644
> --- a/drivers/gpu/drm/i915/i915_scheduler.h
> +++ b/drivers/gpu/drm/i915/i915_scheduler.h
> @@ -39,6 +39,8 @@ void i915_sched_init_ipi(struct i915_sched_ipi *ipi);
>
> void i915_request_set_priority(struct i915_request *request, int prio);
>
> +void i915_request_enqueue(struct i915_request *request);
> +
> struct list_head *
> i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio);
>
>
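The net effect is that both backends now assign engine->submit_request =
i915_request_enqueue, so execlists and GuC share a single enqueue path. One
behavioural detail visible in the diff: the GuC backend now only kicks its
tasklet when submit_queue() raises the queue_priority_hint, instead of
unconditionally as the old guc_submit_request() did. For reference, a
condensed sketch of the moved helper (trimmed from the hunk above; the
GEM_BUG_ON asserts and RQ_TRACE call are dropped, everything else follows
the diff):

void i915_request_enqueue(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	unsigned long flags;
	bool kick = false;

	/* May be called from irq context when signalled by foreign fences. */
	spin_lock_irqsave(&engine->active.lock, flags);

	if (unlikely(ancestor_on_hold(engine, rq))) {
		/* An ancestor is on hold: park this request on the hold list. */
		list_add_tail(&rq->sched.link, &engine->active.hold);
		i915_request_set_hold(rq);
	} else {
		/* Insert into the priolist for its priority level... */
		queue_request(engine, rq);
		/* ...and note whether it raised the queue_priority_hint. */
		kick = submit_queue(engine, rq);
	}

	spin_unlock_irqrestore(&engine->active.lock, flags);

	/* Kick the backend tasklet outside the lock only if needed. */
	if (kick)
		tasklet_hi_schedule(&engine->execlists.tasklet);
}
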
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
Regards,
Tvrtko