[Intel-gfx] [PATCH 03/40] drm/i915: Pass i915_sched_node around internally
Tvrtko Ursulin
tvrtko.ursulin at linux.intel.com
Wed May 8 10:15:43 UTC 2019
On 08/05/2019 09:06, Chris Wilson wrote:
> To simplify the next patch, update bump_priority and schedule to accept
> the internal i915_sched_node directly and not expect a request pointer.
>
> add/remove: 0/0 grow/shrink: 2/1 up/down: 8/-15 (-7)
> Function old new delta
> i915_schedule_bump_priority 109 113 +4
> i915_schedule 50 54 +4
> __i915_schedule 922 907 -15
>
> v2: Adopt node for the old rq local, since it no longer is a request but
> the origin node.
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> ---
> drivers/gpu/drm/i915/i915_scheduler.c | 36 ++++++++++++++-------------
> 1 file changed, 19 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
> index b7488c31e3e9..f32d0ee6d58c 100644
> --- a/drivers/gpu/drm/i915/i915_scheduler.c
> +++ b/drivers/gpu/drm/i915/i915_scheduler.c
> @@ -186,7 +186,7 @@ static void kick_submission(struct intel_engine_cs *engine, int prio)
> tasklet_hi_schedule(&engine->execlists.tasklet);
> }
>
> -static void __i915_schedule(struct i915_request *rq,
> +static void __i915_schedule(struct i915_sched_node *node,
> const struct i915_sched_attr *attr)
> {
> struct intel_engine_cs *engine;
> @@ -200,13 +200,13 @@ static void __i915_schedule(struct i915_request *rq,
> lockdep_assert_held(&schedule_lock);
> GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
>
> - if (i915_request_completed(rq))
> + if (node_signaled(node))
> return;
>
> - if (prio <= READ_ONCE(rq->sched.attr.priority))
> + if (prio <= READ_ONCE(node->attr.priority))
> return;
>
> - stack.signaler = &rq->sched;
> + stack.signaler = node;
> list_add(&stack.dfs_link, &dfs);
>
> /*
> @@ -257,9 +257,9 @@ static void __i915_schedule(struct i915_request *rq,
> * execlists_submit_request()), we can set our own priority and skip
> * acquiring the engine locks.
> */
> - if (rq->sched.attr.priority == I915_PRIORITY_INVALID) {
> - GEM_BUG_ON(!list_empty(&rq->sched.link));
> - rq->sched.attr = *attr;
> + if (node->attr.priority == I915_PRIORITY_INVALID) {
> + GEM_BUG_ON(!list_empty(&node->link));
> + node->attr = *attr;
>
> if (stack.dfs_link.next == stack.dfs_link.prev)
> return;
> @@ -268,15 +268,14 @@ static void __i915_schedule(struct i915_request *rq,
> }
>
> memset(&cache, 0, sizeof(cache));
> - engine = rq->engine;
> + engine = node_to_request(node)->engine;
> spin_lock(&engine->timeline.lock);
>
> /* Fifo and depth-first replacement ensure our deps execute before us */
> list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
> - struct i915_sched_node *node = dep->signaler;
> -
> INIT_LIST_HEAD(&dep->dfs_link);
>
> + node = dep->signaler;
> engine = sched_lock_engine(node, engine, &cache);
> lockdep_assert_held(&engine->timeline.lock);
>
> @@ -319,13 +318,20 @@ static void __i915_schedule(struct i915_request *rq,
> void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
> {
> spin_lock_irq(&schedule_lock);
> - __i915_schedule(rq, attr);
> + __i915_schedule(&rq->sched, attr);
> spin_unlock_irq(&schedule_lock);
> }
>
> +static void __bump_priority(struct i915_sched_node *node, unsigned int bump)
> +{
> + struct i915_sched_attr attr = node->attr;
> +
> + attr.priority |= bump;
> + __i915_schedule(node, &attr);
> +}
> +
> void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
> {
> - struct i915_sched_attr attr;
> unsigned long flags;
>
> GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
> @@ -334,11 +340,7 @@ void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
> return;
>
> spin_lock_irqsave(&schedule_lock, flags);
> -
> - attr = rq->sched.attr;
> - attr.priority |= bump;
> - __i915_schedule(rq, &attr);
> -
> + __bump_priority(&rq->sched, bump);
> spin_unlock_irqrestore(&schedule_lock, flags);
> }
>
>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
Regards,
Tvrtko
More information about the Intel-gfx
mailing list