[Intel-gfx] [PATCH 19/47] drm/i915/guc: Ensure request ordering via completion fences
Daniele Ceraolo Spurio
daniele.ceraolospurio at intel.com
Thu Jul 15 01:51:39 UTC 2021
On 6/24/2021 12:04 AM, Matthew Brost wrote:
> If two requests are on the same ring, they are explicitly ordered by the
> HW. So, a submission fence is sufficient to ensure ordering when using
> the new GuC submission interface. Conversely, if two requests share a
> timeline and are on the same physical engine but different contexts, this
> doesn't ensure ordering on the new GuC submission interface. So, a
> completion fence needs to be used to ensure ordering.
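For reference, the distinction above comes down to which fence of the
previous request the new request's submit fence is coupled to. Roughly (my
paraphrase using the existing i915_sw_fence helpers, not code taken from
this patch):

    /* submission fence: rq may run as soon as prev has been *submitted* */
    i915_sw_fence_await_sw_fence(&rq->submit, &prev->submit, &rq->submitq);

    /* completion fence: rq may only run once prev has *completed* */
    i915_sw_fence_await_dma_fence(&rq->submit, &prev->fence, 0,
                                  I915_FENCE_GFP);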
>
> Signed-off-by: John Harrison <John.C.Harrison at Intel.com>
> Signed-off-by: Matthew Brost <matthew.brost at intel.com>
> ---
> .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 1 -
> drivers/gpu/drm/i915/i915_request.c | 17 +++++++++++++----
> 2 files changed, 13 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> index 0a6ccdf32316..010e46dd6b16 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> @@ -926,7 +926,6 @@ static void guc_context_sched_disable(struct intel_context *ce)
> * request doesn't slip through the 'context_pending_disable' fence.
> */
> if (unlikely(atomic_add_unless(&ce->pin_count, -2, 2))) {
> - spin_unlock_irqrestore(&ce->guc_state.lock, flags);
Why is this unlock() being dropped here?
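Assuming the spin_lock_irqsave() earlier in this function is unchanged (it's
not visible in this hunk, so I may be missing a related change), the early
return would now leave guc_state.lock held with interrupts disabled:

    /* assuming the earlier lock in guc_context_sched_disable() still exists */
    spin_lock_irqsave(&ce->guc_state.lock, flags);
    ...
    if (unlikely(atomic_add_unless(&ce->pin_count, -2, 2)))
            return;  /* lock and irq state would be leaked here */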
> return;
> }
> guc_id = prep_context_pending_disable(ce);
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index 9dad3df5eaf7..d92c9f25c9f4 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -444,6 +444,7 @@ void i915_request_retire_upto(struct i915_request *rq)
>
> do {
> tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
> + GEM_BUG_ON(!i915_request_completed(tmp));
This condition in the BUG_ON is not a new requirement introduced by the
changes below, right? I just want to make sure I'm not missing anything.
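The invariant I read this as asserting (my restatement, not code from the
patch): rq is already completed when retire_upto() is called and requests on
a timeline complete in order, so everything ahead of it on tl->requests must
be completed too, i.e. roughly:

    list_for_each_entry(tmp, &tl->requests, link) {
            GEM_BUG_ON(!i915_request_completed(tmp));
            if (tmp == rq)
                    break;
    }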
> } while (i915_request_retire(tmp) && tmp != rq);
> }
>
> @@ -1405,6 +1406,9 @@ i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
> return err;
> }
>
> +static int
> +i915_request_await_request(struct i915_request *to, struct i915_request *from);
> +
> int
> i915_request_await_execution(struct i915_request *rq,
> struct dma_fence *fence,
> @@ -1464,12 +1468,13 @@ await_request_submit(struct i915_request *to, struct i915_request *from)
> * the waiter to be submitted immediately to the physical engine
> * as it may then bypass the virtual request.
> */
> - if (to->engine == READ_ONCE(from->engine))
> + if (to->engine == READ_ONCE(from->engine)) {
> return i915_sw_fence_await_sw_fence_gfp(&to->submit,
> &from->submit,
> I915_FENCE_GFP);
> - else
> + } else {
> return __i915_request_await_execution(to, from, NULL);
> + }
The { } are not needed here; I'm guessing they're leftover from a dropped
change.
> }
>
> static int
> @@ -1493,7 +1498,8 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
> return ret;
> }
>
> - if (is_power_of_2(to->execution_mask | READ_ONCE(from->execution_mask)))
> + if (!intel_engine_uses_guc(to->engine) &&
> + is_power_of_2(to->execution_mask | READ_ONCE(from->execution_mask)))
> ret = await_request_submit(to, from);
> else
> ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
> @@ -1654,6 +1660,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
> prev = to_request(__i915_active_fence_set(&timeline->last_request,
> &rq->fence));
> if (prev && !__i915_request_is_complete(prev)) {
> + bool uses_guc = intel_engine_uses_guc(rq->engine);
> +
> /*
> * The requests are supposed to be kept in order. However,
> * we need to be wary in case the timeline->last_request
> @@ -1664,7 +1672,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
> i915_seqno_passed(prev->fence.seqno,
> rq->fence.seqno));
>
> - if (is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask))
> + if ((!uses_guc && is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask)) ||
> + (uses_guc && prev->context == rq->context))
Would it be worth adding an engine flag instead of checking which
back-end is in use? I915_ENGINE_IS_FIFO or something. Not a blocker.
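Something along these lines is what I have in mind. To be clear,
I915_ENGINE_IS_FIFO and intel_engine_has_fifo() are made-up names, nothing
like them exists today:

    /* hypothetical flag: set by back-ends that preserve submission order
     * across contexts on the same engine (i.e. execlists, not GuC) */
    static inline bool intel_engine_has_fifo(const struct intel_engine_cs *engine)
    {
            return engine->flags & I915_ENGINE_IS_FIFO;
    }

    ...
            if ((intel_engine_has_fifo(rq->engine) &&
                 is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask)) ||
                (!intel_engine_has_fifo(rq->engine) &&
                 prev->context == rq->context))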
Daniele
> i915_sw_fence_await_sw_fence(&rq->submit,
> &prev->submit,
> &rq->submitq);