[Intel-gfx] [PATCH 25/33] drm/i915/guc: Support request cancellation
Daniele Ceraolo Spurio
daniele.ceraolospurio at intel.com
Tue Jul 27 19:15:59 UTC 2021
On 7/26/2021 5:23 PM, Matthew Brost wrote:
> This adds GuC backend support for i915_request_cancel(), which in turn
> makes CONFIG_DRM_I915_REQUEST_TIMEOUT work.
>
> This implementation makes use of a fence, although there are likely
> simpler options. A fence was chosen because of another feature coming
> soon which requires a user to block on a context until scheduling is
> disabled. In that case we return the fence to the user and the user
> can wait on that fence.
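
For reference, a minimal sketch of the usage pattern this enables
(illustrative only, not part of the patch; guc_context_block() /
guc_context_unblock() are introduced below, and i915_sw_fence_wait() is
the existing sw-fence wait helper):

	struct i915_sw_fence *fence;

	/* Arm the fence and send GuC a schedule disable for the context. */
	fence = guc_context_block(ce);

	/* Sleep until the schedule disable completion G2H signals it. */
	i915_sw_fence_wait(fence);

	/* ... context is quiesced; safe to modify its state ... */

	/* Drop the block; scheduling is re-enabled once the last
	 * blocker is gone. */
	guc_context_unblock(ce);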
>
> v2:
> (Daniele)
> - A comment about locking the blocked incr / decr
> - A comment about the use of the fence
> - Update commit message explaining why a fence is used
> - Delete redundant check of the blocked count in the unblock function
> - Ring buffer implementation
> - Comment about blocked in submission path
> - Shorter rpm path
> v3:
> (Checkpatch)
> - Fix typos in commit message
> (Daniel)
> - Rework to a simpler locking structure in guc_context_block / unblock
>
> Signed-off-by: Matthew Brost <matthew.brost at intel.com>
> Cc: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio at intel.com>
Daniele
> ---
> drivers/gpu/drm/i915/gt/intel_context.c | 13 ++
> drivers/gpu/drm/i915/gt/intel_context.h | 7 +
> drivers/gpu/drm/i915/gt/intel_context_types.h | 9 +-
> .../drm/i915/gt/intel_execlists_submission.c | 18 ++
> .../gpu/drm/i915/gt/intel_ring_submission.c | 16 ++
> .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 188 ++++++++++++++++++
> drivers/gpu/drm/i915/i915_request.c | 14 +-
> 7 files changed, 251 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
> index 237b70e98744..477c42d7d693 100644
> --- a/drivers/gpu/drm/i915/gt/intel_context.c
> +++ b/drivers/gpu/drm/i915/gt/intel_context.c
> @@ -366,6 +366,12 @@ static int __intel_context_active(struct i915_active *active)
> return 0;
> }
>
> +static int sw_fence_dummy_notify(struct i915_sw_fence *sf,
> + enum i915_sw_fence_notify state)
> +{
> + return NOTIFY_DONE;
> +}
> +
> void
> intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
> {
> @@ -399,6 +405,13 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
> ce->guc_id = GUC_INVALID_LRC_ID;
> INIT_LIST_HEAD(&ce->guc_id_link);
>
> + /*
> + * Initialize fence to be complete as this is expected to be complete
> + * unless there is a pending schedule disable outstanding.
> + */
> + i915_sw_fence_init(&ce->guc_blocked, sw_fence_dummy_notify);
> + i915_sw_fence_commit(&ce->guc_blocked);
> +
> i915_active_init(&ce->active,
> __intel_context_active, __intel_context_retire, 0);
> }
> diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
> index 814d9277096a..876bdb08303c 100644
> --- a/drivers/gpu/drm/i915/gt/intel_context.h
> +++ b/drivers/gpu/drm/i915/gt/intel_context.h
> @@ -70,6 +70,13 @@ intel_context_is_pinned(struct intel_context *ce)
> return atomic_read(&ce->pin_count);
> }
>
> +static inline void intel_context_cancel_request(struct intel_context *ce,
> + struct i915_request *rq)
> +{
> + GEM_BUG_ON(!ce->ops->cancel_request);
> + return ce->ops->cancel_request(ce, rq);
> +}
> +
> /**
> * intel_context_unlock_pinned - Releases the earlier locking of 'pinned' status
> * @ce - the context
> diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
> index 57c19ee3e313..a5bc876face7 100644
> --- a/drivers/gpu/drm/i915/gt/intel_context_types.h
> +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
> @@ -13,6 +13,7 @@
> #include <linux/types.h>
>
> #include "i915_active_types.h"
> +#include "i915_sw_fence.h"
> #include "i915_utils.h"
> #include "intel_engine_types.h"
> #include "intel_sseu.h"
> @@ -42,6 +43,9 @@ struct intel_context_ops {
> void (*unpin)(struct intel_context *ce);
> void (*post_unpin)(struct intel_context *ce);
>
> + void (*cancel_request)(struct intel_context *ce,
> + struct i915_request *rq);
> +
> void (*enter)(struct intel_context *ce);
> void (*exit)(struct intel_context *ce);
>
> @@ -156,7 +160,7 @@ struct intel_context {
> * sched_state: scheduling state of this context using GuC
> * submission
> */
> - u8 sched_state;
> + u16 sched_state;
> /*
> * fences: maintains of list of requests that have a submit
> * fence related to GuC submission
> @@ -184,6 +188,9 @@ struct intel_context {
> * GuC ID link - in list when unpinned but guc_id still valid in GuC
> */
> struct list_head guc_id_link;
> +
> + /* GuC context blocked fence */
> + struct i915_sw_fence guc_blocked;
> };
>
> #endif /* __INTEL_CONTEXT_TYPES__ */
> diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> index b4a876736074..de5f9c86b9a4 100644
> --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> @@ -114,6 +114,7 @@
> #include "gen8_engine_cs.h"
> #include "intel_breadcrumbs.h"
> #include "intel_context.h"
> +#include "intel_engine_heartbeat.h"
> #include "intel_engine_pm.h"
> #include "intel_engine_stats.h"
> #include "intel_execlists_submission.h"
> @@ -2587,11 +2588,26 @@ static int execlists_context_alloc(struct intel_context *ce)
> return lrc_alloc(ce, ce->engine);
> }
>
> +static void execlists_context_cancel_request(struct intel_context *ce,
> + struct i915_request *rq)
> +{
> + struct intel_engine_cs *engine = NULL;
> +
> + i915_request_active_engine(rq, &engine);
> +
> + if (engine && intel_engine_pulse(engine))
> + intel_gt_handle_error(engine->gt, engine->mask, 0,
> + "request cancellation by %s",
> + current->comm);
> +}
> +
> static const struct intel_context_ops execlists_context_ops = {
> .flags = COPS_HAS_INFLIGHT,
>
> .alloc = execlists_context_alloc,
>
> + .cancel_request = execlists_context_cancel_request,
> +
> .pre_pin = execlists_context_pre_pin,
> .pin = execlists_context_pin,
> .unpin = lrc_unpin,
> @@ -3608,6 +3624,8 @@ static const struct intel_context_ops virtual_context_ops = {
>
> .alloc = virtual_context_alloc,
>
> + .cancel_request = execlists_context_cancel_request,
> +
> .pre_pin = virtual_context_pre_pin,
> .pin = virtual_context_pin,
> .unpin = lrc_unpin,
> diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
> index 05bb9f449df1..2958e2fae380 100644
> --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
> +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
> @@ -16,6 +16,7 @@
> #include "intel_reset.h"
> #include "intel_ring.h"
> #include "shmem_utils.h"
> +#include "intel_engine_heartbeat.h"
>
> /* Rough estimate of the typical request size, performing a flush,
> * set-context and then emitting the batch.
> @@ -604,9 +605,24 @@ static void ring_context_ban(struct intel_context *ce,
> }
> }
>
> +static void ring_context_cancel_request(struct intel_context *ce,
> + struct i915_request *rq)
> +{
> + struct intel_engine_cs *engine = NULL;
> +
> + i915_request_active_engine(rq, &engine);
> +
> + if (engine && intel_engine_pulse(engine))
> + intel_gt_handle_error(engine->gt, engine->mask, 0,
> + "request cancellation by %s",
> + current->comm);
> +}
> +
> static const struct intel_context_ops ring_context_ops = {
> .alloc = ring_context_alloc,
>
> + .cancel_request = ring_context_cancel_request,
> +
> .ban = ring_context_ban,
>
> .pre_pin = ring_context_pre_pin,
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> index ad9a38a861df..ee4f1f996efa 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> @@ -126,6 +126,9 @@ static inline void clr_context_pending_enable(struct intel_context *ce)
> #define SCHED_STATE_DESTROYED BIT(1)
> #define SCHED_STATE_PENDING_DISABLE BIT(2)
> #define SCHED_STATE_BANNED BIT(3)
> +#define SCHED_STATE_BLOCKED_SHIFT 4
> +#define SCHED_STATE_BLOCKED BIT(SCHED_STATE_BLOCKED_SHIFT)
> +#define SCHED_STATE_BLOCKED_MASK (0xfff << SCHED_STATE_BLOCKED_SHIFT)
> static inline void init_sched_state(struct intel_context *ce)
> {
> /* Only should be called from guc_lrc_desc_pin() */
> @@ -203,6 +206,32 @@ static inline void clr_context_banned(struct intel_context *ce)
> ce->guc_state.sched_state &= ~SCHED_STATE_BANNED;
> }
>
> +static inline u32 context_blocked(struct intel_context *ce)
> +{
> + return (ce->guc_state.sched_state & SCHED_STATE_BLOCKED_MASK) >>
> + SCHED_STATE_BLOCKED_SHIFT;
> +}
> +
> +static inline void incr_context_blocked(struct intel_context *ce)
> +{
> + lockdep_assert_held(&ce->engine->sched_engine->lock);
> + lockdep_assert_held(&ce->guc_state.lock);
> +
> + ce->guc_state.sched_state += SCHED_STATE_BLOCKED;
> +
> + GEM_BUG_ON(!context_blocked(ce)); /* Overflow check */
> +}
> +
> +static inline void decr_context_blocked(struct intel_context *ce)
> +{
> + lockdep_assert_held(&ce->engine->sched_engine->lock);
> + lockdep_assert_held(&ce->guc_state.lock);
> +
> + GEM_BUG_ON(!context_blocked(ce)); /* Underflow check */
> +
> + ce->guc_state.sched_state -= SCHED_STATE_BLOCKED;
> +}
> +
> static inline bool context_guc_id_invalid(struct intel_context *ce)
> {
> return ce->guc_id == GUC_INVALID_LRC_ID;
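
Worth noting: unlike the single-bit flags above, BLOCKED is a 12-bit
counter packed into bits [15:4] of sched_state (hence the u8 -> u16
widening in intel_context_types.h), so nested block/unblock pairs
balance out. A quick illustration of the encoding (illustrative, not
part of the patch):

	u16 sched_state = SCHED_STATE_PENDING_DISABLE;	/* flags in bits [3:0] */

	/* Two nested blocks: each adds BIT(4); the flag bits are untouched. */
	sched_state += SCHED_STATE_BLOCKED;
	sched_state += SCHED_STATE_BLOCKED;

	/* Extract the count: (0x24 & 0xfff0) >> 4 == 2; max 4095 blockers. */
	count = (sched_state & SCHED_STATE_BLOCKED_MASK) >>
		SCHED_STATE_BLOCKED_SHIFT;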
> @@ -404,6 +433,14 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
> if (unlikely(err))
> goto out;
> }
> +
> + /*
> + * The request / context will be run on the hardware when scheduling
> + * gets enabled in the unblock.
> + */
> + if (unlikely(context_blocked(ce)))
> + goto out;
> +
> enabled = context_enabled(ce);
>
> if (!enabled) {
> @@ -532,6 +569,7 @@ static void __guc_context_destroy(struct intel_context *ce);
> static void release_guc_id(struct intel_guc *guc, struct intel_context *ce);
> static void guc_signal_context_fence(struct intel_context *ce);
> static void guc_cancel_context_requests(struct intel_context *ce);
> +static void guc_blocked_fence_complete(struct intel_context *ce);
>
> static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
> {
> @@ -579,6 +617,10 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
> }
> intel_context_sched_disable_unpin(ce);
> atomic_dec(&guc->outstanding_submission_g2h);
> + spin_lock_irqsave(&ce->guc_state.lock, flags);
> + guc_blocked_fence_complete(ce);
> + spin_unlock_irqrestore(&ce->guc_state.lock, flags);
> +
> intel_context_put(ce);
> }
> }
> @@ -1354,6 +1396,21 @@ static void guc_context_post_unpin(struct intel_context *ce)
> lrc_post_unpin(ce);
> }
>
> +static void __guc_context_sched_enable(struct intel_guc *guc,
> + struct intel_context *ce)
> +{
> + u32 action[] = {
> + INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
> + ce->guc_id,
> + GUC_CONTEXT_ENABLE
> + };
> +
> + trace_intel_context_sched_enable(ce);
> +
> + guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
> + G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
> +}
> +
> static void __guc_context_sched_disable(struct intel_guc *guc,
> struct intel_context *ce,
> u16 guc_id)
> @@ -1372,17 +1429,143 @@ static void __guc_context_sched_disable(struct intel_guc *guc,
> G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
> }
>
> +static void guc_blocked_fence_complete(struct intel_context *ce)
> +{
> + lockdep_assert_held(&ce->guc_state.lock);
> +
> + if (!i915_sw_fence_done(&ce->guc_blocked))
> + i915_sw_fence_complete(&ce->guc_blocked);
> +}
> +
> +static void guc_blocked_fence_reinit(struct intel_context *ce)
> +{
> + lockdep_assert_held(&ce->guc_state.lock);
> + GEM_BUG_ON(!i915_sw_fence_done(&ce->guc_blocked));
> +
> + /*
> + * This fence is always complete unless a pending schedule disable is
> + * outstanding. We arm the fence here and complete it when we receive
> + * the pending schedule disable complete message.
> + */
> + i915_sw_fence_fini(&ce->guc_blocked);
> + i915_sw_fence_reinit(&ce->guc_blocked);
> + i915_sw_fence_await(&ce->guc_blocked);
> + i915_sw_fence_commit(&ce->guc_blocked);
> +}
> +
> static u16 prep_context_pending_disable(struct intel_context *ce)
> {
> lockdep_assert_held(&ce->guc_state.lock);
>
> set_context_pending_disable(ce);
> clr_context_enabled(ce);
> + guc_blocked_fence_reinit(ce);
> intel_context_get(ce);
>
> return ce->guc_id;
> }
>
> +static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
> +{
> + struct intel_guc *guc = ce_to_guc(ce);
> + struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
> + unsigned long flags;
> + struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
> + intel_wakeref_t wakeref;
> + u16 guc_id;
> + bool enabled;
> +
> + spin_lock_irqsave(&ce->guc_state.lock, flags);
> +
> + /*
> + * Sync with submission path, increment before below changes to context
> + * state.
> + */
> + spin_lock(&sched_engine->lock);
> + incr_context_blocked(ce);
> + spin_unlock(&sched_engine->lock);
> +
> + enabled = context_enabled(ce);
> + if (unlikely(!enabled || submission_disabled(guc))) {
> + if (enabled)
> + clr_context_enabled(ce);
> + spin_unlock_irqrestore(&ce->guc_state.lock, flags);
> + return &ce->guc_blocked;
> + }
> +
> + /*
> + * We add +2 here as the schedule disable complete CTB handler calls
> + * intel_context_sched_disable_unpin (-2 to pin_count).
> + */
> + atomic_add(2, &ce->pin_count);
> +
> + guc_id = prep_context_pending_disable(ce);
> +
> + spin_unlock_irqrestore(&ce->guc_state.lock, flags);
> +
> + with_intel_runtime_pm(runtime_pm, wakeref)
> + __guc_context_sched_disable(guc, ce, guc_id);
> +
> + return &ce->guc_blocked;
> +}
> +
> +static void guc_context_unblock(struct intel_context *ce)
> +{
> + struct intel_guc *guc = ce_to_guc(ce);
> + struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
> + unsigned long flags;
> + struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
> + intel_wakeref_t wakeref;
> + bool enable;
> +
> + GEM_BUG_ON(context_enabled(ce));
> +
> + spin_lock_irqsave(&ce->guc_state.lock, flags);
> +
> + if (unlikely(submission_disabled(guc) ||
> + !intel_context_is_pinned(ce) ||
> + context_pending_disable(ce) ||
> + context_blocked(ce) > 1)) {
> + enable = false;
> + } else {
> + enable = true;
> + set_context_pending_enable(ce);
> + set_context_enabled(ce);
> + intel_context_get(ce);
> + }
> +
> + /*
> + * Sync with submission path, decrement after above changes to context
> + * state.
> + */
> + spin_lock(&sched_engine->lock);
> + decr_context_blocked(ce);
> + spin_unlock(&sched_engine->lock);
> +
> + spin_unlock_irqrestore(&ce->guc_state.lock, flags);
> +
> + if (enable) {
> + with_intel_runtime_pm(runtime_pm, wakeref)
> + __guc_context_sched_enable(guc, ce);
> + }
> +}
> +
> +static void guc_context_cancel_request(struct intel_context *ce,
> + struct i915_request *rq)
> +{
> + if (i915_sw_fence_signaled(&rq->submit)) {
> + struct i915_sw_fence *fence = guc_context_block(ce);
> +
> + i915_sw_fence_wait(fence);
> + if (!i915_request_completed(rq)) {
> + __i915_request_skip(rq);
> + guc_reset_state(ce, intel_ring_wrap(ce->ring, rq->head),
> + true);
> + }
> + guc_context_unblock(ce);
> + }
> +}
> +
> static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
> u16 guc_id,
> u32 preemption_timeout)
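
A note on the re-arm pattern in guc_blocked_fence_reinit() above: an
i915_sw_fence committed with no pending awaits starts out signaled,
while taking one explicit await before the commit leaves it pending
until a matching complete. A sketch of the guc_blocked lifecycle using
only the calls from this patch (illustrative):

	/* Context init: commit with no awaits, so the fence starts
	 * signaled and any i915_sw_fence_wait() returns immediately. */
	i915_sw_fence_init(&ce->guc_blocked, sw_fence_dummy_notify);
	i915_sw_fence_commit(&ce->guc_blocked);

	/* Schedule disable sent (guc_blocked_fence_reinit()): one
	 * explicit await keeps the fence pending after the commit. */
	i915_sw_fence_fini(&ce->guc_blocked);
	i915_sw_fence_reinit(&ce->guc_blocked);
	i915_sw_fence_await(&ce->guc_blocked);
	i915_sw_fence_commit(&ce->guc_blocked);

	/* Schedule disable completion G2H received (or scrubbed on
	 * reset): drop the await, signaling every waiter blocked in
	 * guc_context_cancel_request(). */
	i915_sw_fence_complete(&ce->guc_blocked);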
> @@ -1642,6 +1825,8 @@ static const struct intel_context_ops guc_context_ops = {
>
> .ban = guc_context_ban,
>
> + .cancel_request = guc_context_cancel_request,
> +
> .enter = intel_context_enter_engine,
> .exit = intel_context_exit_engine,
>
> @@ -1837,6 +2022,8 @@ static const struct intel_context_ops virtual_guc_context_ops = {
>
> .ban = guc_context_ban,
>
> + .cancel_request = guc_context_cancel_request,
> +
> .enter = guc_virtual_context_enter,
> .exit = guc_virtual_context_exit,
>
> @@ -2295,6 +2482,7 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc,
> clr_context_banned(ce);
> clr_context_pending_disable(ce);
> __guc_signal_context_fence(ce);
> + guc_blocked_fence_complete(ce);
> spin_unlock_irqrestore(&ce->guc_state.lock, flags);
>
> if (banned) {
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index 28f38b02a5d2..541a20371502 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -710,18 +710,6 @@ void i915_request_unsubmit(struct i915_request *request)
> spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
> }
>
> -static void __cancel_request(struct i915_request *rq)
> -{
> - struct intel_engine_cs *engine = NULL;
> -
> - i915_request_active_engine(rq, &engine);
> -
> - if (engine && intel_engine_pulse(engine))
> - intel_gt_handle_error(engine->gt, engine->mask, 0,
> - "request cancellation by %s",
> - current->comm);
> -}
> -
> void i915_request_cancel(struct i915_request *rq, int error)
> {
> if (!i915_request_set_error_once(rq, error))
> @@ -729,7 +717,7 @@ void i915_request_cancel(struct i915_request *rq, int error)
>
> set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
>
> - __cancel_request(rq);
> + intel_context_cancel_request(rq->context, rq);
> }
>
> static int __i915_sw_fence_call