[Intel-gfx] [PATCH 03/26] drm/i915/guc: Take engine PM when a context is pinned with GuC submission
John Harrison
john.c.harrison at intel.com
Thu Oct 7 18:15:51 UTC 2021
On 10/7/2021 08:19, Matthew Brost wrote:
> On Wed, Oct 06, 2021 at 08:45:42PM -0700, John Harrison wrote:
>> On 10/4/2021 15:06, Matthew Brost wrote:
>>> Taking a PM reference to prevent intel_gt_wait_for_idle from short
>>> circuiting while a scheduling of user context could be enabled.
>> I'm not sure what 'while a scheduling of user context could be enabled'
>> means.
>>
> Not really sure how this isn't clear.
>
> It means that if a user context has scheduling enabled, this function
> cannot short circuit and return idle.
>
> Matt
Okay. The 'a scheduling' was throwing me off. And I was reading 'could
be enabled' as saying something that might happen in the future. English
is great at being ambiguous ;). Maybe 'while any user context has
scheduling enabled' would be simpler?
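For reference, the short circuit in question is (roughly) the early-out
at the top of intel_gt_wait_for_idle(): if the GT wakeref is not held,
the wait reports idle immediately. Taking an engine PM reference at
context pin time keeps the GT wakeref held, so that early return cannot
fire while a user context still has scheduling enabled. A paraphrased
sketch, not the exact upstream code, just the shape of the check:

	int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
	{
		/* If the device is asleep, we have no requests outstanding */
		if (!intel_gt_pm_is_awake(gt))
			return 0;

		/* ... otherwise flush retirement work and wait for requests ... */
	}
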
John.
>
>> John.
>>
>>> Returning GT idle when it is not can cause all sorts of issues
>>> throughout the stack.
>>>
>>> v2:
>>> (Daniel Vetter)
>>> - Add might_lock annotations to pin / unpin function
>>> v3:
>>> (CI)
>>> - Drop intel_engine_pm_might_put from unpin path as an async put is
>>> used
>>> v4:
>>> (John Harrison)
>>> - Make intel_engine_pm_might_get/put work with GuC virtual engines
>>> - Update commit message
>>>
>>> Signed-off-by: Matthew Brost <matthew.brost at intel.com>
>>> ---
>>> drivers/gpu/drm/i915/gt/intel_context.c | 2 ++
>>> drivers/gpu/drm/i915/gt/intel_engine_pm.h | 32 +++++++++++++++++
>>> drivers/gpu/drm/i915/gt/intel_gt_pm.h | 10 ++++++
>>> .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 36 +++++++++++++++++--
>>> drivers/gpu/drm/i915/intel_wakeref.h | 12 +++++++
>>> 5 files changed, 89 insertions(+), 3 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
>>> index 1076066f41e0..f601323b939f 100644
>>> --- a/drivers/gpu/drm/i915/gt/intel_context.c
>>> +++ b/drivers/gpu/drm/i915/gt/intel_context.c
>>> @@ -240,6 +240,8 @@ int __intel_context_do_pin_ww(struct intel_context *ce,
>>> if (err)
>>> goto err_post_unpin;
>>> + intel_engine_pm_might_get(ce->engine);
>>> +
>>> if (unlikely(intel_context_is_closed(ce))) {
>>> err = -ENOENT;
>>> goto err_unlock;
>>> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
>>> index 6fdeae668e6e..d68675925b79 100644
>>> --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
>>> +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
>>> @@ -6,9 +6,11 @@
>>> #ifndef INTEL_ENGINE_PM_H
>>> #define INTEL_ENGINE_PM_H
>>> +#include "i915_drv.h"
>>> #include "i915_request.h"
>>> #include "intel_engine_types.h"
>>> #include "intel_wakeref.h"
>>> +#include "intel_gt_pm.h"
>>> static inline bool
>>> intel_engine_pm_is_awake(const struct intel_engine_cs *engine)
>>> @@ -31,6 +33,21 @@ static inline bool intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
>>> return intel_wakeref_get_if_active(&engine->wakeref);
>>> }
>>> +static inline void intel_engine_pm_might_get(struct intel_engine_cs *engine)
>>> +{
>>> + if (!intel_engine_is_virtual(engine)) {
>>> + intel_wakeref_might_get(&engine->wakeref);
>>> + } else {
>>> + struct intel_gt *gt = engine->gt;
>>> + struct intel_engine_cs *tengine;
>>> + intel_engine_mask_t tmp, mask = engine->mask;
>>> +
>>> + for_each_engine_masked(tengine, gt, mask, tmp)
>>> + intel_wakeref_might_get(&tengine->wakeref);
>>> + }
>>> + intel_gt_pm_might_get(engine->gt);
>>> +}
>>> +
>>> static inline void intel_engine_pm_put(struct intel_engine_cs *engine)
>>> {
>>> intel_wakeref_put(&engine->wakeref);
>>> @@ -52,6 +69,21 @@ static inline void intel_engine_pm_flush(struct intel_engine_cs *engine)
>>> intel_wakeref_unlock_wait(&engine->wakeref);
>>> }
>>> +static inline void intel_engine_pm_might_put(struct intel_engine_cs *engine)
>>> +{
>>> + if (!intel_engine_is_virtual(engine)) {
>>> + intel_wakeref_might_put(&engine->wakeref);
>>> + } else {
>>> + struct intel_gt *gt = engine->gt;
>>> + struct intel_engine_cs *tengine;
>>> + intel_engine_mask_t tmp, mask = engine->mask;
>>> +
>>> + for_each_engine_masked(tengine, gt, mask, tmp)
>>> + intel_wakeref_might_put(&tengine->wakeref);
>>> + }
>>> + intel_gt_pm_might_put(engine->gt);
>>> +}
>>> +
>>> static inline struct i915_request *
>>> intel_engine_create_kernel_request(struct intel_engine_cs *engine)
>>> {
>>> diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
>>> index 05de6c1af25b..bc898df7a48c 100644
>>> --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
>>> +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
>>> @@ -31,6 +31,11 @@ static inline bool intel_gt_pm_get_if_awake(struct intel_gt *gt)
>>> return intel_wakeref_get_if_active(&gt->wakeref);
>>> }
>>> +static inline void intel_gt_pm_might_get(struct intel_gt *gt)
>>> +{
>>> + intel_wakeref_might_get(&gt->wakeref);
>>> +}
>>> +
>>> static inline void intel_gt_pm_put(struct intel_gt *gt)
>>> {
>>> intel_wakeref_put(&gt->wakeref);
>>> @@ -41,6 +46,11 @@ static inline void intel_gt_pm_put_async(struct intel_gt *gt)
>>> intel_wakeref_put_async(&gt->wakeref);
>>> }
>>> +static inline void intel_gt_pm_might_put(struct intel_gt *gt)
>>> +{
>>> + intel_wakeref_might_put(&gt->wakeref);
>>> +}
>>> +
>>> #define with_intel_gt_pm(gt, tmp) \
>>> for (tmp = 1, intel_gt_pm_get(gt); tmp; \
>>> intel_gt_pm_put(gt), tmp = 0)
>>> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
>>> index 17da2fea1bff..8b82da50c2bc 100644
>>> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
>>> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
>>> @@ -1571,7 +1571,12 @@ static int guc_context_pre_pin(struct intel_context *ce,
>>> static int guc_context_pin(struct intel_context *ce, void *vaddr)
>>> {
>>> - return __guc_context_pin(ce, ce->engine, vaddr);
>>> + int ret = __guc_context_pin(ce, ce->engine, vaddr);
>>> +
>>> + if (likely(!ret && !intel_context_is_barrier(ce)))
>>> + intel_engine_pm_get(ce->engine);
>>> +
>>> + return ret;
>>> }
>>> static void guc_context_unpin(struct intel_context *ce)
>>> @@ -1580,6 +1585,9 @@ static void guc_context_unpin(struct intel_context *ce)
>>> unpin_guc_id(guc, ce);
>>> lrc_unpin(ce);
>>> +
>>> + if (likely(!intel_context_is_barrier(ce)))
>>> + intel_engine_pm_put_async(ce->engine);
>>> }
>>> static void guc_context_post_unpin(struct intel_context *ce)
>>> @@ -2341,8 +2349,30 @@ static int guc_virtual_context_pre_pin(struct intel_context *ce,
>>> static int guc_virtual_context_pin(struct intel_context *ce, void *vaddr)
>>> {
>>> struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
>>> + int ret = __guc_context_pin(ce, engine, vaddr);
>>> + intel_engine_mask_t tmp, mask = ce->engine->mask;
>>> +
>>> + if (likely(!ret))
>>> + for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
>>> + intel_engine_pm_get(engine);
>>> - return __guc_context_pin(ce, engine, vaddr);
>>> + return ret;
>>> +}
>>> +
>>> +static void guc_virtual_context_unpin(struct intel_context *ce)
>>> +{
>>> + intel_engine_mask_t tmp, mask = ce->engine->mask;
>>> + struct intel_engine_cs *engine;
>>> + struct intel_guc *guc = ce_to_guc(ce);
>>> +
>>> + GEM_BUG_ON(context_enabled(ce));
>>> + GEM_BUG_ON(intel_context_is_barrier(ce));
>>> +
>>> + unpin_guc_id(guc, ce);
>>> + lrc_unpin(ce);
>>> +
>>> + for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
>>> + intel_engine_pm_put_async(engine);
>>> }
>>> static void guc_virtual_context_enter(struct intel_context *ce)
>>> @@ -2379,7 +2409,7 @@ static const struct intel_context_ops virtual_guc_context_ops = {
>>> .pre_pin = guc_virtual_context_pre_pin,
>>> .pin = guc_virtual_context_pin,
>>> - .unpin = guc_context_unpin,
>>> + .unpin = guc_virtual_context_unpin,
>>> .post_unpin = guc_context_post_unpin,
>>> .ban = guc_context_ban,
>>> diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
>>> index 545c8f277c46..4f4c2e15e736 100644
>>> --- a/drivers/gpu/drm/i915/intel_wakeref.h
>>> +++ b/drivers/gpu/drm/i915/intel_wakeref.h
>>> @@ -123,6 +123,12 @@ enum {
>>> __INTEL_WAKEREF_PUT_LAST_BIT__
>>> };
>>> +static inline void
>>> +intel_wakeref_might_get(struct intel_wakeref *wf)
>>> +{
>>> + might_lock(&wf->mutex);
>>> +}
>>> +
>>> /**
>>> * intel_wakeref_put_flags: Release the wakeref
>>> * @wf: the wakeref
>>> @@ -170,6 +176,12 @@ intel_wakeref_put_delay(struct intel_wakeref *wf, unsigned long delay)
>>> FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay));
>>> }
>>> +static inline void
>>> +intel_wakeref_might_put(struct intel_wakeref *wf)
>>> +{
>>> + might_lock(&wf->mutex);
>>> +}
>>> +
>>> /**
>>> * intel_wakeref_lock: Lock the wakeref (mutex)
>>> * @wf: the wakeref