[Intel-gfx] [PATCH v10 3/9] drm/i915: Create uC runtime and system suspend/resume helpers
Sagar Arun Kamble
sagar.a.kamble at intel.com
Wed Sep 27 17:07:40 UTC 2017
On 9/27/2017 9:26 PM, Michal Wajdeczko wrote:
> On Wed, 27 Sep 2017 11:30:33 +0200, Sagar Arun Kamble
> <sagar.a.kamble at intel.com> wrote:
>
>> Prepared generic helpers intel_uc_suspend, intel_uc_resume,
>> intel_uc_runtime_suspend and intel_uc_runtime_resume. These are
>> called from the respective GEM functions.
>> The only exception is intel_uc_resume, which needs to be called
>> with or without GuC loaded in the i915_drm_resume path. Changes to
>> add a WOPCM condition check to load GuC during resume will be added
>> in later patches.
>>
>> v2: Rebase w.r.t removal of GuC code restructuring.
>>
>> v3: Calling intel_uc_resume from i915_gem_resume post resuming
>> i915 gem setup. This is symmetrical with i915_gem_suspend.
>> Removed error messages from i915 suspend/resume routines as
>> uC suspend/resume routines will have those. (Michal Wajdeczko)
>> Declare wedged on uc_suspend failure and uc_resume failure.
>> (Michał Winiarski)
>> Keeping the uC suspend/resume function definitions close to other
>> uC functions.
>>
>> v4: Added implementation to intel_uc_resume as GuC resume needs
>> to be triggered after reloading the firmware as well. Added
>> comments about the semantics of GuC resume with the firmware reload.
>>
>> Signed-off-by: Sagar Arun Kamble <sagar.a.kamble at intel.com>
>> Cc: Chris Wilson <chris at chris-wilson.co.uk>
>> Cc: Michal Wajdeczko <michal.wajdeczko at intel.com>
>> Cc: Michał Winiarski <michal.winiarski at intel.com>
>> ---
>> drivers/gpu/drm/i915/i915_drv.c | 10 ++++++++++
>> drivers/gpu/drm/i915/i915_gem.c | 36 ++++++++++++++++++++++++++++--------
>> drivers/gpu/drm/i915/intel_uc.c | 20 ++++++++++++++++++++
>> drivers/gpu/drm/i915/intel_uc.h | 4 ++++
>> 4 files changed, 62 insertions(+), 8 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
>> index 174a7c5..f79646b 100644
>> --- a/drivers/gpu/drm/i915/i915_drv.c
>> +++ b/drivers/gpu/drm/i915/i915_drv.c
>> @@ -1698,6 +1698,16 @@ static int i915_drm_resume(struct drm_device *dev)
>> }
>> mutex_unlock(&dev->struct_mutex);
>> + /*
>> + * NB: Currently we know that a full GPU reset is done at the end of
>> + * suspend, hence GuC is loaded again during i915_gem_init_hw. Now
>> + * send an action to GuC to resume again, as the earlier call to
>> + * intel_uc_resume from i915_gem_resume would have done nothing.
>> + */
>> + ret = intel_uc_resume(dev_priv);
>> + if (ret)
>> + i915_gem_set_wedged(dev_priv);
>> +
>> intel_modeset_init_hw(dev);
>> spin_lock_irq(&dev_priv->irq_lock);
>> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
>> index 11922af..c54c9b4 100644
>> --- a/drivers/gpu/drm/i915/i915_gem.c
>> +++ b/drivers/gpu/drm/i915/i915_gem.c
>> @@ -2025,9 +2025,11 @@ int i915_gem_fault(struct vm_fault *vmf)
>> int i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
>> {
>> struct drm_i915_gem_object *obj, *on;
>> - int i;
>> + int i, ret;
>> - intel_guc_suspend(dev_priv);
>> + ret = intel_uc_runtime_suspend(dev_priv);
>> + if (ret)
>> + return ret;
>> /*
>> * Only called during RPM suspend. All users of the userfault_list
>> @@ -2068,7 +2070,7 @@ int i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
>> reg->dirty = true;
>> }
>> - return 0;
>> + return ret;
>
> Hmm, at this point we know that we didn't fail, so return 0 was fine.
Ok. Will update. ret was also zero there, and I thought returning ret
would be useful in the future if we switch to a goto at the end. But
it can be updated later if needed.
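Something along these lines is what I had in mind (just a sketch
against this patch; the existing body is elided and the "out" label
is made up):

int i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
{
	int ret;

	ret = intel_uc_runtime_suspend(dev_priv);
	if (ret)
		goto out;

	/* ... existing userfault_list and fence register handling ... */

out:
	return ret;
}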
>
>> }
>> void i915_gem_runtime_resume(struct drm_i915_private *dev_priv)
>> @@ -2080,7 +2082,7 @@ void i915_gem_runtime_resume(struct drm_i915_private *dev_priv)
>> i915_gem_init_swizzling(dev_priv);
>> i915_gem_restore_fences(dev_priv);
>> - intel_guc_resume(dev_priv);
>> + intel_uc_runtime_resume(dev_priv);
>> }
>> static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
>> @@ -4571,7 +4573,9 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
>> if (WARN_ON(!intel_engines_are_idle(dev_priv)))
>> i915_gem_set_wedged(dev_priv); /* no hope, discard everything */
>> - intel_guc_suspend(dev_priv);
>> + ret = intel_uc_suspend(dev_priv);
>> + if (ret)
>> + i915_gem_set_wedged(dev_priv); /* no hope, discard everything */
>> /*
>> * Neither the BIOS, ourselves or any other kernel
>> @@ -4606,6 +4610,7 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
>> int i915_gem_resume(struct drm_i915_private *dev_priv)
>> {
>> struct drm_device *dev = &dev_priv->drm;
>> + int ret;
>> WARN_ON(dev_priv->gt.awake);
>> @@ -4618,10 +4623,20 @@ int i915_gem_resume(struct drm_i915_private *dev_priv)
>> * it and start again.
>> */
>> dev_priv->gt.resume(dev_priv);
>> - intel_guc_resume(dev_priv);
>> +
>> + /*
>> + * NB: At the end of suspend, a full GPU reset is done, which
>> + * wipes/unloads the GuC firmware. If that reset is avoided, we can
>> + * check the WOPCM status here to see if GuC is still loaded and just
>> + * do a GuC resume without reloading the firmware.
>> + */
>> + ret = intel_uc_resume(dev_priv);
>> + if (ret)
>> + i915_gem_set_wedged(dev_priv);
>> +
>> mutex_unlock(&dev->struct_mutex);
>> - return 0;
>> + return ret;
>> }
>> void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
>> @@ -4741,7 +4756,12 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
>> intel_mocs_init_l3cc_table(dev_priv);
>> - /* We can't enable contexts until all firmware is loaded */
>> + /*
>> + * We can't enable contexts until all firmware is loaded.
>> + * FIXME: GuC is loaded unconditionally currently without checking
>> + * if it is already available in WOPCM. We can skip that load based
>> + * on WOPCM status.
>
> hmm, maybe this comment should be in intel_uc_init_hw ?
Sure. I will move it there, before the call to intel_huc_init_hw, as
this WOPCM check applies to HuC as well.
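Roughly like this in intel_uc_init_hw (only illustrating where the
comment would go; the argument list is elided):

	/*
	 * FIXME: GuC/HuC firmware is currently loaded unconditionally,
	 * without checking whether it is already available in WOPCM.
	 * We can skip the reload based on the WOPCM status.
	 */
	intel_huc_init_hw(...);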
>
>> + */
>> ret = intel_uc_init_hw(dev_priv);
>> if (ret)
>> goto out;
>> diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
>> index 2774778..7b30790 100644
>> --- a/drivers/gpu/drm/i915/intel_uc.c
>> +++ b/drivers/gpu/drm/i915/intel_uc.c
>> @@ -481,6 +481,26 @@ void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
>> i915_ggtt_disable_guc(dev_priv);
>> }
>> +int intel_uc_runtime_suspend(struct drm_i915_private *dev_priv)
>> +{
>> + return intel_guc_suspend(dev_priv);
>> +}
>> +
>> +int intel_uc_runtime_resume(struct drm_i915_private *dev_priv)
>> +{
>> + return intel_guc_resume(dev_priv);
>> +}
>> +
>> +int intel_uc_suspend(struct drm_i915_private *dev_priv)
>> +{
>> + return intel_guc_suspend(dev_priv);
>> +}
>> +
>> +int intel_uc_resume(struct drm_i915_private *dev_priv)
>> +{
>> + return intel_guc_resume(dev_priv);
>> +}
>> +
>> int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len)
>> {
>> WARN(1, "Unexpected send: action=%#x\n", *action);
>> diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
>> index 6966349..0a79e17 100644
>> --- a/drivers/gpu/drm/i915/intel_uc.h
>> +++ b/drivers/gpu/drm/i915/intel_uc.h
>> @@ -208,6 +208,10 @@ struct intel_huc {
>> void intel_uc_fini_fw(struct drm_i915_private *dev_priv);
>> int intel_uc_init_hw(struct drm_i915_private *dev_priv);
>> void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
>> +int intel_uc_runtime_suspend(struct drm_i915_private *dev_priv);
>> +int intel_uc_runtime_resume(struct drm_i915_private *dev_priv);
>> +int intel_uc_suspend(struct drm_i915_private *dev_priv);
>> +int intel_uc_resume(struct drm_i915_private *dev_priv);
>> int intel_guc_sample_forcewake(struct intel_guc *guc);
>> int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len);
>> int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);