[PATCH] drm/amdgpu: don't runtime suspend if there are displays attached (v2)

Alex Deucher alexdeucher at gmail.com
Tue Apr 19 14:44:43 UTC 2022


On Tue, Apr 19, 2022 at 10:04 AM Paul Menzel <pmenzel at molgen.mpg.de> wrote:
>
> Dear Alex,
>
>
> Thank you for the patch.
>
> Am 13.04.22 um 22:15 schrieb Alex Deucher:
> > We normally runtime suspend when there are displays attached if they
> > are in the DPMS off state, however, if something wakes the GPU
> > we send a hotplug event on resume (in case any displays were connected
> > while the GPU was in suspend) which can cause userspace to light
> > up the displays again soon after they were turned off.
> >
> > Prior to
> > commit 087451f372bf76 ("drm/amdgpu: use generic fb helpers instead of setting up AMD own's."),
> > the driver took a runtime pm reference when the fbdev emulation was
> > enabled because we didn't implement proper shadowing support for
> > vram access when the device was off, so the device never runtime
> > suspended when there was a console bound.  Once that commit landed,
> > we now utilize the core fb helper implementation which properly
> > handles the emulation, so runtime pm now suspends in cases where it did
> > not before.  Ultimately, we need to sort out why runtime suspend is not
> > working in this case for some users, but this should restore similar
> > behavior to before.
> >
> > v2: move check into runtime_suspend
> >
> > Fixes: 087451f372bf76 ("drm/amdgpu: use generic fb helpers instead of setting up AMD own's.")
> > Tested-by: Michele Ballabio <ballabio.m at gmail.com>
>
> On what system and device?

It was a Polaris dGPU, but it has been seen on other GPUs as well;
it's not device specific.  Unfortunately, the issue is hard to
reproduce, at least in our testing.
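
For context, the new helper below just walks the connector list with the
standard DRM iterator to see whether anything reports as connected.  A
rough, self-contained sketch of that pattern (the function name here is
just for illustration; the real helper additionally holds
mode_config.mutex and then checks CRTC/DPMS state) would be:

#include <drm/drm_connector.h>
#include <drm/drm_device.h>

/* Sketch only: returns true if any connector currently reports a
 * connected display.  The helper in the patch does this under
 * mode_config.mutex and also checks CRTC/DPMS state afterwards.
 */
static bool any_display_connected(struct drm_device *drm_dev)
{
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        bool connected = false;

        drm_connector_list_iter_begin(drm_dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                if (connector->status == connector_status_connected) {
                        connected = true;
                        break;
                }
        }
        drm_connector_list_iter_end(&iter);

        return connected;
}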

>
> > Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
> > ---
> >   drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 107 ++++++++++++++++--------
> >   1 file changed, 72 insertions(+), 35 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> > index 4efaa183abcd..97a1aa02d76e 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> > @@ -2395,6 +2395,71 @@ static int amdgpu_pmops_restore(struct device *dev)
> >       return amdgpu_device_resume(drm_dev, true);
> >   }
> >
> > +static int amdgpu_runtime_idle_check_display(struct device *dev)
> > +{
> > +     struct pci_dev *pdev = to_pci_dev(dev);
> > +     struct drm_device *drm_dev = pci_get_drvdata(pdev);
> > +     struct amdgpu_device *adev = drm_to_adev(drm_dev);
> > +
> > +     if (adev->mode_info.num_crtc) {
> > +             struct drm_connector *list_connector;
> > +             struct drm_connector_list_iter iter;
> > +             int ret = 0;
> > +
> > +             /* XXX: Return busy if any displays are connected to avoid
> > +              * possible display wake ups after runtime resume due to
>
> Nit: wakeups

Ack.

>
> > +              * hotplug events in case any displays were connected while
> > +              * the GPU was in suspend.  Remove this once that is fixed.
> > +              */
>
> Do you have an (internal) issue to track this?

Yes, we are tracking it.
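
To spell out where the wake ups come from: after a runtime resume we
re-probe the outputs and send a hotplug uevent so userspace can react to
anything that changed while the GPU was off.  In generic DRM helper
terms (a sketch with an illustrative name, not our exact code path,
which goes through the display code), that notification boils down to
something like:

#include <drm/drm_device.h>
#include <drm/drm_probe_helper.h>

/* Sketch only: re-check connector status after a runtime resume and
 * send a hotplug uevent to userspace if anything changed.  It is this
 * uevent that can cause compositors to light the displays back up
 * right after they were turned off.
 */
static void notify_userspace_after_resume(struct drm_device *drm_dev)
{
        drm_helper_hpd_irq_event(drm_dev);
}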

Alex

>
> > +             mutex_lock(&drm_dev->mode_config.mutex);
> > +             drm_connector_list_iter_begin(drm_dev, &iter);
> > +             drm_for_each_connector_iter(list_connector, &iter) {
> > +                     if (list_connector->status == connector_status_connected) {
> > +                             ret = -EBUSY;
> > +                             break;
> > +                     }
> > +             }
> > +             drm_connector_list_iter_end(&iter);
> > +             mutex_unlock(&drm_dev->mode_config.mutex);
> > +
> > +             if (ret)
> > +                     return ret;
> > +
> > +             if (amdgpu_device_has_dc_support(adev)) {
> > +                     struct drm_crtc *crtc;
> > +
> > +                     drm_for_each_crtc(crtc, drm_dev) {
> > +                             drm_modeset_lock(&crtc->mutex, NULL);
> > +                             if (crtc->state->active)
> > +                                     ret = -EBUSY;
> > +                             drm_modeset_unlock(&crtc->mutex);
> > +                             if (ret < 0)
> > +                                     break;
> > +                     }
> > +             } else {
> > +                     mutex_lock(&drm_dev->mode_config.mutex);
> > +                     drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);
> > +
> > +                     drm_connector_list_iter_begin(drm_dev, &iter);
> > +                     drm_for_each_connector_iter(list_connector, &iter) {
> > +                             if (list_connector->dpms ==  DRM_MODE_DPMS_ON) {
> > +                                     ret = -EBUSY;
> > +                                     break;
> > +                             }
> > +                     }
> > +
> > +                     drm_connector_list_iter_end(&iter);
> > +
> > +                     drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
> > +                     mutex_unlock(&drm_dev->mode_config.mutex);
> > +             }
> > +             if (ret)
> > +                     return ret;
> > +     }
> > +
> > +     return 0;
> > +}
> > +
> >   static int amdgpu_pmops_runtime_suspend(struct device *dev)
> >   {
> >       struct pci_dev *pdev = to_pci_dev(dev);
> > @@ -2407,6 +2472,10 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
> >               return -EBUSY;
> >       }
> >
> > +     ret = amdgpu_runtime_idle_check_display(dev);
> > +     if (ret)
> > +             return ret;
> > +
> >       /* wait for all rings to drain before suspending */
> >       for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
> >               struct amdgpu_ring *ring = adev->rings[i];
> > @@ -2516,41 +2585,9 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
> >               return -EBUSY;
> >       }
> >
> > -     if (amdgpu_device_has_dc_support(adev)) {
> > -             struct drm_crtc *crtc;
> > -
> > -             drm_for_each_crtc(crtc, drm_dev) {
> > -                     drm_modeset_lock(&crtc->mutex, NULL);
> > -                     if (crtc->state->active)
> > -                             ret = -EBUSY;
> > -                     drm_modeset_unlock(&crtc->mutex);
> > -                     if (ret < 0)
> > -                             break;
> > -             }
> > -
> > -     } else {
> > -             struct drm_connector *list_connector;
> > -             struct drm_connector_list_iter iter;
> > -
> > -             mutex_lock(&drm_dev->mode_config.mutex);
> > -             drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);
> > -
> > -             drm_connector_list_iter_begin(drm_dev, &iter);
> > -             drm_for_each_connector_iter(list_connector, &iter) {
> > -                     if (list_connector->dpms ==  DRM_MODE_DPMS_ON) {
> > -                             ret = -EBUSY;
> > -                             break;
> > -                     }
> > -             }
> > -
> > -             drm_connector_list_iter_end(&iter);
> > -
> > -             drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
> > -             mutex_unlock(&drm_dev->mode_config.mutex);
> > -     }
> > -
> > -     if (ret == -EBUSY)
> > -             DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
> > +     ret = amdgpu_runtime_idle_check_display(dev);
> > +     if (ret)
> > +             return ret;
> >
> >       pm_runtime_mark_last_busy(dev);
> >       pm_runtime_autosuspend(dev);
>
> The overall change looks good.
>
>
> Kind regards,
>
> Paul

