[PATCH] drm/amdgpu: don't runtime suspend if there are displays attached (v2)

Quan, Evan Evan.Quan at amd.com
Thu Apr 21 05:23:00 UTC 2022


[AMD Official Use Only]

Reviewed-by: Evan Quan <evan.quan at amd.com>

> -----Original Message-----
> From: amd-gfx <amd-gfx-bounces at lists.freedesktop.org> On Behalf Of Alex Deucher
> Sent: Thursday, April 21, 2022 11:16 AM
> To: amd-gfx at lists.freedesktop.org
> Cc: Deucher, Alexander <Alexander.Deucher at amd.com>; Michele Ballabio <ballabio.m at gmail.com>
> Subject: [PATCH] drm/amdgpu: don't runtime suspend if there are displays attached (v2)
> 
> We normally runtime suspend when there are displays attached if they
> are in the DPMS off state, however, if something wakes the GPU
> we send a hotplug event on resume (in case any displays were connected
> while the GPU was in suspend) which can cause userspace to light
> up the displays again soon after they were turned off.
> 
> Prior to
> commit 087451f372bf76 ("drm/amdgpu: use generic fb helpers instead of
> setting up AMD own's."),
> the driver took a runtime pm reference when the fbdev emulation was
> enabled because we didn't implement proper shadowing support for
> vram access when the device was off so the device never runtime
> suspended when there was a console bound.  Once that commit landed,
> we now utilize the core fb helper implementation which properly
> handles the emulation, so runtime pm now suspends in cases where it did
> not before.  Ultimately, we need to sort out why runtime suspend is not
> working in this case for some users, but this should restore similar
> behavior to before.
> 
> v2: move check into runtime_suspend
> v3: wake ups -> wakeups in comment, retain pm_runtime behavior in
>     runtime_idle callback
> 
> Fixes: 087451f372bf76 ("drm/amdgpu: use generic fb helpers instead of setting up AMD own's.")
> Tested-by: Michele Ballabio <ballabio.m at gmail.com>
> Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 105 ++++++++++++++++--------
>  1 file changed, 70 insertions(+), 35 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> index 4efaa183abcd..ebd37fb19cdb 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
> @@ -2395,6 +2395,71 @@ static int amdgpu_pmops_restore(struct device *dev)
>  	return amdgpu_device_resume(drm_dev, true);
>  }
> 
> +static int amdgpu_runtime_idle_check_display(struct device *dev)
> +{
> +	struct pci_dev *pdev = to_pci_dev(dev);
> +	struct drm_device *drm_dev = pci_get_drvdata(pdev);
> +	struct amdgpu_device *adev = drm_to_adev(drm_dev);
> +
> +	if (adev->mode_info.num_crtc) {
> +		struct drm_connector *list_connector;
> +		struct drm_connector_list_iter iter;
> +		int ret = 0;
> +
> +		/* XXX: Return busy if any displays are connected to avoid
> +		 * possible display wakeups after runtime resume due to
> +		 * hotplug events in case any displays were connected while
> +		 * the GPU was in suspend.  Remove this once that is fixed.
> +		 */
> +		mutex_lock(&drm_dev->mode_config.mutex);
> +		drm_connector_list_iter_begin(drm_dev, &iter);
> +		drm_for_each_connector_iter(list_connector, &iter) {
> +			if (list_connector->status == connector_status_connected) {
> +				ret = -EBUSY;
> +				break;
> +			}
> +		}
> +		drm_connector_list_iter_end(&iter);
> +		mutex_unlock(&drm_dev->mode_config.mutex);
> +
> +		if (ret)
> +			return ret;
> +
> +		if (amdgpu_device_has_dc_support(adev)) {
> +			struct drm_crtc *crtc;
> +
> +			drm_for_each_crtc(crtc, drm_dev) {
> +				drm_modeset_lock(&crtc->mutex, NULL);
> +				if (crtc->state->active)
> +					ret = -EBUSY;
> +				drm_modeset_unlock(&crtc->mutex);
> +				if (ret < 0)
> +					break;
> +			}
> +		} else {
> +			mutex_lock(&drm_dev->mode_config.mutex);
> +			drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);
> +
> +			drm_connector_list_iter_begin(drm_dev, &iter);
> +			drm_for_each_connector_iter(list_connector, &iter) {
> +				if (list_connector->dpms == DRM_MODE_DPMS_ON) {
> +					ret = -EBUSY;
> +					break;
> +				}
> +			}
> +
> +			drm_connector_list_iter_end(&iter);
> +
> +			drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
> +			mutex_unlock(&drm_dev->mode_config.mutex);
> +		}
> +		if (ret)
> +			return ret;
> +	}
> +
> +	return 0;
> +}
> +
>  static int amdgpu_pmops_runtime_suspend(struct device *dev)
>  {
>  	struct pci_dev *pdev = to_pci_dev(dev);
> @@ -2407,6 +2472,10 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
>  		return -EBUSY;
>  	}
> 
> +	ret = amdgpu_runtime_idle_check_display(dev);
> +	if (ret)
> +		return ret;
> +
>  	/* wait for all rings to drain before suspending */
>  	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
>  		struct amdgpu_ring *ring = adev->rings[i];
> @@ -2516,41 +2585,7 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
>  		return -EBUSY;
>  	}
> 
> -	if (amdgpu_device_has_dc_support(adev)) {
> -		struct drm_crtc *crtc;
> -
> -		drm_for_each_crtc(crtc, drm_dev) {
> -			drm_modeset_lock(&crtc->mutex, NULL);
> -			if (crtc->state->active)
> -				ret = -EBUSY;
> -			drm_modeset_unlock(&crtc->mutex);
> -			if (ret < 0)
> -				break;
> -		}
> -
> -	} else {
> -		struct drm_connector *list_connector;
> -		struct drm_connector_list_iter iter;
> -
> -		mutex_lock(&drm_dev->mode_config.mutex);
> -		drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);
> -
> -		drm_connector_list_iter_begin(drm_dev, &iter);
> -		drm_for_each_connector_iter(list_connector, &iter) {
> -			if (list_connector->dpms ==  DRM_MODE_DPMS_ON) {
> -				ret = -EBUSY;
> -				break;
> -			}
> -		}
> -
> -		drm_connector_list_iter_end(&iter);
> -
> -		drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
> -		mutex_unlock(&drm_dev->mode_config.mutex);
> -	}
> -
> -	if (ret == -EBUSY)
> -		DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
> +	ret = amdgpu_runtime_idle_check_display(dev);
> 
>  	pm_runtime_mark_last_busy(dev);
>  	pm_runtime_autosuspend(dev);
> --
> 2.35.1
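
For readers following the commit message above: the pre-087451f372bf76 behavior it describes ("the driver took a runtime pm reference when the fbdev emulation was enabled") is the usual runtime PM get/put pairing. Below is a minimal sketch of that pattern, assuming hypothetical example_fbdev_open()/example_fbdev_release() hooks; it is not the actual amdgpu fbdev code.

#include <linux/pm_runtime.h>

/*
 * Sketch only: hypothetical fbdev client hooks, not the real
 * pre-087451f372bf76 amdgpu implementation.
 */
static int example_fbdev_open(struct device *dev)
{
	int ret;

	/* Pin the device awake while the console/fbdev client is bound. */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* Balance the usage count even on failure. */
		pm_runtime_put_autosuspend(dev);
		return ret;
	}

	return 0;
}

static void example_fbdev_release(struct device *dev)
{
	/* Drop the reference so autosuspend can put the device to sleep. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}

While such a reference is held, the runtime PM core never calls the driver's runtime_suspend callback, which is why a bound console used to keep the GPU awake; the patch restores a similar net effect by returning -EBUSY from amdgpu_runtime_idle_check_display() when a display is connected or active.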

