RFC: Add drm_dev_suspend/resume() ?

Noralf Trønnes noralf at tronnes.org
Sun Oct 22 16:52:41 UTC 2017


Hi,

I've spent some time in the fbdev emulation code and discovered a
recurring pattern around suspend/resume.
Should we add some more helpers? :-)

struct drm_device {
     /**
      * @suspend_state:
      *
      * Atomic state when suspended.
      * Set by drm_dev_suspend(), cleared by drm_dev_resume().
      */
     struct drm_atomic_state *suspend_state;
};

int drm_dev_suspend(struct drm_device *dev)
{
     struct drm_atomic_state *state;

     if (!dev)
         return 0;

     drm_kms_helper_poll_disable(dev);
     drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1);
     state = drm_atomic_helper_suspend(dev);
     if (IS_ERR(state)) {
         drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
         drm_kms_helper_poll_enable(dev);
         return PTR_ERR(state);
     }

     dev->suspend_state = state;

     return 0;
}

int drm_dev_resume(struct drm_device *dev)
{
     int ret;

     if (!dev || WARN_ON(!dev->suspend_state))
         return 0;

     ret = drm_atomic_helper_resume(dev, dev->suspend_state);
     if (ret)
         DRM_ERROR("Failed to resume (%d)\n", ret);
     dev->suspend_state = NULL;

     drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
     drm_kms_helper_poll_enable(dev);

     return 0;
}
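
A driver that stores the drm_device as drvdata could then presumably
reduce its PM callbacks to something like this rough sketch (the foo_
names are made up for illustration):

static int __maybe_unused foo_pm_suspend(struct device *dev)
{
     /* assumes dev_get_drvdata() returns the drm_device */
     return drm_dev_suspend(dev_get_drvdata(dev));
}

static int __maybe_unused foo_pm_resume(struct device *dev)
{
     return drm_dev_resume(dev_get_drvdata(dev));
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_pm_suspend, foo_pm_resume);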

The drm_kms_helper_poll_enable() docs state that it's not allowed to
call this function if polling support has not been set up, but the code
allows it. The drm_kms_helper_poll_disable() docs state that it's ok to
call it even if polling is not enabled.


Here is the suspend/resume code from the drivers that use
drm_atomic_helper_suspend/resume():

static int __maybe_unused hdlcd_pm_suspend(struct device *dev)
{
     struct drm_device *drm = dev_get_drvdata(dev);
     struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;

     if (!hdlcd)
         return 0;

     drm_kms_helper_poll_disable(drm);

     hdlcd->state = drm_atomic_helper_suspend(drm);
     if (IS_ERR(hdlcd->state)) {
         drm_kms_helper_poll_enable(drm);
         return PTR_ERR(hdlcd->state);
     }

     return 0;
}

static int __maybe_unused hdlcd_pm_resume(struct device *dev)
{
     struct drm_device *drm = dev_get_drvdata(dev);
     struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;

     if (!hdlcd)
         return 0;

     drm_atomic_helper_resume(drm, hdlcd->state);
     drm_kms_helper_poll_enable(drm);
     pm_runtime_set_active(dev);

     return 0;
}


static int __maybe_unused malidp_pm_suspend(struct device *dev)
{
     struct drm_device *drm = dev_get_drvdata(dev);
     struct malidp_drm *malidp = drm->dev_private;

     drm_kms_helper_poll_disable(drm);
     console_lock();
     drm_fbdev_cma_set_suspend(malidp->fbdev, 1);
     console_unlock();
     malidp->pm_state = drm_atomic_helper_suspend(drm);
     if (IS_ERR(malidp->pm_state)) {
         console_lock();
         drm_fbdev_cma_set_suspend(malidp->fbdev, 0);
         console_unlock();
         drm_kms_helper_poll_enable(drm);
         return PTR_ERR(malidp->pm_state);
     }

     return 0;
}

static int __maybe_unused malidp_pm_resume(struct device *dev)
{
     struct drm_device *drm = dev_get_drvdata(dev);
     struct malidp_drm *malidp = drm->dev_private;

     drm_atomic_helper_resume(drm, malidp->pm_state);
     console_lock();
     drm_fbdev_cma_set_suspend(malidp->fbdev, 0);
     console_unlock();
     drm_kms_helper_poll_enable(drm);

     return 0;
}


static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
{
     struct drm_device *drm_dev = dev_get_drvdata(dev);
     struct atmel_hlcdc_dc *dc = drm_dev->dev_private;
     struct regmap *regmap = dc->hlcdc->regmap;
     struct drm_atomic_state *state;

     state = drm_atomic_helper_suspend(drm_dev);
     if (IS_ERR(state))
         return PTR_ERR(state);

     dc->suspend.state = state;

     regmap_read(regmap, ATMEL_HLCDC_IMR, &dc->suspend.imr);
     regmap_write(regmap, ATMEL_HLCDC_IDR, dc->suspend.imr);
     clk_disable_unprepare(dc->hlcdc->periph_clk);

     return 0;
}

static int atmel_hlcdc_dc_drm_resume(struct device *dev)
{
     struct drm_device *drm_dev = dev_get_drvdata(dev);
     struct atmel_hlcdc_dc *dc = drm_dev->dev_private;

     clk_prepare_enable(dc->hlcdc->periph_clk);
     regmap_write(dc->hlcdc->regmap, ATMEL_HLCDC_IER, dc->suspend.imr);

     return drm_atomic_helper_resume(drm_dev, dc->suspend.state);
}


static int exynos_drm_suspend(struct device *dev)
{
     struct drm_device *drm_dev = dev_get_drvdata(dev);
     struct exynos_drm_private *private = drm_dev->dev_private;

     if (pm_runtime_suspended(dev) || !drm_dev)
         return 0;

     drm_kms_helper_poll_disable(drm_dev);
     exynos_drm_fbdev_suspend(drm_dev);
     private->suspend_state = drm_atomic_helper_suspend(drm_dev);
     if (IS_ERR(private->suspend_state)) {
         exynos_drm_fbdev_resume(drm_dev);
         drm_kms_helper_poll_enable(drm_dev);
         return PTR_ERR(private->suspend_state);
     }

     return 0;
}

static int exynos_drm_resume(struct device *dev)
{
     struct drm_device *drm_dev = dev_get_drvdata(dev);
     struct exynos_drm_private *private = drm_dev->dev_private;

     if (pm_runtime_suspended(dev) || !drm_dev)
         return 0;

     drm_atomic_helper_resume(drm_dev, private->suspend_state);
     exynos_drm_fbdev_resume(drm_dev);
     drm_kms_helper_poll_enable(drm_dev);

     return 0;
}


static int fsl_dcu_drm_pm_suspend(struct device *dev)
{
     struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev);

     if (!fsl_dev)
         return 0;

     disable_irq(fsl_dev->irq);
     drm_kms_helper_poll_disable(fsl_dev->drm);

     console_lock();
     drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 1);
     console_unlock();

     fsl_dev->state = drm_atomic_helper_suspend(fsl_dev->drm);
     if (IS_ERR(fsl_dev->state)) {
         console_lock();
         drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 0);
         console_unlock();

         drm_kms_helper_poll_enable(fsl_dev->drm);
         enable_irq(fsl_dev->irq);
         return PTR_ERR(fsl_dev->state);
     }

     clk_disable_unprepare(fsl_dev->pix_clk);
     clk_disable_unprepare(fsl_dev->clk);

     return 0;
}

static int fsl_dcu_drm_pm_resume(struct device *dev)
{
     struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev);
     int ret;

     if (!fsl_dev)
         return 0;

     ret = clk_prepare_enable(fsl_dev->clk);
     if (ret < 0) {
         dev_err(dev, "failed to enable dcu clk\n");
         return ret;
     }

     if (fsl_dev->tcon)
         fsl_tcon_bypass_enable(fsl_dev->tcon);
     fsl_dcu_drm_init_planes(fsl_dev->drm);
     drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);

     console_lock();
     drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 0);
     console_unlock();

     drm_kms_helper_poll_enable(fsl_dev->drm);
     enable_irq(fsl_dev->irq);

     return 0;
}


static int __maybe_unused hibmc_pm_suspend(struct device *dev)
{
     struct pci_dev *pdev = to_pci_dev(dev);
     struct drm_device *drm_dev = pci_get_drvdata(pdev);
     struct hibmc_drm_private *priv = drm_dev->dev_private;

     drm_kms_helper_poll_disable(drm_dev);
     priv->suspend_state = drm_atomic_helper_suspend(drm_dev);
     if (IS_ERR(priv->suspend_state)) {
         DRM_ERROR("drm_atomic_helper_suspend failed: %ld\n",
               PTR_ERR(priv->suspend_state));
         drm_kms_helper_poll_enable(drm_dev);
         return PTR_ERR(priv->suspend_state);
     }

     return 0;
}

static int  __maybe_unused hibmc_pm_resume(struct device *dev)
{
     struct pci_dev *pdev = to_pci_dev(dev);
     struct drm_device *drm_dev = pci_get_drvdata(pdev);
     struct hibmc_drm_private *priv = drm_dev->dev_private;

     drm_atomic_helper_resume(drm_dev, priv->suspend_state);
     drm_kms_helper_poll_enable(drm_dev);

     return 0;
}


i915 is excluded since it's rather complicated.


static int imx_drm_suspend(struct device *dev)
{
     struct drm_device *drm_dev = dev_get_drvdata(dev);
     struct imx_drm_device *imxdrm;

     /* The drm_dev is NULL before .load hook is called */
     if (drm_dev == NULL)
         return 0;

     drm_kms_helper_poll_disable(drm_dev);

     imxdrm = drm_dev->dev_private;
     imxdrm->state = drm_atomic_helper_suspend(drm_dev);
     if (IS_ERR(imxdrm->state)) {
         drm_kms_helper_poll_enable(drm_dev);
         return PTR_ERR(imxdrm->state);
     }

     return 0;
}

static int imx_drm_resume(struct device *dev)
{
     struct drm_device *drm_dev = dev_get_drvdata(dev);
     struct imx_drm_device *imx_drm;

     if (drm_dev == NULL)
         return 0;

     imx_drm = drm_dev->dev_private;
     drm_atomic_helper_resume(drm_dev, imx_drm->state);
     drm_kms_helper_poll_enable(drm_dev);

     return 0;
}


static int mtk_drm_sys_suspend(struct device *dev)
{
     struct mtk_drm_private *private = dev_get_drvdata(dev);
     struct drm_device *drm = private->drm;

     drm_kms_helper_poll_disable(drm);

     private->suspend_state = drm_atomic_helper_suspend(drm);
     if (IS_ERR(private->suspend_state)) {
         drm_kms_helper_poll_enable(drm);
         return PTR_ERR(private->suspend_state);
     }

     DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n");
     return 0;
}

static int mtk_drm_sys_resume(struct device *dev)
{
     struct mtk_drm_private *private = dev_get_drvdata(dev);
     struct drm_device *drm = private->drm;

     drm_atomic_helper_resume(drm, private->suspend_state);
     drm_kms_helper_poll_enable(drm);

     DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n");
     return 0;
}


nouveau is also excluded since it's rather complicated.


static int rockchip_drm_sys_suspend(struct device *dev)
{
     struct drm_device *drm = dev_get_drvdata(dev);
     struct rockchip_drm_private *priv;

     if (!drm)
         return 0;

     drm_kms_helper_poll_disable(drm);
     rockchip_drm_fb_suspend(drm);

     priv = drm->dev_private;
     priv->state = drm_atomic_helper_suspend(drm);
     if (IS_ERR(priv->state)) {
         rockchip_drm_fb_resume(drm);
         drm_kms_helper_poll_enable(drm);
         return PTR_ERR(priv->state);
     }

     return 0;
}

static int rockchip_drm_sys_resume(struct device *dev)
{
     struct drm_device *drm = dev_get_drvdata(dev);
     struct rockchip_drm_private *priv;

     if (!drm)
         return 0;

     priv = drm->dev_private;
     drm_atomic_helper_resume(drm, priv->state);
     rockchip_drm_fb_resume(drm);
     drm_kms_helper_poll_enable(drm);

     return 0;
}


static int host1x_drm_suspend(struct device *dev)
{
     struct drm_device *drm = dev_get_drvdata(dev);
     struct tegra_drm *tegra = drm->dev_private;

     drm_kms_helper_poll_disable(drm);
     tegra_drm_fb_suspend(drm);

     tegra->state = drm_atomic_helper_suspend(drm);
     if (IS_ERR(tegra->state)) {
         tegra_drm_fb_resume(drm);
         drm_kms_helper_poll_enable(drm);
         return PTR_ERR(tegra->state);
     }

     return 0;
}

static int host1x_drm_resume(struct device *dev)
{
     struct drm_device *drm = dev_get_drvdata(dev);
     struct tegra_drm *tegra = drm->dev_private;

     drm_atomic_helper_resume(drm, tegra->state);
     tegra_drm_fb_resume(drm);
     drm_kms_helper_poll_enable(drm);

     return 0;
}


static int tilcdc_pm_suspend(struct device *dev)
{
     struct drm_device *ddev = dev_get_drvdata(dev);
     struct tilcdc_drm_private *priv = ddev->dev_private;

     priv->saved_state = drm_atomic_helper_suspend(ddev);

     /* Select sleep pin state */
     pinctrl_pm_select_sleep_state(dev);

     return 0;
}

static int tilcdc_pm_resume(struct device *dev)
{
     struct drm_device *ddev = dev_get_drvdata(dev);
     struct tilcdc_drm_private *priv = ddev->dev_private;
     int ret = 0;

     /* Select default pin state */
     pinctrl_pm_select_default_state(dev);

     if (priv->saved_state)
         ret = drm_atomic_helper_resume(ddev, priv->saved_state);

     return ret;
}


int tinydrm_suspend(struct tinydrm_device *tdev)
{
     struct drm_atomic_state *state;

     if (tdev->suspend_state) {
         DRM_ERROR("Failed to suspend: state already set\n");
         return -EINVAL;
     }

     drm_fbdev_cma_set_suspend_unlocked(tdev->fbdev_cma, 1);
     state = drm_atomic_helper_suspend(tdev->drm);
     if (IS_ERR(state)) {
         drm_fbdev_cma_set_suspend_unlocked(tdev->fbdev_cma, 0);
         return PTR_ERR(state);
     }

     tdev->suspend_state = state;

     return 0;
}

int tinydrm_resume(struct tinydrm_device *tdev)
{
     struct drm_atomic_state *state = tdev->suspend_state;
     int ret;

     if (!state) {
         DRM_ERROR("Failed to resume: state is not set\n");
         return -EINVAL;
     }

     tdev->suspend_state = NULL;

     ret = drm_atomic_helper_resume(tdev->drm, state);
     if (ret) {
         DRM_ERROR("Error resuming state: %d\n", ret);
         return ret;
     }

     drm_fbdev_cma_set_suspend_unlocked(tdev->fbdev_cma, 0);

     return 0;
}
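
For drivers that also manage clocks and interrupts around the core
suspend/resume, like atmel-hlcdc and fsl-dcu above, the helpers would
only replace the common part. A hypothetical driver (foo_ names and
struct made up for illustration) could wrap them roughly like this:

/* hypothetical driver private data, only for illustration */
struct foo_device {
     struct drm_device *drm;
     struct clk *clk;
     int irq;
};

static int __maybe_unused foo_pm_suspend(struct device *dev)
{
     struct foo_device *foo = dev_get_drvdata(dev);
     int ret;

     disable_irq(foo->irq);

     ret = drm_dev_suspend(foo->drm);
     if (ret) {
         enable_irq(foo->irq);
         return ret;
     }

     clk_disable_unprepare(foo->clk);

     return 0;
}

static int __maybe_unused foo_pm_resume(struct device *dev)
{
     struct foo_device *foo = dev_get_drvdata(dev);
     int ret;

     ret = clk_prepare_enable(foo->clk);
     if (ret)
         return ret;

     ret = drm_dev_resume(foo->drm);
     enable_irq(foo->irq);

     return ret;
}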


Noralf.


