[PATCH RFC 084/111] staging: etnaviv: runtime PM: add initial support

Lucas Stach l.stach at pengutronix.de
Thu Apr 2 08:30:26 PDT 2015


From: Russell King <rmk+kernel at arm.linux.org.uk>

Add initial runtime PM support.  We manage the runtime PM state based
on events: when we allocate an event upon command submission, we "get"
the runtime PM state, causing the device to resume if it is not already
resumed.

When we receive an interrupt and free an event, we "put" the runtime
PM state.  Once all events have been freed, the runtime PM core is
then free to attempt to suspend the device.
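
For reference, the submission-side pairing boils down to the sketch
below.  The function name example_submit is purely illustrative; the
real change lands in etnaviv_gpu_submit() in the diff that follows.

#include <linux/pm_runtime.h>

static int example_submit(struct etnaviv_gpu *gpu)
{
	unsigned int event;
	int ret;

	/* Take a runtime PM reference; resumes the GPU if suspended. */
	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0)
		return ret;

	event = event_alloc(gpu);
	if (event == ~0U) {
		/* No free event: drop the reference we just took. */
		pm_runtime_put_autosuspend(gpu->dev);
		return -EBUSY;
	}

	/*
	 * Queue the command stream against 'event'; the matching
	 * pm_runtime_put_autosuspend() happens when the IRQ handler
	 * frees the event on completion.
	 */
	return 0;
}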

We also include a runtime suspend callback which checks that the GPU
modules (except for the front end) are idle before suspending.  This
ensures that the GPU is properly idle before it is powered down; if it
is not, the suspend is retried later.
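
The runtime suspend callback's idle check condenses to the sketch
below; field and register names follow the driver, but the body is
trimmed to the essentials (the full version is in the diff).

static int example_rpm_suspend(struct device *dev)
{
	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
	u32 mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
	u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;

	/* Returning -EBUSY makes the PM core retry the suspend later. */
	if (idle != mask)
		return -EBUSY;

	return etnaviv_gpu_suspend(gpu);
}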

Signed-off-by: Russell King <rmk+kernel at arm.linux.org.uk>
Signed-off-by: Lucas Stach <l.stach at pengutronix.de>
---
lst: squashed fix for PM_RUNTIME config symbol removal
---
 drivers/staging/etnaviv/etnaviv_gpu.c | 111 ++++++++++++++++++++++++++++++++--
 drivers/staging/etnaviv/etnaviv_gpu.h |   1 +
 2 files changed, 107 insertions(+), 5 deletions(-)

diff --git a/drivers/staging/etnaviv/etnaviv_gpu.c b/drivers/staging/etnaviv/etnaviv_gpu.c
index a22c5ab558a7..32084e08a387 100644
--- a/drivers/staging/etnaviv/etnaviv_gpu.c
+++ b/drivers/staging/etnaviv/etnaviv_gpu.c
@@ -457,10 +457,14 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 	struct iommu_domain *iommu;
 	bool mmuv2;
 
+	ret = pm_runtime_get_sync(gpu->dev);
+	if (ret < 0)
+		return ret;
+
 	etnaviv_hw_identify(gpu);
 	ret = etnaviv_hw_reset(gpu);
 	if (ret)
-		return ret;
+		goto fail;
 
 	/* Setup IOMMU.. eventually we will (I think) do this once per context
 	 * and have separate page tables per context.  For now, to keep things
@@ -510,10 +514,17 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 
 	/* Now program the hardware */
 	etnaviv_gpu_hw_init(gpu);
+	gpu->initialized = true;
+
+	pm_runtime_mark_last_busy(gpu->dev);
+	pm_runtime_put_autosuspend(gpu->dev);
 
 	return 0;
 
 fail:
+	pm_runtime_mark_last_busy(gpu->dev);
+	pm_runtime_put_autosuspend(gpu->dev);
+
 	return ret;
 }
 
@@ -545,10 +556,15 @@ static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
 void etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
 {
 	struct dma_debug debug;
-	u32 dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
-	u32 dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
-	u32 axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
-	u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
+	u32 dma_lo, dma_hi, axi, idle;
+
+	if (pm_runtime_get_sync(gpu->dev) < 0)
+		return;
+
+	dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
+	dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
+	axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
+	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
 
 	verify_dma(gpu, &debug);
 
@@ -610,6 +626,9 @@ void etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
 	seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
 	seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
 		   dma_lo, dma_hi);
+
+	pm_runtime_mark_last_busy(gpu->dev);
+	pm_runtime_put_autosuspend(gpu->dev);
 }
 #endif
 
@@ -819,6 +838,11 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 	struct drm_device *dev = gpu->drm;
 	struct etnaviv_drm_private *priv = dev->dev_private;
 	unsigned int event, i;
+	int ret;
+
+	ret = pm_runtime_get_sync(gpu->dev);
+	if (ret < 0)
+		return ret;
 
 	/*
 	 * TODO
@@ -832,6 +856,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 	event = event_alloc(gpu);
 	if (unlikely(event == ~0U)) {
 		DRM_ERROR("no free event\n");
+		pm_runtime_put_autosuspend(gpu->dev);
 		return -EBUSY;
 	}
 
@@ -884,6 +909,8 @@ static irqreturn_t irq_handler(int irq, void *data)
 	if (intr != 0) {
 		int event;
 
+		pm_runtime_mark_last_busy(gpu->dev);
+
 		dev_dbg(gpu->dev, "intr 0x%08x\n", intr);
 
 		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
@@ -911,6 +938,15 @@ static irqreturn_t irq_handler(int irq, void *data)
 				gpu->last_ring_pos = gpu->event[event].ring_pos;
 			}
 			event_free(gpu, event);
+
+			/*
+			 * We need to balance the runtime PM count caused by
+			 * each submission.  Upon submission, we increment
+			 * the runtime PM counter, and allocate one event.
+			 * So here, we put the runtime PM count for each
+			 * completed event.
+			 */
+			pm_runtime_put_autosuspend(gpu->dev);
 		}
 
 		etnaviv_gpu_retire(gpu);
@@ -1003,7 +1039,11 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
 
 	dev_info(dev, "post gpu[idx]: %p\n", priv->gpu[idx]);
 
+#ifdef CONFIG_PM
+	ret = pm_runtime_get_sync(gpu->dev);
+#else
 	ret = etnaviv_gpu_resume(gpu);
+#endif
 	if (ret < 0)
 		return ret;
 
@@ -1015,6 +1055,10 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
 
 	setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
 			(unsigned long)gpu);
+
+	pm_runtime_mark_last_busy(gpu->dev);
+	pm_runtime_put_autosuspend(gpu->dev);
+
 	return 0;
 fail:
 	return -1;
@@ -1031,7 +1075,12 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
 
 	WARN_ON(!list_empty(&gpu->active_list));
 
+#ifdef CONFIG_PM
+	pm_runtime_get_sync(gpu->dev);
+	pm_runtime_put_sync_suspend(gpu->dev);
+#else
 	etnaviv_gpu_suspend(gpu);
+#endif
 
 	if (gpu->buffer) {
 		drm_gem_object_unreference(gpu->buffer);
@@ -1130,6 +1179,15 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
 	/* TODO: figure out max mapped size */
 	dev_set_drvdata(dev, gpu);
 
+	/*
+	 * We treat the device as initially suspended.  The runtime PM
+	 * autosuspend delay is rather arbitrary: no measurements have
+	 * yet been performed to determine an appropriate value.
+	 */
+	pm_runtime_use_autosuspend(gpu->dev);
+	pm_runtime_set_autosuspend_delay(gpu->dev, 200);
+	pm_runtime_enable(gpu->dev);
+
 	err = component_add(&pdev->dev, &gpu_ops);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to register component: %d\n", err);
@@ -1145,13 +1203,56 @@ fail:
 static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
 {
 	component_del(&pdev->dev, &gpu_ops);
+	pm_runtime_disable(&pdev->dev);
 	return 0;
 }
 
+#ifdef CONFIG_PM
+static int etnaviv_gpu_rpm_suspend(struct device *dev)
+{
+	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
+	u32 idle, mask;
+
+	/* If we have outstanding fences, we're not idle */
+	if (gpu->retired_fence != gpu->submitted_fence)
+		return -EBUSY;
+
+	/* Check whether the hardware (except FE) is idle */
+	mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
+	idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
+	if (idle != mask)
+		return -EBUSY;
+
+	return etnaviv_gpu_suspend(gpu);
+}
+
+static int etnaviv_gpu_rpm_resume(struct device *dev)
+{
+	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
+	int ret;
+
+	ret = etnaviv_gpu_resume(gpu);
+	if (ret)
+		return ret;
+
+	/* Re-initialise the basic hardware state */
+	if (gpu->initialized)
+		etnaviv_gpu_hw_init(gpu);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
+	SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
+			   NULL)
+};
+
 struct platform_driver etnaviv_gpu_driver = {
 	.driver = {
 		.name = "etnaviv-gpu",
 		.owner = THIS_MODULE,
+		.pm = &etnaviv_gpu_pm_ops,
 		.of_match_table = etnaviv_gpu_match,
 	},
 	.probe = etnaviv_gpu_platform_probe,
diff --git a/drivers/staging/etnaviv/etnaviv_gpu.h b/drivers/staging/etnaviv/etnaviv_gpu.h
index bbc91b6ea8fe..3a4c111a4978 100644
--- a/drivers/staging/etnaviv/etnaviv_gpu.h
+++ b/drivers/staging/etnaviv/etnaviv_gpu.h
@@ -89,6 +89,7 @@ struct etnaviv_gpu {
 	struct device *dev;
 	struct etnaviv_chip_identity identity;
 	int pipe;
+	bool initialized;
 
 	/* 'ring'-buffer: */
 	struct drm_gem_object *buffer;
-- 
2.1.4


