[PATCH v9 5/5] drm: exynos: mixer: Add interconnect support

Sylwester Nawrocki s.nawrocki at samsung.com
Thu Nov 12 14:09:31 UTC 2020


This patch adds interconnect support to exynos-mixer. The mixer works
the same as before when CONFIG_INTERCONNECT is 'n'.

For proper operation of the video mixer block we need to ensure that
interconnect buses like DMC or LEFTBUS provide enough bandwidth so as
to avoid DMA buffer underruns in the mixer block, i.e. we need to
prevent those buses from operating in low performance OPPs while the
mixer is running.
In this patch the bus bandwidth request is done through the
interconnect API; the bandwidth value is calculated from the selected
DRM mode, i.e. video plane width, height, refresh rate and pixel
format.
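
As an illustrative example, for a single XRGB8888 (4 bytes per pixel)
plane in a progressive 1920x1080@60 mode the calculated value is

  1920 * 1080 * 60 * 4 B = ~498 MB/s

which is then increased by the 20% safety margin applied in the driver
before being passed to icc_set_bw().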

The bandwidth setting is synchronized with VSYNC when switching to a
lower bandwidth. This is required to ensure enough bandwidth for the
device, since new settings are normally applied in the hardware
synchronously with VSYNC.
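
For example, when switching from two enabled planes to a single plane,
the hardware may keep fetching both planes until the new configuration
is latched at the next VSYNC; lowering the bus bandwidth before that
point caused the IOMMU faults mentioned in the v6 changelog below.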

Acked-by: Krzysztof Kozlowski <krzk at kernel.org>
Reviewed-by: Chanwoo Choi <cw00.choi at samsung.com>
Tested-by: Chanwoo Choi <cw00.choi at samsung.com>
Co-developed-by: Artur Świgoń <a.swigon at samsung.com>
Signed-off-by: Artur Świgoń <a.swigon at samsung.com>
Co-developed-by: Marek Szyprowski <m.szyprowski at samsung.com>
Signed-off-by: Marek Szyprowski <m.szyprowski at samsung.com>
Signed-off-by: Sylwester Nawrocki <s.nawrocki at samsung.com>
---
Changes for v9:
 - whitespace cleanup,
 - Co-developed-by/Signed-off-by tags corrections.

Changes for v8:
 - updated comment in mixer_probe()

Changes for v7:
 - fixed incorrect setting of the ICC bandwidth when the mixer is
   disabled; the bandwidth is now set explicitly to 0 in that case.

Changes for v6:
 - the icc_set_bw() call is now only done when the calculated value
   for a crtc changes, which avoids unnecessary calls for each video
   frame
 - added synchronization of the interconnect bandwidth setting with
   the mixer VSYNC in order to avoid buffer underflow when we lower
   the interconnect bandwidth while the hardware still operates with
   the previous mode settings that require higher bandwidth. This
   fixes IOMMU faults observed e.g. when switching from two planes
   to single plane operation.

Changes for v5:
 - renamed soc_path variable to icc_path
---
 drivers/gpu/drm/exynos/exynos_mixer.c | 146 ++++++++++++++++++++++++++++++++--
 1 file changed, 138 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index af192e5..502ce04 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -13,6 +13,7 @@
 #include <linux/component.h>
 #include <linux/delay.h>
 #include <linux/i2c.h>
+#include <linux/interconnect.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/kernel.h>
@@ -73,6 +74,7 @@ enum mixer_flag_bits {
 	MXR_BIT_INTERLACE,
 	MXR_BIT_VP_ENABLED,
 	MXR_BIT_HAS_SCLK,
+	MXR_BIT_REQUEST_BW,
 };
 
 static const uint32_t mixer_formats[] = {
@@ -99,6 +101,13 @@ struct mixer_context {
 	struct exynos_drm_plane	planes[MIXER_WIN_NR];
 	unsigned long		flags;
 
+	struct icc_path		*icc_path;
+	/* memory bandwidth on the interconnect bus in B/s */
+	unsigned long		icc_bandwidth;
+	/* mutex protecting @icc_bandwidth */
+	struct mutex		icc_lock;
+	struct work_struct	work;
+
 	int			irq;
 	void __iomem		*mixer_regs;
 	void __iomem		*vp_regs;
@@ -754,6 +763,9 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
 		val |= MXR_INT_CLEAR_VSYNC;
 		val &= ~MXR_INT_STATUS_VSYNC;
 
+		if (test_and_clear_bit(MXR_BIT_REQUEST_BW, &ctx->flags))
+			schedule_work(&ctx->work);
+
 		/* interlace scan need to check shadow register */
 		if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)
 		    && !mixer_is_synced(ctx))
@@ -934,6 +946,76 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
 	mixer_reg_writemask(mixer_ctx, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
 }
 
+/**
+ * mixer_get_memory_bandwidth - calculate memory bandwidth for current crtc mode
+ * @crtc: a crtc with DRM mode to calculate the bandwidth for
+ *
+ * Return: memory bandwidth in B/s
+ *
+ * This function returns memory bandwidth calculated as a sum of amount of data
+ * per second for each plane. The calculation is based on maximum possible pixel
+ * resolution for a plane so as to avoid different bandwidth request per each
+ * video frame. The formula used for calculation for each plane is:
+ *
+ * bw = width * height * frame_rate / interlace / (hor_subs * vert_subs)
+ *
+ * where:
+ *  - width, height - (DRM mode) video frame width and height in pixels,
+ *  - frame_rate - DRM mode frame refresh rate,
+ *  - interlace: 1 - in case of progressive and 2 in case of interlaced video,
+ *  - hor_subs, vert_subs - accordingly horizontal and vertical pixel
+ *    subsampling for a plane.
+ */
+static unsigned int mixer_get_memory_bandwidth(struct exynos_drm_crtc *crtc)
+{
+	struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
+	struct mixer_context *ctx = crtc->ctx;
+	unsigned long bw, bandwidth = 0;
+	int i, j, sub;
+
+	for (i = 0; i < MIXER_WIN_NR; i++) {
+		struct drm_plane *plane = &ctx->planes[i].base;
+		const struct drm_format_info *format;
+
+		if (plane->state && plane->state->crtc && plane->state->fb) {
+			format = plane->state->fb->format;
+			bw = mode->hdisplay * mode->vdisplay *
+							drm_mode_vrefresh(mode);
+			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+				bw /= 2;
+			for (j = 0; j < format->num_planes; j++) {
+				sub = j ? (format->vsub * format->hsub) : 1;
+				bandwidth += format->cpp[j] * bw / sub;
+			}
+		}
+	}
+
+	return bandwidth;
+}
+
+static void mixer_set_icc_bandwidth(struct mixer_context *ctx,
+				    unsigned long bandwidth)
+{
+	u32 avg_bw, peak_bw;
+
+	/* add 20% safety margin */
+	bandwidth = bandwidth / 4 * 5;
+
+	avg_bw = peak_bw = Bps_to_icc(bandwidth);
+	icc_set_bw(ctx->icc_path, avg_bw, peak_bw);
+
+	dev_dbg(ctx->dev, "safe bandwidth %lu Bps\n", bandwidth);
+}
+
+static void mixer_icc_request_fn(struct work_struct *work)
+{
+	struct mixer_context *ctx = container_of(work, struct mixer_context,
+						 work);
+	mutex_lock(&ctx->icc_lock);
+	mixer_set_icc_bandwidth(ctx, ctx->icc_bandwidth);
+	mutex_unlock(&ctx->icc_lock);
+}
+
 static void mixer_atomic_begin(struct exynos_drm_crtc *crtc)
 {
 	struct mixer_context *ctx = crtc->ctx;
@@ -980,12 +1062,35 @@ static void mixer_disable_plane(struct exynos_drm_crtc *crtc,
 
 static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
 {
-	struct mixer_context *mixer_ctx = crtc->ctx;
+	struct mixer_context *ctx = crtc->ctx;
+	int bw, prev_bw;
 
-	if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
+	if (!test_bit(MXR_BIT_POWERED, &ctx->flags))
 		return;
 
-	mixer_enable_sync(mixer_ctx);
+	/*
+	 * Request necessary bandwidth on the interconnects. If new
+	 * bandwidth is greater than current value set the new value
+	 * immediately. Otherwise request lower bandwidth only after
+	 * VSYNC, after the HW has actually switched to new video
+	 * frame settings.
+	 */
+	if (ctx->icc_path) {
+		bw = mixer_get_memory_bandwidth(crtc);
+
+		mutex_lock(&ctx->icc_lock);
+		prev_bw = ctx->icc_bandwidth;
+		ctx->icc_bandwidth = bw;
+
+		if (bw > prev_bw)
+			mixer_set_icc_bandwidth(ctx, bw);
+		else if (bw < prev_bw)
+			set_bit(MXR_BIT_REQUEST_BW, &ctx->flags);
+
+		mutex_unlock(&ctx->icc_lock);
+	}
+
+	mixer_enable_sync(ctx);
 	exynos_crtc_handle_event(crtc);
 }
 
@@ -1036,6 +1141,8 @@ static void mixer_atomic_disable(struct exynos_drm_crtc *crtc)
 
 	pm_runtime_put(ctx->dev);
 
+	mixer_set_icc_bandwidth(ctx, 0);
+
 	clear_bit(MXR_BIT_POWERED, &ctx->flags);
 }
 
@@ -1223,19 +1330,33 @@ static int mixer_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	const struct mixer_drv_data *drv;
 	struct mixer_context *ctx;
+	struct icc_path *path;
 	int ret;
 
+	/*
+	 * Returns NULL if CONFIG_INTERCONNECT is disabled or if "interconnects"
+	 * property does not exist. May return ERR_PTR(-EPROBE_DEFER).
+	 */
+	path = of_icc_get(dev, NULL);
+	if (IS_ERR(path))
+		return PTR_ERR(path);
+
 	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
 	if (!ctx) {
 		DRM_DEV_ERROR(dev, "failed to alloc mixer context.\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto err;
 	}
 
 	drv = of_device_get_match_data(dev);
 
+	INIT_WORK(&ctx->work, mixer_icc_request_fn);
+	mutex_init(&ctx->icc_lock);
+
 	ctx->pdev = pdev;
 	ctx->dev = dev;
 	ctx->mxr_ver = drv->version;
+	ctx->icc_path = path;
 
 	if (drv->is_vp_enabled)
 		__set_bit(MXR_BIT_VP_ENABLED, &ctx->flags);
@@ -1247,17 +1368,26 @@ static int mixer_probe(struct platform_device *pdev)
 	pm_runtime_enable(dev);
 
 	ret = component_add(&pdev->dev, &mixer_component_ops);
-	if (ret)
+	if (ret) {
 		pm_runtime_disable(dev);
-
+		goto err;
+	}
+	return 0;
+err:
+	icc_put(path);
 	return ret;
 }
 
 static int mixer_remove(struct platform_device *pdev)
 {
-	pm_runtime_disable(&pdev->dev);
+	struct device *dev = &pdev->dev;
+	struct mixer_context *ctx = dev_get_drvdata(dev);
+
+	pm_runtime_disable(dev);
+
+	component_del(dev, &mixer_component_ops);
 
-	component_del(&pdev->dev, &mixer_component_ops);
+	icc_put(ctx->icc_path);
 
 	return 0;
 }
-- 
2.7.4
