[RFC PATCH 4/9] drm/exynos: add DSIM driver

Andrzej Hajda <a.hajda@samsung.com>
Wed Jan 22 06:35:30 PST 2014


This patch adds a driver for the Exynos DSI master (DSIM). It is a
platform driver that registers as an exynos_drm_display sub-driver of
the exynos_drm framework and implements a DRM encoder/connector pair.
It also acts as a MIPI-DSI host driver, providing a DSI bus for
panels, and interacts with its panel(s) through the drm_panel
framework.
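
For reference, below is a minimal panel-side sketch (not part of this
patch) of how a hypothetical panel driver sitting on this DSI bus could
issue a DCS write through the host's .transfer op. It only uses the
struct mipi_dsi_msg fields that exynos_dsi_host_transfer() handles, and
the example_panel_dcs_write() helper is an invented name used purely
for illustration:

#include <drm/drm_mipi_dsi.h>
#include <video/mipi_display.h>

/* Hypothetical helper, for illustration only: send a DCS command with
 * optional payload to the panel's virtual channel via the DSI host. */
static ssize_t example_panel_dcs_write(struct mipi_dsi_device *dsi_dev,
				       const void *data, size_t len)
{
	struct mipi_dsi_msg msg = {
		.channel = dsi_dev->channel,
		.tx_buf = data,
		.tx_len = len,
	};

	if (len == 1)
		msg.type = MIPI_DSI_DCS_SHORT_WRITE;
	else if (len == 2)
		msg.type = MIPI_DSI_DCS_SHORT_WRITE_PARAM;
	else
		msg.type = MIPI_DSI_DCS_LONG_WRITE;

	/* Dispatched to exynos_dsi_host_transfer() via exynos_dsi_ops;
	 * tx_len == 0 is rejected there with -EINVAL. */
	return dsi_dev->host->ops->transfer(dsi_dev->host, &msg);
}

A real panel driver would normally be driven through the drm_panel
callbacks mentioned above; the sketch is only meant to make the
host-side transfer path easier to follow.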

Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
---
 drivers/gpu/drm/exynos/Kconfig          |    9 +
 drivers/gpu/drm/exynos/Makefile         |    1 +
 drivers/gpu/drm/exynos/exynos_drm_drv.c |   14 +
 drivers/gpu/drm/exynos/exynos_drm_drv.h |    1 +
 drivers/gpu/drm/exynos/exynos_drm_dsi.c | 1393 +++++++++++++++++++++++++++++++
 5 files changed, 1418 insertions(+)
 create mode 100644 drivers/gpu/drm/exynos/exynos_drm_dsi.c

diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 7441c4a..3681a06 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -31,6 +31,15 @@ config DRM_EXYNOS_FIMD
 	help
 	  Choose this option if you want to use Exynos FIMD for DRM.
 
+config DRM_EXYNOS_DSI
+	bool "EXYNOS DRM MIPI-DSI driver support"
+	depends on DRM_EXYNOS
+	select DRM_MIPI_DSI
+	select DRM_PANEL
+	default n
+	help
+	  This enables support for the Exynos MIPI-DSI device.
+
 config DRM_EXYNOS_DP
 	bool "EXYNOS DRM DP driver support"
 	depends on ARCH_EXYNOS
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index b1839e8..02dde22 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -11,6 +11,7 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o \
 exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD)	+= exynos_drm_fimd.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_DSI)	+= exynos_drm_dsi.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_DP)	+= exynos_dp_core.o exynos_dp_reg.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI)	+= exynos_hdmi.o exynos_mixer.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI)	+= exynos_drm_vidi.o
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 4dc9f0d..57ee22a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -419,6 +419,12 @@ static int __init exynos_drm_init(void)
 		goto out_dp;
 #endif
 
+#ifdef CONFIG_DRM_EXYNOS_DSI
+	ret = platform_driver_register(&dsi_driver);
+	if (ret < 0)
+		goto out_dsi;
+#endif
+
 #ifdef CONFIG_DRM_EXYNOS_FIMD
 	ret = platform_driver_register(&fimd_driver);
 	if (ret < 0)
@@ -534,6 +540,10 @@ out_hdmi:
 	platform_driver_unregister(&fimd_driver);
 out_fimd:
 #endif
+#ifdef CONFIG_DRM_EXYNOS_DSI
+	platform_driver_unregister(&dsi_driver);
+out_dsi:
+#endif
 #ifdef CONFIG_DRM_EXYNOS_DP
 	platform_driver_unregister(&dp_driver);
 out_dp:
@@ -581,6 +591,10 @@ static void __exit exynos_drm_exit(void)
 	platform_driver_unregister(&fimd_driver);
 #endif
 
+#ifdef CONFIG_DRM_EXYNOS_DSI
+	platform_driver_unregister(&dsi_driver);
+#endif
+
 #ifdef CONFIG_DRM_EXYNOS_DP
 	platform_driver_unregister(&dp_driver);
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 26bf10a..03c1f4d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -361,6 +361,7 @@ int exynos_platform_device_ipp_register(void);
 void exynos_platform_device_ipp_unregister(void);
 
 extern struct platform_driver dp_driver;
+extern struct platform_driver dsi_driver;
 extern struct platform_driver fimd_driver;
 extern struct platform_driver hdmi_driver;
 extern struct platform_driver mixer_driver;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
new file mode 100644
index 0000000..794bca0
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -0,0 +1,1393 @@
+/*
+ * Samsung SoC MIPI DSI Master driver.
+ *
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd
+ *
+ * Contacts: Tomasz Figa <t.figa@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/phy/phy.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+#include <video/videomode.h>
+
+#include "exynos_drm_drv.h"
+
+#define DSIM_STATUS_REG		0x0	/* Status register */
+#define DSIM_SWRST_REG		0x4	/* Software reset register */
+#define DSIM_CLKCTRL_REG	0x8	/* Clock control register */
+#define DSIM_TIMEOUT_REG	0xc	/* Time out register */
+#define DSIM_CONFIG_REG		0x10	/* Configuration register */
+#define DSIM_ESCMODE_REG	0x14	/* Escape mode register */
+
+/* Main display image resolution register */
+#define DSIM_MDRESOL_REG	0x18
+#define DSIM_MVPORCH_REG	0x1c	/* Main display Vporch register */
+#define DSIM_MHPORCH_REG	0x20	/* Main display Hporch register */
+#define DSIM_MSYNC_REG		0x24	/* Main display sync area register */
+
+/* Sub display image resolution register */
+#define DSIM_SDRESOL_REG	0x28
+#define DSIM_INTSRC_REG		0x2c	/* Interrupt source register */
+#define DSIM_INTMSK_REG		0x30	/* Interrupt mask register */
+#define DSIM_PKTHDR_REG		0x34	/* Packet Header FIFO register */
+#define DSIM_PAYLOAD_REG	0x38	/* Payload FIFO register */
+#define DSIM_RXFIFO_REG		0x3c	/* Read FIFO register */
+#define DSIM_FIFOTHLD_REG	0x40	/* FIFO threshold level register */
+#define DSIM_FIFOCTRL_REG	0x44	/* FIFO status and control register */
+
+/* FIFO memory AC characteristic register */
+#define DSIM_PLLCTRL_REG	0x4c	/* PLL control register */
+#define DSIM_PLLTMR_REG		0x50	/* PLL timer register */
+#define DSIM_PHYACCHR_REG	0x54	/* D-PHY AC characteristic register */
+#define DSIM_PHYACCHR1_REG	0x58	/* D-PHY AC characteristic register1 */
+
+/* DSIM_STATUS */
+#define DSIM_STOP_STATE_DAT(x)		(((x) & 0xf) << 0)
+#define DSIM_STOP_STATE_CLK		(1 << 8)
+#define DSIM_TX_READY_HS_CLK		(1 << 10)
+#define DSIM_PLL_STABLE			(1 << 31)
+
+/* DSIM_SWRST */
+#define DSIM_FUNCRST			(1 << 16)
+#define DSIM_SWRST			(1 << 0)
+
+/* DSIM_TIMEOUT */
+#define DSIM_LPDR_TIMEOUT(x)		((x) << 0)
+#define DSIM_BTA_TIMEOUT(x)		((x) << 16)
+
+/* DSIM_CLKCTRL */
+#define DSIM_ESC_PRESCALER(x)		(((x) & 0xffff) << 0)
+#define DSIM_ESC_PRESCALER_MASK		(0xffff << 0)
+#define DSIM_LANE_ESC_CLK_EN_CLK	(1 << 19)
+#define DSIM_LANE_ESC_CLK_EN_DATA(x)	(((x) & 0xf) << 20)
+#define DSIM_LANE_ESC_CLK_EN_DATA_MASK	(0xf << 20)
+#define DSIM_BYTE_CLKEN			(1 << 24)
+#define DSIM_BYTE_CLK_SRC(x)		(((x) & 0x3) << 25)
+#define DSIM_BYTE_CLK_SRC_MASK		(0x3 << 25)
+#define DSIM_PLL_BYPASS			(1 << 27)
+#define DSIM_ESC_CLKEN			(1 << 28)
+#define DSIM_TX_REQUEST_HSCLK		(1 << 31)
+
+/* DSIM_CONFIG */
+#define DSIM_LANE_EN_CLK		(1 << 0)
+#define DSIM_LANE_EN(x)			(((x) & 0xf) << 1)
+#define DSIM_NUM_OF_DATA_LANE(x)	(((x) & 0x3) << 5)
+#define DSIM_SUB_PIX_FORMAT(x)		(((x) & 0x7) << 8)
+#define DSIM_MAIN_PIX_FORMAT_MASK	(0x7 << 12)
+#define DSIM_MAIN_PIX_FORMAT_RGB888	(0x7 << 12)
+#define DSIM_MAIN_PIX_FORMAT_RGB666	(0x6 << 12)
+#define DSIM_MAIN_PIX_FORMAT_RGB666_P	(0x5 << 12)
+#define DSIM_MAIN_PIX_FORMAT_RGB565	(0x4 << 12)
+#define DSIM_SUB_VC(x)			(((x) & 0x3) << 16)
+#define DSIM_MAIN_VC(x)			(((x) & 0x3) << 18)
+#define DSIM_HSA_MODE			(1 << 20)
+#define DSIM_HBP_MODE			(1 << 21)
+#define DSIM_HFP_MODE			(1 << 22)
+#define DSIM_HSE_MODE			(1 << 23)
+#define DSIM_AUTO_MODE			(1 << 24)
+#define DSIM_VIDEO_MODE			(1 << 25)
+#define DSIM_BURST_MODE			(1 << 26)
+#define DSIM_SYNC_INFORM		(1 << 27)
+#define DSIM_EOT_DISABLE		(1 << 28)
+#define DSIM_MFLUSH_VS			(1 << 29)
+
+/* DSIM_ESCMODE */
+#define DSIM_TX_TRIGGER_RST		(1 << 4)
+#define DSIM_TX_LPDT_LP			(1 << 6)
+#define DSIM_CMD_LPDT_LP		(1 << 7)
+#define DSIM_FORCE_BTA			(1 << 16)
+#define DSIM_FORCE_STOP_STATE		(1 << 20)
+#define DSIM_STOP_STATE_CNT(x)		(((x) & 0x7ff) << 21)
+#define DSIM_STOP_STATE_CNT_MASK	(0x7ff << 21)
+
+/* DSIM_MDRESOL */
+#define DSIM_MAIN_STAND_BY		(1 << 31)
+#define DSIM_MAIN_VRESOL(x)		(((x) & 0x7ff) << 16)
+#define DSIM_MAIN_HRESOL(x)		(((x) & 0x7ff) << 0)
+
+/* DSIM_MVPORCH */
+#define DSIM_CMD_ALLOW(x)		((x) << 28)
+#define DSIM_STABLE_VFP(x)		((x) << 16)
+#define DSIM_MAIN_VBP(x)		((x) << 0)
+#define DSIM_CMD_ALLOW_MASK		(0xf << 28)
+#define DSIM_STABLE_VFP_MASK		(0x7ff << 16)
+#define DSIM_MAIN_VBP_MASK		(0x7ff << 0)
+
+/* DSIM_MHPORCH */
+#define DSIM_MAIN_HFP(x)		((x) << 16)
+#define DSIM_MAIN_HBP(x)		((x) << 0)
+#define DSIM_MAIN_HFP_MASK		((0xffff) << 16)
+#define DSIM_MAIN_HBP_MASK		((0xffff) << 0)
+
+/* DSIM_MSYNC */
+#define DSIM_MAIN_VSA(x)		((x) << 22)
+#define DSIM_MAIN_HSA(x)		((x) << 0)
+#define DSIM_MAIN_VSA_MASK		((0x3ff) << 22)
+#define DSIM_MAIN_HSA_MASK		((0xffff) << 0)
+
+/* DSIM_SDRESOL */
+#define DSIM_SUB_STANDY(x)		((x) << 31)
+#define DSIM_SUB_VRESOL(x)		((x) << 16)
+#define DSIM_SUB_HRESOL(x)		((x) << 0)
+#define DSIM_SUB_STANDY_MASK		((0x1) << 31)
+#define DSIM_SUB_VRESOL_MASK		((0x7ff) << 16)
+#define DSIM_SUB_HRESOL_MASK		((0x7ff) << 0)
+
+/* DSIM_INTSRC */
+#define DSIM_INT_PLL_STABLE		(1 << 31)
+#define DSIM_INT_SW_RST_RELEASE		(1 << 30)
+#define DSIM_INT_SFR_FIFO_EMPTY		(1 << 29)
+#define DSIM_INT_BTA			(1 << 25)
+#define DSIM_INT_FRAME_DONE		(1 << 24)
+#define DSIM_INT_RX_TIMEOUT		(1 << 21)
+#define DSIM_INT_BTA_TIMEOUT		(1 << 20)
+#define DSIM_INT_RX_DONE		(1 << 18)
+#define DSIM_INT_RX_TE			(1 << 17)
+#define DSIM_INT_RX_ACK			(1 << 16)
+#define DSIM_INT_RX_ECC_ERR		(1 << 15)
+#define DSIM_INT_RX_CRC_ERR		(1 << 14)
+
+/* DSIM_FIFOCTRL */
+#define DSIM_RX_DATA_FULL		(1 << 25)
+#define DSIM_RX_DATA_EMPTY		(1 << 24)
+#define DSIM_SFR_HEADER_FULL		(1 << 23)
+#define DSIM_SFR_HEADER_EMPTY		(1 << 22)
+#define DSIM_SFR_PAYLOAD_FULL		(1 << 21)
+#define DSIM_SFR_PAYLOAD_EMPTY		(1 << 20)
+#define DSIM_I80_HEADER_FULL		(1 << 19)
+#define DSIM_I80_HEADER_EMPTY		(1 << 18)
+#define DSIM_I80_PAYLOAD_FULL		(1 << 17)
+#define DSIM_I80_PAYLOAD_EMPTY		(1 << 16)
+#define DSIM_SD_HEADER_FULL		(1 << 15)
+#define DSIM_SD_HEADER_EMPTY		(1 << 14)
+#define DSIM_SD_PAYLOAD_FULL		(1 << 13)
+#define DSIM_SD_PAYLOAD_EMPTY		(1 << 12)
+#define DSIM_MD_HEADER_FULL		(1 << 11)
+#define DSIM_MD_HEADER_EMPTY		(1 << 10)
+#define DSIM_MD_PAYLOAD_FULL		(1 << 9)
+#define DSIM_MD_PAYLOAD_EMPTY		(1 << 8)
+#define DSIM_RX_FIFO			(1 << 4)
+#define DSIM_SFR_FIFO			(1 << 3)
+#define DSIM_I80_FIFO			(1 << 2)
+#define DSIM_SD_FIFO			(1 << 1)
+#define DSIM_MD_FIFO			(1 << 0)
+
+/* DSIM_PHYACCHR */
+#define DSIM_AFC_EN			(1 << 14)
+#define DSIM_AFC_CTL(x)			(((x) & 0x7) << 5)
+
+/* DSIM_PLLCTRL */
+#define DSIM_FREQ_BAND(x)		((x) << 24)
+#define DSIM_PLL_EN			(1 << 23)
+#define DSIM_PLL_P(x)			((x) << 13)
+#define DSIM_PLL_M(x)			((x) << 4)
+#define DSIM_PLL_S(x)			((x) << 1)
+
+#define DSI_MAX_BUS_WIDTH		4
+#define DSI_NUM_VIRTUAL_CHANNELS	4
+#define DSI_TX_FIFO_SIZE		2048
+#define DSI_RX_FIFO_SIZE		256
+#define DSI_XFER_TIMEOUT_MS		100
+#define DSI_RX_FIFO_EMPTY		0x30800002
+
+enum exynos_dsi_transfer_type {
+	EXYNOS_DSI_TX,
+	EXYNOS_DSI_RX,
+};
+
+struct exynos_dsi_transfer {
+	struct list_head list;
+	struct completion completed;
+	int result;
+	u8 type;
+	u8 data[2];
+
+	const u8 *tx_payload;
+	u16 tx_len;
+	u16 tx_done;
+
+	u8 *rx_payload;
+	u16 rx_len;
+	u16 rx_done;
+};
+
+struct exynos_dsi {
+	struct mipi_dsi_host dsi_host;
+	struct drm_connector connector;
+	struct drm_device *drm_dev;
+	struct drm_encoder *encoder;
+	struct drm_panel *panel;
+	struct device *dev;
+
+	void __iomem *reg_base;
+	struct phy *phy;
+	struct clk *pll_clk;
+	struct clk *bus_clk;
+	struct regulator_bulk_data supplies[2];
+	int irq;
+
+	u32 pll_clk_rate;
+	u32 hs_clk_freq;
+	u32 esc_clk_freq;
+	u32 lanes;
+	u32 mode_flags;
+	u32 cmd_allow;
+	u32 format;
+	struct videomode vm;
+
+	int dpms_mode;
+	bool initialized;
+	struct drm_property *brightness;
+	struct completion completed;
+
+	spinlock_t transfer_lock; /* protects transfer_list */
+	struct list_head transfer_list;
+};
+
+#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
+#define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector)
+
+static void exynos_dsi_wait_for_reset(struct exynos_dsi *dsi)
+{
+	if (wait_for_completion_timeout(&dsi->completed, msecs_to_jiffies(300)))
+		return;
+
+	dev_err(dsi->dev, "timeout waiting for reset\n");
+}
+
+static void exynos_dsi_reset(struct exynos_dsi *dsi)
+{
+	reinit_completion(&dsi->completed);
+	writel(DSIM_SWRST, dsi->reg_base + DSIM_SWRST_REG);
+}
+
+#ifndef MHZ
+#define MHZ	(1000*1000)
+#endif
+
+static unsigned long exynos_dsi_pll_find_pms(struct exynos_dsi *dsi,
+		unsigned long fin, unsigned long fout, u8 *p, u16 *m, u8 *s)
+{
+	unsigned long best_freq = 0;
+	u32 min_delta = 0xffffffff;
+	u8 p_min, p_max;
+	u8 _p, uninitialized_var(best_p);
+	u16 _m, uninitialized_var(best_m);
+	u8 _s, uninitialized_var(best_s);
+
+	p_min = DIV_ROUND_UP(fin, (12 * MHZ));
+	p_max = fin / (6 * MHZ);
+
+	for (_p = p_min; _p <= p_max; ++_p) {
+		for (_s = 0; _s <= 5; ++_s) {
+			u64 tmp;
+			u32 delta;
+
+			tmp = (u64)fout * (_p << _s);
+			do_div(tmp, fin);
+			_m = tmp;
+			if (_m < 41 || _m > 125)
+				continue;
+
+			tmp = (u64)_m * fin;
+			do_div(tmp, _p);
+			if (tmp < 500 * MHZ || tmp > 1000 * MHZ)
+				continue;
+
+			tmp = (u64)_m * fin;
+			do_div(tmp, _p << _s);
+
+			delta = abs(fout - tmp);
+			if (delta < min_delta) {
+				best_p = _p;
+				best_m = _m;
+				best_s = _s;
+				min_delta = delta;
+				best_freq = tmp;
+			}
+		}
+	}
+
+	if (best_freq) {
+		*p = best_p;
+		*m = best_m;
+		*s = best_s;
+	}
+
+	return best_freq;
+}
+
+static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
+					unsigned long freq)
+{
+	static const unsigned long freq_bands[] = {
+		100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ,
+		270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ,
+		510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ,
+		770 * MHZ, 870 * MHZ, 950 * MHZ,
+	};
+	unsigned long fin, fout;
+	int timeout, band;
+	u8 p, s;
+	u16 m;
+	u32 reg;
+
+	clk_set_rate(dsi->pll_clk, dsi->pll_clk_rate);
+
+	fin = clk_get_rate(dsi->pll_clk);
+	if (!fin) {
+		dev_err(dsi->dev, "failed to get PLL clock frequency\n");
+		return 0;
+	}
+
+	dev_dbg(dsi->dev, "PLL input frequency: %lu\n", fin);
+
+	fout = exynos_dsi_pll_find_pms(dsi, fin, freq, &p, &m, &s);
+	if (!fout) {
+		dev_err(dsi->dev,
+			"failed to find PLL coefficients for requested frequency\n");
+		return 0;
+	}
+
+	for (band = 0; band < ARRAY_SIZE(freq_bands); ++band)
+		if (fout < freq_bands[band])
+			break;
+
+	dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d), band %d\n", fout,
+		p, m, s, band);
+
+	writel(500, dsi->reg_base + DSIM_PLLTMR_REG);
+
+	reg = DSIM_FREQ_BAND(band) | DSIM_PLL_EN
+			| DSIM_PLL_P(p) | DSIM_PLL_M(m) | DSIM_PLL_S(s);
+	writel(reg, dsi->reg_base + DSIM_PLLCTRL_REG);
+
+	timeout = 1000;
+	do {
+		if (timeout-- == 0) {
+			dev_err(dsi->dev, "PLL failed to stabilize\n");
+			return 0;
+		}
+		reg = readl(dsi->reg_base + DSIM_STATUS_REG);
+	} while ((reg & DSIM_PLL_STABLE) == 0);
+
+	return fout;
+}
+
+static int exynos_dsi_enable_clock(struct exynos_dsi *dsi)
+{
+	unsigned long hs_clk, byte_clk, esc_clk;
+	unsigned long esc_div;
+	u32 reg;
+
+	hs_clk = exynos_dsi_set_pll(dsi, dsi->hs_clk_freq);
+	if (!hs_clk) {
+		dev_err(dsi->dev, "failed to configure DSI PLL\n");
+		return -EFAULT;
+	}
+
+	byte_clk = hs_clk / 8;
+	esc_div = DIV_ROUND_UP(byte_clk, dsi->esc_clk_freq);
+	esc_clk = byte_clk / esc_div;
+
+	if (esc_clk > 20 * MHZ) {
+		++esc_div;
+		esc_clk = byte_clk / esc_div;
+	}
+
+	dev_dbg(dsi->dev, "hs_clk = %lu, byte_clk = %lu, esc_clk = %lu\n",
+		hs_clk, byte_clk, esc_clk);
+
+	reg = readl(dsi->reg_base + DSIM_CLKCTRL_REG);
+	reg &= ~(DSIM_ESC_PRESCALER_MASK | DSIM_LANE_ESC_CLK_EN_CLK
+			| DSIM_LANE_ESC_CLK_EN_DATA_MASK | DSIM_PLL_BYPASS
+			| DSIM_BYTE_CLK_SRC_MASK);
+	reg |= DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN
+			| DSIM_ESC_PRESCALER(esc_div)
+			| DSIM_LANE_ESC_CLK_EN_CLK
+			| DSIM_LANE_ESC_CLK_EN_DATA(BIT(dsi->lanes) - 1)
+			| DSIM_BYTE_CLK_SRC(0)
+			| DSIM_TX_REQUEST_HSCLK;
+	writel(reg, dsi->reg_base + DSIM_CLKCTRL_REG);
+
+	return 0;
+}
+
+static void exynos_dsi_disable_clock(struct exynos_dsi *dsi)
+{
+	u32 reg;
+
+	reg = readl(dsi->reg_base + DSIM_CLKCTRL_REG);
+	reg &= ~(DSIM_LANE_ESC_CLK_EN_CLK | DSIM_LANE_ESC_CLK_EN_DATA_MASK
+			| DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN);
+	writel(reg, dsi->reg_base + DSIM_CLKCTRL_REG);
+
+	reg = readl(dsi->reg_base + DSIM_PLLCTRL_REG);
+	reg &= ~DSIM_PLL_EN;
+	writel(reg, dsi->reg_base + DSIM_PLLCTRL_REG);
+}
+
+static int exynos_dsi_init_link(struct exynos_dsi *dsi)
+{
+	int timeout;
+	u32 reg;
+	u32 lanes_mask;
+
+	/* Initialize FIFO pointers */
+	reg = readl(dsi->reg_base + DSIM_FIFOCTRL_REG);
+	reg &= ~0x1f;
+	writel(reg, dsi->reg_base + DSIM_FIFOCTRL_REG);
+
+	usleep_range(9000, 11000);
+
+	reg |= 0x1f;
+	writel(reg, dsi->reg_base + DSIM_FIFOCTRL_REG);
+
+	usleep_range(9000, 11000);
+
+	/* DSI configuration */
+	reg = 0;
+
+	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
+		reg |= DSIM_VIDEO_MODE;
+
+		if (!(dsi->mode_flags & MIPI_DSI_MODE_VSYNC_FLUSH))
+			reg |= DSIM_MFLUSH_VS;
+		if (!(dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET))
+			reg |= DSIM_EOT_DISABLE;
+		if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+			reg |= DSIM_SYNC_INFORM;
+		if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+			reg |= DSIM_BURST_MODE;
+		if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_AUTO_VERT)
+			reg |= DSIM_AUTO_MODE;
+		if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSE)
+			reg |= DSIM_HSE_MODE;
+		if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HFP))
+			reg |= DSIM_HFP_MODE;
+		if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HBP))
+			reg |= DSIM_HBP_MODE;
+		if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSA))
+			reg |= DSIM_HSA_MODE;
+	}
+
+	switch (dsi->format) {
+	case MIPI_DSI_FMT_RGB888:
+		reg |= DSIM_MAIN_PIX_FORMAT_RGB888;
+		break;
+	case MIPI_DSI_FMT_RGB666:
+		reg |= DSIM_MAIN_PIX_FORMAT_RGB666;
+		break;
+	case MIPI_DSI_FMT_RGB666_PACKED:
+		reg |= DSIM_MAIN_PIX_FORMAT_RGB666_P;
+		break;
+	case MIPI_DSI_FMT_RGB565:
+		reg |= DSIM_MAIN_PIX_FORMAT_RGB565;
+		break;
+	default:
+		dev_err(dsi->dev, "invalid pixel format\n");
+		return -EINVAL;
+	}
+
+	reg |= DSIM_NUM_OF_DATA_LANE(dsi->lanes - 1);
+
+	writel(reg, dsi->reg_base + DSIM_CONFIG_REG);
+
+	reg |= DSIM_LANE_EN_CLK;
+	writel(reg, dsi->reg_base + DSIM_CONFIG_REG);
+
+	lanes_mask = BIT(dsi->lanes) - 1;
+	reg |= DSIM_LANE_EN(lanes_mask);
+	writel(reg, dsi->reg_base + DSIM_CONFIG_REG);
+
+	/* Check that clock and data lanes are in the stop state */
+	timeout = 100;
+	do {
+		if (timeout-- == 0) {
+			dev_err(dsi->dev, "waiting for bus lanes timed out\n");
+			return -EFAULT;
+		}
+
+		reg = readl(dsi->reg_base + DSIM_STATUS_REG);
+		if ((reg & DSIM_STOP_STATE_DAT(lanes_mask))
+		    != DSIM_STOP_STATE_DAT(lanes_mask))
+			continue;
+	} while (!(reg & (DSIM_STOP_STATE_CLK | DSIM_TX_READY_HS_CLK)));
+
+	reg = readl(dsi->reg_base + DSIM_ESCMODE_REG);
+	reg &= ~DSIM_STOP_STATE_CNT_MASK;
+	writel(reg, dsi->reg_base + DSIM_ESCMODE_REG);
+
+	reg = DSIM_BTA_TIMEOUT(0xff) | DSIM_LPDR_TIMEOUT(0xffff);
+	writel(reg, dsi->reg_base + DSIM_TIMEOUT_REG);
+
+	return 0;
+}
+
+static int exynos_dsi_set_display_mode(struct exynos_dsi *dsi)
+{
+	struct videomode *vm = &dsi->vm;
+	u32 reg;
+
+	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
+		reg = DSIM_CMD_ALLOW(0xf)
+			| DSIM_STABLE_VFP(vm->vfront_porch)
+			| DSIM_MAIN_VBP(vm->vback_porch);
+		writel(reg, dsi->reg_base + DSIM_MVPORCH_REG);
+
+		reg = DSIM_MAIN_HFP(vm->hfront_porch)
+			| DSIM_MAIN_HBP(vm->hback_porch);
+		writel(reg, dsi->reg_base + DSIM_MHPORCH_REG);
+
+		reg = DSIM_MAIN_VSA(vm->vsync_len)
+			| DSIM_MAIN_HSA(vm->hsync_len);
+		writel(reg, dsi->reg_base + DSIM_MSYNC_REG);
+	}
+
+	reg = DSIM_MAIN_HRESOL(vm->hactive) | DSIM_MAIN_VRESOL(vm->vactive);
+	writel(reg, dsi->reg_base + DSIM_MDRESOL_REG);
+
+	dev_dbg(dsi->dev, "LCD width = %d, height = %d\n",
+		vm->hactive, vm->vactive);
+
+	return 0;
+}
+
+static void exynos_dsi_set_display_enable(struct exynos_dsi *dsi, bool enable)
+{
+	u32 reg;
+
+	reg = readl(dsi->reg_base + DSIM_MDRESOL_REG);
+	if (enable)
+		reg |= DSIM_MAIN_STAND_BY;
+	else
+		reg &= ~DSIM_MAIN_STAND_BY;
+	writel(reg, dsi->reg_base + DSIM_MDRESOL_REG);
+}
+
+static int exynos_dsi_wait_for_hdr_fifo(struct exynos_dsi *dsi)
+{
+	int timeout = 20000;
+
+	do {
+		u32 reg = readl(dsi->reg_base + DSIM_FIFOCTRL_REG);
+
+		if (!(reg & DSIM_SFR_HEADER_FULL))
+			return 0;
+
+		if (!cond_resched())
+			usleep_range(950, 1050);
+	} while (--timeout);
+
+	return -ETIMEDOUT;
+}
+
+static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi,
+					struct exynos_dsi_transfer *xfer)
+{
+	struct device *dev = dsi->dev;
+	const u8 *payload = xfer->tx_payload + xfer->tx_done;
+	u16 length = xfer->tx_len - xfer->tx_done;
+	bool first = !xfer->tx_done;
+	u32 reg;
+
+	dev_dbg(dev, "< xfer %p, tx_len %u, tx_done %u, rx_len %u, rx_done %u\n",
+		xfer, xfer->tx_len, xfer->tx_done, xfer->rx_len, xfer->rx_done);
+
+	if (length > DSI_TX_FIFO_SIZE)
+		length = DSI_TX_FIFO_SIZE;
+
+	xfer->tx_done += length;
+
+	/* Send payload */
+	while (length >= 4) {
+		reg = (payload[3] << 24) | (payload[2] << 16)
+					| (payload[1] << 8) | payload[0];
+		writel(reg, dsi->reg_base + DSIM_PAYLOAD_REG);
+		payload += 4;
+		length -= 4;
+	}
+
+	reg = 0;
+	switch (length) {
+	case 3:
+		reg |= payload[2] << 16;
+		/* Fall through */
+	case 2:
+		reg |= payload[1] << 8;
+		/* Fall through */
+	case 1:
+		reg |= payload[0];
+		writel(reg, dsi->reg_base + DSIM_PAYLOAD_REG);
+		break;
+	case 0:
+		/* Do nothing */
+		break;
+	}
+
+	/* Send packet header */
+	if (!first)
+		return;
+
+	if (xfer->rx_len) {
+		reg = (xfer->rx_len << 8)
+				| MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE;
+		if (exynos_dsi_wait_for_hdr_fifo(dsi)) {
+			dev_err(dev, "waiting for header FIFO timed out\n");
+			return;
+		}
+		writel(reg, dsi->reg_base + DSIM_PKTHDR_REG);
+	}
+
+	reg = (xfer->data[1] << 16) | (xfer->data[0] << 8) | xfer->type;
+	if (exynos_dsi_wait_for_hdr_fifo(dsi)) {
+		dev_err(dev, "waiting for header FIFO timed out\n");
+		return;
+	}
+	writel(reg, dsi->reg_base + DSIM_PKTHDR_REG);
+	writel(DSIM_FORCE_BTA, dsi->reg_base + DSIM_ESCMODE_REG);
+}
+
+static void exynos_dsi_read_from_fifo(struct exynos_dsi *dsi,
+					struct exynos_dsi_transfer *xfer)
+{
+	u8 *payload = xfer->rx_payload + xfer->rx_done;
+	bool first = !xfer->rx_done;
+	u16 length;
+	u32 reg;
+
+	if (first) {
+		reg = readl(dsi->reg_base + DSIM_RXFIFO_REG);
+
+		switch (reg & 0x3f) {
+		case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
+		case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+			if (xfer->rx_len >= 2) {
+				payload[1] = reg >> 16;
+				++xfer->rx_done;
+			}
+			/* Fall through */
+		case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
+		case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+			payload[0] = reg >> 8;
+			++xfer->rx_done;
+			xfer->rx_len = xfer->rx_done;
+			xfer->result = 0;
+			goto clear_fifo;
+		case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+			pr_err("DCS_ACK: %#x\n", reg);
+			xfer->result = 0;
+			goto clear_fifo;
+		}
+
+		length = (reg >> 8) & 0xffff;
+		if (length > xfer->rx_len) {
+			dev_err(dsi->dev,
+				"response too long (expected %u, got %u bytes)\n",
+				xfer->rx_len, length);
+			xfer->rx_len = 0;
+			xfer->rx_done = 0;
+			xfer->result = -EFAULT;
+			goto clear_fifo;
+		}
+		if (length < xfer->rx_len)
+			xfer->rx_len = length;
+	}
+
+	length = xfer->rx_len - xfer->rx_done;
+	xfer->rx_done += length;
+
+	/* Receive payload */
+	while (length >= 4) {
+		reg = readl(dsi->reg_base + DSIM_RXFIFO_REG);
+		payload[0] = (reg >>  0) & 0xff;
+		payload[1] = (reg >>  8) & 0xff;
+		payload[2] = (reg >> 16) & 0xff;
+		payload[3] = (reg >> 24) & 0xff;
+		payload += 4;
+		length -= 4;
+	}
+
+	if (length) {
+		reg = readl(dsi->reg_base + DSIM_RXFIFO_REG);
+		switch (length) {
+		case 3:
+			payload[2] = (reg >> 16) & 0xff;
+			/* Fall through */
+		case 2:
+			payload[1] = (reg >> 8) & 0xff;
+			/* Fall through */
+		case 1:
+			payload[0] = reg & 0xff;
+		}
+	}
+
+	if (xfer->rx_done == xfer->rx_len)
+		xfer->result = 0;
+
+clear_fifo:
+	length = DSI_RX_FIFO_SIZE / 4;
+	do {
+		reg = readl(dsi->reg_base + DSIM_RXFIFO_REG);
+		if (reg == DSI_RX_FIFO_EMPTY)
+			break;
+	} while (--length);
+}
+
+static void exynos_dsi_transfer_start(struct exynos_dsi *dsi)
+{
+	unsigned long flags;
+	struct exynos_dsi_transfer *xfer;
+	bool start = false;
+
+again:
+	spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+	if (list_empty(&dsi->transfer_list)) {
+		spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+		return;
+	}
+
+	xfer = list_first_entry(&dsi->transfer_list,
+					struct exynos_dsi_transfer, list);
+
+	spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+	if (xfer->tx_len && xfer->tx_done == xfer->tx_len)
+		/* waiting for RX */
+		return;
+
+	exynos_dsi_send_to_fifo(dsi, xfer);
+
+	if (xfer->tx_len || xfer->rx_len)
+		return;
+
+	xfer->result = 0;
+	complete(&xfer->completed);
+
+	spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+	list_del_init(&xfer->list);
+	start = !list_empty(&dsi->transfer_list);
+
+	spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+	if (start)
+		goto again;
+}
+
+static bool exynos_dsi_transfer_finish(struct exynos_dsi *dsi)
+{
+	struct exynos_dsi_transfer *xfer;
+	unsigned long flags;
+	bool start = true;
+
+	spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+	if (list_empty(&dsi->transfer_list)) {
+		spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+		return false;
+	}
+
+	xfer = list_first_entry(&dsi->transfer_list,
+					struct exynos_dsi_transfer, list);
+
+	spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+	dev_dbg(dsi->dev,
+		"> xfer %p, tx_len %u, tx_done %u, rx_len %u, rx_done %u\n",
+		xfer, xfer->tx_len, xfer->tx_done, xfer->rx_len, xfer->rx_done);
+
+	if (xfer->tx_done != xfer->tx_len)
+		return true;
+
+	if (xfer->rx_done != xfer->rx_len)
+		exynos_dsi_read_from_fifo(dsi, xfer);
+
+	if (xfer->rx_done != xfer->rx_len)
+		return true;
+
+	spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+	list_del_init(&xfer->list);
+	start = !list_empty(&dsi->transfer_list);
+
+	spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+	if (!xfer->rx_len)
+		xfer->result = 0;
+	complete(&xfer->completed);
+
+	return start;
+}
+
+static void exynos_dsi_remove_transfer(struct exynos_dsi *dsi,
+					struct exynos_dsi_transfer *xfer)
+{
+	unsigned long flags;
+	bool start;
+
+	spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+	if (!list_empty(&dsi->transfer_list) &&
+	    xfer == list_first_entry(&dsi->transfer_list,
+				     struct exynos_dsi_transfer, list)) {
+		list_del_init(&xfer->list);
+		start = !list_empty(&dsi->transfer_list);
+		spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+		if (start)
+			exynos_dsi_transfer_start(dsi);
+		return;
+	}
+
+	list_del_init(&xfer->list);
+
+	spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+}
+
+static int exynos_dsi_transfer(struct exynos_dsi *dsi,
+					struct exynos_dsi_transfer *xfer)
+{
+	unsigned long flags;
+	bool stopped;
+
+	xfer->tx_done = 0;
+	xfer->rx_done = 0;
+	xfer->result = -ETIMEDOUT;
+	init_completion(&xfer->completed);
+
+	spin_lock_irqsave(&dsi->transfer_lock, flags);
+
+	stopped = list_empty(&dsi->transfer_list);
+	list_add_tail(&xfer->list, &dsi->transfer_list);
+
+	spin_unlock_irqrestore(&dsi->transfer_lock, flags);
+
+	if (stopped)
+		exynos_dsi_transfer_start(dsi);
+
+	wait_for_completion_timeout(&xfer->completed,
+				    msecs_to_jiffies(DSI_XFER_TIMEOUT_MS));
+	if (xfer->result == -ETIMEDOUT) {
+		exynos_dsi_remove_transfer(dsi, xfer);
+		dev_err(dsi->dev, "xfer timed out\n");
+		return -ETIMEDOUT;
+	}
+
+	/* Also covers hardware timeout condition */
+	return xfer->result;
+}
+
+static irqreturn_t exynos_dsi_irq(int irq, void *dev_id)
+{
+	struct exynos_dsi *dsi = dev_id;
+	u32 status;
+
+	status = readl(dsi->reg_base + DSIM_INTSRC_REG);
+	if (!status) {
+		static unsigned long int j;
+		if (printk_timed_ratelimit(&j, 500))
+			dev_warn(dsi->dev, "spurious interrupt\n");
+		return IRQ_HANDLED;
+	}
+	writel(status, dsi->reg_base + DSIM_INTSRC_REG);
+
+	if (status & DSIM_INT_SW_RST_RELEASE) {
+		u32 mask = ~(DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY);
+		writel(mask, dsi->reg_base + DSIM_INTMSK_REG);
+		complete(&dsi->completed);
+		return IRQ_HANDLED;
+	}
+
+	if (!(status & (DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY)))
+		return IRQ_HANDLED;
+
+	if (exynos_dsi_transfer_finish(dsi))
+		exynos_dsi_transfer_start(dsi);
+
+	return IRQ_HANDLED;
+}
+
+static int exynos_dsi_init(struct exynos_dsi *dsi)
+{
+	exynos_dsi_enable_clock(dsi);
+	exynos_dsi_reset(dsi);
+	enable_irq(dsi->irq);
+	exynos_dsi_wait_for_reset(dsi);
+	exynos_dsi_init_link(dsi);
+	dsi->initialized = true;
+
+	return 0;
+}
+
+static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
+				  struct mipi_dsi_device *device)
+{
+	struct exynos_dsi *dsi = host_to_dsi(host);
+
+	dsi->lanes = device->lanes;
+	dsi->format = device->format;
+	dsi->mode_flags = device->mode_flags;
+
+	dsi->panel = of_drm_find_panel(device->dev.of_node);
+	if (dsi->panel && dsi->connector.dev) {
+		drm_panel_attach(dsi->panel, &dsi->connector);
+		drm_helper_hpd_irq_event(dsi->connector.dev);
+	}
+
+	return 0;
+}
+
+static int exynos_dsi_host_detach(struct mipi_dsi_host *host,
+				  struct mipi_dsi_device *device)
+{
+	struct exynos_dsi *dsi = host_to_dsi(host);
+
+	dsi->panel = NULL;
+
+	return 0;
+}
+
+/* distinguish between short and long DSI packet types */
+static bool exynos_dsi_is_short_dsi_type(u8 type)
+{
+	return (type & 0x0f) <= 8;
+}
+
+static ssize_t exynos_dsi_host_transfer(struct mipi_dsi_host *host,
+				       struct mipi_dsi_msg *msg)
+{
+	struct exynos_dsi *dsi = host_to_dsi(host);
+	struct exynos_dsi_transfer xfer;
+	int ret;
+
+	if (!dsi->initialized) {
+		ret = exynos_dsi_init(dsi);
+		if (ret)
+			return ret;
+	}
+
+	if (msg->tx_len == 0)
+		return -EINVAL;
+
+	xfer.type = msg->type | (msg->channel << 6);
+
+	if (exynos_dsi_is_short_dsi_type(msg->type)) {
+		const char *tx_buf = msg->tx_buf;
+
+		if (msg->tx_len > 2)
+			return -EINVAL;
+		xfer.tx_len = 0;
+		xfer.data[0] = tx_buf[0];
+		xfer.data[1] = (msg->tx_len == 2) ? tx_buf[1] : 0;
+	} else {
+		xfer.tx_len = msg->tx_len;
+		xfer.data[0] = msg->tx_len & 0xff;
+		xfer.data[1] = msg->tx_len >> 8;
+		xfer.tx_payload = msg->tx_buf;
+	}
+
+	xfer.rx_len = msg->rx_len;
+	xfer.rx_payload = msg->rx_buf;
+
+	ret = exynos_dsi_transfer(dsi, &xfer);
+	return (ret < 0) ? ret : xfer.rx_done;
+}
+
+static const struct mipi_dsi_host_ops exynos_dsi_ops = {
+	.attach = exynos_dsi_host_attach,
+	.detach = exynos_dsi_host_detach,
+	.transfer = exynos_dsi_host_transfer,
+};
+
+static void exynos_dsi_add_panel_properties(struct exynos_dsi *dsi)
+{
+	struct drm_device *drm = dsi->connector.dev;
+	uint64_t min, max, val;
+	int err;
+
+	if (dsi->brightness)
+		return;
+
+	err = drm_panel_get_brightness_range(dsi->panel, &min, &max);
+	if (err < 0)
+		return;
+
+	err = drm_panel_get_brightness(dsi->panel, &val);
+	if (err < 0)
+		return;
+
+	dsi->brightness = drm_property_create_range(drm, 0, "brightness",
+						       min, max);
+	drm_object_attach_property(&dsi->connector.base, dsi->brightness, val);
+}
+
+static void exynos_dsi_remove_panel_properties(struct exynos_dsi *dsi)
+{
+	struct drm_device *drm = dsi->connector.dev;
+
+	if (dsi->brightness) {
+		drm_property_destroy(drm, dsi->brightness);
+		dsi->brightness = NULL;
+	}
+}
+
+static enum drm_connector_status
+exynos_dsi_detect(struct drm_connector *connector, bool force)
+{
+	struct exynos_dsi *dsi = connector_to_dsi(connector);
+
+	if (dsi->panel) {
+		exynos_dsi_add_panel_properties(dsi);
+		return connector_status_connected;
+	} else {
+		exynos_dsi_remove_panel_properties(dsi);
+		return connector_status_disconnected;
+	}
+}
+
+static int exynos_dsi_set_property(struct drm_connector *connector,
+				   struct drm_property *property,
+				   uint64_t value)
+{
+	struct exynos_dsi *dsi = connector_to_dsi(connector);
+
+	if (dsi->panel && property == dsi->brightness)
+		return drm_panel_set_brightness(dsi->panel, value);
+
+	return 0;
+}
+
+static void exynos_dsi_connector_destroy(struct drm_connector *connector)
+{
+}
+
+static struct drm_connector_funcs exynos_dsi_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = exynos_dsi_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = exynos_dsi_set_property,
+	.destroy = exynos_dsi_connector_destroy,
+};
+
+static int exynos_dsi_get_modes(struct drm_connector *connector)
+{
+	struct exynos_dsi *dsi = connector_to_dsi(connector);
+
+	if (dsi->panel)
+		return dsi->panel->funcs->get_modes(dsi->panel);
+
+	return 0;
+}
+
+static int exynos_dsi_mode_valid(struct drm_connector *connector,
+				 struct drm_display_mode *mode)
+{
+	return MODE_OK;
+}
+
+static struct drm_encoder *
+exynos_dsi_best_encoder(struct drm_connector *connector)
+{
+	struct exynos_dsi *dsi = connector_to_dsi(connector);
+
+	return dsi->encoder;
+}
+
+static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
+	.get_modes = exynos_dsi_get_modes,
+	.mode_valid = exynos_dsi_mode_valid,
+	.best_encoder = exynos_dsi_best_encoder,
+};
+
+static int exynos_dsi_initialize(struct exynos_drm_display *display,
+				 struct drm_device *drm_dev)
+{
+	struct exynos_dsi *dsi = display->ctx;
+
+	dsi->drm_dev = drm_dev;
+
+	return 0;
+}
+
+static int exynos_dsi_create_connector(struct exynos_drm_display *display,
+				       struct drm_encoder *encoder)
+{
+	struct exynos_dsi *dsi = display->ctx;
+	struct drm_connector *connector = &dsi->connector;
+	int ret;
+
+	dsi->encoder = encoder;
+
+	connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+	ret = drm_connector_init(dsi->drm_dev, connector,
+				 &exynos_dsi_connector_funcs,
+				 DRM_MODE_CONNECTOR_DSI);
+	if (ret) {
+		DRM_ERROR("Failed to initialize connector with drm\n");
+		return ret;
+	}
+
+	drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs);
+	drm_sysfs_connector_add(connector);
+	drm_mode_connector_attach_encoder(connector, encoder);
+
+	return 0;
+}
+
+static void exynos_dsi_poweron(struct exynos_dsi *dsi)
+{
+	int ret;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
+	if (ret < 0)
+		dev_err(dsi->dev, "cannot enable regulators %d\n", ret);
+
+	clk_prepare_enable(dsi->bus_clk);
+	clk_prepare_enable(dsi->pll_clk);
+
+	phy_power_on(dsi->phy);
+}
+
+static void exynos_dsi_poweroff(struct exynos_dsi *dsi)
+{
+	int ret;
+
+	dsi->initialized = false;
+
+	usleep_range(10000, 20000);
+	exynos_dsi_disable_clock(dsi);
+
+	disable_irq(dsi->irq);
+
+	phy_power_off(dsi->phy);
+
+	clk_disable_unprepare(dsi->pll_clk);
+	clk_disable_unprepare(dsi->bus_clk);
+
+	ret = regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
+	if (ret < 0)
+		dev_err(dsi->dev, "cannot disable regulators %d\n", ret);
+}
+
+static void exynos_dsi_mode_set(struct exynos_drm_display *display,
+				struct drm_display_mode *mode)
+{
+	struct exynos_dsi *dsi = display->ctx;
+	struct videomode *vm = &dsi->vm;
+
+	vm->hactive = mode->hdisplay;
+	vm->vactive = mode->vdisplay;
+	vm->vfront_porch = mode->vsync_start - mode->vdisplay;
+	vm->vback_porch = mode->vtotal - mode->vsync_end;
+	vm->vsync_len = mode->vsync_end - mode->vsync_start;
+	vm->hfront_porch = mode->hsync_start - mode->hdisplay;
+	vm->hback_porch = mode->htotal - mode->hsync_end;
+	vm->hsync_len = mode->hsync_end - mode->hsync_start;
+}
+
+static void exynos_dsi_dpms(struct exynos_drm_display *display, int mode)
+{
+	struct exynos_dsi *dsi = display->ctx;
+
+	if (dsi->panel) {
+		switch (mode) {
+		case DRM_MODE_DPMS_ON:
+			if (dsi->dpms_mode != DRM_MODE_DPMS_ON) {
+				exynos_dsi_poweron(dsi);
+				drm_panel_enable(dsi->panel);
+				exynos_dsi_set_display_mode(dsi);
+				exynos_dsi_set_display_enable(dsi, true);
+			}
+			break;
+		case DRM_MODE_DPMS_STANDBY:
+		case DRM_MODE_DPMS_SUSPEND:
+		case DRM_MODE_DPMS_OFF:
+			if (dsi->dpms_mode == DRM_MODE_DPMS_ON) {
+				exynos_dsi_set_display_enable(dsi, false);
+				drm_panel_disable(dsi->panel);
+				exynos_dsi_poweroff(dsi);
+			}
+			break;
+		default:
+			break;
+		}
+	}
+	dsi->dpms_mode = mode;
+}
+
+static struct exynos_drm_display_ops exynos_dsi_display_ops = {
+	.initialize = exynos_dsi_initialize,
+	.create_connector = exynos_dsi_create_connector,
+	.mode_set = exynos_dsi_mode_set,
+	.dpms = exynos_dsi_dpms,
+};
+
+static struct exynos_drm_display exynos_dsi_display = {
+	.type = EXYNOS_DISPLAY_TYPE_LCD,
+	.ops = &exynos_dsi_display_ops,
+};
+
+static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
+{
+	struct device *dev = dsi->dev;
+	struct device_node *node = dev->of_node;
+	int ret;
+
+	ret = of_property_read_u32(node, "samsung,pll-clk-freq",
+				   &dsi->pll_clk_rate);
+	if (ret) {
+		dev_err(dev, "failed to get pll-clk-freq property\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int exynos_dsi_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct exynos_dsi *dsi;
+	int ret;
+
+	dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
+	if (!dsi) {
+		dev_err(&pdev->dev, "failed to allocate dsi object.\n");
+		return -ENOMEM;
+	}
+
+	init_completion(&dsi->completed);
+	spin_lock_init(&dsi->transfer_lock);
+	INIT_LIST_HEAD(&dsi->transfer_list);
+
+	dsi->dsi_host.ops = &exynos_dsi_ops;
+	dsi->dsi_host.dev = &pdev->dev;
+
+	dsi->dev = &pdev->dev;
+	dsi->dpms_mode = DRM_MODE_DPMS_OFF;
+	dsi->hs_clk_freq = 500000000;
+	dsi->esc_clk_freq = 20000000;
+
+	ret = exynos_dsi_parse_dt(dsi);
+	if (ret)
+		return ret;
+
+	dsi->supplies[0].supply = "vddcore";
+	dsi->supplies[1].supply = "vddio";
+	ret = devm_regulator_bulk_get(&pdev->dev, ARRAY_SIZE(dsi->supplies),
+				      dsi->supplies);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to get regulators: %d\n", ret);
+		return ret;
+	}
+
+	dsi->pll_clk = devm_clk_get(&pdev->dev, "pll_clk");
+	if (IS_ERR(dsi->pll_clk)) {
+		dev_err(&pdev->dev, "failed to get dsi pll input clock\n");
+		return PTR_ERR(dsi->pll_clk);
+	}
+
+	dsi->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
+	if (IS_ERR(dsi->bus_clk)) {
+		dev_err(&pdev->dev, "failed to get dsi bus clock\n");
+		return PTR_ERR(dsi->bus_clk);
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dsi->reg_base = devm_request_and_ioremap(&pdev->dev, res);
+	if (!dsi->reg_base) {
+		dev_err(&pdev->dev, "failed to remap io region\n");
+		return -EADDRNOTAVAIL;
+	}
+
+	dsi->phy = devm_phy_get(&pdev->dev, "dsim");
+	if (IS_ERR(dsi->phy)) {
+		dev_info(&pdev->dev, "failed to get dsim phy\n");
+		return PTR_ERR(dsi->phy);
+	}
+
+	dsi->irq = platform_get_irq(pdev, 0);
+	if (dsi->irq < 0) {
+		dev_err(&pdev->dev, "failed to request dsi irq resource\n");
+		return dsi->irq;
+	}
+
+	irq_set_status_flags(dsi->irq, IRQ_NOAUTOEN);
+	ret = devm_request_threaded_irq(&pdev->dev, dsi->irq, NULL,
+					exynos_dsi_irq, IRQF_ONESHOT,
+					dev_name(&pdev->dev), dsi);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to request dsi irq\n");
+		return ret;
+	}
+
+	exynos_dsi_display.ctx = dsi;
+
+	platform_set_drvdata(pdev, &exynos_dsi_display);
+	exynos_drm_display_register(&exynos_dsi_display);
+
+	return mipi_dsi_host_register(&dsi->dsi_host);
+}
+
+static int exynos_dsi_remove(struct platform_device *pdev)
+{
+	struct exynos_dsi *dsi = exynos_dsi_display.ctx;
+
+	mipi_dsi_host_unregister(&dsi->dsi_host);
+
+	return 0;
+}
+
+static int exynos_dsi_resume(struct device *dev)
+{
+	exynos_dsi_dpms(&exynos_dsi_display, DRM_MODE_DPMS_ON);
+
+	return 0;
+}
+
+static int exynos_dsi_suspend(struct device *dev)
+{
+	exynos_dsi_dpms(&exynos_dsi_display, DRM_MODE_DPMS_OFF);
+
+	return 0;
+}
+
+static const struct dev_pm_ops exynos_dsi_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume)
+};
+
+static struct of_device_id exynos_dsi_of_match[] = {
+	{ .compatible = "samsung,exynos4210-mipi-dsi" },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
+struct platform_driver dsi_driver = {
+	.probe = exynos_dsi_probe,
+	.remove = exynos_dsi_remove,
+	.driver = {
+		   .name = "exynos-dsi",
+		   .owner = THIS_MODULE,
+		   .pm = &exynos_dsi_pm_ops,
+		   .of_match_table = exynos_dsi_of_match,
+	},
+};
+
+MODULE_AUTHOR("Tomasz Figa <t.figa@samsung.com>");
+MODULE_AUTHOR("Andrzej Hajda <a.hajda@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC MIPI DSI Master");
+MODULE_LICENSE("GPL v2");
-- 
1.8.3.2


