[Freedreno] [PATCH 20/21] drm/msm: Add SDM845 DPU support

Sean Paul seanpaul at chromium.org
Mon Jul 9 17:31:56 UTC 2018


From: Jeykumar Sankaran <jsanka at codeaurora.org>

The SDM845 SoC includes the Mobile Display Sub System (MDSS), a
top-level wrapper consisting of the Display Processing Unit (DPU) and
display peripheral modules such as the Display Serial Interface (DSI)
and DisplayPort (DP).

MDSS functions essentially as a back-end composition engine. It blends
video and graphic images stored in the frame buffers and scans out the
composed image to a display sink (over DSI/DP).

The following diagram represents the hardware blocks for a simple
pipeline (two planes are active on a given CRTC, which is connected to
a DSI connector):

       MDSS
      +---------------------------------+
      | +-----------------------------+ |
      | | DPU                         | |
      | |  +--------+  +--------+     | |
      | |  |  SSPP  |  |  SSPP  |     | |
      | |  +----+---+  +----+---+     | |
      | |       |           |         | |
      | |  +----v-----------v---+     | |
      | |  |  Layer Mixer (LM)  |     | |
      | |  +--------------------+     | |
      | |  +--------------------+     | |
      | |  |    PingPong (PP)   |     | |
      | |  +--------------------+     | |
      | |  +--------------------+     | |
      | |  |  INTERFACE (VIDEO) |     | |
      | |  +---+----------------+     | |
      | +------|----------------------+ |
      |        |                        |
      | +------|---------------------+  |
      | |      | DISPLAY PERIPHERALS |  |
      | |  +---v-+      +-----+      |  |
      | |  | DSI |      |  DP |      |  |
      | |  +-----+      +-----+      |  |
      | +----------------------------+  |
      +---------------------------------+

The number of DPU sub-blocks (i.e. SSPPs, LMs, PP blocks and INTFs)
depends on SoC capabilities.

Overview of DPU sub-blocks:
---------------------------
* Source Surface Processor (SSPP):
 Refers to any of the hardware pipes (ViG, DMA, etc.). Only ViG pipes
 are capable of performing format conversion, scaling and quality
 improvement of source surfaces.

* Layer Mixer (LM):
 Blends source surfaces together (in the requested z-order).

* PingPong (PP):
 This block controls the frame-done interrupt output, EOL and EOF
 generation, and overflow/underflow control.

* Display interface (INTF):
 Timing generator and the interface that connects to the display
 peripherals.

DRM components mapping to DPU architecture:
------------------------------------------
PLANEs map to SSPPs
CRTCs map to LMs
Encoders map to PPs and INTFs

Data flow setup:
---------------
The MDSS hardware can support various data flows, e.g.:
  - Dual pipe: Output from two LMs combined to single display.
  - Split display: Output from two LMs connected to two separate
                   interfaces.

The hardware capabilities determine the number of concurrent data paths
possible. Any control path (i.e. a pipeline within the DPU) can be
routed to any of the hardware data paths. A given control path can be
triggered, flushed and controlled independently.
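
As an illustration, a consumer (e.g. an encoder) hooks a DPU hardware
interrupt through the core IRQ helpers added by this patch roughly as
follows. This is only a sketch: "dpu_kms", "intr_type", "instance_idx"
and "enc" are assumed to be in scope, and the callback body is
hypothetical (see dpu_core_irq.h below for the kerneldoc):

       /* invoked from dpu_core_irq_callback_handler() with cb_lock held */
       static void example_irq_cb(void *arg, int irq_idx)
       {
               /* e.g. signal a vblank event for the encoder in "arg" */
       }

       struct dpu_irq_callback cb = {
               .func = example_irq_cb,
               .arg  = enc,
       };
       int irq_idx;

       /* list head must be initialized; registration uses list_del_init() */
       INIT_LIST_HEAD(&cb.list);

       irq_idx = dpu_core_irq_idx_lookup(dpu_kms, intr_type, instance_idx);
       dpu_core_irq_register_callback(dpu_kms, irq_idx, &cb);
       dpu_core_irq_enable(dpu_kms, &irq_idx, 1);

       /* ... later, tear down in the reverse order ... */
       dpu_core_irq_disable(dpu_kms, &irq_idx, 1);
       dpu_core_irq_unregister_callback(dpu_kms, irq_idx, &cb);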

Signed-off-by: Abhinav Kumar <abhinavk at codeaurora.org>
Signed-off-by: Archit Taneja <architt at codeaurora.org>
Signed-off-by: Chandan Uddaraju <chandanu at codeaurora.org>
Signed-off-by: Jeykumar Sankaran <jsanka at codeaurora.org>
Signed-off-by: Jordan Crouse <jcrouse at codeaurora.org>
Signed-off-by: Rajesh Yadav <ryadav at codeaurora.org>
Signed-off-by: Sravanthi Kollukuduru <skolluku at codeaurora.org>
Signed-off-by: Sean Paul <seanpaul at chromium.org>
---
 drivers/gpu/drm/msm/Makefile                  |   32 +-
 drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c  |  532 ++++
 drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h  |  178 ++
 drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c |  637 ++++
 drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h |  133 +
 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c      | 2523 ++++++++++++++++
 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h      |  491 ++++
 drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c       | 2393 +++++++++++++++
 drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h       |  103 +
 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c   | 2574 +++++++++++++++++
 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h   |  191 ++
 .../gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h  |  453 +++
 .../drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c  |  905 ++++++
 .../drm/msm/disp/dpu1/dpu_encoder_phys_vid.c  |  922 ++++++
 drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c   | 1276 ++++++++
 drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h   |  136 +
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c    |  155 +
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h    |   53 +
 .../gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c    |  511 ++++
 .../gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h    |  804 +++++
 .../drm/msm/disp/dpu1/dpu_hw_catalog_format.h |  182 ++
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c    |  323 +++
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h    |  139 +
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c    |  540 ++++
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h    |  218 ++
 .../gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c | 1213 ++++++++
 .../gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h |  278 ++
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c   |  349 +++
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h   |  128 +
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c     |  261 ++
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h     |  122 +
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h   |  465 +++
 .../gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c   |  250 ++
 .../gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h   |  136 +
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c   |  753 +++++
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h   |  424 +++
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c    |  398 +++
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h    |  202 ++
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c   |  452 +++
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h   |  358 +++
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c   |  275 ++
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h   |  128 +
 drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h      |   56 +
 drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c   |  186 ++
 drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h   |   57 +
 drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c       |   66 +
 drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h       |   59 +
 drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c       | 1380 +++++++++
 drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h       |  402 +++
 drivers/gpu/drm/msm/disp/dpu1/dpu_kms_utils.c |  153 +
 drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c      |  259 ++
 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c     | 1963 +++++++++++++
 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h     |  175 ++
 .../gpu/drm/msm/disp/dpu1/dpu_power_handle.c  |  249 ++
 .../gpu/drm/msm/disp/dpu1/dpu_power_handle.h  |  225 ++
 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c        | 1079 +++++++
 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h        |  199 ++
 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h     | 1011 +++++++
 drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c      |  384 +++
 drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h      |   94 +
 drivers/gpu/drm/msm/msm_drv.c                 |  135 +-
 drivers/gpu/drm/msm/msm_drv.h                 |   81 +-
 drivers/gpu/drm/msm/msm_kms.h                 |    8 +
 include/uapi/media/msm_media_info.h           | 1376 +++++++++
 64 files changed, 32178 insertions(+), 15 deletions(-)
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_kms_utils.c
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
 create mode 100644 drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
 create mode 100644 include/uapi/media/msm_media_info.h

diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index cd40c050b2d7..1639ea8c0d13 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 ccflags-y := -Idrivers/gpu/drm/msm
+ccflags-y += -Idrivers/gpu/drm/msm/disp/dpu1
 ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
 
 msm-y := \
@@ -45,6 +46,34 @@ msm-y := \
 	disp/mdp5/mdp5_mixer.o \
 	disp/mdp5/mdp5_plane.o \
 	disp/mdp5/mdp5_smp.o \
+	disp/dpu1/dpu_core_irq.o \
+	disp/dpu1/dpu_core_perf.o \
+	disp/dpu1/dpu_crtc.o \
+	disp/dpu1/dpu_encoder.o \
+	disp/dpu1/dpu_encoder_phys_cmd.o \
+	disp/dpu1/dpu_encoder_phys_vid.o \
+	disp/dpu1/dpu_formats.o \
+	disp/dpu1/dpu_hw_blk.o \
+	disp/dpu1/dpu_hw_catalog.o \
+	disp/dpu1/dpu_hw_cdm.o \
+	disp/dpu1/dpu_hw_ctl.o \
+	disp/dpu1/dpu_hw_interrupts.o \
+	disp/dpu1/dpu_hw_intf.o \
+	disp/dpu1/dpu_hw_lm.o \
+	disp/dpu1/dpu_hw_pingpong.o \
+	disp/dpu1/dpu_hw_sspp.o \
+	disp/dpu1/dpu_hw_top.o \
+	disp/dpu1/dpu_hw_util.o \
+	disp/dpu1/dpu_hw_vbif.o \
+	disp/dpu1/dpu_io_util.o \
+	disp/dpu1/dpu_irq.o \
+	disp/dpu1/dpu_kms.o \
+	disp/dpu1/dpu_kms_utils.o \
+	disp/dpu1/dpu_mdss.o \
+	disp/dpu1/dpu_plane.o \
+	disp/dpu1/dpu_power_handle.o \
+	disp/dpu1/dpu_rm.o \
+	disp/dpu1/dpu_vbif.o \
 	msm_atomic.o \
 	msm_debugfs.o \
 	msm_drv.o \
@@ -62,7 +91,8 @@ msm-y := \
 	msm_ringbuffer.o \
 	msm_submitqueue.o
 
-msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o
+msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \
+			  disp/dpu1/dpu_dbg.o
 
 msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
 msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
new file mode 100644
index 000000000000..530c24dec017
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
@@ -0,0 +1,532 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+
+#include "dpu_core_irq.h"
+#include "dpu_trace.h"
+
+/**
+ * dpu_core_irq_callback_handler - dispatch core interrupts
+ * @arg:		private data of callback handler
+ * @irq_idx:		interrupt index
+ */
+static void dpu_core_irq_callback_handler(void *arg, int irq_idx)
+{
+	struct dpu_kms *dpu_kms = arg;
+	struct dpu_irq *irq_obj = &dpu_kms->irq_obj;
+	struct dpu_irq_callback *cb;
+	unsigned long irq_flags;
+
+	pr_debug("irq_idx=%d\n", irq_idx);
+
+	if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) {
+		DRM_ERROR("no registered cb, idx:%d enable_count:%d\n", irq_idx,
+			atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]));
+	}
+
+	atomic_inc(&irq_obj->irq_counts[irq_idx]);
+
+	/*
+	 * Perform registered function callback
+	 */
+	spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+	list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list)
+		if (cb->func)
+			cb->func(cb->arg, irq_idx);
+	spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+
+	/*
+	 * Clear pending interrupt status in HW.
+	 * NOTE: dpu_core_irq_callback_handler is protected by top-level
+	 *       spinlock, so it is safe to clear any interrupt status here.
+	 */
+	dpu_kms->hw_intr->ops.clear_intr_status_nolock(
+			dpu_kms->hw_intr,
+			irq_idx);
+}
+
+int dpu_core_irq_idx_lookup(struct dpu_kms *dpu_kms,
+		enum dpu_intr_type intr_type, u32 instance_idx)
+{
+	if (!dpu_kms || !dpu_kms->hw_intr ||
+			!dpu_kms->hw_intr->ops.irq_idx_lookup)
+		return -EINVAL;
+
+	return dpu_kms->hw_intr->ops.irq_idx_lookup(intr_type,
+			instance_idx);
+}
+
+/**
+ * _dpu_core_irq_enable - enable core interrupt given by the index
+ * @dpu_kms:		Pointer to dpu kms context
+ * @irq_idx:		interrupt index
+ */
+static int _dpu_core_irq_enable(struct dpu_kms *dpu_kms, int irq_idx)
+{
+	unsigned long irq_flags;
+	int ret = 0, enable_count;
+
+	if (!dpu_kms || !dpu_kms->hw_intr ||
+			!dpu_kms->irq_obj.enable_counts ||
+			!dpu_kms->irq_obj.irq_counts) {
+		DPU_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
+	DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
+	trace_dpu_core_irq_enable_idx(irq_idx, enable_count);
+
+	if (atomic_inc_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 1) {
+		ret = dpu_kms->hw_intr->ops.enable_irq(
+				dpu_kms->hw_intr,
+				irq_idx);
+		if (ret)
+			DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
+					irq_idx);
+
+		DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+
+		spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+		/* empty callback list but interrupt is enabled */
+		if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]))
+			DPU_ERROR("irq_idx=%d enabled with no callback\n",
+					irq_idx);
+		spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+	}
+
+	return ret;
+}
+
+int dpu_core_irq_enable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
+{
+	int i, ret = 0, counts;
+
+	if (!dpu_kms || !irq_idxs || !irq_count) {
+		DPU_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
+	if (counts)
+		DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
+
+	for (i = 0; (i < irq_count) && !ret; i++)
+		ret = _dpu_core_irq_enable(dpu_kms, irq_idxs[i]);
+
+	return ret;
+}
+
+/**
+ * _dpu_core_irq_disable - disable core interrupt given by the index
+ * @dpu_kms:		Pointer to dpu kms context
+ * @irq_idx:		interrupt index
+ */
+static int _dpu_core_irq_disable(struct dpu_kms *dpu_kms, int irq_idx)
+{
+	int ret = 0, enable_count;
+
+	if (!dpu_kms || !dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) {
+		DPU_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
+	DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
+	trace_dpu_core_irq_disable_idx(irq_idx, enable_count);
+
+	if (atomic_dec_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 0) {
+		ret = dpu_kms->hw_intr->ops.disable_irq(
+				dpu_kms->hw_intr,
+				irq_idx);
+		if (ret)
+			DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
+					irq_idx);
+		DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+	}
+
+	return ret;
+}
+
+int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
+{
+	int i, ret = 0, counts;
+
+	if (!dpu_kms || !irq_idxs || !irq_count) {
+		DPU_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
+	if (counts == 2)
+		DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
+
+	for (i = 0; (i < irq_count) && !ret; i++)
+		ret = _dpu_core_irq_disable(dpu_kms, irq_idxs[i]);
+
+	return ret;
+}
+
+/**
+ * dpu_core_irq_disable_nolock - disable core interrupt given by the index
+ *                               without lock
+ * @dpu_kms:		Pointer to dpu kms context
+ * @irq_idx:		interrupt index
+ */
+int dpu_core_irq_disable_nolock(struct dpu_kms *dpu_kms, int irq_idx)
+{
+	int ret = 0, enable_count;
+
+	if (!dpu_kms || !dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) {
+		DPU_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
+	DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
+	trace_dpu_core_irq_disable_nolock(irq_idx, enable_count);
+
+	if (atomic_dec_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 0) {
+		ret = dpu_kms->hw_intr->ops.disable_irq_nolock(
+				dpu_kms->hw_intr,
+				irq_idx);
+		if (ret)
+			DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
+					irq_idx);
+		DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+	}
+
+	return ret;
+}
+
+u32 dpu_core_irq_read_nolock(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
+{
+	if (!dpu_kms || !dpu_kms->hw_intr ||
+			!dpu_kms->hw_intr->ops.get_intr_status_nolock)
+		return 0;
+
+	if (irq_idx < 0) {
+		DPU_ERROR("[%pS] invalid irq_idx=%d\n",
+				__builtin_return_address(0), irq_idx);
+		return 0;
+	}
+
+	return dpu_kms->hw_intr->ops.get_intr_status_nolock(dpu_kms->hw_intr,
+			irq_idx, clear);
+}
+
+u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
+{
+	if (!dpu_kms || !dpu_kms->hw_intr ||
+			!dpu_kms->hw_intr->ops.get_interrupt_status)
+		return 0;
+
+	if (irq_idx < 0) {
+		DPU_ERROR("[%pS] invalid irq_idx=%d\n",
+				__builtin_return_address(0), irq_idx);
+		return 0;
+	}
+
+	return dpu_kms->hw_intr->ops.get_interrupt_status(dpu_kms->hw_intr,
+			irq_idx, clear);
+}
+
+int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
+		struct dpu_irq_callback *register_irq_cb)
+{
+	unsigned long irq_flags;
+
+	if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
+		DPU_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (!register_irq_cb || !register_irq_cb->func) {
+		DPU_ERROR("invalid irq_cb:%d func:%d\n",
+				register_irq_cb != NULL,
+				register_irq_cb ?
+					register_irq_cb->func != NULL : -1);
+		return -EINVAL;
+	}
+
+	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+	spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+	trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
+	list_del_init(&register_irq_cb->list);
+	list_add_tail(&register_irq_cb->list,
+			&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]);
+	spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+
+	return 0;
+}
+
+int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
+		struct dpu_irq_callback *register_irq_cb)
+{
+	unsigned long irq_flags;
+
+	if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
+		DPU_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (!register_irq_cb || !register_irq_cb->func) {
+		DPU_ERROR("invalid irq_cb:%d func:%d\n",
+				register_irq_cb != NULL,
+				register_irq_cb ?
+					register_irq_cb->func != NULL : -1);
+		return -EINVAL;
+	}
+
+	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+	spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+	trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
+	list_del_init(&register_irq_cb->list);
+	/* empty callback list but interrupt is still enabled */
+	if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]) &&
+			atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]))
+		DPU_ERROR("irq_idx=%d enabled with no callback\n", irq_idx);
+	spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+
+	return 0;
+}
+
+static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms)
+{
+	if (!dpu_kms || !dpu_kms->hw_intr ||
+			!dpu_kms->hw_intr->ops.clear_all_irqs)
+		return;
+
+	dpu_kms->hw_intr->ops.clear_all_irqs(dpu_kms->hw_intr);
+}
+
+static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
+{
+	if (!dpu_kms || !dpu_kms->hw_intr ||
+			!dpu_kms->hw_intr->ops.disable_all_irqs)
+		return;
+
+	dpu_kms->hw_intr->ops.disable_all_irqs(dpu_kms->hw_intr);
+}
+
+#ifdef CONFIG_DEBUG_FS
+#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)				\
+static int __prefix ## _open(struct inode *inode, struct file *file)	\
+{									\
+	return single_open(file, __prefix ## _show, inode->i_private);	\
+}									\
+static const struct file_operations __prefix ## _fops = {		\
+	.owner = THIS_MODULE,						\
+	.open = __prefix ## _open,					\
+	.release = single_release,					\
+	.read = seq_read,						\
+	.llseek = seq_lseek,						\
+}
+
+static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
+{
+	struct dpu_irq *irq_obj = s->private;
+	struct dpu_irq_callback *cb;
+	unsigned long irq_flags;
+	int i, irq_count, enable_count, cb_count;
+
+	if (!irq_obj || !irq_obj->enable_counts || !irq_obj->irq_cb_tbl) {
+		DPU_ERROR("invalid parameters\n");
+		return 0;
+	}
+
+	for (i = 0; i < irq_obj->total_irqs; i++) {
+		spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
+		cb_count = 0;
+		irq_count = atomic_read(&irq_obj->irq_counts[i]);
+		enable_count = atomic_read(&irq_obj->enable_counts[i]);
+		list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list)
+			cb_count++;
+		spin_unlock_irqrestore(&irq_obj->cb_lock, irq_flags);
+
+		if (irq_count || enable_count || cb_count)
+			seq_printf(s, "idx:%d irq:%d enable:%d cb:%d\n",
+					i, irq_count, enable_count, cb_count);
+	}
+
+	return 0;
+}
+
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_core_irq);
+
+int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+		struct dentry *parent)
+{
+	dpu_kms->irq_obj.debugfs_file = debugfs_create_file("core_irq", 0600,
+			parent, &dpu_kms->irq_obj,
+			&dpu_debugfs_core_irq_fops);
+
+	return 0;
+}
+
+void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
+{
+	debugfs_remove(dpu_kms->irq_obj.debugfs_file);
+	dpu_kms->irq_obj.debugfs_file = NULL;
+}
+
+#else
+int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+		struct dentry *parent)
+{
+	return 0;
+}
+
+void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
+{
+}
+#endif
+
+void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
+{
+	struct msm_drm_private *priv;
+	int i;
+
+	if (!dpu_kms) {
+		DPU_ERROR("invalid dpu_kms\n");
+		return;
+	} else if (!dpu_kms->dev) {
+		DPU_ERROR("invalid drm device\n");
+		return;
+	} else if (!dpu_kms->dev->dev_private) {
+		DPU_ERROR("invalid device private\n");
+		return;
+	}
+	priv = dpu_kms->dev->dev_private;
+
+	pm_runtime_get_sync(&dpu_kms->pdev->dev);
+	dpu_clear_all_irqs(dpu_kms);
+	dpu_disable_all_irqs(dpu_kms);
+	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+	spin_lock_init(&dpu_kms->irq_obj.cb_lock);
+
+	/* Create irq callbacks for all possible irq_idx */
+	dpu_kms->irq_obj.total_irqs = dpu_kms->hw_intr->irq_idx_tbl_size;
+	dpu_kms->irq_obj.irq_cb_tbl = kcalloc(dpu_kms->irq_obj.total_irqs,
+			sizeof(struct list_head), GFP_KERNEL);
+	dpu_kms->irq_obj.enable_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
+			sizeof(atomic_t), GFP_KERNEL);
+	dpu_kms->irq_obj.irq_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
+			sizeof(atomic_t), GFP_KERNEL);
+	for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++) {
+		INIT_LIST_HEAD(&dpu_kms->irq_obj.irq_cb_tbl[i]);
+		atomic_set(&dpu_kms->irq_obj.enable_counts[i], 0);
+		atomic_set(&dpu_kms->irq_obj.irq_counts[i], 0);
+	}
+}
+
+int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms)
+{
+	return 0;
+}
+
+void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
+{
+	struct msm_drm_private *priv;
+	int i;
+
+	if (!dpu_kms) {
+		DPU_ERROR("invalid dpu_kms\n");
+		return;
+	} else if (!dpu_kms->dev) {
+		DPU_ERROR("invalid drm device\n");
+		return;
+	} else if (!dpu_kms->dev->dev_private) {
+		DPU_ERROR("invalid device private\n");
+		return;
+	}
+	priv = dpu_kms->dev->dev_private;
+
+	pm_runtime_get_sync(&dpu_kms->pdev->dev);
+	for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++)
+		if (atomic_read(&dpu_kms->irq_obj.enable_counts[i]) ||
+				!list_empty(&dpu_kms->irq_obj.irq_cb_tbl[i]))
+			DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
+
+	dpu_clear_all_irqs(dpu_kms);
+	dpu_disable_all_irqs(dpu_kms);
+	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+	kfree(dpu_kms->irq_obj.irq_cb_tbl);
+	kfree(dpu_kms->irq_obj.enable_counts);
+	kfree(dpu_kms->irq_obj.irq_counts);
+	dpu_kms->irq_obj.irq_cb_tbl = NULL;
+	dpu_kms->irq_obj.enable_counts = NULL;
+	dpu_kms->irq_obj.irq_counts = NULL;
+	dpu_kms->irq_obj.total_irqs = 0;
+}
+
+irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms)
+{
+	/*
+	 * Read interrupt status from all sources. Interrupt statuses are
+	 * stored within hw_intr.
+	 * This also clears the interrupt status after reading.
+	 * An individual interrupt status bit will only get stored if that
+	 * interrupt is enabled.
+	 */
+	dpu_kms->hw_intr->ops.get_interrupt_statuses(dpu_kms->hw_intr);
+
+	/*
+	 * Dispatch to the HW driver to look up the interrupts that have
+	 * fired. When a matching interrupt is found, the HW driver calls
+	 * dpu_core_irq_callback_handler with the irq_idx from the lookup
+	 * table. dpu_core_irq_callback_handler runs the registered
+	 * callbacks and clears the interrupt status once the callbacks
+	 * have finished.
+	 */
+	dpu_kms->hw_intr->ops.dispatch_irqs(
+			dpu_kms->hw_intr,
+			dpu_core_irq_callback_handler,
+			dpu_kms);
+
+	return IRQ_HANDLED;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
new file mode 100644
index 000000000000..8fa59db4cbd5
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
@@ -0,0 +1,178 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_CORE_IRQ_H__
+#define __DPU_CORE_IRQ_H__
+
+#include "dpu_kms.h"
+#include "dpu_hw_interrupts.h"
+
+/**
+ * dpu_core_irq_preinstall - perform pre-installation of core IRQ handler
+ * @dpu_kms:		DPU handle
+ * @return:		none
+ */
+void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq_postinstall - perform post-installation of core IRQ handler
+ * @dpu_kms:		DPU handle
+ * @return:		0 if success; error code otherwise
+ */
+int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq_uninstall - uninstall core IRQ handler
+ * @dpu_kms:		DPU handle
+ * @return:		none
+ */
+void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq - core IRQ handler
+ * @dpu_kms:		DPU handle
+ * @return:		interrupt handling status
+ */
+irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq_idx_lookup - IRQ helper function to look up the irq_idx in
+ *                      the HW interrupt mapping table.
+ * @dpu_kms:		DPU handle
+ * @intr_type:		DPU HW interrupt type for lookup
+ * @instance_idx:	DPU HW block instance defined in dpu_hw_mdss.h
+ * @return:		irq_idx on success, or -EINVAL if the lookup fails
+ */
+int dpu_core_irq_idx_lookup(
+		struct dpu_kms *dpu_kms,
+		enum dpu_intr_type intr_type,
+		uint32_t instance_idx);
+
+/**
+ * dpu_core_irq_enable - IRQ helper function for enabling one or more IRQs
+ * @dpu_kms:		DPU handle
+ * @irq_idxs:		Array of irq index
+ * @irq_count:		Number of irq_idx provided in the array
+ * @return:		0 for success enabling IRQ, otherwise failure
+ *
+ * This function increments the reference count on each enable and
+ * decrements it on each disable.  The interrupt is only enabled in
+ * hardware when the count goes from 0 to 1.
+ */
+int dpu_core_irq_enable(
+		struct dpu_kms *dpu_kms,
+		int *irq_idxs,
+		uint32_t irq_count);
+
+/**
+ * dpu_core_irq_disable - IRQ helper function for disabling one or more IRQs
+ * @dpu_kms:		DPU handle
+ * @irq_idxs:		Array of irq index
+ * @irq_count:		Number of irq_idx provided in the array
+ * @return:		0 for success disabling IRQ, otherwise failure
+ *
+ * This function increments the reference count on each enable and
+ * decrements it on each disable.  The interrupt is only disabled in
+ * hardware when the count drops back to 0.
+ */
+int dpu_core_irq_disable(
+		struct dpu_kms *dpu_kms,
+		int *irq_idxs,
+		uint32_t irq_count);
+
+/**
+ * dpu_core_irq_disable_nolock - no lock version of dpu_core_irq_disable
+ * @dpu_kms:		DPU handle
+ * @irq_idx:		Irq index
+ * @return:		0 for success disabling IRQ, otherwise failure
+ *
+ * This function increments the reference count on each enable and
+ * decrements it on each disable.  The interrupt is only disabled in
+ * hardware when the count drops back to 0.
+ */
+int dpu_core_irq_disable_nolock(
+		struct dpu_kms *dpu_kms,
+		int irq_idx);
+
+/**
+ * dpu_core_irq_read - IRQ helper function for reading IRQ status
+ * @dpu_kms:		DPU handle
+ * @irq_idx:		irq index
+ * @clear:		True to clear the irq after read
+ * @return:		non-zero if an interrupt was detected; zero otherwise
+ */
+u32 dpu_core_irq_read(
+		struct dpu_kms *dpu_kms,
+		int irq_idx,
+		bool clear);
+
+/**
+ * dpu_core_irq_read_nolock - no lock version of dpu_core_irq_read
+ * @dpu_kms:		DPU handle
+ * @irq_idx:		irq index
+ * @clear:		True to clear the irq after read
+ * @return:		non-zero if an interrupt was detected; zero otherwise
+ */
+u32 dpu_core_irq_read_nolock(
+		struct dpu_kms *dpu_kms,
+		int irq_idx,
+		bool clear);
+
+/**
+ * dpu_core_irq_register_callback - register a callback to be run when the
+ *                             given IRQ fires
+ * @dpu_kms:		DPU handle
+ * @irq_idx:		irq index
+ * @irq_cb:		IRQ callback structure, containing callback function
+ *			and argument. The structure (including its list head)
+ *			must remain valid until the callback is unregistered.
+ * @return:		0 on success registering the callback, otherwise
+ *			failure
+ *
+ * This function supports registration of multiple callbacks for each interrupt.
+ */
+int dpu_core_irq_register_callback(
+		struct dpu_kms *dpu_kms,
+		int irq_idx,
+		struct dpu_irq_callback *irq_cb);
+
+/**
+ * dpu_core_irq_unregister_callback - unregister a callback previously
+ *                             registered for the given IRQ
+ * @dpu_kms:		DPU handle
+ * @irq_idx:		irq index
+ * @irq_cb:		IRQ callback structure, containing callback function
+ *			and argument. This must be the same structure that was
+ *			passed at registration.
+ * @return:		0 on success unregistering the callback, otherwise
+ *			failure
+ *
+ * Multiple callbacks may be registered for each interrupt; this removes only
+ * the given one.
+ */
+int dpu_core_irq_unregister_callback(
+		struct dpu_kms *dpu_kms,
+		int irq_idx,
+		struct dpu_irq_callback *irq_cb);
+
+/**
+ * dpu_debugfs_core_irq_init - register core irq debugfs
+ * @dpu_kms: pointer to kms
+ * @parent: debugfs directory root
+ * @return: 0 on success
+ */
+int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+		struct dentry *parent);
+
+/**
+ * dpu_debugfs_core_irq_destroy - deregister core irq debugfs
+ * @dpu_kms: pointer to kms
+ */
+void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms);
+
+#endif /* __DPU_CORE_IRQ_H__ */
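
A note on the enable/disable helpers above: they are reference counted,
so the hardware interrupt is programmed only on the 0->1 and 1->0
transitions of enable_counts[irq_idx]. The following stand-alone C
model (illustration only, not part of the patch) captures the pattern:

       #include <stdatomic.h>
       #include <stdio.h>

       static atomic_int enable_count;

       /* stand-ins for hw_intr->ops.enable_irq()/disable_irq() */
       static void hw_enable(void)  { puts("hw irq enabled");  }
       static void hw_disable(void) { puts("hw irq disabled"); }

       static void irq_get(void)
       {
               /* the first user turns the hardware interrupt on */
               if (atomic_fetch_add(&enable_count, 1) == 0)
                       hw_enable();
       }

       static void irq_put(void)
       {
               /* the last user turns it off again */
               if (atomic_fetch_sub(&enable_count, 1) == 1)
                       hw_disable();
       }

       int main(void)
       {
               irq_get();      /* enables the hw irq */
               irq_get();      /* count 1 -> 2, no hw access */
               irq_put();      /* count 2 -> 1, no hw access */
               irq_put();      /* disables the hw irq */
               return 0;
       }
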
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
new file mode 100644
index 000000000000..41c5191f9056
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -0,0 +1,637 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/sort.h>
+#include <linux/clk.h>
+#include <linux/bitmap.h>
+
+#include "dpu_kms.h"
+#include "dpu_trace.h"
+#include "dpu_crtc.h"
+#include "dpu_core_perf.h"
+
+#define DPU_PERF_MODE_STRING_SIZE	128
+
+/**
+ * enum dpu_perf_mode - performance tuning mode
+ * @DPU_PERF_MODE_NORMAL: performance controlled by user mode client
+ * @DPU_PERF_MODE_MINIMUM: performance bounded by minimum setting
+ * @DPU_PERF_MODE_FIXED: performance bounded by fixed setting
+ */
+enum dpu_perf_mode {
+	DPU_PERF_MODE_NORMAL,
+	DPU_PERF_MODE_MINIMUM,
+	DPU_PERF_MODE_FIXED,
+	DPU_PERF_MODE_MAX
+};
+
+static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
+{
+	struct msm_drm_private *priv;
+
+	if (!crtc->dev || !crtc->dev->dev_private) {
+		DPU_ERROR("invalid device\n");
+		return NULL;
+	}
+
+	priv = crtc->dev->dev_private;
+	if (!priv || !priv->kms) {
+		DPU_ERROR("invalid kms\n");
+		return NULL;
+	}
+
+	return to_dpu_kms(priv->kms);
+}
+
+static bool _dpu_core_perf_crtc_is_power_on(struct drm_crtc *crtc)
+{
+	return dpu_crtc_is_enabled(crtc);
+}
+
+static bool _dpu_core_video_mode_intf_connected(struct drm_crtc *crtc)
+{
+	struct drm_crtc *tmp_crtc;
+
+	if (!crtc)
+		return false;
+
+	drm_for_each_crtc(tmp_crtc, crtc->dev) {
+		if ((dpu_crtc_get_intf_mode(tmp_crtc) == INTF_MODE_VIDEO) &&
+				_dpu_core_perf_crtc_is_power_on(tmp_crtc)) {
+			DPU_DEBUG("video interface connected crtc:%d\n",
+				tmp_crtc->base.id);
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
+		struct drm_crtc *crtc,
+		struct drm_crtc_state *state,
+		struct dpu_core_perf_params *perf)
+{
+	struct dpu_crtc_state *dpu_cstate;
+	int i;
+
+	if (!kms || !kms->catalog || !crtc || !state || !perf) {
+		DPU_ERROR("invalid parameters\n");
+		return;
+	}
+
+	dpu_cstate = to_dpu_crtc_state(state);
+	memset(perf, 0, sizeof(struct dpu_core_perf_params));
+
+	if (!dpu_cstate->bw_control) {
+		for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+			perf->bw_ctl[i] = kms->catalog->perf.max_bw_high *
+					1000ULL;
+			perf->max_per_pipe_ib[i] = perf->bw_ctl[i];
+		}
+		perf->core_clk_rate = kms->perf.max_core_clk_rate;
+	} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
+		for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+			perf->bw_ctl[i] = 0;
+			perf->max_per_pipe_ib[i] = 0;
+		}
+		perf->core_clk_rate = 0;
+	} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
+		for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+			perf->bw_ctl[i] = kms->perf.fix_core_ab_vote;
+			perf->max_per_pipe_ib[i] = kms->perf.fix_core_ib_vote;
+		}
+		perf->core_clk_rate = kms->perf.fix_core_clk_rate;
+	}
+
+	DPU_DEBUG(
+		"crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu llcc_ib=%llu llcc_ab=%llu mem_ib=%llu mem_ab=%llu\n",
+			crtc->base.id, perf->core_clk_rate,
+			perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MNOC],
+			perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
+			perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_LLCC],
+			perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
+			perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_EBI],
+			perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI]);
+}
+
+int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
+		struct drm_crtc_state *state)
+{
+	u32 bw, threshold;
+	u64 bw_sum_of_intfs = 0;
+	enum dpu_crtc_client_type curr_client_type;
+	bool is_video_mode;
+	struct dpu_crtc_state *dpu_cstate;
+	struct drm_crtc *tmp_crtc;
+	struct dpu_kms *kms;
+	int i;
+
+	if (!crtc || !state) {
+		DPU_ERROR("invalid crtc\n");
+		return -EINVAL;
+	}
+
+	kms = _dpu_crtc_get_kms(crtc);
+	if (!kms || !kms->catalog) {
+		DPU_ERROR("invalid parameters\n");
+		return 0;
+	}
+
+	/* we only need bandwidth check on real-time clients (interfaces) */
+	if (dpu_crtc_get_client_type(crtc) == NRT_CLIENT)
+		return 0;
+
+	dpu_cstate = to_dpu_crtc_state(state);
+
+	/* obtain new values */
+	_dpu_core_perf_calc_crtc(kms, crtc, state, &dpu_cstate->new_perf);
+
+	for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
+			i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+		bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl[i];
+		curr_client_type = dpu_crtc_get_client_type(crtc);
+
+		drm_for_each_crtc(tmp_crtc, crtc->dev) {
+			if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+			    (dpu_crtc_get_client_type(tmp_crtc) ==
+					    curr_client_type) &&
+			    (tmp_crtc != crtc)) {
+				struct dpu_crtc_state *tmp_cstate =
+					to_dpu_crtc_state(tmp_crtc->state);
+
+				DPU_DEBUG("crtc:%d bw:%llu ctrl:%d\n",
+					tmp_crtc->base.id,
+					tmp_cstate->new_perf.bw_ctl[i],
+					tmp_cstate->bw_control);
+				/*
+				 * For bw check only use the bw if the
+				 * atomic property has been already set
+				 */
+				if (tmp_cstate->bw_control)
+					bw_sum_of_intfs +=
+						tmp_cstate->new_perf.bw_ctl[i];
+			}
+		}
+
+		/* convert bandwidth to kb */
+		bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
+		DPU_DEBUG("calculated bandwidth=%uk\n", bw);
+
+		is_video_mode = dpu_crtc_get_intf_mode(crtc) == INTF_MODE_VIDEO;
+		threshold = (is_video_mode ||
+			_dpu_core_video_mode_intf_connected(crtc)) ?
+			kms->catalog->perf.max_bw_low :
+			kms->catalog->perf.max_bw_high;
+
+		DPU_DEBUG("final threshold bw limit = %d\n", threshold);
+
+		if (!dpu_cstate->bw_control) {
+			DPU_DEBUG("bypass bandwidth check\n");
+		} else if (!threshold) {
+			DPU_ERROR("no bandwidth limits specified\n");
+			return -E2BIG;
+		} else if (bw > threshold) {
+			DPU_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw,
+					threshold);
+			return -E2BIG;
+		}
+	}
+
+	return 0;
+}
+
+static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
+		struct drm_crtc *crtc, u32 bus_id)
+{
+	struct dpu_core_perf_params perf = { { 0 } };
+	enum dpu_crtc_client_type curr_client_type
+					= dpu_crtc_get_client_type(crtc);
+	struct drm_crtc *tmp_crtc;
+	struct dpu_crtc_state *dpu_cstate;
+	int ret = 0;
+
+	drm_for_each_crtc(tmp_crtc, crtc->dev) {
+		if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+			curr_client_type ==
+				dpu_crtc_get_client_type(tmp_crtc)) {
+			dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
+
+			perf.max_per_pipe_ib[bus_id] =
+				max(perf.max_per_pipe_ib[bus_id],
+				dpu_cstate->new_perf.max_per_pipe_ib[bus_id]);
+
+			DPU_DEBUG("crtc=%d bus_id=%d bw=%llu\n",
+				tmp_crtc->base.id, bus_id,
+				dpu_cstate->new_perf.bw_ctl[bus_id]);
+		}
+	}
+	return ret;
+}
+
+/**
+ * dpu_core_perf_crtc_release_bw - request zero bandwidth
+ * @crtc: pointer to a crtc
+ *
+ * Checks a state variable for the crtc; if all pending commit requests
+ * are done, meaning no more bandwidth is needed, the bandwidth request
+ * is released.
+ */
+void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
+{
+	struct drm_crtc *tmp_crtc;
+	struct dpu_crtc *dpu_crtc;
+	struct dpu_crtc_state *dpu_cstate;
+	struct dpu_kms *kms;
+	int i;
+
+	if (!crtc) {
+		DPU_ERROR("invalid crtc\n");
+		return;
+	}
+
+	kms = _dpu_crtc_get_kms(crtc);
+	if (!kms || !kms->catalog) {
+		DPU_ERROR("invalid kms\n");
+		return;
+	}
+
+	dpu_crtc = to_dpu_crtc(crtc);
+	dpu_cstate = to_dpu_crtc_state(crtc->state);
+
+	/* only do this for command mode rt client */
+	if (dpu_crtc_get_intf_mode(crtc) != INTF_MODE_CMD)
+		return;
+
+	/*
+	 * If video interface present, cmd panel bandwidth cannot be
+	 * released.
+	 */
+	drm_for_each_crtc(tmp_crtc, crtc->dev) {
+		if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+				dpu_crtc_get_intf_mode(tmp_crtc) ==
+						INTF_MODE_VIDEO)
+			return;
+	}
+
+	/* Release the bandwidth */
+	if (kms->perf.enable_bw_release) {
+		trace_dpu_cmd_release_bw(crtc->base.id);
+		DPU_DEBUG("Release BW crtc=%d\n", crtc->base.id);
+		for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+			dpu_crtc->cur_perf.bw_ctl[i] = 0;
+			_dpu_core_perf_crtc_update_bus(kms, crtc, i);
+		}
+	}
+}
+
+static int _dpu_core_perf_set_core_clk_rate(struct dpu_kms *kms, u64 rate)
+{
+	struct dss_clk *core_clk = kms->perf.core_clk;
+
+	if (core_clk->max_rate && (rate > core_clk->max_rate))
+		rate = core_clk->max_rate;
+
+	core_clk->rate = rate;
+	return msm_dss_clk_set_rate(core_clk, 1);
+}
+
+static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
+{
+	u64 clk_rate = kms->perf.perf_tune.min_core_clk;
+	struct drm_crtc *crtc;
+	struct dpu_crtc_state *dpu_cstate;
+
+	drm_for_each_crtc(crtc, kms->dev) {
+		if (_dpu_core_perf_crtc_is_power_on(crtc)) {
+			dpu_cstate = to_dpu_crtc_state(crtc->state);
+			clk_rate = max(dpu_cstate->new_perf.core_clk_rate,
+							clk_rate);
+			clk_rate = clk_round_rate(kms->perf.core_clk->clk,
+					clk_rate);
+		}
+	}
+
+	if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED)
+		clk_rate = kms->perf.fix_core_clk_rate;
+
+	DPU_DEBUG("clk:%llu\n", clk_rate);
+
+	return clk_rate;
+}
+
+int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
+		int params_changed, bool stop_req)
+{
+	struct dpu_core_perf_params *new, *old;
+	int update_bus = 0, update_clk = 0;
+	u64 clk_rate = 0;
+	struct dpu_crtc *dpu_crtc;
+	struct dpu_crtc_state *dpu_cstate;
+	int i;
+	struct msm_drm_private *priv;
+	struct dpu_kms *kms;
+	int ret;
+
+	if (!crtc) {
+		DPU_ERROR("invalid crtc\n");
+		return -EINVAL;
+	}
+
+	kms = _dpu_crtc_get_kms(crtc);
+	if (!kms || !kms->catalog) {
+		DPU_ERROR("invalid kms\n");
+		return -EINVAL;
+	}
+	priv = kms->dev->dev_private;
+
+	dpu_crtc = to_dpu_crtc(crtc);
+	dpu_cstate = to_dpu_crtc_state(crtc->state);
+
+	DPU_DEBUG("crtc:%d stop_req:%d core_clk:%llu\n",
+			crtc->base.id, stop_req, kms->perf.core_clk_rate);
+
+	old = &dpu_crtc->cur_perf;
+	new = &dpu_cstate->new_perf;
+
+	if (_dpu_core_perf_crtc_is_power_on(crtc) && !stop_req) {
+		for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+			/*
+			 * cases for bus bandwidth update.
+			 * 1. new bandwidth vote - "ab or ib vote" is higher
+			 *    than current vote for update request.
+			 * 2. new bandwidth vote - "ab or ib vote" is lower
+			 *    than current vote at end of commit or stop.
+			 */
+			if ((params_changed && ((new->bw_ctl[i] >
+						old->bw_ctl[i]) ||
+				  (new->max_per_pipe_ib[i] >
+						old->max_per_pipe_ib[i]))) ||
+			    (!params_changed && ((new->bw_ctl[i] <
+						old->bw_ctl[i]) ||
+				  (new->max_per_pipe_ib[i] <
+						old->max_per_pipe_ib[i])))) {
+				DPU_DEBUG(
+					"crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
+					crtc->base.id, params_changed,
+					new->bw_ctl[i], old->bw_ctl[i]);
+				old->bw_ctl[i] = new->bw_ctl[i];
+				old->max_per_pipe_ib[i] =
+						new->max_per_pipe_ib[i];
+				update_bus |= BIT(i);
+			}
+		}
+
+		if ((params_changed &&
+				(new->core_clk_rate > old->core_clk_rate)) ||
+				(!params_changed &&
+				(new->core_clk_rate < old->core_clk_rate))) {
+			old->core_clk_rate = new->core_clk_rate;
+			update_clk = 1;
+		}
+	} else {
+		DPU_DEBUG("crtc=%d disable\n", crtc->base.id);
+		memset(old, 0, sizeof(*old));
+		memset(new, 0, sizeof(*new));
+		update_bus = ~0;
+		update_clk = 1;
+	}
+	trace_dpu_perf_crtc_update(crtc->base.id,
+				new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
+				new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
+				new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI],
+				new->core_clk_rate, stop_req,
+				update_bus, update_clk);
+
+	for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+		if (update_bus & BIT(i)) {
+			ret = _dpu_core_perf_crtc_update_bus(kms, crtc, i);
+			if (ret) {
+				DPU_ERROR("crtc-%d: failed to update bw vote for bus-%d\n",
+					  crtc->base.id, i);
+				return ret;
+			}
+		}
+	}
+
+	/*
+	 * Update the clock after bandwidth vote to ensure
+	 * bandwidth is available before clock rate is increased.
+	 */
+	if (update_clk) {
+		clk_rate = _dpu_core_perf_get_core_clk_rate(kms);
+
+		trace_dpu_core_perf_update_clk(kms->dev, stop_req, clk_rate);
+
+		ret = _dpu_core_perf_set_core_clk_rate(kms, clk_rate);
+		if (ret) {
+			DPU_ERROR("failed to set %s clock rate %llu\n",
+					kms->perf.core_clk->clk_name, clk_rate);
+			return ret;
+		}
+
+		kms->perf.core_clk_rate = clk_rate;
+		DPU_DEBUG("update clk rate = %lld HZ\n", clk_rate);
+	}
+	return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static ssize_t _dpu_core_perf_mode_write(struct file *file,
+		    const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct dpu_core_perf *perf = file->private_data;
+	struct dpu_perf_cfg *cfg;
+	u32 perf_mode = 0;
+	char buf[10];
+
+	if (!perf)
+		return -ENODEV;
+
+	cfg = &perf->catalog->perf;
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0;	/* end of string */
+
+	if (kstrtouint(buf, 0, &perf_mode))
+		return -EFAULT;
+
+	if (perf_mode >= DPU_PERF_MODE_MAX)
+		return -EFAULT;
+
+	if (perf_mode == DPU_PERF_MODE_FIXED) {
+		DRM_INFO("fix performance mode\n");
+	} else if (perf_mode == DPU_PERF_MODE_MINIMUM) {
+		/* run the driver with max clk and BW vote */
+		perf->perf_tune.min_core_clk = perf->max_core_clk_rate;
+		perf->perf_tune.min_bus_vote =
+				(u64) cfg->max_bw_high * 1000;
+		DRM_INFO("minimum performance mode\n");
+	} else if (perf_mode == DPU_PERF_MODE_NORMAL) {
+		/* reset the perf tune params to 0 */
+		perf->perf_tune.min_core_clk = 0;
+		perf->perf_tune.min_bus_vote = 0;
+		DRM_INFO("normal performance mode\n");
+	}
+	perf->perf_tune.mode = perf_mode;
+
+	return count;
+}
+
+static ssize_t _dpu_core_perf_mode_read(struct file *file,
+			char __user *buff, size_t count, loff_t *ppos)
+{
+	struct dpu_core_perf *perf = file->private_data;
+	int len = 0;
+	char buf[DPU_PERF_MODE_STRING_SIZE] = {'\0'};
+
+	if (!perf)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;	/* the end */
+
+	len = snprintf(buf, sizeof(buf),
+			"mode %d min_mdp_clk %llu min_bus_vote %llu\n",
+			perf->perf_tune.mode,
+			perf->perf_tune.min_core_clk,
+			perf->perf_tune.min_bus_vote);
+	if (len < 0 || len >= sizeof(buf))
+		return 0;
+
+	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;   /* increase offset */
+
+	return len;
+}
+
+static const struct file_operations dpu_core_perf_mode_fops = {
+	.open = simple_open,
+	.read = _dpu_core_perf_mode_read,
+	.write = _dpu_core_perf_mode_write,
+};
+
+static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
+{
+	debugfs_remove_recursive(perf->debugfs_root);
+	perf->debugfs_root = NULL;
+}
+
+int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
+		struct dentry *parent)
+{
+	struct dpu_mdss_cfg *catalog = perf->catalog;
+	struct msm_drm_private *priv;
+	struct dpu_kms *dpu_kms;
+
+	priv = perf->dev->dev_private;
+	if (!priv || !priv->kms) {
+		DPU_ERROR("invalid KMS reference\n");
+		return -EINVAL;
+	}
+
+	dpu_kms = to_dpu_kms(priv->kms);
+
+	perf->debugfs_root = debugfs_create_dir("core_perf", parent);
+	if (!perf->debugfs_root) {
+		DPU_ERROR("failed to create core perf debugfs\n");
+		return -EINVAL;
+	}
+
+	debugfs_create_u64("max_core_clk_rate", 0600, perf->debugfs_root,
+			&perf->max_core_clk_rate);
+	debugfs_create_u64("core_clk_rate", 0600, perf->debugfs_root,
+			&perf->core_clk_rate);
+	debugfs_create_u32("enable_bw_release", 0600, perf->debugfs_root,
+			(u32 *)&perf->enable_bw_release);
+	debugfs_create_u32("threshold_low", 0600, perf->debugfs_root,
+			(u32 *)&catalog->perf.max_bw_low);
+	debugfs_create_u32("threshold_high", 0600, perf->debugfs_root,
+			(u32 *)&catalog->perf.max_bw_high);
+	debugfs_create_u32("min_core_ib", 0600, perf->debugfs_root,
+			(u32 *)&catalog->perf.min_core_ib);
+	debugfs_create_u32("min_llcc_ib", 0600, perf->debugfs_root,
+			(u32 *)&catalog->perf.min_llcc_ib);
+	debugfs_create_u32("min_dram_ib", 0600, perf->debugfs_root,
+			(u32 *)&catalog->perf.min_dram_ib);
+	debugfs_create_file("perf_mode", 0600, perf->debugfs_root,
+			(u32 *)perf, &dpu_core_perf_mode_fops);
+	debugfs_create_u64("fix_core_clk_rate", 0600, perf->debugfs_root,
+			&perf->fix_core_clk_rate);
+	debugfs_create_u64("fix_core_ib_vote", 0600, perf->debugfs_root,
+			&perf->fix_core_ib_vote);
+	debugfs_create_u64("fix_core_ab_vote", 0600, perf->debugfs_root,
+			&perf->fix_core_ab_vote);
+
+	return 0;
+}
+#else
+static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
+{
+}
+
+int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
+		struct dentry *parent)
+{
+	return 0;
+}
+#endif
+
+void dpu_core_perf_destroy(struct dpu_core_perf *perf)
+{
+	if (!perf) {
+		DPU_ERROR("invalid parameters\n");
+		return;
+	}
+
+	dpu_core_perf_debugfs_destroy(perf);
+	perf->max_core_clk_rate = 0;
+	perf->core_clk = NULL;
+	perf->phandle = NULL;
+	perf->catalog = NULL;
+	perf->dev = NULL;
+}
+
+int dpu_core_perf_init(struct dpu_core_perf *perf,
+		struct drm_device *dev,
+		struct dpu_mdss_cfg *catalog,
+		struct dpu_power_handle *phandle,
+		struct dss_clk *core_clk)
+{
+	perf->dev = dev;
+	perf->catalog = catalog;
+	perf->phandle = phandle;
+	perf->core_clk = core_clk;
+
+	perf->max_core_clk_rate = core_clk->max_rate;
+	if (!perf->max_core_clk_rate) {
+		DPU_DEBUG("optional max core clk rate, use default\n");
+		perf->max_core_clk_rate = DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE;
+	}
+
+	return 0;
+}
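
To make the check in dpu_core_perf_crtc_check() above concrete: it sums
the bandwidth votes of all powered-on CRTCs of the same client type and
rejects the new state with -E2BIG once the total crosses the catalog
threshold (max_bw_low when a video-mode interface is active, max_bw_high
otherwise). A stand-alone sketch of that arithmetic, with made-up
threshold values:

       #include <stdbool.h>
       #include <stdint.h>
       #include <stdio.h>

       /* illustrative limits in kB/s, standing in for
        * catalog->perf.max_bw_low / max_bw_high */
       #define MAX_BW_LOW_KB   6800000u
       #define MAX_BW_HIGH_KB  7200000u

       /* returns 0 if the aggregate vote fits, -1 otherwise */
       static int bw_check(const uint64_t *bw_ctl, int num_crtcs,
                           bool video_intf_active)
       {
               uint64_t sum = 0;
               uint32_t bw_kb, threshold;
               int i;

               for (i = 0; i < num_crtcs; i++)
                       sum += bw_ctl[i];

               /* DIV_ROUND_UP_ULL(sum, 1000): bytes/s -> kB/s */
               bw_kb = (uint32_t)((sum + 999) / 1000);
               threshold = video_intf_active ? MAX_BW_LOW_KB : MAX_BW_HIGH_KB;

               if (bw_kb > threshold) {
                       fprintf(stderr, "exceeds bandwidth: %uk > %uk\n",
                               bw_kb, threshold);
                       return -1;      /* the driver returns -E2BIG */
               }
               return 0;
       }
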
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
new file mode 100644
index 000000000000..fbcbe0c7527a
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
@@ -0,0 +1,133 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_CORE_PERF_H_
+#define _DPU_CORE_PERF_H_
+
+#include <linux/types.h>
+#include <linux/dcache.h>
+#include <linux/mutex.h>
+#include <drm/drm_crtc.h>
+
+#include "dpu_hw_catalog.h"
+#include "dpu_power_handle.h"
+
+#define	DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE	412500000
+
+/**
+ * struct dpu_core_perf_params - definition of performance parameters
+ * @max_per_pipe_ib: maximum instantaneous bandwidth request
+ * @bw_ctl: arbitrated bandwidth request
+ * @core_clk_rate: core clock rate request
+ */
+struct dpu_core_perf_params {
+	u64 max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MAX];
+	u64 bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MAX];
+	u64 core_clk_rate;
+};
+
+/**
+ * struct dpu_core_perf_tune - definition of performance tuning control
+ * @mode: performance mode
+ * @min_core_clk: minimum core clock
+ * @min_bus_vote: minimum bus vote
+ */
+struct dpu_core_perf_tune {
+	u32 mode;
+	u64 min_core_clk;
+	u64 min_bus_vote;
+};
+
+/**
+ * struct dpu_core_perf - definition of core performance context
+ * @dev: Pointer to drm device
+ * @debugfs_root: top level debug folder
+ * @catalog: Pointer to catalog configuration
+ * @phandle: Pointer to power handler
+ * @core_clk: Pointer to core clock structure
+ * @core_clk_rate: current core clock rate
+ * @max_core_clk_rate: maximum allowable core clock rate
+ * @perf_tune: debug control for performance tuning
+ * @enable_bw_release: debug control for bandwidth release
+ * @fix_core_clk_rate: fixed core clock request in Hz used in mode 2
+ * @fix_core_ib_vote: fixed core ib vote in bps used in mode 2
+ * @fix_core_ab_vote: fixed core ab vote in bps used in mode 2
+ */
+struct dpu_core_perf {
+	struct drm_device *dev;
+	struct dentry *debugfs_root;
+	struct dpu_mdss_cfg *catalog;
+	struct dpu_power_handle *phandle;
+	struct dss_clk *core_clk;
+	u64 core_clk_rate;
+	u64 max_core_clk_rate;
+	struct dpu_core_perf_tune perf_tune;
+	u32 enable_bw_release;
+	u64 fix_core_clk_rate;
+	u64 fix_core_ib_vote;
+	u64 fix_core_ab_vote;
+};
+
+/**
+ * dpu_core_perf_crtc_check - validate performance of the given crtc state
+ * @crtc: Pointer to crtc
+ * @state: Pointer to new crtc state
+ * return: zero if success, or error code otherwise
+ */
+int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
+		struct drm_crtc_state *state);
+
+/**
+ * dpu_core_perf_crtc_update - update performance of the given crtc
+ * @crtc: Pointer to crtc
+ * @params_changed: true if crtc parameters are modified
+ * @stop_req: true if this is a stop request
+ * return: zero if success, or error code otherwise
+ */
+int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
+		int params_changed, bool stop_req);
+
+/**
+ * dpu_core_perf_crtc_release_bw - release bandwidth of the given crtc
+ * @crtc: Pointer to crtc
+ */
+void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc);
+
+/**
+ * dpu_core_perf_destroy - destroy the given core performance context
+ * @perf: Pointer to core performance context
+ */
+void dpu_core_perf_destroy(struct dpu_core_perf *perf);
+
+/**
+ * dpu_core_perf_init - initialize the given core performance context
+ * @perf: Pointer to core performance context
+ * @dev: Pointer to drm device
+ * @catalog: Pointer to catalog
+ * @phandle: Pointer to power handle
+ * @core_clk: pointer to core clock
+ */
+int dpu_core_perf_init(struct dpu_core_perf *perf,
+		struct drm_device *dev,
+		struct dpu_mdss_cfg *catalog,
+		struct dpu_power_handle *phandle,
+		struct dss_clk *core_clk);
+
+/**
+ * dpu_core_perf_debugfs_init - initialize debugfs for core performance context
+ * @perf: Pointer to core performance context
+ * @parent: Pointer to parent debugfs directory
+ * return: zero if success, or error code otherwise
+ */
+int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
+		struct dentry *parent);
+
+#endif /* _DPU_CORE_PERF_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
new file mode 100644
index 000000000000..d17128222f45
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -0,0 +1,2523 @@
+/*
+ * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark at gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/sort.h>
+#include <linux/debugfs.h>
+#include <linux/ktime.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_flip_work.h>
+#include <drm/drm_rect.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_crtc.h"
+#include "dpu_plane.h"
+#include "dpu_encoder.h"
+#include "dpu_vbif.h"
+#include "dpu_power_handle.h"
+#include "dpu_core_perf.h"
+#include "dpu_trace.h"
+
+#define DPU_DRM_BLEND_OP_NOT_DEFINED    0
+#define DPU_DRM_BLEND_OP_OPAQUE         1
+#define DPU_DRM_BLEND_OP_PREMULTIPLIED  2
+#define DPU_DRM_BLEND_OP_COVERAGE       3
+#define DPU_DRM_BLEND_OP_MAX            4
+
+/* layer mixer index on dpu_crtc */
+#define LEFT_MIXER 0
+#define RIGHT_MIXER 1
+
+#define MISR_BUFF_SIZE			256
+
+static inline struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
+{
+	struct msm_drm_private *priv;
+
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+		DPU_ERROR("invalid crtc\n");
+		return NULL;
+	}
+	priv = crtc->dev->dev_private;
+	if (!priv || !priv->kms) {
+		DPU_ERROR("invalid kms\n");
+		return NULL;
+	}
+
+	return to_dpu_kms(priv->kms);
+}
+
+static inline int _dpu_crtc_power_enable(struct dpu_crtc *dpu_crtc, bool enable)
+{
+	struct drm_crtc *crtc;
+	struct msm_drm_private *priv;
+	struct dpu_kms *dpu_kms;
+
+	if (!dpu_crtc) {
+		DPU_ERROR("invalid dpu crtc\n");
+		return -EINVAL;
+	}
+
+	crtc = &dpu_crtc->base;
+	if (!crtc->dev || !crtc->dev->dev_private) {
+		DPU_ERROR("invalid drm device\n");
+		return -EINVAL;
+	}
+
+	priv = crtc->dev->dev_private;
+	if (!priv->kms) {
+		DPU_ERROR("invalid kms\n");
+		return -EINVAL;
+	}
+
+	dpu_kms = to_dpu_kms(priv->kms);
+
+	if (enable)
+		pm_runtime_get_sync(&dpu_kms->pdev->dev);
+	else
+		pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+	return 0;
+}
+
+/**
+ * _dpu_crtc_rp_to_crtc - get crtc from resource pool object
+ * @rp: Pointer to resource pool
+ * return: Pointer to drm crtc if success; null otherwise
+ */
+static struct drm_crtc *_dpu_crtc_rp_to_crtc(struct dpu_crtc_respool *rp)
+{
+	if (!rp)
+		return NULL;
+
+	return container_of(rp, struct dpu_crtc_state, rp)->base.crtc;
+}
+
+/**
+ * _dpu_crtc_rp_reclaim - reclaim unused, or all if forced, resources in pool
+ * @rp: Pointer to resource pool
+ * @force: True to reclaim all resources; otherwise, reclaim only unused ones
+ * return: None
+ */
+static void _dpu_crtc_rp_reclaim(struct dpu_crtc_respool *rp, bool force)
+{
+	struct dpu_crtc_res *res, *next;
+	struct drm_crtc *crtc;
+
+	crtc = _dpu_crtc_rp_to_crtc(rp);
+	if (!crtc) {
+		DPU_ERROR("invalid crtc\n");
+		return;
+	}
+
+	DPU_DEBUG("crtc%d.%u %s\n", crtc->base.id, rp->sequence_id,
+			force ? "destroy" : "free_unused");
+
+	list_for_each_entry_safe(res, next, &rp->res_list, list) {
+		if (!force && !(res->flags & DPU_CRTC_RES_FLAG_FREE))
+			continue;
+		DPU_DEBUG("crtc%d.%u reclaim res:0x%x/0x%llx/%pK/%d\n",
+				crtc->base.id, rp->sequence_id,
+				res->type, res->tag, res->val,
+				atomic_read(&res->refcount));
+		list_del(&res->list);
+		if (res->ops.put)
+			res->ops.put(res->val);
+		kfree(res);
+	}
+}
+
+/**
+ * _dpu_crtc_rp_free_unused - free unused resource in pool
+ * @rp: Pointer to resource pool
+ * return: none
+ */
+static void _dpu_crtc_rp_free_unused(struct dpu_crtc_respool *rp)
+{
+	mutex_lock(rp->rp_lock);
+	_dpu_crtc_rp_reclaim(rp, false);
+	mutex_unlock(rp->rp_lock);
+}
+
+/**
+ * _dpu_crtc_rp_destroy - destroy resource pool
+ * @rp: Pointer to resource pool
+ * return: None
+ */
+static void _dpu_crtc_rp_destroy(struct dpu_crtc_respool *rp)
+{
+	mutex_lock(rp->rp_lock);
+	list_del_init(&rp->rp_list);
+	_dpu_crtc_rp_reclaim(rp, true);
+	mutex_unlock(rp->rp_lock);
+}
+
+/**
+ * _dpu_crtc_hw_blk_get - get callback for hardware block
+ * @val: Resource handle
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * return: Resource handle
+ */
+static void *_dpu_crtc_hw_blk_get(void *val, u32 type, u64 tag)
+{
+	DPU_DEBUG("res:%d/0x%llx/%pK\n", type, tag, val);
+	return dpu_hw_blk_get(val, type, tag);
+}
+
+/**
+ * _dpu_crtc_hw_blk_put - put callback for hardware block
+ * @val: Resource handle
+ * return: None
+ */
+static void _dpu_crtc_hw_blk_put(void *val)
+{
+	DPU_DEBUG("res://%pK\n", val);
+	dpu_hw_blk_put(val);
+}
+
+/**
+ * _dpu_crtc_rp_duplicate - duplicate resource pool and reset reference count
+ * @rp: Pointer to original resource pool
+ * @dup_rp: Pointer to duplicated resource pool
+ * return: None
+ */
+static void _dpu_crtc_rp_duplicate(struct dpu_crtc_respool *rp,
+		struct dpu_crtc_respool *dup_rp)
+{
+	struct dpu_crtc_res *res, *dup_res;
+	struct drm_crtc *crtc;
+
+	if (!rp || !dup_rp || !rp->rp_head) {
+		DPU_ERROR("invalid resource pool\n");
+		return;
+	}
+
+	crtc = _dpu_crtc_rp_to_crtc(rp);
+	if (!crtc) {
+		DPU_ERROR("invalid crtc\n");
+		return;
+	}
+
+	DPU_DEBUG("crtc%d.%u duplicate\n", crtc->base.id, rp->sequence_id);
+
+	mutex_lock(rp->rp_lock);
+	dup_rp->sequence_id = rp->sequence_id + 1;
+	INIT_LIST_HEAD(&dup_rp->res_list);
+	dup_rp->ops = rp->ops;
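+	/*
+	 * Duplicated entries start with a zero refcount and are marked FREE,
+	 * so the new state may re-acquire them or have them reclaimed.
+	 */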
+	list_for_each_entry(res, &rp->res_list, list) {
+		dup_res = kzalloc(sizeof(struct dpu_crtc_res), GFP_KERNEL);
+		if (!dup_res) {
+			mutex_unlock(rp->rp_lock);
+			return;
+		}
+		INIT_LIST_HEAD(&dup_res->list);
+		atomic_set(&dup_res->refcount, 0);
+		dup_res->type = res->type;
+		dup_res->tag = res->tag;
+		dup_res->val = res->val;
+		dup_res->ops = res->ops;
+		dup_res->flags = DPU_CRTC_RES_FLAG_FREE;
+		DPU_DEBUG("crtc%d.%u dup res:0x%x/0x%llx/%pK/%d\n",
+				crtc->base.id, dup_rp->sequence_id,
+				dup_res->type, dup_res->tag, dup_res->val,
+				atomic_read(&dup_res->refcount));
+		list_add_tail(&dup_res->list, &dup_rp->res_list);
+		if (dup_res->ops.get)
+			dup_res->ops.get(dup_res->val, 0, -1);
+	}
+
+	dup_rp->rp_lock = rp->rp_lock;
+	dup_rp->rp_head = rp->rp_head;
+	INIT_LIST_HEAD(&dup_rp->rp_list);
+	list_add_tail(&dup_rp->rp_list, rp->rp_head);
+	mutex_unlock(rp->rp_lock);
+}
+
+/**
+ * _dpu_crtc_rp_reset - reset resource pool after allocation
+ * @rp: Pointer to original resource pool
+ * @rp_lock: Pointer to serialization resource pool lock
+ * @rp_head: Pointer to crtc resource pool head
+ * return: None
+ */
+static void _dpu_crtc_rp_reset(struct dpu_crtc_respool *rp,
+		struct mutex *rp_lock, struct list_head *rp_head)
+{
+	if (!rp || !rp_lock || !rp_head) {
+		DPU_ERROR("invalid resource pool\n");
+		return;
+	}
+
+	mutex_lock(rp_lock);
+	rp->rp_lock = rp_lock;
+	rp->rp_head = rp_head;
+	INIT_LIST_HEAD(&rp->rp_list);
+	rp->sequence_id = 0;
+	INIT_LIST_HEAD(&rp->res_list);
+	rp->ops.get = _dpu_crtc_hw_blk_get;
+	rp->ops.put = _dpu_crtc_hw_blk_put;
+	list_add_tail(&rp->rp_list, rp->rp_head);
+	mutex_unlock(rp_lock);
+}
+
+/**
+ * _dpu_crtc_rp_add_no_lock - add given resource to resource pool without lock
+ * @rp: Pointer to original resource pool
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * @val: Resource handle
+ * @ops: Resource callback operations
+ * return: 0 if success; error code otherwise
+ */
+static int _dpu_crtc_rp_add_no_lock(struct dpu_crtc_respool *rp, u32 type,
+		u64 tag, void *val, struct dpu_crtc_res_ops *ops)
+{
+	struct dpu_crtc_res *res;
+	struct drm_crtc *crtc;
+
+	if (!rp || !ops) {
+		DPU_ERROR("invalid resource pool/ops\n");
+		return -EINVAL;
+	}
+
+	crtc = _dpu_crtc_rp_to_crtc(rp);
+	if (!crtc) {
+		DPU_ERROR("invalid crtc\n");
+		return -EINVAL;
+	}
+
+	list_for_each_entry(res, &rp->res_list, list) {
+		if (res->type != type || res->tag != tag)
+			continue;
+		DPU_ERROR("crtc%d.%u already exists res:0x%x/0x%llx/%pK/%d\n",
+				crtc->base.id, rp->sequence_id,
+				res->type, res->tag, res->val,
+				atomic_read(&res->refcount));
+		return -EEXIST;
+	}
+	res = kzalloc(sizeof(struct dpu_crtc_res), GFP_KERNEL);
+	if (!res)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&res->list);
+	atomic_set(&res->refcount, 1);
+	res->type = type;
+	res->tag = tag;
+	res->val = val;
+	res->ops = *ops;
+	list_add_tail(&res->list, &rp->res_list);
+	DPU_DEBUG("crtc%d.%u added res:0x%x/0x%llx\n",
+			crtc->base.id, rp->sequence_id, type, tag);
+	return 0;
+}
+
+/**
+ * _dpu_crtc_rp_add - add given resource to resource pool
+ * @rp: Pointer to original resource pool
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * @val: Resource handle
+ * @ops: Resource callback operations
+ * return: 0 if success; error code otherwise
+ */
+static int _dpu_crtc_rp_add(struct dpu_crtc_respool *rp, u32 type, u64 tag,
+		void *val, struct dpu_crtc_res_ops *ops)
+{
+	int rc;
+
+	if (!rp) {
+		DPU_ERROR("invalid resource pool\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(rp->rp_lock);
+	rc = _dpu_crtc_rp_add_no_lock(rp, type, tag, val, ops);
+	mutex_unlock(rp->rp_lock);
+	return rc;
+}
+
+/**
+ * _dpu_crtc_rp_get - look up a resource in the given resource pool and obtain
+ *	it if available; otherwise, obtain the resource from the global pool
+ * @rp: Pointer to original resource pool
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * return: Resource handle if success; pointer error or null otherwise
+ */
+static void *_dpu_crtc_rp_get(struct dpu_crtc_respool *rp, u32 type, u64 tag)
+{
+	struct dpu_crtc_respool *old_rp;
+	struct dpu_crtc_res *res;
+	void *val = NULL;
+	int rc;
+	struct drm_crtc *crtc;
+
+	if (!rp) {
+		DPU_ERROR("invalid resource pool\n");
+		return NULL;
+	}
+
+	crtc = _dpu_crtc_rp_to_crtc(rp);
+	if (!crtc) {
+		DPU_ERROR("invalid crtc\n");
+		return NULL;
+	}
+
+	mutex_lock(rp->rp_lock);
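+	/* first pass: look for an exact type+tag match in this state's pool */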
+	list_for_each_entry(res, &rp->res_list, list) {
+		if (res->type != type || res->tag != tag)
+			continue;
+		DPU_DEBUG("crtc%d.%u found res:0x%x/0x%llx/%pK/%d\n",
+				crtc->base.id, rp->sequence_id,
+				res->type, res->tag, res->val,
+				atomic_read(&res->refcount));
+		atomic_inc(&res->refcount);
+		res->flags &= ~DPU_CRTC_RES_FLAG_FREE;
+		mutex_unlock(rp->rp_lock);
+		return res->val;
+	}
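+	/* second pass: retag an unused (FREE) resource of the same type */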
+	list_for_each_entry(res, &rp->res_list, list) {
+		if (res->type != type || !(res->flags & DPU_CRTC_RES_FLAG_FREE))
+			continue;
+		DPU_DEBUG("crtc%d.%u retag res:0x%x/0x%llx/%pK/%d\n",
+				crtc->base.id, rp->sequence_id,
+				res->type, res->tag, res->val,
+				atomic_read(&res->refcount));
+		atomic_inc(&res->refcount);
+		res->tag = tag;
+		res->flags &= ~DPU_CRTC_RES_FLAG_FREE;
+		mutex_unlock(rp->rp_lock);
+		return res->val;
+	}
+	/* not in this rp, try to grab from global pool */
+	if (rp->ops.get)
+		val = rp->ops.get(NULL, type, -1);
+	if (!IS_ERR_OR_NULL(val))
+		goto add_res;
+	/*
+	 * Search older resource pools for hw blk with matching type,
+	 * necessary when resource is being used by this object,
+	 * but in previous states not yet cleaned up.
+	 *
+	 * This enables searching of all resources currently owned
+	 * by this crtc even though the resource might not be used
+	 * in the current atomic state. This allows those resources
+	 * to be re-acquired by the new atomic state immediately
+	 * without waiting for the resources to be fully released.
+	 */
+	if (type < DPU_HW_BLK_MAX) {
+		list_for_each_entry(old_rp, rp->rp_head, rp_list) {
+			if (old_rp == rp)
+				continue;
+
+			list_for_each_entry(res, &old_rp->res_list, list) {
+				if (res->type != type)
+					continue;
+				DRM_DEBUG_KMS("crtc%d.%u found res:0x%x//%pK/ in crtc%d.%u\n",
+					      crtc->base.id, rp->sequence_id,
+					      res->type, res->val,
+					      crtc->base.id,
+					      old_rp->sequence_id);
+				if (res->ops.get)
+					res->ops.get(res->val, 0, -1);
+				val = res->val;
+				break;
+			}
+
+			if (!IS_ERR_OR_NULL(val))
+				break;
+		}
+	}
+	if (IS_ERR_OR_NULL(val)) {
+		DPU_DEBUG("crtc%d.%u failed to get res:0x%x//\n",
+				crtc->base.id, rp->sequence_id, type);
+		mutex_unlock(rp->rp_lock);
+		return NULL;
+	}
+add_res:
+	rc = _dpu_crtc_rp_add_no_lock(rp, type, tag, val, &rp->ops);
+	if (rc) {
+		DPU_ERROR("crtc%d.%u failed to add res:0x%x/0x%llx\n",
+				crtc->base.id, rp->sequence_id, type, tag);
+		if (rp->ops.put)
+			rp->ops.put(val);
+		val = NULL;
+	}
+	mutex_unlock(rp->rp_lock);
+	return val;
+}
+
+/**
+ * _dpu_crtc_rp_put - return given resource to resource pool
+ * @rp: Pointer to original resource pool
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * return: None
+ */
+static void _dpu_crtc_rp_put(struct dpu_crtc_respool *rp, u32 type, u64 tag)
+{
+	struct dpu_crtc_res *res, *next;
+	struct drm_crtc *crtc;
+
+	if (!rp) {
+		DPU_ERROR("invalid resource pool\n");
+		return;
+	}
+
+	crtc = _dpu_crtc_rp_to_crtc(rp);
+	if (!crtc) {
+		DPU_ERROR("invalid crtc\n");
+		return;
+	}
+
+	mutex_lock(rp->rp_lock);
+	list_for_each_entry_safe(res, next, &rp->res_list, list) {
+		if (res->type != type || res->tag != tag)
+			continue;
+		DPU_DEBUG("crtc%d.%u found res:0x%x/0x%llx/%pK/%d\n",
+				crtc->base.id, rp->sequence_id,
+				res->type, res->tag, res->val,
+				atomic_read(&res->refcount));
+		if (res->flags & DPU_CRTC_RES_FLAG_FREE)
+			DPU_ERROR(
+				"crtc%d.%u already free res:0x%x/0x%llx/%pK/%d\n",
+					crtc->base.id, rp->sequence_id,
+					res->type, res->tag, res->val,
+					atomic_read(&res->refcount));
+		else if (atomic_dec_return(&res->refcount) == 0)
+			res->flags |= DPU_CRTC_RES_FLAG_FREE;
+
+		mutex_unlock(rp->rp_lock);
+		return;
+	}
+	DPU_ERROR("crtc%d.%u not found res:0x%x/0x%llx\n",
+			crtc->base.id, rp->sequence_id, type, tag);
+	mutex_unlock(rp->rp_lock);
+}
+
+int dpu_crtc_res_add(struct drm_crtc_state *state, u32 type, u64 tag,
+		void *val, struct dpu_crtc_res_ops *ops)
+{
+	struct dpu_crtc_respool *rp;
+
+	if (!state) {
+		DPU_ERROR("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	rp = &to_dpu_crtc_state(state)->rp;
+	return _dpu_crtc_rp_add(rp, type, tag, val, ops);
+}
+
+void *dpu_crtc_res_get(struct drm_crtc_state *state, u32 type, u64 tag)
+{
+	struct dpu_crtc_respool *rp;
+	void *val;
+
+	if (!state) {
+		DPU_ERROR("invalid parameters\n");
+		return NULL;
+	}
+
+	rp = &to_dpu_crtc_state(state)->rp;
+	val = _dpu_crtc_rp_get(rp, type, tag);
+	if (IS_ERR(val)) {
+		DPU_ERROR("failed to get res type:0x%x:0x%llx\n",
+				type, tag);
+		return NULL;
+	}
+
+	return val;
+}
+
+void dpu_crtc_res_put(struct drm_crtc_state *state, u32 type, u64 tag)
+{
+	struct dpu_crtc_respool *rp;
+
+	if (!state) {
+		DPU_ERROR("invalid parameters\n");
+		return;
+	}
+
+	rp = &to_dpu_crtc_state(state)->rp;
+	_dpu_crtc_rp_put(rp, type, tag);
+}
+
+static void dpu_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct dpu_crtc *dpu_crtc;
+
+	DPU_DEBUG("\n");
+
+	if (!crtc)
+		return;
+
+	dpu_crtc = to_dpu_crtc(crtc);
+
+	dpu_crtc->phandle = NULL;
+
+	drm_crtc_cleanup(crtc);
+	mutex_destroy(&dpu_crtc->crtc_lock);
+	kfree(dpu_crtc);
+}
+
+static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
+		struct dpu_plane_state *pstate)
+{
+	struct dpu_hw_mixer *lm = mixer->hw_lm;
+
+	/* default to opaque blending */
+	lm->ops.setup_blend_config(lm, pstate->stage, 0xff, 0,
+				DPU_BLEND_FG_ALPHA_FG_CONST |
+				DPU_BLEND_BG_ALPHA_BG_CONST);
+}
+
+static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
+{
+	struct dpu_crtc *dpu_crtc;
+	struct dpu_crtc_state *crtc_state;
+	int lm_idx, lm_horiz_position;
+
+	dpu_crtc = to_dpu_crtc(crtc);
+	crtc_state = to_dpu_crtc_state(crtc->state);
+
+	lm_horiz_position = 0;
+	for (lm_idx = 0; lm_idx < dpu_crtc->num_mixers; lm_idx++) {
+		const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
+		struct dpu_hw_mixer *hw_lm = dpu_crtc->mixers[lm_idx].hw_lm;
+		struct dpu_hw_mixer_cfg cfg;
+
+		if (!lm_roi || !drm_rect_visible(lm_roi))
+			continue;
+
+		cfg.out_width = drm_rect_width(lm_roi);
+		cfg.out_height = drm_rect_height(lm_roi);
+		cfg.right_mixer = lm_horiz_position++;
+		cfg.flags = 0;
+		hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
+	}
+}
+
+static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
+	struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
+{
+	struct drm_plane *plane;
+	struct drm_framebuffer *fb;
+	struct drm_plane_state *state;
+	struct dpu_crtc_state *cstate;
+	struct dpu_plane_state *pstate = NULL;
+	struct dpu_format *format;
+	struct dpu_hw_ctl *ctl;
+	struct dpu_hw_mixer *lm;
+	struct dpu_hw_stage_cfg *stage_cfg;
+
+	u32 flush_mask;
+	uint32_t stage_idx, lm_idx;
+	int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
+	bool bg_alpha_enable = false;
+
+	if (!dpu_crtc || !mixer) {
+		DPU_ERROR("invalid dpu_crtc or mixer\n");
+		return;
+	}
+
+	ctl = mixer->hw_ctl;
+	lm = mixer->hw_lm;
+	stage_cfg = &dpu_crtc->stage_cfg;
+	cstate = to_dpu_crtc_state(crtc->state);
+
+	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		state = plane->state;
+		if (!state)
+			continue;
+
+		pstate = to_dpu_plane_state(state);
+		fb = state->fb;
+
+		dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);
+
+		DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
+				crtc->base.id,
+				pstate->stage,
+				plane->base.id,
+				dpu_plane_pipe(plane) - SSPP_VIG0,
+				state->fb ? state->fb->base.id : -1);
+
+		format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));
+		if (!format) {
+			DPU_ERROR("invalid format\n");
+			return;
+		}
+
+		if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
+			bg_alpha_enable = true;
+
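+		/*
+		 * zpos_cnt tracks how many planes already occupy this blend
+		 * stage; stage_idx selects the pipe slot within the stage.
+		 */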
+		stage_idx = zpos_cnt[pstate->stage]++;
+		stage_cfg->stage[pstate->stage][stage_idx] =
+					dpu_plane_pipe(plane);
+		stage_cfg->multirect_index[pstate->stage][stage_idx] =
+					pstate->multirect_index;
+
+		trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
+					   state, pstate, stage_idx,
+					   dpu_plane_pipe(plane) - SSPP_VIG0,
+					   format->base.pixel_format,
+					   fb ? fb->modifier : 0);
+
+		/* blend config update */
+		for (lm_idx = 0; lm_idx < dpu_crtc->num_mixers; lm_idx++) {
+			_dpu_crtc_setup_blend_cfg(mixer + lm_idx, pstate);
+
+			mixer[lm_idx].flush_mask |= flush_mask;
+
+			if (bg_alpha_enable && !format->alpha_enable)
+				mixer[lm_idx].mixer_op_mode = 0;
+			else
+				mixer[lm_idx].mixer_op_mode |=
+						1 << pstate->stage;
+		}
+	}
+
+	_dpu_crtc_program_lm_output_roi(crtc);
+}
+
+/**
+ * _dpu_crtc_blend_setup - configure crtc mixers
+ * @crtc: Pointer to drm crtc structure
+ */
+static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
+{
+	struct dpu_crtc *dpu_crtc;
+	struct dpu_crtc_state *dpu_crtc_state;
+	struct dpu_crtc_mixer *mixer;
+	struct dpu_hw_ctl *ctl;
+	struct dpu_hw_mixer *lm;
+
+	int i;
+
+	if (!crtc)
+		return;
+
+	dpu_crtc = to_dpu_crtc(crtc);
+	dpu_crtc_state = to_dpu_crtc_state(crtc->state);
+	mixer = dpu_crtc->mixers;
+
+	DPU_DEBUG("%s\n", dpu_crtc->name);
+
+	if (dpu_crtc->num_mixers > CRTC_DUAL_MIXERS) {
+		DPU_ERROR("invalid number mixers: %d\n", dpu_crtc->num_mixers);
+		return;
+	}
+
+	for (i = 0; i < dpu_crtc->num_mixers; i++) {
+		if (!mixer[i].hw_lm || !mixer[i].hw_ctl) {
+			DPU_ERROR("invalid lm or ctl assigned to mixer\n");
+			return;
+		}
+		mixer[i].mixer_op_mode = 0;
+		mixer[i].flush_mask = 0;
+		if (mixer[i].hw_ctl->ops.clear_all_blendstages)
+			mixer[i].hw_ctl->ops.clear_all_blendstages(
+					mixer[i].hw_ctl);
+	}
+
+	/* initialize stage cfg */
+	memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));
+
+	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);
+
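+	/*
+	 * Program each mixer's alpha-out mode and accumulate its flush bits
+	 * into the CTL's pending-flush mask before writing the stage config.
+	 */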
+	for (i = 0; i < dpu_crtc->num_mixers; i++) {
+		ctl = mixer[i].hw_ctl;
+		lm = mixer[i].hw_lm;
+
+		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
+
+		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
+			mixer[i].hw_lm->idx);
+
+		/* stage config flush mask */
+		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
+
+		DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
+			mixer[i].hw_lm->idx - LM_0,
+			mixer[i].mixer_op_mode,
+			ctl->idx - CTL_0,
+			mixer[i].flush_mask);
+
+		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
+			&dpu_crtc->stage_cfg);
+	}
+}
+
+/**
+ * _dpu_crtc_complete_flip - signal pending page_flip events
+ * @crtc: Pointer to drm crtc structure
+ * @file: Pointer to drm file
+ *
+ * Pending vblank events are added to the vblank_event_list so that the next
+ * vblank interrupt signals them. PAGE_FLIP events, however, are not handled
+ * through the vblank_event_list; this function signals any pending PAGE_FLIP
+ * event that was requested through DRM_IOCTL_MODE_PAGE_FLIP and cached in
+ * dpu_crtc->event. If @file is non-NULL, this is the potential cancel-flip
+ * path from preclose, and only events belonging to @file are signaled.
+ */
+static void _dpu_crtc_complete_flip(struct drm_crtc *crtc,
+		struct drm_file *file)
+{
+	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_pending_vblank_event *event;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	event = dpu_crtc->event;
+	if (event) {
+		/* if regular vblank case (!file) or if cancel-flip from
+		 * preclose on file that requested flip, then send the
+		 * event:
+		 */
+		if (!file || (event->base.file_priv == file)) {
+			dpu_crtc->event = NULL;
+			DRM_DEBUG_VBL("%s: send event: %pK\n",
+						dpu_crtc->name, event);
+			trace_dpu_crtc_complete_flip(DRMID(crtc));
+			drm_crtc_send_vblank_event(crtc, event);
+		}
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
+{
+	struct drm_encoder *encoder;
+
+	if (!crtc || !crtc->dev) {
+		DPU_ERROR("invalid crtc\n");
+		return INTF_MODE_NONE;
+	}
+
+	drm_for_each_encoder(encoder, crtc->dev)
+		if (encoder->crtc == crtc)
+			return dpu_encoder_get_intf_mode(encoder);
+
+	return INTF_MODE_NONE;
+}
+
+static void dpu_crtc_vblank_cb(void *data)
+{
+	struct drm_crtc *crtc = (struct drm_crtc *)data;
+	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+
+	/* keep statistics on vblank callback - with auto reset via debugfs */
+	if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
+		dpu_crtc->vblank_cb_time = ktime_get();
+	else
+		dpu_crtc->vblank_cb_count++;
+	_dpu_crtc_complete_flip(crtc, NULL);
+	drm_crtc_handle_vblank(crtc);
+	trace_dpu_crtc_vblank_cb(DRMID(crtc));
+}
+
+static void dpu_crtc_frame_event_work(struct kthread_work *work)
+{
+	struct msm_drm_private *priv;
+	struct dpu_crtc_frame_event *fevent;
+	struct drm_crtc *crtc;
+	struct dpu_crtc *dpu_crtc;
+	struct dpu_kms *dpu_kms;
+	unsigned long flags;
+	bool frame_done = false;
+
+	if (!work) {
+		DPU_ERROR("invalid work handle\n");
+		return;
+	}
+
+	fevent = container_of(work, struct dpu_crtc_frame_event, work);
+	if (!fevent->crtc || !fevent->crtc->state) {
+		DPU_ERROR("invalid crtc\n");
+		return;
+	}
+
+	crtc = fevent->crtc;
+	dpu_crtc = to_dpu_crtc(crtc);
+
+	dpu_kms = _dpu_crtc_get_kms(crtc);
+	if (!dpu_kms) {
+		DPU_ERROR("invalid kms handle\n");
+		return;
+	}
+	priv = dpu_kms->dev->dev_private;
+	DPU_ATRACE_BEGIN("crtc_frame_event");
+
+	DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
+			ktime_to_ns(fevent->ts));
+
+	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
+				| DPU_ENCODER_FRAME_EVENT_ERROR
+				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
+
+		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
+			/* this should not happen */
+			DRM_ERROR("crtc%d ev:%u ts:%lld frame_pending:%d\n",
+					crtc->base.id,
+					fevent->event,
+					ktime_to_ns(fevent->ts),
+					atomic_read(&dpu_crtc->frame_pending));
+		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
+			/* release bandwidth and other resources */
+			trace_dpu_crtc_frame_event_done(DRMID(crtc),
+							fevent->event);
+			dpu_core_perf_crtc_release_bw(crtc);
+		} else {
+			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
+								fevent->event);
+		}
+
+		if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE)
+			dpu_core_perf_crtc_update(crtc, 0, false);
+
+		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
+					| DPU_ENCODER_FRAME_EVENT_ERROR))
+			frame_done = true;
+	}
+
+	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
+		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
+				crtc->base.id, ktime_to_ns(fevent->ts));
+
+	if (frame_done)
+		complete_all(&dpu_crtc->frame_done_comp);
+
+	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
+	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
+	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
+	DPU_ATRACE_END("crtc_frame_event");
+}
+
+/*
+ * dpu_crtc_frame_event_cb - crtc frame event callback API. The CRTC module
+ * registers this callback with the encoder for all frame event callbacks such
+ * as frame_error, frame_done, idle_timeout, etc. The encoder may invoke it
+ * from different contexts - IRQ, user thread, commit_thread, etc. Each event
+ * should be carefully reviewed and processed in the proper task context to
+ * avoid scheduling delay or to properly manage the IRQ context's bottom-half
+ * processing.
+ */
+static void dpu_crtc_frame_event_cb(void *data, u32 event)
+{
+	struct drm_crtc *crtc = (struct drm_crtc *)data;
+	struct dpu_crtc *dpu_crtc;
+	struct msm_drm_private *priv;
+	struct dpu_crtc_frame_event *fevent;
+	unsigned long flags;
+	u32 crtc_id;
+
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+		DPU_ERROR("invalid parameters\n");
+		return;
+	}
+
+	/* Nothing to do on idle event */
+	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
+		return;
+
+	dpu_crtc = to_dpu_crtc(crtc);
+	priv = crtc->dev->dev_private;
+	crtc_id = drm_crtc_index(crtc);
+
+	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);
+
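+	/*
+	 * Pull a preallocated event container off the free list; this
+	 * callback may run in IRQ context, so no allocation is done here.
+	 */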
+	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
+	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
+			struct dpu_crtc_frame_event, list);
+	if (fevent)
+		list_del_init(&fevent->list);
+	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
+
+	if (!fevent) {
+		DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
+		return;
+	}
+
+	fevent->event = event;
+	fevent->crtc = crtc;
+	fevent->ts = ktime_get();
+	kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
+}
+
+void dpu_crtc_complete_commit(struct drm_crtc *crtc,
+		struct drm_crtc_state *old_state)
+{
+	if (!crtc || !crtc->state) {
+		DPU_ERROR("invalid crtc\n");
+		return;
+	}
+	trace_dpu_crtc_complete_commit(DRMID(crtc));
+}
+
+static void _dpu_crtc_setup_mixer_for_encoder(
+		struct drm_crtc *crtc,
+		struct drm_encoder *enc)
+{
+	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+	struct dpu_rm *rm = &dpu_kms->rm;
+	struct dpu_crtc_mixer *mixer;
+	struct dpu_hw_ctl *last_valid_ctl = NULL;
+	int i;
+	struct dpu_rm_hw_iter lm_iter, ctl_iter;
+
+	dpu_rm_init_hw_iter(&lm_iter, enc->base.id, DPU_HW_BLK_LM);
+	dpu_rm_init_hw_iter(&ctl_iter, enc->base.id, DPU_HW_BLK_CTL);
+
+	/* Set up all the mixers and ctls reserved by this encoder */
+	for (i = dpu_crtc->num_mixers; i < ARRAY_SIZE(dpu_crtc->mixers); i++) {
+		mixer = &dpu_crtc->mixers[i];
+
+		if (!dpu_rm_get_hw(rm, &lm_iter))
+			break;
+		mixer->hw_lm = (struct dpu_hw_mixer *)lm_iter.hw;
+
+		/* there may be fewer CTLs than LMs; if so, one CTL drives multiple LMs */
+		if (!dpu_rm_get_hw(rm, &ctl_iter)) {
+			DPU_DEBUG("no ctl assigned to lm %d, using previous\n",
+					mixer->hw_lm->idx - LM_0);
+			mixer->hw_ctl = last_valid_ctl;
+		} else {
+			mixer->hw_ctl = (struct dpu_hw_ctl *)ctl_iter.hw;
+			last_valid_ctl = mixer->hw_ctl;
+		}
+
+		/* Shouldn't happen, mixers are always >= ctls */
+		if (!mixer->hw_ctl) {
+			DPU_ERROR("no valid ctls found for lm %d\n",
+					mixer->hw_lm->idx - LM_0);
+			return;
+		}
+
+		mixer->encoder = enc;
+
+		dpu_crtc->num_mixers++;
+		DPU_DEBUG("setup mixer %d: lm %d\n",
+				i, mixer->hw_lm->idx - LM_0);
+		DPU_DEBUG("setup mixer %d: ctl %d\n",
+				i, mixer->hw_ctl->idx - CTL_0);
+	}
+}
+
+static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
+{
+	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+	struct drm_encoder *enc;
+
+	dpu_crtc->num_mixers = 0;
+	dpu_crtc->mixers_swapped = false;
+	memset(dpu_crtc->mixers, 0, sizeof(dpu_crtc->mixers));
+
+	mutex_lock(&dpu_crtc->crtc_lock);
+	/* Check for mixers on all encoders attached to this crtc */
+	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
+		if (enc->crtc != crtc)
+			continue;
+
+		_dpu_crtc_setup_mixer_for_encoder(crtc, enc);
+	}
+
+	mutex_unlock(&dpu_crtc->crtc_lock);
+}
+
+static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
+		struct drm_crtc_state *state)
+{
+	struct dpu_crtc *dpu_crtc;
+	struct dpu_crtc_state *cstate;
+	struct drm_display_mode *adj_mode;
+	u32 crtc_split_width;
+	int i;
+
+	if (!crtc || !state) {
+		DPU_ERROR("invalid args\n");
+		return;
+	}
+
+	dpu_crtc = to_dpu_crtc(crtc);
+	cstate = to_dpu_crtc_state(state);
+
+	adj_mode = &state->adjusted_mode;
+	crtc_split_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, adj_mode);
+
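+	/* split the adjusted mode into equal-width strips, one per mixer */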
+	for (i = 0; i < dpu_crtc->num_mixers; i++) {
+		struct drm_rect *r = &cstate->lm_bounds[i];
+		r->x1 = crtc_split_width * i;
+		r->y1 = 0;
+		r->x2 = r->x1 + crtc_split_width;
+		r->y2 = dpu_crtc_get_mixer_height(dpu_crtc, cstate, adj_mode);
+
+		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
+	}
+
+	drm_mode_debug_printmodeline(adj_mode);
+}
+
+static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
+		struct drm_crtc_state *old_state)
+{
+	struct dpu_crtc *dpu_crtc;
+	struct drm_encoder *encoder;
+	struct drm_device *dev;
+	unsigned long flags;
+	struct dpu_crtc_smmu_state_data *smmu_state;
+
+	if (!crtc) {
+		DPU_ERROR("invalid crtc\n");
+		return;
+	}
+
+	if (!crtc->state->enable) {
+		DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
+				crtc->base.id, crtc->state->enable);
+		return;
+	}
+
+	DPU_DEBUG("crtc%d\n", crtc->base.id);
+
+	dpu_crtc = to_dpu_crtc(crtc);
+	dev = crtc->dev;
+	smmu_state = &dpu_crtc->smmu_state;
+
+	if (!dpu_crtc->num_mixers) {
+		_dpu_crtc_setup_mixers(crtc);
+		_dpu_crtc_setup_lm_bounds(crtc, crtc->state);
+	}
+
+	if (dpu_crtc->event) {
+		WARN_ON(dpu_crtc->event);
+	} else {
+		spin_lock_irqsave(&dev->event_lock, flags);
+		dpu_crtc->event = crtc->state->event;
+		crtc->state->event = NULL;
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+
+		/* encoder will trigger pending mask now */
+		dpu_encoder_trigger_kickoff_pending(encoder);
+	}
+
+	/*
+	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
+	 * it means we are trying to flush a CRTC whose state is disabled:
+	 * nothing else needs to be done.
+	 */
+	if (unlikely(!dpu_crtc->num_mixers))
+		return;
+
+	_dpu_crtc_blend_setup(crtc);
+
+	/*
+	 * PP_DONE irq is only used by command mode for now.
+	 * It is better to request pending before FLUSH and START trigger
+	 * to make sure no pp_done irq missed.
+	 * This is safe because no pp_done will happen before SW trigger
+	 * in command mode.
+	 */
+}
+
+static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
+		struct drm_crtc_state *old_crtc_state)
+{
+	struct dpu_crtc *dpu_crtc;
+	struct drm_device *dev;
+	struct drm_plane *plane;
+	struct msm_drm_private *priv;
+	struct msm_drm_thread *event_thread;
+	unsigned long flags;
+	struct dpu_crtc_state *cstate;
+
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+		DPU_ERROR("invalid crtc\n");
+		return;
+	}
+
+	if (!crtc->state->enable) {
+		DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
+				crtc->base.id, crtc->state->enable);
+		return;
+	}
+
+	DPU_DEBUG("crtc%d\n", crtc->base.id);
+
+	dpu_crtc = to_dpu_crtc(crtc);
+	cstate = to_dpu_crtc_state(crtc->state);
+	dev = crtc->dev;
+	priv = dev->dev_private;
+
+	if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
+		DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
+		return;
+	}
+
+	event_thread = &priv->event_thread[crtc->index];
+
+	if (dpu_crtc->event) {
+		DPU_DEBUG("already received dpu_crtc->event\n");
+	} else {
+		spin_lock_irqsave(&dev->event_lock, flags);
+		dpu_crtc->event = crtc->state->event;
+		crtc->state->event = NULL;
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+	}
+
+	/*
+	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
+	 * it means we are trying to flush a CRTC whose state is disabled:
+	 * nothing else needs to be done.
+	 */
+	if (unlikely(!dpu_crtc->num_mixers))
+		return;
+
+	/*
+	 * For planes without commit update, drm framework will not add
+	 * those planes to current state since hardware update is not
+	 * required. However, if those planes were power collapsed since
+	 * last commit cycle, driver has to restore the hardware state
+	 * of those planes explicitly here prior to plane flush.
+	 */
+	drm_atomic_crtc_for_each_plane(plane, crtc)
+		dpu_plane_restore(plane);
+
+	/* update performance setting before crtc kickoff */
+	dpu_core_perf_crtc_update(crtc, 1, false);
+
+	/*
+	 * Final plane updates: Give each plane a chance to complete all
+	 *                      required writes/flushing before crtc's "flush
+	 *                      everything" call below.
+	 */
+	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		if (dpu_crtc->smmu_state.transition_error)
+			dpu_plane_set_error(plane, true);
+		dpu_plane_flush(plane);
+	}
+
+	/* Kickoff will be scheduled by outer layer */
+}
+
+/**
+ * dpu_crtc_destroy_state - state destroy hook
+ * @crtc: drm CRTC
+ * @state: CRTC state object to release
+ */
+static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
+		struct drm_crtc_state *state)
+{
+	struct dpu_crtc *dpu_crtc;
+	struct dpu_crtc_state *cstate;
+
+	if (!crtc || !state) {
+		DPU_ERROR("invalid argument(s)\n");
+		return;
+	}
+
+	dpu_crtc = to_dpu_crtc(crtc);
+	cstate = to_dpu_crtc_state(state);
+
+	DPU_DEBUG("crtc%d\n", crtc->base.id);
+
+	_dpu_crtc_rp_destroy(&cstate->rp);
+
+	__drm_atomic_helper_crtc_destroy_state(state);
+
+	kfree(cstate);
+}
+
+static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
+{
+	struct dpu_crtc *dpu_crtc;
+	int ret, rc = 0;
+
+	if (!crtc) {
+		DPU_ERROR("invalid argument\n");
+		return -EINVAL;
+	}
+	dpu_crtc = to_dpu_crtc(crtc);
+
+	if (!atomic_read(&dpu_crtc->frame_pending)) {
+		DPU_DEBUG("no frames pending\n");
+		return 0;
+	}
+
+	DPU_ATRACE_BEGIN("frame done completion wait");
+	ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
+			msecs_to_jiffies(DPU_FRAME_DONE_TIMEOUT));
+	if (!ret) {
+		DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
+		rc = -ETIMEDOUT;
+	}
+	DPU_ATRACE_END("frame done completion wait");
+
+	return rc;
+}
+
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
+{
+	struct drm_encoder *encoder;
+	struct drm_device *dev;
+	struct dpu_crtc *dpu_crtc;
+	struct msm_drm_private *priv;
+	struct dpu_kms *dpu_kms;
+	struct dpu_crtc_state *cstate;
+	int ret;
+
+	if (!crtc) {
+		DPU_ERROR("invalid argument\n");
+		return;
+	}
+	dev = crtc->dev;
+	dpu_crtc = to_dpu_crtc(crtc);
+	dpu_kms = _dpu_crtc_get_kms(crtc);
+
+	if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) {
+		DPU_ERROR("invalid argument\n");
+		return;
+	}
+
+	priv = dpu_kms->dev->dev_private;
+	cstate = to_dpu_crtc_state(crtc->state);
+
+	/*
+	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
+	 * it means we are trying to start a CRTC whose state is disabled:
+	 * nothing else needs to be done.
+	 */
+	if (unlikely(!dpu_crtc->num_mixers))
+		return;
+
+	DPU_ATRACE_BEGIN("crtc_commit");
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct dpu_encoder_kickoff_params params = { 0 };
+
+		if (encoder->crtc != crtc)
+			continue;
+
+		/*
+		 * Encoder will flush/start now, unless it has a tx pending.
+		 * If so, it may delay and flush at an irq event (e.g. ppdone)
+		 */
+		dpu_encoder_prepare_for_kickoff(encoder, &params);
+	}
+
+	/* wait for frame_event_done completion */
+	DPU_ATRACE_BEGIN("wait_for_frame_done_event");
+	ret = _dpu_crtc_wait_for_frame_done(crtc);
+	DPU_ATRACE_END("wait_for_frame_done_event");
+	if (ret) {
+		DPU_ERROR("crtc%d wait for frame done failed; frame_pending:%d\n",
+				crtc->base.id,
+				atomic_read(&dpu_crtc->frame_pending));
+		goto end;
+	}
+
+	if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
+		/* acquire bandwidth and other resources */
+		DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
+	} else {
+		DPU_DEBUG("crtc%d commit\n", crtc->base.id);
+	}
+
+	dpu_crtc->play_count++;
+
+	dpu_vbif_clear_errors(dpu_kms);
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+
+		dpu_encoder_kickoff(encoder);
+	}
+
+end:
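+	/* re-arm the frame-done completion for the next commit */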
+	reinit_completion(&dpu_crtc->frame_done_comp);
+	DPU_ATRACE_END("crtc_commit");
+}
+
+/**
+ * _dpu_crtc_vblank_enable_no_lock - update power resource and vblank request
+ * @dpu_crtc: Pointer to dpu crtc structure
+ * @enable: Whether to enable/disable vblanks
+ * return: error code
+ */
+static int _dpu_crtc_vblank_enable_no_lock(
+		struct dpu_crtc *dpu_crtc, bool enable)
+{
+	struct drm_device *dev;
+	struct drm_crtc *crtc;
+	struct drm_encoder *enc;
+
+	if (!dpu_crtc) {
+		DPU_ERROR("invalid crtc\n");
+		return -EINVAL;
+	}
+
+	crtc = &dpu_crtc->base;
+	dev = crtc->dev;
+
+	if (enable) {
+		int ret;
+
+		/* drop lock since power crtc cb may try to re-acquire lock */
+		mutex_unlock(&dpu_crtc->crtc_lock);
+		ret = _dpu_crtc_power_enable(dpu_crtc, true);
+		mutex_lock(&dpu_crtc->crtc_lock);
+		if (ret)
+			return ret;
+
+		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+			if (enc->crtc != crtc)
+				continue;
+
+			trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
+						     DRMID(enc), enable,
+						     dpu_crtc);
+
+			dpu_encoder_register_vblank_callback(enc,
+					dpu_crtc_vblank_cb, (void *)crtc);
+		}
+	} else {
+		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+			if (enc->crtc != crtc)
+				continue;
+
+			trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
+						     DRMID(enc), enable,
+						     dpu_crtc);
+
+			dpu_encoder_register_vblank_callback(enc, NULL, NULL);
+		}
+
+		/* drop lock since power crtc cb may try to re-acquire lock */
+		mutex_unlock(&dpu_crtc->crtc_lock);
+		_dpu_crtc_power_enable(dpu_crtc, false);
+		mutex_lock(&dpu_crtc->crtc_lock);
+	}
+
+	return 0;
+}
+
+/**
+ * _dpu_crtc_set_suspend - notify crtc of suspend enable/disable
+ * @crtc: Pointer to drm crtc object
+ * @enable: true to enable suspend, false to indicate resume
+ */
+static void _dpu_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
+{
+	struct dpu_crtc *dpu_crtc;
+	struct msm_drm_private *priv;
+	struct dpu_kms *dpu_kms;
+	int ret = 0;
+
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+		DPU_ERROR("invalid crtc\n");
+		return;
+	}
+	dpu_crtc = to_dpu_crtc(crtc);
+	priv = crtc->dev->dev_private;
+
+	if (!priv->kms) {
+		DPU_ERROR("invalid crtc kms\n");
+		return;
+	}
+	dpu_kms = to_dpu_kms(priv->kms);
+
+	DRM_DEBUG_KMS("crtc%d suspend = %d\n", crtc->base.id, enable);
+
+	mutex_lock(&dpu_crtc->crtc_lock);
+
+	/*
+	 * If the vblank is enabled, release a power reference on suspend
+	 * and take it back during resume (if it is still enabled).
+	 */
+	trace_dpu_crtc_set_suspend(DRMID(&dpu_crtc->base), enable, dpu_crtc);
+	if (dpu_crtc->suspend == enable)
+		DPU_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
+				crtc->base.id, enable);
+	else if (dpu_crtc->enabled && dpu_crtc->vblank_requested) {
+		ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, !enable);
+		if (ret)
+			DPU_ERROR("%s vblank enable failed: %d\n",
+					dpu_crtc->name, ret);
+	}
+
+	dpu_crtc->suspend = enable;
+	mutex_unlock(&dpu_crtc->crtc_lock);
+}
+
+/**
+ * dpu_crtc_duplicate_state - state duplicate hook
+ * @crtc: Pointer to drm crtc structure
+ * return: Pointer to new drm_crtc_state structure
+ */
+static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+	struct dpu_crtc *dpu_crtc;
+	struct dpu_crtc_state *cstate, *old_cstate;
+
+	if (!crtc || !crtc->state) {
+		DPU_ERROR("invalid argument(s)\n");
+		return NULL;
+	}
+
+	dpu_crtc = to_dpu_crtc(crtc);
+	old_cstate = to_dpu_crtc_state(crtc->state);
+	cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
+	if (!cstate) {
+		DPU_ERROR("failed to allocate state\n");
+		return NULL;
+	}
+
+	/* duplicate base helper */
+	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
+
+	_dpu_crtc_rp_duplicate(&old_cstate->rp, &cstate->rp);
+
+	return &cstate->base;
+}
+
+/**
+ * dpu_crtc_reset - reset hook for CRTCs
+ * Resets the atomic state for @crtc by freeing the state pointer (which might
+ * be NULL, e.g. at driver load time) and allocating a new empty state object.
+ * @crtc: Pointer to drm crtc structure
+ */
+static void dpu_crtc_reset(struct drm_crtc *crtc)
+{
+	struct dpu_crtc *dpu_crtc;
+	struct dpu_crtc_state *cstate;
+
+	if (!crtc) {
+		DPU_ERROR("invalid crtc\n");
+		return;
+	}
+
+	/* revert suspend actions, if necessary */
+	if (dpu_kms_is_suspend_state(crtc->dev))
+		_dpu_crtc_set_suspend(crtc, false);
+
+	/* remove previous state, if present */
+	if (crtc->state) {
+		dpu_crtc_destroy_state(crtc, crtc->state);
+		crtc->state = NULL;
+	}
+
+	dpu_crtc = to_dpu_crtc(crtc);
+	cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
+	if (!cstate) {
+		DPU_ERROR("failed to allocate state\n");
+		return;
+	}
+
+	_dpu_crtc_rp_reset(&cstate->rp, &dpu_crtc->rp_lock,
+			&dpu_crtc->rp_head);
+
+	cstate->base.crtc = crtc;
+	crtc->state = &cstate->base;
+}
+
+static void dpu_crtc_handle_power_event(u32 event_type, void *arg)
+{
+	struct drm_crtc *crtc = arg;
+	struct dpu_crtc *dpu_crtc;
+	struct drm_encoder *encoder;
+	struct dpu_crtc_mixer *m;
+	u32 i, misr_status;
+
+	if (!crtc) {
+		DPU_ERROR("invalid crtc\n");
+		return;
+	}
+	dpu_crtc = to_dpu_crtc(crtc);
+
+	mutex_lock(&dpu_crtc->crtc_lock);
+
+	trace_dpu_crtc_handle_power_event(DRMID(crtc), event_type);
+
+	switch (event_type) {
+	case DPU_POWER_EVENT_POST_ENABLE:
+		/* restore encoder; crtc will be programmed during commit */
+		drm_for_each_encoder(encoder, crtc->dev) {
+			if (encoder->crtc != crtc)
+				continue;
+
+			dpu_encoder_virt_restore(encoder);
+		}
+
+		for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+			m = &dpu_crtc->mixers[i];
+			if (!m->hw_lm || !m->hw_lm->ops.setup_misr ||
+					!dpu_crtc->misr_enable)
+				continue;
+
+			m->hw_lm->ops.setup_misr(m->hw_lm, true,
+					dpu_crtc->misr_frame_count);
+		}
+		break;
+	case DPU_POWER_EVENT_PRE_DISABLE:
+		for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+			m = &dpu_crtc->mixers[i];
+			if (!m->hw_lm || !m->hw_lm->ops.collect_misr ||
+					!dpu_crtc->misr_enable)
+				continue;
+
+			misr_status = m->hw_lm->ops.collect_misr(m->hw_lm);
+			dpu_crtc->misr_data[i] = misr_status ? misr_status :
+							dpu_crtc->misr_data[i];
+		}
+		break;
+	case DPU_POWER_EVENT_POST_DISABLE:
+		/*
+		 * Nothing to do. All the planes on the CRTC will be
+		 * programmed for every frame
+		 */
+		break;
+	default:
+		DPU_DEBUG("event:%d not handled\n", event_type);
+		break;
+	}
+
+	mutex_unlock(&dpu_crtc->crtc_lock);
+}
+
+static void dpu_crtc_disable(struct drm_crtc *crtc)
+{
+	struct dpu_crtc *dpu_crtc;
+	struct dpu_crtc_state *cstate;
+	struct drm_display_mode *mode;
+	struct drm_encoder *encoder;
+	struct msm_drm_private *priv;
+	int ret;
+
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
+		DPU_ERROR("invalid crtc\n");
+		return;
+	}
+	dpu_crtc = to_dpu_crtc(crtc);
+	cstate = to_dpu_crtc_state(crtc->state);
+	mode = &cstate->base.adjusted_mode;
+	priv = crtc->dev->dev_private;
+
+	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
+
+	if (dpu_kms_is_suspend_state(crtc->dev))
+		_dpu_crtc_set_suspend(crtc, true);
+
+	mutex_lock(&dpu_crtc->crtc_lock);
+
+	/* wait for frame_event_done completion */
+	if (_dpu_crtc_wait_for_frame_done(crtc))
+		DPU_ERROR("crtc%d wait for frame done failed; frame_pending:%d\n",
+				crtc->base.id,
+				atomic_read(&dpu_crtc->frame_pending));
+
+	trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
+	if (dpu_crtc->enabled && !dpu_crtc->suspend &&
+			dpu_crtc->vblank_requested) {
+		ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, false);
+		if (ret)
+			DPU_ERROR("%s vblank enable failed: %d\n",
+					dpu_crtc->name, ret);
+	}
+	dpu_crtc->enabled = false;
+
+	if (atomic_read(&dpu_crtc->frame_pending)) {
+		trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
+				     atomic_read(&dpu_crtc->frame_pending));
+		dpu_core_perf_crtc_release_bw(crtc);
+		atomic_set(&dpu_crtc->frame_pending, 0);
+	}
+
+	dpu_core_perf_crtc_update(crtc, 0, true);
+
+	drm_for_each_encoder(encoder, crtc->dev) {
+		if (encoder->crtc != crtc)
+			continue;
+		dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);
+	}
+
+	if (dpu_crtc->power_event)
+		dpu_power_handle_unregister_event(dpu_crtc->phandle,
+				dpu_crtc->power_event);
+
+	memset(dpu_crtc->mixers, 0, sizeof(dpu_crtc->mixers));
+	dpu_crtc->num_mixers = 0;
+	dpu_crtc->mixers_swapped = false;
+
+	/* disable clk & bw control until clk & bw properties are set */
+	cstate->bw_control = false;
+	cstate->bw_split_vote = false;
+
+	mutex_unlock(&dpu_crtc->crtc_lock);
+}
+
+static void dpu_crtc_enable(struct drm_crtc *crtc,
+		struct drm_crtc_state *old_crtc_state)
+{
+	struct dpu_crtc *dpu_crtc;
+	struct drm_encoder *encoder;
+	struct msm_drm_private *priv;
+	int ret;
+
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+		DPU_ERROR("invalid crtc\n");
+		return;
+	}
+	priv = crtc->dev->dev_private;
+
+	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
+	dpu_crtc = to_dpu_crtc(crtc);
+
+	drm_for_each_encoder(encoder, crtc->dev) {
+		if (encoder->crtc != crtc)
+			continue;
+		dpu_encoder_register_frame_event_callback(encoder,
+				dpu_crtc_frame_event_cb, (void *)crtc);
+	}
+
+	mutex_lock(&dpu_crtc->crtc_lock);
+	trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
+	if (!dpu_crtc->enabled && !dpu_crtc->suspend &&
+			dpu_crtc->vblank_requested) {
+		ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, true);
+		if (ret)
+			DPU_ERROR("%s vblank enable failed: %d\n",
+					dpu_crtc->name, ret);
+	}
+	dpu_crtc->enabled = true;
+
+	mutex_unlock(&dpu_crtc->crtc_lock);
+
+	dpu_crtc->power_event = dpu_power_handle_register_event(
+		dpu_crtc->phandle,
+		DPU_POWER_EVENT_POST_ENABLE | DPU_POWER_EVENT_POST_DISABLE |
+		DPU_POWER_EVENT_PRE_DISABLE,
+		dpu_crtc_handle_power_event, crtc, dpu_crtc->name);
+
+}
+
+struct plane_state {
+	struct dpu_plane_state *dpu_pstate;
+	const struct drm_plane_state *drm_pstate;
+	int stage;
+	u32 pipe_id;
+};
+
+static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
+		struct drm_crtc_state *state)
+{
+	struct dpu_crtc *dpu_crtc;
+	struct plane_state *pstates;
+	struct dpu_crtc_state *cstate;
+
+	const struct drm_plane_state *pstate;
+	struct drm_plane *plane;
+	struct drm_display_mode *mode;
+
+	int cnt = 0, rc = 0, mixer_width, i, z_pos;
+
+	struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
+	int multirect_count = 0;
+	const struct drm_plane_state *pipe_staged[SSPP_MAX];
+	int left_zpos_cnt = 0, right_zpos_cnt = 0;
+	struct drm_rect crtc_rect = { 0 };
+
+	if (!crtc) {
+		DPU_ERROR("invalid crtc\n");
+		return -EINVAL;
+	}
+
+	pstates = kcalloc(DPU_STAGE_MAX * 4, sizeof(*pstates), GFP_KERNEL);
+	if (!pstates)
+		return -ENOMEM;
+
+	dpu_crtc = to_dpu_crtc(crtc);
+	cstate = to_dpu_crtc_state(state);
+
+	if (!state->enable || !state->active) {
+		DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
+				crtc->base.id, state->enable, state->active);
+		goto end;
+	}
+
+	mode = &state->adjusted_mode;
+	DPU_DEBUG("%s: check\n", dpu_crtc->name);
+
+	/* force a full mode set if active state changed */
+	if (state->active_changed)
+		state->mode_changed = true;
+
+	memset(pipe_staged, 0, sizeof(pipe_staged));
+
+	mixer_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, mode);
+
+	_dpu_crtc_setup_lm_bounds(crtc, state);
+
+	crtc_rect.x2 = mode->hdisplay;
+	crtc_rect.y2 = mode->vdisplay;
+
+	/* get plane state for all drm planes associated with crtc state */
+	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+		struct drm_rect dst, clip = crtc_rect;
+
+		if (IS_ERR_OR_NULL(pstate)) {
+			rc = PTR_ERR(pstate);
+			DPU_ERROR("%s: failed to get plane%d state, %d\n",
+					dpu_crtc->name, plane->base.id, rc);
+			goto end;
+		}
+		if (cnt >= DPU_STAGE_MAX * 4)
+			continue;
+
+		pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
+		pstates[cnt].drm_pstate = pstate;
+		pstates[cnt].stage = pstate->normalized_zpos;
+		pstates[cnt].pipe_id = dpu_plane_pipe(plane);
+
+		if (pipe_staged[pstates[cnt].pipe_id]) {
+			multirect_plane[multirect_count].r0 =
+				pipe_staged[pstates[cnt].pipe_id];
+			multirect_plane[multirect_count].r1 = pstate;
+			multirect_count++;
+
+			pipe_staged[pstates[cnt].pipe_id] = NULL;
+		} else {
+			pipe_staged[pstates[cnt].pipe_id] = pstate;
+		}
+
+		cnt++;
+
+		dst = drm_plane_state_dest(pstate);
+		if (!drm_rect_intersect(&clip, &dst) ||
+		    !drm_rect_equals(&clip, &dst)) {
+			DPU_ERROR("invalid vertical/horizontal destination\n");
+			DPU_ERROR("display: " DRM_RECT_FMT " plane: "
+				  DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
+				  DRM_RECT_ARG(&dst));
+			rc = -E2BIG;
+			goto end;
+		}
+	}
+
+	for (i = 1; i < SSPP_MAX; i++) {
+		if (pipe_staged[i]) {
+			dpu_plane_clear_multirect(pipe_staged[i]);
+
+			if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
+				DPU_ERROR(
+					"r1 only virt plane:%d not supported\n",
+					pipe_staged[i]->plane->base.id);
+				rc  = -EINVAL;
+				goto end;
+			}
+		}
+	}
+
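+	/*
+	 * Assign blend stages; note that this walk assumes pstates[] is
+	 * ordered by stage (normalized zpos).
+	 */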
+	z_pos = -1;
+	for (i = 0; i < cnt; i++) {
+		/* reset counts at every new blend stage */
+		if (pstates[i].stage != z_pos) {
+			left_zpos_cnt = 0;
+			right_zpos_cnt = 0;
+			z_pos = pstates[i].stage;
+		}
+
+		/* verify z_pos setting before using it */
+		if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
+			DPU_ERROR("> %d plane stages assigned\n",
+					DPU_STAGE_MAX - DPU_STAGE_0);
+			rc = -EINVAL;
+			goto end;
+		} else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
+			if (left_zpos_cnt == 2) {
+				DPU_ERROR("> 2 planes @ stage %d on left\n",
+					z_pos);
+				rc = -EINVAL;
+				goto end;
+			}
+			left_zpos_cnt++;
+
+		} else {
+			if (right_zpos_cnt == 2) {
+				DPU_ERROR("> 2 planes @ stage %d on right\n",
+					z_pos);
+				rc = -EINVAL;
+				goto end;
+			}
+			right_zpos_cnt++;
+		}
+
+		pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
+		DPU_DEBUG("%s: zpos %d\n", dpu_crtc->name, z_pos);
+	}
+
+	for (i = 0; i < multirect_count; i++) {
+		if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
+			DPU_ERROR(
+			"multirect validation failed for planes (%d - %d)\n",
+					multirect_plane[i].r0->plane->base.id,
+					multirect_plane[i].r1->plane->base.id);
+			rc = -EINVAL;
+			goto end;
+		}
+	}
+
+	rc = dpu_core_perf_crtc_check(crtc, state);
+	if (rc) {
+		DPU_ERROR("crtc%d failed performance check %d\n",
+				crtc->base.id, rc);
+		goto end;
+	}
+
+	/*
+	 * Validate source split:
+	 * use pstates sorted by stage to check planes on the same stage;
+	 * we assume that all pipes are in source split, so it is valid to
+	 * compare without taking left/right mixer placement into account.
+	 */
+	for (i = 1; i < cnt; i++) {
+		struct plane_state *prv_pstate, *cur_pstate;
+		struct drm_rect left_rect, right_rect;
+		int32_t left_pid, right_pid;
+		int32_t stage;
+
+		prv_pstate = &pstates[i - 1];
+		cur_pstate = &pstates[i];
+		if (prv_pstate->stage != cur_pstate->stage)
+			continue;
+
+		stage = cur_pstate->stage;
+
+		left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
+		left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);
+
+		right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
+		right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);
+
+		if (right_rect.x1 < left_rect.x1) {
+			swap(left_pid, right_pid);
+			swap(left_rect, right_rect);
+		}
+
+		/*
+		 * - planes are enumerated in pipe-priority order such that
+		 *   planes with lower drm_id must be left-most in a shared
+		 *   blend-stage when using source split.
+		 * - planes in source split must be contiguous in width
+		 * - planes in source split must have same dest yoff and height
+		 */
+		if (right_pid < left_pid) {
+			DPU_ERROR(
+				"invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
+				stage, left_pid, right_pid);
+			rc = -EINVAL;
+			goto end;
+		} else if (right_rect.x1 != drm_rect_width(&left_rect)) {
+			DPU_ERROR("non-contiguous coordinates for src split. "
+				  "stage: %d left: " DRM_RECT_FMT " right: "
+				  DRM_RECT_FMT "\n", stage,
+				  DRM_RECT_ARG(&left_rect),
+				  DRM_RECT_ARG(&right_rect));
+			rc = -EINVAL;
+			goto end;
+		} else if (left_rect.y1 != right_rect.y1 ||
+			   drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
+			DPU_ERROR("source split at stage: %d. invalid "
+				  "yoff/height: left: " DRM_RECT_FMT " right: "
+				  DRM_RECT_FMT "\n", stage,
+				  DRM_RECT_ARG(&left_rect),
+				  DRM_RECT_ARG(&right_rect));
+			rc = -EINVAL;
+			goto end;
+		}
+	}
+
+end:
+	_dpu_crtc_rp_free_unused(&cstate->rp);
+	kfree(pstates);
+	return rc;
+}
+
+int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
+{
+	struct dpu_crtc *dpu_crtc;
+	int ret;
+
+	if (!crtc) {
+		DPU_ERROR("invalid crtc\n");
+		return -EINVAL;
+	}
+	dpu_crtc = to_dpu_crtc(crtc);
+
+	mutex_lock(&dpu_crtc->crtc_lock);
+	trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);
+	if (dpu_crtc->enabled && !dpu_crtc->suspend) {
+		ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, en);
+		if (ret)
+			DPU_ERROR("%s vblank enable failed: %d\n",
+					dpu_crtc->name, ret);
+	}
+	dpu_crtc->vblank_requested = en;
+	mutex_unlock(&dpu_crtc->crtc_lock);
+
+	return 0;
+}
+
+void dpu_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
+{
+	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+
+	DPU_DEBUG("%s: cancel: %p\n", dpu_crtc->name, file);
+	_dpu_crtc_complete_flip(crtc, file);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
+{
+	struct dpu_crtc *dpu_crtc;
+	struct dpu_plane_state *pstate = NULL;
+	struct dpu_crtc_mixer *m;
+
+	struct drm_crtc *crtc;
+	struct drm_plane *plane;
+	struct drm_display_mode *mode;
+	struct drm_framebuffer *fb;
+	struct drm_plane_state *state;
+	struct dpu_crtc_state *cstate;
+
+	int i, out_width;
+
+	if (!s || !s->private)
+		return -EINVAL;
+
+	dpu_crtc = s->private;
+	crtc = &dpu_crtc->base;
+	cstate = to_dpu_crtc_state(crtc->state);
+
+	mutex_lock(&dpu_crtc->crtc_lock);
+	mode = &crtc->state->adjusted_mode;
+	out_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, mode);
+
+	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
+				mode->hdisplay, mode->vdisplay);
+
+	seq_puts(s, "\n");
+
+	for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+		m = &dpu_crtc->mixers[i];
+		if (!m->hw_lm)
+			seq_printf(s, "\tmixer[%d] has no lm\n", i);
+		else if (!m->hw_ctl)
+			seq_printf(s, "\tmixer[%d] has no ctl\n", i);
+		else
+			seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
+				m->hw_lm->idx - LM_0, m->hw_ctl->idx - CTL_0,
+				out_width, mode->vdisplay);
+	}
+
+	seq_puts(s, "\n");
+
+	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		pstate = to_dpu_plane_state(plane->state);
+		state = plane->state;
+
+		if (!pstate || !state)
+			continue;
+
+		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
+			pstate->stage);
+
+		if (plane->state->fb) {
+			fb = plane->state->fb;
+
+			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
+				fb->base.id, (char *) &fb->format->format,
+				fb->width, fb->height);
+			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
+				seq_printf(s, "cpp[%d]:%u ",
+						i, fb->format->cpp[i]);
+			seq_puts(s, "\n\t");
+
+			seq_printf(s, "modifier:%8llu ", fb->modifier);
+			seq_puts(s, "\n");
+
+			seq_puts(s, "\t");
+			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
+				seq_printf(s, "pitches[%d]:%8u ", i,
+							fb->pitches[i]);
+			seq_puts(s, "\n");
+
+			seq_puts(s, "\t");
+			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
+				seq_printf(s, "offsets[%d]:%8u ", i,
+							fb->offsets[i]);
+			seq_puts(s, "\n");
+		}
+
+		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
+			state->src_x, state->src_y, state->src_w, state->src_h);
+
+		seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
+			state->crtc_x, state->crtc_y, state->crtc_w,
+			state->crtc_h);
+		seq_printf(s, "\tmultirect: mode: %d index: %d\n",
+			pstate->multirect_mode, pstate->multirect_index);
+
+		seq_puts(s, "\n");
+	}
+	if (dpu_crtc->vblank_cb_count) {
+		ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
+		s64 diff_ms = ktime_to_ms(diff);
+		s64 fps = diff_ms ? div_s64(
+				dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;
+
+		seq_printf(s,
+			"vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
+				fps, dpu_crtc->vblank_cb_count,
+				ktime_to_ms(diff), dpu_crtc->play_count);
+
+		/* reset time & count for next measurement */
+		dpu_crtc->vblank_cb_count = 0;
+		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
+	}
+
+	seq_printf(s, "vblank_enable:%d\n", dpu_crtc->vblank_requested);
+
+	mutex_unlock(&dpu_crtc->crtc_lock);
+
+	return 0;
+}
+
+static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, _dpu_debugfs_status_show, inode->i_private);
+}
+
+static ssize_t _dpu_crtc_misr_setup(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct dpu_crtc *dpu_crtc;
+	struct dpu_crtc_mixer *m;
+	int i = 0, rc;
+	char buf[MISR_BUFF_SIZE + 1];
+	u32 frame_count, enable;
+	size_t buff_copy;
+
+	if (!file || !file->private_data)
+		return -EINVAL;
+
+	dpu_crtc = file->private_data;
+	buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
+	if (copy_from_user(buf, user_buf, buff_copy)) {
+		DPU_ERROR("buffer copy failed\n");
+		return -EINVAL;
+	}
+
+	buf[buff_copy] = 0; /* end of string */
+
+	if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
+		return -EINVAL;
+
+	rc = _dpu_crtc_power_enable(dpu_crtc, true);
+	if (rc)
+		return rc;
+
+	mutex_lock(&dpu_crtc->crtc_lock);
+	dpu_crtc->misr_enable = enable;
+	dpu_crtc->misr_frame_count = frame_count;
+	for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+		dpu_crtc->misr_data[i] = 0;
+		m = &dpu_crtc->mixers[i];
+		if (!m->hw_lm || !m->hw_lm->ops.setup_misr)
+			continue;
+
+		m->hw_lm->ops.setup_misr(m->hw_lm, enable, frame_count);
+	}
+	mutex_unlock(&dpu_crtc->crtc_lock);
+	_dpu_crtc_power_enable(dpu_crtc, false);
+
+	return count;
+}
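+
+/*
+ * Example (paths are illustrative; the DRM minor and crtc directory name
+ * vary): the misr_data debugfs file above parses "<enable> <frame_count>",
+ * so capturing MISR signatures over 10 frames from a shell might look like:
+ *
+ *	echo "1 10" > /sys/kernel/debug/dri/0/<crtc name>/misr_data
+ *	cat /sys/kernel/debug/dri/0/<crtc name>/misr_data
+ *
+ * where the directory name comes from dpu_crtc->name.
+ */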
+
+static ssize_t _dpu_crtc_misr_read(struct file *file,
+		char __user *user_buff, size_t count, loff_t *ppos)
+{
+	struct dpu_crtc *dpu_crtc;
+	struct dpu_crtc_mixer *m;
+	int i = 0, rc;
+	u32 misr_status;
+	ssize_t len = 0;
+	char buf[MISR_BUFF_SIZE + 1] = {'\0'};
+
+	if (*ppos)
+		return 0;
+
+	if (!file || !file->private_data)
+		return -EINVAL;
+
+	dpu_crtc = file->private_data;
+	rc = _dpu_crtc_power_enable(dpu_crtc, true);
+	if (rc)
+		return rc;
+
+	mutex_lock(&dpu_crtc->crtc_lock);
+	if (!dpu_crtc->misr_enable) {
+		len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+			"disabled\n");
+		goto buff_check;
+	}
+
+	for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+		m = &dpu_crtc->mixers[i];
+		if (!m->hw_lm || !m->hw_lm->ops.collect_misr)
+			continue;
+
+		misr_status = m->hw_lm->ops.collect_misr(m->hw_lm);
+		dpu_crtc->misr_data[i] = misr_status ? misr_status :
+							dpu_crtc->misr_data[i];
+		len += snprintf(buf + len, MISR_BUFF_SIZE - len, "lm idx:%d\n",
+					m->hw_lm->idx - LM_0);
+		len += snprintf(buf + len, MISR_BUFF_SIZE - len, "0x%x\n",
+							dpu_crtc->misr_data[i]);
+	}
+
+buff_check:
+	if (count <= len) {
+		len = 0;
+		goto end;
+	}
+
+	if (copy_to_user(user_buff, buf, len)) {
+		len = -EFAULT;
+		goto end;
+	}
+
+	*ppos += len;   /* increase offset */
+
+end:
+	mutex_unlock(&dpu_crtc->crtc_lock);
+	_dpu_crtc_power_enable(dpu_crtc, false);
+	return len;
+}
+
+#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)                          \
+static int __prefix ## _open(struct inode *inode, struct file *file)	\
+{									\
+	return single_open(file, __prefix ## _show, inode->i_private);	\
+}									\
+static const struct file_operations __prefix ## _fops = {		\
+	.owner = THIS_MODULE,						\
+	.open = __prefix ## _open,					\
+	.release = single_release,					\
+	.read = seq_read,						\
+	.llseek = seq_lseek,						\
+}
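+
+/*
+ * Example (hypothetical "foo" show function, for illustration only):
+ * given a seq_file show routine, the macro above generates the matching
+ * open helper and file_operations table for debugfs_create_file():
+ *
+ *	static int foo_show(struct seq_file *s, void *v)
+ *	{
+ *		seq_puts(s, "hello\n");
+ *		return 0;
+ *	}
+ *	DEFINE_DPU_DEBUGFS_SEQ_FOPS(foo);
+ *
+ * dpu_crtc_debugfs_state below is the actual user in this file.
+ */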
+
+static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
+{
+	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
+	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+	struct dpu_crtc_res *res;
+	struct dpu_crtc_respool *rp;
+	int i;
+
+	seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
+	seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
+	seq_printf(s, "core_clk_rate: %llu\n",
+			dpu_crtc->cur_perf.core_clk_rate);
+	for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
+			i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+		seq_printf(s, "bw_ctl[%s]: %llu\n",
+				dpu_power_handle_get_dbus_name(i),
+				dpu_crtc->cur_perf.bw_ctl[i]);
+		seq_printf(s, "max_per_pipe_ib[%s]: %llu\n",
+				dpu_power_handle_get_dbus_name(i),
+				dpu_crtc->cur_perf.max_per_pipe_ib[i]);
+	}
+
+	mutex_lock(&dpu_crtc->rp_lock);
+	list_for_each_entry(rp, &dpu_crtc->rp_head, rp_list) {
+		seq_printf(s, "rp.%d: ", rp->sequence_id);
+		list_for_each_entry(res, &rp->res_list, list)
+			seq_printf(s, "0x%x/0x%llx/%pK/%d ",
+					res->type, res->tag, res->val,
+					atomic_read(&res->refcount));
+		seq_puts(s, "\n");
+	}
+	mutex_unlock(&dpu_crtc->rp_lock);
+
+	return 0;
+}
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);
+
+static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
+{
+	struct dpu_crtc *dpu_crtc;
+	struct dpu_kms *dpu_kms;
+
+	static const struct file_operations debugfs_status_fops = {
+		.open =		_dpu_debugfs_status_open,
+		.read =		seq_read,
+		.llseek =	seq_lseek,
+		.release =	single_release,
+	};
+	static const struct file_operations debugfs_misr_fops = {
+		.open =		simple_open,
+		.read =		_dpu_crtc_misr_read,
+		.write =	_dpu_crtc_misr_setup,
+	};
+
+	if (!crtc)
+		return -EINVAL;
+	dpu_crtc = to_dpu_crtc(crtc);
+
+	dpu_kms = _dpu_crtc_get_kms(crtc);
+	if (!dpu_kms)
+		return -EINVAL;
+
+	dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
+			crtc->dev->primary->debugfs_root);
+	if (!dpu_crtc->debugfs_root)
+		return -ENOMEM;
+
+	/* don't error check these */
+	debugfs_create_file("status", 0400,
+			dpu_crtc->debugfs_root,
+			dpu_crtc, &debugfs_status_fops);
+	debugfs_create_file("state", 0600,
+			dpu_crtc->debugfs_root,
+			&dpu_crtc->base,
+			&dpu_crtc_debugfs_state_fops);
+	debugfs_create_file("misr_data", 0600, dpu_crtc->debugfs_root,
+					dpu_crtc, &debugfs_misr_fops);
+
+	return 0;
+}
+
+static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
+{
+	struct dpu_crtc *dpu_crtc;
+
+	if (!crtc)
+		return;
+	dpu_crtc = to_dpu_crtc(crtc);
+	debugfs_remove_recursive(dpu_crtc->debugfs_root);
+}
+#else
+static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
+{
+	return 0;
+}
+
+static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static int dpu_crtc_late_register(struct drm_crtc *crtc)
+{
+	return _dpu_crtc_init_debugfs(crtc);
+}
+
+static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
+{
+	_dpu_crtc_destroy_debugfs(crtc);
+}
+
+static const struct drm_crtc_funcs dpu_crtc_funcs = {
+	.set_config = drm_atomic_helper_set_config,
+	.destroy = dpu_crtc_destroy,
+	.page_flip = drm_atomic_helper_page_flip,
+	.reset = dpu_crtc_reset,
+	.atomic_duplicate_state = dpu_crtc_duplicate_state,
+	.atomic_destroy_state = dpu_crtc_destroy_state,
+	.late_register = dpu_crtc_late_register,
+	.early_unregister = dpu_crtc_early_unregister,
+};
+
+static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
+	.disable = dpu_crtc_disable,
+	.atomic_enable = dpu_crtc_enable,
+	.atomic_check = dpu_crtc_atomic_check,
+	.atomic_begin = dpu_crtc_atomic_begin,
+	.atomic_flush = dpu_crtc_atomic_flush,
+};
+
+static void _dpu_crtc_event_cb(struct kthread_work *work)
+{
+	struct dpu_crtc_event *event;
+	struct dpu_crtc *dpu_crtc;
+	unsigned long irq_flags;
+
+	if (!work) {
+		DPU_ERROR("invalid work item\n");
+		return;
+	}
+
+	event = container_of(work, struct dpu_crtc_event, kt_work);
+
+	/* set dpu_crtc to NULL for static work structures */
+	dpu_crtc = event->dpu_crtc;
+	if (!dpu_crtc)
+		return;
+
+	if (event->cb_func)
+		event->cb_func(&dpu_crtc->base, event->usr);
+
+	spin_lock_irqsave(&dpu_crtc->event_lock, irq_flags);
+	list_add_tail(&event->list, &dpu_crtc->event_free_list);
+	spin_unlock_irqrestore(&dpu_crtc->event_lock, irq_flags);
+}
+
+int dpu_crtc_event_queue(struct drm_crtc *crtc,
+		void (*func)(struct drm_crtc *crtc, void *usr), void *usr)
+{
+	unsigned long irq_flags;
+	struct dpu_crtc *dpu_crtc;
+	struct msm_drm_private *priv;
+	struct dpu_crtc_event *event = NULL;
+	u32 crtc_id;
+
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private || !func) {
+		DPU_ERROR("invalid parameters\n");
+		return -EINVAL;
+	}
+	dpu_crtc = to_dpu_crtc(crtc);
+	priv = crtc->dev->dev_private;
+	crtc_id = drm_crtc_index(crtc);
+
+	/*
+	 * Obtain an event struct from the private cache. This event
+	 * queue may be called from ISR contexts, so use a private
+	 * cache to avoid calling any memory allocation functions.
+	 */
+	spin_lock_irqsave(&dpu_crtc->event_lock, irq_flags);
+	if (!list_empty(&dpu_crtc->event_free_list)) {
+		event = list_first_entry(&dpu_crtc->event_free_list,
+				struct dpu_crtc_event, list);
+		list_del_init(&event->list);
+	}
+	spin_unlock_irqrestore(&dpu_crtc->event_lock, irq_flags);
+
+	if (!event)
+		return -ENOMEM;
+
+	/* populate event node */
+	event->dpu_crtc = dpu_crtc;
+	event->cb_func = func;
+	event->usr = usr;
+
+	/* queue new event request */
+	kthread_init_work(&event->kt_work, _dpu_crtc_event_cb);
+	kthread_queue_work(&priv->event_thread[crtc_id].worker,
+			&event->kt_work);
+
+	return 0;
+}
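+
+/*
+ * Example (hypothetical callback, for illustration only): defer a
+ * completion signal to the per-crtc event thread from a context where
+ * sleeping or allocating is not allowed:
+ *
+ *	static void _foo_done_cb(struct drm_crtc *crtc, void *usr)
+ *	{
+ *		complete((struct completion *)usr);
+ *	}
+ *
+ *	dpu_crtc_event_queue(crtc, _foo_done_cb, &foo_done_comp);
+ */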
+
+static int _dpu_crtc_init_events(struct dpu_crtc *dpu_crtc)
+{
+	int i, rc = 0;
+
+	if (!dpu_crtc) {
+		DPU_ERROR("invalid crtc\n");
+		return -EINVAL;
+	}
+
+	spin_lock_init(&dpu_crtc->event_lock);
+
+	INIT_LIST_HEAD(&dpu_crtc->event_free_list);
+	for (i = 0; i < DPU_CRTC_MAX_EVENT_COUNT; ++i)
+		list_add_tail(&dpu_crtc->event_cache[i].list,
+				&dpu_crtc->event_free_list);
+
+	return rc;
+}
+
+/* initialize crtc */
+struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane)
+{
+	struct drm_crtc *crtc = NULL;
+	struct dpu_crtc *dpu_crtc = NULL;
+	struct msm_drm_private *priv = NULL;
+	struct dpu_kms *kms = NULL;
+	int i, rc;
+
+	priv = dev->dev_private;
+	kms = to_dpu_kms(priv->kms);
+
+	dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
+	if (!dpu_crtc)
+		return ERR_PTR(-ENOMEM);
+
+	crtc = &dpu_crtc->base;
+	crtc->dev = dev;
+
+	mutex_init(&dpu_crtc->crtc_lock);
+	spin_lock_init(&dpu_crtc->spin_lock);
+	atomic_set(&dpu_crtc->frame_pending, 0);
+
+	mutex_init(&dpu_crtc->rp_lock);
+	INIT_LIST_HEAD(&dpu_crtc->rp_head);
+
+	init_completion(&dpu_crtc->frame_done_comp);
+
+	INIT_LIST_HEAD(&dpu_crtc->frame_event_list);
+
+	for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
+		INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
+		list_add(&dpu_crtc->frame_events[i].list,
+				&dpu_crtc->frame_event_list);
+		kthread_init_work(&dpu_crtc->frame_events[i].work,
+				dpu_crtc_frame_event_work);
+	}
+
+	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &dpu_crtc_funcs,
+				NULL);
+
+	drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
+	plane->crtc = crtc;
+
+	/* save user friendly CRTC name for later */
+	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
+
+	/* initialize event handling */
+	rc = _dpu_crtc_init_events(dpu_crtc);
+	if (rc) {
+		drm_crtc_cleanup(crtc);
+		kfree(dpu_crtc);
+		return ERR_PTR(rc);
+	}
+
+	dpu_crtc->phandle = &kms->phandle;
+
+	DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
+	return crtc;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
new file mode 100644
index 000000000000..1284e991f686
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -0,0 +1,491 @@
+/*
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark at gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DPU_CRTC_H_
+#define _DPU_CRTC_H_
+
+#include <linux/kthread.h>
+#include <drm/drm_crtc.h>
+#include "dpu_kms.h"
+#include "dpu_core_perf.h"
+#include "dpu_hw_blk.h"
+
+#define DPU_CRTC_NAME_SIZE	12
+
+/* define the maximum number of in-flight frame events */
+#define DPU_CRTC_FRAME_EVENT_SIZE	4
+
+/**
+ * enum dpu_crtc_client_type: crtc client type
+ * @RT_CLIENT:	RealTime client like video/cmd mode display
+ *              voting through apps rsc
+ * @NRT_CLIENT:	Non-RealTime client like WB display
+ *              voting through apps rsc
+ */
+enum dpu_crtc_client_type {
+	RT_CLIENT,
+	NRT_CLIENT,
+};
+
+/**
+ * enum dpu_crtc_smmu_state:	smmu state
+ * @ATTACHED:	 all the context banks are attached.
+ * @DETACHED:	 all the context banks are detached.
+ * @ATTACH_ALL_REQ:	 transient state of attaching context banks.
+ * @DETACH_ALL_REQ:	 transient state of detaching context banks.
+ */
+enum dpu_crtc_smmu_state {
+	ATTACHED = 0,
+	DETACHED,
+	ATTACH_ALL_REQ,
+	DETACH_ALL_REQ,
+};
+
+/**
+ * enum dpu_crtc_smmu_state_transition_type: state transition type
+ * @NONE: no pending state transitions
+ * @PRE_COMMIT: state transitions should be done before processing the commit
+ * @POST_COMMIT: state transitions to be done after processing the commit.
+ */
+enum dpu_crtc_smmu_state_transition_type {
+	NONE,
+	PRE_COMMIT,
+	POST_COMMIT
+};
+
+/**
+ * struct dpu_crtc_smmu_state_data: stores the smmu state and transition type
+ * @state: current state of smmu context banks
+ * @transition_type: transition request type
+ * @transition_error: whether there is error while transitioning the state
+ */
+struct dpu_crtc_smmu_state_data {
+	uint32_t state;
+	uint32_t transition_type;
+	uint32_t transition_error;
+};
+
+/**
+ * struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC
+ * @hw_lm:	LM HW Driver context
+ * @hw_ctl:	CTL Path HW driver context
+ * @encoder:	Encoder attached to this lm & ctl
+ * @mixer_op_mode:	mixer blending operation mode
+ * @flush_mask:	mixer flush mask for ctl, mixer and pipe
+ */
+struct dpu_crtc_mixer {
+	struct dpu_hw_mixer *hw_lm;
+	struct dpu_hw_ctl *hw_ctl;
+	struct drm_encoder *encoder;
+	u32 mixer_op_mode;
+	u32 flush_mask;
+};
+
+/**
+ * struct dpu_crtc_frame_event: stores crtc frame event for crtc processing
+ * @work:	base work structure
+ * @crtc:	Pointer to crtc handling this event
+ * @list:	event list
+ * @ts:		timestamp at queue entry
+ * @event:	event identifier
+ */
+struct dpu_crtc_frame_event {
+	struct kthread_work work;
+	struct drm_crtc *crtc;
+	struct list_head list;
+	ktime_t ts;
+	u32 event;
+};
+
+/**
+ * struct dpu_crtc_event - event callback tracking structure
+ * @list:     Linked list tracking node
+ * @kt_work:  Kthread worker structure
+ * @dpu_crtc: Pointer to associated dpu_crtc structure
+ * @cb_func:  Pointer to callback function
+ * @usr:      Pointer to user data to be provided to the callback
+ */
+struct dpu_crtc_event {
+	struct list_head list;
+	struct kthread_work kt_work;
+	void *dpu_crtc;
+
+	void (*cb_func)(struct drm_crtc *crtc, void *usr);
+	void *usr;
+};
+
+/*
+ * Maximum number of free event structures to cache
+ */
+#define DPU_CRTC_MAX_EVENT_COUNT	16
+
+/**
+ * struct dpu_crtc - virtualized CRTC data structure
+ * @base          : Base drm crtc structure
+ * @name          : ASCII description of this crtc
+ * @num_ctls      : Number of ctl paths in use
+ * @num_mixers    : Number of mixers in use
+ * @mixers_swapped: Whether the mixers have been swapped for left/right update
+ *                  especially in the case of DSC Merge.
+ * @mixers        : List of active mixers
+ * @event         : Pointer to last received drm vblank event. If there is a
+ *                  pending vblank event, this will be non-null.
+ * @vsync_count   : Running count of received vsync events
+ * @stage_cfg     : H/w mixer stage configuration
+ * @debugfs_root  : Parent of debugfs node
+ * @vblank_cb_count : count of vblank callback since last reset
+ * @play_count    : frame count between crtc enable and disable
+ * @vblank_cb_time  : ktime at vblank count reset
+ * @vblank_requested : whether the user has requested vblank events
+ * @suspend         : whether or not a suspend operation is in progress
+ * @enabled       : whether the DPU CRTC is currently enabled. Updated in the
+ *                  commit thread, not at state-swap time (which is earlier),
+ *                  so it is safe to base decisions on during vblank on/off
+ *                  work
+ * @feature_list  : list of color processing features supported on a crtc
+ * @active_list   : list of color processing features that are active
+ * @dirty_list    : list of color processing features that are dirty
+ * @ad_dirty: list containing ad properties that are dirty
+ * @ad_active: list containing ad properties that are active
+ * @crtc_lock     : crtc lock around create, destroy and access.
+ * @frame_pending : Whether or not an update is pending
+ * @frame_events  : static allocation of in-flight frame events
+ * @frame_event_list : available frame event list
+ * @spin_lock     : spin lock for frame event, transaction status, etc...
+ * @frame_done_comp    : for frame_event_done synchronization
+ * @event_cache   : Local cache of event worker structures
+ * @event_free_list : List of available event structures
+ * @event_lock    : Spinlock around event handling code
+ * @misr_enable   : boolean flag indicating MISR enable/disable status.
+ * @misr_frame_count  : misr frame count provided by client
+ * @misr_data     : stores MISR data before the clocks are turned off.
+ * @phandle: Pointer to power handler
+ * @power_event   : registered power event handle
+ * @cur_perf      : current performance committed to clock/bandwidth driver
+ * @rp_lock       : serialization lock for resource pool
+ * @rp_head       : list of active resource pool
+ * @scl3_lut_cfg  : qseed3 lut config
+ * @smmu_state    : smmu state and transition tracking data
+ */
+struct dpu_crtc {
+	struct drm_crtc base;
+	char name[DPU_CRTC_NAME_SIZE];
+
+	/* HW Resources reserved for the crtc */
+	u32 num_ctls;
+	u32 num_mixers;
+	bool mixers_swapped;
+	struct dpu_crtc_mixer mixers[CRTC_DUAL_MIXERS];
+	struct dpu_hw_scaler3_lut_cfg *scl3_lut_cfg;
+
+	struct drm_pending_vblank_event *event;
+	u32 vsync_count;
+
+	struct dpu_hw_stage_cfg stage_cfg;
+	struct dentry *debugfs_root;
+
+	u32 vblank_cb_count;
+	u64 play_count;
+	ktime_t vblank_cb_time;
+	bool vblank_requested;
+	bool suspend;
+	bool enabled;
+
+	struct list_head feature_list;
+	struct list_head active_list;
+	struct list_head dirty_list;
+	struct list_head ad_dirty;
+	struct list_head ad_active;
+
+	struct mutex crtc_lock;
+
+	atomic_t frame_pending;
+	struct dpu_crtc_frame_event frame_events[DPU_CRTC_FRAME_EVENT_SIZE];
+	struct list_head frame_event_list;
+	spinlock_t spin_lock;
+	struct completion frame_done_comp;
+
+	/* for handling internal event thread */
+	struct dpu_crtc_event event_cache[DPU_CRTC_MAX_EVENT_COUNT];
+	struct list_head event_free_list;
+	spinlock_t event_lock;
+	bool misr_enable;
+	u32 misr_frame_count;
+	u32 misr_data[CRTC_DUAL_MIXERS];
+
+	struct dpu_power_handle *phandle;
+	struct dpu_power_event *power_event;
+
+	struct dpu_core_perf_params cur_perf;
+
+	struct mutex rp_lock;
+	struct list_head rp_head;
+
+	struct dpu_crtc_smmu_state_data smmu_state;
+};
+
+#define to_dpu_crtc(x) container_of(x, struct dpu_crtc, base)
+
+/**
+ * struct dpu_crtc_res_ops - common operations for crtc resources
+ * @get: get given resource
+ * @put: put given resource
+ */
+struct dpu_crtc_res_ops {
+	void *(*get)(void *val, u32 type, u64 tag);
+	void (*put)(void *val);
+};
+
+#define DPU_CRTC_RES_FLAG_FREE		BIT(0)
+
+/**
+ * struct dpu_crtc_res - definition of crtc resources
+ * @list: list of crtc resource
+ * @type: crtc resource type
+ * @tag: unique identifier per type
+ * @refcount: reference/usage count
+ * @ops: callback operations
+ * @val: resource handle associated with type/tag
+ * @flags: customization flags
+ */
+struct dpu_crtc_res {
+	struct list_head list;
+	u32 type;
+	u64 tag;
+	atomic_t refcount;
+	struct dpu_crtc_res_ops ops;
+	void *val;
+	u32 flags;
+};
+
+/**
+ * struct dpu_crtc_respool - crtc resource pool
+ * @rp_lock: pointer to serialization lock
+ * @rp_head: pointer to head of active resource pools of this crtc
+ * @rp_list: list of crtc resource pool
+ * @sequence_id: sequence identifier, incremented per state duplication
+ * @res_list: list of resources managed by this resource pool
+ * @ops: resource operations for parent resource pool
+ */
+struct dpu_crtc_respool {
+	struct mutex *rp_lock;
+	struct list_head *rp_head;
+	struct list_head rp_list;
+	u32 sequence_id;
+	struct list_head res_list;
+	struct dpu_crtc_res_ops ops;
+};
+
+/**
+ * struct dpu_crtc_state - dpu container for atomic crtc state
+ * @base: Base drm crtc state structure
+ * @is_ppsplit    : Whether current topology requires PPSplit special handling
+ * @bw_control    : true if bw/clk controlled by core bw/clk properties
+ * @bw_split_vote : true if bw controlled by llcc/dram bw properties
+ * @lm_bounds     : LM boundaries based on the current mode's full resolution;
+ *                  no ROI. Origin at the top left of the CRTC.
+ * @rp            : resource pool bound to this crtc state
+ * @input_fence_timeout_ns : Cached input fence timeout, in ns
+ * @new_perf: new performance state being requested
+ */
+struct dpu_crtc_state {
+	struct drm_crtc_state base;
+
+	bool bw_control;
+	bool bw_split_vote;
+
+	bool is_ppsplit;
+	struct drm_rect lm_bounds[CRTC_DUAL_MIXERS];
+
+	uint64_t input_fence_timeout_ns;
+
+	struct dpu_core_perf_params new_perf;
+	struct dpu_crtc_respool rp;
+};
+
+#define to_dpu_crtc_state(x) \
+	container_of(x, struct dpu_crtc_state, base)
+
+/**
+ * dpu_crtc_get_mixer_width - get the mixer width
+ * Mixer width will be the same as the panel width (/2 for split)
+ */
+static inline int dpu_crtc_get_mixer_width(struct dpu_crtc *dpu_crtc,
+	struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
+{
+	u32 mixer_width;
+
+	if (!dpu_crtc || !cstate || !mode)
+		return 0;
+
+	mixer_width = (dpu_crtc->num_mixers == CRTC_DUAL_MIXERS ?
+			mode->hdisplay / CRTC_DUAL_MIXERS : mode->hdisplay);
+
+	return mixer_width;
+}
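+
+/*
+ * For example (assuming CRTC_DUAL_MIXERS is 2), a 1920x1080 mode driven
+ * through a dual-mixer topology yields a mixer width of 960, while a
+ * single-mixer topology uses the full 1920.
+ */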
+
+/**
+ * dpu_crtc_get_mixer_height - get the mixer height
+ * Mixer height will be the same as the panel height
+ */
+static inline int dpu_crtc_get_mixer_height(struct dpu_crtc *dpu_crtc,
+		struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
+{
+	if (!dpu_crtc || !cstate || !mode)
+		return 0;
+
+	return mode->vdisplay;
+}
+
+/**
+ * dpu_crtc_frame_pending - return the number of pending frames
+ * @crtc: Pointer to drm crtc object
+ */
+static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
+{
+	struct dpu_crtc *dpu_crtc;
+
+	if (!crtc)
+		return -EINVAL;
+
+	dpu_crtc = to_dpu_crtc(crtc);
+	return atomic_read(&dpu_crtc->frame_pending);
+}
+
+/**
+ * dpu_crtc_vblank - enable or disable vblanks for this crtc
+ * @crtc: Pointer to drm crtc object
+ * @en: true to enable vblanks, false to disable
+ */
+int dpu_crtc_vblank(struct drm_crtc *crtc, bool en);
+
+/**
+ * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
+ * @crtc: Pointer to drm crtc object
+ */
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc);
+
+/**
+ * dpu_crtc_complete_commit - callback signalling completion of current commit
+ * @crtc: Pointer to drm crtc object
+ * @old_state: Pointer to drm crtc old state object
+ */
+void dpu_crtc_complete_commit(struct drm_crtc *crtc,
+		struct drm_crtc_state *old_state);
+
+/**
+ * dpu_crtc_init - create a new crtc object
+ * @dev: dpu device
+ * @plane: base plane
+ * @Return: new crtc object or error
+ */
+struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane);
+
+/**
+ * dpu_crtc_cancel_pending_flip - complete flip for clients on lastclose
+ * @crtc: Pointer to drm crtc object
+ * @file: file handle of the client whose flip is to be cancelled
+ */
+void dpu_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
+
+/**
+ * dpu_crtc_register_custom_event - api for enabling/disabling crtc event
+ * @kms: Pointer to dpu_kms
+ * @crtc_drm: Pointer to crtc object
+ * @event: Event in which the client is interested
+ * @en: Flag to enable/disable the event
+ */
+int dpu_crtc_register_custom_event(struct dpu_kms *kms,
+		struct drm_crtc *crtc_drm, u32 event, bool en);
+
+/**
+ * dpu_crtc_get_intf_mode - get interface mode of the given crtc
+ * @crtc: Pointer to crtc
+ */
+enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc);
+
+/**
+ * dpu_crtc_get_client_type - check the crtc client type (RT, NRT etc.)
+ * @crtc: Pointer to crtc
+ */
+static inline enum dpu_crtc_client_type dpu_crtc_get_client_type(
+						struct drm_crtc *crtc)
+{
+	struct dpu_crtc_state *cstate =
+			crtc ? to_dpu_crtc_state(crtc->state) : NULL;
+
+	if (!cstate)
+		return NRT_CLIENT;
+
+	return RT_CLIENT;
+}
+
+/**
+ * dpu_crtc_is_enabled - check if dpu crtc is enabled or not
+ * @crtc: Pointer to crtc
+ */
+static inline bool dpu_crtc_is_enabled(struct drm_crtc *crtc)
+{
+	return crtc ? crtc->enabled : false;
+}
+
+/**
+ * dpu_crtc_event_queue - request event callback
+ * @crtc: Pointer to drm crtc structure
+ * @func: Pointer to callback function
+ * @usr: Pointer to user data to be passed to callback
+ * Returns: Zero on success
+ */
+int dpu_crtc_event_queue(struct drm_crtc *crtc,
+		void (*func)(struct drm_crtc *crtc, void *usr), void *usr);
+
+/**
+ * dpu_crtc_res_add - add given resource to resource pool in crtc state
+ * @state: Pointer to drm crtc state
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * @val: Resource handle
+ * @ops: Resource callback operations
+ * return: 0 if success; error code otherwise
+ */
+int dpu_crtc_res_add(struct drm_crtc_state *state, u32 type, u64 tag,
+		void *val, struct dpu_crtc_res_ops *ops);
+
+/**
+ * dpu_crtc_res_get - get given resource from resource pool in crtc state
+ * @state: Pointer to drm crtc state
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * return: Resource handle if success; pointer error or null otherwise
+ */
+void *dpu_crtc_res_get(struct drm_crtc_state *state, u32 type, u64 tag);
+
+/**
+ * dpu_crtc_res_put - return given resource to resource pool in crtc state
+ * @state: Pointer to drm crtc state
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * return: None
+ */
+void dpu_crtc_res_put(struct drm_crtc_state *state, u32 type, u64 tag);
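+
+/*
+ * A minimal usage sketch of the resource pool API (the resource type,
+ * tag, and ops values below are hypothetical): park a hardware block
+ * handle in the state-tracked pool and look it up again later on the
+ * same crtc state:
+ *
+ *	static struct dpu_crtc_res_ops foo_res_ops = { NULL, NULL };
+ *
+ *	dpu_crtc_res_add(crtc->state, FOO_RES_TYPE, foo_tag, hw_blk,
+ *			 &foo_res_ops);
+ *	hw_blk = dpu_crtc_res_get(crtc->state, FOO_RES_TYPE, foo_tag);
+ *	dpu_crtc_res_put(crtc->state, FOO_RES_TYPE, foo_tag);
+ */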
+
+#endif /* _DPU_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
new file mode 100644
index 000000000000..ae2aee7ed9e1
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
@@ -0,0 +1,2393 @@
+/* Copyright (c) 2009-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/dma-buf.h>
+#include <linux/slab.h>
+#include <linux/list_sort.h>
+#include <linux/pm_runtime.h>
+
+#include "dpu_dbg.h"
+#include "disp/dpu1/dpu_hw_catalog.h"
+
+
+#define DEFAULT_DBGBUS_DPU	DPU_DBG_DUMP_IN_MEM
+#define DEFAULT_DBGBUS_VBIFRT	DPU_DBG_DUMP_IN_MEM
+#define REG_BASE_NAME_LEN	80
+
+#define DBGBUS_FLAGS_DSPP	BIT(0)
+#define DBGBUS_DSPP_STATUS	0x34C
+
+#define DBGBUS_NAME_DPU		"dpu"
+#define DBGBUS_NAME_VBIF_RT	"vbif_rt"
+
+/* offsets from dpu top address for the debug buses */
+#define DBGBUS_SSPP0	0x188
+#define DBGBUS_AXI_INTF	0x194
+#define DBGBUS_SSPP1	0x298
+#define DBGBUS_DSPP	0x348
+#define DBGBUS_PERIPH	0x418
+
+#define TEST_MASK(id, tp)	((id << 4) | (tp << 1) | BIT(0))
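+
+/*
+ * e.g. TEST_MASK(1, 2) == (1 << 4) | (2 << 1) | BIT(0) == 0x15: the block
+ * id sits at bits [4+], the test point at bits [3:1], and bit 0 enables
+ * the test point.
+ */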
+
+/* following offsets are with respect to MDP VBIF base for DBG BUS access */
+#define MMSS_VBIF_CLKON			0x4
+#define MMSS_VBIF_TEST_BUS_OUT_CTRL	0x210
+#define MMSS_VBIF_TEST_BUS_OUT		0x230
+
+/* Vbif error info */
+#define MMSS_VBIF_PND_ERR		0x190
+#define MMSS_VBIF_SRC_ERR		0x194
+#define MMSS_VBIF_XIN_HALT_CTRL1	0x204
+#define MMSS_VBIF_ERR_INFO		0x1a0
+#define MMSS_VBIF_ERR_INFO_1		0x1a4
+#define MMSS_VBIF_CLIENT_NUM		14
+
+/**
+ * struct dpu_dbg_reg_base - register region base.
+ *	A region may have sub-ranges, in which case the sub-ranges are
+ *	used for dumping; otherwise the whole region from base to
+ *	max_offset is dumped.
+ * @reg_base_head: head of this node
+ * @name: register base name
+ * @base: base pointer
+ * @off: cached offset of region for manual register dumping
+ * @cnt: cached range of region for manual register dumping
+ * @max_offset: length of region
+ * @buf: buffer used for manual register dumping
+ * @buf_len:  buffer length used for manual register dumping
+ * @cb: callback for external dump function, null if not defined
+ * @cb_ptr: private pointer to callback function
+ */
+struct dpu_dbg_reg_base {
+	struct list_head reg_base_head;
+	char name[REG_BASE_NAME_LEN];
+	void __iomem *base;
+	size_t off;
+	size_t cnt;
+	size_t max_offset;
+	char *buf;
+	size_t buf_len;
+	void (*cb)(void *ptr);
+	void *cb_ptr;
+};
+
+struct dpu_debug_bus_entry {
+	u32 wr_addr;
+	u32 block_id;
+	u32 test_id;
+	void (*analyzer)(void __iomem *mem_base,
+				struct dpu_debug_bus_entry *entry, u32 val);
+};
+
+struct vbif_debug_bus_entry {
+	u32 disable_bus_addr;
+	u32 block_bus_addr;
+	u32 bit_offset;
+	u32 block_cnt;
+	u32 test_pnt_start;
+	u32 test_pnt_cnt;
+};
+
+struct dpu_dbg_debug_bus_common {
+	char *name;
+	u32 enable_mask;
+	bool include_in_deferred_work;
+	u32 flags;
+	u32 entries_size;
+	u32 *dumped_content;
+};
+
+struct dpu_dbg_dpu_debug_bus {
+	struct dpu_dbg_debug_bus_common cmn;
+	struct dpu_debug_bus_entry *entries;
+	u32 top_blk_off;
+};
+
+struct dpu_dbg_vbif_debug_bus {
+	struct dpu_dbg_debug_bus_common cmn;
+	struct vbif_debug_bus_entry *entries;
+};
+
+/**
+ * struct dpu_dbg_base - global dpu debug base structure
+ * @reg_base_list: list of register dumping regions
+ * @dev: device pointer
+ * @dump_work: work struct for deferring register dump work to separate thread
+ * @dbgbus_dpu: debug bus structure for the dpu
+ * @dbgbus_vbif_rt: debug bus structure for the realtime vbif
+ */
+static struct dpu_dbg_base {
+	struct list_head reg_base_list;
+	struct device *dev;
+
+	struct work_struct dump_work;
+
+	struct dpu_dbg_dpu_debug_bus dbgbus_dpu;
+	struct dpu_dbg_vbif_debug_bus dbgbus_vbif_rt;
+} dpu_dbg_base;
+
+static void _dpu_debug_bus_xbar_dump(void __iomem *mem_base,
+		struct dpu_debug_bus_entry *entry, u32 val)
+{
+	dev_err(dpu_dbg_base.dev, "xbar 0x%x %d %d 0x%x\n",
+			entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _dpu_debug_bus_lm_dump(void __iomem *mem_base,
+		struct dpu_debug_bus_entry *entry, u32 val)
+{
+	if (!(val & 0xFFF000))
+		return;
+
+	dev_err(dpu_dbg_base.dev, "lm 0x%x %d %d 0x%x\n",
+			entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _dpu_debug_bus_ppb0_dump(void __iomem *mem_base,
+		struct dpu_debug_bus_entry *entry, u32 val)
+{
+	if (!(val & BIT(15)))
+		return;
+
+	dev_err(dpu_dbg_base.dev, "ppb0 0x%x %d %d 0x%x\n",
+			entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _dpu_debug_bus_ppb1_dump(void __iomem *mem_base,
+		struct dpu_debug_bus_entry *entry, u32 val)
+{
+	if (!(val & BIT(15)))
+		return;
+
+	dev_err(dpu_dbg_base.dev, "ppb1 0x%x %d %d 0x%x\n",
+			entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static struct dpu_debug_bus_entry dbg_bus_dpu_8998[] = {
+
+	/* Unpack 0 sspp 0 */
+	{ DBGBUS_SSPP0, 50, 2 },
+	{ DBGBUS_SSPP0, 60, 2 },
+	{ DBGBUS_SSPP0, 70, 2 },
+	{ DBGBUS_SSPP0, 85, 2 },
+
+	/* Unpack 0 sspp 1 */
+	{ DBGBUS_SSPP1, 50, 2 },
+	{ DBGBUS_SSPP1, 60, 2 },
+	{ DBGBUS_SSPP1, 70, 2 },
+	{ DBGBUS_SSPP1, 85, 2 },
+
+	/* scheduler */
+	{ DBGBUS_DSPP, 130, 0 },
+	{ DBGBUS_DSPP, 130, 1 },
+	{ DBGBUS_DSPP, 130, 2 },
+	{ DBGBUS_DSPP, 130, 3 },
+	{ DBGBUS_DSPP, 130, 4 },
+	{ DBGBUS_DSPP, 130, 5 },
+
+	/* qseed */
+	{ DBGBUS_SSPP0, 6, 0},
+	{ DBGBUS_SSPP0, 6, 1},
+	{ DBGBUS_SSPP0, 26, 0},
+	{ DBGBUS_SSPP0, 26, 1},
+	{ DBGBUS_SSPP1, 6, 0},
+	{ DBGBUS_SSPP1, 6, 1},
+	{ DBGBUS_SSPP1, 26, 0},
+	{ DBGBUS_SSPP1, 26, 1},
+
+	/* scale */
+	{ DBGBUS_SSPP0, 16, 0},
+	{ DBGBUS_SSPP0, 16, 1},
+	{ DBGBUS_SSPP0, 36, 0},
+	{ DBGBUS_SSPP0, 36, 1},
+	{ DBGBUS_SSPP1, 16, 0},
+	{ DBGBUS_SSPP1, 16, 1},
+	{ DBGBUS_SSPP1, 36, 0},
+	{ DBGBUS_SSPP1, 36, 1},
+
+	/* fetch sspp0 */
+
+	/* vig 0 */
+	{ DBGBUS_SSPP0, 0, 0 },
+	{ DBGBUS_SSPP0, 0, 1 },
+	{ DBGBUS_SSPP0, 0, 2 },
+	{ DBGBUS_SSPP0, 0, 3 },
+	{ DBGBUS_SSPP0, 0, 4 },
+	{ DBGBUS_SSPP0, 0, 5 },
+	{ DBGBUS_SSPP0, 0, 6 },
+	{ DBGBUS_SSPP0, 0, 7 },
+
+	{ DBGBUS_SSPP0, 1, 0 },
+	{ DBGBUS_SSPP0, 1, 1 },
+	{ DBGBUS_SSPP0, 1, 2 },
+	{ DBGBUS_SSPP0, 1, 3 },
+	{ DBGBUS_SSPP0, 1, 4 },
+	{ DBGBUS_SSPP0, 1, 5 },
+	{ DBGBUS_SSPP0, 1, 6 },
+	{ DBGBUS_SSPP0, 1, 7 },
+
+	{ DBGBUS_SSPP0, 2, 0 },
+	{ DBGBUS_SSPP0, 2, 1 },
+	{ DBGBUS_SSPP0, 2, 2 },
+	{ DBGBUS_SSPP0, 2, 3 },
+	{ DBGBUS_SSPP0, 2, 4 },
+	{ DBGBUS_SSPP0, 2, 5 },
+	{ DBGBUS_SSPP0, 2, 6 },
+	{ DBGBUS_SSPP0, 2, 7 },
+
+	{ DBGBUS_SSPP0, 4, 0 },
+	{ DBGBUS_SSPP0, 4, 1 },
+	{ DBGBUS_SSPP0, 4, 2 },
+	{ DBGBUS_SSPP0, 4, 3 },
+	{ DBGBUS_SSPP0, 4, 4 },
+	{ DBGBUS_SSPP0, 4, 5 },
+	{ DBGBUS_SSPP0, 4, 6 },
+	{ DBGBUS_SSPP0, 4, 7 },
+
+	{ DBGBUS_SSPP0, 5, 0 },
+	{ DBGBUS_SSPP0, 5, 1 },
+	{ DBGBUS_SSPP0, 5, 2 },
+	{ DBGBUS_SSPP0, 5, 3 },
+	{ DBGBUS_SSPP0, 5, 4 },
+	{ DBGBUS_SSPP0, 5, 5 },
+	{ DBGBUS_SSPP0, 5, 6 },
+	{ DBGBUS_SSPP0, 5, 7 },
+
+	/* vig 2 */
+	{ DBGBUS_SSPP0, 20, 0 },
+	{ DBGBUS_SSPP0, 20, 1 },
+	{ DBGBUS_SSPP0, 20, 2 },
+	{ DBGBUS_SSPP0, 20, 3 },
+	{ DBGBUS_SSPP0, 20, 4 },
+	{ DBGBUS_SSPP0, 20, 5 },
+	{ DBGBUS_SSPP0, 20, 6 },
+	{ DBGBUS_SSPP0, 20, 7 },
+
+	{ DBGBUS_SSPP0, 21, 0 },
+	{ DBGBUS_SSPP0, 21, 1 },
+	{ DBGBUS_SSPP0, 21, 2 },
+	{ DBGBUS_SSPP0, 21, 3 },
+	{ DBGBUS_SSPP0, 21, 4 },
+	{ DBGBUS_SSPP0, 21, 5 },
+	{ DBGBUS_SSPP0, 21, 6 },
+	{ DBGBUS_SSPP0, 21, 7 },
+
+	{ DBGBUS_SSPP0, 22, 0 },
+	{ DBGBUS_SSPP0, 22, 1 },
+	{ DBGBUS_SSPP0, 22, 2 },
+	{ DBGBUS_SSPP0, 22, 3 },
+	{ DBGBUS_SSPP0, 22, 4 },
+	{ DBGBUS_SSPP0, 22, 5 },
+	{ DBGBUS_SSPP0, 22, 6 },
+	{ DBGBUS_SSPP0, 22, 7 },
+
+	{ DBGBUS_SSPP0, 24, 0 },
+	{ DBGBUS_SSPP0, 24, 1 },
+	{ DBGBUS_SSPP0, 24, 2 },
+	{ DBGBUS_SSPP0, 24, 3 },
+	{ DBGBUS_SSPP0, 24, 4 },
+	{ DBGBUS_SSPP0, 24, 5 },
+	{ DBGBUS_SSPP0, 24, 6 },
+	{ DBGBUS_SSPP0, 24, 7 },
+
+	{ DBGBUS_SSPP0, 25, 0 },
+	{ DBGBUS_SSPP0, 25, 1 },
+	{ DBGBUS_SSPP0, 25, 2 },
+	{ DBGBUS_SSPP0, 25, 3 },
+	{ DBGBUS_SSPP0, 25, 4 },
+	{ DBGBUS_SSPP0, 25, 5 },
+	{ DBGBUS_SSPP0, 25, 6 },
+	{ DBGBUS_SSPP0, 25, 7 },
+
+	/* dma 2 */
+	{ DBGBUS_SSPP0, 30, 0 },
+	{ DBGBUS_SSPP0, 30, 1 },
+	{ DBGBUS_SSPP0, 30, 2 },
+	{ DBGBUS_SSPP0, 30, 3 },
+	{ DBGBUS_SSPP0, 30, 4 },
+	{ DBGBUS_SSPP0, 30, 5 },
+	{ DBGBUS_SSPP0, 30, 6 },
+	{ DBGBUS_SSPP0, 30, 7 },
+
+	{ DBGBUS_SSPP0, 31, 0 },
+	{ DBGBUS_SSPP0, 31, 1 },
+	{ DBGBUS_SSPP0, 31, 2 },
+	{ DBGBUS_SSPP0, 31, 3 },
+	{ DBGBUS_SSPP0, 31, 4 },
+	{ DBGBUS_SSPP0, 31, 5 },
+	{ DBGBUS_SSPP0, 31, 6 },
+	{ DBGBUS_SSPP0, 31, 7 },
+
+	{ DBGBUS_SSPP0, 32, 0 },
+	{ DBGBUS_SSPP0, 32, 1 },
+	{ DBGBUS_SSPP0, 32, 2 },
+	{ DBGBUS_SSPP0, 32, 3 },
+	{ DBGBUS_SSPP0, 32, 4 },
+	{ DBGBUS_SSPP0, 32, 5 },
+	{ DBGBUS_SSPP0, 32, 6 },
+	{ DBGBUS_SSPP0, 32, 7 },
+
+	{ DBGBUS_SSPP0, 33, 0 },
+	{ DBGBUS_SSPP0, 33, 1 },
+	{ DBGBUS_SSPP0, 33, 2 },
+	{ DBGBUS_SSPP0, 33, 3 },
+	{ DBGBUS_SSPP0, 33, 4 },
+	{ DBGBUS_SSPP0, 33, 5 },
+	{ DBGBUS_SSPP0, 33, 6 },
+	{ DBGBUS_SSPP0, 33, 7 },
+
+	{ DBGBUS_SSPP0, 34, 0 },
+	{ DBGBUS_SSPP0, 34, 1 },
+	{ DBGBUS_SSPP0, 34, 2 },
+	{ DBGBUS_SSPP0, 34, 3 },
+	{ DBGBUS_SSPP0, 34, 4 },
+	{ DBGBUS_SSPP0, 34, 5 },
+	{ DBGBUS_SSPP0, 34, 6 },
+	{ DBGBUS_SSPP0, 34, 7 },
+
+	{ DBGBUS_SSPP0, 35, 0 },
+	{ DBGBUS_SSPP0, 35, 1 },
+	{ DBGBUS_SSPP0, 35, 2 },
+	{ DBGBUS_SSPP0, 35, 3 },
+
+	/* dma 0 */
+	{ DBGBUS_SSPP0, 40, 0 },
+	{ DBGBUS_SSPP0, 40, 1 },
+	{ DBGBUS_SSPP0, 40, 2 },
+	{ DBGBUS_SSPP0, 40, 3 },
+	{ DBGBUS_SSPP0, 40, 4 },
+	{ DBGBUS_SSPP0, 40, 5 },
+	{ DBGBUS_SSPP0, 40, 6 },
+	{ DBGBUS_SSPP0, 40, 7 },
+
+	{ DBGBUS_SSPP0, 41, 0 },
+	{ DBGBUS_SSPP0, 41, 1 },
+	{ DBGBUS_SSPP0, 41, 2 },
+	{ DBGBUS_SSPP0, 41, 3 },
+	{ DBGBUS_SSPP0, 41, 4 },
+	{ DBGBUS_SSPP0, 41, 5 },
+	{ DBGBUS_SSPP0, 41, 6 },
+	{ DBGBUS_SSPP0, 41, 7 },
+
+	{ DBGBUS_SSPP0, 42, 0 },
+	{ DBGBUS_SSPP0, 42, 1 },
+	{ DBGBUS_SSPP0, 42, 2 },
+	{ DBGBUS_SSPP0, 42, 3 },
+	{ DBGBUS_SSPP0, 42, 4 },
+	{ DBGBUS_SSPP0, 42, 5 },
+	{ DBGBUS_SSPP0, 42, 6 },
+	{ DBGBUS_SSPP0, 42, 7 },
+
+	{ DBGBUS_SSPP0, 44, 0 },
+	{ DBGBUS_SSPP0, 44, 1 },
+	{ DBGBUS_SSPP0, 44, 2 },
+	{ DBGBUS_SSPP0, 44, 3 },
+	{ DBGBUS_SSPP0, 44, 4 },
+	{ DBGBUS_SSPP0, 44, 5 },
+	{ DBGBUS_SSPP0, 44, 6 },
+	{ DBGBUS_SSPP0, 44, 7 },
+
+	{ DBGBUS_SSPP0, 45, 0 },
+	{ DBGBUS_SSPP0, 45, 1 },
+	{ DBGBUS_SSPP0, 45, 2 },
+	{ DBGBUS_SSPP0, 45, 3 },
+	{ DBGBUS_SSPP0, 45, 4 },
+	{ DBGBUS_SSPP0, 45, 5 },
+	{ DBGBUS_SSPP0, 45, 6 },
+	{ DBGBUS_SSPP0, 45, 7 },
+
+	/* fetch sspp1 */
+	/* vig 1 */
+	{ DBGBUS_SSPP1, 0, 0 },
+	{ DBGBUS_SSPP1, 0, 1 },
+	{ DBGBUS_SSPP1, 0, 2 },
+	{ DBGBUS_SSPP1, 0, 3 },
+	{ DBGBUS_SSPP1, 0, 4 },
+	{ DBGBUS_SSPP1, 0, 5 },
+	{ DBGBUS_SSPP1, 0, 6 },
+	{ DBGBUS_SSPP1, 0, 7 },
+
+	{ DBGBUS_SSPP1, 1, 0 },
+	{ DBGBUS_SSPP1, 1, 1 },
+	{ DBGBUS_SSPP1, 1, 2 },
+	{ DBGBUS_SSPP1, 1, 3 },
+	{ DBGBUS_SSPP1, 1, 4 },
+	{ DBGBUS_SSPP1, 1, 5 },
+	{ DBGBUS_SSPP1, 1, 6 },
+	{ DBGBUS_SSPP1, 1, 7 },
+
+	{ DBGBUS_SSPP1, 2, 0 },
+	{ DBGBUS_SSPP1, 2, 1 },
+	{ DBGBUS_SSPP1, 2, 2 },
+	{ DBGBUS_SSPP1, 2, 3 },
+	{ DBGBUS_SSPP1, 2, 4 },
+	{ DBGBUS_SSPP1, 2, 5 },
+	{ DBGBUS_SSPP1, 2, 6 },
+	{ DBGBUS_SSPP1, 2, 7 },
+
+	{ DBGBUS_SSPP1, 4, 0 },
+	{ DBGBUS_SSPP1, 4, 1 },
+	{ DBGBUS_SSPP1, 4, 2 },
+	{ DBGBUS_SSPP1, 4, 3 },
+	{ DBGBUS_SSPP1, 4, 4 },
+	{ DBGBUS_SSPP1, 4, 5 },
+	{ DBGBUS_SSPP1, 4, 6 },
+	{ DBGBUS_SSPP1, 4, 7 },
+
+	{ DBGBUS_SSPP1, 5, 0 },
+	{ DBGBUS_SSPP1, 5, 1 },
+	{ DBGBUS_SSPP1, 5, 2 },
+	{ DBGBUS_SSPP1, 5, 3 },
+	{ DBGBUS_SSPP1, 5, 4 },
+	{ DBGBUS_SSPP1, 5, 5 },
+	{ DBGBUS_SSPP1, 5, 6 },
+	{ DBGBUS_SSPP1, 5, 7 },
+
+	/* vig 3 */
+	{ DBGBUS_SSPP1, 20, 0 },
+	{ DBGBUS_SSPP1, 20, 1 },
+	{ DBGBUS_SSPP1, 20, 2 },
+	{ DBGBUS_SSPP1, 20, 3 },
+	{ DBGBUS_SSPP1, 20, 4 },
+	{ DBGBUS_SSPP1, 20, 5 },
+	{ DBGBUS_SSPP1, 20, 6 },
+	{ DBGBUS_SSPP1, 20, 7 },
+
+	{ DBGBUS_SSPP1, 21, 0 },
+	{ DBGBUS_SSPP1, 21, 1 },
+	{ DBGBUS_SSPP1, 21, 2 },
+	{ DBGBUS_SSPP1, 21, 3 },
+	{ DBGBUS_SSPP1, 21, 4 },
+	{ DBGBUS_SSPP1, 21, 5 },
+	{ DBGBUS_SSPP1, 21, 6 },
+	{ DBGBUS_SSPP1, 21, 7 },
+
+	{ DBGBUS_SSPP1, 22, 0 },
+	{ DBGBUS_SSPP1, 22, 1 },
+	{ DBGBUS_SSPP1, 22, 2 },
+	{ DBGBUS_SSPP1, 22, 3 },
+	{ DBGBUS_SSPP1, 22, 4 },
+	{ DBGBUS_SSPP1, 22, 5 },
+	{ DBGBUS_SSPP1, 22, 6 },
+	{ DBGBUS_SSPP1, 22, 7 },
+
+	{ DBGBUS_SSPP1, 24, 0 },
+	{ DBGBUS_SSPP1, 24, 1 },
+	{ DBGBUS_SSPP1, 24, 2 },
+	{ DBGBUS_SSPP1, 24, 3 },
+	{ DBGBUS_SSPP1, 24, 4 },
+	{ DBGBUS_SSPP1, 24, 5 },
+	{ DBGBUS_SSPP1, 24, 6 },
+	{ DBGBUS_SSPP1, 24, 7 },
+
+	{ DBGBUS_SSPP1, 25, 0 },
+	{ DBGBUS_SSPP1, 25, 1 },
+	{ DBGBUS_SSPP1, 25, 2 },
+	{ DBGBUS_SSPP1, 25, 3 },
+	{ DBGBUS_SSPP1, 25, 4 },
+	{ DBGBUS_SSPP1, 25, 5 },
+	{ DBGBUS_SSPP1, 25, 6 },
+	{ DBGBUS_SSPP1, 25, 7 },
+
+	/* dma 3 */
+	{ DBGBUS_SSPP1, 30, 0 },
+	{ DBGBUS_SSPP1, 30, 1 },
+	{ DBGBUS_SSPP1, 30, 2 },
+	{ DBGBUS_SSPP1, 30, 3 },
+	{ DBGBUS_SSPP1, 30, 4 },
+	{ DBGBUS_SSPP1, 30, 5 },
+	{ DBGBUS_SSPP1, 30, 6 },
+	{ DBGBUS_SSPP1, 30, 7 },
+
+	{ DBGBUS_SSPP1, 31, 0 },
+	{ DBGBUS_SSPP1, 31, 1 },
+	{ DBGBUS_SSPP1, 31, 2 },
+	{ DBGBUS_SSPP1, 31, 3 },
+	{ DBGBUS_SSPP1, 31, 4 },
+	{ DBGBUS_SSPP1, 31, 5 },
+	{ DBGBUS_SSPP1, 31, 6 },
+	{ DBGBUS_SSPP1, 31, 7 },
+
+	{ DBGBUS_SSPP1, 32, 0 },
+	{ DBGBUS_SSPP1, 32, 1 },
+	{ DBGBUS_SSPP1, 32, 2 },
+	{ DBGBUS_SSPP1, 32, 3 },
+	{ DBGBUS_SSPP1, 32, 4 },
+	{ DBGBUS_SSPP1, 32, 5 },
+	{ DBGBUS_SSPP1, 32, 6 },
+	{ DBGBUS_SSPP1, 32, 7 },
+
+	{ DBGBUS_SSPP1, 33, 0 },
+	{ DBGBUS_SSPP1, 33, 1 },
+	{ DBGBUS_SSPP1, 33, 2 },
+	{ DBGBUS_SSPP1, 33, 3 },
+	{ DBGBUS_SSPP1, 33, 4 },
+	{ DBGBUS_SSPP1, 33, 5 },
+	{ DBGBUS_SSPP1, 33, 6 },
+	{ DBGBUS_SSPP1, 33, 7 },
+
+	{ DBGBUS_SSPP1, 34, 0 },
+	{ DBGBUS_SSPP1, 34, 1 },
+	{ DBGBUS_SSPP1, 34, 2 },
+	{ DBGBUS_SSPP1, 34, 3 },
+	{ DBGBUS_SSPP1, 34, 4 },
+	{ DBGBUS_SSPP1, 34, 5 },
+	{ DBGBUS_SSPP1, 34, 6 },
+	{ DBGBUS_SSPP1, 34, 7 },
+
+	{ DBGBUS_SSPP1, 35, 0 },
+	{ DBGBUS_SSPP1, 35, 1 },
+	{ DBGBUS_SSPP1, 35, 2 },
+
+	/* dma 1 */
+	{ DBGBUS_SSPP1, 40, 0 },
+	{ DBGBUS_SSPP1, 40, 1 },
+	{ DBGBUS_SSPP1, 40, 2 },
+	{ DBGBUS_SSPP1, 40, 3 },
+	{ DBGBUS_SSPP1, 40, 4 },
+	{ DBGBUS_SSPP1, 40, 5 },
+	{ DBGBUS_SSPP1, 40, 6 },
+	{ DBGBUS_SSPP1, 40, 7 },
+
+	{ DBGBUS_SSPP1, 41, 0 },
+	{ DBGBUS_SSPP1, 41, 1 },
+	{ DBGBUS_SSPP1, 41, 2 },
+	{ DBGBUS_SSPP1, 41, 3 },
+	{ DBGBUS_SSPP1, 41, 4 },
+	{ DBGBUS_SSPP1, 41, 5 },
+	{ DBGBUS_SSPP1, 41, 6 },
+	{ DBGBUS_SSPP1, 41, 7 },
+
+	{ DBGBUS_SSPP1, 42, 0 },
+	{ DBGBUS_SSPP1, 42, 1 },
+	{ DBGBUS_SSPP1, 42, 2 },
+	{ DBGBUS_SSPP1, 42, 3 },
+	{ DBGBUS_SSPP1, 42, 4 },
+	{ DBGBUS_SSPP1, 42, 5 },
+	{ DBGBUS_SSPP1, 42, 6 },
+	{ DBGBUS_SSPP1, 42, 7 },
+
+	{ DBGBUS_SSPP1, 44, 0 },
+	{ DBGBUS_SSPP1, 44, 1 },
+	{ DBGBUS_SSPP1, 44, 2 },
+	{ DBGBUS_SSPP1, 44, 3 },
+	{ DBGBUS_SSPP1, 44, 4 },
+	{ DBGBUS_SSPP1, 44, 5 },
+	{ DBGBUS_SSPP1, 44, 6 },
+	{ DBGBUS_SSPP1, 44, 7 },
+
+	{ DBGBUS_SSPP1, 45, 0 },
+	{ DBGBUS_SSPP1, 45, 1 },
+	{ DBGBUS_SSPP1, 45, 2 },
+	{ DBGBUS_SSPP1, 45, 3 },
+	{ DBGBUS_SSPP1, 45, 4 },
+	{ DBGBUS_SSPP1, 45, 5 },
+	{ DBGBUS_SSPP1, 45, 6 },
+	{ DBGBUS_SSPP1, 45, 7 },
+
+	/* cursor 1 */
+	{ DBGBUS_SSPP1, 80, 0 },
+	{ DBGBUS_SSPP1, 80, 1 },
+	{ DBGBUS_SSPP1, 80, 2 },
+	{ DBGBUS_SSPP1, 80, 3 },
+	{ DBGBUS_SSPP1, 80, 4 },
+	{ DBGBUS_SSPP1, 80, 5 },
+	{ DBGBUS_SSPP1, 80, 6 },
+	{ DBGBUS_SSPP1, 80, 7 },
+
+	{ DBGBUS_SSPP1, 81, 0 },
+	{ DBGBUS_SSPP1, 81, 1 },
+	{ DBGBUS_SSPP1, 81, 2 },
+	{ DBGBUS_SSPP1, 81, 3 },
+	{ DBGBUS_SSPP1, 81, 4 },
+	{ DBGBUS_SSPP1, 81, 5 },
+	{ DBGBUS_SSPP1, 81, 6 },
+	{ DBGBUS_SSPP1, 81, 7 },
+
+	{ DBGBUS_SSPP1, 82, 0 },
+	{ DBGBUS_SSPP1, 82, 1 },
+	{ DBGBUS_SSPP1, 82, 2 },
+	{ DBGBUS_SSPP1, 82, 3 },
+	{ DBGBUS_SSPP1, 82, 4 },
+	{ DBGBUS_SSPP1, 82, 5 },
+	{ DBGBUS_SSPP1, 82, 6 },
+	{ DBGBUS_SSPP1, 82, 7 },
+
+	{ DBGBUS_SSPP1, 83, 0 },
+	{ DBGBUS_SSPP1, 83, 1 },
+	{ DBGBUS_SSPP1, 83, 2 },
+	{ DBGBUS_SSPP1, 83, 3 },
+	{ DBGBUS_SSPP1, 83, 4 },
+	{ DBGBUS_SSPP1, 83, 5 },
+	{ DBGBUS_SSPP1, 83, 6 },
+	{ DBGBUS_SSPP1, 83, 7 },
+
+	{ DBGBUS_SSPP1, 84, 0 },
+	{ DBGBUS_SSPP1, 84, 1 },
+	{ DBGBUS_SSPP1, 84, 2 },
+	{ DBGBUS_SSPP1, 84, 3 },
+	{ DBGBUS_SSPP1, 84, 4 },
+	{ DBGBUS_SSPP1, 84, 5 },
+	{ DBGBUS_SSPP1, 84, 6 },
+	{ DBGBUS_SSPP1, 84, 7 },
+
+	/* dspp */
+	{ DBGBUS_DSPP, 13, 0 },
+	{ DBGBUS_DSPP, 19, 0 },
+	{ DBGBUS_DSPP, 14, 0 },
+	{ DBGBUS_DSPP, 14, 1 },
+	{ DBGBUS_DSPP, 14, 3 },
+	{ DBGBUS_DSPP, 20, 0 },
+	{ DBGBUS_DSPP, 20, 1 },
+	{ DBGBUS_DSPP, 20, 3 },
+
+	/* ppb_0 */
+	{ DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
+	{ DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
+	{ DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
+	{ DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
+
+	/* ppb_1 */
+	{ DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
+	{ DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
+	{ DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
+	{ DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
+
+	/* lm_lut */
+	{ DBGBUS_DSPP, 109, 0 },
+	{ DBGBUS_DSPP, 105, 0 },
+	{ DBGBUS_DSPP, 103, 0 },
+
+	/* tear-check */
+	{ DBGBUS_PERIPH, 63, 0 },
+	{ DBGBUS_PERIPH, 64, 0 },
+	{ DBGBUS_PERIPH, 65, 0 },
+	{ DBGBUS_PERIPH, 73, 0 },
+	{ DBGBUS_PERIPH, 74, 0 },
+
+	/* crossbar */
+	{ DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
+
+	/* rotator */
+	{ DBGBUS_DSPP, 9, 0},
+
+	/* blend */
+	/* LM0 */
+	{ DBGBUS_DSPP, 63, 0},
+	{ DBGBUS_DSPP, 63, 1},
+	{ DBGBUS_DSPP, 63, 2},
+	{ DBGBUS_DSPP, 63, 3},
+	{ DBGBUS_DSPP, 63, 4},
+	{ DBGBUS_DSPP, 63, 5},
+	{ DBGBUS_DSPP, 63, 6},
+	{ DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 64, 0},
+	{ DBGBUS_DSPP, 64, 1},
+	{ DBGBUS_DSPP, 64, 2},
+	{ DBGBUS_DSPP, 64, 3},
+	{ DBGBUS_DSPP, 64, 4},
+	{ DBGBUS_DSPP, 64, 5},
+	{ DBGBUS_DSPP, 64, 6},
+	{ DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 65, 0},
+	{ DBGBUS_DSPP, 65, 1},
+	{ DBGBUS_DSPP, 65, 2},
+	{ DBGBUS_DSPP, 65, 3},
+	{ DBGBUS_DSPP, 65, 4},
+	{ DBGBUS_DSPP, 65, 5},
+	{ DBGBUS_DSPP, 65, 6},
+	{ DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 66, 0},
+	{ DBGBUS_DSPP, 66, 1},
+	{ DBGBUS_DSPP, 66, 2},
+	{ DBGBUS_DSPP, 66, 3},
+	{ DBGBUS_DSPP, 66, 4},
+	{ DBGBUS_DSPP, 66, 5},
+	{ DBGBUS_DSPP, 66, 6},
+	{ DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 67, 0},
+	{ DBGBUS_DSPP, 67, 1},
+	{ DBGBUS_DSPP, 67, 2},
+	{ DBGBUS_DSPP, 67, 3},
+	{ DBGBUS_DSPP, 67, 4},
+	{ DBGBUS_DSPP, 67, 5},
+	{ DBGBUS_DSPP, 67, 6},
+	{ DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 68, 0},
+	{ DBGBUS_DSPP, 68, 1},
+	{ DBGBUS_DSPP, 68, 2},
+	{ DBGBUS_DSPP, 68, 3},
+	{ DBGBUS_DSPP, 68, 4},
+	{ DBGBUS_DSPP, 68, 5},
+	{ DBGBUS_DSPP, 68, 6},
+	{ DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 69, 0},
+	{ DBGBUS_DSPP, 69, 1},
+	{ DBGBUS_DSPP, 69, 2},
+	{ DBGBUS_DSPP, 69, 3},
+	{ DBGBUS_DSPP, 69, 4},
+	{ DBGBUS_DSPP, 69, 5},
+	{ DBGBUS_DSPP, 69, 6},
+	{ DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
+
+	/* LM1 */
+	{ DBGBUS_DSPP, 70, 0},
+	{ DBGBUS_DSPP, 70, 1},
+	{ DBGBUS_DSPP, 70, 2},
+	{ DBGBUS_DSPP, 70, 3},
+	{ DBGBUS_DSPP, 70, 4},
+	{ DBGBUS_DSPP, 70, 5},
+	{ DBGBUS_DSPP, 70, 6},
+	{ DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 71, 0},
+	{ DBGBUS_DSPP, 71, 1},
+	{ DBGBUS_DSPP, 71, 2},
+	{ DBGBUS_DSPP, 71, 3},
+	{ DBGBUS_DSPP, 71, 4},
+	{ DBGBUS_DSPP, 71, 5},
+	{ DBGBUS_DSPP, 71, 6},
+	{ DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 72, 0},
+	{ DBGBUS_DSPP, 72, 1},
+	{ DBGBUS_DSPP, 72, 2},
+	{ DBGBUS_DSPP, 72, 3},
+	{ DBGBUS_DSPP, 72, 4},
+	{ DBGBUS_DSPP, 72, 5},
+	{ DBGBUS_DSPP, 72, 6},
+	{ DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 73, 0},
+	{ DBGBUS_DSPP, 73, 1},
+	{ DBGBUS_DSPP, 73, 2},
+	{ DBGBUS_DSPP, 73, 3},
+	{ DBGBUS_DSPP, 73, 4},
+	{ DBGBUS_DSPP, 73, 5},
+	{ DBGBUS_DSPP, 73, 6},
+	{ DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 74, 0},
+	{ DBGBUS_DSPP, 74, 1},
+	{ DBGBUS_DSPP, 74, 2},
+	{ DBGBUS_DSPP, 74, 3},
+	{ DBGBUS_DSPP, 74, 4},
+	{ DBGBUS_DSPP, 74, 5},
+	{ DBGBUS_DSPP, 74, 6},
+	{ DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 75, 0},
+	{ DBGBUS_DSPP, 75, 1},
+	{ DBGBUS_DSPP, 75, 2},
+	{ DBGBUS_DSPP, 75, 3},
+	{ DBGBUS_DSPP, 75, 4},
+	{ DBGBUS_DSPP, 75, 5},
+	{ DBGBUS_DSPP, 75, 6},
+	{ DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 76, 0},
+	{ DBGBUS_DSPP, 76, 1},
+	{ DBGBUS_DSPP, 76, 2},
+	{ DBGBUS_DSPP, 76, 3},
+	{ DBGBUS_DSPP, 76, 4},
+	{ DBGBUS_DSPP, 76, 5},
+	{ DBGBUS_DSPP, 76, 6},
+	{ DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
+
+	/* LM2 */
+	{ DBGBUS_DSPP, 77, 0},
+	{ DBGBUS_DSPP, 77, 1},
+	{ DBGBUS_DSPP, 77, 2},
+	{ DBGBUS_DSPP, 77, 3},
+	{ DBGBUS_DSPP, 77, 4},
+	{ DBGBUS_DSPP, 77, 5},
+	{ DBGBUS_DSPP, 77, 6},
+	{ DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 78, 0},
+	{ DBGBUS_DSPP, 78, 1},
+	{ DBGBUS_DSPP, 78, 2},
+	{ DBGBUS_DSPP, 78, 3},
+	{ DBGBUS_DSPP, 78, 4},
+	{ DBGBUS_DSPP, 78, 5},
+	{ DBGBUS_DSPP, 78, 6},
+	{ DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 79, 0},
+	{ DBGBUS_DSPP, 79, 1},
+	{ DBGBUS_DSPP, 79, 2},
+	{ DBGBUS_DSPP, 79, 3},
+	{ DBGBUS_DSPP, 79, 4},
+	{ DBGBUS_DSPP, 79, 5},
+	{ DBGBUS_DSPP, 79, 6},
+	{ DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 80, 0},
+	{ DBGBUS_DSPP, 80, 1},
+	{ DBGBUS_DSPP, 80, 2},
+	{ DBGBUS_DSPP, 80, 3},
+	{ DBGBUS_DSPP, 80, 4},
+	{ DBGBUS_DSPP, 80, 5},
+	{ DBGBUS_DSPP, 80, 6},
+	{ DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 81, 0},
+	{ DBGBUS_DSPP, 81, 1},
+	{ DBGBUS_DSPP, 81, 2},
+	{ DBGBUS_DSPP, 81, 3},
+	{ DBGBUS_DSPP, 81, 4},
+	{ DBGBUS_DSPP, 81, 5},
+	{ DBGBUS_DSPP, 81, 6},
+	{ DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 82, 0},
+	{ DBGBUS_DSPP, 82, 1},
+	{ DBGBUS_DSPP, 82, 2},
+	{ DBGBUS_DSPP, 82, 3},
+	{ DBGBUS_DSPP, 82, 4},
+	{ DBGBUS_DSPP, 82, 5},
+	{ DBGBUS_DSPP, 82, 6},
+	{ DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 83, 0},
+	{ DBGBUS_DSPP, 83, 1},
+	{ DBGBUS_DSPP, 83, 2},
+	{ DBGBUS_DSPP, 83, 3},
+	{ DBGBUS_DSPP, 83, 4},
+	{ DBGBUS_DSPP, 83, 5},
+	{ DBGBUS_DSPP, 83, 6},
+	{ DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
+
+	/* csc */
+	{ DBGBUS_SSPP0, 7, 0},
+	{ DBGBUS_SSPP0, 7, 1},
+	{ DBGBUS_SSPP0, 27, 0},
+	{ DBGBUS_SSPP0, 27, 1},
+	{ DBGBUS_SSPP1, 7, 0},
+	{ DBGBUS_SSPP1, 7, 1},
+	{ DBGBUS_SSPP1, 27, 0},
+	{ DBGBUS_SSPP1, 27, 1},
+
+	/* pcc */
+	{ DBGBUS_SSPP0, 3,  3},
+	{ DBGBUS_SSPP0, 23, 3},
+	{ DBGBUS_SSPP0, 33, 3},
+	{ DBGBUS_SSPP0, 43, 3},
+	{ DBGBUS_SSPP1, 3,  3},
+	{ DBGBUS_SSPP1, 23, 3},
+	{ DBGBUS_SSPP1, 33, 3},
+	{ DBGBUS_SSPP1, 43, 3},
+
+	/* spa */
+	{ DBGBUS_SSPP0, 8,  0},
+	{ DBGBUS_SSPP0, 28, 0},
+	{ DBGBUS_SSPP1, 8,  0},
+	{ DBGBUS_SSPP1, 28, 0},
+	{ DBGBUS_DSPP, 13, 0},
+	{ DBGBUS_DSPP, 19, 0},
+
+	/* igc */
+	{ DBGBUS_SSPP0, 9,  0},
+	{ DBGBUS_SSPP0, 9,  1},
+	{ DBGBUS_SSPP0, 9,  3},
+	{ DBGBUS_SSPP0, 29, 0},
+	{ DBGBUS_SSPP0, 29, 1},
+	{ DBGBUS_SSPP0, 29, 3},
+	{ DBGBUS_SSPP0, 17, 0},
+	{ DBGBUS_SSPP0, 17, 1},
+	{ DBGBUS_SSPP0, 17, 3},
+	{ DBGBUS_SSPP0, 37, 0},
+	{ DBGBUS_SSPP0, 37, 1},
+	{ DBGBUS_SSPP0, 37, 3},
+	{ DBGBUS_SSPP0, 46, 0},
+	{ DBGBUS_SSPP0, 46, 1},
+	{ DBGBUS_SSPP0, 46, 3},
+
+	{ DBGBUS_SSPP1, 9,  0},
+	{ DBGBUS_SSPP1, 9,  1},
+	{ DBGBUS_SSPP1, 9,  3},
+	{ DBGBUS_SSPP1, 29, 0},
+	{ DBGBUS_SSPP1, 29, 1},
+	{ DBGBUS_SSPP1, 29, 3},
+	{ DBGBUS_SSPP1, 17, 0},
+	{ DBGBUS_SSPP1, 17, 1},
+	{ DBGBUS_SSPP1, 17, 3},
+	{ DBGBUS_SSPP1, 37, 0},
+	{ DBGBUS_SSPP1, 37, 1},
+	{ DBGBUS_SSPP1, 37, 3},
+	{ DBGBUS_SSPP1, 46, 0},
+	{ DBGBUS_SSPP1, 46, 1},
+	{ DBGBUS_SSPP1, 46, 3},
+
+	{ DBGBUS_DSPP, 14, 0},
+	{ DBGBUS_DSPP, 14, 1},
+	{ DBGBUS_DSPP, 14, 3},
+	{ DBGBUS_DSPP, 20, 0},
+	{ DBGBUS_DSPP, 20, 1},
+	{ DBGBUS_DSPP, 20, 3},
+
+	{ DBGBUS_PERIPH, 60, 0},
+};
+
+static struct dpu_debug_bus_entry dbg_bus_dpu_sdm845[] = {
+
+	/* Unpack 0 sspp 0 */
+	{ DBGBUS_SSPP0, 50, 2 },
+	{ DBGBUS_SSPP0, 60, 2 },
+	{ DBGBUS_SSPP0, 70, 2 },
+
+	/* Unpack 0 sspp 1 */
+	{ DBGBUS_SSPP1, 50, 2 },
+	{ DBGBUS_SSPP1, 60, 2 },
+	{ DBGBUS_SSPP1, 70, 2 },
+
+	/* scheduler */
+	{ DBGBUS_DSPP, 130, 0 },
+	{ DBGBUS_DSPP, 130, 1 },
+	{ DBGBUS_DSPP, 130, 2 },
+	{ DBGBUS_DSPP, 130, 3 },
+	{ DBGBUS_DSPP, 130, 4 },
+	{ DBGBUS_DSPP, 130, 5 },
+
+	/* qseed */
+	{ DBGBUS_SSPP0, 6, 0},
+	{ DBGBUS_SSPP0, 6, 1},
+	{ DBGBUS_SSPP0, 26, 0},
+	{ DBGBUS_SSPP0, 26, 1},
+	{ DBGBUS_SSPP1, 6, 0},
+	{ DBGBUS_SSPP1, 6, 1},
+	{ DBGBUS_SSPP1, 26, 0},
+	{ DBGBUS_SSPP1, 26, 1},
+
+	/* scale */
+	{ DBGBUS_SSPP0, 16, 0},
+	{ DBGBUS_SSPP0, 16, 1},
+	{ DBGBUS_SSPP0, 36, 0},
+	{ DBGBUS_SSPP0, 36, 1},
+	{ DBGBUS_SSPP1, 16, 0},
+	{ DBGBUS_SSPP1, 16, 1},
+	{ DBGBUS_SSPP1, 36, 0},
+	{ DBGBUS_SSPP1, 36, 1},
+
+	/* fetch sspp0 */
+
+	/* vig 0 */
+	{ DBGBUS_SSPP0, 0, 0 },
+	{ DBGBUS_SSPP0, 0, 1 },
+	{ DBGBUS_SSPP0, 0, 2 },
+	{ DBGBUS_SSPP0, 0, 3 },
+	{ DBGBUS_SSPP0, 0, 4 },
+	{ DBGBUS_SSPP0, 0, 5 },
+	{ DBGBUS_SSPP0, 0, 6 },
+	{ DBGBUS_SSPP0, 0, 7 },
+
+	{ DBGBUS_SSPP0, 1, 0 },
+	{ DBGBUS_SSPP0, 1, 1 },
+	{ DBGBUS_SSPP0, 1, 2 },
+	{ DBGBUS_SSPP0, 1, 3 },
+	{ DBGBUS_SSPP0, 1, 4 },
+	{ DBGBUS_SSPP0, 1, 5 },
+	{ DBGBUS_SSPP0, 1, 6 },
+	{ DBGBUS_SSPP0, 1, 7 },
+
+	{ DBGBUS_SSPP0, 2, 0 },
+	{ DBGBUS_SSPP0, 2, 1 },
+	{ DBGBUS_SSPP0, 2, 2 },
+	{ DBGBUS_SSPP0, 2, 3 },
+	{ DBGBUS_SSPP0, 2, 4 },
+	{ DBGBUS_SSPP0, 2, 5 },
+	{ DBGBUS_SSPP0, 2, 6 },
+	{ DBGBUS_SSPP0, 2, 7 },
+
+	{ DBGBUS_SSPP0, 4, 0 },
+	{ DBGBUS_SSPP0, 4, 1 },
+	{ DBGBUS_SSPP0, 4, 2 },
+	{ DBGBUS_SSPP0, 4, 3 },
+	{ DBGBUS_SSPP0, 4, 4 },
+	{ DBGBUS_SSPP0, 4, 5 },
+	{ DBGBUS_SSPP0, 4, 6 },
+	{ DBGBUS_SSPP0, 4, 7 },
+
+	{ DBGBUS_SSPP0, 5, 0 },
+	{ DBGBUS_SSPP0, 5, 1 },
+	{ DBGBUS_SSPP0, 5, 2 },
+	{ DBGBUS_SSPP0, 5, 3 },
+	{ DBGBUS_SSPP0, 5, 4 },
+	{ DBGBUS_SSPP0, 5, 5 },
+	{ DBGBUS_SSPP0, 5, 6 },
+	{ DBGBUS_SSPP0, 5, 7 },
+
+	/* vig 2 */
+	{ DBGBUS_SSPP0, 20, 0 },
+	{ DBGBUS_SSPP0, 20, 1 },
+	{ DBGBUS_SSPP0, 20, 2 },
+	{ DBGBUS_SSPP0, 20, 3 },
+	{ DBGBUS_SSPP0, 20, 4 },
+	{ DBGBUS_SSPP0, 20, 5 },
+	{ DBGBUS_SSPP0, 20, 6 },
+	{ DBGBUS_SSPP0, 20, 7 },
+
+	{ DBGBUS_SSPP0, 21, 0 },
+	{ DBGBUS_SSPP0, 21, 1 },
+	{ DBGBUS_SSPP0, 21, 2 },
+	{ DBGBUS_SSPP0, 21, 3 },
+	{ DBGBUS_SSPP0, 21, 4 },
+	{ DBGBUS_SSPP0, 21, 5 },
+	{ DBGBUS_SSPP0, 21, 6 },
+	{ DBGBUS_SSPP0, 21, 7 },
+
+	{ DBGBUS_SSPP0, 22, 0 },
+	{ DBGBUS_SSPP0, 22, 1 },
+	{ DBGBUS_SSPP0, 22, 2 },
+	{ DBGBUS_SSPP0, 22, 3 },
+	{ DBGBUS_SSPP0, 22, 4 },
+	{ DBGBUS_SSPP0, 22, 5 },
+	{ DBGBUS_SSPP0, 22, 6 },
+	{ DBGBUS_SSPP0, 22, 7 },
+
+	{ DBGBUS_SSPP0, 24, 0 },
+	{ DBGBUS_SSPP0, 24, 1 },
+	{ DBGBUS_SSPP0, 24, 2 },
+	{ DBGBUS_SSPP0, 24, 3 },
+	{ DBGBUS_SSPP0, 24, 4 },
+	{ DBGBUS_SSPP0, 24, 5 },
+	{ DBGBUS_SSPP0, 24, 6 },
+	{ DBGBUS_SSPP0, 24, 7 },
+
+	{ DBGBUS_SSPP0, 25, 0 },
+	{ DBGBUS_SSPP0, 25, 1 },
+	{ DBGBUS_SSPP0, 25, 2 },
+	{ DBGBUS_SSPP0, 25, 3 },
+	{ DBGBUS_SSPP0, 25, 4 },
+	{ DBGBUS_SSPP0, 25, 5 },
+	{ DBGBUS_SSPP0, 25, 6 },
+	{ DBGBUS_SSPP0, 25, 7 },
+
+	/* dma 2 */
+	{ DBGBUS_SSPP0, 30, 0 },
+	{ DBGBUS_SSPP0, 30, 1 },
+	{ DBGBUS_SSPP0, 30, 2 },
+	{ DBGBUS_SSPP0, 30, 3 },
+	{ DBGBUS_SSPP0, 30, 4 },
+	{ DBGBUS_SSPP0, 30, 5 },
+	{ DBGBUS_SSPP0, 30, 6 },
+	{ DBGBUS_SSPP0, 30, 7 },
+
+	{ DBGBUS_SSPP0, 31, 0 },
+	{ DBGBUS_SSPP0, 31, 1 },
+	{ DBGBUS_SSPP0, 31, 2 },
+	{ DBGBUS_SSPP0, 31, 3 },
+	{ DBGBUS_SSPP0, 31, 4 },
+	{ DBGBUS_SSPP0, 31, 5 },
+	{ DBGBUS_SSPP0, 31, 6 },
+	{ DBGBUS_SSPP0, 31, 7 },
+
+	{ DBGBUS_SSPP0, 32, 0 },
+	{ DBGBUS_SSPP0, 32, 1 },
+	{ DBGBUS_SSPP0, 32, 2 },
+	{ DBGBUS_SSPP0, 32, 3 },
+	{ DBGBUS_SSPP0, 32, 4 },
+	{ DBGBUS_SSPP0, 32, 5 },
+	{ DBGBUS_SSPP0, 32, 6 },
+	{ DBGBUS_SSPP0, 32, 7 },
+
+	{ DBGBUS_SSPP0, 33, 0 },
+	{ DBGBUS_SSPP0, 33, 1 },
+	{ DBGBUS_SSPP0, 33, 2 },
+	{ DBGBUS_SSPP0, 33, 3 },
+	{ DBGBUS_SSPP0, 33, 4 },
+	{ DBGBUS_SSPP0, 33, 5 },
+	{ DBGBUS_SSPP0, 33, 6 },
+	{ DBGBUS_SSPP0, 33, 7 },
+
+	{ DBGBUS_SSPP0, 34, 0 },
+	{ DBGBUS_SSPP0, 34, 1 },
+	{ DBGBUS_SSPP0, 34, 2 },
+	{ DBGBUS_SSPP0, 34, 3 },
+	{ DBGBUS_SSPP0, 34, 4 },
+	{ DBGBUS_SSPP0, 34, 5 },
+	{ DBGBUS_SSPP0, 34, 6 },
+	{ DBGBUS_SSPP0, 34, 7 },
+
+	{ DBGBUS_SSPP0, 35, 0 },
+	{ DBGBUS_SSPP0, 35, 1 },
+	{ DBGBUS_SSPP0, 35, 2 },
+	{ DBGBUS_SSPP0, 35, 3 },
+
+	/* dma 0 */
+	{ DBGBUS_SSPP0, 40, 0 },
+	{ DBGBUS_SSPP0, 40, 1 },
+	{ DBGBUS_SSPP0, 40, 2 },
+	{ DBGBUS_SSPP0, 40, 3 },
+	{ DBGBUS_SSPP0, 40, 4 },
+	{ DBGBUS_SSPP0, 40, 5 },
+	{ DBGBUS_SSPP0, 40, 6 },
+	{ DBGBUS_SSPP0, 40, 7 },
+
+	{ DBGBUS_SSPP0, 41, 0 },
+	{ DBGBUS_SSPP0, 41, 1 },
+	{ DBGBUS_SSPP0, 41, 2 },
+	{ DBGBUS_SSPP0, 41, 3 },
+	{ DBGBUS_SSPP0, 41, 4 },
+	{ DBGBUS_SSPP0, 41, 5 },
+	{ DBGBUS_SSPP0, 41, 6 },
+	{ DBGBUS_SSPP0, 41, 7 },
+
+	{ DBGBUS_SSPP0, 42, 0 },
+	{ DBGBUS_SSPP0, 42, 1 },
+	{ DBGBUS_SSPP0, 42, 2 },
+	{ DBGBUS_SSPP0, 42, 3 },
+	{ DBGBUS_SSPP0, 42, 4 },
+	{ DBGBUS_SSPP0, 42, 5 },
+	{ DBGBUS_SSPP0, 42, 6 },
+	{ DBGBUS_SSPP0, 42, 7 },
+
+	{ DBGBUS_SSPP0, 44, 0 },
+	{ DBGBUS_SSPP0, 44, 1 },
+	{ DBGBUS_SSPP0, 44, 2 },
+	{ DBGBUS_SSPP0, 44, 3 },
+	{ DBGBUS_SSPP0, 44, 4 },
+	{ DBGBUS_SSPP0, 44, 5 },
+	{ DBGBUS_SSPP0, 44, 6 },
+	{ DBGBUS_SSPP0, 44, 7 },
+
+	{ DBGBUS_SSPP0, 45, 0 },
+	{ DBGBUS_SSPP0, 45, 1 },
+	{ DBGBUS_SSPP0, 45, 2 },
+	{ DBGBUS_SSPP0, 45, 3 },
+	{ DBGBUS_SSPP0, 45, 4 },
+	{ DBGBUS_SSPP0, 45, 5 },
+	{ DBGBUS_SSPP0, 45, 6 },
+	{ DBGBUS_SSPP0, 45, 7 },
+
+	/* fetch sspp1 */
+	/* vig 1 */
+	{ DBGBUS_SSPP1, 0, 0 },
+	{ DBGBUS_SSPP1, 0, 1 },
+	{ DBGBUS_SSPP1, 0, 2 },
+	{ DBGBUS_SSPP1, 0, 3 },
+	{ DBGBUS_SSPP1, 0, 4 },
+	{ DBGBUS_SSPP1, 0, 5 },
+	{ DBGBUS_SSPP1, 0, 6 },
+	{ DBGBUS_SSPP1, 0, 7 },
+
+	{ DBGBUS_SSPP1, 1, 0 },
+	{ DBGBUS_SSPP1, 1, 1 },
+	{ DBGBUS_SSPP1, 1, 2 },
+	{ DBGBUS_SSPP1, 1, 3 },
+	{ DBGBUS_SSPP1, 1, 4 },
+	{ DBGBUS_SSPP1, 1, 5 },
+	{ DBGBUS_SSPP1, 1, 6 },
+	{ DBGBUS_SSPP1, 1, 7 },
+
+	{ DBGBUS_SSPP1, 2, 0 },
+	{ DBGBUS_SSPP1, 2, 1 },
+	{ DBGBUS_SSPP1, 2, 2 },
+	{ DBGBUS_SSPP1, 2, 3 },
+	{ DBGBUS_SSPP1, 2, 4 },
+	{ DBGBUS_SSPP1, 2, 5 },
+	{ DBGBUS_SSPP1, 2, 6 },
+	{ DBGBUS_SSPP1, 2, 7 },
+
+	{ DBGBUS_SSPP1, 4, 0 },
+	{ DBGBUS_SSPP1, 4, 1 },
+	{ DBGBUS_SSPP1, 4, 2 },
+	{ DBGBUS_SSPP1, 4, 3 },
+	{ DBGBUS_SSPP1, 4, 4 },
+	{ DBGBUS_SSPP1, 4, 5 },
+	{ DBGBUS_SSPP1, 4, 6 },
+	{ DBGBUS_SSPP1, 4, 7 },
+
+	{ DBGBUS_SSPP1, 5, 0 },
+	{ DBGBUS_SSPP1, 5, 1 },
+	{ DBGBUS_SSPP1, 5, 2 },
+	{ DBGBUS_SSPP1, 5, 3 },
+	{ DBGBUS_SSPP1, 5, 4 },
+	{ DBGBUS_SSPP1, 5, 5 },
+	{ DBGBUS_SSPP1, 5, 6 },
+	{ DBGBUS_SSPP1, 5, 7 },
+
+	/* vig 3 */
+	{ DBGBUS_SSPP1, 20, 0 },
+	{ DBGBUS_SSPP1, 20, 1 },
+	{ DBGBUS_SSPP1, 20, 2 },
+	{ DBGBUS_SSPP1, 20, 3 },
+	{ DBGBUS_SSPP1, 20, 4 },
+	{ DBGBUS_SSPP1, 20, 5 },
+	{ DBGBUS_SSPP1, 20, 6 },
+	{ DBGBUS_SSPP1, 20, 7 },
+
+	{ DBGBUS_SSPP1, 21, 0 },
+	{ DBGBUS_SSPP1, 21, 1 },
+	{ DBGBUS_SSPP1, 21, 2 },
+	{ DBGBUS_SSPP1, 21, 3 },
+	{ DBGBUS_SSPP1, 21, 4 },
+	{ DBGBUS_SSPP1, 21, 5 },
+	{ DBGBUS_SSPP1, 21, 6 },
+	{ DBGBUS_SSPP1, 21, 7 },
+
+	{ DBGBUS_SSPP1, 22, 0 },
+	{ DBGBUS_SSPP1, 22, 1 },
+	{ DBGBUS_SSPP1, 22, 2 },
+	{ DBGBUS_SSPP1, 22, 3 },
+	{ DBGBUS_SSPP1, 22, 4 },
+	{ DBGBUS_SSPP1, 22, 5 },
+	{ DBGBUS_SSPP1, 22, 6 },
+	{ DBGBUS_SSPP1, 22, 7 },
+
+	{ DBGBUS_SSPP1, 24, 0 },
+	{ DBGBUS_SSPP1, 24, 1 },
+	{ DBGBUS_SSPP1, 24, 2 },
+	{ DBGBUS_SSPP1, 24, 3 },
+	{ DBGBUS_SSPP1, 24, 4 },
+	{ DBGBUS_SSPP1, 24, 5 },
+	{ DBGBUS_SSPP1, 24, 6 },
+	{ DBGBUS_SSPP1, 24, 7 },
+
+	{ DBGBUS_SSPP1, 25, 0 },
+	{ DBGBUS_SSPP1, 25, 1 },
+	{ DBGBUS_SSPP1, 25, 2 },
+	{ DBGBUS_SSPP1, 25, 3 },
+	{ DBGBUS_SSPP1, 25, 4 },
+	{ DBGBUS_SSPP1, 25, 5 },
+	{ DBGBUS_SSPP1, 25, 6 },
+	{ DBGBUS_SSPP1, 25, 7 },
+
+	/* dma 3 */
+	{ DBGBUS_SSPP1, 30, 0 },
+	{ DBGBUS_SSPP1, 30, 1 },
+	{ DBGBUS_SSPP1, 30, 2 },
+	{ DBGBUS_SSPP1, 30, 3 },
+	{ DBGBUS_SSPP1, 30, 4 },
+	{ DBGBUS_SSPP1, 30, 5 },
+	{ DBGBUS_SSPP1, 30, 6 },
+	{ DBGBUS_SSPP1, 30, 7 },
+
+	{ DBGBUS_SSPP1, 31, 0 },
+	{ DBGBUS_SSPP1, 31, 1 },
+	{ DBGBUS_SSPP1, 31, 2 },
+	{ DBGBUS_SSPP1, 31, 3 },
+	{ DBGBUS_SSPP1, 31, 4 },
+	{ DBGBUS_SSPP1, 31, 5 },
+	{ DBGBUS_SSPP1, 31, 6 },
+	{ DBGBUS_SSPP1, 31, 7 },
+
+	{ DBGBUS_SSPP1, 32, 0 },
+	{ DBGBUS_SSPP1, 32, 1 },
+	{ DBGBUS_SSPP1, 32, 2 },
+	{ DBGBUS_SSPP1, 32, 3 },
+	{ DBGBUS_SSPP1, 32, 4 },
+	{ DBGBUS_SSPP1, 32, 5 },
+	{ DBGBUS_SSPP1, 32, 6 },
+	{ DBGBUS_SSPP1, 32, 7 },
+
+	{ DBGBUS_SSPP1, 33, 0 },
+	{ DBGBUS_SSPP1, 33, 1 },
+	{ DBGBUS_SSPP1, 33, 2 },
+	{ DBGBUS_SSPP1, 33, 3 },
+	{ DBGBUS_SSPP1, 33, 4 },
+	{ DBGBUS_SSPP1, 33, 5 },
+	{ DBGBUS_SSPP1, 33, 6 },
+	{ DBGBUS_SSPP1, 33, 7 },
+
+	{ DBGBUS_SSPP1, 34, 0 },
+	{ DBGBUS_SSPP1, 34, 1 },
+	{ DBGBUS_SSPP1, 34, 2 },
+	{ DBGBUS_SSPP1, 34, 3 },
+	{ DBGBUS_SSPP1, 34, 4 },
+	{ DBGBUS_SSPP1, 34, 5 },
+	{ DBGBUS_SSPP1, 34, 6 },
+	{ DBGBUS_SSPP1, 34, 7 },
+
+	{ DBGBUS_SSPP1, 35, 0 },
+	{ DBGBUS_SSPP1, 35, 1 },
+	{ DBGBUS_SSPP1, 35, 2 },
+
+	/* dma 1 */
+	{ DBGBUS_SSPP1, 40, 0 },
+	{ DBGBUS_SSPP1, 40, 1 },
+	{ DBGBUS_SSPP1, 40, 2 },
+	{ DBGBUS_SSPP1, 40, 3 },
+	{ DBGBUS_SSPP1, 40, 4 },
+	{ DBGBUS_SSPP1, 40, 5 },
+	{ DBGBUS_SSPP1, 40, 6 },
+	{ DBGBUS_SSPP1, 40, 7 },
+
+	{ DBGBUS_SSPP1, 41, 0 },
+	{ DBGBUS_SSPP1, 41, 1 },
+	{ DBGBUS_SSPP1, 41, 2 },
+	{ DBGBUS_SSPP1, 41, 3 },
+	{ DBGBUS_SSPP1, 41, 4 },
+	{ DBGBUS_SSPP1, 41, 5 },
+	{ DBGBUS_SSPP1, 41, 6 },
+	{ DBGBUS_SSPP1, 41, 7 },
+
+	{ DBGBUS_SSPP1, 42, 0 },
+	{ DBGBUS_SSPP1, 42, 1 },
+	{ DBGBUS_SSPP1, 42, 2 },
+	{ DBGBUS_SSPP1, 42, 3 },
+	{ DBGBUS_SSPP1, 42, 4 },
+	{ DBGBUS_SSPP1, 42, 5 },
+	{ DBGBUS_SSPP1, 42, 6 },
+	{ DBGBUS_SSPP1, 42, 7 },
+
+	{ DBGBUS_SSPP1, 44, 0 },
+	{ DBGBUS_SSPP1, 44, 1 },
+	{ DBGBUS_SSPP1, 44, 2 },
+	{ DBGBUS_SSPP1, 44, 3 },
+	{ DBGBUS_SSPP1, 44, 4 },
+	{ DBGBUS_SSPP1, 44, 5 },
+	{ DBGBUS_SSPP1, 44, 6 },
+	{ DBGBUS_SSPP1, 44, 7 },
+
+	{ DBGBUS_SSPP1, 45, 0 },
+	{ DBGBUS_SSPP1, 45, 1 },
+	{ DBGBUS_SSPP1, 45, 2 },
+	{ DBGBUS_SSPP1, 45, 3 },
+	{ DBGBUS_SSPP1, 45, 4 },
+	{ DBGBUS_SSPP1, 45, 5 },
+	{ DBGBUS_SSPP1, 45, 6 },
+	{ DBGBUS_SSPP1, 45, 7 },
+
+	/* dspp */
+	{ DBGBUS_DSPP, 13, 0 },
+	{ DBGBUS_DSPP, 19, 0 },
+	{ DBGBUS_DSPP, 14, 0 },
+	{ DBGBUS_DSPP, 14, 1 },
+	{ DBGBUS_DSPP, 14, 3 },
+	{ DBGBUS_DSPP, 20, 0 },
+	{ DBGBUS_DSPP, 20, 1 },
+	{ DBGBUS_DSPP, 20, 3 },
+
+	/* ppb_0 */
+	{ DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
+	{ DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
+	{ DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
+	{ DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
+
+	/* ppb_1 */
+	{ DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
+	{ DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
+	{ DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
+	{ DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
+
+	/* lm_lut */
+	{ DBGBUS_DSPP, 109, 0 },
+	{ DBGBUS_DSPP, 105, 0 },
+	{ DBGBUS_DSPP, 103, 0 },
+
+	/* crossbar */
+	{ DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
+
+	/* rotator */
+	{ DBGBUS_DSPP, 9, 0},
+
+	/* blend */
+	/* LM0 */
+	{ DBGBUS_DSPP, 63, 1},
+	{ DBGBUS_DSPP, 63, 2},
+	{ DBGBUS_DSPP, 63, 3},
+	{ DBGBUS_DSPP, 63, 4},
+	{ DBGBUS_DSPP, 63, 5},
+	{ DBGBUS_DSPP, 63, 6},
+	{ DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 64, 1},
+	{ DBGBUS_DSPP, 64, 2},
+	{ DBGBUS_DSPP, 64, 3},
+	{ DBGBUS_DSPP, 64, 4},
+	{ DBGBUS_DSPP, 64, 5},
+	{ DBGBUS_DSPP, 64, 6},
+	{ DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 65, 1},
+	{ DBGBUS_DSPP, 65, 2},
+	{ DBGBUS_DSPP, 65, 3},
+	{ DBGBUS_DSPP, 65, 4},
+	{ DBGBUS_DSPP, 65, 5},
+	{ DBGBUS_DSPP, 65, 6},
+	{ DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 66, 1},
+	{ DBGBUS_DSPP, 66, 2},
+	{ DBGBUS_DSPP, 66, 3},
+	{ DBGBUS_DSPP, 66, 4},
+	{ DBGBUS_DSPP, 66, 5},
+	{ DBGBUS_DSPP, 66, 6},
+	{ DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 67, 1},
+	{ DBGBUS_DSPP, 67, 2},
+	{ DBGBUS_DSPP, 67, 3},
+	{ DBGBUS_DSPP, 67, 4},
+	{ DBGBUS_DSPP, 67, 5},
+	{ DBGBUS_DSPP, 67, 6},
+	{ DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 68, 1},
+	{ DBGBUS_DSPP, 68, 2},
+	{ DBGBUS_DSPP, 68, 3},
+	{ DBGBUS_DSPP, 68, 4},
+	{ DBGBUS_DSPP, 68, 5},
+	{ DBGBUS_DSPP, 68, 6},
+	{ DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 69, 1},
+	{ DBGBUS_DSPP, 69, 2},
+	{ DBGBUS_DSPP, 69, 3},
+	{ DBGBUS_DSPP, 69, 4},
+	{ DBGBUS_DSPP, 69, 5},
+	{ DBGBUS_DSPP, 69, 6},
+	{ DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 84, 1},
+	{ DBGBUS_DSPP, 84, 2},
+	{ DBGBUS_DSPP, 84, 3},
+	{ DBGBUS_DSPP, 84, 4},
+	{ DBGBUS_DSPP, 84, 5},
+	{ DBGBUS_DSPP, 84, 6},
+	{ DBGBUS_DSPP, 84, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 85, 1},
+	{ DBGBUS_DSPP, 85, 2},
+	{ DBGBUS_DSPP, 85, 3},
+	{ DBGBUS_DSPP, 85, 4},
+	{ DBGBUS_DSPP, 85, 5},
+	{ DBGBUS_DSPP, 85, 6},
+	{ DBGBUS_DSPP, 85, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 86, 1},
+	{ DBGBUS_DSPP, 86, 2},
+	{ DBGBUS_DSPP, 86, 3},
+	{ DBGBUS_DSPP, 86, 4},
+	{ DBGBUS_DSPP, 86, 5},
+	{ DBGBUS_DSPP, 86, 6},
+	{ DBGBUS_DSPP, 86, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 87, 1},
+	{ DBGBUS_DSPP, 87, 2},
+	{ DBGBUS_DSPP, 87, 3},
+	{ DBGBUS_DSPP, 87, 4},
+	{ DBGBUS_DSPP, 87, 5},
+	{ DBGBUS_DSPP, 87, 6},
+	{ DBGBUS_DSPP, 87, 7, _dpu_debug_bus_lm_dump },
+
+	/* LM1 */
+	{ DBGBUS_DSPP, 70, 1},
+	{ DBGBUS_DSPP, 70, 2},
+	{ DBGBUS_DSPP, 70, 3},
+	{ DBGBUS_DSPP, 70, 4},
+	{ DBGBUS_DSPP, 70, 5},
+	{ DBGBUS_DSPP, 70, 6},
+	{ DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 71, 1},
+	{ DBGBUS_DSPP, 71, 2},
+	{ DBGBUS_DSPP, 71, 3},
+	{ DBGBUS_DSPP, 71, 4},
+	{ DBGBUS_DSPP, 71, 5},
+	{ DBGBUS_DSPP, 71, 6},
+	{ DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 72, 1},
+	{ DBGBUS_DSPP, 72, 2},
+	{ DBGBUS_DSPP, 72, 3},
+	{ DBGBUS_DSPP, 72, 4},
+	{ DBGBUS_DSPP, 72, 5},
+	{ DBGBUS_DSPP, 72, 6},
+	{ DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 73, 1},
+	{ DBGBUS_DSPP, 73, 2},
+	{ DBGBUS_DSPP, 73, 3},
+	{ DBGBUS_DSPP, 73, 4},
+	{ DBGBUS_DSPP, 73, 5},
+	{ DBGBUS_DSPP, 73, 6},
+	{ DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 74, 1},
+	{ DBGBUS_DSPP, 74, 2},
+	{ DBGBUS_DSPP, 74, 3},
+	{ DBGBUS_DSPP, 74, 4},
+	{ DBGBUS_DSPP, 74, 5},
+	{ DBGBUS_DSPP, 74, 6},
+	{ DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 75, 1},
+	{ DBGBUS_DSPP, 75, 2},
+	{ DBGBUS_DSPP, 75, 3},
+	{ DBGBUS_DSPP, 75, 4},
+	{ DBGBUS_DSPP, 75, 5},
+	{ DBGBUS_DSPP, 75, 6},
+	{ DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 76, 1},
+	{ DBGBUS_DSPP, 76, 2},
+	{ DBGBUS_DSPP, 76, 3},
+	{ DBGBUS_DSPP, 76, 4},
+	{ DBGBUS_DSPP, 76, 5},
+	{ DBGBUS_DSPP, 76, 6},
+	{ DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 88, 1},
+	{ DBGBUS_DSPP, 88, 2},
+	{ DBGBUS_DSPP, 88, 3},
+	{ DBGBUS_DSPP, 88, 4},
+	{ DBGBUS_DSPP, 88, 5},
+	{ DBGBUS_DSPP, 88, 6},
+	{ DBGBUS_DSPP, 88, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 89, 1},
+	{ DBGBUS_DSPP, 89, 2},
+	{ DBGBUS_DSPP, 89, 3},
+	{ DBGBUS_DSPP, 89, 4},
+	{ DBGBUS_DSPP, 89, 5},
+	{ DBGBUS_DSPP, 89, 6},
+	{ DBGBUS_DSPP, 89, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 90, 1},
+	{ DBGBUS_DSPP, 90, 2},
+	{ DBGBUS_DSPP, 90, 3},
+	{ DBGBUS_DSPP, 90, 4},
+	{ DBGBUS_DSPP, 90, 5},
+	{ DBGBUS_DSPP, 90, 6},
+	{ DBGBUS_DSPP, 90, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 91, 1},
+	{ DBGBUS_DSPP, 91, 2},
+	{ DBGBUS_DSPP, 91, 3},
+	{ DBGBUS_DSPP, 91, 4},
+	{ DBGBUS_DSPP, 91, 5},
+	{ DBGBUS_DSPP, 91, 6},
+	{ DBGBUS_DSPP, 91, 7, _dpu_debug_bus_lm_dump },
+
+	/* LM2 */
+	{ DBGBUS_DSPP, 77, 0},
+	{ DBGBUS_DSPP, 77, 1},
+	{ DBGBUS_DSPP, 77, 2},
+	{ DBGBUS_DSPP, 77, 3},
+	{ DBGBUS_DSPP, 77, 4},
+	{ DBGBUS_DSPP, 77, 5},
+	{ DBGBUS_DSPP, 77, 6},
+	{ DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 78, 0},
+	{ DBGBUS_DSPP, 78, 1},
+	{ DBGBUS_DSPP, 78, 2},
+	{ DBGBUS_DSPP, 78, 3},
+	{ DBGBUS_DSPP, 78, 4},
+	{ DBGBUS_DSPP, 78, 5},
+	{ DBGBUS_DSPP, 78, 6},
+	{ DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 79, 0},
+	{ DBGBUS_DSPP, 79, 1},
+	{ DBGBUS_DSPP, 79, 2},
+	{ DBGBUS_DSPP, 79, 3},
+	{ DBGBUS_DSPP, 79, 4},
+	{ DBGBUS_DSPP, 79, 5},
+	{ DBGBUS_DSPP, 79, 6},
+	{ DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 80, 0},
+	{ DBGBUS_DSPP, 80, 1},
+	{ DBGBUS_DSPP, 80, 2},
+	{ DBGBUS_DSPP, 80, 3},
+	{ DBGBUS_DSPP, 80, 4},
+	{ DBGBUS_DSPP, 80, 5},
+	{ DBGBUS_DSPP, 80, 6},
+	{ DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 81, 0},
+	{ DBGBUS_DSPP, 81, 1},
+	{ DBGBUS_DSPP, 81, 2},
+	{ DBGBUS_DSPP, 81, 3},
+	{ DBGBUS_DSPP, 81, 4},
+	{ DBGBUS_DSPP, 81, 5},
+	{ DBGBUS_DSPP, 81, 6},
+	{ DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 82, 0},
+	{ DBGBUS_DSPP, 82, 1},
+	{ DBGBUS_DSPP, 82, 2},
+	{ DBGBUS_DSPP, 82, 3},
+	{ DBGBUS_DSPP, 82, 4},
+	{ DBGBUS_DSPP, 82, 5},
+	{ DBGBUS_DSPP, 82, 6},
+	{ DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 83, 0},
+	{ DBGBUS_DSPP, 83, 1},
+	{ DBGBUS_DSPP, 83, 2},
+	{ DBGBUS_DSPP, 83, 3},
+	{ DBGBUS_DSPP, 83, 4},
+	{ DBGBUS_DSPP, 83, 5},
+	{ DBGBUS_DSPP, 83, 6},
+	{ DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 92, 1},
+	{ DBGBUS_DSPP, 92, 2},
+	{ DBGBUS_DSPP, 92, 3},
+	{ DBGBUS_DSPP, 92, 4},
+	{ DBGBUS_DSPP, 92, 5},
+	{ DBGBUS_DSPP, 92, 6},
+	{ DBGBUS_DSPP, 92, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 93, 1},
+	{ DBGBUS_DSPP, 93, 2},
+	{ DBGBUS_DSPP, 93, 3},
+	{ DBGBUS_DSPP, 93, 4},
+	{ DBGBUS_DSPP, 93, 5},
+	{ DBGBUS_DSPP, 93, 6},
+	{ DBGBUS_DSPP, 93, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 94, 1},
+	{ DBGBUS_DSPP, 94, 2},
+	{ DBGBUS_DSPP, 94, 3},
+	{ DBGBUS_DSPP, 94, 4},
+	{ DBGBUS_DSPP, 94, 5},
+	{ DBGBUS_DSPP, 94, 6},
+	{ DBGBUS_DSPP, 94, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 95, 1},
+	{ DBGBUS_DSPP, 95, 2},
+	{ DBGBUS_DSPP, 95, 3},
+	{ DBGBUS_DSPP, 95, 4},
+	{ DBGBUS_DSPP, 95, 5},
+	{ DBGBUS_DSPP, 95, 6},
+	{ DBGBUS_DSPP, 95, 7, _dpu_debug_bus_lm_dump },
+
+	/* LM5 */
+	{ DBGBUS_DSPP, 110, 1},
+	{ DBGBUS_DSPP, 110, 2},
+	{ DBGBUS_DSPP, 110, 3},
+	{ DBGBUS_DSPP, 110, 4},
+	{ DBGBUS_DSPP, 110, 5},
+	{ DBGBUS_DSPP, 110, 6},
+	{ DBGBUS_DSPP, 110, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 111, 1},
+	{ DBGBUS_DSPP, 111, 2},
+	{ DBGBUS_DSPP, 111, 3},
+	{ DBGBUS_DSPP, 111, 4},
+	{ DBGBUS_DSPP, 111, 5},
+	{ DBGBUS_DSPP, 111, 6},
+	{ DBGBUS_DSPP, 111, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 112, 1},
+	{ DBGBUS_DSPP, 112, 2},
+	{ DBGBUS_DSPP, 112, 3},
+	{ DBGBUS_DSPP, 112, 4},
+	{ DBGBUS_DSPP, 112, 5},
+	{ DBGBUS_DSPP, 112, 6},
+	{ DBGBUS_DSPP, 112, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 113, 1},
+	{ DBGBUS_DSPP, 113, 2},
+	{ DBGBUS_DSPP, 113, 3},
+	{ DBGBUS_DSPP, 113, 4},
+	{ DBGBUS_DSPP, 113, 5},
+	{ DBGBUS_DSPP, 113, 6},
+	{ DBGBUS_DSPP, 113, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 114, 1},
+	{ DBGBUS_DSPP, 114, 2},
+	{ DBGBUS_DSPP, 114, 3},
+	{ DBGBUS_DSPP, 114, 4},
+	{ DBGBUS_DSPP, 114, 5},
+	{ DBGBUS_DSPP, 114, 6},
+	{ DBGBUS_DSPP, 114, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 115, 1},
+	{ DBGBUS_DSPP, 115, 2},
+	{ DBGBUS_DSPP, 115, 3},
+	{ DBGBUS_DSPP, 115, 4},
+	{ DBGBUS_DSPP, 115, 5},
+	{ DBGBUS_DSPP, 115, 6},
+	{ DBGBUS_DSPP, 115, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 116, 1},
+	{ DBGBUS_DSPP, 116, 2},
+	{ DBGBUS_DSPP, 116, 3},
+	{ DBGBUS_DSPP, 116, 4},
+	{ DBGBUS_DSPP, 116, 5},
+	{ DBGBUS_DSPP, 116, 6},
+	{ DBGBUS_DSPP, 116, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 117, 1},
+	{ DBGBUS_DSPP, 117, 2},
+	{ DBGBUS_DSPP, 117, 3},
+	{ DBGBUS_DSPP, 117, 4},
+	{ DBGBUS_DSPP, 117, 5},
+	{ DBGBUS_DSPP, 117, 6},
+	{ DBGBUS_DSPP, 117, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 118, 1},
+	{ DBGBUS_DSPP, 118, 2},
+	{ DBGBUS_DSPP, 118, 3},
+	{ DBGBUS_DSPP, 118, 4},
+	{ DBGBUS_DSPP, 118, 5},
+	{ DBGBUS_DSPP, 118, 6},
+	{ DBGBUS_DSPP, 118, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 119, 1},
+	{ DBGBUS_DSPP, 119, 2},
+	{ DBGBUS_DSPP, 119, 3},
+	{ DBGBUS_DSPP, 119, 4},
+	{ DBGBUS_DSPP, 119, 5},
+	{ DBGBUS_DSPP, 119, 6},
+	{ DBGBUS_DSPP, 119, 7, _dpu_debug_bus_lm_dump },
+
+	{ DBGBUS_DSPP, 120, 1},
+	{ DBGBUS_DSPP, 120, 2},
+	{ DBGBUS_DSPP, 120, 3},
+	{ DBGBUS_DSPP, 120, 4},
+	{ DBGBUS_DSPP, 120, 5},
+	{ DBGBUS_DSPP, 120, 6},
+	{ DBGBUS_DSPP, 120, 7, _dpu_debug_bus_lm_dump },
+
+	/* csc */
+	{ DBGBUS_SSPP0, 7, 0},
+	{ DBGBUS_SSPP0, 7, 1},
+	{ DBGBUS_SSPP0, 27, 0},
+	{ DBGBUS_SSPP0, 27, 1},
+	{ DBGBUS_SSPP1, 7, 0},
+	{ DBGBUS_SSPP1, 7, 1},
+	{ DBGBUS_SSPP1, 27, 0},
+	{ DBGBUS_SSPP1, 27, 1},
+
+	/* pcc */
+	{ DBGBUS_SSPP0, 3,  3},
+	{ DBGBUS_SSPP0, 23, 3},
+	{ DBGBUS_SSPP0, 33, 3},
+	{ DBGBUS_SSPP0, 43, 3},
+	{ DBGBUS_SSPP1, 3,  3},
+	{ DBGBUS_SSPP1, 23, 3},
+	{ DBGBUS_SSPP1, 33, 3},
+	{ DBGBUS_SSPP1, 43, 3},
+
+	/* spa */
+	{ DBGBUS_SSPP0, 8,  0},
+	{ DBGBUS_SSPP0, 28, 0},
+	{ DBGBUS_SSPP1, 8,  0},
+	{ DBGBUS_SSPP1, 28, 0},
+	{ DBGBUS_DSPP, 13, 0},
+	{ DBGBUS_DSPP, 19, 0},
+
+	/* igc */
+	{ DBGBUS_SSPP0, 17, 0},
+	{ DBGBUS_SSPP0, 17, 1},
+	{ DBGBUS_SSPP0, 17, 3},
+	{ DBGBUS_SSPP0, 37, 0},
+	{ DBGBUS_SSPP0, 37, 1},
+	{ DBGBUS_SSPP0, 37, 3},
+	{ DBGBUS_SSPP0, 46, 0},
+	{ DBGBUS_SSPP0, 46, 1},
+	{ DBGBUS_SSPP0, 46, 3},
+
+	{ DBGBUS_SSPP1, 17, 0},
+	{ DBGBUS_SSPP1, 17, 1},
+	{ DBGBUS_SSPP1, 17, 3},
+	{ DBGBUS_SSPP1, 37, 0},
+	{ DBGBUS_SSPP1, 37, 1},
+	{ DBGBUS_SSPP1, 37, 3},
+	{ DBGBUS_SSPP1, 46, 0},
+	{ DBGBUS_SSPP1, 46, 1},
+	{ DBGBUS_SSPP1, 46, 3},
+
+	{ DBGBUS_DSPP, 14, 0},
+	{ DBGBUS_DSPP, 14, 1},
+	{ DBGBUS_DSPP, 14, 3},
+	{ DBGBUS_DSPP, 20, 0},
+	{ DBGBUS_DSPP, 20, 1},
+	{ DBGBUS_DSPP, 20, 3},
+
+	/* intf0-3 */
+	{ DBGBUS_PERIPH, 0, 0},
+	{ DBGBUS_PERIPH, 1, 0},
+	{ DBGBUS_PERIPH, 2, 0},
+	{ DBGBUS_PERIPH, 3, 0},
+
+	/* te counter wrapper */
+	{ DBGBUS_PERIPH, 60, 0},
+
+	/* dsc0 */
+	{ DBGBUS_PERIPH, 47, 0},
+	{ DBGBUS_PERIPH, 47, 1},
+	{ DBGBUS_PERIPH, 47, 2},
+	{ DBGBUS_PERIPH, 47, 3},
+	{ DBGBUS_PERIPH, 47, 4},
+	{ DBGBUS_PERIPH, 47, 5},
+	{ DBGBUS_PERIPH, 47, 6},
+	{ DBGBUS_PERIPH, 47, 7},
+
+	/* dsc1 */
+	{ DBGBUS_PERIPH, 48, 0},
+	{ DBGBUS_PERIPH, 48, 1},
+	{ DBGBUS_PERIPH, 48, 2},
+	{ DBGBUS_PERIPH, 48, 3},
+	{ DBGBUS_PERIPH, 48, 4},
+	{ DBGBUS_PERIPH, 48, 5},
+	{ DBGBUS_PERIPH, 48, 6},
+	{ DBGBUS_PERIPH, 48, 7},
+
+	/* dsc2 */
+	{ DBGBUS_PERIPH, 51, 0},
+	{ DBGBUS_PERIPH, 51, 1},
+	{ DBGBUS_PERIPH, 51, 2},
+	{ DBGBUS_PERIPH, 51, 3},
+	{ DBGBUS_PERIPH, 51, 4},
+	{ DBGBUS_PERIPH, 51, 5},
+	{ DBGBUS_PERIPH, 51, 6},
+	{ DBGBUS_PERIPH, 51, 7},
+
+	/* dsc3 */
+	{ DBGBUS_PERIPH, 52, 0},
+	{ DBGBUS_PERIPH, 52, 1},
+	{ DBGBUS_PERIPH, 52, 2},
+	{ DBGBUS_PERIPH, 52, 3},
+	{ DBGBUS_PERIPH, 52, 4},
+	{ DBGBUS_PERIPH, 52, 5},
+	{ DBGBUS_PERIPH, 52, 6},
+	{ DBGBUS_PERIPH, 52, 7},
+
+	/* tear-check */
+	{ DBGBUS_PERIPH, 63, 0 },
+	{ DBGBUS_PERIPH, 64, 0 },
+	{ DBGBUS_PERIPH, 65, 0 },
+	{ DBGBUS_PERIPH, 73, 0 },
+	{ DBGBUS_PERIPH, 74, 0 },
+
+	/* cdwn */
+	{ DBGBUS_PERIPH, 80, 0},
+	{ DBGBUS_PERIPH, 80, 1},
+	{ DBGBUS_PERIPH, 80, 2},
+
+	{ DBGBUS_PERIPH, 81, 0},
+	{ DBGBUS_PERIPH, 81, 1},
+	{ DBGBUS_PERIPH, 81, 2},
+
+	{ DBGBUS_PERIPH, 82, 0},
+	{ DBGBUS_PERIPH, 82, 1},
+	{ DBGBUS_PERIPH, 82, 2},
+	{ DBGBUS_PERIPH, 82, 3},
+	{ DBGBUS_PERIPH, 82, 4},
+	{ DBGBUS_PERIPH, 82, 5},
+	{ DBGBUS_PERIPH, 82, 6},
+	{ DBGBUS_PERIPH, 82, 7},
+
+	/* hdmi */
+	{ DBGBUS_PERIPH, 68, 0},
+	{ DBGBUS_PERIPH, 68, 1},
+	{ DBGBUS_PERIPH, 68, 2},
+	{ DBGBUS_PERIPH, 68, 3},
+	{ DBGBUS_PERIPH, 68, 4},
+	{ DBGBUS_PERIPH, 68, 5},
+
+	/* edp */
+	{ DBGBUS_PERIPH, 69, 0},
+	{ DBGBUS_PERIPH, 69, 1},
+	{ DBGBUS_PERIPH, 69, 2},
+	{ DBGBUS_PERIPH, 69, 3},
+	{ DBGBUS_PERIPH, 69, 4},
+	{ DBGBUS_PERIPH, 69, 5},
+
+	/* dsi0 */
+	{ DBGBUS_PERIPH, 70, 0},
+	{ DBGBUS_PERIPH, 70, 1},
+	{ DBGBUS_PERIPH, 70, 2},
+	{ DBGBUS_PERIPH, 70, 3},
+	{ DBGBUS_PERIPH, 70, 4},
+	{ DBGBUS_PERIPH, 70, 5},
+
+	/* dsi1 */
+	{ DBGBUS_PERIPH, 71, 0},
+	{ DBGBUS_PERIPH, 71, 1},
+	{ DBGBUS_PERIPH, 71, 2},
+	{ DBGBUS_PERIPH, 71, 3},
+	{ DBGBUS_PERIPH, 71, 4},
+	{ DBGBUS_PERIPH, 71, 5},
+};
+
+static struct vbif_debug_bus_entry vbif_dbg_bus_msm8998[] = {
+	{0x214, 0x21c, 16, 2, 0x0, 0xd},     /* arb clients */
+	{0x214, 0x21c, 16, 2, 0x80, 0xc0},   /* arb clients */
+	{0x214, 0x21c, 16, 2, 0x100, 0x140}, /* arb clients */
+	{0x214, 0x21c, 0, 16, 0x0, 0xf},     /* xin blocks - axi side */
+	{0x214, 0x21c, 0, 16, 0x80, 0xa4},   /* xin blocks - axi side */
+	{0x214, 0x21c, 0, 15, 0x100, 0x124}, /* xin blocks - axi side */
+	{0x21c, 0x214, 0, 14, 0, 0xc}, /* xin blocks - clock side */
+};
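+
+/*
+ * Reading the table above (field order assumed from how the dump code
+ * below consumes the entries):
+ *
+ *	{ disable_bus_addr, block_bus_addr, bit_offset, block_cnt,
+ *	  test_pnt_start, test_pnt_cnt }
+ *
+ * e.g. the first row enables two arb-client blocks through 0x21c (bit
+ * offset 16) and walks test points from 0x0 up to 0xd, while 0 is
+ * written to 0x214 to park the other bus.
+ */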
+
+/**
+ * _dpu_dbg_enable_power - turn power on/off for hw register access
+ * @enable: whether to turn power on or off
+ */
+static inline void _dpu_dbg_enable_power(int enable)
+{
+	if (enable)
+		pm_runtime_get_sync(dpu_dbg_base.dev);
+	else
+		pm_runtime_put_sync(dpu_dbg_base.dev);
+}
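+
+/*
+ * Note: callers are expected to balance the calls; each
+ * _dpu_dbg_enable_power(true) takes a pm_runtime reference that the
+ * matching _dpu_dbg_enable_power(false) releases, as done around the
+ * bus dumps below.
+ */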
+
+static void _dpu_dbg_dump_dpu_dbg_bus(struct dpu_dbg_dpu_debug_bus *bus)
+{
+	bool in_log, in_mem;
+	u32 **dump_mem = NULL;
+	u32 *dump_addr = NULL;
+	u32 status = 0;
+	struct dpu_debug_bus_entry *head;
+	phys_addr_t phys = 0;
+	int list_size;
+	int i;
+	u32 offset;
+	void __iomem *mem_base = NULL;
+	struct dpu_dbg_reg_base *reg_base;
+
+	if (!bus || !bus->cmn.entries_size)
+		return;
+
+	list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
+			reg_base_head)
+		if (strlen(reg_base->name) &&
+			!strcmp(reg_base->name, bus->cmn.name))
+			mem_base = reg_base->base + bus->top_blk_off;
+
+	if (!mem_base) {
+		pr_err("unable to find mem_base for %s\n", bus->cmn.name);
+		return;
+	}
+
+	dump_mem = &bus->cmn.dumped_content;
+
+	/* keep four 4-byte values in memory for each bus entry */
+	list_size = (bus->cmn.entries_size * 4 * 4);
+
+	in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
+	in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
+
+	if (!in_log && !in_mem)
+		return;
+
+	dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
+			bus->cmn.name);
+
+	if (in_mem) {
+		if (!(*dump_mem))
+			*dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
+				list_size, &phys, GFP_KERNEL);
+
+		if (*dump_mem) {
+			dump_addr = *dump_mem;
+			dev_info(dpu_dbg_base.dev,
+				"%s: start_addr:0x%pK len:0x%x\n",
+				__func__, dump_addr, list_size);
+		} else {
+			in_mem = false;
+			pr_err("dump_mem: allocation fails\n");
+		}
+	}
+
+	_dpu_dbg_enable_power(true);
+	for (i = 0; i < bus->cmn.entries_size; i++) {
+		head = bus->entries + i;
+		writel_relaxed(TEST_MASK(head->block_id, head->test_id),
+				mem_base + head->wr_addr);
+		wmb(); /* make sure test bits were written */
+
+		if (bus->cmn.flags & DBGBUS_FLAGS_DSPP) {
+			offset = DBGBUS_DSPP_STATUS;
+			/* keep DSPP test point enabled */
+			if (head->wr_addr != DBGBUS_DSPP)
+				writel_relaxed(0xF, mem_base + DBGBUS_DSPP);
+		} else {
+			offset = head->wr_addr + 0x4;
+		}
+
+		status = readl_relaxed(mem_base + offset);
+
+		if (in_log)
+			dev_info(dpu_dbg_base.dev,
+					"waddr=0x%x blk=%d tst=%d val=0x%x\n",
+					head->wr_addr, head->block_id,
+					head->test_id, status);
+
+		if (dump_addr && in_mem) {
+			dump_addr[i*4]     = head->wr_addr;
+			dump_addr[i*4 + 1] = head->block_id;
+			dump_addr[i*4 + 2] = head->test_id;
+			dump_addr[i*4 + 3] = status;
+		}
+
+		if (head->analyzer)
+			head->analyzer(mem_base, head, status);
+
+		/* Disable debug bus once we are done */
+		writel_relaxed(0, mem_base + head->wr_addr);
+		if (bus->cmn.flags & DBGBUS_FLAGS_DSPP &&
+						head->wr_addr != DBGBUS_DSPP)
+			writel_relaxed(0x0, mem_base + DBGBUS_DSPP);
+	}
+	_dpu_dbg_enable_power(false);
+
+	dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
+			bus->cmn.name);
+}
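+
+/*
+ * Layout of the dumped_content buffer filled above: one record of four
+ * u32 values per bus entry, i.e. for entry i:
+ *
+ *	dump_addr[4 * i + 0] = wr_addr
+ *	dump_addr[4 * i + 1] = block_id
+ *	dump_addr[4 * i + 2] = test_id
+ *	dump_addr[4 * i + 3] = status
+ */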
+
+static void _dpu_dbg_dump_vbif_debug_bus_entry(
+		struct vbif_debug_bus_entry *head, void __iomem *mem_base,
+		u32 *dump_addr, bool in_log)
+{
+	int i, j;
+	u32 val;
+
+	if (!dump_addr && !in_log)
+		return;
+
+	for (i = 0; i < head->block_cnt; i++) {
+		writel_relaxed(1 << (i + head->bit_offset),
+				mem_base + head->block_bus_addr);
+		/* make sure the current bus block is enabled */
+		wmb();
+		for (j = head->test_pnt_start; j < head->test_pnt_cnt; j++) {
+			writel_relaxed(j, mem_base + head->block_bus_addr + 4);
+			/* make sure that test point is enabled */
+			wmb();
+			val = readl_relaxed(mem_base + MMSS_VBIF_TEST_BUS_OUT);
+			if (dump_addr) {
+				*dump_addr++ = head->block_bus_addr;
+				*dump_addr++ = i;
+				*dump_addr++ = j;
+				*dump_addr++ = val;
+			}
+			if (in_log)
+				dev_info(dpu_dbg_base.dev,
+					"testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
+					head->block_bus_addr, i, j, val);
+		}
+	}
+}
+
+static void _dpu_dbg_dump_vbif_dbg_bus(struct dpu_dbg_vbif_debug_bus *bus)
+{
+	bool in_log, in_mem;
+	u32 **dump_mem = NULL;
+	u32 *dump_addr = NULL;
+	u32 value, d0, d1;
+	unsigned long reg, reg1, reg2;
+	struct vbif_debug_bus_entry *head;
+	phys_addr_t phys = 0;
+	int i, list_size = 0;
+	void __iomem *mem_base = NULL;
+	struct vbif_debug_bus_entry *dbg_bus;
+	u32 bus_size;
+	struct dpu_dbg_reg_base *reg_base;
+
+	if (!bus || !bus->cmn.entries_size)
+		return;
+
+	list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
+			reg_base_head)
+		if (strlen(reg_base->name) &&
+			!strcmp(reg_base->name, bus->cmn.name))
+			mem_base = reg_base->base;
+
+	if (!mem_base) {
+		pr_err("unable to find mem_base for %s\n", bus->cmn.name);
+		return;
+	}
+
+	dbg_bus = bus->entries;
+	bus_size = bus->cmn.entries_size;
+	list_size = bus->cmn.entries_size;
+	dump_mem = &bus->cmn.dumped_content;
+
+	dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
+			bus->cmn.name);
+
+	if (!dump_mem || !dbg_bus || !bus_size || !list_size)
+		return;
+
+	/* allocate memory for each test point */
+	for (i = 0; i < bus_size; i++) {
+		head = dbg_bus + i;
+		list_size += (head->block_cnt * head->test_pnt_cnt);
+	}
+
+	/* 4 bytes * 4 entries for each test point */
+	list_size *= 16;
+
+	in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
+	in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
+
+	if (!in_log && !in_mem)
+		return;
+
+	if (in_mem) {
+		if (!(*dump_mem))
+			*dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
+				list_size, &phys, GFP_KERNEL);
+
+		if (*dump_mem) {
+			dump_addr = *dump_mem;
+			dev_info(dpu_dbg_base.dev,
+				"%s: start_addr:0x%pK len:0x%x\n",
+				__func__, dump_addr, list_size);
+		} else {
+			in_mem = false;
+			pr_err("dump_mem: allocation fails\n");
+		}
+	}
+
+	_dpu_dbg_enable_power(true);
+
+	value = readl_relaxed(mem_base + MMSS_VBIF_CLKON);
+	writel_relaxed(value | BIT(1), mem_base + MMSS_VBIF_CLKON);
+
+	/* make sure that vbif core is on */
+	wmb();
+
+	/*
+	 * Extract VBIF error info based on XIN halt and error status.
+	 * If the XIN client is not in HALT state, or an error is detected,
+	 * then retrieve the VBIF error info for it.
+	 */
+	reg = readl_relaxed(mem_base + MMSS_VBIF_XIN_HALT_CTRL1);
+	reg1 = readl_relaxed(mem_base + MMSS_VBIF_PND_ERR);
+	reg2 = readl_relaxed(mem_base + MMSS_VBIF_SRC_ERR);
+	dev_err(dpu_dbg_base.dev,
+			"XIN HALT:0x%lX, PND ERR:0x%lX, SRC ERR:0x%lX\n",
+			reg, reg1, reg2);
+	reg >>= 16;
+	reg &= ~(reg1 | reg2);
+	for (i = 0; i < MMSS_VBIF_CLIENT_NUM; i++) {
+		if (!test_bit(0, &reg)) {
+			writel_relaxed(i, mem_base + MMSS_VBIF_ERR_INFO);
+			/* make sure reg write goes through */
+			wmb();
+
+			d0 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO);
+			d1 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO_1);
+
+			dev_err(dpu_dbg_base.dev,
+					"Client:%d, errinfo=0x%X, errinfo1=0x%X\n",
+					i, d0, d1);
+		}
+		reg >>= 1;
+	}
+
+	for (i = 0; i < bus_size; i++) {
+		head = dbg_bus + i;
+
+		writel_relaxed(0, mem_base + head->disable_bus_addr);
+		writel_relaxed(BIT(0), mem_base + MMSS_VBIF_TEST_BUS_OUT_CTRL);
+		/* make sure that other bus is off */
+		wmb();
+
+		_dpu_dbg_dump_vbif_debug_bus_entry(head, mem_base, dump_addr,
+				in_log);
+		if (dump_addr)
+			dump_addr += (head->block_cnt * head->test_pnt_cnt * 4);
+	}
+
+	_dpu_dbg_enable_power(false);
+
+	dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
+			bus->cmn.name);
+}
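+
+/*
+ * The VBIF dump buffer follows the same four-u32-per-sample convention:
+ * each test point read is stored as { block_bus_addr, arb/xin id,
+ * test point index, value }, matching the writes in
+ * _dpu_dbg_dump_vbif_debug_bus_entry() above.
+ */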
+
+/**
+ * _dpu_dump_array - dump array of register bases
+ * @name: string indicating origin of dump
+ * @dump_dbgbus_dpu: whether to dump the dpu debug bus
+ * @dump_dbgbus_vbif_rt: whether to dump the vbif rt debug bus
+ */
+static void _dpu_dump_array(const char *name, bool dump_dbgbus_dpu,
+			    bool dump_dbgbus_vbif_rt)
+{
+	if (dump_dbgbus_dpu)
+		_dpu_dbg_dump_dpu_dbg_bus(&dpu_dbg_base.dbgbus_dpu);
+
+	if (dump_dbgbus_vbif_rt)
+		_dpu_dbg_dump_vbif_dbg_bus(&dpu_dbg_base.dbgbus_vbif_rt);
+}
+
+/**
+ * _dpu_dump_work - deferred dump work function
+ * @work: work structure
+ */
+static void _dpu_dump_work(struct work_struct *work)
+{
+	_dpu_dump_array("dpudump_workitem",
+		dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work,
+		dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work);
+}
+
+void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
+		  bool dump_dbgbus_vbif_rt)
+{
+	if (queue_work && work_pending(&dpu_dbg_base.dump_work))
+		return;
+
+	if (!queue_work) {
+		_dpu_dump_array(name, dump_dbgbus_dpu, dump_dbgbus_vbif_rt);
+		return;
+	}
+
+	/* schedule work to dump later */
+	dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work = dump_dbgbus_dpu;
+	dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work =
+			dump_dbgbus_vbif_rt;
+	schedule_work(&dpu_dbg_base.dump_work);
+}
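+
+/*
+ * Usage sketch (illustrative, not from the original patch): a caller in
+ * atomic/IRQ context can defer the dump to the work item,
+ *
+ *	dpu_dbg_dump(true, __func__, true, true);
+ *
+ * while a process-context caller may dump synchronously:
+ *
+ *	dpu_dbg_dump(false, __func__, true, false);
+ */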
+
+/**
+ * dpu_dbg_debugfs_open - debugfs open handler for debug dump
+ * @inode: debugfs inode
+ * @file: file handle
+ */
+static int dpu_dbg_debugfs_open(struct inode *inode, struct file *file)
+{
+	/* non-seekable */
+	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+/**
+ * dpu_dbg_dump_write - debugfs write handler for debug dump
+ * @file: file handler
+ * @user_buf: user buffer content from debugfs
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ */
+static ssize_t dpu_dbg_dump_write(struct file *file,
+	const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	_dpu_dump_array("dump_debugfs", true, true);
+	return count;
+}
+
+static const struct file_operations dpu_dbg_dump_fops = {
+	.open = dpu_dbg_debugfs_open,
+	.write = dpu_dbg_dump_write,
+};
+
+int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
+{
+	static struct dpu_dbg_base *dbg = &dpu_dbg_base;
+	char debug_name[80] = "";
+
+	if (!debugfs_root)
+		return -EINVAL;
+
+	debugfs_create_file("dump", 0600, debugfs_root, NULL,
+			&dpu_dbg_dump_fops);
+
+	if (dbg->dbgbus_dpu.entries) {
+		dbg->dbgbus_dpu.cmn.name = DBGBUS_NAME_DPU;
+		snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
+				dbg->dbgbus_dpu.cmn.name);
+		dbg->dbgbus_dpu.cmn.enable_mask = DEFAULT_DBGBUS_DPU;
+		debugfs_create_u32(debug_name, 0600, debugfs_root,
+				&dbg->dbgbus_dpu.cmn.enable_mask);
+	}
+
+	if (dbg->dbgbus_vbif_rt.entries) {
+		dbg->dbgbus_vbif_rt.cmn.name = DBGBUS_NAME_VBIF_RT;
+		snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
+				dbg->dbgbus_vbif_rt.cmn.name);
+		dbg->dbgbus_vbif_rt.cmn.enable_mask = DEFAULT_DBGBUS_VBIFRT;
+		debugfs_create_u32(debug_name, 0600, debugfs_root,
+				&dbg->dbgbus_vbif_rt.cmn.enable_mask);
+	}
+
+	return 0;
+}
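+
+/*
+ * With the nodes created above, a dump can be triggered from userspace,
+ * e.g. (debugfs path illustrative and target dependent):
+ *
+ *	echo 1 > /sys/kernel/debug/dri/0/debug/dump
+ *
+ * while the <name>_dbgbus nodes select log vs. memory capture via their
+ * enable_mask.
+ */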
+
+static void _dpu_dbg_debugfs_destroy(void)
+{
+}
+
+void dpu_dbg_init_dbg_buses(u32 hwversion)
+{
+	static struct dpu_dbg_base *dbg = &dpu_dbg_base;
+
+	memset(&dbg->dbgbus_dpu, 0, sizeof(dbg->dbgbus_dpu));
+	memset(&dbg->dbgbus_vbif_rt, 0, sizeof(dbg->dbgbus_vbif_rt));
+
+	if (IS_MSM8998_TARGET(hwversion)) {
+		dbg->dbgbus_dpu.entries = dbg_bus_dpu_8998;
+		dbg->dbgbus_dpu.cmn.entries_size = ARRAY_SIZE(dbg_bus_dpu_8998);
+		dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
+
+		dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
+		dbg->dbgbus_vbif_rt.cmn.entries_size =
+				ARRAY_SIZE(vbif_dbg_bus_msm8998);
+	} else if (IS_SDM845_TARGET(hwversion) || IS_SDM670_TARGET(hwversion)) {
+		dbg->dbgbus_dpu.entries = dbg_bus_dpu_sdm845;
+		dbg->dbgbus_dpu.cmn.entries_size =
+				ARRAY_SIZE(dbg_bus_dpu_sdm845);
+		dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
+
+		/* vbif is unchanged vs 8998 */
+		dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
+		dbg->dbgbus_vbif_rt.cmn.entries_size =
+				ARRAY_SIZE(vbif_dbg_bus_msm8998);
+	} else {
+		pr_err("unsupported chipset id %X\n", hwversion);
+	}
+}
+
+int dpu_dbg_init(struct device *dev)
+{
+	if (!dev) {
+		pr_err("invalid params\n");
+		return -EINVAL;
+	}
+
+	INIT_LIST_HEAD(&dpu_dbg_base.reg_base_list);
+	dpu_dbg_base.dev = dev;
+
+	INIT_WORK(&dpu_dbg_base.dump_work, _dpu_dump_work);
+
+	return 0;
+}
+
+/**
+ * dpu_dbg_destroy - destroy dpu debug facilities
+ */
+void dpu_dbg_destroy(void)
+{
+	_dpu_dbg_debugfs_destroy();
+}
+
+void dpu_dbg_set_dpu_top_offset(u32 blk_off)
+{
+	dpu_dbg_base.dbgbus_dpu.top_blk_off = blk_off;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h
new file mode 100644
index 000000000000..1e6fa945f98b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h
@@ -0,0 +1,103 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DPU_DBG_H_
+#define DPU_DBG_H_
+
+#include <stdarg.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+
+enum dpu_dbg_dump_flag {
+	DPU_DBG_DUMP_IN_LOG = BIT(0),
+	DPU_DBG_DUMP_IN_MEM = BIT(1),
+};
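+
+/*
+ * The flags may be OR'd into a bus's enable_mask; e.g. a mask of
+ * (DPU_DBG_DUMP_IN_LOG | DPU_DBG_DUMP_IN_MEM) both logs every test point
+ * and keeps a copy in the dumped_content buffer.
+ */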
+
+#if defined(CONFIG_DEBUG_FS)
+
+/**
+ * dpu_dbg_init_dbg_buses - initialize debug bus dumping support for the chipset
+ * @hwversion:		Chipset revision
+ */
+void dpu_dbg_init_dbg_buses(u32 hwversion);
+
+/**
+ * dpu_dbg_init - initialize global dpu debug facilities: regdump
+ * @dev:		device handle
+ * Returns:		0 or -ERROR
+ */
+int dpu_dbg_init(struct device *dev);
+
+/**
+ * dpu_dbg_debugfs_register - register entries at the given debugfs dir
+ * @debugfs_root:	debugfs root in which to create dpu debug entries
+ * Returns:	0 or -ERROR
+ */
+int dpu_dbg_debugfs_register(struct dentry *debugfs_root);
+
+/**
+ * dpu_dbg_destroy - destroy the global dpu debug facilities
+ * Returns:	none
+ */
+void dpu_dbg_destroy(void);
+
+/**
+ * dpu_dbg_dump - trigger dumping of all dpu_dbg facilities
+ * @queue_work:	  whether to queue the dumping work to the work_struct
+ * @name:	  string indicating origin of dump
+ * @dump_dbgbus_dpu:	  dump the dpu debug bus
+ * @dump_dbgbus_vbif_rt: dump the vbif rt debug bus
+ * Returns:	none
+ */
+void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
+		  bool dump_dbgbus_vbif_rt);
+
+/**
+ * dpu_dbg_set_dpu_top_offset - set the target specific offset from mdss base
+ *	address of the top registers. Used for accessing debug bus controls.
+ * @blk_off: offset from mdss base of the top block
+ */
+void dpu_dbg_set_dpu_top_offset(u32 blk_off);
+
+#else
+
+static inline void dpu_dbg_init_dbg_buses(u32 hwversion)
+{
+}
+
+static inline int dpu_dbg_init(struct device *dev)
+{
+	return 0;
+}
+
+static inline int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
+{
+	return 0;
+}
+
+static inline void dpu_dbg_destroy(void)
+{
+}
+
+static inline void dpu_dbg_dump(bool queue_work, const char *name,
+				bool dump_dbgbus_dpu, bool dump_dbgbus_vbif_rt)
+{
+}
+
+static inline void dpu_dbg_set_dpu_top_offset(u32 blk_off)
+{
+}
+
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* DPU_DBG_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
new file mode 100644
index 000000000000..ce4faee12adc
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -0,0 +1,2574 @@
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark at gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/kthread.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_intf.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_formats.h"
+#include "dpu_encoder_phys.h"
+#include "dpu_crtc.h"
+#include "dpu_trace.h"
+#include "dpu_core_irq.h"
+
+#define DPU_DEBUG_ENC(e, fmt, ...) DPU_DEBUG("enc%d " fmt,\
+		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
+		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DPU_DEBUG_PHYS(p, fmt, ...) DPU_DEBUG("enc%d intf%d pp%d " fmt,\
+		(p) ? (p)->parent->base.id : -1, \
+		(p) ? (p)->intf_idx - INTF_0 : -1, \
+		(p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
+		##__VA_ARGS__)
+
+#define DPU_ERROR_PHYS(p, fmt, ...) DPU_ERROR("enc%d intf%d pp%d " fmt,\
+		(p) ? (p)->parent->base.id : -1, \
+		(p) ? (p)->intf_idx - INTF_0 : -1, \
+		(p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
+		##__VA_ARGS__)
+
+/*
+ * Two to anticipate panels that can do cmd/vid dynamic switching.
+ * The plan is to create all possible physical encoder types, and switch
+ * between them at runtime.
+ */
+#define NUM_PHYS_ENCODER_TYPES 2
+
+#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
+	(MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
+
+#define MAX_CHANNELS_PER_ENC 2
+
+#define MISR_BUFF_SIZE			256
+
+#define IDLE_SHORT_TIMEOUT	1
+
+#define MAX_VDISPLAY_SPLIT 1080
+
+/**
+ * enum dpu_enc_rc_events - events for resource control state machine
+ * @DPU_ENC_RC_EVENT_KICKOFF:
+ *	This event happens at NORMAL priority.
+ *	Event that signals the start of the transfer. When this event is
+ *	received, enable MDP/DSI core clocks. Regardless of the previous
+ *	state, the resource should be in ON state at the end of this event.
+ * @DPU_ENC_RC_EVENT_FRAME_DONE:
+ *	This event happens at INTERRUPT level.
+ *	Event signals the end of the data transfer after the PP FRAME_DONE
+ *	event. At the end of this event, a delayed work is scheduled to go to
+ *	IDLE_PC state after IDLE_TIMEOUT time.
+ * @DPU_ENC_RC_EVENT_PRE_STOP:
+ *	This event happens at NORMAL priority.
+ *	This event, when received during the ON state, leave the RC STATE
+ *	in the PRE_OFF state. It should be followed by the STOP event as
+ *	part of encoder disable.
+ *	If received during IDLE or OFF states, it will do nothing.
+ * @DPU_ENC_RC_EVENT_STOP:
+ *	This event happens at NORMAL priority.
+ *	When this event is received, disable all the MDP/DSI core clocks, and
+ *	disable IRQs. It should be called from the PRE_OFF or IDLE states.
+ *	IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
+ *	PRE_OFF is expected when PRE_STOP was executed during the ON state.
+ *	Resource state should be in OFF at the end of the event.
+ * @DPU_ENC_RC_EVENT_ENTER_IDLE:
+ *	This event happens at NORMAL priority from a work item.
+ *	Event signals that there were no frame updates for IDLE_TIMEOUT time.
+ *	This would disable MDP/DSI core clocks and change the resource state
+ *	to IDLE.
+ */
+enum dpu_enc_rc_events {
+	DPU_ENC_RC_EVENT_KICKOFF = 1,
+	DPU_ENC_RC_EVENT_FRAME_DONE,
+	DPU_ENC_RC_EVENT_PRE_STOP,
+	DPU_ENC_RC_EVENT_STOP,
+	DPU_ENC_RC_EVENT_ENTER_IDLE
+};
+
+/*
+ * enum dpu_enc_rc_states - states that the resource control maintains
+ * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
+ * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
+ * @DPU_ENC_RC_STATE_ON: Resource is in ON state
+ * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
+ */
+enum dpu_enc_rc_states {
+	DPU_ENC_RC_STATE_OFF,
+	DPU_ENC_RC_STATE_PRE_OFF,
+	DPU_ENC_RC_STATE_ON,
+	DPU_ENC_RC_STATE_IDLE
+};
+
+/**
+ * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
+ *	encoders. Virtual encoder manages one "logical" display. Physical
+ *	encoders manage one intf block, tied to a specific panel/sub-panel.
+ *	Virtual encoder defers as much as possible to the physical encoders.
+ *	Virtual encoder registers itself with the DRM Framework as the encoder.
+ * @base:		drm_encoder base class for registration with DRM
+ * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @bus_scaling_client:	Client handle to the bus scaling interface
+ * @display_num_of_h_tiles: Number of horizontal tiles across the display
+ * @num_phys_encs:	Actual number of physical encoders contained.
+ * @phys_encs:		Container of physical encoders managed.
+ * @cur_master:		Pointer to the current master in this mode.
+ *			Optimization: only valid after enable; cleared on disable.
+ * @hw_pp:		Handle to the pingpong blocks used for the display. The
+ *			number of pingpong blocks can differ from num_phys_encs.
+ * @intfs_swapped:	Whether or not the phys_enc interfaces have been swapped
+ *			for partial update right-only cases, such as pingpong
+ *			split where virtual pingpong does not generate IRQs
+ * @crtc_vblank_cb:	Callback into the upper layer / CRTC for
+ *			notification of the VBLANK
+ * @crtc_vblank_cb_data:	Data from upper layer for VBLANK notification
+ * @debugfs_root:		Debug file system root file node
+ * @enc_lock:			Lock around physical encoder create/destroy and
+ *				access.
+ * @frame_busy_mask:		Bitmask tracking which phys_encs are still
+ *				busy processing the current command.
+ *				Bit0 = phys_encs[0] etc.
+ * @crtc_frame_event_cb:	callback handler for frame event
+ * @crtc_frame_event_cb_data:	callback handler private data
+ * @frame_done_timeout:		frame done timeout in Hz
+ * @frame_done_timer:		watchdog timer for frame done event
+ * @vsync_event_timer:		vsync timer
+ * @disp_info:			local copy of msm_display_info struct
+ * @misr_enable:		misr enable/disable status
+ * @misr_frame_count:		misr frame count before start capturing the data
+ * @idle_pc_supported:		indicate if idle power collapse is supported
+ * @rc_lock:			resource control mutex lock to protect
+ *				virt encoder over various state changes
+ * @rc_state:			resource controller state
+ * @delayed_off_work:		delayed worker to schedule disabling of
+ *				clks and resources after IDLE_TIMEOUT time.
+ * @vsync_event_work:		worker to handle vsync event for autorefresh
+ * @topology:                   topology of the display
+ * @mode_set_complete:          flag to indicate modeset completion
+ * @idle_timeout:		idle timeout duration in milliseconds
+ */
+struct dpu_encoder_virt {
+	struct drm_encoder base;
+	spinlock_t enc_spinlock;
+	uint32_t bus_scaling_client;
+
+	uint32_t display_num_of_h_tiles;
+
+	unsigned int num_phys_encs;
+	struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
+	struct dpu_encoder_phys *cur_master;
+	struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+
+	bool intfs_swapped;
+
+	void (*crtc_vblank_cb)(void *);
+	void *crtc_vblank_cb_data;
+
+	struct dentry *debugfs_root;
+	struct mutex enc_lock;
+	DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
+	void (*crtc_frame_event_cb)(void *, u32 event);
+	void *crtc_frame_event_cb_data;
+
+	atomic_t frame_done_timeout;
+	struct timer_list frame_done_timer;
+	struct timer_list vsync_event_timer;
+
+	struct msm_display_info disp_info;
+	bool misr_enable;
+	u32 misr_frame_count;
+
+	bool idle_pc_supported;
+	struct mutex rc_lock;
+	enum dpu_enc_rc_states rc_state;
+	struct kthread_delayed_work delayed_off_work;
+	struct kthread_work vsync_event_work;
+	struct msm_display_topology topology;
+	bool mode_set_complete;
+
+	u32 idle_timeout;
+};
+
+#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
+static inline int _dpu_encoder_power_enable(struct dpu_encoder_virt *dpu_enc,
+								bool enable)
+{
+	struct drm_encoder *drm_enc;
+	struct msm_drm_private *priv;
+	struct dpu_kms *dpu_kms;
+
+	if (!dpu_enc) {
+		DPU_ERROR("invalid dpu enc\n");
+		return -EINVAL;
+	}
+
+	drm_enc = &dpu_enc->base;
+	if (!drm_enc->dev || !drm_enc->dev->dev_private) {
+		DPU_ERROR("drm device invalid\n");
+		return -EINVAL;
+	}
+
+	priv = drm_enc->dev->dev_private;
+	if (!priv->kms) {
+		DPU_ERROR("invalid kms\n");
+		return -EINVAL;
+	}
+
+	dpu_kms = to_dpu_kms(priv->kms);
+
+	if (enable)
+		pm_runtime_get_sync(&dpu_kms->pdev->dev);
+	else
+		pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+	return 0;
+}
+
+void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
+		enum dpu_intr_idx intr_idx)
+{
+	DRM_ERROR("irq timeout id=%u, intf=%d, pp=%d, intr=%d\n",
+		  DRMID(phys_enc->parent), phys_enc->intf_idx - INTF_0,
+		  phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);
+
+	if (phys_enc->parent_ops.handle_frame_done)
+		phys_enc->parent_ops.handle_frame_done(
+				phys_enc->parent, phys_enc,
+				DPU_ENCODER_FRAME_EVENT_ERROR);
+}
+
+int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
+		enum dpu_intr_idx intr_idx,
+		struct dpu_encoder_wait_info *wait_info)
+{
+	struct dpu_encoder_irq *irq;
+	u32 irq_status;
+	int ret;
+
+	if (!phys_enc || !wait_info || intr_idx >= INTR_IDX_MAX) {
+		DPU_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+	irq = &phys_enc->irq[intr_idx];
+
+	/* note: do master / slave checking outside */
+
+	/* return EWOULDBLOCK since we know the wait isn't necessary */
+	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+		DRM_ERROR("encoder is disabled id=%u, intr=%d, hw=%d, irq=%d",
+			  DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+			  irq->irq_idx);
+		return -EWOULDBLOCK;
+	}
+
+	if (irq->irq_idx < 0) {
+		DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, hw=%d, irq=%s",
+			      DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+			      irq->name);
+		return 0;
+	}
+
+	DRM_DEBUG_KMS("id=%u, intr=%d, hw=%d, irq=%d, pp=%d, pending_cnt=%d",
+		      DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+		      irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
+		      atomic_read(wait_info->atomic_cnt));
+
+	ret = dpu_encoder_helper_wait_event_timeout(
+			DRMID(phys_enc->parent),
+			irq->hw_idx,
+			wait_info);
+
+	if (ret <= 0) {
+		irq_status = dpu_core_irq_read(phys_enc->dpu_kms,
+				irq->irq_idx, true);
+		if (irq_status) {
+			unsigned long flags;
+
+			DRM_DEBUG_KMS("irq not triggered id=%u, intr=%d, "
+				      "hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+				      DRMID(phys_enc->parent), intr_idx,
+				      irq->hw_idx, irq->irq_idx,
+				      phys_enc->hw_pp->idx - PINGPONG_0,
+				      atomic_read(wait_info->atomic_cnt));
+			local_irq_save(flags);
+			irq->cb.func(phys_enc, irq->irq_idx);
+			local_irq_restore(flags);
+			ret = 0;
+		} else {
+			ret = -ETIMEDOUT;
+			DRM_DEBUG_KMS("irq timeout id=%u, intr=%d, "
+				      "hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+				      DRMID(phys_enc->parent), intr_idx,
+				      irq->hw_idx, irq->irq_idx,
+				      phys_enc->hw_pp->idx - PINGPONG_0,
+				      atomic_read(wait_info->atomic_cnt));
+		}
+	} else {
+		ret = 0;
+		trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
+			intr_idx, irq->hw_idx, irq->irq_idx,
+			phys_enc->hw_pp->idx - PINGPONG_0,
+			atomic_read(wait_info->atomic_cnt));
+	}
+
+	return ret;
+}
+
+int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
+		enum dpu_intr_idx intr_idx)
+{
+	struct dpu_encoder_irq *irq;
+	int ret = 0;
+
+	if (!phys_enc || intr_idx >= INTR_IDX_MAX) {
+		DPU_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+	irq = &phys_enc->irq[intr_idx];
+
+	if (irq->irq_idx >= 0) {
+		DPU_DEBUG_PHYS(phys_enc,
+				"skipping already registered irq %s type %d\n",
+				irq->name, irq->intr_type);
+		return 0;
+	}
+
+	irq->irq_idx = dpu_core_irq_idx_lookup(phys_enc->dpu_kms,
+			irq->intr_type, irq->hw_idx);
+	if (irq->irq_idx < 0) {
+		DPU_ERROR_PHYS(phys_enc,
+			"failed to lookup IRQ index for %s type:%d\n",
+			irq->name, irq->intr_type);
+		return -EINVAL;
+	}
+
+	ret = dpu_core_irq_register_callback(phys_enc->dpu_kms, irq->irq_idx,
+			&irq->cb);
+	if (ret) {
+		DPU_ERROR_PHYS(phys_enc,
+			"failed to register IRQ callback for %s\n",
+			irq->name);
+		irq->irq_idx = -EINVAL;
+		return ret;
+	}
+
+	ret = dpu_core_irq_enable(phys_enc->dpu_kms, &irq->irq_idx, 1);
+	if (ret) {
+		DRM_ERROR("enable failed id=%u, intr=%d, hw=%d, irq=%d",
+			  DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+			  irq->irq_idx);
+		dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+				irq->irq_idx, &irq->cb);
+		irq->irq_idx = -EINVAL;
+		return ret;
+	}
+
+	trace_dpu_enc_irq_register_success(DRMID(phys_enc->parent), intr_idx,
+				irq->hw_idx, irq->irq_idx);
+
+	return ret;
+}
+
+int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
+		enum dpu_intr_idx intr_idx)
+{
+	struct dpu_encoder_irq *irq;
+	int ret;
+
+	if (!phys_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+	irq = &phys_enc->irq[intr_idx];
+
+	/* skip irqs that weren't registered */
+	if (irq->irq_idx < 0) {
+		DRM_ERROR("duplicate unregister id=%u, intr=%d, hw=%d, irq=%d",
+			  DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+			  irq->irq_idx);
+		return 0;
+	}
+
+	ret = dpu_core_irq_disable(phys_enc->dpu_kms, &irq->irq_idx, 1);
+	if (ret) {
+		DRM_ERROR("diable failed id=%u, intr=%d, hw=%d, irq=%d ret=%d",
+			  DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+			  irq->irq_idx, ret);
+	}
+
+	ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, irq->irq_idx,
+			&irq->cb);
+	if (ret) {
+		DRM_ERROR("unreg cb fail id=%u, intr=%d, hw=%d, irq=%d ret=%d",
+			  DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+			  irq->irq_idx, ret);
+	}
+
+	trace_dpu_enc_irq_unregister_success(DRMID(phys_enc->parent), intr_idx,
+					     irq->hw_idx, irq->irq_idx);
+
+	irq->irq_idx = -EINVAL;
+
+	return 0;
+}
+
+void dpu_encoder_get_hw_resources(struct drm_encoder *drm_enc,
+		struct dpu_encoder_hw_resources *hw_res,
+		struct drm_connector_state *conn_state)
+{
+	struct dpu_encoder_virt *dpu_enc = NULL;
+	int i = 0;
+
+	if (!hw_res || !drm_enc || !conn_state) {
+		DPU_ERROR("invalid argument(s), drm_enc %d, res %d, state %d\n",
+				drm_enc != 0, hw_res != 0, conn_state != 0);
+		return;
+	}
+
+	dpu_enc = to_dpu_encoder_virt(drm_enc);
+	DPU_DEBUG_ENC(dpu_enc, "\n");
+
+	/* Query resources used by phys encs, expected to be without overlap */
+	memset(hw_res, 0, sizeof(*hw_res));
+	hw_res->display_num_of_h_tiles = dpu_enc->display_num_of_h_tiles;
+
+	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+		if (phys && phys->ops.get_hw_resources)
+			phys->ops.get_hw_resources(phys, hw_res, conn_state);
+	}
+}
+
+void dpu_encoder_destroy(struct drm_encoder *drm_enc)
+{
+	struct dpu_encoder_virt *dpu_enc = NULL;
+	int i = 0;
+
+	if (!drm_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+
+	dpu_enc = to_dpu_encoder_virt(drm_enc);
+	DPU_DEBUG_ENC(dpu_enc, "\n");
+
+	mutex_lock(&dpu_enc->enc_lock);
+
+	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+		if (phys && phys->ops.destroy) {
+			phys->ops.destroy(phys);
+			--dpu_enc->num_phys_encs;
+			dpu_enc->phys_encs[i] = NULL;
+		}
+	}
+
+	if (dpu_enc->num_phys_encs)
+		DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
+				dpu_enc->num_phys_encs);
+	dpu_enc->num_phys_encs = 0;
+	mutex_unlock(&dpu_enc->enc_lock);
+
+	drm_encoder_cleanup(drm_enc);
+	mutex_destroy(&dpu_enc->enc_lock);
+
+	kfree(dpu_enc);
+}
+
+void dpu_encoder_helper_split_config(
+		struct dpu_encoder_phys *phys_enc,
+		enum dpu_intf interface)
+{
+	struct dpu_encoder_virt *dpu_enc;
+	struct split_pipe_cfg cfg = { 0 };
+	struct dpu_hw_mdp *hw_mdptop;
+	struct msm_display_info *disp_info;
+
+	if (!phys_enc || !phys_enc->hw_mdptop || !phys_enc->parent) {
+		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+		return;
+	}
+
+	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
+	hw_mdptop = phys_enc->hw_mdptop;
+	disp_info = &dpu_enc->disp_info;
+
+	if (disp_info->intf_type != DRM_MODE_CONNECTOR_DSI)
+		return;
+
+	/*
+	 * Disable split modes since the encoder will be operating as the
+	 * only encoder, either for the entire use case (for example,
+	 * single DSI) or for this frame in the case of left/right only
+	 * partial update.
+	 */
+	if (phys_enc->split_role == ENC_ROLE_SOLO) {
+		if (hw_mdptop->ops.setup_split_pipe)
+			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
+		return;
+	}
+
+	cfg.en = true;
+	cfg.mode = phys_enc->intf_mode;
+	cfg.intf = interface;
+
+	if (cfg.en && phys_enc->ops.needs_single_flush &&
+			phys_enc->ops.needs_single_flush(phys_enc))
+		cfg.split_flush_en = true;
+
+	if (phys_enc->split_role == ENC_ROLE_MASTER) {
+		DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);
+
+		if (hw_mdptop->ops.setup_split_pipe)
+			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
+	}
+}
+
+static void _dpu_encoder_adjust_mode(struct drm_connector *connector,
+		struct drm_display_mode *adj_mode)
+{
+	struct drm_display_mode *cur_mode;
+
+	if (!connector || !adj_mode)
+		return;
+
+	list_for_each_entry(cur_mode, &connector->modes, head) {
+		if (cur_mode->vdisplay == adj_mode->vdisplay &&
+			cur_mode->hdisplay == adj_mode->hdisplay &&
+			cur_mode->vrefresh == adj_mode->vrefresh) {
+			adj_mode->private = cur_mode->private;
+			adj_mode->private_flags |= cur_mode->private_flags;
+		}
+	}
+}
+
+static struct msm_display_topology dpu_encoder_get_topology(
+			struct dpu_encoder_virt *dpu_enc,
+			struct dpu_kms *dpu_kms,
+			struct drm_display_mode *mode)
+{
+	struct msm_display_topology topology;
+	int i, intf_count = 0;
+
+	for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
+		if (dpu_enc->phys_encs[i])
+			intf_count++;
+
+	/* Use split topology when vdisplay exceeds MAX_VDISPLAY_SPLIT */
+	topology.num_lm = (mode->vdisplay > MAX_VDISPLAY_SPLIT) ? 2 : 1;
+	topology.num_enc = 0;
+	topology.num_intf = intf_count;
+
+	return topology;
+}
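+
+/*
+ * Illustrative example for dpu_encoder_get_topology() (not part of the
+ * original patch): a 1920x1080 landscape mode (vdisplay == 1080) stays
+ * on a single layer mixer, while a 1080x1920 portrait panel
+ * (vdisplay == 1920 > MAX_VDISPLAY_SPLIT) takes the dual-LM split path.
+ */
+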
+static int dpu_encoder_virt_atomic_check(
+		struct drm_encoder *drm_enc,
+		struct drm_crtc_state *crtc_state,
+		struct drm_connector_state *conn_state)
+{
+	struct dpu_encoder_virt *dpu_enc;
+	struct msm_drm_private *priv;
+	struct dpu_kms *dpu_kms;
+	const struct drm_display_mode *mode;
+	struct drm_display_mode *adj_mode;
+	struct msm_display_topology topology;
+	int i = 0;
+	int ret = 0;
+
+	if (!drm_enc || !crtc_state || !conn_state) {
+		DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
+				drm_enc != 0, crtc_state != 0, conn_state != 0);
+		return -EINVAL;
+	}
+
+	dpu_enc = to_dpu_encoder_virt(drm_enc);
+	DPU_DEBUG_ENC(dpu_enc, "\n");
+
+	priv = drm_enc->dev->dev_private;
+	dpu_kms = to_dpu_kms(priv->kms);
+	mode = &crtc_state->mode;
+	adj_mode = &crtc_state->adjusted_mode;
+	trace_dpu_enc_atomic_check(DRMID(drm_enc));
+
+	/*
+	 * display drivers may populate private fields of the drm display mode
+	 * structure while registering possible modes of a connector with DRM.
+	 * These private fields are not populated back while DRM invokes
+	 * the mode_set callbacks. This module retrieves and populates the
+	 * private fields of the given mode.
+	 */
+	_dpu_encoder_adjust_mode(conn_state->connector, adj_mode);
+
+	/* perform atomic check on the first physical encoder (master) */
+	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+		if (phys && phys->ops.atomic_check)
+			ret = phys->ops.atomic_check(phys, crtc_state,
+					conn_state);
+		else if (phys && phys->ops.mode_fixup)
+			if (!phys->ops.mode_fixup(phys, mode, adj_mode))
+				ret = -EINVAL;
+
+		if (ret) {
+			DPU_ERROR_ENC(dpu_enc,
+					"mode unsupported, phys idx %d\n", i);
+			break;
+		}
+	}
+
+	topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
+
+	/* Reserve dynamic resources now. Indicating AtomicTest phase */
+	if (!ret) {
+		/*
+		 * Avoid reserving resources when mode set is pending. Topology
+		 * info may not be available to complete reservation.
+		 */
+		if (drm_atomic_crtc_needs_modeset(crtc_state)
+				&& dpu_enc->mode_set_complete) {
+			ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state,
+				conn_state, topology, true);
+			dpu_enc->mode_set_complete = false;
+		}
+	}
+
+	if (!ret)
+		drm_mode_set_crtcinfo(adj_mode, 0);
+
+	trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags,
+			adj_mode->private_flags);
+
+	return ret;
+}
+
+static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
+			struct msm_display_info *disp_info)
+{
+	struct dpu_vsync_source_cfg vsync_cfg = { 0 };
+	struct msm_drm_private *priv;
+	struct dpu_kms *dpu_kms;
+	struct dpu_hw_mdp *hw_mdptop;
+	struct drm_encoder *drm_enc;
+	int i;
+
+	if (!dpu_enc || !disp_info) {
+		DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
+					dpu_enc != NULL, disp_info != NULL);
+		return;
+	} else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
+		DPU_ERROR("invalid num phys enc %d/%d\n",
+				dpu_enc->num_phys_encs,
+				(int) ARRAY_SIZE(dpu_enc->hw_pp));
+		return;
+	}
+
+	drm_enc = &dpu_enc->base;
+	/* these pointers are checked in virt_enable_helper */
+	priv = drm_enc->dev->dev_private;
+
+	dpu_kms = to_dpu_kms(priv->kms);
+	if (!dpu_kms) {
+		DPU_ERROR("invalid dpu_kms\n");
+		return;
+	}
+
+	hw_mdptop = dpu_kms->hw_mdp;
+	if (!hw_mdptop) {
+		DPU_ERROR("invalid mdptop\n");
+		return;
+	}
+
+	if (hw_mdptop->ops.setup_vsync_source &&
+			disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
+		for (i = 0; i < dpu_enc->num_phys_encs; i++)
+			vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;
+
+		vsync_cfg.pp_count = dpu_enc->num_phys_encs;
+		if (disp_info->is_te_using_watchdog_timer)
+			vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
+		else
+			vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;
+
+		hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
+	}
+}
+
+static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
+{
+	struct dpu_encoder_virt *dpu_enc;
+	int i;
+
+	if (!drm_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+
+	dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+	DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable);
+	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+		if (phys && phys->ops.irq_control)
+			phys->ops.irq_control(phys, enable);
+	}
+}
+
+static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
+		bool enable)
+{
+	struct msm_drm_private *priv;
+	struct dpu_kms *dpu_kms;
+	struct dpu_encoder_virt *dpu_enc;
+
+	dpu_enc = to_dpu_encoder_virt(drm_enc);
+	priv = drm_enc->dev->dev_private;
+	dpu_kms = to_dpu_kms(priv->kms);
+
+	trace_dpu_enc_rc_helper(DRMID(drm_enc), enable);
+
+	if (!dpu_enc->cur_master) {
+		DPU_ERROR("encoder master not set\n");
+		return;
+	}
+
+	if (enable) {
+		/* enable DPU core clks */
+		pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+		/* enable all the irq */
+		_dpu_encoder_irq_control(drm_enc, true);
+
+	} else {
+		/* disable all the irq */
+		_dpu_encoder_irq_control(drm_enc, false);
+
+		/* disable DPU core clks */
+		pm_runtime_put_sync(&dpu_kms->pdev->dev);
+	}
+}
+
+static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
+		u32 sw_event)
+{
+	struct dpu_encoder_virt *dpu_enc;
+	struct msm_drm_private *priv;
+	struct msm_drm_thread *disp_thread;
+	bool is_vid_mode = false;
+
+	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private ||
+			!drm_enc->crtc) {
+		DPU_ERROR("invalid parameters\n");
+		return -EINVAL;
+	}
+	dpu_enc = to_dpu_encoder_virt(drm_enc);
+	priv = drm_enc->dev->dev_private;
+	is_vid_mode = dpu_enc->disp_info.capabilities &
+						MSM_DISPLAY_CAP_VID_MODE;
+
+	if (drm_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) {
+		DPU_ERROR("invalid crtc index\n");
+		return -EINVAL;
+	}
+	disp_thread = &priv->disp_thread[drm_enc->crtc->index];
+
+	/*
+	 * when idle_pc is not supported, process only KICKOFF, PRE_STOP and
+	 * STOP events and return early for other events (e.g. wb display).
+	 */
+	if (!dpu_enc->idle_pc_supported &&
+			(sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
+			sw_event != DPU_ENC_RC_EVENT_STOP &&
+			sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
+		return 0;
+
+	trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
+			 dpu_enc->rc_state, "begin");
+
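+	/*
+	 * resource control state machine handled below (sketch):
+	 *   OFF/IDLE --KICKOFF--> ON --FRAME_DONE--> ON (idle work queued)
+	 *   ON --ENTER_IDLE--> IDLE, ON --PRE_STOP--> PRE_OFF --STOP--> OFF
+	 */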
+	switch (sw_event) {
+	case DPU_ENC_RC_EVENT_KICKOFF:
+		/* cancel delayed off work, if any */
+		if (kthread_cancel_delayed_work_sync(
+				&dpu_enc->delayed_off_work))
+			DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
+					sw_event);
+
+		mutex_lock(&dpu_enc->rc_lock);
+
+		/* return if the resource control is already in ON state */
+		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
+			DRM_DEBUG_KMS("id;%u, sw_event:%d, rc in ON state\n",
+				      DRMID(drm_enc), sw_event);
+			mutex_unlock(&dpu_enc->rc_lock);
+			return 0;
+		} else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
+				dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
+			DRM_DEBUG_KMS("id;%u, sw_event:%d, rc in state %d\n",
+				      DRMID(drm_enc), sw_event,
+				      dpu_enc->rc_state);
+			mutex_unlock(&dpu_enc->rc_lock);
+			return -EINVAL;
+		}
+
+		if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
+			_dpu_encoder_irq_control(drm_enc, true);
+		else
+			_dpu_encoder_resource_control_helper(drm_enc, true);
+
+		dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;
+
+		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+				 "kickoff");
+
+		mutex_unlock(&dpu_enc->rc_lock);
+		break;
+
+	case DPU_ENC_RC_EVENT_FRAME_DONE:
+		/*
+		 * mutex lock is not used as this event happens at interrupt
+		 * context, and locking is not required since the other events
+		 * like KICKOFF and STOP do a wait-for-idle before executing
+		 * the resource_control
+		 */
+		if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
+			DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
+				      DRMID(drm_enc), sw_event,
+				      dpu_enc->rc_state);
+			return -EINVAL;
+		}
+
+		/*
+		 * schedule off work item only when there are no
+		 * frames pending
+		 */
+		if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
+			DRM_DEBUG_KMS("id:%d skip schedule work\n",
+				      DRMID(drm_enc));
+			return 0;
+		}
+
+		kthread_queue_delayed_work(
+			&disp_thread->worker,
+			&dpu_enc->delayed_off_work,
+			msecs_to_jiffies(dpu_enc->idle_timeout));
+
+		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+				 "frame done");
+		break;
+
+	case DPU_ENC_RC_EVENT_PRE_STOP:
+		/* cancel delayed off work, if any */
+		if (kthread_cancel_delayed_work_sync(
+				&dpu_enc->delayed_off_work))
+			DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
+					sw_event);
+
+		mutex_lock(&dpu_enc->rc_lock);
+
+		if (is_vid_mode &&
+		    dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
+			_dpu_encoder_irq_control(drm_enc, true);
+		} else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
+				dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
+			/* already OFF or IDLE, resources are off already */
+			DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
+				      DRMID(drm_enc), sw_event,
+				      dpu_enc->rc_state);
+			mutex_unlock(&dpu_enc->rc_lock);
+			return 0;
+		}
+
+		dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;
+
+		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+				 "pre stop");
+
+		mutex_unlock(&dpu_enc->rc_lock);
+		break;
+
+	case DPU_ENC_RC_EVENT_STOP:
+		mutex_lock(&dpu_enc->rc_lock);
+
+		/* return if the resource control is already in OFF state */
+		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
+			DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
+				      DRMID(drm_enc), sw_event);
+			mutex_unlock(&dpu_enc->rc_lock);
+			return 0;
+		} else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
+			DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
+				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
+			mutex_unlock(&dpu_enc->rc_lock);
+			return -EINVAL;
+		}
+
+		/*
+		 * expect to arrive here only from the IDLE or PRE_OFF state;
+		 * in IDLE the resources are already disabled
+		 */
+		if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
+			_dpu_encoder_resource_control_helper(drm_enc, false);
+
+		dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;
+
+		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+				 "stop");
+
+		mutex_unlock(&dpu_enc->rc_lock);
+		break;
+
+	case DPU_ENC_RC_EVENT_ENTER_IDLE:
+		mutex_lock(&dpu_enc->rc_lock);
+
+		if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
+			DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
+				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
+			mutex_unlock(&dpu_enc->rc_lock);
+			return 0;
+		}
+
+		/*
+		 * if we are in ON but a frame was just kicked off,
+		 * ignore the IDLE event, it's probably a stale timer event
+		 */
+		if (dpu_enc->frame_busy_mask[0]) {
+			DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
+				  DRMID(drm_enc), sw_event, dpu_enc->rc_state);
+			mutex_unlock(&dpu_enc->rc_lock);
+			return 0;
+		}
+
+		if (is_vid_mode)
+			_dpu_encoder_irq_control(drm_enc, false);
+		else
+			_dpu_encoder_resource_control_helper(drm_enc, false);
+
+		dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;
+
+		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+				 "idle");
+
+		mutex_unlock(&dpu_enc->rc_lock);
+		break;
+
+	default:
+		DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
+			  sw_event);
+		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+				 "error");
+		break;
+	}
+
+	trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+			 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+			 "end");
+	return 0;
+}
+
+static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
+				      struct drm_display_mode *mode,
+				      struct drm_display_mode *adj_mode)
+{
+	struct dpu_encoder_virt *dpu_enc;
+	struct msm_drm_private *priv;
+	struct dpu_kms *dpu_kms;
+	struct list_head *connector_list;
+	struct drm_connector *conn = NULL, *conn_iter;
+	struct dpu_rm_hw_iter pp_iter;
+	struct msm_display_topology topology;
+	enum dpu_rm_topology_name topology_name;
+	int i = 0, ret;
+
+	if (!drm_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+
+	dpu_enc = to_dpu_encoder_virt(drm_enc);
+	DPU_DEBUG_ENC(dpu_enc, "\n");
+
+	priv = drm_enc->dev->dev_private;
+	dpu_kms = to_dpu_kms(priv->kms);
+	connector_list = &dpu_kms->dev->mode_config.connector_list;
+
+	trace_dpu_enc_mode_set(DRMID(drm_enc));
+
+	list_for_each_entry(conn_iter, connector_list, head)
+		if (conn_iter->encoder == drm_enc)
+			conn = conn_iter;
+
+	if (!conn) {
+		DPU_ERROR_ENC(dpu_enc, "failed to find attached connector\n");
+		return;
+	} else if (!conn->state) {
+		DPU_ERROR_ENC(dpu_enc, "invalid connector state\n");
+		return;
+	}
+
+	topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
+
+	/* Reserve dynamic resources now (non-atomic-test phase) */
+	ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_enc->crtc->state,
+			conn->state, topology, false);
+	if (ret) {
+		DPU_ERROR_ENC(dpu_enc,
+				"failed to reserve hw resources, %d\n", ret);
+		return;
+	}
+
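+	/* cache the PingPong blocks reserved by the RM, one per phys encoder */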
+	dpu_rm_init_hw_iter(&pp_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG);
+	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+		dpu_enc->hw_pp[i] = NULL;
+		if (!dpu_rm_get_hw(&dpu_kms->rm, &pp_iter))
+			break;
+		dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) pp_iter.hw;
+	}
+
+	topology_name = dpu_rm_get_topology_name(topology);
+	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+		if (phys) {
+			if (!dpu_enc->hw_pp[i]) {
+				DPU_ERROR_ENC(dpu_enc,
+				    "invalid pingpong block for the encoder\n");
+				return;
+			}
+			phys->hw_pp = dpu_enc->hw_pp[i];
+			phys->connector = conn->state->connector;
+			phys->topology_name = topology_name;
+			if (phys->ops.mode_set)
+				phys->ops.mode_set(phys, mode, adj_mode);
+		}
+	}
+
+	dpu_enc->mode_set_complete = true;
+}
+
+static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
+{
+	struct dpu_encoder_virt *dpu_enc = NULL;
+	struct msm_drm_private *priv;
+	struct dpu_kms *dpu_kms;
+
+	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+		DPU_ERROR("invalid parameters\n");
+		return;
+	}
+
+	priv = drm_enc->dev->dev_private;
+	dpu_kms = to_dpu_kms(priv->kms);
+	if (!dpu_kms) {
+		DPU_ERROR("invalid dpu_kms\n");
+		return;
+	}
+
+	dpu_enc = to_dpu_encoder_virt(drm_enc);
+	if (!dpu_enc || !dpu_enc->cur_master) {
+		DPU_ERROR("invalid dpu encoder/master\n");
+		return;
+	}
+
+	if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DisplayPort &&
+	    dpu_enc->cur_master->hw_mdptop &&
+	    dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
+		dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
+					dpu_enc->cur_master->hw_mdptop);
+
+	if (dpu_enc->cur_master->hw_mdptop &&
+			dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc)
+		dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc(
+				dpu_enc->cur_master->hw_mdptop,
+				dpu_kms->catalog);
+
+	_dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
+}
+
+void dpu_encoder_virt_restore(struct drm_encoder *drm_enc)
+{
+	struct dpu_encoder_virt *dpu_enc = NULL;
+	int i;
+
+	if (!drm_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+	dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+		if (phys && (phys != dpu_enc->cur_master) && phys->ops.restore)
+			phys->ops.restore(phys);
+	}
+
+	if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
+		dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);
+
+	_dpu_encoder_virt_enable_helper(drm_enc);
+}
+
+static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
+{
+	struct dpu_encoder_virt *dpu_enc = NULL;
+	int i, ret = 0;
+	struct drm_display_mode *cur_mode = NULL;
+
+	if (!drm_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+	dpu_enc = to_dpu_encoder_virt(drm_enc);
+	cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
+
+	trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
+			     cur_mode->vdisplay);
+
+	dpu_enc->cur_master = NULL;
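+	/* pick the master phys encoder; it is enabled last, below */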
+	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+		if (phys && phys->ops.is_master && phys->ops.is_master(phys)) {
+			DPU_DEBUG_ENC(dpu_enc, "master is now idx %d\n", i);
+			dpu_enc->cur_master = phys;
+			break;
+		}
+	}
+
+	if (!dpu_enc->cur_master) {
+		DPU_ERROR("virt encoder has no master! num_phys %d\n", i);
+		return;
+	}
+
+	ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
+	if (ret) {
+		DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
+				ret);
+		return;
+	}
+
+	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+		if (!phys)
+			continue;
+
+		if (phys != dpu_enc->cur_master) {
+			if (phys->ops.enable)
+				phys->ops.enable(phys);
+		}
+
+		if (dpu_enc->misr_enable && (dpu_enc->disp_info.capabilities &
+		     MSM_DISPLAY_CAP_VID_MODE) && phys->ops.setup_misr)
+			phys->ops.setup_misr(phys, true,
+						dpu_enc->misr_frame_count);
+	}
+
+	if (dpu_enc->cur_master->ops.enable)
+		dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);
+
+	_dpu_encoder_virt_enable_helper(drm_enc);
+}
+
+static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
+{
+	struct dpu_encoder_virt *dpu_enc = NULL;
+	struct msm_drm_private *priv;
+	struct dpu_kms *dpu_kms;
+	struct drm_display_mode *mode;
+	int i = 0;
+
+	if (!drm_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	} else if (!drm_enc->dev) {
+		DPU_ERROR("invalid dev\n");
+		return;
+	} else if (!drm_enc->dev->dev_private) {
+		DPU_ERROR("invalid dev_private\n");
+		return;
+	}
+
+	mode = &drm_enc->crtc->state->adjusted_mode;
+
+	dpu_enc = to_dpu_encoder_virt(drm_enc);
+	DPU_DEBUG_ENC(dpu_enc, "\n");
+
+	priv = drm_enc->dev->dev_private;
+	dpu_kms = to_dpu_kms(priv->kms);
+
+	trace_dpu_enc_disable(DRMID(drm_enc));
+
+	/* wait for idle */
+	dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
+
+	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
+
+	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+		if (phys && phys->ops.disable)
+			phys->ops.disable(phys);
+	}
+
+	/* after phys waits for frame-done, should be no more frames pending */
+	if (atomic_xchg(&dpu_enc->frame_done_timeout, 0)) {
+		DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
+		del_timer_sync(&dpu_enc->frame_done_timer);
+	}
+
+	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);
+
+	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+		if (dpu_enc->phys_encs[i])
+			dpu_enc->phys_encs[i]->connector = NULL;
+	}
+
+	dpu_enc->cur_master = NULL;
+
+	DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
+
+	dpu_rm_release(&dpu_kms->rm, drm_enc);
+}
+
+static enum dpu_intf dpu_encoder_get_intf(struct dpu_mdss_cfg *catalog,
+		enum dpu_intf_type type, u32 controller_id)
+{
+	int i = 0;
+
+	for (i = 0; i < catalog->intf_count; i++) {
+		if (catalog->intf[i].type == type
+		    && catalog->intf[i].controller_id == controller_id) {
+			return catalog->intf[i].id;
+		}
+	}
+
+	return INTF_MAX;
+}
+
+static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
+		struct dpu_encoder_phys *phy_enc)
+{
+	struct dpu_encoder_virt *dpu_enc = NULL;
+	unsigned long lock_flags;
+
+	if (!drm_enc || !phy_enc)
+		return;
+
+	DPU_ATRACE_BEGIN("encoder_vblank_callback");
+	dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+	if (dpu_enc->crtc_vblank_cb)
+		dpu_enc->crtc_vblank_cb(dpu_enc->crtc_vblank_cb_data);
+	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+
+	atomic_inc(&phy_enc->vsync_cnt);
+	DPU_ATRACE_END("encoder_vblank_callback");
+}
+
+static void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
+		struct dpu_encoder_phys *phy_enc)
+{
+	if (!phy_enc)
+		return;
+
+	DPU_ATRACE_BEGIN("encoder_underrun_callback");
+	atomic_inc(&phy_enc->underrun_cnt);
+	trace_dpu_enc_underrun_cb(DRMID(drm_enc),
+				  atomic_read(&phy_enc->underrun_cnt));
+	DPU_ATRACE_END("encoder_underrun_callback");
+}
+
+void dpu_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
+		void (*vbl_cb)(void *), void *vbl_data)
+{
+	struct dpu_encoder_virt *dpu_enc;
+	unsigned long lock_flags;
+	bool enable;
+	int i;
+
+	if (!drm_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+
+	dpu_enc = to_dpu_encoder_virt(drm_enc);
+	enable = vbl_cb != NULL;
+
+	trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
+
+	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+	dpu_enc->crtc_vblank_cb = vbl_cb;
+	dpu_enc->crtc_vblank_cb_data = vbl_data;
+	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+
+	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+		if (phys && phys->ops.control_vblank_irq)
+			phys->ops.control_vblank_irq(phys, enable);
+	}
+}
+
+void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
+		void (*frame_event_cb)(void *, u32 event),
+		void *frame_event_cb_data)
+{
+	struct dpu_encoder_virt *dpu_enc;
+	unsigned long lock_flags;
+	bool enable;
+
+	if (!drm_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+
+	dpu_enc = to_dpu_encoder_virt(drm_enc);
+	enable = frame_event_cb != NULL;
+
+	trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);
+
+	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+	dpu_enc->crtc_frame_event_cb = frame_event_cb;
+	dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data;
+	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+}
+
+static void dpu_encoder_frame_done_callback(
+		struct drm_encoder *drm_enc,
+		struct dpu_encoder_phys *ready_phys, u32 event)
+{
+	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+	unsigned int i;
+
+	if (event & (DPU_ENCODER_FRAME_EVENT_DONE
+			| DPU_ENCODER_FRAME_EVENT_ERROR
+			| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
+
+		if (!dpu_enc->frame_busy_mask[0]) {
+			/*
+			 * suppress frame_done without waiter,
+			 * likely autorefresh
+			 */
+			trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc),
+					event, ready_phys->intf_idx);
+			return;
+		}
+
+		/* One of the physical encoders has become idle */
+		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+			if (dpu_enc->phys_encs[i] == ready_phys) {
+				clear_bit(i, dpu_enc->frame_busy_mask);
+				trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
+						dpu_enc->frame_busy_mask[0]);
+			}
+		}
+
+		if (!dpu_enc->frame_busy_mask[0]) {
+			atomic_set(&dpu_enc->frame_done_timeout, 0);
+			del_timer(&dpu_enc->frame_done_timer);
+
+			dpu_encoder_resource_control(drm_enc,
+					DPU_ENC_RC_EVENT_FRAME_DONE);
+
+			if (dpu_enc->crtc_frame_event_cb)
+				dpu_enc->crtc_frame_event_cb(
+					dpu_enc->crtc_frame_event_cb_data,
+					event);
+		}
+	} else {
+		if (dpu_enc->crtc_frame_event_cb)
+			dpu_enc->crtc_frame_event_cb(
+				dpu_enc->crtc_frame_event_cb_data, event);
+	}
+}
+
+static void dpu_encoder_off_work(struct kthread_work *work)
+{
+	struct dpu_encoder_virt *dpu_enc = container_of(work,
+			struct dpu_encoder_virt, delayed_off_work.work);
+
+	if (!dpu_enc) {
+		DPU_ERROR("invalid dpu encoder\n");
+		return;
+	}
+
+	dpu_encoder_resource_control(&dpu_enc->base,
+						DPU_ENC_RC_EVENT_ENTER_IDLE);
+
+	dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
+				DPU_ENCODER_FRAME_EVENT_IDLE);
+}
+
+/**
+ * _dpu_encoder_trigger_flush - trigger flush for a physical encoder
+ * @drm_enc: Pointer to drm encoder structure
+ * @phys: Pointer to physical encoder structure
+ * @extra_flush_bits: Additional bit mask to include in flush trigger
+ */
+static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
+		struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
+{
+	struct dpu_hw_ctl *ctl;
+	int pending_kickoff_cnt;
+	u32 ret = UINT_MAX;
+
+	if (!drm_enc || !phys) {
+		DPU_ERROR("invalid argument(s), drm_enc %d, phys_enc %d\n",
+				drm_enc != 0, phys != 0);
+		return;
+	}
+
+	if (!phys->hw_pp) {
+		DPU_ERROR("invalid pingpong hw\n");
+		return;
+	}
+
+	ctl = phys->hw_ctl;
+	if (!ctl || !ctl->ops.trigger_flush) {
+		DPU_ERROR("missing trigger cb\n");
+		return;
+	}
+
+	pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
+
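+	/* fold in any extra flush bits (e.g. collected from slave ctl paths) */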
+	if (extra_flush_bits && ctl->ops.update_pending_flush)
+		ctl->ops.update_pending_flush(ctl, extra_flush_bits);
+
+	ctl->ops.trigger_flush(ctl);
+
+	if (ctl->ops.get_pending_flush)
+		ret = ctl->ops.get_pending_flush(ctl);
+
+	trace_dpu_enc_trigger_flush(DRMID(drm_enc), phys->intf_idx,
+				    pending_kickoff_cnt, ctl->idx, ret);
+}
+
+/**
+ * _dpu_encoder_trigger_start - trigger start for a physical encoder
+ * @phys: Pointer to physical encoder structure
+ */
+static inline void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
+{
+	if (!phys) {
+		DPU_ERROR("invalid argument(s)\n");
+		return;
+	}
+
+	if (!phys->hw_pp) {
+		DPU_ERROR("invalid pingpong hw\n");
+		return;
+	}
+
+	if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
+		phys->ops.trigger_start(phys);
+}
+
+void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_hw_ctl *ctl;
+
+	if (!phys_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+
+	ctl = phys_enc->hw_ctl;
+	if (ctl && ctl->ops.trigger_start) {
+		ctl->ops.trigger_start(ctl);
+		trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
+	}
+}
+
+int dpu_encoder_helper_wait_event_timeout(
+		int32_t drm_id,
+		int32_t hw_id,
+		struct dpu_encoder_wait_info *info)
+{
+	int rc = 0;
+	s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
+	s64 wait_jiffies = msecs_to_jiffies(info->timeout_ms);
+	s64 time;
+
+	do {
+		rc = wait_event_timeout(*(info->wq),
+				atomic_read(info->atomic_cnt) == 0,
+				wait_jiffies);
+		time = ktime_to_ms(ktime_get());
+
+		trace_dpu_enc_wait_event_timeout(drm_id, hw_id, rc, time,
+						 expected_time,
+						 atomic_read(info->atomic_cnt));
+	/* If we timed out, counter is valid and time is less, wait again */
+	} while (atomic_read(info->atomic_cnt) && (rc == 0) &&
+			(time < expected_time));
+
+	return rc;
+}
+
+void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_virt *dpu_enc;
+	struct dpu_hw_ctl *ctl;
+	int rc;
+
+	if (!phys_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
+	ctl = phys_enc->hw_ctl;
+
+	if (!ctl || !ctl->ops.reset)
+		return;
+
+	DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(phys_enc->parent),
+		      ctl->idx);
+
+	rc = ctl->ops.reset(ctl);
+	if (rc) {
+		DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n",  ctl->idx);
+		dpu_dbg_dump(false, __func__, true, true);
+	}
+
+	phys_enc->enable_state = DPU_ENC_ENABLED;
+}
+
+/**
+ * _dpu_encoder_kickoff_phys - handle physical encoder kickoff
+ * @dpu_enc: Pointer to virtual encoder structure
+ *
+ * Iterate through the physical encoders and perform consolidated flush
+ * and/or control start triggering as needed. This is done in the virtual
+ * encoder rather than the individual physical ones in order to handle
+ * use cases that require visibility into multiple physical encoders at
+ * a time.
+ */
+static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
+{
+	struct dpu_hw_ctl *ctl;
+	uint32_t i, pending_flush;
+	unsigned long lock_flags;
+
+	if (!dpu_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+
+	pending_flush = 0x0;
+
+	/* update pending counts and trigger kickoff ctl flush atomically */
+	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+
+	/* don't perform flush/start operations for slave encoders */
+	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+		if (!phys || phys->enable_state == DPU_ENC_DISABLED)
+			continue;
+
+		ctl = phys->hw_ctl;
+		if (!ctl)
+			continue;
+
+		if (phys->split_role != ENC_ROLE_SLAVE)
+			set_bit(i, dpu_enc->frame_busy_mask);
+		if (!phys->ops.needs_single_flush ||
+				!phys->ops.needs_single_flush(phys))
+			_dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
+		else if (ctl->ops.get_pending_flush)
+			pending_flush |= ctl->ops.get_pending_flush(ctl);
+	}
+
+	/* for split flush, combine pending flush masks and send to master */
+	if (pending_flush && dpu_enc->cur_master) {
+		_dpu_encoder_trigger_flush(
+				&dpu_enc->base,
+				dpu_enc->cur_master,
+				pending_flush);
+	}
+
+	_dpu_encoder_trigger_start(dpu_enc->cur_master);
+
+	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+}
+
+bool dpu_encoder_check_mode(struct drm_encoder *drm_enc, u32 mode)
+{
+	struct dpu_encoder_virt *dpu_enc;
+	struct msm_display_info *disp_info;
+
+	if (!drm_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return false;
+	}
+
+	dpu_enc = to_dpu_encoder_virt(drm_enc);
+	disp_info = &dpu_enc->disp_info;
+
+	return (disp_info->capabilities & mode);
+}
+
+void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
+{
+	struct dpu_encoder_virt *dpu_enc;
+	struct dpu_encoder_phys *phys;
+	unsigned int i;
+	struct dpu_hw_ctl *ctl;
+	struct msm_display_info *disp_info;
+
+	if (!drm_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+	dpu_enc = to_dpu_encoder_virt(drm_enc);
+	disp_info = &dpu_enc->disp_info;
+
+	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+		phys = dpu_enc->phys_encs[i];
+
+		if (phys && phys->hw_ctl) {
+			ctl = phys->hw_ctl;
+			if (ctl->ops.clear_pending_flush)
+				ctl->ops.clear_pending_flush(ctl);
+
+			/* update only for command mode primary ctl */
+			if ((phys == dpu_enc->cur_master) &&
+			   (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
+			    && ctl->ops.trigger_pending)
+				ctl->ops.trigger_pending(ctl);
+		}
+	}
+}
+
+static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
+		struct drm_display_mode *mode)
+{
+	u64 pclk_rate;
+	u32 pclk_period;
+	u32 line_time;
+
+	/*
+	 * For linetime calculation, only operate on master encoder.
+	 */
+	if (!dpu_enc->cur_master)
+		return 0;
+
+	if (!dpu_enc->cur_master->ops.get_line_count) {
+		DPU_ERROR("get_line_count function not defined\n");
+		return 0;
+	}
+
+	pclk_rate = mode->clock; /* pixel clock in kHz */
+	if (pclk_rate == 0) {
+		DPU_ERROR("pclk is 0, cannot calculate line time\n");
+		return 0;
+	}
+
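+	/* 10^9 / (rate in kHz) yields the pixel clock period in picoseconds */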
+	pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
+	if (pclk_period == 0) {
+		DPU_ERROR("pclk period is 0\n");
+		return 0;
+	}
+
+	/*
+	 * Line time calculation based on Pixel clock and HTOTAL.
+	 * Final unit is in ns.
+	 */
+	line_time = (pclk_period * mode->htotal) / 1000;
+	if (line_time == 0) {
+		DPU_ERROR("line time calculation is 0\n");
+		return 0;
+	}
+
+	DPU_DEBUG_ENC(dpu_enc,
+			"clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
+			pclk_rate, pclk_period, line_time);
+
+	return line_time;
+}
+
+static int _dpu_encoder_wakeup_time(struct drm_encoder *drm_enc,
+		ktime_t *wakeup_time)
+{
+	struct drm_display_mode *mode;
+	struct dpu_encoder_virt *dpu_enc;
+	u32 cur_line;
+	u32 line_time;
+	u32 vtotal, time_to_vsync;
+	ktime_t cur_time;
+
+	dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+	if (!drm_enc->crtc || !drm_enc->crtc->state) {
+		DPU_ERROR("crtc/crtc state object is NULL\n");
+		return -EINVAL;
+	}
+	mode = &drm_enc->crtc->state->adjusted_mode;
+
+	line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode);
+	if (!line_time)
+		return -EINVAL;
+
+	cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master);
+
+	vtotal = mode->vtotal;
+	if (cur_line >= vtotal)
+		time_to_vsync = line_time * vtotal;
+	else
+		time_to_vsync = line_time * (vtotal - cur_line);
+
+	if (time_to_vsync == 0) {
+		DPU_ERROR("time to vsync should not be zero, vtotal=%d\n",
+				vtotal);
+		return -EINVAL;
+	}
+
+	cur_time = ktime_get();
+	*wakeup_time = ktime_add_ns(cur_time, time_to_vsync);
+
+	DPU_DEBUG_ENC(dpu_enc,
+			"cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
+			cur_line, vtotal, time_to_vsync,
+			ktime_to_ms(cur_time),
+			ktime_to_ms(*wakeup_time));
+	return 0;
+}
+
+static void dpu_encoder_vsync_event_handler(struct timer_list *t)
+{
+	struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
+			vsync_event_timer);
+	struct drm_encoder *drm_enc = &dpu_enc->base;
+	struct msm_drm_private *priv;
+	struct msm_drm_thread *event_thread;
+
+	if (!drm_enc->dev || !drm_enc->dev->dev_private ||
+			!drm_enc->crtc) {
+		DPU_ERROR("invalid parameters\n");
+		return;
+	}
+
+	priv = drm_enc->dev->dev_private;
+
+	if (drm_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) {
+		DPU_ERROR("invalid crtc index\n");
+		return;
+	}
+	event_thread = &priv->event_thread[drm_enc->crtc->index];
+	if (!event_thread) {
+		DPU_ERROR("event_thread not found for crtc:%d\n",
+				drm_enc->crtc->index);
+		return;
+	}
+
+	del_timer(&dpu_enc->vsync_event_timer);
+}
+
+static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
+{
+	struct dpu_encoder_virt *dpu_enc = container_of(work,
+			struct dpu_encoder_virt, vsync_event_work);
+	ktime_t wakeup_time;
+
+	if (!dpu_enc) {
+		DPU_ERROR("invalid dpu encoder\n");
+		return;
+	}
+
+	if (_dpu_encoder_wakeup_time(&dpu_enc->base, &wakeup_time))
+		return;
+
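+	/* re-arm the timer to fire around the estimated next vsync */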
+	trace_dpu_enc_vsync_event_work(DRMID(&dpu_enc->base), wakeup_time);
+	mod_timer(&dpu_enc->vsync_event_timer,
+			nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
+}
+
+void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
+		struct dpu_encoder_kickoff_params *params)
+{
+	struct dpu_encoder_virt *dpu_enc;
+	struct dpu_encoder_phys *phys;
+	bool needs_hw_reset = false;
+	unsigned int i;
+
+	if (!drm_enc || !params) {
+		DPU_ERROR("invalid args\n");
+		return;
+	}
+	dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+	trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));
+
+	/* prepare for next kickoff, may include waiting on previous kickoff */
+	DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
+	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+		phys = dpu_enc->phys_encs[i];
+		if (phys) {
+			if (phys->ops.prepare_for_kickoff)
+				phys->ops.prepare_for_kickoff(phys, params);
+			if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
+				needs_hw_reset = true;
+		}
+	}
+	DPU_ATRACE_END("enc_prepare_for_kickoff");
+
+	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
+
+	/* if any phys needs reset, reset all phys, in-order */
+	if (needs_hw_reset) {
+		trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
+		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+			phys = dpu_enc->phys_encs[i];
+			if (phys && phys->ops.hw_reset)
+				phys->ops.hw_reset(phys);
+		}
+	}
+}
+
+void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
+{
+	struct dpu_encoder_virt *dpu_enc;
+	struct dpu_encoder_phys *phys;
+	ktime_t wakeup_time;
+	unsigned int i;
+
+	if (!drm_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+	DPU_ATRACE_BEGIN("encoder_kickoff");
+	dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+	trace_dpu_enc_kickoff(DRMID(drm_enc));
+
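+	/*
+	 * arm the frame-done watchdog; the timeout is kept in ms, i.e.
+	 * DPU_FRAME_DONE_TIMEOUT scaled by the current refresh rate
+	 */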
+	atomic_set(&dpu_enc->frame_done_timeout,
+			DPU_FRAME_DONE_TIMEOUT * 1000 /
+			drm_enc->crtc->state->adjusted_mode.vrefresh);
+	mod_timer(&dpu_enc->frame_done_timer, jiffies +
+		((atomic_read(&dpu_enc->frame_done_timeout) * HZ) / 1000));
+
+	/* All phys encs are ready to go, trigger the kickoff */
+	_dpu_encoder_kickoff_phys(dpu_enc);
+
+	/* allow phys encs to handle any post-kickoff business */
+	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+		phys = dpu_enc->phys_encs[i];
+		if (phys && phys->ops.handle_post_kickoff)
+			phys->ops.handle_post_kickoff(phys);
+	}
+
+	if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DSI &&
+			!_dpu_encoder_wakeup_time(drm_enc, &wakeup_time)) {
+		trace_dpu_enc_early_kickoff(DRMID(drm_enc),
+					    ktime_to_ms(wakeup_time));
+		mod_timer(&dpu_enc->vsync_event_timer,
+				nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
+	}
+
+	DPU_ATRACE_END("encoder_kickoff");
+}
+
+int dpu_encoder_helper_hw_release(struct dpu_encoder_phys *phys_enc,
+		struct drm_framebuffer *fb)
+{
+	struct drm_encoder *drm_enc;
+	struct dpu_hw_mixer_cfg mixer;
+	struct dpu_rm_hw_iter lm_iter;
+	bool lm_valid = false;
+
+	if (!phys_enc || !phys_enc->parent) {
+		DPU_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	drm_enc = phys_enc->parent;
+	memset(&mixer, 0, sizeof(mixer));
+
+	/* reset associated CTL/LMs */
+	if (phys_enc->hw_ctl->ops.clear_pending_flush)
+		phys_enc->hw_ctl->ops.clear_pending_flush(phys_enc->hw_ctl);
+	if (phys_enc->hw_ctl->ops.clear_all_blendstages)
+		phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl);
+
+	dpu_rm_init_hw_iter(&lm_iter, drm_enc->base.id, DPU_HW_BLK_LM);
+	while (dpu_rm_get_hw(&phys_enc->dpu_kms->rm, &lm_iter)) {
+		struct dpu_hw_mixer *hw_lm = (struct dpu_hw_mixer *)lm_iter.hw;
+
+		if (!hw_lm)
+			continue;
+
+		/* need to flush LM to remove it */
+		if (phys_enc->hw_ctl->ops.get_bitmask_mixer &&
+				phys_enc->hw_ctl->ops.update_pending_flush)
+			phys_enc->hw_ctl->ops.update_pending_flush(
+					phys_enc->hw_ctl,
+					phys_enc->hw_ctl->ops.get_bitmask_mixer(
+					phys_enc->hw_ctl, hw_lm->idx));
+
+		if (fb) {
+			/* assume a single LM if targeting a frame buffer */
+			if (lm_valid)
+				continue;
+
+			mixer.out_height = fb->height;
+			mixer.out_width = fb->width;
+
+			if (hw_lm->ops.setup_mixer_out)
+				hw_lm->ops.setup_mixer_out(hw_lm, &mixer);
+		}
+
+		lm_valid = true;
+
+		/* only enable border color on LM */
+		if (phys_enc->hw_ctl->ops.setup_blendstage)
+			phys_enc->hw_ctl->ops.setup_blendstage(
+					phys_enc->hw_ctl, hw_lm->idx, NULL);
+	}
+
+	if (!lm_valid) {
+		DPU_DEBUG_ENC(to_dpu_encoder_virt(drm_enc), "lm not found\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
+{
+	struct dpu_encoder_virt *dpu_enc;
+	struct dpu_encoder_phys *phys;
+	int i;
+
+	if (!drm_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+	dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+		phys = dpu_enc->phys_encs[i];
+		if (phys && phys->ops.prepare_commit)
+			phys->ops.prepare_commit(phys);
+	}
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_encoder_status_show(struct seq_file *s, void *data)
+{
+	struct dpu_encoder_virt *dpu_enc;
+	int i;
+
+	if (!s || !s->private)
+		return -EINVAL;
+
+	dpu_enc = s->private;
+
+	mutex_lock(&dpu_enc->enc_lock);
+	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+		if (!phys)
+			continue;
+
+		seq_printf(s, "intf:%d    vsync:%8d     underrun:%8d    ",
+				phys->intf_idx - INTF_0,
+				atomic_read(&phys->vsync_cnt),
+				atomic_read(&phys->underrun_cnt));
+
+		switch (phys->intf_mode) {
+		case INTF_MODE_VIDEO:
+			seq_puts(s, "mode: video\n");
+			break;
+		case INTF_MODE_CMD:
+			seq_puts(s, "mode: command\n");
+			break;
+		default:
+			seq_puts(s, "mode: ???\n");
+			break;
+		}
+	}
+	mutex_unlock(&dpu_enc->enc_lock);
+
+	return 0;
+}
+
+static int _dpu_encoder_debugfs_status_open(struct inode *inode,
+		struct file *file)
+{
+	return single_open(file, _dpu_encoder_status_show, inode->i_private);
+}
+
+static ssize_t _dpu_encoder_misr_setup(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct dpu_encoder_virt *dpu_enc;
+	int i = 0, rc;
+	char buf[MISR_BUFF_SIZE + 1];
+	size_t buff_copy;
+	u32 frame_count, enable;
+
+	if (!file || !file->private_data)
+		return -EINVAL;
+
+	dpu_enc = file->private_data;
+
+	buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
+	if (copy_from_user(buf, user_buf, buff_copy))
+		return -EINVAL;
+
+	buf[buff_copy] = 0; /* end of string */
+
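+	/* expected input: "<enable> <frame_count>", e.g. "1 5" */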
+	if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
+		return -EINVAL;
+
+	rc = _dpu_encoder_power_enable(dpu_enc, true);
+	if (rc)
+		return rc;
+
+	mutex_lock(&dpu_enc->enc_lock);
+	dpu_enc->misr_enable = enable;
+	dpu_enc->misr_frame_count = frame_count;
+	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+		if (!phys || !phys->ops.setup_misr)
+			continue;
+
+		phys->ops.setup_misr(phys, enable, frame_count);
+	}
+	mutex_unlock(&dpu_enc->enc_lock);
+	_dpu_encoder_power_enable(dpu_enc, false);
+
+	return count;
+}
+
+static ssize_t _dpu_encoder_misr_read(struct file *file,
+		char __user *user_buff, size_t count, loff_t *ppos)
+{
+	struct dpu_encoder_virt *dpu_enc;
+	int i = 0, len = 0;
+	char buf[MISR_BUFF_SIZE + 1] = {'\0'};
+	int rc;
+
+	if (*ppos)
+		return 0;
+
+	if (!file || !file->private_data)
+		return -EINVAL;
+
+	dpu_enc = file->private_data;
+
+	rc = _dpu_encoder_power_enable(dpu_enc, true);
+	if (rc)
+		return rc;
+
+	mutex_lock(&dpu_enc->enc_lock);
+	if (!dpu_enc->misr_enable) {
+		len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+			"disabled\n");
+		goto buff_check;
+	} else if (dpu_enc->disp_info.capabilities &
+						~MSM_DISPLAY_CAP_VID_MODE) {
+		len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+			"unsupported\n");
+		goto buff_check;
+	}
+
+	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+		if (!phys || !phys->ops.collect_misr)
+			continue;
+
+		len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+			"Intf idx:%d\n", phys->intf_idx - INTF_0);
+		len += snprintf(buf + len, MISR_BUFF_SIZE - len, "0x%x\n",
+					phys->ops.collect_misr(phys));
+	}
+
+buff_check:
+	if (count <= len) {
+		len = 0;
+		goto end;
+	}
+
+	if (copy_to_user(user_buff, buf, len)) {
+		len = -EFAULT;
+		goto end;
+	}
+
+	*ppos += len;   /* increase offset */
+
+end:
+	mutex_unlock(&dpu_enc->enc_lock);
+	_dpu_encoder_power_enable(dpu_enc, false);
+	return len;
+}
+
+static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
+{
+	struct dpu_encoder_virt *dpu_enc;
+	struct msm_drm_private *priv;
+	struct dpu_kms *dpu_kms;
+	int i;
+
+	static const struct file_operations debugfs_status_fops = {
+		.open =		_dpu_encoder_debugfs_status_open,
+		.read =		seq_read,
+		.llseek =	seq_lseek,
+		.release =	single_release,
+	};
+
+	static const struct file_operations debugfs_misr_fops = {
+		.open = simple_open,
+		.read = _dpu_encoder_misr_read,
+		.write = _dpu_encoder_misr_setup,
+	};
+
+	char name[DPU_NAME_SIZE];
+
+	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+		DPU_ERROR("invalid encoder or kms\n");
+		return -EINVAL;
+	}
+
+	dpu_enc = to_dpu_encoder_virt(drm_enc);
+	priv = drm_enc->dev->dev_private;
+	dpu_kms = to_dpu_kms(priv->kms);
+
+	snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id);
+
+	/* create overall sub-directory for the encoder */
+	dpu_enc->debugfs_root = debugfs_create_dir(name,
+			drm_enc->dev->primary->debugfs_root);
+	if (!dpu_enc->debugfs_root)
+		return -ENOMEM;
+
+	/* don't error check these */
+	debugfs_create_file("status", 0600,
+		dpu_enc->debugfs_root, dpu_enc, &debugfs_status_fops);
+
+	debugfs_create_file("misr_data", 0600,
+		dpu_enc->debugfs_root, dpu_enc, &debugfs_misr_fops);
+
+	for (i = 0; i < dpu_enc->num_phys_encs; i++)
+		if (dpu_enc->phys_encs[i] &&
+				dpu_enc->phys_encs[i]->ops.late_register)
+			dpu_enc->phys_encs[i]->ops.late_register(
+					dpu_enc->phys_encs[i],
+					dpu_enc->debugfs_root);
+
+	return 0;
+}
+
+static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
+{
+	struct dpu_encoder_virt *dpu_enc;
+
+	if (!drm_enc)
+		return;
+
+	dpu_enc = to_dpu_encoder_virt(drm_enc);
+	debugfs_remove_recursive(dpu_enc->debugfs_root);
+}
+#else
+static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
+{
+	return 0;
+}
+
+static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
+{
+}
+#endif
+
+static int dpu_encoder_late_register(struct drm_encoder *encoder)
+{
+	return _dpu_encoder_init_debugfs(encoder);
+}
+
+static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
+{
+	_dpu_encoder_destroy_debugfs(encoder);
+}
+
+static int dpu_encoder_virt_add_phys_encs(
+		u32 display_caps,
+		struct dpu_encoder_virt *dpu_enc,
+		struct dpu_enc_phys_init_params *params)
+{
+	struct dpu_encoder_phys *enc = NULL;
+
+	DPU_DEBUG_ENC(dpu_enc, "\n");
+
+	/*
+	 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoders
+	 * in this function, so check up-front that the array has room.
+	 */
+	if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
+			ARRAY_SIZE(dpu_enc->phys_encs)) {
+		DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n",
+			  dpu_enc->num_phys_encs);
+		return -EINVAL;
+	}
+
+	if (display_caps & MSM_DISPLAY_CAP_VID_MODE) {
+		enc = dpu_encoder_phys_vid_init(params);
+
+		if (IS_ERR_OR_NULL(enc)) {
+			DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
+				PTR_ERR(enc));
+			return enc == NULL ? -EINVAL : PTR_ERR(enc);
+		}
+
+		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
+		++dpu_enc->num_phys_encs;
+	}
+
+	if (display_caps & MSM_DISPLAY_CAP_CMD_MODE) {
+		enc = dpu_encoder_phys_cmd_init(params);
+
+		if (IS_ERR_OR_NULL(enc)) {
+			DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
+				PTR_ERR(enc));
+			return enc == NULL ? -EINVAL : PTR_ERR(enc);
+		}
+
+		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
+		++dpu_enc->num_phys_encs;
+	}
+
+	return 0;
+}
+
+static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
+				 struct dpu_kms *dpu_kms,
+				 struct msm_display_info *disp_info,
+				 int *drm_enc_mode)
+{
+	int ret = 0;
+	int i = 0;
+	enum dpu_intf_type intf_type;
+	struct dpu_encoder_virt_ops parent_ops = {
+		dpu_encoder_vblank_callback,
+		dpu_encoder_underrun_callback,
+		dpu_encoder_frame_done_callback,
+	};
+	struct dpu_enc_phys_init_params phys_params;
+
+	if (!dpu_enc || !dpu_kms) {
+		DPU_ERROR("invalid arg(s), enc %d kms %d\n",
+				dpu_enc != 0, dpu_kms != 0);
+		return -EINVAL;
+	}
+
+	memset(&phys_params, 0, sizeof(phys_params));
+	phys_params.dpu_kms = dpu_kms;
+	phys_params.parent = &dpu_enc->base;
+	phys_params.parent_ops = parent_ops;
+	phys_params.enc_spinlock = &dpu_enc->enc_spinlock;
+
+	DPU_DEBUG("\n");
+
+	if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI) {
+		*drm_enc_mode = DRM_MODE_ENCODER_DSI;
+		intf_type = INTF_DSI;
+	} else if (disp_info->intf_type == DRM_MODE_CONNECTOR_HDMIA) {
+		*drm_enc_mode = DRM_MODE_ENCODER_TMDS;
+		intf_type = INTF_HDMI;
+	} else if (disp_info->intf_type == DRM_MODE_CONNECTOR_DisplayPort) {
+		*drm_enc_mode = DRM_MODE_ENCODER_TMDS;
+		intf_type = INTF_DP;
+	} else {
+		DPU_ERROR_ENC(dpu_enc, "unsupported display interface type\n");
+		return -EINVAL;
+	}
+
+	WARN_ON(disp_info->num_of_h_tiles < 1);
+
+	dpu_enc->display_num_of_h_tiles = disp_info->num_of_h_tiles;
+
+	DPU_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
+
+	if ((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) ||
+	    (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE))
+		dpu_enc->idle_pc_supported =
+				dpu_kms->catalog->caps->has_idle_pc;
+
+	mutex_lock(&dpu_enc->enc_lock);
+	for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
+		/*
+		 * Left-most tile is at index 0, content is controller id
+		 * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
+		 * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
+		 */
+		u32 controller_id = disp_info->h_tile_instance[i];
+
+		if (disp_info->num_of_h_tiles > 1) {
+			if (i == 0)
+				phys_params.split_role = ENC_ROLE_MASTER;
+			else
+				phys_params.split_role = ENC_ROLE_SLAVE;
+		} else {
+			phys_params.split_role = ENC_ROLE_SOLO;
+		}
+
+		DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n",
+				i, controller_id, phys_params.split_role);
+
+		phys_params.intf_idx = dpu_encoder_get_intf(dpu_kms->catalog,
+				intf_type, controller_id);
+		if (phys_params.intf_idx == INTF_MAX) {
+			DPU_ERROR_ENC(dpu_enc,
+					"could not get intf: type %d, id %d\n",
+					intf_type, controller_id);
+			ret = -EINVAL;
+		}
+
+		if (!ret) {
+			ret = dpu_encoder_virt_add_phys_encs(
+					disp_info->capabilities,
+					dpu_enc, &phys_params);
+			if (ret)
+				DPU_ERROR_ENC(dpu_enc,
+						"failed to add phys encs\n");
+		}
+	}
+
+	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+		if (phys) {
+			atomic_set(&phys->vsync_cnt, 0);
+			atomic_set(&phys->underrun_cnt, 0);
+		}
+	}
+	mutex_unlock(&dpu_enc->enc_lock);
+
+	return ret;
+}
+
+static void dpu_encoder_frame_done_timeout(struct timer_list *t)
+{
+	struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
+			frame_done_timer);
+	struct drm_encoder *drm_enc = &dpu_enc->base;
+	struct msm_drm_private *priv;
+	u32 event;
+
+	if (!drm_enc->dev || !drm_enc->dev->dev_private) {
+		DPU_ERROR("invalid parameters\n");
+		return;
+	}
+	priv = drm_enc->dev->dev_private;
+
+	if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
+		DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
+			      DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
+		return;
+	} else if (!atomic_xchg(&dpu_enc->frame_done_timeout, 0)) {
+		DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
+		return;
+	}
+
+	DPU_ERROR_ENC(dpu_enc, "frame done timeout\n");
+
+	event = DPU_ENCODER_FRAME_EVENT_ERROR;
+	trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
+	dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
+}
+
+static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
+	.mode_set = dpu_encoder_virt_mode_set,
+	.disable = dpu_encoder_virt_disable,
+	.enable = dpu_kms_encoder_enable,
+	.atomic_check = dpu_encoder_virt_atomic_check,
+
+	/* This is called by dpu_kms_encoder_enable */
+	.commit = dpu_encoder_virt_enable,
+};
+
+static const struct drm_encoder_funcs dpu_encoder_funcs = {
+		.destroy = dpu_encoder_destroy,
+		.late_register = dpu_encoder_late_register,
+		.early_unregister = dpu_encoder_early_unregister,
+};
+
+int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
+		struct msm_display_info *disp_info)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+	struct dpu_encoder_virt *dpu_enc = NULL;
+	int drm_enc_mode = DRM_MODE_ENCODER_NONE;
+	int ret = 0;
+
+	dpu_enc = to_dpu_encoder_virt(enc);
+
+	mutex_init(&dpu_enc->enc_lock);
+	ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info,
+			&drm_enc_mode);
+	if (ret)
+		goto fail;
+
+	dpu_enc->cur_master = NULL;
+	spin_lock_init(&dpu_enc->enc_spinlock);
+
+	atomic_set(&dpu_enc->frame_done_timeout, 0);
+	timer_setup(&dpu_enc->frame_done_timer,
+			dpu_encoder_frame_done_timeout, 0);
+
+	if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI)
+		timer_setup(&dpu_enc->vsync_event_timer,
+				dpu_encoder_vsync_event_handler,
+				0);
+
+	mutex_init(&dpu_enc->rc_lock);
+	kthread_init_delayed_work(&dpu_enc->delayed_off_work,
+			dpu_encoder_off_work);
+	dpu_enc->idle_timeout = IDLE_TIMEOUT;
+
+	kthread_init_work(&dpu_enc->vsync_event_work,
+			dpu_encoder_vsync_event_work_handler);
+
+	memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info));
+
+	DPU_DEBUG_ENC(dpu_enc, "created\n");
+
+	return ret;
+
+fail:
+	DPU_ERROR("failed to create encoder\n");
+	return ret;
+}
+
+struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
+		int drm_enc_mode)
+{
+	struct dpu_encoder_virt *dpu_enc = NULL;
+	int rc = 0;
+
+	dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
+	if (!dpu_enc)
+		return ERR_PTR(-ENOMEM);
+
+	rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
+			drm_enc_mode, NULL);
+	if (rc) {
+		devm_kfree(dev->dev, dpu_enc);
+		return ERR_PTR(rc);
+	}
+
+	drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
+
+	return &dpu_enc->base;
+}
+
+int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
+	enum msm_event_wait event)
+{
+	int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL;
+	struct dpu_encoder_virt *dpu_enc = NULL;
+	int i, ret = 0;
+
+	if (!drm_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+	dpu_enc = to_dpu_encoder_virt(drm_enc);
+	DPU_DEBUG_ENC(dpu_enc, "\n");
+
+	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+		if (!phys)
+			continue;
+
+		switch (event) {
+		case MSM_ENC_COMMIT_DONE:
+			fn_wait = phys->ops.wait_for_commit_done;
+			break;
+		case MSM_ENC_TX_COMPLETE:
+			fn_wait = phys->ops.wait_for_tx_complete;
+			break;
+		case MSM_ENC_VBLANK:
+			fn_wait = phys->ops.wait_for_vblank;
+			break;
+		default:
+			DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
+					event);
+			return -EINVAL;
+		}
+
+		if (fn_wait) {
+			DPU_ATRACE_BEGIN("wait_for_completion_event");
+			ret = fn_wait(phys);
+			DPU_ATRACE_END("wait_for_completion_event");
+			if (ret)
+				return ret;
+		}
+	}
+
+	return ret;
+}
+
+enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
+{
+	struct dpu_encoder_virt *dpu_enc = NULL;
+	int i;
+
+	if (!encoder) {
+		DPU_ERROR("invalid encoder\n");
+		return INTF_MODE_NONE;
+	}
+	dpu_enc = to_dpu_encoder_virt(encoder);
+
+	if (dpu_enc->cur_master)
+		return dpu_enc->cur_master->intf_mode;
+
+	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+		if (phys)
+			return phys->intf_mode;
+	}
+
+	return INTF_MODE_NONE;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
new file mode 100644
index 000000000000..ce92901ed227
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark at gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DPU_ENCODER_H__
+#define __DPU_ENCODER_H__
+
+#include <drm/drm_crtc.h>
+#include "dpu_hw_mdss.h"
+
+#define DPU_ENCODER_FRAME_EVENT_DONE			BIT(0)
+#define DPU_ENCODER_FRAME_EVENT_ERROR			BIT(1)
+#define DPU_ENCODER_FRAME_EVENT_PANEL_DEAD		BIT(2)
+#define DPU_ENCODER_FRAME_EVENT_IDLE			BIT(3)
+
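+/*
+ * idle timeout in ms before idle power collapse kicks in; the arithmetic
+ * appears to target one 15 fps frame (66 ms) minus half a 60 fps frame
+ */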
+#define IDLE_TIMEOUT	(66 - 16/2)
+
+/**
+ * struct dpu_encoder_hw_resources - hardware resources an encoder requires
+ * @intfs:	Interfaces this encoder is using, INTF_MODE_NONE if unused
+ * @needs_cdm:	Encoder requests a CDM based on pixel format conversion needs
+ * @display_num_of_h_tiles: Number of horizontal tiles in case of split
+ *                          interface
+ */
+struct dpu_encoder_hw_resources {
+	enum dpu_intf_mode intfs[INTF_MAX];
+	bool needs_cdm;
+	u32 display_num_of_h_tiles;
+};
+
+/**
+ * dpu_encoder_kickoff_params - info encoder requires at kickoff
+ * @affected_displays:  bitmask, bit set means the ROI of the commit lies within
+ *                      the bounds of the physical display at the bit index
+ */
+struct dpu_encoder_kickoff_params {
+	unsigned long affected_displays;
+};
+
+/**
+ * dpu_encoder_get_hw_resources - Populate table of required hardware resources
+ * @encoder:	encoder pointer
+ * @hw_res:	resource table to populate with encoder required resources
+ * @conn_state:	report hw reqs based on this proposed connector state
+ */
+void dpu_encoder_get_hw_resources(struct drm_encoder *encoder,
+		struct dpu_encoder_hw_resources *hw_res,
+		struct drm_connector_state *conn_state);
+
+/**
+ * dpu_encoder_register_vblank_callback - provide callback to encoder that
+ *	will be called on the next vblank.
+ * @encoder:	encoder pointer
+ * @cb:		callback pointer, provide NULL to deregister and disable IRQs
+ * @data:	user data provided to callback
+ */
+void dpu_encoder_register_vblank_callback(struct drm_encoder *encoder,
+		void (*cb)(void *), void *data);
+
+/**
+ * dpu_encoder_register_frame_event_callback - provide callback to encoder that
+ *	will be called after the request is complete, or other events.
+ * @encoder:	encoder pointer
+ * @cb:		callback pointer, provide NULL to deregister
+ * @data:	user data provided to callback
+ */
+void dpu_encoder_register_frame_event_callback(struct drm_encoder *encoder,
+		void (*cb)(void *, u32), void *data);
+
+/**
+ * dpu_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
+ *	path (i.e. ctl flush and start) at next appropriate time.
+ *	Immediately: if no previous commit is outstanding.
+ *	Delayed: Block until next trigger can be issued.
+ * @encoder:	encoder pointer
+ * @params:	kickoff time parameters
+ */
+void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder,
+		struct dpu_encoder_kickoff_params *params);
+
+/**
+ * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous
+ *        kickoff and trigger the ctl prepare progress for command mode display.
+ * @encoder:	encoder pointer
+ */
+void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
+ *	(i.e. ctl flush and start) immediately.
+ * @encoder:	encoder pointer
+ */
+void dpu_encoder_kickoff(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_wait_for_event - Waits for encoder events
+ * @encoder:	encoder pointer
+ * @event:      event to wait for
+ * MSM_ENC_COMMIT_DONE -  Wait for hardware to have flushed the current pending
+ *                        frames to hardware at a vblank or ctl_start
+ *                        Encoders will map this differently depending on the
+ *                        panel type.
+ *	                  vid mode -> vsync_irq
+ *                        cmd mode -> ctl_start
+ * MSM_ENC_TX_COMPLETE -  Wait for the hardware to transfer all the pixels to
+ *                        the panel. Encoders will map this differently
+ *                        depending on the panel type.
+ *                        vid mode -> vsync_irq
+ *                        cmd mode -> pp_done
+ * Returns: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
+ */
+int dpu_encoder_wait_for_event(struct drm_encoder *drm_encoder,
+						enum msm_event_wait event);
+
+/**
+ * dpu_encoder_get_intf_mode - get interface mode of the given encoder
+ * @encoder: Pointer to drm encoder object
+ */
+enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_virt_restore - restore the encoder configs
+ * @encoder:	encoder pointer
+ */
+void dpu_encoder_virt_restore(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_check_mode - check if given mode is supported or not
+ * @drm_enc: Pointer to drm encoder object
+ * @mode: Mode to be checked
+ * Return: true if the display supports the specified mode flag
+ */
+bool dpu_encoder_check_mode(struct drm_encoder *drm_enc, u32 mode);
+
+/**
+ * dpu_encoder_init - initialize virtual encoder object
+ * @dev:        Pointer to drm device structure
+ * @drm_enc_mode: corresponding DRM_MODE_ENCODER_* constant for the encoder
+ * Returns:     Pointer to newly created drm encoder
+ */
+struct drm_encoder *dpu_encoder_init(
+		struct drm_device *dev,
+		int drm_enc_mode);
+
+/**
+ * dpu_encoder_setup - setup dpu_encoder for the display probed
+ * @dev:		Pointer to drm device structure
+ * @enc:		Pointer to the drm_encoder
+ * @disp_info:	Pointer to the display info
+ */
+int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
+		struct msm_display_info *disp_info);
+
+/**
+ * dpu_encoder_destroy - destroy previously initialized virtual encoder
+ * @drm_enc:    Pointer to previously created drm encoder structure
+ */
+void dpu_encoder_destroy(struct drm_encoder *drm_enc);
+
+/**
+ * dpu_encoder_prepare_commit - prepare encoder at the very beginning of an
+ *	atomic commit, before any registers are written
+ * @drm_enc:    Pointer to previously created drm encoder structure
+ */
+void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc);
+
+/**
+ * dpu_encoder_set_idle_timeout - set the idle timeout for video
+ *                    and command mode encoders.
+ * @drm_enc:    Pointer to previously created drm encoder structure
+ * @idle_timeout:    idle timeout duration in milliseconds
+ */
+void dpu_encoder_set_idle_timeout(struct drm_encoder *drm_enc,
+							u32 idle_timeout);
+
+#endif /* __DPU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
new file mode 100644
index 000000000000..15459beb96c5
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -0,0 +1,453 @@
+/*
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DPU_ENCODER_PHYS_H__
+#define __DPU_ENCODER_PHYS_H__
+
+#include <linux/jiffies.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_intf.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_top.h"
+#include "dpu_hw_cdm.h"
+#include "dpu_encoder.h"
+
+#define DPU_ENCODER_NAME_MAX	16
+
+/*
+ * Wait for at most 2 vsyncs at the lowest supported refresh rate (24 Hz):
+ * 2 * 1000 / 24 = ~83.3 ms, rounded up to 84 ms.
+ */
+#define KICKOFF_TIMEOUT_MS		84
+#define KICKOFF_TIMEOUT_JIFFIES		msecs_to_jiffies(KICKOFF_TIMEOUT_MS)
+
+/**
+ * enum dpu_enc_split_role - Role this physical encoder will play in a
+ *	split-panel configuration, where one panel is master, and others slaves.
+ *	Masters have extra responsibilities, like managing the VBLANK IRQ.
+ * @ENC_ROLE_SOLO:	This is the one and only panel. This encoder is master.
+ * @ENC_ROLE_MASTER:	This encoder is the master of a split panel config.
+ * @ENC_ROLE_SLAVE:	This encoder is not the master of a split panel config.
+ */
+enum dpu_enc_split_role {
+	ENC_ROLE_SOLO,
+	ENC_ROLE_MASTER,
+	ENC_ROLE_SLAVE,
+};
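+
+/*
+ * For example, a dual-DSI panel driven as a split display uses one
+ * ENC_ROLE_MASTER and one ENC_ROLE_SLAVE phys encoder, while a single-DSI
+ * panel uses a lone ENC_ROLE_SOLO encoder.
+ */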
+
+/**
+ * enum dpu_enc_enable_state - current enabled state of the physical encoder
+ * @DPU_ENC_DISABLING:	Encoder transitioning to disable state
+ *			Events bounding transition are encoder type specific
+ * @DPU_ENC_DISABLED:	Encoder is disabled
+ * @DPU_ENC_ENABLING:	Encoder transitioning to enabled
+ *			Events bounding transition are encoder type specific
+ * @DPU_ENC_ENABLED:	Encoder is enabled
+ * @DPU_ENC_ERR_NEEDS_HW_RESET:	Encoder is enabled, but requires a hw_reset
+ *				to recover from a previous error
+ */
+enum dpu_enc_enable_state {
+	DPU_ENC_DISABLING,
+	DPU_ENC_DISABLED,
+	DPU_ENC_ENABLING,
+	DPU_ENC_ENABLED,
+	DPU_ENC_ERR_NEEDS_HW_RESET
+};
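+
+/*
+ * Expected transitions (sketch): enable() moves DISABLED -> ENABLED and
+ * disable() moves ENABLED -> DISABLED, via the transitional states where
+ * applicable. A pingpong-done timeout moves an enabled encoder to
+ * DPU_ENC_ERR_NEEDS_HW_RESET; dpu_encoder_helper_hw_reset() returns it to
+ * DPU_ENC_ENABLED.
+ */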
+
+struct dpu_encoder_phys;
+
+/**
+ * struct dpu_encoder_virt_ops - Interface the containing virtual encoder
+ *	provides for the physical encoders to use to callback.
+ * @handle_vblank_virt:	Notify virtual encoder of vblank IRQ reception
+ *			Note: This is called from IRQ handler context.
+ * @handle_underrun_virt: Notify virtual encoder of underrun IRQ reception
+ *			Note: This is called from IRQ handler context.
+ * @handle_frame_done:	Notify virtual encoder that this phys encoder
+ *			has completed the last requested frame.
+ */
+struct dpu_encoder_virt_ops {
+	void (*handle_vblank_virt)(struct drm_encoder *,
+			struct dpu_encoder_phys *phys);
+	void (*handle_underrun_virt)(struct drm_encoder *,
+			struct dpu_encoder_phys *phys);
+	void (*handle_frame_done)(struct drm_encoder *,
+			struct dpu_encoder_phys *phys, u32 event);
+};
+
+/**
+ * struct dpu_encoder_phys_ops - Interface the physical encoders provide to
+ *	the containing virtual encoder.
+ * @late_register:		DRM Call. Add Userspace interfaces, debugfs.
+ * @prepare_commit:		MSM Atomic Call, start of atomic commit sequence
+ * @is_master:			Whether this phys_enc is the current master
+ *				encoder. Can be switched at enable time. Based
+ *				on split_role and current mode (CMD/VID).
+ * @mode_fixup:			DRM Call. Fixup a DRM mode.
+ * @mode_set:			DRM Call. Set a DRM mode.
+ *				This likely caches the mode, for use at enable.
+ * @enable:			DRM Call. Enable a DRM mode.
+ * @disable:			DRM Call. Disable mode.
+ * @atomic_check:		DRM Call. Atomic check new DRM state.
+ * @destroy:			DRM Call. Destroy and release resources.
+ * @get_hw_resources:		Populate the structure with the hardware
+ *				resources that this phys_enc is using.
+ *				Expect no overlap between phys_encs.
+ * @control_vblank_irq:		Register/Deregister for VBLANK IRQ
+ * @wait_for_commit_done:	Wait for hardware to have flushed the
+ *				current pending frames to hardware
+ * @wait_for_tx_complete:	Wait for hardware to transfer the pixels
+ *				to the panel
+ * @wait_for_vblank:		Wait for VBLANK, for sub-driver internal use
+ * @prepare_for_kickoff:	Do any work necessary prior to a kickoff
+ *				For CMD encoder, may wait for previous tx done
+ * @handle_post_kickoff:	Do any work necessary post-kickoff
+ * @trigger_start:		Process start event on physical encoder
+ * @needs_single_flush:		Whether encoder slaves need to be flushed
+ * @setup_misr:		Set up MISR, enabling or disabling it based on sysfs
+ * @collect_misr:		Collects MISR data on frame update
+ * @hw_reset:			Issue HW recovery such as CTL reset and clear
+ *				DPU_ENC_ERR_NEEDS_HW_RESET state
+ * @irq_control:		Handler to enable/disable all the encoder IRQs
+ * @prepare_idle_pc:		phys encoder can update the vsync_enable status
+ *                              on idle power collapse prepare
+ * @restore:			Restore all the encoder configs.
+ * @get_line_count:		Obtain current vertical line count
+ */
+struct dpu_encoder_phys_ops {
+	int (*late_register)(struct dpu_encoder_phys *encoder,
+			struct dentry *debugfs_root);
+	void (*prepare_commit)(struct dpu_encoder_phys *encoder);
+	bool (*is_master)(struct dpu_encoder_phys *encoder);
+	bool (*mode_fixup)(struct dpu_encoder_phys *encoder,
+			const struct drm_display_mode *mode,
+			struct drm_display_mode *adjusted_mode);
+	void (*mode_set)(struct dpu_encoder_phys *encoder,
+			struct drm_display_mode *mode,
+			struct drm_display_mode *adjusted_mode);
+	void (*enable)(struct dpu_encoder_phys *encoder);
+	void (*disable)(struct dpu_encoder_phys *encoder);
+	int (*atomic_check)(struct dpu_encoder_phys *encoder,
+			    struct drm_crtc_state *crtc_state,
+			    struct drm_connector_state *conn_state);
+	void (*destroy)(struct dpu_encoder_phys *encoder);
+	void (*get_hw_resources)(struct dpu_encoder_phys *encoder,
+			struct dpu_encoder_hw_resources *hw_res,
+			struct drm_connector_state *conn_state);
+	int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable);
+	int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc);
+	int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc);
+	int (*wait_for_vblank)(struct dpu_encoder_phys *phys_enc);
+	void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc,
+			struct dpu_encoder_kickoff_params *params);
+	void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
+	void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
+	bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc);
+
+	void (*setup_misr)(struct dpu_encoder_phys *phys_encs,
+				bool enable, u32 frame_count);
+	u32 (*collect_misr)(struct dpu_encoder_phys *phys_enc);
+	void (*hw_reset)(struct dpu_encoder_phys *phys_enc);
+	void (*irq_control)(struct dpu_encoder_phys *phys, bool enable);
+	void (*prepare_idle_pc)(struct dpu_encoder_phys *phys_enc);
+	void (*restore)(struct dpu_encoder_phys *phys);
+	int (*get_line_count)(struct dpu_encoder_phys *phys);
+};
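+
+/*
+ * Sub-classes populate this table at init time; see
+ * dpu_encoder_phys_cmd_init_ops() in dpu_encoder_phys_cmd.c below, e.g.:
+ *
+ *	ops->is_master = dpu_encoder_phys_cmd_is_master;
+ *	ops->enable = dpu_encoder_phys_cmd_enable;
+ *	ops->disable = dpu_encoder_phys_cmd_disable;
+ */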
+
+/**
+ * enum dpu_intr_idx - dpu encoder interrupt index
+ * @INTR_IDX_VSYNC:     Vsync interrupt for video mode panel
+ * @INTR_IDX_PINGPONG:  Pingpong done interrupt for cmd mode panel
+ * @INTR_IDX_UNDERRUN:  Underrun interrupt for video and cmd mode panel
+ * @INTR_IDX_CTL_START: Ctl start interrupt for cmd mode panel
+ * @INTR_IDX_RDPTR:     Readpointer done interrupt for cmd mode panel
+ */
+enum dpu_intr_idx {
+	INTR_IDX_VSYNC,
+	INTR_IDX_PINGPONG,
+	INTR_IDX_UNDERRUN,
+	INTR_IDX_CTL_START,
+	INTR_IDX_RDPTR,
+	INTR_IDX_MAX,
+};
+
+/**
+ * struct dpu_encoder_irq - tracking structure for interrupts
+ * @name:		string name of interrupt
+ * @intr_type:		Encoder interrupt type
+ * @intr_idx:		Encoder interrupt enumeration
+ * @hw_idx:		HW Block ID
+ * @irq_idx:		IRQ interface lookup index from DPU IRQ framework
+ *			will be -EINVAL if IRQ is not registered
+ * @cb:		interrupt callback
+ */
+struct dpu_encoder_irq {
+	const char *name;
+	enum dpu_intr_type intr_type;
+	enum dpu_intr_idx intr_idx;
+	int hw_idx;
+	int irq_idx;
+	struct dpu_irq_callback cb;
+};
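+
+/*
+ * Each phys encoder fills in one entry per interrupt it handles; see
+ * dpu_encoder_phys_cmd_init() below, e.g.:
+ *
+ *	irq = &phys_enc->irq[INTR_IDX_PINGPONG];
+ *	irq->name = "pp_done";
+ *	irq->intr_type = DPU_IRQ_TYPE_PING_PONG_COMP;
+ *	irq->cb.func = dpu_encoder_phys_cmd_pp_tx_done_irq;
+ */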
+
+/**
+ * struct dpu_encoder_phys - physical encoder that drives a single INTF block
+ *	tied to a specific panel / sub-panel. Abstract type, sub-classed by
+ *	phys_vid or phys_cmd for video mode or command mode encs respectively.
+ * @parent:		Pointer to the containing virtual encoder
+ * @connector:		If a mode is set, cached pointer to the active connector
+ * @ops:		Operations exposed to the virtual encoder
+ * @parent_ops:		Callbacks exposed by the parent to the phys_enc
+ * @hw_mdptop:		Hardware interface to the top registers
+ * @hw_ctl:		Hardware interface to the ctl registers
+ * @hw_cdm:		Hardware interface to the cdm registers
+ * @cdm_cfg:		Chroma-down hardware configuration
+ * @hw_pp:		Hardware interface to the ping pong registers
+ * @dpu_kms:		Pointer to the dpu_kms top level
+ * @cached_mode:	DRM mode cached at mode_set time, acted on in enable
+ * @split_role:		Role to play in a split-panel configuration
+ * @intf_mode:		Interface mode
+ * @intf_idx:		Interface index on dpu hardware
+ * @topology_name:	topology selected for the display
+ * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @enable_state:	Enable state tracking
+ * @vblank_refcount:	Reference count of vblank request
+ * @vsync_cnt:		Vsync count for the physical encoder
+ * @underrun_cnt:	Underrun count for the physical encoder
+ * @pending_kickoff_cnt:	Atomic counter tracking the number of kickoffs
+ *				vs. the number of done/vblank irqs. Should hover
+ *				between 0-2. Incremented when a new kickoff is
+ *				scheduled; decremented in the irq handler.
+ * @pending_ctlstart_cnt:	Atomic counter tracking the number of ctl start
+ *                              pending.
+ * @pending_kickoff_wq:		Wait queue for blocking until kickoff completes
+ * @irq:			IRQ tracking structures
+ */
+struct dpu_encoder_phys {
+	struct drm_encoder *parent;
+	struct drm_connector *connector;
+	struct dpu_encoder_phys_ops ops;
+	struct dpu_encoder_virt_ops parent_ops;
+	struct dpu_hw_mdp *hw_mdptop;
+	struct dpu_hw_ctl *hw_ctl;
+	struct dpu_hw_cdm *hw_cdm;
+	struct dpu_hw_cdm_cfg cdm_cfg;
+	struct dpu_hw_pingpong *hw_pp;
+	struct dpu_kms *dpu_kms;
+	struct drm_display_mode cached_mode;
+	enum dpu_enc_split_role split_role;
+	enum dpu_intf_mode intf_mode;
+	enum dpu_intf intf_idx;
+	enum dpu_rm_topology_name topology_name;
+	spinlock_t *enc_spinlock;
+	enum dpu_enc_enable_state enable_state;
+	atomic_t vblank_refcount;
+	atomic_t vsync_cnt;
+	atomic_t underrun_cnt;
+	atomic_t pending_ctlstart_cnt;
+	atomic_t pending_kickoff_cnt;
+	wait_queue_head_t pending_kickoff_wq;
+	struct dpu_encoder_irq irq[INTR_IDX_MAX];
+};
+
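+/**
+ * dpu_encoder_phys_inc_pending - note that a new kickoff has been scheduled
+ *	by incrementing both the ctl-start and kickoff pending counters; the
+ *	ctl_start and pp_done/vblank irq handlers decrement them again
+ * @phys: Pointer to the physical encoder
+ * Return: new value of pending_kickoff_cnt
+ */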
+static inline int dpu_encoder_phys_inc_pending(struct dpu_encoder_phys *phys)
+{
+	atomic_inc_return(&phys->pending_ctlstart_cnt);
+	return atomic_inc_return(&phys->pending_kickoff_cnt);
+}
+
+/**
+ * struct dpu_encoder_phys_vid - sub-class of dpu_encoder_phys to handle video
+ *	mode specific operations
+ * @base:	Baseclass physical encoder structure
+ * @hw_intf:	Hardware interface to the intf registers
+ * @timing_params: Current timing parameter
+ */
+struct dpu_encoder_phys_vid {
+	struct dpu_encoder_phys base;
+	struct dpu_hw_intf *hw_intf;
+	struct intf_timing_params timing_params;
+};
+
+/**
+ * struct dpu_encoder_phys_cmd - sub-class of dpu_encoder_phys to handle command
+ *	mode specific operations
+ * @base:	Baseclass physical encoder structure
+ * @stream_sel:	Stream selection for multi-stream interfaces
+ * @serialize_wait4pp:	serialize wait4pp feature waits for pp_done interrupt
+ *			after ctl_start instead of before next frame kickoff
+ * @pp_timeout_report_cnt: number of pingpong done irq timeout errors
+ * @pending_vblank_cnt: Atomic counter tracking pending wait for VBLANK
+ * @pending_vblank_wq: Wait queue for blocking until VBLANK received
+ */
+struct dpu_encoder_phys_cmd {
+	struct dpu_encoder_phys base;
+	int stream_sel;
+	bool serialize_wait4pp;
+	int pp_timeout_report_cnt;
+	atomic_t pending_vblank_cnt;
+	wait_queue_head_t pending_vblank_wq;
+};
+
+/**
+ * struct dpu_enc_phys_init_params - initialization parameters for phys encs
+ * @dpu_kms:		Pointer to the dpu_kms top level
+ * @parent:		Pointer to the containing virtual encoder
+ * @parent_ops:		Callbacks exposed by the parent to the phys_enc
+ * @split_role:		Role to play in a split-panel configuration
+ * @intf_idx:		Interface index this phys_enc will control
+ * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ */
+struct dpu_enc_phys_init_params {
+	struct dpu_kms *dpu_kms;
+	struct drm_encoder *parent;
+	struct dpu_encoder_virt_ops parent_ops;
+	enum dpu_enc_split_role split_role;
+	enum dpu_intf intf_idx;
+	spinlock_t *enc_spinlock;
+};
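+
+/*
+ * Filled in by the containing virtual encoder before constructing a phys
+ * encoder; a sketch (the field values are illustrative and the parent_ops
+ * table name is hypothetical):
+ *
+ *	struct dpu_enc_phys_init_params phys_params = {
+ *		.dpu_kms = dpu_kms,
+ *		.parent = drm_enc,
+ *		.parent_ops = parent_ops,
+ *		.split_role = ENC_ROLE_SOLO,
+ *		.intf_idx = INTF_1,
+ *		.enc_spinlock = &enc_spinlock,
+ *	};
+ *	phys = dpu_encoder_phys_cmd_init(&phys_params);
+ */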
+
+/**
+ * struct dpu_encoder_wait_info - arguments for the irq wait helpers
+ * @wq: wait queue structure
+ * @atomic_cnt: wait until atomic_cnt equals zero
+ * @timeout_ms: timeout value in milliseconds
+ */
+struct dpu_encoder_wait_info {
+	wait_queue_head_t *wq;
+	atomic_t *atomic_cnt;
+	s64 timeout_ms;
+};
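+
+/*
+ * Typical usage, mirroring _dpu_encoder_phys_cmd_wait_for_idle() in
+ * dpu_encoder_phys_cmd.c:
+ *
+ *	struct dpu_encoder_wait_info wait_info = {
+ *		.wq = &phys_enc->pending_kickoff_wq,
+ *		.atomic_cnt = &phys_enc->pending_kickoff_cnt,
+ *		.timeout_ms = KICKOFF_TIMEOUT_MS,
+ *	};
+ *
+ *	ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_PINGPONG,
+ *			&wait_info);
+ */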
+
+/**
+ * dpu_encoder_phys_vid_init - Construct a new video mode physical encoder
+ * @p:	Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+		struct dpu_enc_phys_init_params *p);
+
+/**
+ * dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder
+ * @p:	Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+		struct dpu_enc_phys_init_params *p);
+
+/**
+ * dpu_encoder_helper_trigger_start - control start helper function
+ *	This helper function may be optionally specified by physical
+ *	encoders if they require ctl_start triggering.
+ * @phys_enc: Pointer to physical encoder structure
+ */
+void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc);
+
+/**
+ * dpu_encoder_helper_wait_event_timeout - wait for event with timeout
+ *	taking into account that jiffies may jump between reads leading to
+ *	incorrectly detected timeouts. Prevent failure in this scenario by
+ *	making sure that elapsed time during wait is valid.
+ * @drm_id: drm object id for logging
+ * @hw_id: hw instance id for logging
+ * @info: wait info structure
+ */
+int dpu_encoder_helper_wait_event_timeout(
+		int32_t drm_id,
+		int32_t hw_id,
+		struct dpu_encoder_wait_info *info);
+
+/**
+ * dpu_encoder_helper_hw_reset - issue ctl hw reset
+ *	This helper function may be optionally specified by physical
+ *	encoders if they require ctl hw reset. If state is currently
+ *	DPU_ENC_ERR_NEEDS_HW_RESET, it is set back to DPU_ENC_ENABLED.
+ * @phys_enc: Pointer to physical encoder structure
+ */
+void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc);
+
+static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
+		struct dpu_encoder_phys *phys_enc)
+{
+	if (!phys_enc || phys_enc->enable_state == DPU_ENC_DISABLING)
+		return BLEND_3D_NONE;
+
+	if (phys_enc->split_role == ENC_ROLE_SOLO &&
+	    phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE)
+		return BLEND_3D_H_ROW_INT;
+
+	return BLEND_3D_NONE;
+}
+
+/**
+ * dpu_encoder_helper_split_config - split display configuration helper function
+ *	This helper function may be used by physical encoders to configure
+ *	the split display related registers.
+ * @phys_enc: Pointer to physical encoder structure
+ * @interface: enum dpu_intf setting
+ */
+void dpu_encoder_helper_split_config(
+		struct dpu_encoder_phys *phys_enc,
+		enum dpu_intf interface);
+
+/**
+ * dpu_encoder_helper_hw_release - prepare for h/w reset during disable
+ * @phys_enc: Pointer to physical encoder structure
+ * @fb: Optional fb for specifying new mixer output resolution, may be NULL
+ * Return: Zero on success
+ */
+int dpu_encoder_helper_hw_release(struct dpu_encoder_phys *phys_enc,
+		struct drm_framebuffer *fb);
+
+/**
+ * dpu_encoder_helper_report_irq_timeout - utility to report error that irq has
+ *	timed out, including reporting frame error event to crtc and debug dump
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: Failing interrupt index
+ */
+void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
+		enum dpu_intr_idx intr_idx);
+
+/**
+ * dpu_encoder_helper_wait_for_irq - utility to wait on an irq.
+ *	note: blocks via dpu_encoder_helper_wait_event_timeout
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * @wait_info: wait info struct
+ * Return: 0 or -ERROR
+ */
+int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
+		enum dpu_intr_idx intr_idx,
+		struct dpu_encoder_wait_info *wait_info);
+
+/**
+ * dpu_encoder_helper_register_irq - register and enable an irq
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * Return: 0 or -ERROR
+ */
+int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
+		enum dpu_intr_idx intr_idx);
+
+/**
+ * dpu_encoder_helper_unregister_irq - unregister and disable an irq
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * Return: 0 or -ERROR
+ */
+int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
+		enum dpu_intr_idx intr_idx);
+
+#endif /* __DPU_ENCODER_PHYS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
new file mode 100644
index 000000000000..9519dbc24266
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -0,0 +1,905 @@
+/*
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_trace.h"
+
+#define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
+		(e) && (e)->base.parent ? \
+		(e)->base.parent->base.id : -1, \
+		(e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_CMDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
+		(e) && (e)->base.parent ? \
+		(e)->base.parent->base.id : -1, \
+		(e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define to_dpu_encoder_phys_cmd(x) \
+	container_of(x, struct dpu_encoder_phys_cmd, base)
+
+#define PP_TIMEOUT_MAX_TRIALS	10
+
+/*
+ * Tearcheck sync start and continue thresholds are empirically found
+ * based on common panels. In the future, we may want to allow panels to
+ * override these default values.
+ */
+#define DEFAULT_TEARCHECK_SYNC_THRESH_START	4
+#define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE	4
+
+#define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000
+
+static inline int _dpu_encoder_phys_cmd_get_idle_timeout(
+		struct dpu_encoder_phys_cmd *cmd_enc)
+{
+	return KICKOFF_TIMEOUT_MS;
+}
+
+static inline bool dpu_encoder_phys_cmd_is_master(
+		struct dpu_encoder_phys *phys_enc)
+{
+	return phys_enc->split_role != ENC_ROLE_SLAVE;
+}
+
+static bool dpu_encoder_phys_cmd_mode_fixup(
+		struct dpu_encoder_phys *phys_enc,
+		const struct drm_display_mode *mode,
+		struct drm_display_mode *adj_mode)
+{
+	if (phys_enc)
+		DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc), "\n");
+	return true;
+}
+
+static void _dpu_encoder_phys_cmd_update_intf_cfg(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+			to_dpu_encoder_phys_cmd(phys_enc);
+	struct dpu_hw_ctl *ctl;
+	struct dpu_hw_intf_cfg intf_cfg = { 0 };
+
+	if (!phys_enc)
+		return;
+
+	ctl = phys_enc->hw_ctl;
+	if (!ctl || !ctl->ops.setup_intf_cfg)
+		return;
+
+	intf_cfg.intf = phys_enc->intf_idx;
+	intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD;
+	intf_cfg.stream_sel = cmd_enc->stream_sel;
+	intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+	ctl->ops.setup_intf_cfg(ctl, &intf_cfg);
+}
+
+static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
+{
+	struct dpu_encoder_phys *phys_enc = arg;
+	unsigned long lock_flags;
+	int new_cnt;
+	u32 event = DPU_ENCODER_FRAME_EVENT_DONE;
+
+	if (!phys_enc || !phys_enc->hw_pp)
+		return;
+
+	DPU_ATRACE_BEGIN("pp_done_irq");
+	/* notify all synchronous clients first, then asynchronous clients */
+	if (phys_enc->parent_ops.handle_frame_done)
+		phys_enc->parent_ops.handle_frame_done(phys_enc->parent,
+				phys_enc, event);
+
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	trace_dpu_enc_phys_cmd_pp_tx_done(DRMID(phys_enc->parent),
+					  phys_enc->hw_pp->idx - PINGPONG_0,
+					  new_cnt, event);
+
+	/* Signal any waiting atomic commit thread */
+	wake_up_all(&phys_enc->pending_kickoff_wq);
+	DPU_ATRACE_END("pp_done_irq");
+}
+
+static void dpu_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
+{
+	struct dpu_encoder_phys *phys_enc = arg;
+	struct dpu_encoder_phys_cmd *cmd_enc;
+
+	if (!phys_enc || !phys_enc->hw_pp)
+		return;
+
+	DPU_ATRACE_BEGIN("rd_ptr_irq");
+	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+	if (phys_enc->parent_ops.handle_vblank_virt)
+		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
+			phys_enc);
+
+	atomic_add_unless(&cmd_enc->pending_vblank_cnt, -1, 0);
+	wake_up_all(&cmd_enc->pending_vblank_wq);
+	DPU_ATRACE_END("rd_ptr_irq");
+}
+
+static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
+{
+	struct dpu_encoder_phys *phys_enc = arg;
+	struct dpu_encoder_phys_cmd *cmd_enc;
+
+	if (!phys_enc || !phys_enc->hw_ctl)
+		return;
+
+	DPU_ATRACE_BEGIN("ctl_start_irq");
+	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+	atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
+
+	/* Signal any waiting ctl start interrupt */
+	wake_up_all(&phys_enc->pending_kickoff_wq);
+	DPU_ATRACE_END("ctl_start_irq");
+}
+
+static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
+{
+	struct dpu_encoder_phys *phys_enc = arg;
+
+	if (!phys_enc)
+		return;
+
+	if (phys_enc->parent_ops.handle_underrun_virt)
+		phys_enc->parent_ops.handle_underrun_virt(phys_enc->parent,
+			phys_enc);
+}
+
+static void _dpu_encoder_phys_cmd_setup_irq_hw_idx(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_irq *irq;
+
+	irq = &phys_enc->irq[INTR_IDX_CTL_START];
+	irq->hw_idx = phys_enc->hw_ctl->idx;
+	irq->irq_idx = -EINVAL;
+
+	irq = &phys_enc->irq[INTR_IDX_PINGPONG];
+	irq->hw_idx = phys_enc->hw_pp->idx;
+	irq->irq_idx = -EINVAL;
+
+	irq = &phys_enc->irq[INTR_IDX_RDPTR];
+	irq->hw_idx = phys_enc->hw_pp->idx;
+	irq->irq_idx = -EINVAL;
+
+	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+	irq->hw_idx = phys_enc->intf_idx;
+	irq->irq_idx = -EINVAL;
+}
+
+static void dpu_encoder_phys_cmd_mode_set(
+		struct dpu_encoder_phys *phys_enc,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adj_mode)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+		to_dpu_encoder_phys_cmd(phys_enc);
+	struct dpu_rm *rm = &phys_enc->dpu_kms->rm;
+	struct dpu_rm_hw_iter iter;
+	int i, instance;
+
+	if (!phys_enc || !mode || !adj_mode) {
+		DPU_ERROR("invalid args\n");
+		return;
+	}
+	phys_enc->cached_mode = *adj_mode;
+	DPU_DEBUG_CMDENC(cmd_enc, "caching mode:\n");
+	drm_mode_debug_printmodeline(adj_mode);
+
+	instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
+
+	/* Retrieve previously allocated HW Resources. Shouldn't fail */
+	dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL);
+	for (i = 0; i <= instance; i++) {
+		if (dpu_rm_get_hw(rm, &iter))
+			phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw;
+	}
+
+	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+		DPU_ERROR_CMDENC(cmd_enc, "failed to init ctl: %ld\n",
+				PTR_ERR(phys_enc->hw_ctl));
+		phys_enc->hw_ctl = NULL;
+		return;
+	}
+
+	_dpu_encoder_phys_cmd_setup_irq_hw_idx(phys_enc);
+}
+
+static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+			to_dpu_encoder_phys_cmd(phys_enc);
+	u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
+	bool do_log = false;
+
+	if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_ctl)
+		return -EINVAL;
+
+	cmd_enc->pp_timeout_report_cnt++;
+	if (cmd_enc->pp_timeout_report_cnt == PP_TIMEOUT_MAX_TRIALS) {
+		frame_event |= DPU_ENCODER_FRAME_EVENT_PANEL_DEAD;
+		do_log = true;
+	} else if (cmd_enc->pp_timeout_report_cnt == 1) {
+		do_log = true;
+	}
+
+	trace_dpu_enc_phys_cmd_pdone_timeout(DRMID(phys_enc->parent),
+		     phys_enc->hw_pp->idx - PINGPONG_0,
+		     cmd_enc->pp_timeout_report_cnt,
+		     atomic_read(&phys_enc->pending_kickoff_cnt),
+		     frame_event);
+
+	/* to avoid flooding, only log first time, and "dead" time */
+	if (do_log) {
+		DRM_ERROR("id:%d pp:%d kickoff timeout %d cnt %d koff_cnt %d\n",
+			  DRMID(phys_enc->parent),
+			  phys_enc->hw_pp->idx - PINGPONG_0,
+			  phys_enc->hw_ctl->idx - CTL_0,
+			  cmd_enc->pp_timeout_report_cnt,
+			  atomic_read(&phys_enc->pending_kickoff_cnt));
+
+		dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR);
+		dpu_dbg_dump(false, __func__, true, true);
+	}
+
+	atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+
+	/* request a ctl reset before the next kickoff */
+	phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET;
+
+	if (phys_enc->parent_ops.handle_frame_done)
+		phys_enc->parent_ops.handle_frame_done(
+				phys_enc->parent, phys_enc, frame_event);
+
+	return -ETIMEDOUT;
+}
+
+static int _dpu_encoder_phys_cmd_wait_for_idle(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+			to_dpu_encoder_phys_cmd(phys_enc);
+	struct dpu_encoder_wait_info wait_info;
+	int ret;
+
+	if (!phys_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	wait_info.wq = &phys_enc->pending_kickoff_wq;
+	wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+	ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_PINGPONG,
+			&wait_info);
+	if (ret == -ETIMEDOUT)
+		_dpu_encoder_phys_cmd_handle_ppdone_timeout(phys_enc);
+	else if (!ret)
+		cmd_enc->pp_timeout_report_cnt = 0;
+
+	return ret;
+}
+
+static int dpu_encoder_phys_cmd_control_vblank_irq(
+		struct dpu_encoder_phys *phys_enc,
+		bool enable)
+{
+	int ret = 0;
+	int refcount;
+
+	if (!phys_enc || !phys_enc->hw_pp) {
+		DPU_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	refcount = atomic_read(&phys_enc->vblank_refcount);
+
+	/* Slave encoders don't report vblank */
+	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+		goto end;
+
+	/* protect against negative */
+	if (!enable && refcount == 0) {
+		ret = -EINVAL;
+		goto end;
+	}
+
+	DRM_DEBUG_KMS("id:%u pp:%d enable=%s/%d\n", DRMID(phys_enc->parent),
+		      phys_enc->hw_pp->idx - PINGPONG_0,
+		      enable ? "true" : "false", refcount);
+
+	if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+		ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_RDPTR);
+	else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+		ret = dpu_encoder_helper_unregister_irq(phys_enc,
+				INTR_IDX_RDPTR);
+
+end:
+	if (ret) {
+		DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n",
+			  DRMID(phys_enc->parent),
+			  phys_enc->hw_pp->idx - PINGPONG_0, ret,
+			  enable ? "true" : "false", refcount);
+	}
+
+	return ret;
+}
+
+void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
+		bool enable)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc;
+
+	if (!phys_enc)
+		return;
+
+	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+	trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent),
+			phys_enc->hw_pp->idx - PINGPONG_0,
+			enable, atomic_read(&phys_enc->vblank_refcount));
+
+	if (enable) {
+		dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_PINGPONG);
+		dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
+		dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
+
+		if (dpu_encoder_phys_cmd_is_master(phys_enc))
+			dpu_encoder_helper_register_irq(phys_enc,
+					INTR_IDX_CTL_START);
+	} else {
+		if (dpu_encoder_phys_cmd_is_master(phys_enc))
+			dpu_encoder_helper_unregister_irq(phys_enc,
+					INTR_IDX_CTL_START);
+
+		dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
+		dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
+		dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_PINGPONG);
+	}
+}
+
+static void dpu_encoder_phys_cmd_tearcheck_config(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+		to_dpu_encoder_phys_cmd(phys_enc);
+	struct dpu_hw_tear_check tc_cfg = { 0 };
+	struct drm_display_mode *mode;
+	bool tc_enable = true;
+	u32 vsync_hz;
+	struct msm_drm_private *priv;
+	struct dpu_kms *dpu_kms;
+
+	if (!phys_enc || !phys_enc->hw_pp) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+	mode = &phys_enc->cached_mode;
+
+	DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+	if (!phys_enc->hw_pp->ops.setup_tearcheck ||
+		!phys_enc->hw_pp->ops.enable_tearcheck) {
+		DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
+		return;
+	}
+
+	dpu_kms = phys_enc->dpu_kms;
+	if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) {
+		DPU_ERROR("invalid device\n");
+		return;
+	}
+	priv = dpu_kms->dev->dev_private;
+
+	/*
+	 * TE default: dsi byte clock calculated based on 70 fps;
+	 * around 14 ms to complete a kickoff cycle if TE is disabled;
+	 * vclk_line based on 60 fps; write is faster than read;
+	 * init == start == rdptr.
+	 *
+	 * vsync_count is the MDP VSYNC clock frequency divided by the panel
+	 * refresh rate and by the number of rows (lines) in the LCD panel.
+	 */
+	vsync_hz = dpu_kms_get_clk_rate(dpu_kms, "vsync_clk");
+	if (!vsync_hz || !mode->vtotal || !mode->vrefresh) {
+		DPU_DEBUG_CMDENC(cmd_enc,
+			"invalid params - vsync_hz %u vtotal %d vrefresh %d\n",
+			vsync_hz, mode->vtotal, mode->vrefresh);
+		return;
+	}
+
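+	/*
+	 * Worked example with illustrative (not panel-specific) numbers:
+	 * a 19.2 MHz vsync clock and a 60 fps panel with vtotal 2248 gives
+	 * vsync_count = 19200000 / (2248 * 60) = ~142.
+	 */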
+	tc_cfg.vsync_count = vsync_hz / (mode->vtotal * mode->vrefresh);
+
+	/* enable external TE after kickoff to avoid premature autorefresh */
+	tc_cfg.hw_vsync_mode = 0;
+
+	/*
+	 * By setting sync_cfg_height close to the max register value, we
+	 * essentially disable the dpu hw generated TE signal, since the
+	 * external hw TE will always arrive first. The only caveat is
+	 * hitting the wrap-around due to an error.
+	 */
+	tc_cfg.sync_cfg_height = 0xFFF0;
+	tc_cfg.vsync_init_val = mode->vdisplay;
+	tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
+	tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
+	tc_cfg.start_pos = mode->vdisplay;
+	tc_cfg.rd_ptr_irq = mode->vdisplay + 1;
+
+	DPU_DEBUG_CMDENC(cmd_enc,
+		"tc %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
+		phys_enc->hw_pp->idx - PINGPONG_0, vsync_hz,
+		mode->vtotal, mode->vrefresh);
+	DPU_DEBUG_CMDENC(cmd_enc,
+		"tc %d enable %u start_pos %u rd_ptr_irq %u\n",
+		phys_enc->hw_pp->idx - PINGPONG_0, tc_enable, tc_cfg.start_pos,
+		tc_cfg.rd_ptr_irq);
+	DPU_DEBUG_CMDENC(cmd_enc,
+		"tc %d hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
+		phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.hw_vsync_mode,
+		tc_cfg.vsync_count, tc_cfg.vsync_init_val);
+	DPU_DEBUG_CMDENC(cmd_enc,
+		"tc %d cfgheight %u thresh_start %u thresh_cont %u\n",
+		phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.sync_cfg_height,
+		tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue);
+
+	phys_enc->hw_pp->ops.setup_tearcheck(phys_enc->hw_pp, &tc_cfg);
+	phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, tc_enable);
+}
+
+static void _dpu_encoder_phys_cmd_pingpong_config(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+		to_dpu_encoder_phys_cmd(phys_enc);
+
+	if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp
+			|| !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+		DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != 0);
+		return;
+	}
+
+	DPU_DEBUG_CMDENC(cmd_enc, "pp %d, enabling mode:\n",
+			phys_enc->hw_pp->idx - PINGPONG_0);
+	drm_mode_debug_printmodeline(&phys_enc->cached_mode);
+
+	_dpu_encoder_phys_cmd_update_intf_cfg(phys_enc);
+	dpu_encoder_phys_cmd_tearcheck_config(phys_enc);
+}
+
+static bool dpu_encoder_phys_cmd_needs_single_flush(
+		struct dpu_encoder_phys *phys_enc)
+{
+	/*
+	 * We do a separate flush for each CTL and let
+	 * CTL_START synchronize them.
+	 */
+	return false;
+}
+
+static void dpu_encoder_phys_cmd_enable_helper(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_hw_ctl *ctl;
+	u32 flush_mask = 0;
+
+	if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp) {
+		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+		return;
+	}
+
+	dpu_encoder_helper_split_config(phys_enc, phys_enc->intf_idx);
+
+	_dpu_encoder_phys_cmd_pingpong_config(phys_enc);
+
+	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+		return;
+
+	ctl = phys_enc->hw_ctl;
+	ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->intf_idx);
+	ctl->ops.update_pending_flush(ctl, flush_mask);
+}
+
+static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+		to_dpu_encoder_phys_cmd(phys_enc);
+
+	if (!phys_enc || !phys_enc->hw_pp) {
+		DPU_ERROR("invalid phys encoder\n");
+		return;
+	}
+
+	DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+	if (phys_enc->enable_state == DPU_ENC_ENABLED) {
+		DPU_ERROR("already enabled\n");
+		return;
+	}
+
+	dpu_encoder_phys_cmd_enable_helper(phys_enc);
+	phys_enc->enable_state = DPU_ENC_ENABLED;
+}
+
+static void _dpu_encoder_phys_cmd_connect_te(
+		struct dpu_encoder_phys *phys_enc, bool enable)
+{
+	if (!phys_enc || !phys_enc->hw_pp ||
+			!phys_enc->hw_pp->ops.connect_external_te)
+		return;
+
+	trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
+	phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable);
+}
+
+static void dpu_encoder_phys_cmd_prepare_idle_pc(
+		struct dpu_encoder_phys *phys_enc)
+{
+	_dpu_encoder_phys_cmd_connect_te(phys_enc, false);
+}
+
+static int dpu_encoder_phys_cmd_get_line_count(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_hw_pingpong *hw_pp;
+
+	if (!phys_enc || !phys_enc->hw_pp)
+		return -EINVAL;
+
+	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+		return -EINVAL;
+
+	hw_pp = phys_enc->hw_pp;
+	if (!hw_pp->ops.get_line_count)
+		return -EINVAL;
+
+	return hw_pp->ops.get_line_count(hw_pp);
+}
+
+static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+		to_dpu_encoder_phys_cmd(phys_enc);
+
+	if (!phys_enc || !phys_enc->hw_pp) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+	DRM_DEBUG_KMS("id:%u pp:%d state:%d\n", DRMID(phys_enc->parent),
+		      phys_enc->hw_pp->idx - PINGPONG_0,
+		      phys_enc->enable_state);
+
+	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+		DPU_ERROR_CMDENC(cmd_enc, "already disabled\n");
+		return;
+	}
+
+	if (phys_enc->hw_pp->ops.enable_tearcheck)
+		phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, false);
+	phys_enc->enable_state = DPU_ENC_DISABLED;
+}
+
+static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+		to_dpu_encoder_phys_cmd(phys_enc);
+
+	if (!phys_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+	kfree(cmd_enc);
+}
+
+static void dpu_encoder_phys_cmd_get_hw_resources(
+		struct dpu_encoder_phys *phys_enc,
+		struct dpu_encoder_hw_resources *hw_res,
+		struct drm_connector_state *conn_state)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+		to_dpu_encoder_phys_cmd(phys_enc);
+
+	if (!phys_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+
+	if ((phys_enc->intf_idx - INTF_0) >= INTF_MAX) {
+		DPU_ERROR("invalid intf idx:%d\n", phys_enc->intf_idx);
+		return;
+	}
+
+	DPU_DEBUG_CMDENC(cmd_enc, "\n");
+	hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
+}
+
+static void dpu_encoder_phys_cmd_prepare_for_kickoff(
+		struct dpu_encoder_phys *phys_enc,
+		struct dpu_encoder_kickoff_params *params)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+			to_dpu_encoder_phys_cmd(phys_enc);
+	int ret;
+
+	if (!phys_enc || !phys_enc->hw_pp) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+	DRM_DEBUG_KMS("id:%u pp:%d pending_cnt:%d\n", DRMID(phys_enc->parent),
+		      phys_enc->hw_pp->idx - PINGPONG_0,
+		      atomic_read(&phys_enc->pending_kickoff_cnt));
+
+	/*
+	 * Mark the kickoff request as outstanding. If more than one is
+	 * outstanding, we have to wait for the previous kickoff to complete.
+	 */
+	ret = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
+	if (ret) {
+		/* force pending_kickoff_cnt 0 to discard failed kickoff */
+		atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+		DRM_ERROR("failed wait_for_idle: id:%u ret:%d pp:%d\n",
+			  DRMID(phys_enc->parent), ret,
+			  phys_enc->hw_pp->idx - PINGPONG_0);
+	}
+
+	DPU_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n",
+			phys_enc->hw_pp->idx - PINGPONG_0,
+			atomic_read(&phys_enc->pending_kickoff_cnt));
+}
+
+static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_cmd *cmd_enc =
+			to_dpu_encoder_phys_cmd(phys_enc);
+	struct dpu_encoder_wait_info wait_info;
+	int ret;
+
+	if (!phys_enc || !phys_enc->hw_ctl) {
+		DPU_ERROR("invalid argument(s)\n");
+		return -EINVAL;
+	}
+
+	wait_info.wq = &phys_enc->pending_kickoff_wq;
+	wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
+	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+	ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_CTL_START,
+			&wait_info);
+	if (ret == -ETIMEDOUT) {
+		DPU_ERROR_CMDENC(cmd_enc, "ctl start interrupt wait failed\n");
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_tx_complete(
+		struct dpu_encoder_phys *phys_enc)
+{
+	int rc;
+	struct dpu_encoder_phys_cmd *cmd_enc;
+
+	if (!phys_enc)
+		return -EINVAL;
+
+	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+	rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
+	if (rc) {
+		DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n",
+			  DRMID(phys_enc->parent), rc,
+			  phys_enc->intf_idx - INTF_0);
+	}
+
+	return rc;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_commit_done(
+		struct dpu_encoder_phys *phys_enc)
+{
+	int rc = 0;
+	struct dpu_encoder_phys_cmd *cmd_enc;
+
+	if (!phys_enc)
+		return -EINVAL;
+
+	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+	/* only required for master controller */
+	if (dpu_encoder_phys_cmd_is_master(phys_enc))
+		rc = _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
+
+	/* required for both controllers */
+	if (!rc && cmd_enc->serialize_wait4pp)
+		dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc, NULL);
+
+	return rc;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_vblank(
+		struct dpu_encoder_phys *phys_enc)
+{
+	int rc = 0;
+	struct dpu_encoder_phys_cmd *cmd_enc;
+	struct dpu_encoder_wait_info wait_info;
+
+	if (!phys_enc)
+		return -EINVAL;
+
+	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+	/* only required for master controller */
+	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+		return rc;
+
+	wait_info.wq = &cmd_enc->pending_vblank_wq;
+	wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
+	wait_info.timeout_ms = _dpu_encoder_phys_cmd_get_idle_timeout(cmd_enc);
+
+	atomic_inc(&cmd_enc->pending_vblank_cnt);
+
+	rc = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_RDPTR,
+			&wait_info);
+
+	return rc;
+}
+
+static void dpu_encoder_phys_cmd_handle_post_kickoff(
+		struct dpu_encoder_phys *phys_enc)
+{
+	if (!phys_enc)
+		return;
+
+	/*
+	 * Re-enable external TE, either for the first time after enabling
+	 * or if it was disabled for autorefresh.
+	 */
+	_dpu_encoder_phys_cmd_connect_te(phys_enc, true);
+}
+
+static void dpu_encoder_phys_cmd_trigger_start(
+		struct dpu_encoder_phys *phys_enc)
+{
+	if (!phys_enc)
+		return;
+
+	dpu_encoder_helper_trigger_start(phys_enc);
+}
+
+static void dpu_encoder_phys_cmd_init_ops(
+		struct dpu_encoder_phys_ops *ops)
+{
+	ops->is_master = dpu_encoder_phys_cmd_is_master;
+	ops->mode_set = dpu_encoder_phys_cmd_mode_set;
+	ops->mode_fixup = dpu_encoder_phys_cmd_mode_fixup;
+	ops->enable = dpu_encoder_phys_cmd_enable;
+	ops->disable = dpu_encoder_phys_cmd_disable;
+	ops->destroy = dpu_encoder_phys_cmd_destroy;
+	ops->get_hw_resources = dpu_encoder_phys_cmd_get_hw_resources;
+	ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
+	ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done;
+	ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff;
+	ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete;
+	ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
+	ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
+	ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
+	ops->hw_reset = dpu_encoder_helper_hw_reset;
+	ops->irq_control = dpu_encoder_phys_cmd_irq_control;
+	ops->restore = dpu_encoder_phys_cmd_enable_helper;
+	ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
+	ops->handle_post_kickoff = dpu_encoder_phys_cmd_handle_post_kickoff;
+	ops->get_line_count = dpu_encoder_phys_cmd_get_line_count;
+}
+
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+		struct dpu_enc_phys_init_params *p)
+{
+	struct dpu_encoder_phys *phys_enc = NULL;
+	struct dpu_encoder_phys_cmd *cmd_enc = NULL;
+	struct dpu_hw_mdp *hw_mdp;
+	struct dpu_encoder_irq *irq;
+	int i, ret = 0;
+
+	DPU_DEBUG("intf %d\n", p->intf_idx - INTF_0);
+
+	cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
+	if (!cmd_enc) {
+		ret = -ENOMEM;
+		DPU_ERROR("failed to allocate\n");
+		goto fail;
+	}
+	phys_enc = &cmd_enc->base;
+
+	hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm);
+	if (IS_ERR_OR_NULL(hw_mdp)) {
+		ret = hw_mdp ? PTR_ERR(hw_mdp) : -EINVAL;
+		DPU_ERROR("failed to get mdptop\n");
+		goto fail_mdp_init;
+	}
+	phys_enc->hw_mdptop = hw_mdp;
+	phys_enc->intf_idx = p->intf_idx;
+
+	dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
+	phys_enc->parent = p->parent;
+	phys_enc->parent_ops = p->parent_ops;
+	phys_enc->dpu_kms = p->dpu_kms;
+	phys_enc->split_role = p->split_role;
+	phys_enc->intf_mode = INTF_MODE_CMD;
+	phys_enc->enc_spinlock = p->enc_spinlock;
+	cmd_enc->stream_sel = 0;
+	phys_enc->enable_state = DPU_ENC_DISABLED;
+	for (i = 0; i < INTR_IDX_MAX; i++) {
+		irq = &phys_enc->irq[i];
+		INIT_LIST_HEAD(&irq->cb.list);
+		irq->irq_idx = -EINVAL;
+		irq->hw_idx = -EINVAL;
+		irq->cb.arg = phys_enc;
+	}
+
+	irq = &phys_enc->irq[INTR_IDX_CTL_START];
+	irq->name = "ctl_start";
+	irq->intr_type = DPU_IRQ_TYPE_CTL_START;
+	irq->intr_idx = INTR_IDX_CTL_START;
+	irq->cb.func = dpu_encoder_phys_cmd_ctl_start_irq;
+
+	irq = &phys_enc->irq[INTR_IDX_PINGPONG];
+	irq->name = "pp_done";
+	irq->intr_type = DPU_IRQ_TYPE_PING_PONG_COMP;
+	irq->intr_idx = INTR_IDX_PINGPONG;
+	irq->cb.func = dpu_encoder_phys_cmd_pp_tx_done_irq;
+
+	irq = &phys_enc->irq[INTR_IDX_RDPTR];
+	irq->name = "pp_rd_ptr";
+	irq->intr_type = DPU_IRQ_TYPE_PING_PONG_RD_PTR;
+	irq->intr_idx = INTR_IDX_RDPTR;
+	irq->cb.func = dpu_encoder_phys_cmd_pp_rd_ptr_irq;
+
+	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+	irq->name = "underrun";
+	irq->intr_type = DPU_IRQ_TYPE_INTF_UNDER_RUN;
+	irq->intr_idx = INTR_IDX_UNDERRUN;
+	irq->cb.func = dpu_encoder_phys_cmd_underrun_irq;
+
+	atomic_set(&phys_enc->vblank_refcount, 0);
+	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+	atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
+	atomic_set(&cmd_enc->pending_vblank_cnt, 0);
+	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+	init_waitqueue_head(&cmd_enc->pending_vblank_wq);
+
+	DPU_DEBUG_CMDENC(cmd_enc, "created\n");
+
+	return phys_enc;
+
+fail_mdp_init:
+	kfree(cmd_enc);
+fail:
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
new file mode 100644
index 000000000000..110c463077ed
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -0,0 +1,922 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_trace.h"
+
+#define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
+		(e) && (e)->base.parent ? \
+		(e)->base.parent->base.id : -1, \
+		(e) && (e)->hw_intf ? \
+		(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_VIDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
+		(e) && (e)->base.parent ? \
+		(e)->base.parent->base.id : -1, \
+		(e) && (e)->hw_intf ? \
+		(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define to_dpu_encoder_phys_vid(x) \
+	container_of(x, struct dpu_encoder_phys_vid, base)
+
+static bool dpu_encoder_phys_vid_is_master(
+		struct dpu_encoder_phys *phys_enc)
+{
+	return phys_enc->split_role != ENC_ROLE_SLAVE;
+}
+
+static void drm_mode_to_intf_timing_params(
+		const struct dpu_encoder_phys_vid *vid_enc,
+		const struct drm_display_mode *mode,
+		struct intf_timing_params *timing)
+{
+	memset(timing, 0, sizeof(*timing));
+
+	if ((mode->htotal < mode->hsync_end)
+			|| (mode->hsync_start < mode->hdisplay)
+			|| (mode->vtotal < mode->vsync_end)
+			|| (mode->vsync_start < mode->vdisplay)
+			|| (mode->hsync_end < mode->hsync_start)
+			|| (mode->vsync_end < mode->vsync_start)) {
+		DPU_ERROR(
+		    "invalid params - hstart:%d,hend:%d,htot:%d,hdisplay:%d\n",
+				mode->hsync_start, mode->hsync_end,
+				mode->htotal, mode->hdisplay);
+		DPU_ERROR("vstart:%d,vend:%d,vtot:%d,vdisplay:%d\n",
+				mode->vsync_start, mode->vsync_end,
+				mode->vtotal, mode->vdisplay);
+		return;
+	}
+
+	/*
+	 * https://www.kernel.org/doc/htmldocs/drm/ch02s05.html
+	 *  Active Region      Front Porch   Sync   Back Porch
+	 * <-----------------><------------><-----><----------->
+	 * <- [hv]display --->
+	 * <--------- [hv]sync_start ------>
+	 * <----------------- [hv]sync_end ------->
+	 * <---------------------------- [hv]total ------------->
+	 */
+	timing->width = mode->hdisplay;	/* active width */
+	timing->height = mode->vdisplay;	/* active height */
+	timing->xres = timing->width;
+	timing->yres = timing->height;
+	timing->h_back_porch = mode->htotal - mode->hsync_end;
+	timing->h_front_porch = mode->hsync_start - mode->hdisplay;
+	timing->v_back_porch = mode->vtotal - mode->vsync_end;
+	timing->v_front_porch = mode->vsync_start - mode->vdisplay;
+	timing->hsync_pulse_width = mode->hsync_end - mode->hsync_start;
+	timing->vsync_pulse_width = mode->vsync_end - mode->vsync_start;
+	timing->hsync_polarity = (mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
+	timing->vsync_polarity = (mode->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
+	timing->border_clr = 0;
+	timing->underflow_clr = 0xff;
+	timing->hsync_skew = mode->hskew;
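+
+	/*
+	 * Worked example (standard CEA-861 1920x1080@60: hdisplay 1920,
+	 * hsync_start 2008, hsync_end 2052, htotal 2200): h_front_porch =
+	 * 2008 - 1920 = 88, hsync_pulse_width = 2052 - 2008 = 44 and
+	 * h_back_porch = 2200 - 2052 = 148.
+	 */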
+
+	/* DSI controller cannot handle active-low sync signals. */
+	if (vid_enc->hw_intf->cap->type == INTF_DSI) {
+		timing->hsync_polarity = 0;
+		timing->vsync_polarity = 0;
+	}
+
+	/*
+	 * For edp only:
+	 * DISPLAY_V_START = (VBP * HCYCLE) + HBP
+	 * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
+	 */
+	/*
+	 * if (vid_enc->hw->cap->type == INTF_EDP) {
+	 * display_v_start += mode->htotal - mode->hsync_start;
+	 * display_v_end -= mode->hsync_start - mode->hdisplay;
+	 * }
+	 */
+}
+
+static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
+{
+	u32 active = timing->xres;
+	u32 inactive =
+	    timing->h_back_porch + timing->h_front_porch +
+	    timing->hsync_pulse_width;
+	return active + inactive;
+}
+
+static inline u32 get_vertical_total(const struct intf_timing_params *timing)
+{
+	u32 active = timing->yres;
+	u32 inactive =
+	    timing->v_back_porch + timing->v_front_porch +
+	    timing->vsync_pulse_width;
+	return active + inactive;
+}
+
+/*
+ * programmable_fetch_get_num_lines:
+ *	Number of fetch lines in vertical front porch
+ * @timing: Pointer to the intf timing information for the requested mode
+ *
+ * Returns the number of fetch lines in vertical front porch at which mdp
+ * can start fetching the next frame.
+ *
+ * Number of needed prefetch lines is anything that cannot be absorbed in the
+ * start of frame time (back porch + vsync pulse width).
+ *
+ * Some panels have a very large VFP; however, we only need a total number of
+ * lines based on the chip's worst-case latencies.
+ */
+static u32 programmable_fetch_get_num_lines(
+		struct dpu_encoder_phys_vid *vid_enc,
+		const struct intf_timing_params *timing)
+{
+	u32 worst_case_needed_lines =
+	    vid_enc->hw_intf->cap->prog_fetch_lines_worst_case;
+	u32 start_of_frame_lines =
+	    timing->v_back_porch + timing->vsync_pulse_width;
+	u32 needed_vfp_lines = worst_case_needed_lines - start_of_frame_lines;
+	u32 actual_vfp_lines = 0;
+
+	/* Fetch must be outside active lines, otherwise undefined. */
+	if (start_of_frame_lines >= worst_case_needed_lines) {
+		DPU_DEBUG_VIDENC(vid_enc,
+				"prog fetch is not needed, large vbp+vsw\n");
+		actual_vfp_lines = 0;
+	} else if (timing->v_front_porch < needed_vfp_lines) {
+		/* Warn fetch needed, but not enough porch in panel config */
+		pr_warn_once
+			("low vbp+vfp may lead to perf issues in some cases\n");
+		DPU_DEBUG_VIDENC(vid_enc,
+				"less vfp than fetch req, using entire vfp\n");
+		actual_vfp_lines = timing->v_front_porch;
+	} else {
+		DPU_DEBUG_VIDENC(vid_enc, "room in vfp for needed prefetch\n");
+		actual_vfp_lines = needed_vfp_lines;
+	}
+
+	DPU_DEBUG_VIDENC(vid_enc,
+		"v_front_porch %u v_back_porch %u vsync_pulse_width %u\n",
+		timing->v_front_porch, timing->v_back_porch,
+		timing->vsync_pulse_width);
+	DPU_DEBUG_VIDENC(vid_enc,
+		"wc_lines %u needed_vfp_lines %u actual_vfp_lines %u\n",
+		worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines);
+
+	return actual_vfp_lines;
+}
+
+/*
+ * programmable_fetch_config: Programs HW to prefetch lines by offsetting
+ *	the start of fetch into the vertical front porch for cases where the
+ *	vsync pulse width and vertical back porch time is insufficient
+ *
+ *	Gets # of lines to pre-fetch, then calculate VSYNC counter value.
+ *	HW layer requires VSYNC counter of first pixel of tgt VFP line.
+ *
+ * @timing: Pointer to the intf timing information for the requested mode
+ */
+static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc,
+				      const struct intf_timing_params *timing)
+{
+	struct dpu_encoder_phys_vid *vid_enc =
+		to_dpu_encoder_phys_vid(phys_enc);
+	struct intf_prog_fetch f = { 0 };
+	u32 vfp_fetch_lines = 0;
+	u32 horiz_total = 0;
+	u32 vert_total = 0;
+	u32 vfp_fetch_start_vsync_counter = 0;
+	unsigned long lock_flags;
+
+	if (WARN_ON_ONCE(!vid_enc->hw_intf->ops.setup_prg_fetch))
+		return;
+
+	vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc, timing);
+	if (vfp_fetch_lines) {
+		vert_total = get_vertical_total(timing);
+		horiz_total = get_horizontal_total(timing);
+		vfp_fetch_start_vsync_counter =
+		    (vert_total - vfp_fetch_lines) * horiz_total + 1;
+		f.enable = 1;
+		f.fetch_start = vfp_fetch_start_vsync_counter;
+	}
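+
+	/*
+	 * Example with illustrative 1080p numbers: vert_total 1125,
+	 * horiz_total 2200 and vfp_fetch_lines 2 give a fetch start of
+	 * (1125 - 2) * 2200 + 1 = 2470601, i.e. two lines before the end
+	 * of the frame.
+	 */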
+
+	DPU_DEBUG_VIDENC(vid_enc,
+		"vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u\n",
+		vfp_fetch_lines, vfp_fetch_start_vsync_counter);
+
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	vid_enc->hw_intf->ops.setup_prg_fetch(vid_enc->hw_intf, &f);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+}
+
+static bool dpu_encoder_phys_vid_mode_fixup(
+		struct dpu_encoder_phys *phys_enc,
+		const struct drm_display_mode *mode,
+		struct drm_display_mode *adj_mode)
+{
+	if (phys_enc)
+		DPU_DEBUG_VIDENC(to_dpu_encoder_phys_vid(phys_enc), "\n");
+
+	/*
+	 * Modifying the adjusted mode here has consequences when the same
+	 * mode comes back to us later in mode_set; leave it untouched.
+	 */
+	return true;
+}
+
+static void dpu_encoder_phys_vid_setup_timing_engine(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_vid *vid_enc;
+	struct drm_display_mode mode;
+	struct intf_timing_params timing_params = { 0 };
+	const struct dpu_format *fmt = NULL;
+	u32 fmt_fourcc = DRM_FORMAT_RGB888;
+	unsigned long lock_flags;
+	struct dpu_hw_intf_cfg intf_cfg = { 0 };
+
+	if (!phys_enc || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+		DPU_ERROR("invalid encoder %d\n", phys_enc != 0);
+		return;
+	}
+
+	mode = phys_enc->cached_mode;
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+	if (!vid_enc->hw_intf->ops.setup_timing_gen) {
+		DPU_ERROR("timing engine setup is not supported\n");
+		return;
+	}
+
+	DPU_DEBUG_VIDENC(vid_enc, "enabling mode:\n");
+	drm_mode_debug_printmodeline(&mode);
+
+	if (phys_enc->split_role != ENC_ROLE_SOLO) {
+		mode.hdisplay >>= 1;
+		mode.htotal >>= 1;
+		mode.hsync_start >>= 1;
+		mode.hsync_end >>= 1;
+
+		DPU_DEBUG_VIDENC(vid_enc,
+			"split_role %d, halve horizontal %d %d %d %d\n",
+			phys_enc->split_role,
+			mode.hdisplay, mode.htotal,
+			mode.hsync_start, mode.hsync_end);
+	}
+
+	drm_mode_to_intf_timing_params(vid_enc, &mode, &timing_params);
+
+	fmt = dpu_get_dpu_format(fmt_fourcc);
+	DPU_DEBUG_VIDENC(vid_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
+
+	intf_cfg.intf = vid_enc->hw_intf->idx;
+	intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID;
+	intf_cfg.stream_sel = 0; /* Don't care value for video mode */
+	intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	vid_enc->hw_intf->ops.setup_timing_gen(vid_enc->hw_intf,
+			&timing_params, fmt);
+	phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	programmable_fetch_config(phys_enc, &timing_params);
+
+	vid_enc->timing_params = timing_params;
+}
+
+static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
+{
+	struct dpu_encoder_phys *phys_enc = arg;
+	struct dpu_hw_ctl *hw_ctl;
+	unsigned long lock_flags;
+	u32 flush_register = 0;
+	int new_cnt = -1, old_cnt = -1;
+
+	if (!phys_enc)
+		return;
+
+	hw_ctl = phys_enc->hw_ctl;
+	if (!hw_ctl)
+		return;
+
+	DPU_ATRACE_BEGIN("vblank_irq");
+
+	if (phys_enc->parent_ops.handle_vblank_virt)
+		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
+				phys_enc);
+
+	old_cnt  = atomic_read(&phys_enc->pending_kickoff_cnt);
+
+	/*
+	 * only decrement the pending flush count if we've actually flushed
+	 * hardware. due to sw irq latency, vblank may have already happened
+	 * so we need to double-check with hw that it accepted the flush bits
+	 */
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	if (hw_ctl && hw_ctl->ops.get_flush_register)
+		flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
+
+	if (flush_register == 0)
+		new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
+				-1, 0);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	/* Signal any waiting atomic commit thread */
+	wake_up_all(&phys_enc->pending_kickoff_wq);
+	DPU_ATRACE_END("vblank_irq");
+}
+
+static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
+{
+	struct dpu_encoder_phys *phys_enc = arg;
+
+	if (!phys_enc)
+		return;
+
+	if (phys_enc->parent_ops.handle_underrun_virt)
+		phys_enc->parent_ops.handle_underrun_virt(phys_enc->parent,
+			phys_enc);
+}
+
+static bool _dpu_encoder_phys_is_dual_ctl(struct dpu_encoder_phys *phys_enc)
+{
+	if (!phys_enc)
+		return false;
+
+	if (phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE)
+		return true;
+
+	return false;
+}
+
+static bool dpu_encoder_phys_vid_needs_single_flush(
+		struct dpu_encoder_phys *phys_enc)
+{
+	return (phys_enc && _dpu_encoder_phys_is_dual_ctl(phys_enc));
+}
+
+static void _dpu_encoder_phys_vid_setup_irq_hw_idx(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_irq *irq;
+
+	/*
+	 * Initialize irq->hw_idx only when the irq is not yet registered.
+	 * Avoid invalidating irq->irq_idx, since modeset may be called
+	 * many times during dynamic fps (dfps) switching.
+	 */
+
+	irq = &phys_enc->irq[INTR_IDX_VSYNC];
+	if (irq->irq_idx < 0)
+		irq->hw_idx = phys_enc->intf_idx;
+
+	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+	if (irq->irq_idx < 0)
+		irq->hw_idx = phys_enc->intf_idx;
+}
+
+static void dpu_encoder_phys_vid_mode_set(
+		struct dpu_encoder_phys *phys_enc,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adj_mode)
+{
+	struct dpu_rm *rm;
+	struct dpu_rm_hw_iter iter;
+	int i, instance;
+	struct dpu_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc || !phys_enc->dpu_kms) {
+		DPU_ERROR("invalid encoder/kms\n");
+		return;
+	}
+
+	rm = &phys_enc->dpu_kms->rm;
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+	if (adj_mode) {
+		phys_enc->cached_mode = *adj_mode;
+		drm_mode_debug_printmodeline(adj_mode);
+		DPU_DEBUG_VIDENC(vid_enc, "caching mode:\n");
+	}
+
+	instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
+
+	/* Retrieve previously allocated HW resources; this should not fail */
+	dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL);
+	for (i = 0; i <= instance; i++) {
+		if (dpu_rm_get_hw(rm, &iter))
+			phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw;
+	}
+	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+		DPU_ERROR_VIDENC(vid_enc, "failed to init ctl, %ld\n",
+				PTR_ERR(phys_enc->hw_ctl));
+		phys_enc->hw_ctl = NULL;
+		return;
+	}
+
+	_dpu_encoder_phys_vid_setup_irq_hw_idx(phys_enc);
+}
+
+static int dpu_encoder_phys_vid_control_vblank_irq(
+		struct dpu_encoder_phys *phys_enc,
+		bool enable)
+{
+	int ret = 0;
+	struct dpu_encoder_phys_vid *vid_enc;
+	int refcount;
+
+	if (!phys_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	refcount = atomic_read(&phys_enc->vblank_refcount);
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+	/* Slave encoders don't report vblank */
+	if (!dpu_encoder_phys_vid_is_master(phys_enc))
+		goto end;
+
+	/* protect against negative */
+	if (!enable && refcount == 0) {
+		ret = -EINVAL;
+		goto end;
+	}
+
+	DRM_DEBUG_KMS("id:%u enable=%d/%d\n", DRMID(phys_enc->parent), enable,
+		      atomic_read(&phys_enc->vblank_refcount));
+
+	if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+		ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_VSYNC);
+	else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+		ret = dpu_encoder_helper_unregister_irq(phys_enc,
+				INTR_IDX_VSYNC);
+
+end:
+	if (ret) {
+		DRM_ERROR("failed: id:%u intf:%d ret:%d enable:%d refcnt:%d\n",
+			  DRMID(phys_enc->parent),
+			  vid_enc->hw_intf->idx - INTF_0, ret, enable,
+			  refcount);
+	}
+	return ret;
+}
+
+static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
+{
+	struct msm_drm_private *priv;
+	struct dpu_encoder_phys_vid *vid_enc;
+	struct dpu_hw_intf *intf;
+	struct dpu_hw_ctl *ctl;
+	u32 flush_mask = 0;
+
+	if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
+			!phys_enc->parent->dev->dev_private) {
+		DPU_ERROR("invalid encoder/device\n");
+		return;
+	}
+	priv = phys_enc->parent->dev->dev_private;
+
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+	intf = vid_enc->hw_intf;
+	ctl = phys_enc->hw_ctl;
+	if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+		DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
+				vid_enc->hw_intf != NULL, phys_enc->hw_ctl != NULL);
+		return;
+	}
+
+	DPU_DEBUG_VIDENC(vid_enc, "\n");
+
+	if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+		return;
+
+	dpu_encoder_helper_split_config(phys_enc, vid_enc->hw_intf->idx);
+
+	dpu_encoder_phys_vid_setup_timing_engine(phys_enc);
+
+	/*
+	 * For single flush cases (dual-ctl or pp-split), skip setting the
+	 * flush bit for the slave intf, since both intfs use same ctl
+	 * and HW will only flush the master.
+	 */
+	if (dpu_encoder_phys_vid_needs_single_flush(phys_enc) &&
+		!dpu_encoder_phys_vid_is_master(phys_enc))
+		goto skip_flush;
+
+	ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx);
+	ctl->ops.update_pending_flush(ctl, flush_mask);
+
+skip_flush:
+	DPU_DEBUG_VIDENC(vid_enc, "update pending flush ctl %d flush_mask %x\n",
+		ctl->idx - CTL_0, flush_mask);
+
+	/* ctl_flush & timing engine enable will be triggered by framework */
+	if (phys_enc->enable_state == DPU_ENC_DISABLED)
+		phys_enc->enable_state = DPU_ENC_ENABLING;
+}
+
+static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+	DPU_DEBUG_VIDENC(vid_enc, "\n");
+	kfree(vid_enc);
+}
+
+static void dpu_encoder_phys_vid_get_hw_resources(
+		struct dpu_encoder_phys *phys_enc,
+		struct dpu_encoder_hw_resources *hw_res,
+		struct drm_connector_state *conn_state)
+{
+	struct dpu_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc || !hw_res) {
+		DPU_ERROR("invalid arg(s), enc %d hw_res %d conn_state %d\n",
+				phys_enc != NULL, hw_res != NULL, conn_state != NULL);
+		return;
+	}
+
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+	if (!vid_enc->hw_intf) {
+		DPU_ERROR("invalid arg(s), hw_intf\n");
+		return;
+	}
+
+	DPU_DEBUG_VIDENC(vid_enc, "\n");
+	hw_res->intfs[vid_enc->hw_intf->idx - INTF_0] = INTF_MODE_VIDEO;
+}
+
+static int _dpu_encoder_phys_vid_wait_for_vblank(
+		struct dpu_encoder_phys *phys_enc, bool notify)
+{
+	struct dpu_encoder_wait_info wait_info;
+	int ret;
+
+	if (!phys_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	wait_info.wq = &phys_enc->pending_kickoff_wq;
+	wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+	if (!dpu_encoder_phys_vid_is_master(phys_enc)) {
+		if (notify && phys_enc->parent_ops.handle_frame_done)
+			phys_enc->parent_ops.handle_frame_done(
+					phys_enc->parent, phys_enc,
+					DPU_ENCODER_FRAME_EVENT_DONE);
+		return 0;
+	}
+
+	/* Wait for kickoff to complete */
+	ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_VSYNC,
+			&wait_info);
+
+	if (ret == -ETIMEDOUT) {
+		dpu_encoder_helper_report_irq_timeout(phys_enc, INTR_IDX_VSYNC);
+	} else if (!ret && notify && phys_enc->parent_ops.handle_frame_done) {
+		phys_enc->parent_ops.handle_frame_done(
+				phys_enc->parent, phys_enc,
+				DPU_ENCODER_FRAME_EVENT_DONE);
+	}
+
+	return ret;
+}
+
+static int dpu_encoder_phys_vid_wait_for_vblank(
+		struct dpu_encoder_phys *phys_enc)
+{
+	return _dpu_encoder_phys_vid_wait_for_vblank(phys_enc, true);
+}
+
+static void dpu_encoder_phys_vid_prepare_for_kickoff(
+		struct dpu_encoder_phys *phys_enc,
+		struct dpu_encoder_kickoff_params *params)
+{
+	struct dpu_encoder_phys_vid *vid_enc;
+	struct dpu_hw_ctl *ctl;
+	int rc;
+
+	if (!phys_enc || !params) {
+		DPU_ERROR("invalid encoder/parameters\n");
+		return;
+	}
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+	ctl = phys_enc->hw_ctl;
+	if (!ctl || !ctl->ops.wait_reset_status)
+		return;
+
+	/*
+	 * The hardware can initiate a CTL reset on its own; before kicking
+	 * off a new frame, check for and wait on completion of any such
+	 * hardware-initiated reset.
+	 */
+	rc = ctl->ops.wait_reset_status(ctl);
+	if (rc) {
+		DPU_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n",
+				ctl->idx, rc);
+		dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC);
+		dpu_dbg_dump(false, __func__, true, true);
+	}
+}
+
+static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
+{
+	struct msm_drm_private *priv;
+	struct dpu_encoder_phys_vid *vid_enc;
+	unsigned long lock_flags;
+	int ret;
+
+	if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
+			!phys_enc->parent->dev->dev_private) {
+		DPU_ERROR("invalid encoder/device\n");
+		return;
+	}
+	priv = phys_enc->parent->dev->dev_private;
+
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+	if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+		DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
+				vid_enc->hw_intf != NULL, phys_enc->hw_ctl != NULL);
+		return;
+	}
+
+	DPU_DEBUG_VIDENC(vid_enc, "\n");
+
+	if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+		return;
+
+	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+		DPU_ERROR("already disabled\n");
+		return;
+	}
+
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 0);
+	if (dpu_encoder_phys_vid_is_master(phys_enc))
+		dpu_encoder_phys_inc_pending(phys_enc);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	/*
+	 * Wait for a vsync so we know the ENABLE=0 latched before the
+	 * (connector) source of the vsyncs gets disabled. Otherwise we end
+	 * up in a funny state if we re-enable before the disable latches,
+	 * and some of the setting changes for the new modeset (such as a
+	 * new scanout buffer) do not latch properly.
+	 */
+	if (dpu_encoder_phys_vid_is_master(phys_enc)) {
+		ret = _dpu_encoder_phys_vid_wait_for_vblank(phys_enc, false);
+		if (ret) {
+			atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+			DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
+				  DRMID(phys_enc->parent),
+				  vid_enc->hw_intf->idx - INTF_0, ret);
+		}
+	}
+
+	phys_enc->enable_state = DPU_ENC_DISABLED;
+}
+
+static void dpu_encoder_phys_vid_handle_post_kickoff(
+		struct dpu_encoder_phys *phys_enc)
+{
+	unsigned long lock_flags;
+	struct dpu_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc) {
+		DPU_ERROR("invalid encoder\n");
+		return;
+	}
+
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+	DPU_DEBUG_VIDENC(vid_enc, "enable_state %d\n", phys_enc->enable_state);
+
+	/*
+	 * In video mode the CTL must be flushed before the timing engine
+	 * is enabled, so video encoders turn on their interfaces here.
+	 */
+	if (phys_enc->enable_state == DPU_ENC_ENABLING) {
+		trace_dpu_enc_phys_vid_post_kickoff(DRMID(phys_enc->parent),
+				    vid_enc->hw_intf->idx - INTF_0);
+		spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+		vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 1);
+		spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+		phys_enc->enable_state = DPU_ENC_ENABLED;
+	}
+}
+
+static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
+		bool enable)
+{
+	struct dpu_encoder_phys_vid *vid_enc;
+	int ret;
+
+	if (!phys_enc)
+		return;
+
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+	trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent),
+			    vid_enc->hw_intf->idx - INTF_0,
+			    enable,
+			    atomic_read(&phys_enc->vblank_refcount));
+
+	if (enable) {
+		ret = dpu_encoder_phys_vid_control_vblank_irq(phys_enc, true);
+		if (ret)
+			return;
+
+		dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
+	} else {
+		dpu_encoder_phys_vid_control_vblank_irq(phys_enc, false);
+		dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
+	}
+}
+
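+/*
+ * MISR (Multiple Input Signature Register) support: the interface can
+ * accumulate a CRC-style signature over a number of output frames;
+ * setup_misr arms it and collect_misr reads the signature back, which
+ * is useful for automated display validation.
+ */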
+static void dpu_encoder_phys_vid_setup_misr(struct dpu_encoder_phys *phys_enc,
+						bool enable, u32 frame_count)
+{
+	struct dpu_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc)
+		return;
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+	if (vid_enc->hw_intf && vid_enc->hw_intf->ops.setup_misr)
+		vid_enc->hw_intf->ops.setup_misr(vid_enc->hw_intf,
+							enable, frame_count);
+}
+
+static u32 dpu_encoder_phys_vid_collect_misr(struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc)
+		return 0;
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+	return vid_enc->hw_intf && vid_enc->hw_intf->ops.collect_misr ?
+		vid_enc->hw_intf->ops.collect_misr(vid_enc->hw_intf) : 0;
+}
+
+static int dpu_encoder_phys_vid_get_line_count(
+		struct dpu_encoder_phys *phys_enc)
+{
+	struct dpu_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc)
+		return -EINVAL;
+
+	if (!dpu_encoder_phys_vid_is_master(phys_enc))
+		return -EINVAL;
+
+	vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+	if (!vid_enc->hw_intf || !vid_enc->hw_intf->ops.get_line_count)
+		return -EINVAL;
+
+	return vid_enc->hw_intf->ops.get_line_count(vid_enc->hw_intf);
+}
+
+static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
+{
+	ops->is_master = dpu_encoder_phys_vid_is_master;
+	ops->mode_set = dpu_encoder_phys_vid_mode_set;
+	ops->mode_fixup = dpu_encoder_phys_vid_mode_fixup;
+	ops->enable = dpu_encoder_phys_vid_enable;
+	ops->disable = dpu_encoder_phys_vid_disable;
+	ops->destroy = dpu_encoder_phys_vid_destroy;
+	ops->get_hw_resources = dpu_encoder_phys_vid_get_hw_resources;
+	ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq;
+	ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_vblank;
+	ops->wait_for_vblank = dpu_encoder_phys_vid_wait_for_vblank;
+	ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_vblank;
+	ops->irq_control = dpu_encoder_phys_vid_irq_control;
+	ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
+	ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
+	ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush;
+	ops->setup_misr = dpu_encoder_phys_vid_setup_misr;
+	ops->collect_misr = dpu_encoder_phys_vid_collect_misr;
+	ops->hw_reset = dpu_encoder_helper_hw_reset;
+	ops->get_line_count = dpu_encoder_phys_vid_get_line_count;
+}
+
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+		struct dpu_enc_phys_init_params *p)
+{
+	struct dpu_encoder_phys *phys_enc = NULL;
+	struct dpu_encoder_phys_vid *vid_enc = NULL;
+	struct dpu_rm_hw_iter iter;
+	struct dpu_hw_mdp *hw_mdp;
+	struct dpu_encoder_irq *irq;
+	int i, ret = 0;
+
+	if (!p) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	vid_enc = kzalloc(sizeof(*vid_enc), GFP_KERNEL);
+	if (!vid_enc) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	phys_enc = &vid_enc->base;
+
+	hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm);
+	if (IS_ERR_OR_NULL(hw_mdp)) {
+		ret = hw_mdp ? PTR_ERR(hw_mdp) : -EINVAL;
+		DPU_ERROR("failed to get mdptop\n");
+		goto fail;
+	}
+	phys_enc->hw_mdptop = hw_mdp;
+	phys_enc->intf_idx = p->intf_idx;
+
+	/*
+	 * The hw_intf resource is permanently assigned to this encoder;
+	 * other resources are allocated at atomic commit time by use case.
+	 */
+	dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_INTF);
+	while (dpu_rm_get_hw(&p->dpu_kms->rm, &iter)) {
+		struct dpu_hw_intf *hw_intf = (struct dpu_hw_intf *)iter.hw;
+
+		if (hw_intf->idx == p->intf_idx) {
+			vid_enc->hw_intf = hw_intf;
+			break;
+		}
+	}
+
+	if (!vid_enc->hw_intf) {
+		ret = -EINVAL;
+		DPU_ERROR("failed to get hw_intf\n");
+		goto fail;
+	}
+
+	DPU_DEBUG_VIDENC(vid_enc, "\n");
+
+	dpu_encoder_phys_vid_init_ops(&phys_enc->ops);
+	phys_enc->parent = p->parent;
+	phys_enc->parent_ops = p->parent_ops;
+	phys_enc->dpu_kms = p->dpu_kms;
+	phys_enc->split_role = p->split_role;
+	phys_enc->intf_mode = INTF_MODE_VIDEO;
+	phys_enc->enc_spinlock = p->enc_spinlock;
+	for (i = 0; i < INTR_IDX_MAX; i++) {
+		irq = &phys_enc->irq[i];
+		INIT_LIST_HEAD(&irq->cb.list);
+		irq->irq_idx = -EINVAL;
+		irq->hw_idx = -EINVAL;
+		irq->cb.arg = phys_enc;
+	}
+
+	irq = &phys_enc->irq[INTR_IDX_VSYNC];
+	irq->name = "vsync_irq";
+	irq->intr_type = DPU_IRQ_TYPE_INTF_VSYNC;
+	irq->intr_idx = INTR_IDX_VSYNC;
+	irq->cb.func = dpu_encoder_phys_vid_vblank_irq;
+
+	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+	irq->name = "underrun";
+	irq->intr_type = DPU_IRQ_TYPE_INTF_UNDER_RUN;
+	irq->intr_idx = INTR_IDX_UNDERRUN;
+	irq->cb.func = dpu_encoder_phys_vid_underrun_irq;
+
+	atomic_set(&phys_enc->vblank_refcount, 0);
+	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+	phys_enc->enable_state = DPU_ENC_DISABLED;
+
+	DPU_DEBUG_VIDENC(vid_enc, "created intf idx:%d\n", p->intf_idx);
+
+	return phys_enc;
+
+fail:
+	DPU_ERROR("failed to create encoder\n");
+	if (vid_enc)
+		dpu_encoder_phys_vid_destroy(phys_enc);
+
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
new file mode 100644
index 000000000000..44fefc97e1b3
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -0,0 +1,1276 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <uapi/drm/drm_fourcc.h>
+#include <uapi/media/msm_media_info.h>
+
+#include "dpu_kms.h"
+#include "dpu_formats.h"
+
+#define DPU_UBWC_META_MACRO_W_H		16
+#define DPU_UBWC_META_BLOCK_SIZE	256
+#define DPU_UBWC_PLANE_SIZE_ALIGNMENT	4096
+
+#define DPU_TILE_HEIGHT_DEFAULT	1
+#define DPU_TILE_HEIGHT_TILED	4
+#define DPU_TILE_HEIGHT_UBWC	4
+#define DPU_TILE_HEIGHT_NV12	8
+
+#define DPU_MAX_IMG_WIDTH		0x3FFF
+#define DPU_MAX_IMG_HEIGHT		0x3FFF
+
+/*
+ * DPU supported format packing, bpp, and other format information.
+ * DPU currently only supports interleaved RGB formats.
+ * UBWC support for a pixel format is indicated by the flag;
+ * such formats carry an additional metadata plane.
+ */
+
+#define INTERLEAVED_RGB_FMT(fmt, a, r, g, b, e0, e1, e2, e3, uc, alpha,   \
+bp, flg, fm, np)                                                          \
+{                                                                         \
+	.base.pixel_format = DRM_FORMAT_ ## fmt,                          \
+	.fetch_planes = DPU_PLANE_INTERLEAVED,                            \
+	.alpha_enable = alpha,                                            \
+	.element = { (e0), (e1), (e2), (e3) },                            \
+	.bits = { g, b, r, a },                                           \
+	.chroma_sample = DPU_CHROMA_RGB,                                  \
+	.unpack_align_msb = 0,                                            \
+	.unpack_tight = 1,                                                \
+	.unpack_count = uc,                                               \
+	.bpp = bp,                                                        \
+	.fetch_mode = fm,                                                 \
+	.flag = {(flg)},                                                  \
+	.num_planes = np,                                                 \
+	.tile_height = DPU_TILE_HEIGHT_DEFAULT                            \
+}
+
+#define INTERLEAVED_RGB_FMT_TILED(fmt, a, r, g, b, e0, e1, e2, e3, uc,    \
+alpha, bp, flg, fm, np, th)                                               \
+{                                                                         \
+	.base.pixel_format = DRM_FORMAT_ ## fmt,                          \
+	.fetch_planes = DPU_PLANE_INTERLEAVED,                            \
+	.alpha_enable = alpha,                                            \
+	.element = { (e0), (e1), (e2), (e3) },                            \
+	.bits = { g, b, r, a },                                           \
+	.chroma_sample = DPU_CHROMA_RGB,                                  \
+	.unpack_align_msb = 0,                                            \
+	.unpack_tight = 1,                                                \
+	.unpack_count = uc,                                               \
+	.bpp = bp,                                                        \
+	.fetch_mode = fm,                                                 \
+	.flag = {(flg)},                                                  \
+	.num_planes = np,                                                 \
+	.tile_height = th                                                 \
+}
+
+
+#define INTERLEAVED_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, e3,              \
+alpha, chroma, count, bp, flg, fm, np)                                    \
+{                                                                         \
+	.base.pixel_format = DRM_FORMAT_ ## fmt,                          \
+	.fetch_planes = DPU_PLANE_INTERLEAVED,                            \
+	.alpha_enable = alpha,                                            \
+	.element = { (e0), (e1), (e2), (e3)},                             \
+	.bits = { g, b, r, a },                                           \
+	.chroma_sample = chroma,                                          \
+	.unpack_align_msb = 0,                                            \
+	.unpack_tight = 1,                                                \
+	.unpack_count = count,                                            \
+	.bpp = bp,                                                        \
+	.fetch_mode = fm,                                                 \
+	.flag = {(flg)},                                                  \
+	.num_planes = np,                                                 \
+	.tile_height = DPU_TILE_HEIGHT_DEFAULT                            \
+}
+
+#define PSEUDO_YUV_FMT(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np)      \
+{                                                                         \
+	.base.pixel_format = DRM_FORMAT_ ## fmt,                          \
+	.fetch_planes = DPU_PLANE_PSEUDO_PLANAR,                          \
+	.alpha_enable = false,                                            \
+	.element = { (e0), (e1), 0, 0 },                                  \
+	.bits = { g, b, r, a },                                           \
+	.chroma_sample = chroma,                                          \
+	.unpack_align_msb = 0,                                            \
+	.unpack_tight = 1,                                                \
+	.unpack_count = 2,                                                \
+	.bpp = 2,                                                         \
+	.fetch_mode = fm,                                                 \
+	.flag = {(flg)},                                                  \
+	.num_planes = np,                                                 \
+	.tile_height = DPU_TILE_HEIGHT_DEFAULT                            \
+}
+
+#define PSEUDO_YUV_FMT_TILED(fmt, a, r, g, b, e0, e1, chroma,             \
+flg, fm, np, th)                                                          \
+{                                                                         \
+	.base.pixel_format = DRM_FORMAT_ ## fmt,                          \
+	.fetch_planes = DPU_PLANE_PSEUDO_PLANAR,                          \
+	.alpha_enable = false,                                            \
+	.element = { (e0), (e1), 0, 0 },                                  \
+	.bits = { g, b, r, a },                                           \
+	.chroma_sample = chroma,                                          \
+	.unpack_align_msb = 0,                                            \
+	.unpack_tight = 1,                                                \
+	.unpack_count = 2,                                                \
+	.bpp = 2,                                                         \
+	.fetch_mode = fm,                                                 \
+	.flag = {(flg)},                                                  \
+	.num_planes = np,                                                 \
+	.tile_height = th                                                 \
+}
+
+#define PSEUDO_YUV_FMT_LOOSE(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np)\
+{                                                                         \
+	.base.pixel_format = DRM_FORMAT_ ## fmt,                          \
+	.fetch_planes = DPU_PLANE_PSEUDO_PLANAR,                          \
+	.alpha_enable = false,                                            \
+	.element = { (e0), (e1), 0, 0 },                                  \
+	.bits = { g, b, r, a },                                           \
+	.chroma_sample = chroma,                                          \
+	.unpack_align_msb = 1,                                            \
+	.unpack_tight = 0,                                                \
+	.unpack_count = 2,                                                \
+	.bpp = 2,                                                         \
+	.fetch_mode = fm,                                                 \
+	.flag = {(flg)},                                                  \
+	.num_planes = np,                                                 \
+	.tile_height = DPU_TILE_HEIGHT_DEFAULT                            \
+}
+
+#define PSEUDO_YUV_FMT_LOOSE_TILED(fmt, a, r, g, b, e0, e1, chroma,       \
+flg, fm, np, th)                                                          \
+{                                                                         \
+	.base.pixel_format = DRM_FORMAT_ ## fmt,                          \
+	.fetch_planes = DPU_PLANE_PSEUDO_PLANAR,                          \
+	.alpha_enable = false,                                            \
+	.element = { (e0), (e1), 0, 0 },                                  \
+	.bits = { g, b, r, a },                                           \
+	.chroma_sample = chroma,                                          \
+	.unpack_align_msb = 1,                                            \
+	.unpack_tight = 0,                                                \
+	.unpack_count = 2,                                                \
+	.bpp = 2,                                                         \
+	.fetch_mode = fm,                                                 \
+	.flag = {(flg)},                                                  \
+	.num_planes = np,                                                 \
+	.tile_height = th                                                 \
+}
+
+
+#define PLANAR_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, alpha, chroma, bp,    \
+flg, fm, np)                                                      \
+{                                                                         \
+	.base.pixel_format = DRM_FORMAT_ ## fmt,                          \
+	.fetch_planes = DPU_PLANE_PLANAR,                                 \
+	.alpha_enable = alpha,                                            \
+	.element = { (e0), (e1), (e2), 0 },                               \
+	.bits = { g, b, r, a },                                           \
+	.chroma_sample = chroma,                                          \
+	.unpack_align_msb = 0,                                            \
+	.unpack_tight = 1,                                                \
+	.unpack_count = 1,                                                \
+	.bpp = bp,                                                        \
+	.fetch_mode = fm,                                                 \
+	.flag = {(flg)},                                                  \
+	.num_planes = np,                                                 \
+	.tile_height = DPU_TILE_HEIGHT_DEFAULT                            \
+}
+
+/**
+ * struct dpu_media_color_map - maps drm format to media format
+ * @format: DRM base pixel format
+ * @color: Media API color related to DRM format
+ */
+struct dpu_media_color_map {
+	uint32_t format;
+	uint32_t color;
+};
+
+static const struct dpu_format dpu_format_map[] = {
+	INTERLEAVED_RGB_FMT(ARGB8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		true, 4, 0,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(ABGR8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		true, 4, 0,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(XBGR8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		true, 4, 0,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(RGBA8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		true, 4, 0,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(BGRA8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		true, 4, 0,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(BGRX8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		false, 4, 0,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(XRGB8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		false, 4, 0,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(RGBX8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		false, 4, 0,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(RGB888,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+		false, 3, 0,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(BGR888,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+		false, 3, 0,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(RGB565,
+		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+		false, 2, 0,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(BGR565,
+		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+		false, 2, 0,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(ARGB1555,
+		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		true, 2, 0,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(ABGR1555,
+		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		true, 2, 0,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(RGBA5551,
+		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		true, 2, 0,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(BGRA5551,
+		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		true, 2, 0,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(XRGB1555,
+		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		false, 2, 0,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(XBGR1555,
+		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		false, 2, 0,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(RGBX5551,
+		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		false, 2, 0,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(BGRX5551,
+		COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		false, 2, 0,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(ARGB4444,
+		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		true, 2, 0,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(ABGR4444,
+		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		true, 2, 0,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(RGBA4444,
+		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		true, 2, 0,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(BGRA4444,
+		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		true, 2, 0,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(XRGB4444,
+		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		false, 2, 0,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(XBGR4444,
+		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		false, 2, 0,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(RGBX4444,
+		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		false, 2, 0,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(BGRX4444,
+		COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		false, 2, 0,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(BGRA1010102,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		true, 4, DPU_FORMAT_FLAG_DX,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(RGBA1010102,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		true, 4, DPU_FORMAT_FLAG_DX,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(ABGR2101010,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		true, 4, DPU_FORMAT_FLAG_DX,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(ARGB2101010,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		true, 4, DPU_FORMAT_FLAG_DX,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(XRGB2101010,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		false, 4, DPU_FORMAT_FLAG_DX,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(BGRX1010102,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		false, 4, DPU_FORMAT_FLAG_DX,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(XBGR2101010,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		false, 4, DPU_FORMAT_FLAG_DX,
+		DPU_FETCH_LINEAR, 1),
+
+	INTERLEAVED_RGB_FMT(RGBX1010102,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		false, 4, DPU_FORMAT_FLAG_DX,
+		DPU_FETCH_LINEAR, 1),
+
+	PSEUDO_YUV_FMT(NV12,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C2_R_Cr,
+		DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+		DPU_FETCH_LINEAR, 2),
+
+	PSEUDO_YUV_FMT(NV21,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C1_B_Cb,
+		DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+		DPU_FETCH_LINEAR, 2),
+
+	PSEUDO_YUV_FMT(NV16,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C2_R_Cr,
+		DPU_CHROMA_H2V1, DPU_FORMAT_FLAG_YUV,
+		DPU_FETCH_LINEAR, 2),
+
+	PSEUDO_YUV_FMT(NV61,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C1_B_Cb,
+		DPU_CHROMA_H2V1, DPU_FORMAT_FLAG_YUV,
+		DPU_FETCH_LINEAR, 2),
+
+	INTERLEAVED_YUV_FMT(VYUY,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y,
+		false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+		DPU_FETCH_LINEAR, 2),
+
+	INTERLEAVED_YUV_FMT(UYVY,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y,
+		false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+		DPU_FETCH_LINEAR, 2),
+
+	INTERLEAVED_YUV_FMT(YUYV,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C0_G_Y, C1_B_Cb, C0_G_Y, C2_R_Cr,
+		false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+		DPU_FETCH_LINEAR, 2),
+
+	INTERLEAVED_YUV_FMT(YVYU,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C0_G_Y, C2_R_Cr, C0_G_Y, C1_B_Cb,
+		false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+		DPU_FETCH_LINEAR, 2),
+
+	PLANAR_YUV_FMT(YUV420,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C1_B_Cb, C0_G_Y,
+		false, DPU_CHROMA_420, 1, DPU_FORMAT_FLAG_YUV,
+		DPU_FETCH_LINEAR, 3),
+
+	PLANAR_YUV_FMT(YVU420,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C2_R_Cr, C0_G_Y,
+		false, DPU_CHROMA_420, 1, DPU_FORMAT_FLAG_YUV,
+		DPU_FETCH_LINEAR, 3),
+};
+
+/*
+ * A5x tile format tables:
+ * These tables hold the tile formats supported on A5x targets.
+ */
+static const struct dpu_format dpu_format_map_tile[] = {
+	INTERLEAVED_RGB_FMT_TILED(BGR565,
+		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+		false, 2, 0,
+		DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+	INTERLEAVED_RGB_FMT_TILED(ARGB8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		true, 4, 0,
+		DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+	INTERLEAVED_RGB_FMT_TILED(ABGR8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+		true, 4, 0,
+		DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+	INTERLEAVED_RGB_FMT_TILED(XBGR8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		false, 4, 0,
+		DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+	INTERLEAVED_RGB_FMT_TILED(RGBA8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		true, 4, 0,
+		DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+	INTERLEAVED_RGB_FMT_TILED(BGRA8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		true, 4, 0,
+		DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+	INTERLEAVED_RGB_FMT_TILED(BGRX8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+		false, 4, 0,
+		DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+	INTERLEAVED_RGB_FMT_TILED(XRGB8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+		false, 4, 0,
+		DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+	INTERLEAVED_RGB_FMT_TILED(RGBX8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		false, 4, 0,
+		DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+	INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		true, 4, DPU_FORMAT_FLAG_DX,
+		DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+	INTERLEAVED_RGB_FMT_TILED(XBGR2101010,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		true, 4, DPU_FORMAT_FLAG_DX,
+		DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+	PSEUDO_YUV_FMT_TILED(NV12,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C2_R_Cr,
+		DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+		DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_NV12),
+
+	PSEUDO_YUV_FMT_TILED(NV21,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C1_B_Cb,
+		DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+		DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_NV12),
+};
+
+static const struct dpu_format dpu_format_map_p010_tile[] = {
+	PSEUDO_YUV_FMT_LOOSE_TILED(NV12,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C2_R_Cr,
+		DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX),
+		DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_NV12),
+};
+
+static const struct dpu_format dpu_format_map_tp10_tile[] = {
+	PSEUDO_YUV_FMT_TILED(NV12,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C2_R_Cr,
+		DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX),
+		DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_NV12),
+};
+
+/*
+ * UBWC formats table:
+ * This table holds the UBWC formats supported.
+ * If a compression ratio needs to be used for this or any other format,
+ * the data will be passed by user-space.
+ */
+static const struct dpu_format dpu_format_map_ubwc[] = {
+	INTERLEAVED_RGB_FMT_TILED(BGR565,
+		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+		false, 2, DPU_FORMAT_FLAG_COMPRESSED,
+		DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+	INTERLEAVED_RGB_FMT_TILED(ABGR8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		true, 4, DPU_FORMAT_FLAG_COMPRESSED,
+		DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+	INTERLEAVED_RGB_FMT_TILED(XBGR8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		false, 4, DPU_FORMAT_FLAG_COMPRESSED,
+		DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+	INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED,
+		DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+	INTERLEAVED_RGB_FMT_TILED(XBGR2101010,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED,
+		DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+	PSEUDO_YUV_FMT_TILED(NV12,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C2_R_Cr,
+		DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV |
+				DPU_FORMAT_FLAG_COMPRESSED,
+		DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
+};
+
+static const struct dpu_format dpu_format_map_p010[] = {
+	PSEUDO_YUV_FMT_LOOSE(NV12,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C2_R_Cr,
+		DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX),
+		DPU_FETCH_LINEAR, 2),
+};
+
+static const struct dpu_format dpu_format_map_p010_ubwc[] = {
+	PSEUDO_YUV_FMT_LOOSE_TILED(NV12,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C2_R_Cr,
+		DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX |
+				DPU_FORMAT_FLAG_COMPRESSED),
+		DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
+};
+
+static const struct dpu_format dpu_format_map_tp10_ubwc[] = {
+	PSEUDO_YUV_FMT_TILED(NV12,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C2_R_Cr,
+		DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX |
+				DPU_FORMAT_FLAG_COMPRESSED),
+		DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
+};
+
+/*
+ * _dpu_get_v_h_subsample_rate - Get subsample rates for all formats we
+ * support. Note: not using the drm_format_*_subsampling helpers since
+ * we support formats they do not describe.
+ */
+static void _dpu_get_v_h_subsample_rate(
+	enum dpu_chroma_samp_type chroma_sample,
+	uint32_t *v_sample,
+	uint32_t *h_sample)
+{
+	if (!v_sample || !h_sample)
+		return;
+
+	switch (chroma_sample) {
+	case DPU_CHROMA_H2V1:
+		*v_sample = 1;
+		*h_sample = 2;
+		break;
+	case DPU_CHROMA_H1V2:
+		*v_sample = 2;
+		*h_sample = 1;
+		break;
+	case DPU_CHROMA_420:
+		*v_sample = 2;
+		*h_sample = 2;
+		break;
+	default:
+		*v_sample = 1;
+		*h_sample = 1;
+		break;
+	}
+}
+
+static int _dpu_format_get_media_color_ubwc(const struct dpu_format *fmt)
+{
+	static const struct dpu_media_color_map dpu_media_ubwc_map[] = {
+		{DRM_FORMAT_ABGR8888, COLOR_FMT_RGBA8888_UBWC},
+		{DRM_FORMAT_XBGR8888, COLOR_FMT_RGBA8888_UBWC},
+		{DRM_FORMAT_ABGR2101010, COLOR_FMT_RGBA1010102_UBWC},
+		{DRM_FORMAT_XBGR2101010, COLOR_FMT_RGBA1010102_UBWC},
+		{DRM_FORMAT_BGR565, COLOR_FMT_RGB565_UBWC},
+	};
+	int color_fmt = -1;
+	int i;
+
+	if (fmt->base.pixel_format == DRM_FORMAT_NV12) {
+		if (DPU_FORMAT_IS_DX(fmt)) {
+			if (fmt->unpack_tight)
+				color_fmt = COLOR_FMT_NV12_BPP10_UBWC;
+			else
+				color_fmt = COLOR_FMT_P010_UBWC;
+		} else
+			color_fmt = COLOR_FMT_NV12_UBWC;
+		return color_fmt;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(dpu_media_ubwc_map); ++i)
+		if (fmt->base.pixel_format == dpu_media_ubwc_map[i].format) {
+			color_fmt = dpu_media_ubwc_map[i].color;
+			break;
+		}
+	return color_fmt;
+}
+
+static int _dpu_format_get_plane_sizes_ubwc(
+		const struct dpu_format *fmt,
+		const uint32_t width,
+		const uint32_t height,
+		struct dpu_hw_fmt_layout *layout)
+{
+	int i;
+	int color;
+	bool meta = DPU_FORMAT_IS_UBWC(fmt);
+
+	memset(layout, 0, sizeof(struct dpu_hw_fmt_layout));
+	layout->format = fmt;
+	layout->width = width;
+	layout->height = height;
+	layout->num_planes = fmt->num_planes;
+
+	color = _dpu_format_get_media_color_ubwc(fmt);
+	if (color < 0) {
+		DRM_ERROR("UBWC format not supported for fmt: %4.4s\n",
+			(char *)&fmt->base.pixel_format);
+		return -EINVAL;
+	}
+
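+	/*
+	 * Plane index convention below: planes 0/1 hold the bitstream
+	 * data, planes 2/3 hold the UBWC metadata (see the layout in
+	 * _dpu_format_populate_addrs_ubwc).
+	 */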
+	if (DPU_FORMAT_IS_YUV(layout->format)) {
+		uint32_t y_sclines, uv_sclines;
+		uint32_t y_meta_scanlines = 0;
+		uint32_t uv_meta_scanlines = 0;
+
+		layout->num_planes = 2;
+		layout->plane_pitch[0] = VENUS_Y_STRIDE(color, width);
+		y_sclines = VENUS_Y_SCANLINES(color, height);
+		layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
+			y_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+		layout->plane_pitch[1] = VENUS_UV_STRIDE(color, width);
+		uv_sclines = VENUS_UV_SCANLINES(color, height);
+		layout->plane_size[1] = MSM_MEDIA_ALIGN(layout->plane_pitch[1] *
+			uv_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+		if (!meta)
+			goto done;
+
+		layout->num_planes += 2;
+		layout->plane_pitch[2] = VENUS_Y_META_STRIDE(color, width);
+		y_meta_scanlines = VENUS_Y_META_SCANLINES(color, height);
+		layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
+			y_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+		layout->plane_pitch[3] = VENUS_UV_META_STRIDE(color, width);
+		uv_meta_scanlines = VENUS_UV_META_SCANLINES(color, height);
+		layout->plane_size[3] = MSM_MEDIA_ALIGN(layout->plane_pitch[3] *
+			uv_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+	} else {
+		uint32_t rgb_scanlines, rgb_meta_scanlines;
+
+		layout->num_planes = 1;
+
+		layout->plane_pitch[0] = VENUS_RGB_STRIDE(color, width);
+		rgb_scanlines = VENUS_RGB_SCANLINES(color, height);
+		layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
+			rgb_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+		if (!meta)
+			goto done;
+		layout->num_planes += 2;
+		layout->plane_pitch[2] = VENUS_RGB_META_STRIDE(color, width);
+		rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color, height);
+		layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
+			rgb_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+	}
+
+done:
+	for (i = 0; i < DPU_MAX_PLANES; i++)
+		layout->total_size += layout->plane_size[i];
+
+	return 0;
+}
+
+static int _dpu_format_get_plane_sizes_linear(
+		const struct dpu_format *fmt,
+		const uint32_t width,
+		const uint32_t height,
+		struct dpu_hw_fmt_layout *layout,
+		const uint32_t *pitches)
+{
+	int i;
+
+	memset(layout, 0, sizeof(struct dpu_hw_fmt_layout));
+	layout->format = fmt;
+	layout->width = width;
+	layout->height = height;
+	layout->num_planes = fmt->num_planes;
+
+	/* Due to memset above, only need to set planes of interest */
+	if (fmt->fetch_planes == DPU_PLANE_INTERLEAVED) {
+		layout->num_planes = 1;
+		layout->plane_size[0] = width * height * layout->format->bpp;
+		layout->plane_pitch[0] = width * layout->format->bpp;
+	} else {
+		uint32_t v_subsample, h_subsample;
+		uint32_t chroma_samp;
+		uint32_t bpp = 1;
+
+		chroma_samp = fmt->chroma_sample;
+		_dpu_get_v_h_subsample_rate(chroma_samp, &v_subsample,
+				&h_subsample);
+
+		if (width % h_subsample || height % v_subsample) {
+			DRM_ERROR("mismatch in subsample vs dimensions\n");
+			return -EINVAL;
+		}
+
+		if ((fmt->base.pixel_format == DRM_FORMAT_NV12) &&
+			(DPU_FORMAT_IS_DX(fmt)))
+			bpp = 2;
+		layout->plane_pitch[0] = width * bpp;
+		layout->plane_pitch[1] = layout->plane_pitch[0] / h_subsample;
+		layout->plane_size[0] = layout->plane_pitch[0] * height;
+		layout->plane_size[1] = layout->plane_pitch[1] *
+				(height / v_subsample);
+
+		if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
+			layout->num_planes = 2;
+			layout->plane_size[1] *= 2;
+			layout->plane_pitch[1] *= 2;
+		} else {
+			/* planar */
+			layout->num_planes = 3;
+			layout->plane_size[2] = layout->plane_size[1];
+			layout->plane_pitch[2] = layout->plane_pitch[1];
+		}
+	}
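+
+	/*
+	 * Worked example (illustrative): a 128x128 NV12 buffer (bpp 1,
+	 * 2x2 subsampling) yields a Y plane of pitch 128 and size 16384,
+	 * and a CbCr pseudo plane of pitch 64 * 2 = 128 and size
+	 * 64 * 64 * 2 = 8192, for a total of 24576 bytes, i.e.
+	 * width * height * 3 / 2.
+	 */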
+
+	/*
+	 * linear format: allow user allocated pitches if they are greater than
+	 * the requirement.
+	 * ubwc format: pitch values are computed uniformly across
+	 * all the components based on ubwc specifications.
+	 */
+	for (i = 0; i < layout->num_planes && i < DPU_MAX_PLANES; ++i) {
+		if (pitches && layout->plane_pitch[i] < pitches[i])
+			layout->plane_pitch[i] = pitches[i];
+	}
+
+	for (i = 0; i < DPU_MAX_PLANES; i++)
+		layout->total_size += layout->plane_size[i];
+
+	return 0;
+}
+
+int dpu_format_get_plane_sizes(
+		const struct dpu_format *fmt,
+		const uint32_t w,
+		const uint32_t h,
+		struct dpu_hw_fmt_layout *layout,
+		const uint32_t *pitches)
+{
+	if (!layout || !fmt) {
+		DRM_ERROR("invalid pointer\n");
+		return -EINVAL;
+	}
+
+	if ((w > DPU_MAX_IMG_WIDTH) || (h > DPU_MAX_IMG_HEIGHT)) {
+		DRM_ERROR("image dimensions outside max range\n");
+		return -ERANGE;
+	}
+
+	if (DPU_FORMAT_IS_UBWC(fmt) || DPU_FORMAT_IS_TILE(fmt))
+		return _dpu_format_get_plane_sizes_ubwc(fmt, w, h, layout);
+
+	return _dpu_format_get_plane_sizes_linear(fmt, w, h, layout, pitches);
+}
+
+int dpu_format_get_block_size(const struct dpu_format *fmt,
+		uint32_t *w, uint32_t *h)
+{
+	if (!fmt || !w || !h) {
+		DRM_ERROR("invalid pointer\n");
+		return -EINVAL;
+	}
+
+	/* TP10 is 96x96 and all others are 128x128 */
+	if (DPU_FORMAT_IS_YUV(fmt) && DPU_FORMAT_IS_DX(fmt) &&
+			(fmt->num_planes == 2) && fmt->unpack_tight)
+		*w = *h = 96;
+	else
+		*w = *h = 128;
+
+	return 0;
+}
+
+uint32_t dpu_format_get_framebuffer_size(
+		const uint32_t format,
+		const uint32_t width,
+		const uint32_t height,
+		const uint32_t *pitches,
+		const uint64_t modifiers)
+{
+	const struct dpu_format *fmt;
+	struct dpu_hw_fmt_layout layout;
+
+	fmt = dpu_get_dpu_format_ext(format, modifiers);
+	if (!fmt)
+		return 0;
+
+	if (!pitches)
+		return 0;
+
+	if (dpu_format_get_plane_sizes(fmt, width, height, &layout, pitches))
+		layout.total_size = 0;
+
+	return layout.total_size;
+}
+
+static int _dpu_format_populate_addrs_ubwc(
+		struct msm_gem_address_space *aspace,
+		struct drm_framebuffer *fb,
+		struct dpu_hw_fmt_layout *layout)
+{
+	uint32_t base_addr = 0;
+	bool meta;
+
+	if (!fb || !layout) {
+		DRM_ERROR("invalid pointers\n");
+		return -EINVAL;
+	}
+
+	if (aspace)
+		base_addr = msm_framebuffer_iova(fb, aspace, 0);
+	if (!base_addr) {
+		DRM_ERROR("failed to retrieve base addr\n");
+		return -EFAULT;
+	}
+
+	meta = DPU_FORMAT_IS_UBWC(layout->format);
+
+	/* Per-format logic for verifying active planes */
+	if (DPU_FORMAT_IS_YUV(layout->format)) {
+		/************************************************/
+		/*      UBWC            **                      */
+		/*      buffer          **      DPU PLANE       */
+		/*      format          **                      */
+		/************************************************/
+		/* -------------------  ** -------------------- */
+		/* |      Y meta     |  ** |    Y bitstream   | */
+		/* |       data      |  ** |       plane      | */
+		/* -------------------  ** -------------------- */
+		/* |    Y bitstream  |  ** |  CbCr bitstream  | */
+		/* |       data      |  ** |       plane      | */
+		/* -------------------  ** -------------------- */
+		/* |   Cbcr metadata |  ** |       Y meta     | */
+		/* |       data      |  ** |       plane      | */
+		/* -------------------  ** -------------------- */
+		/* |  CbCr bitstream |  ** |     CbCr meta    | */
+		/* |       data      |  ** |       plane      | */
+		/* -------------------  ** -------------------- */
+		/************************************************/
+
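+		/*
+		 * Per the diagram above the buffer holds, in order: Y meta
+		 * (plane_size[2]), Y bitstream (plane_size[0]), CbCr meta
+		 * (plane_size[3]) and CbCr bitstream (plane_size[1]); the
+		 * offsets below follow directly from that ordering.
+		 */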
+		/* configure Y bitstream plane */
+		layout->plane_addr[0] = base_addr + layout->plane_size[2];
+
+		/* configure CbCr bitstream plane */
+		layout->plane_addr[1] = base_addr + layout->plane_size[0]
+			+ layout->plane_size[2] + layout->plane_size[3];
+
+		if (!meta)
+			goto done;
+
+		/* configure Y metadata plane */
+		layout->plane_addr[2] = base_addr;
+
+		/* configure CbCr metadata plane */
+		layout->plane_addr[3] = base_addr + layout->plane_size[0]
+			+ layout->plane_size[2];
+
+	} else {
+		/************************************************/
+		/*      UBWC            **                      */
+		/*      buffer          **      DPU PLANE       */
+		/*      format          **                      */
+		/************************************************/
+		/* -------------------  ** -------------------- */
+		/* |      RGB meta   |  ** |   RGB bitstream  | */
+		/* |       data      |  ** |       plane      | */
+		/* -------------------  ** -------------------- */
+		/* |  RGB bitstream  |  ** |       NONE       | */
+		/* |       data      |  ** |                  | */
+		/* -------------------  ** -------------------- */
+		/*                      ** |     RGB meta     | */
+		/*                      ** |       plane      | */
+		/*                      ** -------------------- */
+		/************************************************/
+
+		layout->plane_addr[0] = base_addr + layout->plane_size[2];
+		layout->plane_addr[1] = 0;
+
+		if (!meta)
+			goto done;
+
+		layout->plane_addr[2] = base_addr;
+		layout->plane_addr[3] = 0;
+	}
+done:
+	return 0;
+}
+
+static int _dpu_format_populate_addrs_linear(
+		struct msm_gem_address_space *aspace,
+		struct drm_framebuffer *fb,
+		struct dpu_hw_fmt_layout *layout)
+{
+	unsigned int i;
+
+	/* Can now check the pitches given vs pitches expected */
+	for (i = 0; i < layout->num_planes; ++i) {
+		if (layout->plane_pitch[i] > fb->pitches[i]) {
+			DRM_ERROR("plane %u expected pitch %u, fb %u\n",
+				i, layout->plane_pitch[i], fb->pitches[i]);
+			return -EINVAL;
+		}
+	}
+
+	/* Populate addresses for simple formats here */
+	for (i = 0; i < layout->num_planes; ++i) {
+		if (aspace)
+			layout->plane_addr[i] =
+				msm_framebuffer_iova(fb, aspace, i);
+		if (!layout->plane_addr[i]) {
+			DRM_ERROR("failed to retrieve base addr\n");
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
+int dpu_format_populate_layout(
+		struct msm_gem_address_space *aspace,
+		struct drm_framebuffer *fb,
+		struct dpu_hw_fmt_layout *layout)
+{
+	uint32_t plane_addr[DPU_MAX_PLANES];
+	int i, ret;
+
+	if (!fb || !layout) {
+		DRM_ERROR("invalid arguments\n");
+		return -EINVAL;
+	}
+
+	if ((fb->width > DPU_MAX_IMG_WIDTH) ||
+			(fb->height > DPU_MAX_IMG_HEIGHT)) {
+		DRM_ERROR("image dimensions outside max range\n");
+		return -ERANGE;
+	}
+
+	layout->format = to_dpu_format(msm_framebuffer_format(fb));
+
+	/* Populate the plane sizes etc via get_format */
+	ret = dpu_format_get_plane_sizes(layout->format, fb->width, fb->height,
+			layout, fb->pitches);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < DPU_MAX_PLANES; ++i)
+		plane_addr[i] = layout->plane_addr[i];
+
+	/* Populate the addresses given the fb */
+	if (DPU_FORMAT_IS_UBWC(layout->format) ||
+			DPU_FORMAT_IS_TILE(layout->format))
+		ret = _dpu_format_populate_addrs_ubwc(aspace, fb, layout);
+	else
+		ret = _dpu_format_populate_addrs_linear(aspace, fb, layout);
+
+	/* return -EAGAIN if the plane addresses have not changed */
+	if (!ret && !memcmp(plane_addr, layout->plane_addr, sizeof(plane_addr)))
+		ret = -EAGAIN;
+
+	return ret;
+}
+
+int dpu_format_check_modified_format(
+		const struct msm_kms *kms,
+		const struct msm_format *msm_fmt,
+		const struct drm_mode_fb_cmd2 *cmd,
+		struct drm_gem_object **bos)
+{
+	int ret, i, num_base_fmt_planes;
+	const struct dpu_format *fmt;
+	struct dpu_hw_fmt_layout layout;
+	uint32_t bos_total_size = 0;
+
+	if (!msm_fmt || !cmd || !bos) {
+		DRM_ERROR("invalid arguments\n");
+		return -EINVAL;
+	}
+
+	fmt = to_dpu_format(msm_fmt);
+	num_base_fmt_planes = drm_format_num_planes(fmt->base.pixel_format);
+
+	ret = dpu_format_get_plane_sizes(fmt, cmd->width, cmd->height,
+			&layout, cmd->pitches);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < num_base_fmt_planes; i++) {
+		if (!bos[i]) {
+			DRM_ERROR("invalid handle for plane %d\n", i);
+			return -EINVAL;
+		}
+		if ((i == 0) || (bos[i] != bos[0]))
+			bos_total_size += bos[i]->size;
+	}
+
+	if (bos_total_size < layout.total_size) {
+		DRM_ERROR("buffers total size too small %u expected %u\n",
+				bos_total_size, layout.total_size);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+const struct dpu_format *dpu_get_dpu_format_ext(
+		const uint32_t format,
+		const uint64_t modifier)
+{
+	uint32_t i = 0;
+	const struct dpu_format *fmt = NULL;
+	const struct dpu_format *map = NULL;
+	ssize_t map_size = 0;
+
+	/*
+	 * Currently only support exactly zero or one modifier.
+	 * All planes use the same modifier.
+	 */
+	DPU_DEBUG("plane format modifier 0x%llX\n", modifier);
+
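+	/*
+	 * Example: DRM_FORMAT_NV12 combined with (DRM_FORMAT_MOD_QCOM_DX |
+	 * DRM_FORMAT_MOD_QCOM_COMPRESSED | DRM_FORMAT_MOD_QCOM_TIGHT)
+	 * selects the TP10 UBWC map below.
+	 */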
+	switch (modifier) {
+	case 0:
+		map = dpu_format_map;
+		map_size = ARRAY_SIZE(dpu_format_map);
+		break;
+	case DRM_FORMAT_MOD_QCOM_COMPRESSED:
+	case DRM_FORMAT_MOD_QCOM_COMPRESSED | DRM_FORMAT_MOD_QCOM_TILE:
+		map = dpu_format_map_ubwc;
+		map_size = ARRAY_SIZE(dpu_format_map_ubwc);
+		DPU_DEBUG("found fmt: %4.4s  DRM_FORMAT_MOD_QCOM_COMPRESSED\n",
+				(char *)&format);
+		break;
+	case DRM_FORMAT_MOD_QCOM_DX:
+		map = dpu_format_map_p010;
+		map_size = ARRAY_SIZE(dpu_format_map_p010);
+		DPU_DEBUG("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_DX\n",
+				(char *)&format);
+		break;
+	case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED):
+	case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED |
+			DRM_FORMAT_MOD_QCOM_TILE):
+		map = dpu_format_map_p010_ubwc;
+		map_size = ARRAY_SIZE(dpu_format_map_p010_ubwc);
+		DPU_DEBUG(
+			"found fmt: %4.4s DRM_FORMAT_MOD_QCOM_COMPRESSED/DX\n",
+				(char *)&format);
+		break;
+	case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED |
+		DRM_FORMAT_MOD_QCOM_TIGHT):
+	case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED |
+		DRM_FORMAT_MOD_QCOM_TIGHT | DRM_FORMAT_MOD_QCOM_TILE):
+		map = dpu_format_map_tp10_ubwc;
+		map_size = ARRAY_SIZE(dpu_format_map_tp10_ubwc);
+		DPU_DEBUG(
+			"found fmt: %4.4s DRM_FORMAT_MOD_QCOM_COMPRESSED/DX/TIGHT\n",
+				(char *)&format);
+		break;
+	case DRM_FORMAT_MOD_QCOM_TILE:
+		map = dpu_format_map_tile;
+		map_size = ARRAY_SIZE(dpu_format_map_tile);
+		DPU_DEBUG("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_TILE\n",
+				(char *)&format);
+		break;
+	case (DRM_FORMAT_MOD_QCOM_TILE | DRM_FORMAT_MOD_QCOM_DX):
+		map = dpu_format_map_p010_tile;
+		map_size = ARRAY_SIZE(dpu_format_map_p010_tile);
+		DPU_DEBUG("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_TILE/DX\n",
+				(char *)&format);
+		break;
+	case (DRM_FORMAT_MOD_QCOM_TILE | DRM_FORMAT_MOD_QCOM_DX |
+			DRM_FORMAT_MOD_QCOM_TIGHT):
+		map = dpu_format_map_tp10_tile;
+		map_size = ARRAY_SIZE(dpu_format_map_tp10_tile);
+		DPU_DEBUG(
+			"found fmt: %4.4s DRM_FORMAT_MOD_QCOM_TILE/DX/TIGHT\n",
+				(char *)&format);
+		break;
+	default:
+		DPU_ERROR("unsupported format modifier %llX\n", modifier);
+		return NULL;
+	}
+
+	for (i = 0; i < map_size; i++) {
+		if (format == map[i].base.pixel_format) {
+			fmt = &map[i];
+			break;
+		}
+	}
+
+	if (fmt == NULL)
+		DPU_ERROR("unsupported fmt: %4.4s modifier 0x%llX\n",
+			(char *)&format, modifier);
+	else
+		DPU_DEBUG("fmt %4.4s mod 0x%llX ubwc %d yuv %d\n",
+				(char *)&format, modifier,
+				DPU_FORMAT_IS_UBWC(fmt),
+				DPU_FORMAT_IS_YUV(fmt));
+
+	return fmt;
+}
+
+const struct msm_format *dpu_get_msm_format(
+		struct msm_kms *kms,
+		const uint32_t format,
+		const uint64_t modifiers)
+{
+	const struct dpu_format *fmt = dpu_get_dpu_format_ext(format,
+			modifiers);
+	if (fmt)
+		return &fmt->base;
+	return NULL;
+}
+
+uint32_t dpu_populate_formats(
+		const struct dpu_format_extended *format_list,
+		uint32_t *pixel_formats,
+		uint64_t *pixel_modifiers,
+		uint32_t pixel_formats_max)
+{
+	uint32_t i, fourcc_format;
+
+	if (!format_list || !pixel_formats)
+		return 0;
+
+	for (i = 0, fourcc_format = 0;
+			format_list->fourcc_format && i < pixel_formats_max;
+			++format_list) {
+		/* TODO: verify that the listed format is in dpu_format_map */
+
+		/* optionally return modified formats */
+		if (pixel_modifiers) {
+			/* assume same modifier for all fb planes */
+			pixel_formats[i] = format_list->fourcc_format;
+			pixel_modifiers[i++] = format_list->modifier;
+		} else {
+			/* assume base formats grouped together */
+			if (fourcc_format != format_list->fourcc_format) {
+				fourcc_format = format_list->fourcc_format;
+				pixel_formats[i++] = fourcc_format;
+			}
+		}
+	}
+
+	return i;
+}
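For illustration, a minimal caller sketch of the two modes of
dpu_populate_formats() (hypothetical code, not part of this patch;
MAX_FORMATS and the function name are invented):

/* Hypothetical illustration only -- not part of this patch. */
#define MAX_FORMATS 64

static int example_build_plane_formats(
		const struct dpu_format_extended *list)
{
	uint32_t formats[MAX_FORMATS];
	uint64_t modifiers[MAX_FORMATS];
	uint32_t num;

	/* with a modifier array: one (format, modifier) pair per entry */
	num = dpu_populate_formats(list, formats, modifiers, MAX_FORMATS);
	if (!num)
		return -EINVAL;

	/* without one: only unique base formats are copied out */
	num = dpu_populate_formats(list, formats, NULL, MAX_FORMATS);

	return num ? 0 : -EINVAL;
}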
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
new file mode 100644
index 000000000000..b55bfd13e296
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
@@ -0,0 +1,136 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_FORMATS_H
+#define _DPU_FORMATS_H
+
+#include <drm/drm_fourcc.h>
+#include "msm_gem.h"
+#include "dpu_hw_mdss.h"
+
+/**
+ * dpu_get_dpu_format_ext() - Returns dpu format structure pointer.
+ * @format:          DRM FourCC Code
+ * @modifier:        64-bit drm format modifier, same for all planes
+ */
+const struct dpu_format *dpu_get_dpu_format_ext(
+		const uint32_t format,
+		const uint64_t modifier);
+
+#define dpu_get_dpu_format(f) dpu_get_dpu_format_ext(f, 0)
+
+/**
+ * dpu_get_msm_format - get a dpu_format by its msm_format base;
+ *                     callback function registered with the msm_kms layer
+ * @kms:             kms driver
+ * @format:          DRM FourCC Code
+ * @modifiers:       data layout modifier
+ */
+const struct msm_format *dpu_get_msm_format(
+		struct msm_kms *kms,
+		const uint32_t format,
+		const uint64_t modifiers);
+
+/**
+ * dpu_populate_formats - populate the given array with fourcc codes supported
+ * @format_list:       pointer to list of possible formats
+ * @pixel_formats:     array to populate with fourcc codes
+ * @pixel_modifiers:   array to populate with drm modifiers, can be NULL
+ * @pixel_formats_max: length of pixel formats array
+ * Return: number of elements populated
+ */
+uint32_t dpu_populate_formats(
+		const struct dpu_format_extended *format_list,
+		uint32_t *pixel_formats,
+		uint64_t *pixel_modifiers,
+		uint32_t pixel_formats_max);
+
+/**
+ * dpu_format_get_plane_sizes - calculate size and layout of given buffer format
+ * @fmt:             pointer to dpu_format
+ * @w:               width of the buffer
+ * @h:               height of the buffer
+ * @layout:          layout of the buffer
+ * @pitches:         array of size [DPU_MAX_PLANES] to populate
+ *		     pitch for each plane
+ *
+ * Return: 0 if success; error code otherwise
+ */
+int dpu_format_get_plane_sizes(
+		const struct dpu_format *fmt,
+		const uint32_t w,
+		const uint32_t h,
+		struct dpu_hw_fmt_layout *layout,
+		const uint32_t *pitches);
+
+/**
+ * dpu_format_get_block_size - get block size of given format when
+ *	operating in block mode
+ * @fmt:             pointer to dpu_format
+ * @w:               pointer to width of the block
+ * @h:               pointer to height of the block
+ *
+ * Return: 0 if success; error code otherwise
+ */
+int dpu_format_get_block_size(const struct dpu_format *fmt,
+		uint32_t *w, uint32_t *h);
+
+/**
+ * dpu_format_check_modified_format - validate format and buffers for
+ *                   dpu non-standard, i.e. modified format
+ * @kms:             kms driver
+ * @msm_fmt:         pointer to the msm_format base of a dpu_format
+ * @cmd:             fb_cmd2 structure user request
+ * @bos:             gem buffer object list
+ *
+ * Return: error code on failure, 0 on success
+ */
+int dpu_format_check_modified_format(
+		const struct msm_kms *kms,
+		const struct msm_format *msm_fmt,
+		const struct drm_mode_fb_cmd2 *cmd,
+		struct drm_gem_object **bos);
+
+/**
+ * dpu_format_populate_layout - populate the given format layout based on
+ *                     mmu, fb, and format found in the fb
+ * @aspace:            address space pointer
+ * @fb:                framebuffer pointer
+ * @fmtl:              format layout structure to populate
+ *
+ * Return: error code on failure, -EAGAIN if success but the addresses
+ *         are the same as before or 0 if new addresses were populated
+ */
+int dpu_format_populate_layout(
+		struct msm_gem_address_space *aspace,
+		struct drm_framebuffer *fb,
+		struct dpu_hw_fmt_layout *fmtl);
+
+/**
+ * dpu_format_get_framebuffer_size - get framebuffer memory size
+ * @format:            DRM pixel format
+ * @width:             pixel width
+ * @height:            pixel height
+ * @pitches:           array of size [DPU_MAX_PLANES] to populate
+ *		       pitch for each plane
+ * @modifiers:         drm modifier
+ *
+ * Return: memory size required for frame buffer
+ */
+uint32_t dpu_format_get_framebuffer_size(
+		const uint32_t format,
+		const uint32_t width,
+		const uint32_t height,
+		const uint32_t *pitches,
+		const uint64_t modifiers);
+
+#endif /*_DPU_FORMATS_H */
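To make the -EAGAIN convention documented for dpu_format_populate_layout()
concrete, a hypothetical caller sketch (invented name, not part of this
patch):

/* Hypothetical illustration only -- not part of this patch. */
static int example_populate(struct msm_gem_address_space *aspace,
			    struct drm_framebuffer *fb,
			    struct dpu_hw_fmt_layout *layout)
{
	int ret = dpu_format_populate_layout(aspace, fb, layout);

	/* -EAGAIN means success with unchanged plane addresses, so a
	 * caller may skip reprogramming the fetch registers.
	 */
	if (ret == -EAGAIN)
		return 0;

	return ret; /* 0 with new addresses, or a real error */
}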
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
new file mode 100644
index 000000000000..58d29e43faef
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
@@ -0,0 +1,155 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_blk.h"
+
+/* Serialization lock for dpu_hw_blk_list */
+static DEFINE_MUTEX(dpu_hw_blk_lock);
+
+/* List of all hw block objects */
+static LIST_HEAD(dpu_hw_blk_list);
+
+/**
+ * dpu_hw_blk_init - initialize hw block object
+ * @hw_blk: pointer to hw block object to initialize
+ * @type: hw block type - enum dpu_hw_blk_type
+ * @id: instance id of the hw block
+ * @ops: Pointer to block operations
+ * return: 0 if success; error code otherwise
+ */
+int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+		struct dpu_hw_blk_ops *ops)
+{
+	if (!hw_blk) {
+		pr_err("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	INIT_LIST_HEAD(&hw_blk->list);
+	hw_blk->type = type;
+	hw_blk->id = id;
+	atomic_set(&hw_blk->refcount, 0);
+
+	if (ops)
+		hw_blk->ops = *ops;
+
+	mutex_lock(&dpu_hw_blk_lock);
+	list_add(&hw_blk->list, &dpu_hw_blk_list);
+	mutex_unlock(&dpu_hw_blk_lock);
+
+	return 0;
+}
+
+/**
+ * dpu_hw_blk_destroy - destroy hw block object.
+ * @hw_blk:  pointer to hw block object
+ * return: none
+ */
+void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk)
+{
+	if (!hw_blk) {
+		pr_err("invalid parameters\n");
+		return;
+	}
+
+	if (atomic_read(&hw_blk->refcount))
+		pr_err("hw_blk:%d.%d invalid refcount\n", hw_blk->type,
+				hw_blk->id);
+
+	mutex_lock(&dpu_hw_blk_lock);
+	list_del(&hw_blk->list);
+	mutex_unlock(&dpu_hw_blk_lock);
+}
+
+/**
+ * dpu_hw_blk_get - get hw_blk from free pool
+ * @hw_blk: if specified, increment reference count only
+ * @type: if hw_blk is not specified, allocate the next available of this type
+ * @id: if specified (>= 0), allocate the given instance of the above type
+ * return: pointer to hw block object
+ */
+struct dpu_hw_blk *dpu_hw_blk_get(struct dpu_hw_blk *hw_blk, u32 type, int id)
+{
+	struct dpu_hw_blk *curr;
+	int rc, refcount;
+
+	if (!hw_blk) {
+		mutex_lock(&dpu_hw_blk_lock);
+		list_for_each_entry(curr, &dpu_hw_blk_list, list) {
+			if ((curr->type != type) ||
+					(id >= 0 && curr->id != id) ||
+					(id < 0 &&
+						atomic_read(&curr->refcount)))
+				continue;
+
+			hw_blk = curr;
+			break;
+		}
+		mutex_unlock(&dpu_hw_blk_lock);
+	}
+
+	if (!hw_blk) {
+		pr_debug("no hw_blk:%d\n", type);
+		return NULL;
+	}
+
+	refcount = atomic_inc_return(&hw_blk->refcount);
+
+	if (refcount == 1 && hw_blk->ops.start) {
+		rc = hw_blk->ops.start(hw_blk);
+		if (rc) {
+			pr_err("failed to start  hw_blk:%d rc:%d\n", type, rc);
+			goto error_start;
+		}
+	}
+
+	pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type,
+			hw_blk->id, refcount);
+	return hw_blk;
+
+error_start:
+	dpu_hw_blk_put(hw_blk);
+	return ERR_PTR(rc);
+}
+
+/**
+ * dpu_hw_blk_put - put hw_blk to free pool if decremented refcount is zero
+ * @hw_blk: hw block to be freed
+ */
+void dpu_hw_blk_put(struct dpu_hw_blk *hw_blk)
+{
+	if (!hw_blk) {
+		pr_err("invalid parameters\n");
+		return;
+	}
+
+	pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type, hw_blk->id,
+			atomic_read(&hw_blk->refcount));
+
+	if (!atomic_read(&hw_blk->refcount)) {
+		pr_err("hw_blk:%d.%d invalid put\n", hw_blk->type, hw_blk->id);
+		return;
+	}
+
+	if (atomic_dec_return(&hw_blk->refcount))
+		return;
+
+	if (hw_blk->ops.stop)
+		hw_blk->ops.stop(hw_blk);
+}
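To spell out the refcount contract, a hypothetical usage sketch
(example_type stands in for a real block type value; not part of this
patch): the first get on a block runs ops.start, the last put runs
ops.stop.

/* Hypothetical illustration only -- not part of this patch. */
static int example_blk_usage(u32 example_type)
{
	struct dpu_hw_blk *blk;

	/* id < 0: take any unreferenced instance of this type */
	blk = dpu_hw_blk_get(NULL, example_type, -1);
	if (!blk)
		return -ENODEV;		/* no such block registered */
	if (IS_ERR(blk))
		return PTR_ERR(blk);	/* ops.start failed */

	/* ... program the hardware block ... */

	dpu_hw_blk_put(blk);		/* ops.stop runs on the last put */
	return 0;
}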
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
new file mode 100644
index 000000000000..0f4ca8af1ec5
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_BLK_H
+#define _DPU_HW_BLK_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+
+struct dpu_hw_blk;
+
+/**
+ * struct dpu_hw_blk_ops - common hardware block operations
+ * @start: start operation on first get
+ * @stop: stop operation on last put
+ */
+struct dpu_hw_blk_ops {
+	int (*start)(struct dpu_hw_blk *);
+	void (*stop)(struct dpu_hw_blk *);
+};
+
+/**
+ * struct dpu_hw_blk - definition of hardware block object
+ * @list: list of hardware blocks
+ * @type: hardware block type
+ * @id: instance id
+ * @refcount: reference/usage count
+ */
+struct dpu_hw_blk {
+	struct list_head list;
+	u32 type;
+	int id;
+	atomic_t refcount;
+	struct dpu_hw_blk_ops ops;
+};
+
+int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+		struct dpu_hw_blk_ops *ops);
+void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk);
+
+struct dpu_hw_blk *dpu_hw_blk_get(struct dpu_hw_blk *hw_blk, u32 type, int id);
+void dpu_hw_blk_put(struct dpu_hw_blk *hw_blk);
+#endif /*_DPU_HW_BLK_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
new file mode 100644
index 000000000000..1793cfd29a07
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -0,0 +1,511 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_catalog_format.h"
+#include "dpu_kms.h"
+
+#define VIG_SDM845_MASK \
+	(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_SCALER_QSEED3) | BIT(DPU_SSPP_QOS) |\
+	BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_QOS_8LVL) |\
+	BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))
+
+#define DMA_SDM845_MASK \
+	(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
+	BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
+	BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
+
+#define MIXER_SDM845_MASK \
+	(BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER))
+
+#define PINGPONG_SDM845_MASK BIT(DPU_PINGPONG_DITHER)
+
+#define PINGPONG_SDM845_SPLIT_MASK \
+	(PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2))
+
+#define DEFAULT_PIXEL_RAM_SIZE		(50 * 1024)
+#define DEFAULT_DPU_LINE_WIDTH		2048
+#define DEFAULT_DPU_OUTPUT_LINE_WIDTH	2560
+
+#define MAX_HORZ_DECIMATION	4
+#define MAX_VERT_DECIMATION	4
+
+#define MAX_UPSCALE_RATIO	20
+#define MAX_DOWNSCALE_RATIO	4
+#define SSPP_UNITY_SCALE	1
+
+#define STRCAT(X, Y) (X Y)
+
+/*************************************************************
+ * DPU sub blocks config
+ *************************************************************/
+/* DPU top level caps */
+static const struct dpu_caps sdm845_dpu_caps = {
+	.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+	.max_mixer_blendstages = 0xb,
+	.qseed_type = DPU_SSPP_SCALER_QSEED3,
+	.smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
+	.ubwc_version = DPU_HW_UBWC_VER_20,
+	.has_src_split = true,
+	.has_dim_layer = true,
+	.has_idle_pc = true,
+};
+
+static struct dpu_mdp_cfg sdm845_mdp[] = {
+	{
+	.name = "top_0", .id = MDP_TOP,
+	.base = 0x0, .len = 0x45C,
+	.features = 0,
+	.highest_bank_bit = 0x2,
+	.has_dest_scaler = true,
+	.clk_ctrls[DPU_CLK_CTRL_VIG0] = {
+			.reg_off = 0x2AC, .bit_off = 0},
+	.clk_ctrls[DPU_CLK_CTRL_VIG1] = {
+			.reg_off = 0x2B4, .bit_off = 0},
+	.clk_ctrls[DPU_CLK_CTRL_VIG2] = {
+			.reg_off = 0x2BC, .bit_off = 0},
+	.clk_ctrls[DPU_CLK_CTRL_VIG3] = {
+			.reg_off = 0x2C4, .bit_off = 0},
+	.clk_ctrls[DPU_CLK_CTRL_DMA0] = {
+			.reg_off = 0x2AC, .bit_off = 8},
+	.clk_ctrls[DPU_CLK_CTRL_DMA1] = {
+			.reg_off = 0x2B4, .bit_off = 8},
+	.clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+			.reg_off = 0x2BC, .bit_off = 8},
+	.clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+			.reg_off = 0x2C4, .bit_off = 8},
+	},
+};
+
+/*************************************************************
+ * CTL sub blocks config
+ *************************************************************/
+static struct dpu_ctl_cfg sdm845_ctl[] = {
+	{
+	.name = "ctl_0", .id = CTL_0,
+	.base = 0x1000, .len = 0xE4,
+	.features = BIT(DPU_CTL_SPLIT_DISPLAY)
+	},
+	{
+	.name = "ctl_1", .id = CTL_1,
+	.base = 0x1200, .len = 0xE4,
+	.features = BIT(DPU_CTL_SPLIT_DISPLAY)
+	},
+	{
+	.name = "ctl_2", .id = CTL_2,
+	.base = 0x1400, .len = 0xE4,
+	.features = 0
+	},
+	{
+	.name = "ctl_3", .id = CTL_3,
+	.base = 0x1600, .len = 0xE4,
+	.features = 0
+	},
+	{
+	.name = "ctl_4", .id = CTL_4,
+	.base = 0x1800, .len = 0xE4,
+	.features = 0
+	},
+};
+
+/*************************************************************
+ * SSPP sub blocks config
+ *************************************************************/
+
+/* SSPP common configuration */
+static const struct dpu_sspp_blks_common sdm845_sspp_common = {
+	.maxlinewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+	.pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+	.maxhdeciexp = MAX_HORZ_DECIMATION,
+	.maxvdeciexp = MAX_VERT_DECIMATION,
+};
+
+#define _VIG_SBLK(num, sdma_pri) \
+	{ \
+	.common = &sdm845_sspp_common, \
+	.maxdwnscale = MAX_DOWNSCALE_RATIO, \
+	.maxupscale = MAX_UPSCALE_RATIO, \
+	.smart_dma_priority = sdma_pri, \
+	.src_blk = {.name = STRCAT("sspp_src_", num), \
+		.id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
+	.scaler_blk = {.name = STRCAT("sspp_scaler", num), \
+		.id = DPU_SSPP_SCALER_QSEED3, \
+		.base = 0xa00, .len = 0xa0,}, \
+	.csc_blk = {.name = STRCAT("sspp_csc", num), \
+		.id = DPU_SSPP_CSC_10BIT, \
+		.base = 0x1a00, .len = 0x100,}, \
+	.format_list = plane_formats_yuv, \
+	.virt_format_list = plane_formats, \
+	}
+
+#define _DMA_SBLK(num, sdma_pri) \
+	{ \
+	.common = &sdm845_sspp_common, \
+	.maxdwnscale = SSPP_UNITY_SCALE, \
+	.maxupscale = SSPP_UNITY_SCALE, \
+	.smart_dma_priority = sdma_pri, \
+	.src_blk = {.name = STRCAT("sspp_src_", num), \
+		.id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
+	.format_list = plane_formats, \
+	.virt_format_list = plane_formats, \
+	}
+
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 = _VIG_SBLK("0", 5);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 = _VIG_SBLK("1", 6);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 = _VIG_SBLK("2", 7);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 = _VIG_SBLK("3", 8);
+
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK("8", 1);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK("9", 2);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK("10", 3);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK("11", 4);
+
+#define SSPP_VIG_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \
+	{ \
+	.name = _name, .id = _id, \
+	.base = _base, .len = 0x1c8, \
+	.features = VIG_SDM845_MASK, \
+	.sblk = &_sblk, \
+	.xin_id = _xinid, \
+	.type = SSPP_TYPE_VIG, \
+	.clk_ctrl = _clkctrl \
+	}
+
+#define SSPP_DMA_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \
+	{ \
+	.name = _name, .id = _id, \
+	.base = _base, .len = 0x1c8, \
+	.features = DMA_SDM845_MASK, \
+	.sblk = &_sblk, \
+	.xin_id = _xinid, \
+	.type = SSPP_TYPE_DMA, \
+	.clk_ctrl = _clkctrl \
+	}
+
+static struct dpu_sspp_cfg sdm845_sspp[] = {
+	SSPP_VIG_BLK("sspp_0", SSPP_VIG0, 0x4000,
+		sdm845_vig_sblk_0, 0, DPU_CLK_CTRL_VIG0),
+	SSPP_VIG_BLK("sspp_1", SSPP_VIG1, 0x6000,
+		sdm845_vig_sblk_1, 4, DPU_CLK_CTRL_VIG1),
+	SSPP_VIG_BLK("sspp_2", SSPP_VIG2, 0x8000,
+		sdm845_vig_sblk_2, 8, DPU_CLK_CTRL_VIG2),
+	SSPP_VIG_BLK("sspp_3", SSPP_VIG3, 0xa000,
+		sdm845_vig_sblk_3, 12, DPU_CLK_CTRL_VIG3),
+	SSPP_DMA_BLK("sspp_8", SSPP_DMA0, 0x24000,
+		sdm845_dma_sblk_0, 1, DPU_CLK_CTRL_DMA0),
+	SSPP_DMA_BLK("sspp_9", SSPP_DMA1, 0x26000,
+		sdm845_dma_sblk_1, 5, DPU_CLK_CTRL_DMA1),
+	SSPP_DMA_BLK("sspp_10", SSPP_DMA2, 0x28000,
+		sdm845_dma_sblk_2, 9, DPU_CLK_CTRL_CURSOR0),
+	SSPP_DMA_BLK("sspp_11", SSPP_DMA3, 0x2a000,
+		sdm845_dma_sblk_3, 13, DPU_CLK_CTRL_CURSOR1),
+};
+
+/*************************************************************
+ * MIXER sub blocks config
+ *************************************************************/
+static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
+	.maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+	.maxblendstages = 11, /* excluding base layer */
+	.blendstage_base = { /* offsets relative to mixer base */
+		0x20, 0x38, 0x50, 0x68, 0x80, 0x98,
+		0xb0, 0xc8, 0xe0, 0xf8, 0x110
+	},
+};
+
+#define LM_BLK(_name, _id, _base, _ds, _pp, _lmpair) \
+	{ \
+	.name = _name, .id = _id, \
+	.base = _base, .len = 0x320, \
+	.features = MIXER_SDM845_MASK, \
+	.sblk = &sdm845_lm_sblk, \
+	.ds = _ds, \
+	.pingpong = _pp, \
+	.lm_pair_mask = (1 << _lmpair) \
+	}
+
+static struct dpu_lm_cfg sdm845_lm[] = {
+	LM_BLK("lm_0", LM_0, 0x44000, DS_0, PINGPONG_0, LM_1),
+	LM_BLK("lm_1", LM_1, 0x45000, DS_1, PINGPONG_1, LM_0),
+	LM_BLK("lm_2", LM_2, 0x46000, DS_MAX, PINGPONG_2, LM_5),
+	LM_BLK("lm_3", LM_3, 0x0, DS_MAX, PINGPONG_MAX, 0),
+	LM_BLK("lm_4", LM_4, 0x0, DS_MAX, PINGPONG_MAX, 0),
+	LM_BLK("lm_5", LM_5, 0x49000, DS_MAX, PINGPONG_3, LM_2),
+};
+
+/*************************************************************
+ * DS sub blocks config
+ *************************************************************/
+static const struct dpu_ds_top_cfg sdm845_ds_top = {
+	.name = "ds_top_0", .id = DS_TOP,
+	.base = 0x60000, .len = 0xc,
+	.maxinputwidth = DEFAULT_DPU_LINE_WIDTH,
+	.maxoutputwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+	.maxupscale = MAX_UPSCALE_RATIO,
+};
+
+#define DS_BLK(_name, _id, _base) \
+	{\
+	.name = _name, .id = _id, \
+	.base = _base, .len = 0x800, \
+	.features = DPU_SSPP_SCALER_QSEED3, \
+	.top = &sdm845_ds_top \
+	}
+
+static struct dpu_ds_cfg sdm845_ds[] = {
+	DS_BLK("ds_0", DS_0, 0x800),
+	DS_BLK("ds_1", DS_1, 0x1000),
+};
+
+/*************************************************************
+ * PINGPONG sub blocks config
+ *************************************************************/
+static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = {
+	.te2 = {.id = DPU_PINGPONG_TE2, .base = 0x2000, .len = 0x0,
+		.version = 0x1},
+	.dither = {.id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+		.len = 0x20, .version = 0x10000},
+};
+
+static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = {
+	.dither = {.id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+		.len = 0x20, .version = 0x10000},
+};
+
+#define PP_BLK_TE(_name, _id, _base) \
+	{\
+	.name = _name, .id = _id, \
+	.base = _base, .len = 0xd4, \
+	.features = PINGPONG_SDM845_SPLIT_MASK, \
+	.sblk = &sdm845_pp_sblk_te \
+	}
+#define PP_BLK(_name, _id, _base) \
+	{\
+	.name = _name, .id = _id, \
+	.base = _base, .len = 0xd4, \
+	.features = PINGPONG_SDM845_MASK, \
+	.sblk = &sdm845_pp_sblk \
+	}
+
+static struct dpu_pingpong_cfg sdm845_pp[] = {
+	PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000),
+	PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800),
+	PP_BLK("pingpong_2", PINGPONG_2, 0x71000),
+	PP_BLK("pingpong_3", PINGPONG_3, 0x71800),
+};
+
+/*************************************************************
+ * INTF sub blocks config
+ *************************************************************/
+#define INTF_BLK(_name, _id, _base, _type, _ctrl_id) \
+	{\
+	.name = _name, .id = _id, \
+	.base = _base, .len = 0x280, \
+	.type = _type, \
+	.controller_id = _ctrl_id, \
+	.prog_fetch_lines_worst_case = 24 \
+	}
+
+static struct dpu_intf_cfg sdm845_intf[] = {
+	INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0),
+	INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0),
+	INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1),
+	INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1),
+};
+
+/*************************************************************
+ * CDM sub blocks config
+ *************************************************************/
+static struct dpu_cdm_cfg sdm845_cdm[] = {
+	{
+	.name = "cdm_0", .id = CDM_0,
+	.base = 0x79200, .len = 0x224,
+	.features = 0,
+	.intf_connect = BIT(INTF_3),
+	},
+};
+
+/*************************************************************
+ * VBIF sub blocks config
+ *************************************************************/
+/* VBIF QOS remap */
+static u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6};
+static u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3};
+
+static struct dpu_vbif_cfg sdm845_vbif[] = {
+	{
+	.name = "vbif_0", .id = VBIF_0,
+	.base = 0, .len = 0x1040,
+	.features = BIT(DPU_VBIF_QOS_REMAP),
+	.xin_halt_timeout = 0x4000,
+	.qos_rt_tbl = {
+		.npriority_lvl = ARRAY_SIZE(sdm845_rt_pri_lvl),
+		.priority_lvl = sdm845_rt_pri_lvl,
+		},
+	.qos_nrt_tbl = {
+		.npriority_lvl = ARRAY_SIZE(sdm845_nrt_pri_lvl),
+		.priority_lvl = sdm845_nrt_pri_lvl,
+		},
+	.memtype_count = 14,
+	.memtype = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3},
+	},
+};
+
+static struct dpu_reg_dma_cfg sdm845_regdma = {
+	.base = 0x0, .version = 0x1, .trigger_sel_off = 0x119c
+};
+
+/*************************************************************
+ * PERF data config
+ *************************************************************/
+
+/* SSPP QOS LUTs */
+static struct dpu_qos_lut_entry sdm845_qos_linear[] = {
+	{.fl = 4, .lut = 0x357},
+	{.fl = 5, .lut = 0x3357},
+	{.fl = 6, .lut = 0x23357},
+	{.fl = 7, .lut = 0x223357},
+	{.fl = 8, .lut = 0x2223357},
+	{.fl = 9, .lut = 0x22223357},
+	{.fl = 10, .lut = 0x222223357},
+	{.fl = 11, .lut = 0x2222223357},
+	{.fl = 12, .lut = 0x22222223357},
+	{.fl = 13, .lut = 0x222222223357},
+	{.fl = 14, .lut = 0x1222222223357},
+	{.fl = 0, .lut = 0x11222222223357}
+};
+
+static struct dpu_qos_lut_entry sdm845_qos_macrotile[] = {
+	{.fl = 10, .lut = 0x344556677},
+	{.fl = 11, .lut = 0x3344556677},
+	{.fl = 12, .lut = 0x23344556677},
+	{.fl = 13, .lut = 0x223344556677},
+	{.fl = 14, .lut = 0x1223344556677},
+	{.fl = 0, .lut = 0x112233344556677},
+};
+
+static struct dpu_qos_lut_entry sdm845_qos_nrt[] = {
+	{.fl = 0, .lut = 0x0},
+};
+
+static struct dpu_perf_cfg sdm845_perf_data = {
+	.max_bw_low = 6800000,
+	.max_bw_high = 6800000,
+	.min_core_ib = 2400000,
+	.min_llcc_ib = 800000,
+	.min_dram_ib = 800000,
+	.core_ib_ff = "6.0",
+	.core_clk_ff = "1.0",
+	.comp_ratio_rt =
+	"NV12/5/1/1.23 AB24/5/1/1.23 XB24/5/1/1.23",
+	.comp_ratio_nrt =
+	"NV12/5/1/1.25 AB24/5/1/1.25 XB24/5/1/1.25",
+	.undersized_prefill_lines = 2,
+	.xtra_prefill_lines = 2,
+	.dest_scale_prefill_lines = 3,
+	.macrotile_prefill_lines = 4,
+	.yuv_nv12_prefill_lines = 8,
+	.linear_prefill_lines = 1,
+	.downscaling_prefill_lines = 1,
+	.amortizable_threshold = 25,
+	.min_prefill_lines = 24,
+	.danger_lut_tbl = {0xf, 0xffff, 0x0},
+	.qos_lut_tbl = {
+		{.nentry = ARRAY_SIZE(sdm845_qos_linear),
+		.entries = sdm845_qos_linear
+		},
+		{.nentry = ARRAY_SIZE(sdm845_qos_macrotile),
+		.entries = sdm845_qos_macrotile
+		},
+		{.nentry = ARRAY_SIZE(sdm845_qos_nrt),
+		.entries = sdm845_qos_nrt
+		},
+	},
+	.cdp_cfg = {
+		{.rd_enable = 1, .wr_enable = 1},
+		{.rd_enable = 1, .wr_enable = 0}
+	},
+};
+
+/*************************************************************
+ * Hardware catalog init
+ *************************************************************/
+
+/*
+ * sdm845_cfg_init(): populate sdm845 dpu sub-blocks reg offsets
+ * and instance counts.
+ */
+void sdm845_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
+{
+	*dpu_cfg = (struct dpu_mdss_cfg){
+		.caps = &sdm845_dpu_caps,
+		.mdp_count = ARRAY_SIZE(sdm845_mdp),
+		.mdp = sdm845_mdp,
+		.ctl_count = ARRAY_SIZE(sdm845_ctl),
+		.ctl = sdm845_ctl,
+		.sspp_count = ARRAY_SIZE(sdm845_sspp),
+		.sspp = sdm845_sspp,
+		.mixer_count = ARRAY_SIZE(sdm845_lm),
+		.mixer = sdm845_lm,
+		.ds_count = ARRAY_SIZE(sdm845_ds),
+		.ds = sdm845_ds,
+		.pingpong_count = ARRAY_SIZE(sdm845_pp),
+		.pingpong = sdm845_pp,
+		.cdm_count = ARRAY_SIZE(sdm845_cdm),
+		.cdm = sdm845_cdm,
+		.intf_count = ARRAY_SIZE(sdm845_intf),
+		.intf = sdm845_intf,
+		.vbif_count = ARRAY_SIZE(sdm845_vbif),
+		.vbif = sdm845_vbif,
+		.reg_dma_count = 1,
+		.dma_cfg = sdm845_regdma,
+		.perf = sdm845_perf_data,
+	};
+}
+
+static struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
+	{ .hw_rev = DPU_HW_VER_400, .cfg_init = sdm845_cfg_init},
+	{ .hw_rev = DPU_HW_VER_401, .cfg_init = sdm845_cfg_init},
+};
+
+void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg)
+{
+	kfree(dpu_cfg);
+}
+
+struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev)
+{
+	int i;
+	struct dpu_mdss_cfg *dpu_cfg;
+
+	dpu_cfg = kzalloc(sizeof(*dpu_cfg), GFP_KERNEL);
+	if (!dpu_cfg)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) {
+		if (cfg_handler[i].hw_rev == hw_rev) {
+			cfg_handler[i].cfg_init(dpu_cfg);
+			dpu_cfg->hwversion = hw_rev;
+			return dpu_cfg;
+		}
+	}
+
+	DPU_ERROR("unsupported chipset id:%X\n", hw_rev);
+	dpu_hw_catalog_deinit(dpu_cfg);
+	return ERR_PTR(-ENODEV);
+}
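A short caller sketch of the catalog lookup (hypothetical, not part of
this patch):

/* Hypothetical illustration only -- not part of this patch. */
static int example_catalog_setup(u32 core_rev)
{
	struct dpu_mdss_cfg *cfg = dpu_hw_catalog_init(core_rev);

	if (IS_ERR(cfg))
		return PTR_ERR(cfg);	/* -ENODEV on unknown revisions */

	/* e.g. walk cfg->sspp[0..cfg->sspp_count) or read cfg->caps */

	dpu_hw_catalog_deinit(cfg);
	return 0;
}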
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
new file mode 100644
index 000000000000..f0cb0d4fc80e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -0,0 +1,804 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_CATALOG_H
+#define _DPU_HW_CATALOG_H
+
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/bitmap.h>
+#include <linux/err.h>
+#include <drm/drmP.h>
+
+/**
+ * Max hardware block count: e.g. at most 12 SSPP pipes or
+ * 5 CTL paths. In all cases, the current design supports at
+ * most 12 hardware blocks of any given type.
+ */
+#define MAX_BLOCKS    12
+
+#define DPU_HW_VER(MAJOR, MINOR, STEP) (((MAJOR & 0xF) << 28)    |\
+		((MINOR & 0xFFF) << 16)  |\
+		(STEP & 0xFFFF))
+
+#define DPU_HW_MAJOR(rev)		((rev) >> 28)
+#define DPU_HW_MINOR(rev)		(((rev) >> 16) & 0xFFF)
+#define DPU_HW_STEP(rev)		((rev) & 0xFFFF)
+#define DPU_HW_MAJOR_MINOR(rev)		((rev) >> 16)
+
+#define IS_DPU_MAJOR_MINOR_SAME(rev1, rev2)   \
+	(DPU_HW_MAJOR_MINOR((rev1)) == DPU_HW_MAJOR_MINOR((rev2)))
+
+#define DPU_HW_VER_170	DPU_HW_VER(1, 7, 0) /* 8996 v1.0 */
+#define DPU_HW_VER_171	DPU_HW_VER(1, 7, 1) /* 8996 v2.0 */
+#define DPU_HW_VER_172	DPU_HW_VER(1, 7, 2) /* 8996 v3.0 */
+#define DPU_HW_VER_300	DPU_HW_VER(3, 0, 0) /* 8998 v1.0 */
+#define DPU_HW_VER_301	DPU_HW_VER(3, 0, 1) /* 8998 v1.1 */
+#define DPU_HW_VER_400	DPU_HW_VER(4, 0, 0) /* sdm845 v1.0 */
+#define DPU_HW_VER_401	DPU_HW_VER(4, 0, 1) /* sdm845 v2.0 */
+#define DPU_HW_VER_410	DPU_HW_VER(4, 1, 0) /* sdm670 v1.0 */
+#define DPU_HW_VER_500	DPU_HW_VER(5, 0, 0) /* sdm855 v1.0 */
+
+#define IS_MSM8996_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_170)
+#define IS_MSM8998_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_300)
+#define IS_SDM845_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_400)
+#define IS_SDM670_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_410)
+#define IS_SDM855_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_500)
+
+#define DPU_HW_BLK_NAME_LEN	16
+
+#define MAX_IMG_WIDTH 0x3fff
+#define MAX_IMG_HEIGHT 0x3fff
+
+#define CRTC_DUAL_MIXERS	2
+
+#define MAX_XIN_COUNT 16
+
+/**
+ * Supported UBWC feature versions
+ */
+enum {
+	DPU_HW_UBWC_VER_10 = 0x100,
+	DPU_HW_UBWC_VER_20 = 0x200,
+	DPU_HW_UBWC_VER_30 = 0x300,
+};
+
+#define IS_UBWC_20_SUPPORTED(rev)       ((rev) >= DPU_HW_UBWC_VER_20)
+
+/**
+ * MDP TOP BLOCK features
+ * @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe
+ * @DPU_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats
+ * @DPU_MDP_BWC,           MDSS HW supports Bandwidth compression.
+ * @DPU_MDP_UBWC_1_0,      This chipset supports Universal Bandwidth
+ *                         compression initial revision
+ * @DPU_MDP_UBWC_1_5,      Universal Bandwidth compression version 1.5
+ * @DPU_MDP_MAX            Maximum value
+ */
+enum {
+	DPU_MDP_PANIC_PER_PIPE = 0x1,
+	DPU_MDP_10BIT_SUPPORT,
+	DPU_MDP_BWC,
+	DPU_MDP_UBWC_1_0,
+	DPU_MDP_UBWC_1_5,
+	DPU_MDP_MAX
+};
+
+/**
+ * SSPP sub-blocks/features
+ * @DPU_SSPP_SRC             Src and fetch part of the pipes,
+ * @DPU_SSPP_SCALER_QSEED2,  QSEED2 algorithm support
+ * @DPU_SSPP_SCALER_QSEED3,  QSEED3 algorithm support
+ * @DPU_SSPP_SCALER_RGB,     RGB Scaler, supported by RGB pipes
+ * @DPU_SSPP_CSC,            Support of Color space conversion
+ * @DPU_SSPP_CSC_10BIT,      Support of 10-bit Color space conversion
+ * @DPU_SSPP_CURSOR,         SSPP can be used as a cursor layer
+ * @DPU_SSPP_QOS,            SSPP support QoS control, danger/safe/creq
+ * @DPU_SSPP_QOS_8LVL,       SSPP support 8-level QoS control
+ * @DPU_SSPP_EXCL_RECT,      SSPP supports exclusion rect
+ * @DPU_SSPP_SMART_DMA_V1,   SmartDMA 1.0 support
+ * @DPU_SSPP_SMART_DMA_V2,   SmartDMA 2.0 support
+ * @DPU_SSPP_TS_PREFILL      Supports prefill with traffic shaper
+ * @DPU_SSPP_TS_PREFILL_REC1 Supports prefill with traffic shaper multirec
+ * @DPU_SSPP_CDP             Supports client driven prefetch
+ * @DPU_SSPP_MAX             maximum value
+ */
+enum {
+	DPU_SSPP_SRC = 0x1,
+	DPU_SSPP_SCALER_QSEED2,
+	DPU_SSPP_SCALER_QSEED3,
+	DPU_SSPP_SCALER_RGB,
+	DPU_SSPP_CSC,
+	DPU_SSPP_CSC_10BIT,
+	DPU_SSPP_CURSOR,
+	DPU_SSPP_QOS,
+	DPU_SSPP_QOS_8LVL,
+	DPU_SSPP_EXCL_RECT,
+	DPU_SSPP_SMART_DMA_V1,
+	DPU_SSPP_SMART_DMA_V2,
+	DPU_SSPP_TS_PREFILL,
+	DPU_SSPP_TS_PREFILL_REC1,
+	DPU_SSPP_CDP,
+	DPU_SSPP_MAX
+};
+
+/*
+ * MIXER sub-blocks/features
+ * @DPU_MIXER_LAYER           Layer mixer layer blend configuration,
+ * @DPU_MIXER_SOURCESPLIT     Layer mixer supports source-split configuration
+ * @DPU_MIXER_GC              Gamma correction block
+ * @DPU_DIM_LAYER             Layer mixer supports dim layer
+ * @DPU_MIXER_MAX             maximum value
+ */
+enum {
+	DPU_MIXER_LAYER = 0x1,
+	DPU_MIXER_SOURCESPLIT,
+	DPU_MIXER_GC,
+	DPU_DIM_LAYER,
+	DPU_MIXER_MAX
+};
+
+/**
+ * PINGPONG sub-blocks
+ * @DPU_PINGPONG_TE         Tear check block
+ * @DPU_PINGPONG_TE2        Additional tear check block for split pipes
+ * @DPU_PINGPONG_SPLIT      PP block supports split fifo
+ * @DPU_PINGPONG_SLAVE      PP block is a suitable slave for split fifo
+ * @DPU_PINGPONG_DITHER,    Dither blocks
+ * @DPU_PINGPONG_MAX
+ */
+enum {
+	DPU_PINGPONG_TE = 0x1,
+	DPU_PINGPONG_TE2,
+	DPU_PINGPONG_SPLIT,
+	DPU_PINGPONG_SLAVE,
+	DPU_PINGPONG_DITHER,
+	DPU_PINGPONG_MAX
+};
+
+/**
+ * CTL sub-blocks
+ * @DPU_CTL_SPLIT_DISPLAY       CTL supports video mode split display
+ * @DPU_CTL_MAX
+ */
+enum {
+	DPU_CTL_SPLIT_DISPLAY = 0x1,
+	DPU_CTL_MAX
+};
+
+/**
+ * VBIF sub-blocks and features
+ * @DPU_VBIF_QOS_OTLIM        VBIF supports OT Limit
+ * @DPU_VBIF_QOS_REMAP        VBIF supports QoS priority remap
+ * @DPU_VBIF_MAX              maximum value
+ */
+enum {
+	DPU_VBIF_QOS_OTLIM = 0x1,
+	DPU_VBIF_QOS_REMAP,
+	DPU_VBIF_MAX
+};
+
+/**
+ * MACRO DPU_HW_BLK_INFO - information of HW blocks inside DPU
+ * @name:              string name for debug purposes
+ * @id:                enum identifying this block
+ * @base:              register base offset to mdss
+ * @len:               length of hardware block
+ * @features           bit mask identifying sub-blocks/features
+ */
+#define DPU_HW_BLK_INFO \
+	char name[DPU_HW_BLK_NAME_LEN]; \
+	u32 id; \
+	u32 base; \
+	u32 len; \
+	unsigned long features
+
+/**
+ * MACRO DPU_HW_SUBBLK_INFO - information of HW sub-block inside DPU
+ * @name:              string name for debug purposes
+ * @id:                enum identifying this sub-block
+ * @base:              offset of this sub-block relative to the block
+ *                     offset
+ * @len                register block length of this sub-block
+ */
+#define DPU_HW_SUBBLK_INFO \
+	char name[DPU_HW_BLK_NAME_LEN]; \
+	u32 id; \
+	u32 base; \
+	u32 len
+
+/**
+ * struct dpu_src_blk: SSPP part of the source pipes
+ * @info:   HW register and features supported by this sub-blk
+ */
+struct dpu_src_blk {
+	DPU_HW_SUBBLK_INFO;
+};
+
+/**
+ * struct dpu_scaler_blk: Scaler information
+ * @info:   HW register and features supported by this sub-blk
+ * @version: qseed block revision
+ */
+struct dpu_scaler_blk {
+	DPU_HW_SUBBLK_INFO;
+	u32 version;
+};
+
+struct dpu_csc_blk {
+	DPU_HW_SUBBLK_INFO;
+};
+
+/**
+ * struct dpu_pp_blk : Pixel processing sub-blk information
+ * @info:   HW register and features supported by this sub-blk
+ * @version: HW Algorithm version
+ */
+struct dpu_pp_blk {
+	DPU_HW_SUBBLK_INFO;
+	u32 version;
+};
+
+/**
+ * struct dpu_format_extended - define dpu specific pixel format+modifier
+ * @fourcc_format: Base FOURCC pixel format code
+ * @modifier: 64-bit drm format modifier, same modifier must be applied to all
+ *            framebuffer planes
+ */
+struct dpu_format_extended {
+	uint32_t fourcc_format;
+	uint64_t modifier;
+};
+
+/**
+ * enum dpu_qos_lut_usage - define QoS LUT use cases
+ */
+enum dpu_qos_lut_usage {
+	DPU_QOS_LUT_USAGE_LINEAR,
+	DPU_QOS_LUT_USAGE_MACROTILE,
+	DPU_QOS_LUT_USAGE_NRT,
+	DPU_QOS_LUT_USAGE_MAX,
+};
+
+/**
+ * struct dpu_qos_lut_entry - define QoS LUT table entry
+ * @fl: fill level, or zero on last entry to indicate default lut
+ * @lut: lut to use if equal to or less than fill level
+ */
+struct dpu_qos_lut_entry {
+	u32 fl;
+	u64 lut;
+};
+
+/**
+ * struct dpu_qos_lut_tbl - define QoS LUT table
+ * @nentry: number of entry in this table
+ * @entries: Pointer to table entries
+ */
+struct dpu_qos_lut_tbl {
+	u32 nentry;
+	struct dpu_qos_lut_entry *entries;
+};
+
+/**
+ * struct dpu_caps - define DPU capabilities
+ * @max_mixer_width    max layer mixer line width support.
+ * @max_mixer_blendstages max layer mixer blend stages or
+ *                       supported z order
+ * @qseed_type         qseed2 or qseed3 support.
+ * @smart_dma_rev      Supported version of SmartDMA feature.
+ * @ubwc_version       UBWC feature version (0x0 for not supported)
+ * @has_src_split      source split feature status
+ * @has_dim_layer      dim layer feature status
+ * @has_idle_pc        indicate if idle power collapse feature is supported
+ */
+struct dpu_caps {
+	u32 max_mixer_width;
+	u32 max_mixer_blendstages;
+	u32 qseed_type;
+	u32 smart_dma_rev;
+	u32 ubwc_version;
+	bool has_src_split;
+	bool has_dim_layer;
+	bool has_idle_pc;
+};
+
+/**
+ * struct dpu_sspp_blks_common : SSPP sub-blocks common configuration
+ * @maxlinewidth: max line width supported by this pipe
+ * @pixel_ram_size: size of latency hiding and de-tiling buffer in bytes
+ * @maxhdeciexp: max horizontal decimation supported by this pipe
+ *				(max is 2^value)
+ * @maxvdeciexp: max vertical decimation supported by this pipe
+ *				(max is 2^value)
+ */
+struct dpu_sspp_blks_common {
+	u32 maxlinewidth;
+	u32 pixel_ram_size;
+	u32 maxhdeciexp;
+	u32 maxvdeciexp;
+};
+
+/**
+ * struct dpu_sspp_sub_blks : SSPP sub-blocks
+ * @common: Pointer to common configurations shared by sub blocks
+ * @creq_vblank: creq priority during vertical blanking
+ * @danger_vblank: danger priority during vertical blanking
+ * @maxdwnscale: max downscale ratio supported(without DECIMATION)
+ * @maxupscale:  maxupscale ratio supported
+ * @smart_dma_priority: hw priority of rect1 of multirect pipe
+ * @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps
+ * @src_blk:
+ * @scaler_blk:
+ * @csc_blk:
+ * @hsic_blk:
+ * @memcolor_blk:
+ * @pcc_blk:
+ * @igc_blk:
+ * @format_list: Pointer to list of supported formats
+ * @virt_format_list: Pointer to list of supported formats for virtual planes
+ */
+struct dpu_sspp_sub_blks {
+	const struct dpu_sspp_blks_common *common;
+	u32 creq_vblank;
+	u32 danger_vblank;
+	u32 maxdwnscale;
+	u32 maxupscale;
+	u32 smart_dma_priority;
+	u32 max_per_pipe_bw;
+	struct dpu_src_blk src_blk;
+	struct dpu_scaler_blk scaler_blk;
+	struct dpu_pp_blk csc_blk;
+	struct dpu_pp_blk hsic_blk;
+	struct dpu_pp_blk memcolor_blk;
+	struct dpu_pp_blk pcc_blk;
+	struct dpu_pp_blk igc_blk;
+
+	const struct dpu_format_extended *format_list;
+	const struct dpu_format_extended *virt_format_list;
+};
+
+/**
+ * struct dpu_lm_sub_blks:      information of mixer block
+ * @maxwidth:               Max pixel width supported by this mixer
+ * @maxblendstages:         Max number of blend-stages supported
+ * @blendstage_base:        Blend-stage register base offset
+ * @gc: gamma correction block
+ */
+struct dpu_lm_sub_blks {
+	u32 maxwidth;
+	u32 maxblendstages;
+	u32 blendstage_base[MAX_BLOCKS];
+	struct dpu_pp_blk gc;
+};
+
+struct dpu_pingpong_sub_blks {
+	struct dpu_pp_blk te;
+	struct dpu_pp_blk te2;
+	struct dpu_pp_blk dither;
+};
+
+/**
+ * dpu_clk_ctrl_type - Defines top level clock control signals
+ */
+enum dpu_clk_ctrl_type {
+	DPU_CLK_CTRL_NONE,
+	DPU_CLK_CTRL_VIG0,
+	DPU_CLK_CTRL_VIG1,
+	DPU_CLK_CTRL_VIG2,
+	DPU_CLK_CTRL_VIG3,
+	DPU_CLK_CTRL_VIG4,
+	DPU_CLK_CTRL_RGB0,
+	DPU_CLK_CTRL_RGB1,
+	DPU_CLK_CTRL_RGB2,
+	DPU_CLK_CTRL_RGB3,
+	DPU_CLK_CTRL_DMA0,
+	DPU_CLK_CTRL_DMA1,
+	DPU_CLK_CTRL_CURSOR0,
+	DPU_CLK_CTRL_CURSOR1,
+	DPU_CLK_CTRL_INLINE_ROT0_SSPP,
+	DPU_CLK_CTRL_MAX,
+};
+
+/* struct dpu_clk_ctrl_reg : Clock control register
+ * @reg_off:           register offset
+ * @bit_off:           bit offset
+ */
+struct dpu_clk_ctrl_reg {
+	u32 reg_off;
+	u32 bit_off;
+};
+
+/* struct dpu_mdp_cfg : MDP TOP-BLK instance info
+ * @id:                index identifying this block
+ * @base:              register base offset to mdss
+ * @features           bit mask identifying sub-blocks/features
+ * @highest_bank_bit:  UBWC parameter
+ * @ubwc_static:       ubwc static configuration
+ * @ubwc_swizzle:      ubwc default swizzle setting
+ * @has_dest_scaler:   indicates support of destination scaler
+ * @clk_ctrls          clock control register definition
+ */
+struct dpu_mdp_cfg {
+	DPU_HW_BLK_INFO;
+	u32 highest_bank_bit;
+	u32 ubwc_static;
+	u32 ubwc_swizzle;
+	bool has_dest_scaler;
+	struct dpu_clk_ctrl_reg clk_ctrls[DPU_CLK_CTRL_MAX];
+};
+
+/* struct dpu_ctl_cfg : CTL BLK instance info
+ * @id:                index identifying this block
+ * @base:              register base offset to mdss
+ * @features           bit mask identifying sub-blocks/features
+ */
+struct dpu_ctl_cfg {
+	DPU_HW_BLK_INFO;
+};
+
+/**
+ * struct dpu_sspp_cfg - information of source pipes
+ * @id:                index identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @sblk:              SSPP sub-blocks information
+ * @xin_id:            bus client identifier
+ * @clk_ctrl           clock control identifier
+ * @type               sspp type identifier
+ */
+struct dpu_sspp_cfg {
+	DPU_HW_BLK_INFO;
+	const struct dpu_sspp_sub_blks *sblk;
+	u32 xin_id;
+	enum dpu_clk_ctrl_type clk_ctrl;
+	u32 type;
+};
+
+/**
+ * struct dpu_lm_cfg - information of layer mixer blocks
+ * @id:                index identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @sblk:              LM Sub-blocks information
+ * @pingpong:          ID of connected PingPong, PINGPONG_MAX if unsupported
+ * @ds:                ID of connected DS, DS_MAX if unsupported
+ * @lm_pair_mask:      Bitmask of LMs that can be controlled by same CTL
+ */
+struct dpu_lm_cfg {
+	DPU_HW_BLK_INFO;
+	const struct dpu_lm_sub_blks *sblk;
+	u32 pingpong;
+	u32 ds;
+	unsigned long lm_pair_mask;
+};
+
+/**
+ * struct dpu_ds_top_cfg - information of dest scaler top
+ * @id               enum identifying this block
+ * @base             register offset of this block
+ * @features         bit mask identifying features
+ * @version          hw version of dest scaler
+ * @maxinputwidth    maximum input line width
+ * @maxoutputwidth   maximum output line width
+ * @maxupscale       maximum upscale ratio
+ */
+struct dpu_ds_top_cfg {
+	DPU_HW_BLK_INFO;
+	u32 version;
+	u32 maxinputwidth;
+	u32 maxoutputwidth;
+	u32 maxupscale;
+};
+
+/**
+ * struct dpu_ds_cfg - information of dest scaler blocks
+ * @id          enum identifying this block
+ * @base        register offset wrt DS top offset
+ * @features    bit mask identifying features
+ * @version     hw version of the qseed block
+ * @top         DS top information
+ */
+struct dpu_ds_cfg {
+	DPU_HW_BLK_INFO;
+	u32 version;
+	const struct dpu_ds_top_cfg *top;
+};
+
+/**
+ * struct dpu_pingpong_cfg - information of PING-PONG blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @sblk               sub-blocks information
+ */
+struct dpu_pingpong_cfg  {
+	DPU_HW_BLK_INFO;
+	const struct dpu_pingpong_sub_blks *sblk;
+};
+
+/**
+ * struct dpu_cdm_cfg - information of chroma down blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @intf_connect       Bitmask of INTF IDs this CDM can connect to
+ */
+struct dpu_cdm_cfg   {
+	DPU_HW_BLK_INFO;
+	unsigned long intf_connect;
+};
+
+/**
+ * struct dpu_intf_cfg - information of timing engine blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @type:              Interface type (DSI, DP, HDMI)
+ * @controller_id:     Controller Instance ID in case of multiple of intf type
+ * @prog_fetch_lines_worst_case	Worst case latency num lines needed to prefetch
+ */
+struct dpu_intf_cfg  {
+	DPU_HW_BLK_INFO;
+	u32 type;   /* interface type */
+	u32 controller_id;
+	u32 prog_fetch_lines_worst_case;
+};
+
+/**
+ * struct dpu_vbif_dynamic_ot_cfg - dynamic OT setting
+ * @pps                pixel per seconds
+ * @ot_limit           OT limit to use up to specified pixel per second
+ */
+struct dpu_vbif_dynamic_ot_cfg {
+	u64 pps;
+	u32 ot_limit;
+};
+
+/**
+ * struct dpu_vbif_dynamic_ot_tbl - dynamic OT setting table
+ * @count              length of cfg
+ * @cfg                pointer to array of configuration settings with
+ *                     ascending requirements
+ */
+struct dpu_vbif_dynamic_ot_tbl {
+	u32 count;
+	struct dpu_vbif_dynamic_ot_cfg *cfg;
+};
+
+/**
+ * struct dpu_vbif_qos_tbl - QoS priority table
+ * @npriority_lvl      num of priority level
+ * @priority_lvl       pointer to array of priority level in ascending order
+ */
+struct dpu_vbif_qos_tbl {
+	u32 npriority_lvl;
+	u32 *priority_lvl;
+};
+
+/**
+ * struct dpu_vbif_cfg - information of VBIF blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @default_ot_rd_limit  default OT read limit
+ * @default_ot_wr_limit  default OT write limit
+ * @xin_halt_timeout   maximum time (in usec) for xin to halt
+ * @dynamic_ot_rd_tbl  dynamic OT read configuration table
+ * @dynamic_ot_wr_tbl  dynamic OT write configuration table
+ * @qos_rt_tbl         real-time QoS priority table
+ * @qos_nrt_tbl        non-real-time QoS priority table
+ * @memtype_count      number of defined memtypes
+ * @memtype            array of xin memtype definitions
+ */
+struct dpu_vbif_cfg {
+	DPU_HW_BLK_INFO;
+	u32 default_ot_rd_limit;
+	u32 default_ot_wr_limit;
+	u32 xin_halt_timeout;
+	struct dpu_vbif_dynamic_ot_tbl dynamic_ot_rd_tbl;
+	struct dpu_vbif_dynamic_ot_tbl dynamic_ot_wr_tbl;
+	struct dpu_vbif_qos_tbl qos_rt_tbl;
+	struct dpu_vbif_qos_tbl qos_nrt_tbl;
+	u32 memtype_count;
+	u32 memtype[MAX_XIN_COUNT];
+};
+
+/**
+ * struct dpu_reg_dma_cfg - information of lut dma blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @version            version of lutdma hw block
+ * @trigger_sel_off    offset to trigger select registers of lutdma
+ */
+struct dpu_reg_dma_cfg {
+	DPU_HW_BLK_INFO;
+	u32 version;
+	u32 trigger_sel_off;
+};
+
+/**
+ * Define CDP use cases
+ * @DPU_PERF_CDP_USAGE_RT: real-time use cases
+ * @DPU_PERF_CDP_USAGE_NRT: non real-time use cases such as WFD
+ */
+enum {
+	DPU_PERF_CDP_USAGE_RT,
+	DPU_PERF_CDP_USAGE_NRT,
+	DPU_PERF_CDP_USAGE_MAX
+};
+
+/**
+ * struct dpu_perf_cdp_cfg - define CDP use case configuration
+ * @rd_enable: true if read pipe CDP is enabled
+ * @wr_enable: true if write pipe CDP is enabled
+ */
+struct dpu_perf_cdp_cfg {
+	bool rd_enable;
+	bool wr_enable;
+};
+
+/**
+ * struct dpu_perf_cfg - performance control settings
+ * @max_bw_low         low threshold of maximum bandwidth (kbps)
+ * @max_bw_high        high threshold of maximum bandwidth (kbps)
+ * @min_core_ib        minimum mnoc ib vote in kbps
+ * @min_llcc_ib        minimum llcc ib vote in kbps
+ * @min_dram_ib        minimum dram ib vote in kbps
+ * @core_ib_ff         core instantaneous bandwidth fudge factor
+ * @core_clk_ff        core clock fudge factor
+ * @comp_ratio_rt      string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio>
+ * @comp_ratio_nrt     string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio>
+ * @undersized_prefill_lines   undersized prefill in lines
+ * @xtra_prefill_lines         extra prefill latency in lines
+ * @dest_scale_prefill_lines   destination scaler latency in lines
+ * @macrotile_prefill_lines    macrotile latency in lines
+ * @yuv_nv12_prefill_lines     yuv_nv12 latency in lines
+ * @linear_prefill_lines       linear latency in lines
+ * @downscaling_prefill_lines  downscaling latency in lines
+ * @amortizable_threshold minimum y position for traffic shaping prefill
+ * @min_prefill_lines  minimum pipeline latency in lines
+ * @safe_lut_tbl: LUT tables for safe signals
+ * @danger_lut_tbl: LUT tables for danger signals
+ * @qos_lut_tbl: LUT tables for QoS signals
+ * @cdp_cfg            cdp use case configurations
+ */
+struct dpu_perf_cfg {
+	u32 max_bw_low;
+	u32 max_bw_high;
+	u32 min_core_ib;
+	u32 min_llcc_ib;
+	u32 min_dram_ib;
+	const char *core_ib_ff;
+	const char *core_clk_ff;
+	const char *comp_ratio_rt;
+	const char *comp_ratio_nrt;
+	u32 undersized_prefill_lines;
+	u32 xtra_prefill_lines;
+	u32 dest_scale_prefill_lines;
+	u32 macrotile_prefill_lines;
+	u32 yuv_nv12_prefill_lines;
+	u32 linear_prefill_lines;
+	u32 downscaling_prefill_lines;
+	u32 amortizable_threshold;
+	u32 min_prefill_lines;
+	u32 safe_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+	u32 danger_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+	struct dpu_qos_lut_tbl qos_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+	struct dpu_perf_cdp_cfg cdp_cfg[DPU_PERF_CDP_USAGE_MAX];
+};
+
+/**
+ * struct dpu_mdss_cfg - information of MDSS HW
+ * This is the main catalog data structure representing
+ * this HW version. Contains number of instances,
+ * register offsets, capabilities of all the MDSS HW sub-blocks.
+ *
+ * @dma_formats        Supported formats for dma pipe
+ * @cursor_formats     Supported formats for cursor pipe
+ * @vig_formats        Supported formats for vig pipe
+ */
+struct dpu_mdss_cfg {
+	u32 hwversion;
+
+	const struct dpu_caps *caps;
+
+	u32 mdp_count;
+	struct dpu_mdp_cfg *mdp;
+
+	u32 ctl_count;
+	struct dpu_ctl_cfg *ctl;
+
+	u32 sspp_count;
+	struct dpu_sspp_cfg *sspp;
+
+	u32 mixer_count;
+	struct dpu_lm_cfg *mixer;
+
+	u32 ds_count;
+	struct dpu_ds_cfg *ds;
+
+	u32 pingpong_count;
+	struct dpu_pingpong_cfg *pingpong;
+
+	u32 cdm_count;
+	struct dpu_cdm_cfg *cdm;
+
+	u32 intf_count;
+	struct dpu_intf_cfg *intf;
+
+	u32 vbif_count;
+	struct dpu_vbif_cfg *vbif;
+
+	u32 reg_dma_count;
+	struct dpu_reg_dma_cfg dma_cfg;
+
+	u32 ad_count;
+
+	/* Add additional block data structures here */
+
+	struct dpu_perf_cfg perf;
+	struct dpu_format_extended *dma_formats;
+	struct dpu_format_extended *cursor_formats;
+	struct dpu_format_extended *vig_formats;
+};
+
+struct dpu_mdss_hw_cfg_handler {
+	u32 hw_rev;
+	void (*cfg_init)(struct dpu_mdss_cfg *dpu_cfg);
+};
+
+/*
+ * Access Macros
+ */
+#define BLK_MDP(s) ((s)->mdp)
+#define BLK_CTL(s) ((s)->ctl)
+#define BLK_VIG(s) ((s)->vig)
+#define BLK_RGB(s) ((s)->rgb)
+#define BLK_DMA(s) ((s)->dma)
+#define BLK_CURSOR(s) ((s)->cursor)
+#define BLK_MIXER(s) ((s)->mixer)
+#define BLK_DS(s) ((s)->ds)
+#define BLK_PINGPONG(s) ((s)->pingpong)
+#define BLK_CDM(s) ((s)->cdm)
+#define BLK_INTF(s) ((s)->intf)
+#define BLK_AD(s) ((s)->ad)
+
+/**
+ * dpu_hw_catalog_init - dpu hardware catalog init API; retrieves
+ * hardcoded target-specific catalog information in a config structure
+ * @hw_rev:       caller needs to provide the hardware revision.
+ *
+ * Return: dpu config structure
+ */
+struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev);
+
+/**
+ * dpu_hw_catalog_deinit - dpu hardware catalog cleanup
+ * @dpu_cfg:      pointer returned from init function
+ */
+void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg);
+
+/**
+ * dpu_hw_sspp_multirect_enabled - check multirect enabled for the sspp
+ * @cfg:          pointer to sspp cfg
+ */
+static inline bool dpu_hw_sspp_multirect_enabled(const struct dpu_sspp_cfg *cfg)
+{
+	return test_bit(DPU_SSPP_SMART_DMA_V1, &cfg->features) ||
+			 test_bit(DPU_SSPP_SMART_DMA_V2, &cfg->features);
+}
+#endif /* _DPU_HW_CATALOG_H */
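A quick worked check of the version encoding above (not part of the
patch): DPU_HW_VER(4, 0, 1) = (4 << 28) | (0 << 16) | 1 = 0x40000001, so
DPU_HW_MAJOR_MINOR() gives 0x4000 for both sdm845 revisions;
IS_SDM845_TARGET() therefore matches v1.0 and v2.0 alike, while
DPU_HW_STEP() still tells them apart (0 vs 1).

Likewise, under the comp_ratio grammar documented above, the sdm845 entry
"NV12/5/1/1.23" would read as fourcc NV12, modifier vendor 0x05 and
modifier code 1 (i.e. a QCOM compressed layout, on my reading of the
fields; the patch does not spell this out), with an assumed compression
ratio of 1.23.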
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
new file mode 100644
index 000000000000..031c41163088
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
@@ -0,0 +1,182 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hw_mdss.h"
+
+static const struct dpu_format_extended plane_formats[] = {
+	{DRM_FORMAT_ARGB8888, 0},
+	{DRM_FORMAT_ABGR8888, 0},
+	{DRM_FORMAT_RGBA8888, 0},
+	{DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_BGRA8888, 0},
+	{DRM_FORMAT_XRGB8888, 0},
+	{DRM_FORMAT_RGBX8888, 0},
+	{DRM_FORMAT_BGRX8888, 0},
+	{DRM_FORMAT_XBGR8888, 0},
+	{DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_RGB888, 0},
+	{DRM_FORMAT_BGR888, 0},
+	{DRM_FORMAT_RGB565, 0},
+	{DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_BGR565, 0},
+	{DRM_FORMAT_ARGB1555, 0},
+	{DRM_FORMAT_ABGR1555, 0},
+	{DRM_FORMAT_RGBA5551, 0},
+	{DRM_FORMAT_BGRA5551, 0},
+	{DRM_FORMAT_XRGB1555, 0},
+	{DRM_FORMAT_XBGR1555, 0},
+	{DRM_FORMAT_RGBX5551, 0},
+	{DRM_FORMAT_BGRX5551, 0},
+	{DRM_FORMAT_ARGB4444, 0},
+	{DRM_FORMAT_ABGR4444, 0},
+	{DRM_FORMAT_RGBA4444, 0},
+	{DRM_FORMAT_BGRA4444, 0},
+	{DRM_FORMAT_XRGB4444, 0},
+	{DRM_FORMAT_XBGR4444, 0},
+	{DRM_FORMAT_RGBX4444, 0},
+	{DRM_FORMAT_BGRX4444, 0},
+	{0, 0},
+};
+
+static const struct dpu_format_extended plane_formats_yuv[] = {
+	{DRM_FORMAT_ARGB8888, 0},
+	{DRM_FORMAT_ABGR8888, 0},
+	{DRM_FORMAT_RGBA8888, 0},
+	{DRM_FORMAT_BGRX8888, 0},
+	{DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_BGRA8888, 0},
+	{DRM_FORMAT_XRGB8888, 0},
+	{DRM_FORMAT_XBGR8888, 0},
+	{DRM_FORMAT_RGBX8888, 0},
+	{DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_RGB888, 0},
+	{DRM_FORMAT_BGR888, 0},
+	{DRM_FORMAT_RGB565, 0},
+	{DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_BGR565, 0},
+	{DRM_FORMAT_ARGB1555, 0},
+	{DRM_FORMAT_ABGR1555, 0},
+	{DRM_FORMAT_RGBA5551, 0},
+	{DRM_FORMAT_BGRA5551, 0},
+	{DRM_FORMAT_XRGB1555, 0},
+	{DRM_FORMAT_XBGR1555, 0},
+	{DRM_FORMAT_RGBX5551, 0},
+	{DRM_FORMAT_BGRX5551, 0},
+	{DRM_FORMAT_ARGB4444, 0},
+	{DRM_FORMAT_ABGR4444, 0},
+	{DRM_FORMAT_RGBA4444, 0},
+	{DRM_FORMAT_BGRA4444, 0},
+	{DRM_FORMAT_XRGB4444, 0},
+	{DRM_FORMAT_XBGR4444, 0},
+	{DRM_FORMAT_RGBX4444, 0},
+	{DRM_FORMAT_BGRX4444, 0},
+
+	{DRM_FORMAT_NV12, 0},
+	{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_NV21, 0},
+	{DRM_FORMAT_NV16, 0},
+	{DRM_FORMAT_NV61, 0},
+	{DRM_FORMAT_VYUY, 0},
+	{DRM_FORMAT_UYVY, 0},
+	{DRM_FORMAT_YUYV, 0},
+	{DRM_FORMAT_YVYU, 0},
+	{DRM_FORMAT_YUV420, 0},
+	{DRM_FORMAT_YVU420, 0},
+	{0, 0},
+};
+
+static const struct dpu_format_extended cursor_formats[] = {
+	{DRM_FORMAT_ARGB8888, 0},
+	{DRM_FORMAT_ABGR8888, 0},
+	{DRM_FORMAT_RGBA8888, 0},
+	{DRM_FORMAT_BGRA8888, 0},
+	{DRM_FORMAT_XRGB8888, 0},
+	{DRM_FORMAT_ARGB1555, 0},
+	{DRM_FORMAT_ABGR1555, 0},
+	{DRM_FORMAT_RGBA5551, 0},
+	{DRM_FORMAT_BGRA5551, 0},
+	{DRM_FORMAT_ARGB4444, 0},
+	{DRM_FORMAT_ABGR4444, 0},
+	{DRM_FORMAT_RGBA4444, 0},
+	{DRM_FORMAT_BGRA4444, 0},
+	{0, 0},
+};
+
+static const struct dpu_format_extended wb2_formats[] = {
+	{DRM_FORMAT_RGB565, 0},
+	{DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_RGB888, 0},
+	{DRM_FORMAT_ARGB8888, 0},
+	{DRM_FORMAT_RGBA8888, 0},
+	{DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_XRGB8888, 0},
+	{DRM_FORMAT_RGBX8888, 0},
+	{DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_ARGB1555, 0},
+	{DRM_FORMAT_RGBA5551, 0},
+	{DRM_FORMAT_XRGB1555, 0},
+	{DRM_FORMAT_RGBX5551, 0},
+	{DRM_FORMAT_ARGB4444, 0},
+	{DRM_FORMAT_RGBA4444, 0},
+	{DRM_FORMAT_RGBX4444, 0},
+	{DRM_FORMAT_XRGB4444, 0},
+
+	{DRM_FORMAT_BGR565, 0},
+	{DRM_FORMAT_BGR888, 0},
+	{DRM_FORMAT_ABGR8888, 0},
+	{DRM_FORMAT_BGRA8888, 0},
+	{DRM_FORMAT_BGRX8888, 0},
+	{DRM_FORMAT_XBGR8888, 0},
+	{DRM_FORMAT_ABGR1555, 0},
+	{DRM_FORMAT_BGRA5551, 0},
+	{DRM_FORMAT_XBGR1555, 0},
+	{DRM_FORMAT_BGRX5551, 0},
+	{DRM_FORMAT_ABGR4444, 0},
+	{DRM_FORMAT_BGRA4444, 0},
+	{DRM_FORMAT_BGRX4444, 0},
+	{DRM_FORMAT_XBGR4444, 0},
+
+	{DRM_FORMAT_YUV420, 0},
+	{DRM_FORMAT_NV12, 0},
+	{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_NV16, 0},
+	{DRM_FORMAT_YUYV, 0},
+
+	{0, 0},
+};
+
+static const struct dpu_format_extended rgb_10bit_formats[] = {
+	{DRM_FORMAT_BGRA1010102, 0},
+	{DRM_FORMAT_BGRX1010102, 0},
+	{DRM_FORMAT_RGBA1010102, 0},
+	{DRM_FORMAT_RGBX1010102, 0},
+	{DRM_FORMAT_ABGR2101010, 0},
+	{DRM_FORMAT_ABGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_XBGR2101010, 0},
+	{DRM_FORMAT_XBGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_ARGB2101010, 0},
+	{DRM_FORMAT_XRGB2101010, 0},
+};
+
+static const struct dpu_format_extended p010_formats[] = {
+	{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_DX},
+};
+
+static const struct dpu_format_extended p010_ubwc_formats[] = {
+	{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_DX |
+		DRM_FORMAT_MOD_QCOM_COMPRESSED},
+};
+
+static const struct dpu_format_extended tp10_ubwc_formats[] = {
+	{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED |
+		DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_TIGHT},
+};
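
Note the plane/cursor/wb tables above end with a {0, 0} sentinel, while
the 10-bit and UBWC tables do not and must be sized with ARRAY_SIZE().
A sketch of walking a sentinel-terminated list; the member names are
assumed from dpu_hw_mdss.h and add_format() is a hypothetical consumer:

	const struct dpu_format_extended *ext;

	for (ext = plane_formats; ext->fourcc_format; ext++)
		add_format(ext->fourcc_format, ext->modifier);
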
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
new file mode 100644
index 000000000000..da6f0609be5f
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
@@ -0,0 +1,323 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_cdm.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define CDM_CSC_10_OPMODE                  0x000
+#define CDM_CSC_10_BASE                    0x004
+
+#define CDM_CDWN2_OP_MODE                  0x100
+#define CDM_CDWN2_CLAMP_OUT                0x104
+#define CDM_CDWN2_PARAMS_3D_0              0x108
+#define CDM_CDWN2_PARAMS_3D_1              0x10C
+#define CDM_CDWN2_COEFF_COSITE_H_0         0x110
+#define CDM_CDWN2_COEFF_COSITE_H_1         0x114
+#define CDM_CDWN2_COEFF_COSITE_H_2         0x118
+#define CDM_CDWN2_COEFF_OFFSITE_H_0        0x11C
+#define CDM_CDWN2_COEFF_OFFSITE_H_1        0x120
+#define CDM_CDWN2_COEFF_OFFSITE_H_2        0x124
+#define CDM_CDWN2_COEFF_COSITE_V           0x128
+#define CDM_CDWN2_COEFF_OFFSITE_V          0x12C
+#define CDM_CDWN2_OUT_SIZE                 0x130
+
+#define CDM_HDMI_PACK_OP_MODE              0x200
+#define CDM_CSC_10_MATRIX_COEFF_0          0x004
+
+/**
+ * Horizontal coefficients for cosite chroma downscale
+ * s13 representation of coefficients
+ */
+static u32 cosite_h_coeff[] = {0x00000016, 0x000001cc, 0x0100009e};
+
+/**
+ * Horizontal coefficients for offsite chroma downscale
+ */
+static u32 offsite_h_coeff[] = {0x000b0005, 0x01db01eb, 0x00e40046};
+
+/**
+ * Vertical coefficients for cosite chroma downscale
+ */
+static u32 cosite_v_coeff[] = {0x00080004};
+/**
+ * Vertical coefficients for offsite chroma downscale
+ */
+static u32 offsite_v_coeff[] = {0x00060002};
+
+/* Limited Range rgb2yuv coeff with clamp and bias values for CSC 10 module */
+static struct dpu_csc_cfg rgb2yuv_cfg = {
+	{
+		0x0083, 0x0102, 0x0032,
+		0x1fb5, 0x1f6c, 0x00e1,
+		0x00e1, 0x1f45, 0x1fdc
+	},
+	{ 0x00, 0x00, 0x00 },
+	{ 0x0040, 0x0200, 0x0200 },
+	{ 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff },
+	{ 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 },
+};
+
+static struct dpu_cdm_cfg *_cdm_offset(enum dpu_cdm cdm,
+		struct dpu_mdss_cfg *m,
+		void __iomem *addr,
+		struct dpu_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->cdm_count; i++) {
+		if (cdm == m->cdm[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->cdm[i].base;
+			b->length = m->cdm[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = DPU_DBG_MASK_CDM;
+			return &m->cdm[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+static int dpu_hw_cdm_setup_csc_10bit(struct dpu_hw_cdm *ctx,
+		struct dpu_csc_cfg *data)
+{
+	dpu_hw_csc_setup(&ctx->hw, CDM_CSC_10_MATRIX_COEFF_0, data, true);
+
+	return 0;
+}
+
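+/*
+ * CDM_CDWN2_OP_MODE bit layout, as programmed by dpu_hw_cdm_setup_cdwn()
+ * below (derived from this code, not from a register specification):
+ *	BIT(0)    - enable the CDWN module
+ *	BIT(1)    - enable horizontal downscale
+ *	BIT(2)    - enable vertical downscale
+ *	bits 3-4  - METHOD_H: 0 pixel drop, 1 average, 2 cosite, 3 offsite
+ *	bits 5-6  - METHOD_V: same encoding as METHOD_H
+ *	BIT(7)    - output bit depth: set for 8-bit, clear for 10-bit
+ */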
+static int dpu_hw_cdm_setup_cdwn(struct dpu_hw_cdm *ctx,
+		struct dpu_hw_cdm_cfg *cfg)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	u32 opmode = 0;
+	u32 out_size = 0;
+
+	if (cfg->output_bit_depth == CDM_CDWN_OUTPUT_10BIT)
+		opmode &= ~BIT(7);
+	else
+		opmode |= BIT(7);
+
+	/* ENABLE DWNS_H bit */
+	opmode |= BIT(1);
+
+	switch (cfg->h_cdwn_type) {
+	case CDM_CDWN_DISABLE:
+		/* CLEAR METHOD_H field */
+		opmode &= ~(0x18);
+		/* CLEAR DWNS_H bit */
+		opmode &= ~BIT(1);
+		break;
+	case CDM_CDWN_PIXEL_DROP:
+		/* Clear METHOD_H field (pixel drop is 0) */
+		opmode &= ~(0x18);
+		break;
+	case CDM_CDWN_AVG:
+		/* Clear METHOD_H field (Average is 0x1) */
+		opmode &= ~(0x18);
+		opmode |= (0x1 << 0x3);
+		break;
+	case CDM_CDWN_COSITE:
+		/* Clear METHOD_H field (Cosite is 0x2) */
+		opmode &= ~(0x18);
+		opmode |= (0x2 << 0x3);
+		/* Co-site horizontal coefficients */
+		DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_0,
+				cosite_h_coeff[0]);
+		DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_1,
+				cosite_h_coeff[1]);
+		DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_2,
+				cosite_h_coeff[2]);
+		break;
+	case CDM_CDWN_OFFSITE:
+		/* Clear METHOD_H field (Offsite is 0x3) */
+		opmode &= ~(0x18);
+		opmode |= (0x3 << 0x3);
+
+		/* Off-site horizontal coefficients */
+		DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_0,
+				offsite_h_coeff[0]);
+		DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_1,
+				offsite_h_coeff[1]);
+		DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_2,
+				offsite_h_coeff[2]);
+		break;
+	default:
+		pr_err("%s invalid horz down sampling type\n", __func__);
+		return -EINVAL;
+	}
+
+	/* ENABLE DWNS_V bit */
+	opmode |= BIT(2);
+
+	switch (cfg->v_cdwn_type) {
+	case CDM_CDWN_DISABLE:
+		/* CLEAR METHOD_V field */
+		opmode &= ~(0x60);
+		/* CLEAR DWNS_V bit */
+		opmode &= ~BIT(2);
+		break;
+	case CDM_CDWN_PIXEL_DROP:
+		/* Clear METHOD_V field (pixel drop is 0) */
+		opmode &= ~(0x60);
+		break;
+	case CDM_CDWN_AVG:
+		/* Clear METHOD_V field (Average is 0x1) */
+		opmode &= ~(0x60);
+		opmode |= (0x1 << 0x5);
+		break;
+	case CDM_CDWN_COSITE:
+		/* Clear METHOD_V field (Cosite is 0x2) */
+		opmode &= ~(0x60);
+		opmode |= (0x2 << 0x5);
+		/* Co-site vertical coefficients */
+		DPU_REG_WRITE(c,
+				CDM_CDWN2_COEFF_COSITE_V,
+				cosite_v_coeff[0]);
+		break;
+	case CDM_CDWN_OFFSITE:
+		/* Clear METHOD_V field (Offsite is 0x3) */
+		opmode &= ~(0x60);
+		opmode |= (0x3 << 0x5);
+
+		/* Off-site vertical coefficients */
+		DPU_REG_WRITE(c,
+				CDM_CDWN2_COEFF_OFFSITE_V,
+				offsite_v_coeff[0]);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (cfg->v_cdwn_type || cfg->h_cdwn_type)
+		opmode |= BIT(0); /* EN CDWN module */
+	else
+		opmode &= ~BIT(0);
+
+	out_size = (cfg->output_width & 0xFFFF) |
+		((cfg->output_height & 0xFFFF) << 16);
+	DPU_REG_WRITE(c, CDM_CDWN2_OUT_SIZE, out_size);
+	DPU_REG_WRITE(c, CDM_CDWN2_OP_MODE, opmode);
+	DPU_REG_WRITE(c, CDM_CDWN2_CLAMP_OUT,
+			((0x3FF << 16) | 0x0));
+
+	return 0;
+}
+
+int dpu_hw_cdm_enable(struct dpu_hw_cdm *ctx,
+		struct dpu_hw_cdm_cfg *cdm)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	const struct dpu_format *fmt = cdm->output_fmt;
+	struct cdm_output_cfg cdm_cfg = { 0 };
+	u32 opmode = 0;
+	u32 csc = 0;
+
+	if (!DPU_FORMAT_IS_YUV(fmt))
+		return -EINVAL;
+
+	if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) {
+		if (fmt->chroma_sample != DPU_CHROMA_H1V2)
+			return -EINVAL; /* unsupported format */
+		opmode = BIT(0);
+		opmode |= (fmt->chroma_sample << 1);
+		cdm_cfg.intf_en = true;
+	}
+
+	csc |= BIT(2);
+	csc &= ~BIT(1);
+	csc |= BIT(0);
+
+	if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
+		ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
+
+	DPU_REG_WRITE(c, CDM_CSC_10_OPMODE, csc);
+	DPU_REG_WRITE(c, CDM_HDMI_PACK_OP_MODE, opmode);
+	return 0;
+}
+
+void dpu_hw_cdm_disable(struct dpu_hw_cdm *ctx)
+{
+	struct cdm_output_cfg cdm_cfg = { 0 };
+
+	if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
+		ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
+}
+
+static void _setup_cdm_ops(struct dpu_hw_cdm_ops *ops,
+	unsigned long features)
+{
+	ops->setup_csc_data = dpu_hw_cdm_setup_csc_10bit;
+	ops->setup_cdwn = dpu_hw_cdm_setup_cdwn;
+	ops->enable = dpu_hw_cdm_enable;
+	ops->disable = dpu_hw_cdm_disable;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
+struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m,
+		struct dpu_hw_mdp *hw_mdp)
+{
+	struct dpu_hw_cdm *c;
+	struct dpu_cdm_cfg *cfg;
+	int rc;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _cdm_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	c->idx = idx;
+	c->caps = cfg;
+	_setup_cdm_ops(&c->ops, c->caps->features);
+	c->hw_mdp = hw_mdp;
+
+	rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CDM, idx, &dpu_hw_ops);
+	if (rc) {
+		DPU_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
+	/*
+	 * Perform any default initialization for the chroma down module:
+	 * set up the default CSC coefficients
+	 */
+	dpu_hw_cdm_setup_csc_10bit(c, &rgb2yuv_cfg);
+
+	return c;
+
+blk_init_error:
+	kfree(c);
+
+	return ERR_PTR(rc);
+}
+
+void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm)
+{
+	if (cdm)
+		dpu_hw_blk_destroy(&cdm->base);
+	kfree(cdm);
+}
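
A rough call-flow sketch for the CDM code above, e.g. when feeding a
YUV-packed HDMI interface (mmio/catalog/hw_mdp/fmt stand in for the
caller's context and the values are illustrative only):

	struct dpu_hw_cdm *cdm;
	struct dpu_hw_cdm_cfg cfg = {
		.output_width = 1920,
		.output_height = 1080,
		.output_bit_depth = CDM_CDWN_OUTPUT_10BIT,
		.h_cdwn_type = CDM_CDWN_COSITE,
		.v_cdwn_type = CDM_CDWN_DISABLE,
		.output_fmt = fmt,	/* a YUV dpu_format */
		.output_type = CDM_CDWN_OUTPUT_HDMI,
	};

	cdm = dpu_hw_cdm_init(CDM_0, mmio, catalog, hw_mdp);
	if (!IS_ERR(cdm)) {
		cdm->ops.setup_cdwn(cdm, &cfg);
		cdm->ops.enable(cdm, &cfg);
	}
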
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
new file mode 100644
index 000000000000..5cceb1ecb8e0
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
@@ -0,0 +1,139 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_CDM_H
+#define _DPU_HW_CDM_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_top.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_cdm;
+
+struct dpu_hw_cdm_cfg {
+	u32 output_width;
+	u32 output_height;
+	u32 output_bit_depth;
+	u32 h_cdwn_type;
+	u32 v_cdwn_type;
+	const struct dpu_format *output_fmt;
+	u32 output_type;
+	int flags;
+};
+
+enum dpu_hw_cdwn_type {
+	CDM_CDWN_DISABLE,
+	CDM_CDWN_PIXEL_DROP,
+	CDM_CDWN_AVG,
+	CDM_CDWN_COSITE,
+	CDM_CDWN_OFFSITE,
+};
+
+enum dpu_hw_cdwn_output_type {
+	CDM_CDWN_OUTPUT_HDMI,
+	CDM_CDWN_OUTPUT_WB,
+};
+
+enum dpu_hw_cdwn_output_bit_depth {
+	CDM_CDWN_OUTPUT_8BIT,
+	CDM_CDWN_OUTPUT_10BIT,
+};
+
+/**
+ * struct dpu_hw_cdm_ops : Interface to the chroma down HW driver functions
+ *                         Assumption is these functions will be called after
+ *                         clocks are enabled
+ *  @setup_csc:            Programs the csc matrix
+ *  @setup_cdwn:           Sets up the chroma down sub module
+ *  @enable:               Enables the output to interface and programs the
+ *                         output packer
+ *  @disable:              Puts the cdm in bypass mode
+ */
+struct dpu_hw_cdm_ops {
+	/**
+	 * Programs the CSC matrix for conversion from RGB to YUV space.
+	 * Calling this function is optional: a default matrix is programmed
+	 * automatically during initialization, so call it only to program
+	 * a matrix other than the default one.
+	 * @cdm:          Pointer to the chroma down context structure
+	 * @data:         Pointer to CSC configuration data
+	 * return:        0 if success; error code otherwise
+	 */
+	int (*setup_csc_data)(struct dpu_hw_cdm *cdm,
+			struct dpu_csc_cfg *data);
+
+	/**
+	 * Programs the Chroma downsample part.
+	 * @cdm:        Pointer to chroma down context
+	 */
+	int (*setup_cdwn)(struct dpu_hw_cdm *cdm,
+			struct dpu_hw_cdm_cfg *cfg);
+
+	/**
+	 * Enable the CDM module
+	 * @cdm:        Pointer to chroma down context
+	 */
+	int (*enable)(struct dpu_hw_cdm *cdm,
+			struct dpu_hw_cdm_cfg *cfg);
+
+	/**
+	 * Disable the CDM module
+	 * @cdm:        Pointer to chroma down context
+	 */
+	void (*disable)(struct dpu_hw_cdm *cdm);
+};
+
+struct dpu_hw_cdm {
+	struct dpu_hw_blk base;
+	struct dpu_hw_blk_reg_map hw;
+
+	/* chroma down */
+	const struct dpu_cdm_cfg *caps;
+	enum  dpu_cdm  idx;
+
+	/* mdp top hw driver */
+	struct dpu_hw_mdp *hw_mdp;
+
+	/* ops */
+	struct dpu_hw_cdm_ops ops;
+};
+
+/**
+ * to_dpu_hw_cdm - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_cdm *to_dpu_hw_cdm(struct dpu_hw_blk *hw)
+{
+	return container_of(hw, struct dpu_hw_cdm, base);
+}
+
+/**
+ * dpu_hw_cdm_init - initializes the cdm hw driver object.
+ * Should be called once per CDM instance, before accessing it.
+ * @idx:  cdm index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m :   pointer to mdss catalog data
+ * @hw_mdp:  pointer to mdp top hw driver object
+ */
+struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m,
+		struct dpu_hw_mdp *hw_mdp);
+
+/**
+ * dpu_hw_cdm_destroy - destroys CDM driver context
+ * @cdm:   pointer to CDM driver context
+ */
+void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm);
+
+#endif /*_DPU_HW_CDM_H */
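
Since a default limited-range RGB-to-YUV matrix is already programmed at
init time, setup_csc_data() is only needed to install a different one.
A sketch, where my_csc is a hypothetical caller-supplied dpu_csc_cfg:

	if (cdm->ops.setup_csc_data)
		cdm->ops.setup_csc_data(cdm, &my_csc);
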
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
new file mode 100644
index 000000000000..06be7cf7ce50
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -0,0 +1,540 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include "dpu_hwio.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define   CTL_LAYER(lm)                 \
+	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
+#define   CTL_LAYER_EXT(lm)             \
+	(0x40 + (((lm) - LM_0) * 0x004))
+#define   CTL_LAYER_EXT2(lm)             \
+	(0x70 + (((lm) - LM_0) * 0x004))
+#define   CTL_LAYER_EXT3(lm)             \
+	(0xA0 + (((lm) - LM_0) * 0x004))
+#define   CTL_TOP                       0x014
+#define   CTL_FLUSH                     0x018
+#define   CTL_START                     0x01C
+#define   CTL_PREPARE                   0x0d0
+#define   CTL_SW_RESET                  0x030
+#define   CTL_LAYER_EXTN_OFFSET         0x40
+
+#define CTL_MIXER_BORDER_OUT            BIT(24)
+#define CTL_FLUSH_MASK_CTL              BIT(17)
+
+#define DPU_REG_RESET_TIMEOUT_US        2000
+
+static struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
+		struct dpu_mdss_cfg *m,
+		void __iomem *addr,
+		struct dpu_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->ctl_count; i++) {
+		if (ctl == m->ctl[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->ctl[i].base;
+			b->length = m->ctl[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = DPU_DBG_MASK_CTL;
+			return &m->ctl[i];
+		}
+	}
+	return ERR_PTR(-EINVAL);
+}
+
+static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
+		enum dpu_lm lm)
+{
+	int i;
+	int stages = -EINVAL;
+
+	for (i = 0; i < count; i++) {
+		if (lm == mixer[i].id) {
+			stages = mixer[i].sblk->maxblendstages;
+			break;
+		}
+	}
+
+	return stages;
+}
+
+static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
+{
+	DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
+}
+
+static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
+{
+	DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
+}
+
+static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
+{
+	ctx->pending_flush_mask = 0x0;
+}
+
+static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
+		u32 flushbits)
+{
+	ctx->pending_flush_mask |= flushbits;
+}
+
+static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
+{
+	if (!ctx)
+		return 0x0;
+
+	return ctx->pending_flush_mask;
+}
+
+static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
+{
+	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
+}
+
+static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+	return DPU_REG_READ(c, CTL_FLUSH);
+}
+
+static inline uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
+	enum dpu_sspp sspp)
+{
+	uint32_t flushbits = 0;
+
+	switch (sspp) {
+	case SSPP_VIG0:
+		flushbits =  BIT(0);
+		break;
+	case SSPP_VIG1:
+		flushbits = BIT(1);
+		break;
+	case SSPP_VIG2:
+		flushbits = BIT(2);
+		break;
+	case SSPP_VIG3:
+		flushbits = BIT(18);
+		break;
+	case SSPP_RGB0:
+		flushbits = BIT(3);
+		break;
+	case SSPP_RGB1:
+		flushbits = BIT(4);
+		break;
+	case SSPP_RGB2:
+		flushbits = BIT(5);
+		break;
+	case SSPP_RGB3:
+		flushbits = BIT(19);
+		break;
+	case SSPP_DMA0:
+		flushbits = BIT(11);
+		break;
+	case SSPP_DMA1:
+		flushbits = BIT(12);
+		break;
+	case SSPP_DMA2:
+		flushbits = BIT(24);
+		break;
+	case SSPP_DMA3:
+		flushbits = BIT(25);
+		break;
+	case SSPP_CURSOR0:
+		flushbits = BIT(22);
+		break;
+	case SSPP_CURSOR1:
+		flushbits = BIT(23);
+		break;
+	default:
+		break;
+	}
+
+	return flushbits;
+}
+
+static inline uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
+	enum dpu_lm lm)
+{
+	uint32_t flushbits = 0;
+
+	switch (lm) {
+	case LM_0:
+		flushbits = BIT(6);
+		break;
+	case LM_1:
+		flushbits = BIT(7);
+		break;
+	case LM_2:
+		flushbits = BIT(8);
+		break;
+	case LM_3:
+		flushbits = BIT(9);
+		break;
+	case LM_4:
+		flushbits = BIT(10);
+		break;
+	case LM_5:
+		flushbits = BIT(20);
+		break;
+	default:
+		/* unknown mixer: contribute no flush bits */
+		return 0;
+	}
+
+	flushbits |= CTL_FLUSH_MASK_CTL;
+
+	return flushbits;
+}
+
+static inline int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
+		u32 *flushbits, enum dpu_intf intf)
+{
+	switch (intf) {
+	case INTF_0:
+		*flushbits |= BIT(31);
+		break;
+	case INTF_1:
+		*flushbits |= BIT(30);
+		break;
+	case INTF_2:
+		*flushbits |= BIT(29);
+		break;
+	case INTF_3:
+		*flushbits |= BIT(28);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static inline int dpu_hw_ctl_get_bitmask_cdm(struct dpu_hw_ctl *ctx,
+		u32 *flushbits, enum dpu_cdm cdm)
+{
+	switch (cdm) {
+	case CDM_0:
+		*flushbits |= BIT(26);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	ktime_t timeout;
+	u32 status;
+
+	timeout = ktime_add_us(ktime_get(), timeout_us);
+
+	/*
+	 * it takes around 30us for the mdp to finish resetting its ctl path;
+	 * poll every 20-50us so that the reset completes by the first poll
+	 */
+	do {
+		status = DPU_REG_READ(c, CTL_SW_RESET);
+		status &= 0x1;
+		if (status)
+			usleep_range(20, 50);
+	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);
+
+	return status;
+}
+
+static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
+	DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
+	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	u32 status;
+
+	status = DPU_REG_READ(c, CTL_SW_RESET);
+	status &= 0x01;
+	if (!status)
+		return 0;
+
+	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
+	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
+		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	int i;
+
+	for (i = 0; i < ctx->mixer_count; i++) {
+		DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
+		DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
+		DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
+		DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
+	}
+}
+
+static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
+	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
+	u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
+	int i, j;
+	int stages;
+	int pipes_per_stage;
+
+	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
+	if (stages < 0)
+		return;
+
+	if (test_bit(DPU_MIXER_SOURCESPLIT,
+		&ctx->mixer_hw_caps->features))
+		pipes_per_stage = PIPES_PER_STAGE;
+	else
+		pipes_per_stage = 1;
+
+	mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */
+
+	if (!stage_cfg)
+		goto exit;
+
+	for (i = 0; i <= stages; i++) {
+		/* overflow to ext register if 'i + 1 > 7' */
+		mix = (i + 1) & 0x7;
+		ext = i >= 7;
+
+		for (j = 0 ; j < pipes_per_stage; j++) {
+			enum dpu_sspp_multirect_index rect_index =
+				stage_cfg->multirect_index[i][j];
+
+			switch (stage_cfg->stage[i][j]) {
+			case SSPP_VIG0:
+				if (rect_index == DPU_SSPP_RECT_1) {
+					mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
+				} else {
+					mixercfg |= mix << 0;
+					mixercfg_ext |= ext << 0;
+				}
+				break;
+			case SSPP_VIG1:
+				if (rect_index == DPU_SSPP_RECT_1) {
+					mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
+				} else {
+					mixercfg |= mix << 3;
+					mixercfg_ext |= ext << 2;
+				}
+				break;
+			case SSPP_VIG2:
+				if (rect_index == DPU_SSPP_RECT_1) {
+					mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
+				} else {
+					mixercfg |= mix << 6;
+					mixercfg_ext |= ext << 4;
+				}
+				break;
+			case SSPP_VIG3:
+				if (rect_index == DPU_SSPP_RECT_1) {
+					mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
+				} else {
+					mixercfg |= mix << 26;
+					mixercfg_ext |= ext << 6;
+				}
+				break;
+			case SSPP_RGB0:
+				mixercfg |= mix << 9;
+				mixercfg_ext |= ext << 8;
+				break;
+			case SSPP_RGB1:
+				mixercfg |= mix << 12;
+				mixercfg_ext |= ext << 10;
+				break;
+			case SSPP_RGB2:
+				mixercfg |= mix << 15;
+				mixercfg_ext |= ext << 12;
+				break;
+			case SSPP_RGB3:
+				mixercfg |= mix << 29;
+				mixercfg_ext |= ext << 14;
+				break;
+			case SSPP_DMA0:
+				if (rect_index == DPU_SSPP_RECT_1) {
+					mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
+				} else {
+					mixercfg |= mix << 18;
+					mixercfg_ext |= ext << 16;
+				}
+				break;
+			case SSPP_DMA1:
+				if (rect_index == DPU_SSPP_RECT_1) {
+					mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
+				} else {
+					mixercfg |= mix << 21;
+					mixercfg_ext |= ext << 18;
+				}
+				break;
+			case SSPP_DMA2:
+				if (rect_index == DPU_SSPP_RECT_1)
+					mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
+				else
+					mixercfg_ext2 |= ((i + 1) & 0xF) << 0;
+				break;
+			case SSPP_DMA3:
+				if (rect_index == DPU_SSPP_RECT_1)
+					mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
+				else
+					mixercfg_ext2 |= ((i + 1) & 0xF) << 4;
+				break;
+			case SSPP_CURSOR0:
+				mixercfg_ext |= ((i + 1) & 0xF) << 20;
+				break;
+			case SSPP_CURSOR1:
+				mixercfg_ext |= ((i + 1) & 0xF) << 26;
+				break;
+			default:
+				break;
+			}
+		}
+	}
+
+exit:
+	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
+	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
+	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
+	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
+}
+
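+/*
+ * CTL_TOP bit layout, as programmed by dpu_hw_ctl_intf_cfg() below
+ * (derived from this code, not from a register specification):
+ *	bits 4-7   - interface id
+ *	bits 15-16 - stream select (command mode only)
+ *	BIT(17)    - command mode select (clear for video mode)
+ *	BIT(19)    - 3D mux enable
+ *	bits 20+   - 3D blend mode, programmed as (mode_3d - 1)
+ */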
+static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
+		struct dpu_hw_intf_cfg *cfg)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	u32 intf_cfg = 0;
+
+	intf_cfg |= (cfg->intf & 0xF) << 4;
+
+	if (cfg->mode_3d) {
+		intf_cfg |= BIT(19);
+		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
+	}
+
+	switch (cfg->intf_mode_sel) {
+	case DPU_CTL_MODE_SEL_VID:
+		intf_cfg &= ~BIT(17);
+		intf_cfg &= ~(0x3 << 15);
+		break;
+	case DPU_CTL_MODE_SEL_CMD:
+		intf_cfg |= BIT(17);
+		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
+		break;
+	default:
+		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
+		return;
+	}
+
+	DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
+}
+
+static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
+		unsigned long cap)
+{
+	ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
+	ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
+	ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
+	ops->trigger_flush = dpu_hw_ctl_trigger_flush;
+	ops->get_flush_register = dpu_hw_ctl_get_flush_register;
+	ops->trigger_start = dpu_hw_ctl_trigger_start;
+	ops->trigger_pending = dpu_hw_ctl_trigger_pending;
+	ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
+	ops->reset = dpu_hw_ctl_reset_control;
+	ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
+	ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
+	ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
+	ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
+	ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
+	ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
+	ops->get_bitmask_cdm = dpu_hw_ctl_get_bitmask_cdm;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
+struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m)
+{
+	struct dpu_hw_ctl *c;
+	struct dpu_ctl_cfg *cfg;
+	int rc;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _ctl_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		pr_err("failed to create dpu_hw_ctl %d\n", idx);
+		return ERR_PTR(-EINVAL);
+	}
+
+	c->caps = cfg;
+	_setup_ctl_ops(&c->ops, c->caps->features);
+	c->idx = idx;
+	c->mixer_count = m->mixer_count;
+	c->mixer_hw_caps = m->mixer;
+
+	rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
+	if (rc) {
+		DPU_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
+	return c;
+
+blk_init_error:
+	kfree(c);
+
+	return ERR_PTR(rc);
+}
+
+void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
+{
+	if (ctx)
+		dpu_hw_blk_destroy(&ctx->base);
+	kfree(ctx);
+}
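
As a sketch, the expected flush sequence against the ops above: callers
accumulate bits in the cached mask and only then touch the hardware (the
final trigger_start applies to SW-triggered interfaces only, e.g. DSI
command mode):

	ctl->ops.update_pending_flush(ctl,
			ctl->ops.get_bitmask_sspp(ctl, SSPP_VIG0));
	ctl->ops.update_pending_flush(ctl,
			ctl->ops.get_bitmask_mixer(ctl, LM_0));
	ctl->ops.trigger_flush(ctl);
	ctl->ops.trigger_start(ctl);
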
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
new file mode 100644
index 000000000000..c66a71f8b839
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -0,0 +1,218 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_CTL_H
+#define _DPU_HW_CTL_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_hw_blk.h"
+
+/**
+ * enum dpu_ctl_mode_sel - Interface mode selection
+ * @DPU_CTL_MODE_SEL_VID:    Video mode interface
+ * @DPU_CTL_MODE_SEL_CMD:    Command mode interface
+ */
+enum dpu_ctl_mode_sel {
+	DPU_CTL_MODE_SEL_VID = 0,
+	DPU_CTL_MODE_SEL_CMD
+};
+
+struct dpu_hw_ctl;
+/**
+ * struct dpu_hw_stage_cfg - blending stage cfg
+ * @stage : SSPP_ID at each stage
+ * @multirect_index: index of the rectangle of SSPP.
+ */
+struct dpu_hw_stage_cfg {
+	enum dpu_sspp stage[DPU_STAGE_MAX][PIPES_PER_STAGE];
+	enum dpu_sspp_multirect_index multirect_index
+					[DPU_STAGE_MAX][PIPES_PER_STAGE];
+};
+
+/**
+ * struct dpu_hw_intf_cfg - Describes how the DPU writes data to the output interface
+ * @intf :                 Interface id
+ * @mode_3d:               3d mux configuration
+ * @intf_mode_sel:         Interface mode, cmd / vid
+ * @stream_sel:            Stream selection for multi-stream interfaces
+ */
+struct dpu_hw_intf_cfg {
+	enum dpu_intf intf;
+	enum dpu_3d_blend_mode mode_3d;
+	enum dpu_ctl_mode_sel intf_mode_sel;
+	int stream_sel;
+};
+
+/**
+ * struct dpu_hw_ctl_ops - Interface to the ctl path HW driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_ctl_ops {
+	/**
+	 * kickoff hw operation for Sw controlled interfaces
+	 * DSI cmd mode and WB interface are SW controlled
+	 * @ctx       : ctl path ctx pointer
+	 */
+	void (*trigger_start)(struct dpu_hw_ctl *ctx);
+
+	/**
+	 * kickoff prepare is in progress hw operation for sw
+	 * controlled interfaces: DSI cmd mode and WB interface
+	 * are SW controlled
+	 * @ctx       : ctl path ctx pointer
+	 */
+	void (*trigger_pending)(struct dpu_hw_ctl *ctx);
+
+	/**
+	 * Clear the value of the cached pending_flush_mask
+	 * No effect on hardware
+	 * @ctx       : ctl path ctx pointer
+	 */
+	void (*clear_pending_flush)(struct dpu_hw_ctl *ctx);
+
+	/**
+	 * Query the value of the cached pending_flush_mask
+	 * No effect on hardware
+	 * @ctx       : ctl path ctx pointer
+	 */
+	u32 (*get_pending_flush)(struct dpu_hw_ctl *ctx);
+
+	/**
+	 * OR in the given flushbits to the cached pending_flush_mask
+	 * No effect on hardware
+	 * @ctx       : ctl path ctx pointer
+	 * @flushbits : module flushmask
+	 */
+	void (*update_pending_flush)(struct dpu_hw_ctl *ctx,
+		u32 flushbits);
+
+	/**
+	 * Write the value of the pending_flush_mask to hardware
+	 * @ctx       : ctl path ctx pointer
+	 */
+	void (*trigger_flush)(struct dpu_hw_ctl *ctx);
+
+	/**
+	 * Read the value of the flush register
+	 * @ctx       : ctl path ctx pointer
+	 * @Return: value of the ctl flush register.
+	 */
+	u32 (*get_flush_register)(struct dpu_hw_ctl *ctx);
+
+	/**
+	 * Setup ctl_path interface config
+	 * @ctx
+	 * @cfg    : interface config structure pointer
+	 */
+	void (*setup_intf_cfg)(struct dpu_hw_ctl *ctx,
+		struct dpu_hw_intf_cfg *cfg);
+
+	int (*reset)(struct dpu_hw_ctl *c);
+
+	/*
+	 * wait_reset_status - checks ctl reset status
+	 * @ctx       : ctl path ctx pointer
+	 *
+	 * This function checks the ctl reset status bit.
+	 * If the reset bit is set, it keeps polling the status till the hw
+	 * reset is complete.
+	 * Returns: 0 on success or -error if reset incomplete within interval
+	 */
+	int (*wait_reset_status)(struct dpu_hw_ctl *ctx);
+
+	uint32_t (*get_bitmask_sspp)(struct dpu_hw_ctl *ctx,
+		enum dpu_sspp blk);
+
+	uint32_t (*get_bitmask_mixer)(struct dpu_hw_ctl *ctx,
+		enum dpu_lm blk);
+
+	int (*get_bitmask_intf)(struct dpu_hw_ctl *ctx,
+		u32 *flushbits,
+		enum dpu_intf blk);
+
+	int (*get_bitmask_cdm)(struct dpu_hw_ctl *ctx,
+		u32 *flushbits,
+		enum dpu_cdm blk);
+
+	/**
+	 * Set all blend stages to disabled
+	 * @ctx       : ctl path ctx pointer
+	 */
+	void (*clear_all_blendstages)(struct dpu_hw_ctl *ctx);
+
+	/**
+	 * Configure layer mixer to pipe configuration
+	 * @ctx       : ctl path ctx pointer
+	 * @lm        : layer mixer enumeration
+	 * @cfg       : blend stage configuration
+	 */
+	void (*setup_blendstage)(struct dpu_hw_ctl *ctx,
+		enum dpu_lm lm, struct dpu_hw_stage_cfg *cfg);
+};
+
+/**
+ * struct dpu_hw_ctl : CTL PATH driver object
+ * @base: hardware block base structure
+ * @hw: block register map object
+ * @idx: control path index
+ * @caps: control path capabilities
+ * @mixer_count: number of mixers
+ * @mixer_hw_caps: mixer hardware capabilities
+ * @pending_flush_mask: storage for pending ctl_flush managed via ops
+ * @ops: operation list
+ */
+struct dpu_hw_ctl {
+	struct dpu_hw_blk base;
+	struct dpu_hw_blk_reg_map hw;
+
+	/* ctl path */
+	int idx;
+	const struct dpu_ctl_cfg *caps;
+	int mixer_count;
+	const struct dpu_lm_cfg *mixer_hw_caps;
+	u32 pending_flush_mask;
+
+	/* ops */
+	struct dpu_hw_ctl_ops ops;
+};
+
+/**
+ * to_dpu_hw_ctl - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw)
+{
+	return container_of(hw, struct dpu_hw_ctl, base);
+}
+
+/**
+ * dpu_hw_ctl_init(): Initializes the ctl_path hw driver object.
+ * Should be called before accessing any ctl path registers.
+ * @idx:  ctl_path index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m :   pointer to mdss catalog data
+ */
+struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_ctl_destroy(): Destroys the ctl driver context
+ * @ctx: pointer to the ctl path context to free
+ */
+void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx);
+
+#endif /*_DPU_HW_CTL_H */
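
A sketch of staging a two-pipe blend on LM_0 with the structures above
(stage indices are zero-based; a zeroed multirect_index takes the
non-DPU_SSPP_RECT_1 path in setup_blendstage):

	struct dpu_hw_stage_cfg stage_cfg = { 0 };

	stage_cfg.stage[0][0] = SSPP_VIG0;	/* bottom-most stage */
	stage_cfg.stage[1][0] = SSPP_DMA0;
	ctl->ops.setup_blendstage(ctl, LM_0, &stage_cfg);
	ctl->ops.update_pending_flush(ctl,
			ctl->ops.get_bitmask_mixer(ctl, LM_0));
	ctl->ops.trigger_flush(ctl);
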
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
new file mode 100644
index 000000000000..a7bced2e05bf
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -0,0 +1,1213 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/slab.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_mdss.h"
+
+/**
+ * Register offsets in MDSS register file for the interrupt registers
+ * w.r.t. the MDP base
+ */
+#define MDP_SSPP_TOP0_OFF		0x0
+#define MDP_INTF_0_OFF			0x6A000
+#define MDP_INTF_1_OFF			0x6A800
+#define MDP_INTF_2_OFF			0x6B000
+#define MDP_INTF_3_OFF			0x6B800
+#define MDP_INTF_4_OFF			0x6C000
+#define MDP_AD4_0_OFF			0x7C000
+#define MDP_AD4_1_OFF			0x7D000
+#define MDP_AD4_INTR_EN_OFF		0x41c
+#define MDP_AD4_INTR_CLEAR_OFF		0x424
+#define MDP_AD4_INTR_STATUS_OFF		0x420
+
+/**
+ * WB interrupt status bit definitions
+ */
+#define DPU_INTR_WB_0_DONE BIT(0)
+#define DPU_INTR_WB_1_DONE BIT(1)
+#define DPU_INTR_WB_2_DONE BIT(4)
+
+/**
+ * WDOG timer interrupt status bit definitions
+ */
+#define DPU_INTR_WD_TIMER_0_DONE BIT(2)
+#define DPU_INTR_WD_TIMER_1_DONE BIT(3)
+#define DPU_INTR_WD_TIMER_2_DONE BIT(5)
+#define DPU_INTR_WD_TIMER_3_DONE BIT(6)
+#define DPU_INTR_WD_TIMER_4_DONE BIT(7)
+
+/**
+ * Pingpong interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_0_DONE BIT(8)
+#define DPU_INTR_PING_PONG_1_DONE BIT(9)
+#define DPU_INTR_PING_PONG_2_DONE BIT(10)
+#define DPU_INTR_PING_PONG_3_DONE BIT(11)
+#define DPU_INTR_PING_PONG_0_RD_PTR BIT(12)
+#define DPU_INTR_PING_PONG_1_RD_PTR BIT(13)
+#define DPU_INTR_PING_PONG_2_RD_PTR BIT(14)
+#define DPU_INTR_PING_PONG_3_RD_PTR BIT(15)
+#define DPU_INTR_PING_PONG_0_WR_PTR BIT(16)
+#define DPU_INTR_PING_PONG_1_WR_PTR BIT(17)
+#define DPU_INTR_PING_PONG_2_WR_PTR BIT(18)
+#define DPU_INTR_PING_PONG_3_WR_PTR BIT(19)
+#define DPU_INTR_PING_PONG_0_AUTOREFRESH_DONE BIT(20)
+#define DPU_INTR_PING_PONG_1_AUTOREFRESH_DONE BIT(21)
+#define DPU_INTR_PING_PONG_2_AUTOREFRESH_DONE BIT(22)
+#define DPU_INTR_PING_PONG_3_AUTOREFRESH_DONE BIT(23)
+
+/**
+ * Interface interrupt status bit definitions
+ */
+#define DPU_INTR_INTF_0_UNDERRUN BIT(24)
+#define DPU_INTR_INTF_1_UNDERRUN BIT(26)
+#define DPU_INTR_INTF_2_UNDERRUN BIT(28)
+#define DPU_INTR_INTF_3_UNDERRUN BIT(30)
+#define DPU_INTR_INTF_0_VSYNC BIT(25)
+#define DPU_INTR_INTF_1_VSYNC BIT(27)
+#define DPU_INTR_INTF_2_VSYNC BIT(29)
+#define DPU_INTR_INTF_3_VSYNC BIT(31)
+
+/**
+ * Pingpong Secondary interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_S0_AUTOREFRESH_DONE BIT(0)
+#define DPU_INTR_PING_PONG_S0_WR_PTR BIT(4)
+#define DPU_INTR_PING_PONG_S0_RD_PTR BIT(8)
+#define DPU_INTR_PING_PONG_S0_TEAR_DETECTED BIT(22)
+#define DPU_INTR_PING_PONG_S0_TE_DETECTED BIT(28)
+
+/**
+ * Pingpong TEAR detection interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_0_TEAR_DETECTED BIT(16)
+#define DPU_INTR_PING_PONG_1_TEAR_DETECTED BIT(17)
+#define DPU_INTR_PING_PONG_2_TEAR_DETECTED BIT(18)
+#define DPU_INTR_PING_PONG_3_TEAR_DETECTED BIT(19)
+
+/**
+ * Pingpong TE detection interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_0_TE_DETECTED BIT(24)
+#define DPU_INTR_PING_PONG_1_TE_DETECTED BIT(25)
+#define DPU_INTR_PING_PONG_2_TE_DETECTED BIT(26)
+#define DPU_INTR_PING_PONG_3_TE_DETECTED BIT(27)
+
+/**
+ * Ctl start interrupt status bit definitions
+ */
+#define DPU_INTR_CTL_0_START BIT(9)
+#define DPU_INTR_CTL_1_START BIT(10)
+#define DPU_INTR_CTL_2_START BIT(11)
+#define DPU_INTR_CTL_3_START BIT(12)
+#define DPU_INTR_CTL_4_START BIT(13)
+
+/**
+ * Concurrent WB overflow interrupt status bit definitions
+ */
+#define DPU_INTR_CWB_2_OVERFLOW BIT(14)
+#define DPU_INTR_CWB_3_OVERFLOW BIT(15)
+
+/**
+ * Histogram VIG done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_VIG_0_DONE BIT(0)
+#define DPU_INTR_HIST_VIG_1_DONE BIT(4)
+#define DPU_INTR_HIST_VIG_2_DONE BIT(8)
+#define DPU_INTR_HIST_VIG_3_DONE BIT(10)
+
+/**
+ * Histogram VIG reset sequence done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_VIG_0_RSTSEQ_DONE BIT(1)
+#define DPU_INTR_HIST_VIG_1_RSTSEQ_DONE BIT(5)
+#define DPU_INTR_HIST_VIG_2_RSTSEQ_DONE BIT(9)
+#define DPU_INTR_HIST_VIG_3_RSTSEQ_DONE BIT(11)
+
+/**
+ * Histogram DSPP done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_DSPP_0_DONE BIT(12)
+#define DPU_INTR_HIST_DSPP_1_DONE BIT(16)
+#define DPU_INTR_HIST_DSPP_2_DONE BIT(20)
+#define DPU_INTR_HIST_DSPP_3_DONE BIT(22)
+
+/**
+ * Histogram DSPP reset sequence done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_DSPP_0_RSTSEQ_DONE BIT(13)
+#define DPU_INTR_HIST_DSPP_1_RSTSEQ_DONE BIT(17)
+#define DPU_INTR_HIST_DSPP_2_RSTSEQ_DONE BIT(21)
+#define DPU_INTR_HIST_DSPP_3_RSTSEQ_DONE BIT(23)
+
+/**
+ * INTF interrupt status bit definitions
+ */
+#define DPU_INTR_VIDEO_INTO_STATIC BIT(0)
+#define DPU_INTR_VIDEO_OUTOF_STATIC BIT(1)
+#define DPU_INTR_DSICMD_0_INTO_STATIC BIT(2)
+#define DPU_INTR_DSICMD_0_OUTOF_STATIC BIT(3)
+#define DPU_INTR_DSICMD_1_INTO_STATIC BIT(4)
+#define DPU_INTR_DSICMD_1_OUTOF_STATIC BIT(5)
+#define DPU_INTR_DSICMD_2_INTO_STATIC BIT(6)
+#define DPU_INTR_DSICMD_2_OUTOF_STATIC BIT(7)
+#define DPU_INTR_PROG_LINE BIT(8)
+
+/**
+ * AD4 interrupt status bit definitions
+ */
+#define DPU_INTR_BRIGHTPR_UPDATED BIT(4)
+#define DPU_INTR_DARKENH_UPDATED BIT(3)
+#define DPU_INTR_STREN_OUTROI_UPDATED BIT(2)
+#define DPU_INTR_STREN_INROI_UPDATED BIT(1)
+#define DPU_INTR_BACKLIGHT_UPDATED BIT(0)
+/**
+ * struct dpu_intr_reg - array of DPU register sets
+ * @clr_off:	offset to CLEAR reg
+ * @en_off:	offset to ENABLE reg
+ * @status_off:	offset to STATUS reg
+ */
+struct dpu_intr_reg {
+	u32 clr_off;
+	u32 en_off;
+	u32 status_off;
+};
+
+/**
+ * struct dpu_irq_type - maps each irq with i/f
+ * @intr_type:		type of interrupt listed in dpu_intr_type
+ * @instance_idx:	instance index of the associated HW block in DPU
+ * @irq_mask:		corresponding bit in the interrupt status reg
+ * @reg_idx:		which reg set to use
+ */
+struct dpu_irq_type {
+	u32 intr_type;
+	u32 instance_idx;
+	u32 irq_mask;
+	u32 reg_idx;
+};
+
+/**
+ * List of DPU interrupt registers
+ */
+static const struct dpu_intr_reg dpu_intr_set[] = {
+	{
+		MDP_SSPP_TOP0_OFF+INTR_CLEAR,
+		MDP_SSPP_TOP0_OFF+INTR_EN,
+		MDP_SSPP_TOP0_OFF+INTR_STATUS
+	},
+	{
+		MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
+		MDP_SSPP_TOP0_OFF+INTR2_EN,
+		MDP_SSPP_TOP0_OFF+INTR2_STATUS
+	},
+	{
+		MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
+		MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
+		MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
+	},
+	{
+		MDP_INTF_0_OFF+INTF_INTR_CLEAR,
+		MDP_INTF_0_OFF+INTF_INTR_EN,
+		MDP_INTF_0_OFF+INTF_INTR_STATUS
+	},
+	{
+		MDP_INTF_1_OFF+INTF_INTR_CLEAR,
+		MDP_INTF_1_OFF+INTF_INTR_EN,
+		MDP_INTF_1_OFF+INTF_INTR_STATUS
+	},
+	{
+		MDP_INTF_2_OFF+INTF_INTR_CLEAR,
+		MDP_INTF_2_OFF+INTF_INTR_EN,
+		MDP_INTF_2_OFF+INTF_INTR_STATUS
+	},
+	{
+		MDP_INTF_3_OFF+INTF_INTR_CLEAR,
+		MDP_INTF_3_OFF+INTF_INTR_EN,
+		MDP_INTF_3_OFF+INTF_INTR_STATUS
+	},
+	{
+		MDP_INTF_4_OFF+INTF_INTR_CLEAR,
+		MDP_INTF_4_OFF+INTF_INTR_EN,
+		MDP_INTF_4_OFF+INTF_INTR_STATUS
+	},
+	{
+		MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
+		MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
+		MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
+	},
+	{
+		MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
+		MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
+		MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
+	}
+};
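+
+/*
+ * Each register set above services 32 interrupt bits, so irq_idx values
+ * in the mapping table below decompose as:
+ *
+ *	irq_idx = reg_idx * 32 + bit_position
+ *
+ * e.g. irq_idx 41 (CTL_0 start) lives in register set 1 (INTR2) at bit 9,
+ * matching DPU_INTR_CTL_0_START == BIT(9).
+ */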
+
+/**
+ * IRQ mapping table - used to look up the irq_idx in this table that has
+ *                     a matching interrupt type and instance index.
+ */
+static const struct dpu_irq_type dpu_irq_map[] = {
+	/* BEGIN MAP_RANGE: 0-31, INTR */
+	/* irq_idx: 0-3 */
+	{ DPU_IRQ_TYPE_WB_ROT_COMP, WB_0, DPU_INTR_WB_0_DONE, 0},
+	{ DPU_IRQ_TYPE_WB_ROT_COMP, WB_1, DPU_INTR_WB_1_DONE, 0},
+	{ DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_0, DPU_INTR_WD_TIMER_0_DONE, 0},
+	{ DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_1, DPU_INTR_WD_TIMER_1_DONE, 0},
+	/* irq_idx: 4-7 */
+	{ DPU_IRQ_TYPE_WB_WFD_COMP, WB_2, DPU_INTR_WB_2_DONE, 0},
+	{ DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_2, DPU_INTR_WD_TIMER_2_DONE, 0},
+	{ DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_3, DPU_INTR_WD_TIMER_3_DONE, 0},
+	{ DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_4, DPU_INTR_WD_TIMER_4_DONE, 0},
+	/* irq_idx: 8-11 */
+	{ DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_0,
+		DPU_INTR_PING_PONG_0_DONE, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_1,
+		DPU_INTR_PING_PONG_1_DONE, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_2,
+		DPU_INTR_PING_PONG_2_DONE, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_3,
+		DPU_INTR_PING_PONG_3_DONE, 0},
+	/* irq_idx: 12-15 */
+	{ DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_0,
+		DPU_INTR_PING_PONG_0_RD_PTR, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_1,
+		DPU_INTR_PING_PONG_1_RD_PTR, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_2,
+		DPU_INTR_PING_PONG_2_RD_PTR, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_3,
+		DPU_INTR_PING_PONG_3_RD_PTR, 0},
+	/* irq_idx: 16-19 */
+	{ DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_0,
+		DPU_INTR_PING_PONG_0_WR_PTR, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_1,
+		DPU_INTR_PING_PONG_1_WR_PTR, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_2,
+		DPU_INTR_PING_PONG_2_WR_PTR, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_3,
+		DPU_INTR_PING_PONG_3_WR_PTR, 0},
+	/* irq_idx: 20-23 */
+	{ DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_0,
+		DPU_INTR_PING_PONG_0_AUTOREFRESH_DONE, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_1,
+		DPU_INTR_PING_PONG_1_AUTOREFRESH_DONE, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_2,
+		DPU_INTR_PING_PONG_2_AUTOREFRESH_DONE, 0},
+	{ DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_3,
+		DPU_INTR_PING_PONG_3_AUTOREFRESH_DONE, 0},
+	/* irq_idx: 24-27 */
+	{ DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_0, DPU_INTR_INTF_0_UNDERRUN, 0},
+	{ DPU_IRQ_TYPE_INTF_VSYNC, INTF_0, DPU_INTR_INTF_0_VSYNC, 0},
+	{ DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_1, DPU_INTR_INTF_1_UNDERRUN, 0},
+	{ DPU_IRQ_TYPE_INTF_VSYNC, INTF_1, DPU_INTR_INTF_1_VSYNC, 0},
+	/* irq_idx: 28-31 */
+	{ DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_2, DPU_INTR_INTF_2_UNDERRUN, 0},
+	{ DPU_IRQ_TYPE_INTF_VSYNC, INTF_2, DPU_INTR_INTF_2_VSYNC, 0},
+	{ DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_3, DPU_INTR_INTF_3_UNDERRUN, 0},
+	{ DPU_IRQ_TYPE_INTF_VSYNC, INTF_3, DPU_INTR_INTF_3_VSYNC, 0},
+
+	/* BEGIN MAP_RANGE: 32-63, INTR2 */
+	/* irq_idx: 32-35 */
+	{ DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_S0,
+		DPU_INTR_PING_PONG_S0_AUTOREFRESH_DONE, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	/* irq_idx: 36-39 */
+	{ DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_S0,
+		DPU_INTR_PING_PONG_S0_WR_PTR, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	/* irq_idx: 40 */
+	{ DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_S0,
+		DPU_INTR_PING_PONG_S0_RD_PTR, 1},
+	/* irq_idx: 41-45 */
+	{ DPU_IRQ_TYPE_CTL_START, CTL_0,
+		DPU_INTR_CTL_0_START, 1},
+	{ DPU_IRQ_TYPE_CTL_START, CTL_1,
+		DPU_INTR_CTL_1_START, 1},
+	{ DPU_IRQ_TYPE_CTL_START, CTL_2,
+		DPU_INTR_CTL_2_START, 1},
+	{ DPU_IRQ_TYPE_CTL_START, CTL_3,
+		DPU_INTR_CTL_3_START, 1},
+	{ DPU_IRQ_TYPE_CTL_START, CTL_4,
+		DPU_INTR_CTL_4_START, 1},
+	/* irq_idx: 46-47 */
+	{ DPU_IRQ_TYPE_CWB_OVERFLOW, CWB_2, DPU_INTR_CWB_2_OVERFLOW, 1},
+	{ DPU_IRQ_TYPE_CWB_OVERFLOW, CWB_3, DPU_INTR_CWB_3_OVERFLOW, 1},
+	/* irq_idx: 48-51 */
+	{ DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_0,
+		DPU_INTR_PING_PONG_0_TEAR_DETECTED, 1},
+	{ DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_1,
+		DPU_INTR_PING_PONG_1_TEAR_DETECTED, 1},
+	{ DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_2,
+		DPU_INTR_PING_PONG_2_TEAR_DETECTED, 1},
+	{ DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_3,
+		DPU_INTR_PING_PONG_3_TEAR_DETECTED, 1},
+	/* irq_idx: 52-55 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_S0,
+		DPU_INTR_PING_PONG_S0_TEAR_DETECTED, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	/* irq_idx: 56-59 */
+	{ DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_0,
+		DPU_INTR_PING_PONG_0_TE_DETECTED, 1},
+	{ DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_1,
+		DPU_INTR_PING_PONG_1_TE_DETECTED, 1},
+	{ DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_2,
+		DPU_INTR_PING_PONG_2_TE_DETECTED, 1},
+	{ DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_3,
+		DPU_INTR_PING_PONG_3_TE_DETECTED, 1},
+	/* irq_idx: 60-63 */
+	{ DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_S0,
+		DPU_INTR_PING_PONG_S0_TE_DETECTED, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+
+	/* BEGIN MAP_RANGE: 64-95 HIST */
+	/* irq_idx: 64-67 */
+	{ DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, DPU_INTR_HIST_VIG_0_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0,
+		DPU_INTR_HIST_VIG_0_RSTSEQ_DONE, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	/* irq_idx: 68-71 */
+	{ DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, DPU_INTR_HIST_VIG_1_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1,
+		DPU_INTR_HIST_VIG_1_RSTSEQ_DONE, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	/* irq_idx: 72-75 */
+	{ DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, DPU_INTR_HIST_VIG_2_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
+		DPU_INTR_HIST_VIG_2_RSTSEQ_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, DPU_INTR_HIST_VIG_3_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3,
+		DPU_INTR_HIST_VIG_3_RSTSEQ_DONE, 2},
+	/* irq_idx: 76-79 */
+	{ DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, DPU_INTR_HIST_DSPP_0_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
+		DPU_INTR_HIST_DSPP_0_RSTSEQ_DONE, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	/* irq_idx: 80-83 */
+	{ DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, DPU_INTR_HIST_DSPP_1_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
+		DPU_INTR_HIST_DSPP_1_RSTSEQ_DONE, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	/* irq_idx: 84-87 */
+	{ DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, DPU_INTR_HIST_DSPP_2_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
+		DPU_INTR_HIST_DSPP_2_RSTSEQ_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, DPU_INTR_HIST_DSPP_3_DONE, 2},
+	{ DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3,
+		DPU_INTR_HIST_DSPP_3_RSTSEQ_DONE, 2},
+	/* irq_idx: 88-91 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	/* irq_idx: 92-95 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+
+	/* BEGIN MAP_RANGE: 96-127 INTF_0_INTR */
+	/* irq_idx: 96-99 */
+	{ DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_0,
+		DPU_INTR_VIDEO_INTO_STATIC, 3},
+	{ DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_0,
+		DPU_INTR_VIDEO_OUTOF_STATIC, 3},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_0,
+		DPU_INTR_DSICMD_0_INTO_STATIC, 3},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_0,
+		DPU_INTR_DSICMD_0_OUTOF_STATIC, 3},
+	/* irq_idx: 100-103 */
+	{ DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_0,
+		DPU_INTR_DSICMD_1_INTO_STATIC, 3},
+	{ DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_0,
+		DPU_INTR_DSICMD_1_OUTOF_STATIC, 3},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_0,
+		DPU_INTR_DSICMD_2_INTO_STATIC, 3},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_0,
+		DPU_INTR_DSICMD_2_OUTOF_STATIC, 3},
+	/* irq_idx: 104-107 */
+	{ DPU_IRQ_TYPE_PROG_LINE, INTF_0, DPU_INTR_PROG_LINE, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	/* irq_idx: 108-111 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	/* irq_idx: 112-115 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	/* irq_idx: 116-119 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	/* irq_idx: 120-123 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	/* irq_idx: 124-127 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+
+	/* BEGIN MAP_RANGE: 128-159 INTF_1_INTR */
+	/* irq_idx: 128-131 */
+	{ DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_1,
+		DPU_INTR_VIDEO_INTO_STATIC, 4},
+	{ DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_1,
+		DPU_INTR_VIDEO_OUTOF_STATIC, 4},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_1,
+		DPU_INTR_DSICMD_0_INTO_STATIC, 4},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_1,
+		DPU_INTR_DSICMD_0_OUTOF_STATIC, 4},
+	/* irq_idx: 132-135 */
+	{ DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_1,
+		DPU_INTR_DSICMD_1_INTO_STATIC, 4},
+	{ DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_1,
+		DPU_INTR_DSICMD_1_OUTOF_STATIC, 4},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_1,
+		DPU_INTR_DSICMD_2_INTO_STATIC, 4},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_1,
+		DPU_INTR_DSICMD_2_OUTOF_STATIC, 4},
+	/* irq_idx: 136-139 */
+	{ DPU_IRQ_TYPE_PROG_LINE, INTF_1, DPU_INTR_PROG_LINE, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	/* irq_idx: 140-143 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	/* irq_idx: 144-147 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	/* irq_idx: 148-151 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	/* irq_idx: 152-155 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	/* irq_idx: 156-159 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+
+	/* BEGIN MAP_RANGE: 160-191 INTF_2_INTR */
+	/* irq_idx: 160-163 */
+	{ DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_2,
+		DPU_INTR_VIDEO_INTO_STATIC, 5},
+	{ DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_2,
+		DPU_INTR_VIDEO_OUTOF_STATIC, 5},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_2,
+		DPU_INTR_DSICMD_0_INTO_STATIC, 5},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_2,
+		DPU_INTR_DSICMD_0_OUTOF_STATIC, 5},
+	/* irq_idx: 164-167 */
+	{ DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_2,
+		DPU_INTR_DSICMD_1_INTO_STATIC, 5},
+	{ DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_2,
+		DPU_INTR_DSICMD_1_OUTOF_STATIC, 5},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_2,
+		DPU_INTR_DSICMD_2_INTO_STATIC, 5},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_2,
+		DPU_INTR_DSICMD_2_OUTOF_STATIC, 5},
+	/* irq_idx: 168-171 */
+	{ DPU_IRQ_TYPE_PROG_LINE, INTF_2, DPU_INTR_PROG_LINE, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	/* irq_idx: 172-175 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	/* irq_idx: 176-179 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	/* irq_idx: 180-183 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	/* irq_idx: 184-187 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	/* irq_idx: 188-191 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+
+	/* BEGIN MAP_RANGE: 192-223 INTF_3_INTR */
+	/* irq_idx: 192-195 */
+	{ DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_3,
+		DPU_INTR_VIDEO_INTO_STATIC, 6},
+	{ DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_3,
+		DPU_INTR_VIDEO_OUTOF_STATIC, 6},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_3,
+		DPU_INTR_DSICMD_0_INTO_STATIC, 6},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_3,
+		DPU_INTR_DSICMD_0_OUTOF_STATIC, 6},
+	/* irq_idx: 196-199 */
+	{ DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_3,
+		DPU_INTR_DSICMD_1_INTO_STATIC, 6},
+	{ DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_3,
+		DPU_INTR_DSICMD_1_OUTOF_STATIC, 6},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_3,
+		DPU_INTR_DSICMD_2_INTO_STATIC, 6},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_3,
+		DPU_INTR_DSICMD_2_OUTOF_STATIC, 6},
+	/* irq_idx: 200-203 */
+	{ DPU_IRQ_TYPE_PROG_LINE, INTF_3, DPU_INTR_PROG_LINE, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	/* irq_idx: 204-207 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	/* irq_idx: 208-211 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	/* irq_idx: 212-215 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	/* irq_idx: 216-219 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	/* irq_idx: 220-223 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+
+	/* BEGIN MAP_RANGE: 224-255 INTF_4_INTR */
+	/* irq_idx: 224-227 */
+	{ DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_4,
+		DPU_INTR_VIDEO_INTO_STATIC, 7},
+	{ DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_4,
+		DPU_INTR_VIDEO_OUTOF_STATIC, 7},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_4,
+		DPU_INTR_DSICMD_0_INTO_STATIC, 7},
+	{ DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_4,
+		DPU_INTR_DSICMD_0_OUTOF_STATIC, 7},
+	/* irq_idx: 228-231 */
+	{ DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_4,
+		DPU_INTR_DSICMD_1_INTO_STATIC, 7},
+	{ DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_4,
+		DPU_INTR_DSICMD_1_OUTOF_STATIC, 7},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_4,
+		DPU_INTR_DSICMD_2_INTO_STATIC, 7},
+	{ DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_4,
+		DPU_INTR_DSICMD_2_OUTOF_STATIC, 7},
+	/* irq_idx: 232-235 */
+	{ DPU_IRQ_TYPE_PROG_LINE, INTF_4, DPU_INTR_PROG_LINE, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	/* irq_idx: 236-239 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	/* irq_idx: 240-243 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	/* irq_idx: 244-247 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	/* irq_idx: 248-251 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	/* irq_idx: 252-255 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+
+	/* BEGIN MAP_RANGE: 256-287 AD4_0_INTR */
+	/* irq_idx: 256-259 */
+	{ DPU_IRQ_TYPE_AD4_BL_DONE, DSPP_0, DPU_INTR_BACKLIGHT_UPDATED, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	/* irq_idx: 260-263 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	/* irq_idx: 264-267 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	/* irq_idx: 268-271 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	/* irq_idx: 272-275 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	/* irq_idx: 276-279 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	/* irq_idx: 280-283 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	/* irq_idx: 284-287 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+
+	/* BEGIN MAP_RANGE: 288-319 AD4_1_INTR */
+	/* irq_idx: 288-291 */
+	{ DPU_IRQ_TYPE_AD4_BL_DONE, DSPP_1, DPU_INTR_BACKLIGHT_UPDATED, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	/* irq_idx: 292-295 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	/* irq_idx: 296-299 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	/* irq_idx: 300-303 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	/* irq_idx: 304-307 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	/* irq_idx: 308-311 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	/* irq_idx: 312-315 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	/* irq_idx: 316-319 */
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	{ DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+};
+
+static int dpu_hw_intr_irqidx_lookup(enum dpu_intr_type intr_type,
+		u32 instance_idx)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dpu_irq_map); i++) {
+		if (intr_type == dpu_irq_map[i].intr_type &&
+			instance_idx == dpu_irq_map[i].instance_idx)
+			return i;
+	}
+
+	pr_debug("IRQ lookup fail!! intr_type=%d, instance_idx=%d\n",
+			intr_type, instance_idx);
+	return -EINVAL;
+}
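+
+/*
+ * Illustrative lookup sketch (not part of this patch; caller context is
+ * an assumption): a client resolves the irq_idx once, then reuses it with
+ * the enable/disable ops, e.g. for the INTF_1 programmable-line interrupt
+ * present in dpu_irq_map above:
+ *
+ *	int irq_idx = dpu_hw_intr_irqidx_lookup(DPU_IRQ_TYPE_PROG_LINE,
+ *						INTF_1);
+ *	if (irq_idx >= 0)
+ *		intr->ops.enable_irq(intr, irq_idx);
+ */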
+
+static void dpu_hw_intr_set_mask(struct dpu_hw_intr *intr, uint32_t reg_off,
+		uint32_t mask)
+{
+	if (!intr)
+		return;
+
+	DPU_REG_WRITE(&intr->hw, reg_off, mask);
+
+	/* ensure register writes go through */
+	wmb();
+}
+
+static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
+		void (*cbfunc)(void *, int),
+		void *arg)
+{
+	int reg_idx;
+	int irq_idx;
+	int start_idx;
+	int end_idx;
+	u32 irq_status;
+	unsigned long irq_flags;
+
+	if (!intr)
+		return;
+
+	/*
+	 * The caller saves the IRQ status before calling here (see
+	 * get_interrupt_statuses). Walk each saved status register and
+	 * find the matching irq lookup index.
+	 */
+	spin_lock_irqsave(&intr->irq_lock, irq_flags);
+	for (reg_idx = 0; reg_idx < ARRAY_SIZE(dpu_intr_set); reg_idx++) {
+		irq_status = intr->save_irq_status[reg_idx];
+
+		/*
+		 * Each interrupt status register covers a fixed range of
+		 * 32 indexes in dpu_irq_map.
+		 */
+		start_idx = reg_idx * 32;
+		end_idx = start_idx + 32;
+
+		if (start_idx >= ARRAY_SIZE(dpu_irq_map) ||
+				end_idx > ARRAY_SIZE(dpu_irq_map))
+			continue;
+
+		/*
+		 * Search for matching interrupt status bits in the irq
+		 * map; start_idx and end_idx define the search range
+		 * within dpu_irq_map.
+		 */
+		for (irq_idx = start_idx;
+				(irq_idx < end_idx) && irq_status;
+				irq_idx++)
+			if ((irq_status & dpu_irq_map[irq_idx].irq_mask) &&
+				(dpu_irq_map[irq_idx].reg_idx == reg_idx)) {
+				/*
+				 * On an irq mask match, invoke the given
+				 * cbfunc, which is responsible for clearing
+				 * the interrupt status. If no cbfunc is
+				 * provided, clear the interrupt status here.
+				 */
+				if (cbfunc)
+					cbfunc(arg, irq_idx);
+				else
+					intr->ops.clear_intr_status_nolock(
+							intr, irq_idx);
+
+				/*
+				 * When the callback returns, clear the
+				 * matching mask from irq_status. Once
+				 * irq_status reaches zero, the search can
+				 * stop.
+				 */
+				irq_status &= ~dpu_irq_map[irq_idx].irq_mask;
+			}
+	}
+	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
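+
+/*
+ * Sketch of the expected interrupt-context flow (the callback name and
+ * private pointer here are assumptions, not part of this file): the
+ * caller latches the status registers first, then dispatches:
+ *
+ *	intr->ops.get_interrupt_statuses(intr);
+ *	intr->ops.dispatch_irqs(intr, dpu_core_irq_cb, priv);
+ */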
+
+static int dpu_hw_intr_enable_irq(struct dpu_hw_intr *intr, int irq_idx)
+{
+	int reg_idx;
+	unsigned long irq_flags;
+	const struct dpu_intr_reg *reg;
+	const struct dpu_irq_type *irq;
+	const char *dbgstr = NULL;
+	uint32_t cache_irq_mask;
+
+	if (!intr)
+		return -EINVAL;
+
+	if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+		pr_err("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	irq = &dpu_irq_map[irq_idx];
+	reg_idx = irq->reg_idx;
+	reg = &dpu_intr_set[reg_idx];
+
+	spin_lock_irqsave(&intr->irq_lock, irq_flags);
+	cache_irq_mask = intr->cache_irq_mask[reg_idx];
+	if (cache_irq_mask & irq->irq_mask) {
+		dbgstr = "DPU IRQ already set:";
+	} else {
+		dbgstr = "DPU IRQ enabled:";
+
+		cache_irq_mask |= irq->irq_mask;
+		/* Clear any pending interrupts */
+		DPU_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+		/* Enable interrupts with the new mask */
+		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+
+		/* ensure register write goes through */
+		wmb();
+
+		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+	}
+	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+	pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
+			irq->irq_mask, cache_irq_mask);
+
+	return 0;
+}
+
+static int dpu_hw_intr_disable_irq_nolock(struct dpu_hw_intr *intr, int irq_idx)
+{
+	int reg_idx;
+	const struct dpu_intr_reg *reg;
+	const struct dpu_irq_type *irq;
+	const char *dbgstr = NULL;
+	uint32_t cache_irq_mask;
+
+	if (!intr)
+		return -EINVAL;
+
+	if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+		pr_err("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	irq = &dpu_irq_map[irq_idx];
+	reg_idx = irq->reg_idx;
+	reg = &dpu_intr_set[reg_idx];
+
+	cache_irq_mask = intr->cache_irq_mask[reg_idx];
+	if ((cache_irq_mask & irq->irq_mask) == 0) {
+		dbgstr = "DPU IRQ is already cleared:";
+	} else {
+		dbgstr = "DPU IRQ mask disable:";
+
+		cache_irq_mask &= ~irq->irq_mask;
+		/* Disable interrupts based on the new mask */
+		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+		/* Clear any pending interrupts */
+		DPU_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+
+		/* ensure register write goes through */
+		wmb();
+
+		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+	}
+
+	pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
+			irq->irq_mask, cache_irq_mask);
+
+	return 0;
+}
+
+static int dpu_hw_intr_disable_irq(struct dpu_hw_intr *intr, int irq_idx)
+{
+	unsigned long irq_flags;
+
+	if (!intr)
+		return -EINVAL;
+
+	if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+		pr_err("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&intr->irq_lock, irq_flags);
+	dpu_hw_intr_disable_irq_nolock(intr, irq_idx);
+	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+	return 0;
+}
+
+static int dpu_hw_intr_clear_irqs(struct dpu_hw_intr *intr)
+{
+	int i;
+
+	if (!intr)
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++)
+		DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].clr_off, 0xffffffff);
+
+	/* ensure register writes go through */
+	wmb();
+
+	return 0;
+}
+
+static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr)
+{
+	int i;
+
+	if (!intr)
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++)
+		DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].en_off, 0x00000000);
+
+	/* ensure register writes go through */
+	wmb();
+
+	return 0;
+}
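+
+/*
+ * Reset sketch (illustrative only): during hw init the core would
+ * typically mask and ack everything before enabling selected interrupts:
+ *
+ *	intr->ops.disable_all_irqs(intr);
+ *	intr->ops.clear_all_irqs(intr);
+ */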
+
+static int dpu_hw_intr_get_valid_interrupts(struct dpu_hw_intr *intr,
+		uint32_t *mask)
+{
+	if (!intr || !mask)
+		return -EINVAL;
+
+	*mask = IRQ_SOURCE_MDP | IRQ_SOURCE_DSI0 | IRQ_SOURCE_DSI1
+		| IRQ_SOURCE_HDMI | IRQ_SOURCE_EDP;
+
+	return 0;
+}
+
+static void dpu_hw_intr_get_interrupt_statuses(struct dpu_hw_intr *intr)
+{
+	int i;
+	u32 enable_mask;
+	unsigned long irq_flags;
+
+	if (!intr)
+		return;
+
+	spin_lock_irqsave(&intr->irq_lock, irq_flags);
+	for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
+		/* Read interrupt status */
+		intr->save_irq_status[i] = DPU_REG_READ(&intr->hw,
+				dpu_intr_set[i].status_off);
+
+		/* Read enable mask */
+		enable_mask = DPU_REG_READ(&intr->hw, dpu_intr_set[i].en_off);
+
+		/* and clear the interrupt */
+		if (intr->save_irq_status[i])
+			DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].clr_off,
+					intr->save_irq_status[i]);
+
+		/* Finally update IRQ status based on enable mask */
+		intr->save_irq_status[i] &= enable_mask;
+	}
+
+	/* ensure register writes go through */
+	wmb();
+
+	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
+
+static void dpu_hw_intr_clear_intr_status_nolock(struct dpu_hw_intr *intr,
+		int irq_idx)
+{
+	int reg_idx;
+
+	if (!intr)
+		return;
+
+	reg_idx = dpu_irq_map[irq_idx].reg_idx;
+	DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
+			dpu_irq_map[irq_idx].irq_mask);
+
+	/* ensure register writes go through */
+	wmb();
+}
+
+static void dpu_hw_intr_clear_interrupt_status(struct dpu_hw_intr *intr,
+		int irq_idx)
+{
+	unsigned long irq_flags;
+
+	if (!intr)
+		return;
+
+	spin_lock_irqsave(&intr->irq_lock, irq_flags);
+	dpu_hw_intr_clear_intr_status_nolock(intr, irq_idx);
+	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
+
+static u32 dpu_hw_intr_get_intr_status_nolock(struct dpu_hw_intr *intr,
+		int irq_idx, bool clear)
+{
+	int reg_idx;
+	u32 intr_status;
+
+	if (!intr)
+		return 0;
+
+	if (irq_idx >= ARRAY_SIZE(dpu_irq_map) || irq_idx < 0) {
+		pr_err("invalid IRQ index: [%d]\n", irq_idx);
+		return 0;
+	}
+
+	reg_idx = dpu_irq_map[irq_idx].reg_idx;
+	intr_status = DPU_REG_READ(&intr->hw,
+			dpu_intr_set[reg_idx].status_off) &
+					dpu_irq_map[irq_idx].irq_mask;
+	if (intr_status && clear)
+		DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
+				intr_status);
+
+	/* ensure register writes go through */
+	wmb();
+
+	return intr_status;
+}
+
+static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
+		int irq_idx, bool clear)
+{
+	int reg_idx;
+	unsigned long irq_flags;
+	u32 intr_status;
+
+	if (!intr)
+		return 0;
+
+	if (irq_idx >= ARRAY_SIZE(dpu_irq_map) || irq_idx < 0) {
+		pr_err("invalid IRQ index: [%d]\n", irq_idx);
+		return 0;
+	}
+
+	spin_lock_irqsave(&intr->irq_lock, irq_flags);
+
+	reg_idx = dpu_irq_map[irq_idx].reg_idx;
+	intr_status = DPU_REG_READ(&intr->hw,
+			dpu_intr_set[reg_idx].status_off) &
+					dpu_irq_map[irq_idx].irq_mask;
+	if (intr_status && clear)
+		DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
+				intr_status);
+
+	/* ensure register writes go through */
+	wmb();
+
+	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+	return intr_status;
+}
+
+static void __setup_intr_ops(struct dpu_hw_intr_ops *ops)
+{
+	ops->set_mask = dpu_hw_intr_set_mask;
+	ops->irq_idx_lookup = dpu_hw_intr_irqidx_lookup;
+	ops->enable_irq = dpu_hw_intr_enable_irq;
+	ops->disable_irq = dpu_hw_intr_disable_irq;
+	ops->disable_irq_nolock = dpu_hw_intr_disable_irq_nolock;
+	ops->dispatch_irqs = dpu_hw_intr_dispatch_irq;
+	ops->clear_all_irqs = dpu_hw_intr_clear_irqs;
+	ops->disable_all_irqs = dpu_hw_intr_disable_irqs;
+	ops->get_valid_interrupts = dpu_hw_intr_get_valid_interrupts;
+	ops->get_interrupt_statuses = dpu_hw_intr_get_interrupt_statuses;
+	ops->clear_interrupt_status = dpu_hw_intr_clear_interrupt_status;
+	ops->clear_intr_status_nolock = dpu_hw_intr_clear_intr_status_nolock;
+	ops->get_interrupt_status = dpu_hw_intr_get_interrupt_status;
+	ops->get_intr_status_nolock = dpu_hw_intr_get_intr_status_nolock;
+}
+
+static void __intr_offset(struct dpu_mdss_cfg *m,
+		void __iomem *addr, struct dpu_hw_blk_reg_map *hw)
+{
+	hw->base_off = addr;
+	hw->blk_off = m->mdp[0].base;
+	hw->hwversion = m->hwversion;
+}
+
+struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
+		struct dpu_mdss_cfg *m)
+{
+	struct dpu_hw_intr *intr;
+
+	if (!addr || !m)
+		return ERR_PTR(-EINVAL);
+
+	intr = kzalloc(sizeof(*intr), GFP_KERNEL);
+	if (!intr)
+		return ERR_PTR(-ENOMEM);
+
+	__intr_offset(m, addr, &intr->hw);
+	__setup_intr_ops(&intr->ops);
+
+	intr->irq_idx_tbl_size = ARRAY_SIZE(dpu_irq_map);
+
+	intr->cache_irq_mask = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
+			GFP_KERNEL);
+	if (!intr->cache_irq_mask) {
+		kfree(intr);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	intr->save_irq_status = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
+			GFP_KERNEL);
+	if (!intr->save_irq_status) {
+		kfree(intr->cache_irq_mask);
+		kfree(intr);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	spin_lock_init(&intr->irq_lock);
+
+	return intr;
+}
+
+void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
+{
+	if (intr) {
+		kfree(intr->cache_irq_mask);
+		kfree(intr->save_irq_status);
+		kfree(intr);
+	}
+}
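+
+/*
+ * Lifetime sketch (illustrative; "mmio" and "catalog" are assumptions
+ * for the caller's names): the KMS layer owns the handle and pairs init
+ * with destroy:
+ *
+ *	struct dpu_hw_intr *intr;
+ *
+ *	intr = dpu_hw_intr_init(mmio, catalog);
+ *	if (IS_ERR(intr))
+ *		return PTR_ERR(intr);
+ *	...
+ *	dpu_hw_intr_destroy(intr);
+ */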
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
new file mode 100644
index 000000000000..b52cdca78607
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -0,0 +1,278 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_INTERRUPTS_H
+#define _DPU_HW_INTERRUPTS_H
+
+#include <linux/types.h>
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_mdss.h"
+
+#define IRQ_SOURCE_MDP		BIT(0)
+#define IRQ_SOURCE_DSI0		BIT(4)
+#define IRQ_SOURCE_DSI1		BIT(5)
+#define IRQ_SOURCE_HDMI		BIT(8)
+#define IRQ_SOURCE_EDP		BIT(12)
+#define IRQ_SOURCE_MHL		BIT(16)
+
+/**
+ * enum dpu_intr_type - HW interrupt type
+ * @DPU_IRQ_TYPE_WB_ROT_COMP:		WB rotator done
+ * @DPU_IRQ_TYPE_WB_WFD_COMP:		WB WFD done
+ * @DPU_IRQ_TYPE_PING_PONG_COMP:	PingPong done
+ * @DPU_IRQ_TYPE_PING_PONG_RD_PTR:	PingPong read pointer
+ * @DPU_IRQ_TYPE_PING_PONG_WR_PTR:	PingPong write pointer
+ * @DPU_IRQ_TYPE_PING_PONG_AUTO_REF:	PingPong auto refresh
+ * @DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK:	PingPong Tear check
+ * @DPU_IRQ_TYPE_PING_PONG_TE_CHECK:	PingPong TE detection
+ * @DPU_IRQ_TYPE_INTF_UNDER_RUN:	INTF underrun
+ * @DPU_IRQ_TYPE_INTF_VSYNC:		INTF VSYNC
+ * @DPU_IRQ_TYPE_CWB_OVERFLOW:		Concurrent WB overflow
+ * @DPU_IRQ_TYPE_HIST_VIG_DONE:		VIG Histogram done
+ * @DPU_IRQ_TYPE_HIST_VIG_RSTSEQ:	VIG Histogram reset
+ * @DPU_IRQ_TYPE_HIST_DSPP_DONE:	DSPP Histogram done
+ * @DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ:	DSPP Histogram reset
+ * @DPU_IRQ_TYPE_WD_TIMER:		Watchdog timer
+ * @DPU_IRQ_TYPE_SFI_VIDEO_IN:		Video static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_VIDEO_OUT:		Video static frame INTR out-of static
+ * @DPU_IRQ_TYPE_SFI_CMD_0_IN:		DSI CMD0 static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_CMD_0_OUT:		DSI CMD0 static frame INTR out-of static
+ * @DPU_IRQ_TYPE_SFI_CMD_1_IN:		DSI CMD1 static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_CMD_1_OUT:		DSI CMD1 static frame INTR out-of static
+ * @DPU_IRQ_TYPE_SFI_CMD_2_IN:		DSI CMD2 static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_CMD_2_OUT:		DSI CMD2 static frame INTR out-of static
+ * @DPU_IRQ_TYPE_PROG_LINE:		Programmable Line interrupt
+ * @DPU_IRQ_TYPE_AD4_BL_DONE:		AD4 backlight
+ * @DPU_IRQ_TYPE_CTL_START:		Control start
+ * @DPU_IRQ_TYPE_RESERVED:		Reserved for expansion
+ */
+enum dpu_intr_type {
+	DPU_IRQ_TYPE_WB_ROT_COMP,
+	DPU_IRQ_TYPE_WB_WFD_COMP,
+	DPU_IRQ_TYPE_PING_PONG_COMP,
+	DPU_IRQ_TYPE_PING_PONG_RD_PTR,
+	DPU_IRQ_TYPE_PING_PONG_WR_PTR,
+	DPU_IRQ_TYPE_PING_PONG_AUTO_REF,
+	DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK,
+	DPU_IRQ_TYPE_PING_PONG_TE_CHECK,
+	DPU_IRQ_TYPE_INTF_UNDER_RUN,
+	DPU_IRQ_TYPE_INTF_VSYNC,
+	DPU_IRQ_TYPE_CWB_OVERFLOW,
+	DPU_IRQ_TYPE_HIST_VIG_DONE,
+	DPU_IRQ_TYPE_HIST_VIG_RSTSEQ,
+	DPU_IRQ_TYPE_HIST_DSPP_DONE,
+	DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ,
+	DPU_IRQ_TYPE_WD_TIMER,
+	DPU_IRQ_TYPE_SFI_VIDEO_IN,
+	DPU_IRQ_TYPE_SFI_VIDEO_OUT,
+	DPU_IRQ_TYPE_SFI_CMD_0_IN,
+	DPU_IRQ_TYPE_SFI_CMD_0_OUT,
+	DPU_IRQ_TYPE_SFI_CMD_1_IN,
+	DPU_IRQ_TYPE_SFI_CMD_1_OUT,
+	DPU_IRQ_TYPE_SFI_CMD_2_IN,
+	DPU_IRQ_TYPE_SFI_CMD_2_OUT,
+	DPU_IRQ_TYPE_PROG_LINE,
+	DPU_IRQ_TYPE_AD4_BL_DONE,
+	DPU_IRQ_TYPE_CTL_START,
+	DPU_IRQ_TYPE_RESERVED,
+};
+
+struct dpu_hw_intr;
+
+/**
+ * Interrupt operations.
+ */
+struct dpu_hw_intr_ops {
+	/**
+	 * set_mask - Programs the given interrupt register with the
+	 *            given interrupt mask. Register value will get overwritten.
+	 * @intr:	HW interrupt handle
+	 * @reg_off:	MDSS HW register offset
+	 * @irqmask:	IRQ mask value
+	 */
+	void (*set_mask)(
+			struct dpu_hw_intr *intr,
+			uint32_t reg_off,
+			uint32_t irqmask);
+
+	/**
+	 * irq_idx_lookup - Look up the IRQ index for a HW interrupt type.
+	 *                  Used for all irq-related ops.
+	 * @intr_type:		Interrupt type defined in dpu_intr_type
+	 * @instance_idx:	HW interrupt block instance
+	 * @return:		irq_idx or -EINVAL on lookup failure
+	 */
+	int (*irq_idx_lookup)(
+			enum dpu_intr_type intr_type,
+			u32 instance_idx);
+
+	/**
+	 * enable_irq - Enable IRQ based on lookup IRQ index
+	 * @intr:	HW interrupt handle
+	 * @irq_idx:	Lookup irq index returned from irq_idx_lookup
+	 * @return:	0 for success, otherwise failure
+	 */
+	int (*enable_irq)(
+			struct dpu_hw_intr *intr,
+			int irq_idx);
+
+	/**
+	 * disable_irq - Disable IRQ based on lookup IRQ index
+	 * @intr:	HW interrupt handle
+	 * @irq_idx:	Lookup irq index returned from irq_idx_lookup
+	 * @return:	0 for success, otherwise failure
+	 */
+	int (*disable_irq)(
+			struct dpu_hw_intr *intr,
+			int irq_idx);
+
+	/**
+	 * disable_irq_nolock - Disable IRQ based on IRQ index without lock
+	 * @intr:	HW interrupt handle
+	 * @irq_idx:	Lookup irq index returned from irq_idx_lookup
+	 * @return:	0 for success, otherwise failure
+	 */
+	int (*disable_irq_nolock)(
+			struct dpu_hw_intr *intr,
+			int irq_idx);
+
+	/**
+	 * clear_all_irqs - Clears all the interrupts (i.e. acknowledges
+	 *                  any asserted IRQs). Useful during reset.
+	 * @intr:	HW interrupt handle
+	 * @return:	0 for success, otherwise failure
+	 */
+	int (*clear_all_irqs)(
+			struct dpu_hw_intr *intr);
+
+	/**
+	 * disable_all_irqs - Disables all the interrupts. Useful during reset.
+	 * @intr:	HW interrupt handle
+	 * @return:	0 for success, otherwise failure
+	 */
+	int (*disable_all_irqs)(
+			struct dpu_hw_intr *intr);
+
+	/**
+	 * dispatch_irqs - IRQ dispatcher will call the given callback
+	 *                 function when a matching interrupt status bit is
+	 *                 found in the irq mapping table.
+	 * @intr:	HW interrupt handle
+	 * @cbfunc:	Callback function pointer
+	 * @arg:	Argument to pass back during callback
+	 */
+	void (*dispatch_irqs)(
+			struct dpu_hw_intr *intr,
+			void (*cbfunc)(void *arg, int irq_idx),
+			void *arg);
+
+	/**
+	 * get_interrupt_statuses - Gets and stores the values of all
+	 *                          interrupt status registers that are
+	 *                          currently asserted.
+	 * @intr:	HW interrupt handle
+	 */
+	void (*get_interrupt_statuses)(
+			struct dpu_hw_intr *intr);
+
+	/**
+	 * clear_interrupt_status - Clears HW interrupt status based on given
+	 *                          lookup IRQ index.
+	 * @intr:	HW interrupt handle
+	 * @irq_idx:	Lookup irq index returned from irq_idx_lookup
+	 */
+	void (*clear_interrupt_status)(
+			struct dpu_hw_intr *intr,
+			int irq_idx);
+
+	/**
+	 * clear_intr_status_nolock() - clears the HW interrupts without lock
+	 * @intr:	HW interrupt handle
+	 * @irq_idx:	Lookup irq index returned from irq_idx_lookup
+	 */
+	void (*clear_intr_status_nolock)(
+			struct dpu_hw_intr *intr,
+			int irq_idx);
+
+	/**
+	 * get_interrupt_status - Gets the HW interrupt status, and clears it
+	 *                        if set, based on the given lookup IRQ index.
+	 * @intr:	HW interrupt handle
+	 * @irq_idx:	Lookup irq index returned from irq_idx_lookup
+	 * @clear:	True to clear irq after read
+	 */
+	u32 (*get_interrupt_status)(
+			struct dpu_hw_intr *intr,
+			int irq_idx,
+			bool clear);
+
+	/**
+	 * get_intr_status_nolock - nolock version of get_interrupt_status
+	 * @intr:	HW interrupt handle
+	 * @irq_idx:	Lookup irq index returned from irq_idx_lookup
+	 * @clear:	True to clear irq after read
+	 */
+	u32 (*get_intr_status_nolock)(
+			struct dpu_hw_intr *intr,
+			int irq_idx,
+			bool clear);
+
+	/**
+	 * get_valid_interrupts - Gets a mask of all valid interrupt sources
+	 *                        within DPU. These are status bits within the
+	 *                        interrupt registers that identify the source
+	 *                        of an IRQ, e.g. MDP, DSI or HDMI.
+	 * @intr:	HW interrupt handle
+	 * @mask:	Returning the interrupt source MASK
+	 * @return:	0 for success, otherwise failure
+	 */
+	int (*get_valid_interrupts)(
+			struct dpu_hw_intr *intr,
+			uint32_t *mask);
+};
+
+/**
+ * struct dpu_hw_intr: hw interrupts handling data structure
+ * @hw:               virtual address mapping
+ * @ops:              function pointer mapping for IRQ handling
+ * @cache_irq_mask:   array of IRQ enable masks reg storage created during init
+ * @save_irq_status:  array of IRQ status reg storage created during init
+ * @irq_idx_tbl_size: total number of irq_idx mapped in the hw_interrupts
+ * @irq_lock:         spinlock for accessing IRQ resources
+ */
+struct dpu_hw_intr {
+	struct dpu_hw_blk_reg_map hw;
+	struct dpu_hw_intr_ops ops;
+	u32 *cache_irq_mask;
+	u32 *save_irq_status;
+	u32 irq_idx_tbl_size;
+	spinlock_t irq_lock;
+};
+
+/**
+ * dpu_hw_intr_init(): Initializes the interrupts hw object
+ * @addr: mapped register io address of MDP
+ * @m :   pointer to mdss catalog data
+ */
+struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
+		struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_intr_destroy(): Cleans up the interrupts hw object
+ * @intr: pointer to interrupts hw object
+ */
+void dpu_hw_intr_destroy(struct dpu_hw_intr *intr);
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
new file mode 100644
index 000000000000..d280df5613c9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -0,0 +1,349 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_intf.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define INTF_TIMING_ENGINE_EN           0x000
+#define INTF_CONFIG                     0x004
+#define INTF_HSYNC_CTL                  0x008
+#define INTF_VSYNC_PERIOD_F0            0x00C
+#define INTF_VSYNC_PERIOD_F1            0x010
+#define INTF_VSYNC_PULSE_WIDTH_F0       0x014
+#define INTF_VSYNC_PULSE_WIDTH_F1       0x018
+#define INTF_DISPLAY_V_START_F0         0x01C
+#define INTF_DISPLAY_V_START_F1         0x020
+#define INTF_DISPLAY_V_END_F0           0x024
+#define INTF_DISPLAY_V_END_F1           0x028
+#define INTF_ACTIVE_V_START_F0          0x02C
+#define INTF_ACTIVE_V_START_F1          0x030
+#define INTF_ACTIVE_V_END_F0            0x034
+#define INTF_ACTIVE_V_END_F1            0x038
+#define INTF_DISPLAY_HCTL               0x03C
+#define INTF_ACTIVE_HCTL                0x040
+#define INTF_BORDER_COLOR               0x044
+#define INTF_UNDERFLOW_COLOR            0x048
+#define INTF_HSYNC_SKEW                 0x04C
+#define INTF_POLARITY_CTL               0x050
+#define INTF_TEST_CTL                   0x054
+#define INTF_TP_COLOR0                  0x058
+#define INTF_TP_COLOR1                  0x05C
+#define INTF_FRAME_LINE_COUNT_EN        0x0A8
+#define INTF_FRAME_COUNT                0x0AC
+#define INTF_LINE_COUNT                 0x0B0
+
+#define INTF_DEFLICKER_CONFIG           0x0F0
+#define INTF_DEFLICKER_STRNG_COEFF      0x0F4
+#define INTF_DEFLICKER_WEAK_COEFF       0x0F8
+
+#define INTF_DSI_CMD_MODE_TRIGGER_EN    0x084
+#define INTF_PANEL_FORMAT               0x090
+#define INTF_TPG_ENABLE                 0x100
+#define INTF_TPG_MAIN_CONTROL           0x104
+#define INTF_TPG_VIDEO_CONFIG           0x108
+#define INTF_TPG_COMPONENT_LIMITS       0x10C
+#define INTF_TPG_RECTANGLE              0x110
+#define INTF_TPG_INITIAL_VALUE          0x114
+#define INTF_TPG_BLK_WHITE_PATTERN_FRAMES 0x118
+#define INTF_TPG_RGB_MAPPING            0x11C
+#define INTF_PROG_FETCH_START           0x170
+#define INTF_PROG_ROT_START             0x174
+
+#define INTF_MISR_CTRL			0x180
+#define INTF_MISR_SIGNATURE		0x184
+
+static struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
+		struct dpu_mdss_cfg *m,
+		void __iomem *addr,
+		struct dpu_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->intf_count; i++) {
+		if ((intf == m->intf[i].id) &&
+				(m->intf[i].type != INTF_NONE)) {
+			b->base_off = addr;
+			b->blk_off = m->intf[i].base;
+			b->length = m->intf[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = DPU_DBG_MASK_INTF;
+			return &m->intf[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
+		const struct intf_timing_params *p,
+		const struct dpu_format *fmt)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	u32 hsync_period, vsync_period;
+	u32 display_v_start, display_v_end;
+	u32 hsync_start_x, hsync_end_x;
+	u32 active_h_start, active_h_end;
+	u32 active_v_start, active_v_end;
+	u32 active_hctl, display_hctl, hsync_ctl;
+	u32 polarity_ctl, den_polarity, hsync_polarity, vsync_polarity;
+	u32 panel_format;
+	u32 intf_cfg;
+
+	/* read interface_cfg */
+	intf_cfg = DPU_REG_READ(c, INTF_CONFIG);
+	hsync_period = p->hsync_pulse_width + p->h_back_porch + p->width +
+			p->h_front_porch;
+	vsync_period = p->vsync_pulse_width + p->v_back_porch + p->height +
+			p->v_front_porch;
+
+	display_v_start = ((p->vsync_pulse_width + p->v_back_porch) *
+			hsync_period) + p->hsync_skew;
+	display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
+			p->hsync_skew - 1;
+
+	if (ctx->cap->type == INTF_EDP || ctx->cap->type == INTF_DP) {
+		display_v_start += p->hsync_pulse_width + p->h_back_porch;
+		display_v_end -= p->h_front_porch;
+	}
+
+	hsync_start_x = p->h_back_porch + p->hsync_pulse_width;
+	hsync_end_x = hsync_period - p->h_front_porch - 1;
+
+	if (p->width != p->xres) {
+		active_h_start = hsync_start_x;
+		active_h_end = active_h_start + p->xres - 1;
+	} else {
+		active_h_start = 0;
+		active_h_end = 0;
+	}
+
+	if (p->height != p->yres) {
+		active_v_start = display_v_start;
+		active_v_end = active_v_start + (p->yres * hsync_period) - 1;
+	} else {
+		active_v_start = 0;
+		active_v_end = 0;
+	}
+
+	if (active_h_end) {
+		active_hctl = (active_h_end << 16) | active_h_start;
+		intf_cfg |= BIT(29);	/* ACTIVE_H_ENABLE */
+	} else {
+		active_hctl = 0;
+	}
+
+	if (active_v_end)
+		intf_cfg |= BIT(30); /* ACTIVE_V_ENABLE */
+
+	hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
+	display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+	den_polarity = 0;
+	if (ctx->cap->type == INTF_HDMI) {
+		hsync_polarity = p->yres >= 720 ? 0 : 1;
+		vsync_polarity = p->yres >= 720 ? 0 : 1;
+	} else {
+		hsync_polarity = 0;
+		vsync_polarity = 0;
+	}
+	polarity_ctl = (den_polarity << 2) | /*  DEN Polarity  */
+		(vsync_polarity << 1) | /* VSYNC Polarity */
+		(hsync_polarity << 0);  /* HSYNC Polarity */
+
+	if (!DPU_FORMAT_IS_YUV(fmt))
+		panel_format = (fmt->bits[C0_G_Y] |
+				(fmt->bits[C1_B_Cb] << 2) |
+				(fmt->bits[C2_R_Cr] << 4) |
+				(0x21 << 8));
+	else
+		/* Interface treats all the pixel data in RGB888 format */
+		panel_format = (COLOR_8BIT |
+				(COLOR_8BIT << 2) |
+				(COLOR_8BIT << 4) |
+				(0x21 << 8));
+
+	DPU_REG_WRITE(c, INTF_HSYNC_CTL, hsync_ctl);
+	DPU_REG_WRITE(c, INTF_VSYNC_PERIOD_F0, vsync_period * hsync_period);
+	DPU_REG_WRITE(c, INTF_VSYNC_PULSE_WIDTH_F0,
+			p->vsync_pulse_width * hsync_period);
+	DPU_REG_WRITE(c, INTF_DISPLAY_HCTL, display_hctl);
+	DPU_REG_WRITE(c, INTF_DISPLAY_V_START_F0, display_v_start);
+	DPU_REG_WRITE(c, INTF_DISPLAY_V_END_F0, display_v_end);
+	DPU_REG_WRITE(c, INTF_ACTIVE_HCTL, active_hctl);
+	DPU_REG_WRITE(c, INTF_ACTIVE_V_START_F0, active_v_start);
+	DPU_REG_WRITE(c, INTF_ACTIVE_V_END_F0, active_v_end);
+	DPU_REG_WRITE(c, INTF_BORDER_COLOR, p->border_clr);
+	DPU_REG_WRITE(c, INTF_UNDERFLOW_COLOR, p->underflow_clr);
+	DPU_REG_WRITE(c, INTF_HSYNC_SKEW, p->hsync_skew);
+	DPU_REG_WRITE(c, INTF_POLARITY_CTL, polarity_ctl);
+	DPU_REG_WRITE(c, INTF_FRAME_LINE_COUNT_EN, 0x3);
+	DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg);
+	DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
+}
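+
+/*
+ * Worked example (illustrative; standard CEA 1080p60 timings assumed):
+ * hsync_period = 44 + 148 + 1920 + 88 = 2200 pixels per line and
+ * vsync_period = 5 + 36 + 1080 + 4 = 1125 lines per frame, so
+ * INTF_VSYNC_PERIOD_F0 is programmed with 1125 * 2200 = 2475000 hsync
+ * clock ticks and, with zero hsync_skew, display_v_start =
+ * (5 + 36) * 2200 = 90200.
+ */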
+
+static void dpu_hw_intf_enable_timing_engine(
+		struct dpu_hw_intf *intf,
+		u8 enable)
+{
+	struct dpu_hw_blk_reg_map *c = &intf->hw;
+	/* Note: Display interface select is handled in top block hw layer */
+	DPU_REG_WRITE(c, INTF_TIMING_ENGINE_EN, enable != 0);
+}
+
+static void dpu_hw_intf_setup_prg_fetch(
+		struct dpu_hw_intf *intf,
+		const struct intf_prog_fetch *fetch)
+{
+	struct dpu_hw_blk_reg_map *c = &intf->hw;
+	int fetch_enable;
+
+	/*
+	 * Fetch should always be outside the active lines. If the fetch
+	 * is programmed within the active region, hardware behavior is
+	 * unknown.
+	 */
+
+	fetch_enable = DPU_REG_READ(c, INTF_CONFIG);
+	if (fetch->enable) {
+		fetch_enable |= BIT(31);
+		DPU_REG_WRITE(c, INTF_PROG_FETCH_START,
+				fetch->fetch_start);
+	} else {
+		fetch_enable &= ~BIT(31);
+	}
+
+	DPU_REG_WRITE(c, INTF_CONFIG, fetch_enable);
+}
+
+static void dpu_hw_intf_get_status(
+		struct dpu_hw_intf *intf,
+		struct intf_status *s)
+{
+	struct dpu_hw_blk_reg_map *c = &intf->hw;
+
+	s->is_en = DPU_REG_READ(c, INTF_TIMING_ENGINE_EN);
+	if (s->is_en) {
+		s->frame_count = DPU_REG_READ(c, INTF_FRAME_COUNT);
+		s->line_count = DPU_REG_READ(c, INTF_LINE_COUNT);
+	} else {
+		s->line_count = 0;
+		s->frame_count = 0;
+	}
+}
+
+static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf,
+						bool enable, u32 frame_count)
+{
+	struct dpu_hw_blk_reg_map *c = &intf->hw;
+	u32 config = 0;
+
+	DPU_REG_WRITE(c, INTF_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
+	/* clear misr data */
+	wmb();
+
+	if (enable)
+		config = (frame_count & MISR_FRAME_COUNT_MASK) |
+			MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
+
+	DPU_REG_WRITE(c, INTF_MISR_CTRL, config);
+}
+
+static u32 dpu_hw_intf_collect_misr(struct dpu_hw_intf *intf)
+{
+	struct dpu_hw_blk_reg_map *c = &intf->hw;
+
+	return DPU_REG_READ(c, INTF_MISR_SIGNATURE);
+}
+
+static u32 dpu_hw_intf_get_line_count(struct dpu_hw_intf *intf)
+{
+	struct dpu_hw_blk_reg_map *c;
+
+	if (!intf)
+		return 0;
+
+	c = &intf->hw;
+
+	return DPU_REG_READ(c, INTF_LINE_COUNT);
+}
+
+static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
+		unsigned long cap)
+{
+	ops->setup_timing_gen = dpu_hw_intf_setup_timing_engine;
+	ops->setup_prg_fetch  = dpu_hw_intf_setup_prg_fetch;
+	ops->get_status = dpu_hw_intf_get_status;
+	ops->enable_timing = dpu_hw_intf_enable_timing_engine;
+	ops->setup_misr = dpu_hw_intf_setup_misr;
+	ops->collect_misr = dpu_hw_intf_collect_misr;
+	ops->get_line_count = dpu_hw_intf_get_line_count;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
+struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m)
+{
+	struct dpu_hw_intf *c;
+	struct dpu_intf_cfg *cfg;
+	int rc;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _intf_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		pr_err("failed to create dpu_hw_intf %d\n", idx);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	c->idx = idx;
+	c->cap = cfg;
+	c->mdss = m;
+	_setup_intf_ops(&c->ops, c->cap->features);
+
+	rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_INTF, idx, &dpu_hw_ops);
+	if (rc) {
+		DPU_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
+	return c;
+
+blk_init_error:
+	kzfree(c);
+
+	return ERR_PTR(rc);
+}
+
+void dpu_hw_intf_destroy(struct dpu_hw_intf *intf)
+{
+	if (intf)
+		dpu_hw_blk_destroy(&intf->base);
+	kfree(intf);
+}
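+
+/*
+ * Usage sketch (illustrative; the timing params and format pointer are
+ * assumptions): after init, the caller programs and starts the timing
+ * engine through the ops table:
+ *
+ *	intf = dpu_hw_intf_init(INTF_1, mmio, catalog);
+ *	if (!IS_ERR(intf)) {
+ *		intf->ops.setup_timing_gen(intf, &timing, fmt);
+ *		intf->ops.enable_timing(intf, 1);
+ *	}
+ */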
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
new file mode 100644
index 000000000000..a79d735da68d
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -0,0 +1,128 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_INTF_H
+#define _DPU_HW_INTF_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_intf;
+
+/* intf timing settings */
+struct intf_timing_params {
+	u32 width;		/* active width */
+	u32 height;		/* active height */
+	u32 xres;		/* Display panel width */
+	u32 yres;		/* Display panel height */
+
+	u32 h_back_porch;
+	u32 h_front_porch;
+	u32 v_back_porch;
+	u32 v_front_porch;
+	u32 hsync_pulse_width;
+	u32 vsync_pulse_width;
+	u32 hsync_polarity;
+	u32 vsync_polarity;
+	u32 border_clr;
+	u32 underflow_clr;
+	u32 hsync_skew;
+};
+
+struct intf_prog_fetch {
+	u8 enable;
+	/* vsync counter for the front porch pixel line */
+	u32 fetch_start;
+};
+
+struct intf_status {
+	u8 is_en;		/* interface timing engine is enabled or not */
+	u32 frame_count;	/* frame count since timing engine enabled */
+	u32 line_count;		/* current line count including blanking */
+};
+
+/**
+ * struct dpu_hw_intf_ops : Interface to the interface HW driver functions
+ *  Assumption is these functions will be called after clocks are enabled
+ * @setup_timing_gen: programs the timing engine
+ * @setup_prg_fetch: enables/disables the programmable fetch logic
+ * @enable_timing: enable/disable timing engine
+ * @get_status: returns whether the timing engine is enabled or not
+ * @setup_misr: enables/disables MISR in HW register
+ * @collect_misr: reads and stores MISR data from HW register
+ * @get_line_count: reads current vertical line counter
+ */
+struct dpu_hw_intf_ops {
+	void (*setup_timing_gen)(struct dpu_hw_intf *intf,
+			const struct intf_timing_params *p,
+			const struct dpu_format *fmt);
+
+	void (*setup_prg_fetch)(struct dpu_hw_intf *intf,
+			const struct intf_prog_fetch *fetch);
+
+	void (*enable_timing)(struct dpu_hw_intf *intf,
+			u8 enable);
+
+	void (*get_status)(struct dpu_hw_intf *intf,
+			struct intf_status *status);
+
+	void (*setup_misr)(struct dpu_hw_intf *intf,
+			bool enable, u32 frame_count);
+
+	u32 (*collect_misr)(struct dpu_hw_intf *intf);
+
+	u32 (*get_line_count)(struct dpu_hw_intf *intf);
+};
+
+struct dpu_hw_intf {
+	struct dpu_hw_blk base;
+	struct dpu_hw_blk_reg_map hw;
+
+	/* intf */
+	enum dpu_intf idx;
+	const struct dpu_intf_cfg *cap;
+	const struct dpu_mdss_cfg *mdss;
+
+	/* ops */
+	struct dpu_hw_intf_ops ops;
+};
+
+/**
+ * to_dpu_hw_intf - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_intf *to_dpu_hw_intf(struct dpu_hw_blk *hw)
+{
+	return container_of(hw, struct dpu_hw_intf, base);
+}
+
+/**
+ * dpu_hw_intf_init(): Initializes the intf driver for the passed
+ * interface idx.
+ * @idx:  interface index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m :   pointer to mdss catalog data
+ */
+struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_intf_destroy(): Destroys INTF driver context
+ * @intf:   Pointer to INTF driver context
+ */
+void dpu_hw_intf_destroy(struct dpu_hw_intf *intf);
+
+#endif /*_DPU_HW_INTF_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
new file mode 100644
index 000000000000..4ab72b0f07a5
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -0,0 +1,261 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_kms.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define LM_OP_MODE                        0x00
+#define LM_OUT_SIZE                       0x04
+#define LM_BORDER_COLOR_0                 0x08
+#define LM_BORDER_COLOR_1                 0x010
+
+/* These registers are offsets from the mixer base + stage base */
+#define LM_BLEND0_OP                     0x00
+#define LM_BLEND0_CONST_ALPHA            0x04
+#define LM_FG_COLOR_FILL_COLOR_0         0x08
+#define LM_FG_COLOR_FILL_COLOR_1         0x0C
+#define LM_FG_COLOR_FILL_SIZE            0x10
+#define LM_FG_COLOR_FILL_XY              0x14
+
+#define LM_BLEND0_FG_ALPHA               0x04
+#define LM_BLEND0_BG_ALPHA               0x08
+
+#define LM_MISR_CTRL			0x310
+#define LM_MISR_SIGNATURE		0x314
+
+static struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
+		struct dpu_mdss_cfg *m,
+		void __iomem *addr,
+		struct dpu_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->mixer_count; i++) {
+		if (mixer == m->mixer[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->mixer[i].base;
+			b->length = m->mixer[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = DPU_DBG_MASK_LM;
+			return &m->mixer[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+/**
+ * _stage_offset(): returns the relative offset of the blend registers
+ * for the stage to be setup
+ * @ctx:   mixer context containing the mixer to be programmed
+ * @stage: stage index to setup
+ */
+static inline int _stage_offset(struct dpu_hw_mixer *ctx, enum dpu_stage stage)
+{
+	const struct dpu_lm_sub_blks *sblk = ctx->cap->sblk;
+	int rc;
+
+	if (stage == DPU_STAGE_BASE)
+		rc = -EINVAL;
+	else if (stage <= sblk->maxblendstages)
+		rc = sblk->blendstage_base[stage - DPU_STAGE_0];
+	else
+		rc = -EINVAL;
+
+	return rc;
+}
+
+static void dpu_hw_lm_setup_out(struct dpu_hw_mixer *ctx,
+		struct dpu_hw_mixer_cfg *mixer)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	u32 outsize;
+	u32 op_mode;
+
+	op_mode = DPU_REG_READ(c, LM_OP_MODE);
+
+	outsize = mixer->out_height << 16 | mixer->out_width;
+	DPU_REG_WRITE(c, LM_OUT_SIZE, outsize);
+
+	/* SPLIT_LEFT_RIGHT */
+	if (mixer->right_mixer)
+		op_mode |= BIT(31);
+	else
+		op_mode &= ~BIT(31);
+	DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
+}
+
+static void dpu_hw_lm_setup_border_color(struct dpu_hw_mixer *ctx,
+		struct dpu_mdss_color *color,
+		u8 border_en)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+	if (border_en) {
+		DPU_REG_WRITE(c, LM_BORDER_COLOR_0,
+			(color->color_0 & 0xFFF) |
+			((color->color_1 & 0xFFF) << 0x10));
+		DPU_REG_WRITE(c, LM_BORDER_COLOR_1,
+			(color->color_2 & 0xFFF) |
+			((color->color_3 & 0xFFF) << 0x10));
+	}
+}
+
+static void dpu_hw_lm_setup_blend_config_sdm845(struct dpu_hw_mixer *ctx,
+	u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	int stage_off;
+	u32 const_alpha;
+
+	if (stage == DPU_STAGE_BASE)
+		return;
+
+	stage_off = _stage_offset(ctx, stage);
+	if (WARN_ON(stage_off < 0))
+		return;
+
+	const_alpha = (bg_alpha & 0xFF) | ((fg_alpha & 0xFF) << 16);
+	DPU_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, const_alpha);
+	DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
+static void dpu_hw_lm_setup_blend_config(struct dpu_hw_mixer *ctx,
+	u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	int stage_off;
+
+	if (stage == DPU_STAGE_BASE)
+		return;
+
+	stage_off = _stage_offset(ctx, stage);
+	if (WARN_ON(stage_off < 0))
+		return;
+
+	DPU_REG_WRITE(c, LM_BLEND0_FG_ALPHA + stage_off, fg_alpha);
+	DPU_REG_WRITE(c, LM_BLEND0_BG_ALPHA + stage_off, bg_alpha);
+	DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
+static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx,
+	uint32_t mixer_op_mode)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	int op_mode;
+
+	/* read the existing op_mode configuration */
+	op_mode = DPU_REG_READ(c, LM_OP_MODE);
+
+	op_mode = (op_mode & (BIT(31) | BIT(30))) | mixer_op_mode;
+
+	DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
+}
+
+static void dpu_hw_lm_gc(struct dpu_hw_mixer *mixer,
+			void *cfg)
+{
+}
+
+static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx,
+				bool enable, u32 frame_count)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+	u32 config = 0;
+
+	DPU_REG_WRITE(c, LM_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
+	/* clear misr data */
+	wmb();
+
+	if (enable)
+		config = (frame_count & MISR_FRAME_COUNT_MASK) |
+			MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
+
+	DPU_REG_WRITE(c, LM_MISR_CTRL, config);
+}
+
+static u32 dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx)
+{
+	struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+	return DPU_REG_READ(c, LM_MISR_SIGNATURE);
+}
+
+static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
+		struct dpu_hw_lm_ops *ops,
+		unsigned long features)
+{
+	ops->setup_mixer_out = dpu_hw_lm_setup_out;
+	if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion))
+		ops->setup_blend_config = dpu_hw_lm_setup_blend_config_sdm845;
+	else
+		ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
+	ops->setup_alpha_out = dpu_hw_lm_setup_color3;
+	ops->setup_border_color = dpu_hw_lm_setup_border_color;
+	ops->setup_gc = dpu_hw_lm_gc;
+	ops->setup_misr = dpu_hw_lm_setup_misr;
+	ops->collect_misr = dpu_hw_lm_collect_misr;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
+struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m)
+{
+	struct dpu_hw_mixer *c;
+	struct dpu_lm_cfg *cfg;
+	int rc;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _lm_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	c->idx = idx;
+	c->cap = cfg;
+	_setup_mixer_ops(m, &c->ops, c->cap->features);
+
+	rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_LM, idx, &dpu_hw_ops);
+	if (rc) {
+		DPU_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
+	return c;
+
+blk_init_error:
+	kzfree(c);
+
+	return ERR_PTR(rc);
+}
+
+void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm)
+{
+	if (lm)
+		dpu_hw_blk_destroy(&lm->base);
+	kfree(lm);
+}
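+
+/*
+ * Usage sketch (illustrative; the cfg, stage and alpha values are
+ * assumptions): a CRTC-level caller sizes the mixer output and then
+ * programs per-stage blending:
+ *
+ *	mixer->ops.setup_mixer_out(mixer, &cfg);
+ *	mixer->ops.setup_blend_config(mixer, DPU_STAGE_0, 0xff, 0x00,
+ *				      DPU_BLEND_FG_ALPHA_FG_CONST |
+ *				      DPU_BLEND_BG_ALPHA_BG_CONST);
+ */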
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
new file mode 100644
index 000000000000..e29e5dab31bf
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -0,0 +1,122 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_LM_H
+#define _DPU_HW_LM_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_mixer;
+
+struct dpu_hw_mixer_cfg {
+	u32 out_width;
+	u32 out_height;
+	bool right_mixer;
+	int flags;
+};
+
+struct dpu_hw_color3_cfg {
+	u8 keep_fg[DPU_STAGE_MAX];
+};
+
+/**
+ * struct dpu_hw_lm_ops : Interface to the mixer HW driver functions
+ *  Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_lm_ops {
+	/*
+	 * Sets up mixer output width and height
+	 * and border color if enabled
+	 */
+	void (*setup_mixer_out)(struct dpu_hw_mixer *ctx,
+		struct dpu_hw_mixer_cfg *cfg);
+
+	/*
+	 * Alpha blending configuration
+	 * for the specified stage
+	 */
+	void (*setup_blend_config)(struct dpu_hw_mixer *ctx, uint32_t stage,
+		uint32_t fg_alpha, uint32_t bg_alpha, uint32_t blend_op);
+
+	/*
+	 * Alpha color component selection from either fg or bg
+	 */
+	void (*setup_alpha_out)(struct dpu_hw_mixer *ctx, uint32_t mixer_op);
+
+	/**
+	 * setup_border_color : enable/disable border color
+	 */
+	void (*setup_border_color)(struct dpu_hw_mixer *ctx,
+		struct dpu_mdss_color *color,
+		u8 border_en);
+	/**
+	 * setup_gc : enable/disable gamma correction feature
+	 */
+	void (*setup_gc)(struct dpu_hw_mixer *mixer,
+			void *cfg);
+
+	/* setup_misr: enables/disables MISR in HW register */
+	void (*setup_misr)(struct dpu_hw_mixer *ctx,
+			bool enable, u32 frame_count);
+
+	/* collect_misr: reads and stores MISR data from HW register */
+	u32 (*collect_misr)(struct dpu_hw_mixer *ctx);
+};
+
+struct dpu_hw_mixer {
+	struct dpu_hw_blk base;
+	struct dpu_hw_blk_reg_map hw;
+
+	/* lm */
+	enum dpu_lm  idx;
+	const struct dpu_lm_cfg   *cap;
+	const struct dpu_mdp_cfg  *mdp;
+	const struct dpu_ctl_cfg  *ctl;
+
+	/* ops */
+	struct dpu_hw_lm_ops ops;
+
+	/* store mixer info specific to display */
+	struct dpu_hw_mixer_cfg cfg;
+};
+
+/**
+ * to_dpu_hw_mixer - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw)
+{
+	return container_of(hw, struct dpu_hw_mixer, base);
+}
+
+/**
+ * dpu_hw_lm_init(): Initializes the mixer hw driver object.
+ * should be called once before accessing every mixer.
+ * @idx:  mixer index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m :   pointer to mdss catalog data
+ */
+struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_lm_destroy(): Destroys layer mixer driver context
+ * @lm:   Pointer to LM driver context
+ */
+void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm);
+
+#endif /*_DPU_HW_LM_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
new file mode 100644
index 000000000000..35e6bf930924
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -0,0 +1,465 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_MDSS_H
+#define _DPU_HW_MDSS_H
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+
+#include "msm_drv.h"
+
+#define DPU_DBG_NAME			"dpu"
+
+#define DPU_NONE                        0
+
+#ifndef DPU_CSC_MATRIX_COEFF_SIZE
+#define DPU_CSC_MATRIX_COEFF_SIZE	9
+#endif
+
+#ifndef DPU_CSC_CLAMP_SIZE
+#define DPU_CSC_CLAMP_SIZE		6
+#endif
+
+#ifndef DPU_CSC_BIAS_SIZE
+#define DPU_CSC_BIAS_SIZE		3
+#endif
+
+#ifndef DPU_MAX_PLANES
+#define DPU_MAX_PLANES			4
+#endif
+
+#define PIPES_PER_STAGE			2
+#ifndef DPU_MAX_DE_CURVES
+#define DPU_MAX_DE_CURVES		3
+#endif
+
+enum dpu_format_flags {
+	DPU_FORMAT_FLAG_YUV_BIT,
+	DPU_FORMAT_FLAG_DX_BIT,
+	DPU_FORMAT_FLAG_COMPRESSED_BIT,
+	DPU_FORMAT_FLAG_BIT_MAX,
+};
+
+#define DPU_FORMAT_FLAG_YUV		BIT(DPU_FORMAT_FLAG_YUV_BIT)
+#define DPU_FORMAT_FLAG_DX		BIT(DPU_FORMAT_FLAG_DX_BIT)
+#define DPU_FORMAT_FLAG_COMPRESSED	BIT(DPU_FORMAT_FLAG_COMPRESSED_BIT)
+#define DPU_FORMAT_IS_YUV(X)		\
+	(test_bit(DPU_FORMAT_FLAG_YUV_BIT, (X)->flag))
+#define DPU_FORMAT_IS_DX(X)		\
+	(test_bit(DPU_FORMAT_FLAG_DX_BIT, (X)->flag))
+#define DPU_FORMAT_IS_LINEAR(X)		((X)->fetch_mode == DPU_FETCH_LINEAR)
+#define DPU_FORMAT_IS_TILE(X) \
+	(((X)->fetch_mode == DPU_FETCH_UBWC) && \
+			!test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+#define DPU_FORMAT_IS_UBWC(X) \
+	(((X)->fetch_mode == DPU_FETCH_UBWC) && \
+			test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+
+#define DPU_BLEND_FG_ALPHA_FG_CONST	(0 << 0)
+#define DPU_BLEND_FG_ALPHA_BG_CONST	(1 << 0)
+#define DPU_BLEND_FG_ALPHA_FG_PIXEL	(2 << 0)
+#define DPU_BLEND_FG_ALPHA_BG_PIXEL	(3 << 0)
+#define DPU_BLEND_FG_INV_ALPHA		(1 << 2)
+#define DPU_BLEND_FG_MOD_ALPHA		(1 << 3)
+#define DPU_BLEND_FG_INV_MOD_ALPHA	(1 << 4)
+#define DPU_BLEND_FG_TRANSP_EN		(1 << 5)
+#define DPU_BLEND_BG_ALPHA_FG_CONST	(0 << 8)
+#define DPU_BLEND_BG_ALPHA_BG_CONST	(1 << 8)
+#define DPU_BLEND_BG_ALPHA_FG_PIXEL	(2 << 8)
+#define DPU_BLEND_BG_ALPHA_BG_PIXEL	(3 << 8)
+#define DPU_BLEND_BG_INV_ALPHA		(1 << 10)
+#define DPU_BLEND_BG_MOD_ALPHA		(1 << 11)
+#define DPU_BLEND_BG_INV_MOD_ALPHA	(1 << 12)
+#define DPU_BLEND_BG_TRANSP_EN		(1 << 13)
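+
+/*
+ * Example (illustrative): a premultiplied-alpha layer is typically
+ * expressed as
+ *
+ *	blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
+ *		   DPU_BLEND_BG_ALPHA_FG_PIXEL |
+ *		   DPU_BLEND_BG_INV_ALPHA;
+ *
+ * i.e. constant foreground alpha, with the background scaled by the
+ * inverse of the foreground pixel alpha.
+ */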
+
+#define DPU_VSYNC0_SOURCE_GPIO		0
+#define DPU_VSYNC1_SOURCE_GPIO		1
+#define DPU_VSYNC2_SOURCE_GPIO		2
+#define DPU_VSYNC_SOURCE_INTF_0		3
+#define DPU_VSYNC_SOURCE_INTF_1		4
+#define DPU_VSYNC_SOURCE_INTF_2		5
+#define DPU_VSYNC_SOURCE_INTF_3		6
+#define DPU_VSYNC_SOURCE_WD_TIMER_4	11
+#define DPU_VSYNC_SOURCE_WD_TIMER_3	12
+#define DPU_VSYNC_SOURCE_WD_TIMER_2	13
+#define DPU_VSYNC_SOURCE_WD_TIMER_1	14
+#define DPU_VSYNC_SOURCE_WD_TIMER_0	15
+
+enum dpu_hw_blk_type {
+	DPU_HW_BLK_TOP = 0,
+	DPU_HW_BLK_SSPP,
+	DPU_HW_BLK_LM,
+	DPU_HW_BLK_CTL,
+	DPU_HW_BLK_CDM,
+	DPU_HW_BLK_PINGPONG,
+	DPU_HW_BLK_INTF,
+	DPU_HW_BLK_WB,
+	DPU_HW_BLK_MAX,
+};
+
+enum dpu_mdp {
+	MDP_TOP = 0x1,
+	MDP_MAX,
+};
+
+enum dpu_sspp {
+	SSPP_NONE,
+	SSPP_VIG0,
+	SSPP_VIG1,
+	SSPP_VIG2,
+	SSPP_VIG3,
+	SSPP_RGB0,
+	SSPP_RGB1,
+	SSPP_RGB2,
+	SSPP_RGB3,
+	SSPP_DMA0,
+	SSPP_DMA1,
+	SSPP_DMA2,
+	SSPP_DMA3,
+	SSPP_CURSOR0,
+	SSPP_CURSOR1,
+	SSPP_MAX
+};
+
+enum dpu_sspp_type {
+	SSPP_TYPE_VIG,
+	SSPP_TYPE_RGB,
+	SSPP_TYPE_DMA,
+	SSPP_TYPE_CURSOR,
+	SSPP_TYPE_MAX
+};
+
+enum dpu_lm {
+	LM_0 = 1,
+	LM_1,
+	LM_2,
+	LM_3,
+	LM_4,
+	LM_5,
+	LM_6,
+	LM_MAX
+};
+
+enum dpu_stage {
+	DPU_STAGE_BASE = 0,
+	DPU_STAGE_0,
+	DPU_STAGE_1,
+	DPU_STAGE_2,
+	DPU_STAGE_3,
+	DPU_STAGE_4,
+	DPU_STAGE_5,
+	DPU_STAGE_6,
+	DPU_STAGE_7,
+	DPU_STAGE_8,
+	DPU_STAGE_9,
+	DPU_STAGE_10,
+	DPU_STAGE_MAX
+};
+enum dpu_dspp {
+	DSPP_0 = 1,
+	DSPP_1,
+	DSPP_2,
+	DSPP_3,
+	DSPP_MAX
+};
+
+enum dpu_ds {
+	DS_TOP,
+	DS_0,
+	DS_1,
+	DS_MAX
+};
+
+enum dpu_ctl {
+	CTL_0 = 1,
+	CTL_1,
+	CTL_2,
+	CTL_3,
+	CTL_4,
+	CTL_MAX
+};
+
+enum dpu_cdm {
+	CDM_0 = 1,
+	CDM_1,
+	CDM_MAX
+};
+
+enum dpu_pingpong {
+	PINGPONG_0 = 1,
+	PINGPONG_1,
+	PINGPONG_2,
+	PINGPONG_3,
+	PINGPONG_4,
+	PINGPONG_S0,
+	PINGPONG_MAX
+};
+
+enum dpu_intf {
+	INTF_0 = 1,
+	INTF_1,
+	INTF_2,
+	INTF_3,
+	INTF_4,
+	INTF_5,
+	INTF_6,
+	INTF_MAX
+};
+
+enum dpu_intf_type {
+	INTF_NONE = 0x0,
+	INTF_DSI = 0x1,
+	INTF_HDMI = 0x3,
+	INTF_LCDC = 0x5,
+	INTF_EDP = 0x9,
+	INTF_DP = 0xa,
+	INTF_TYPE_MAX,
+
+	/* virtual interfaces */
+	INTF_WB = 0x100,
+};
+
+enum dpu_intf_mode {
+	INTF_MODE_NONE = 0,
+	INTF_MODE_CMD,
+	INTF_MODE_VIDEO,
+	INTF_MODE_WB_BLOCK,
+	INTF_MODE_WB_LINE,
+	INTF_MODE_MAX
+};
+
+enum dpu_wb {
+	WB_0 = 1,
+	WB_1,
+	WB_2,
+	WB_3,
+	WB_MAX
+};
+
+enum dpu_ad {
+	AD_0 = 0x1,
+	AD_1,
+	AD_MAX
+};
+
+enum dpu_cwb {
+	CWB_0 = 0x1,
+	CWB_1,
+	CWB_2,
+	CWB_3,
+	CWB_MAX
+};
+
+enum dpu_wd_timer {
+	WD_TIMER_0 = 0x1,
+	WD_TIMER_1,
+	WD_TIMER_2,
+	WD_TIMER_3,
+	WD_TIMER_4,
+	WD_TIMER_5,
+	WD_TIMER_MAX
+};
+
+enum dpu_vbif {
+	VBIF_0,
+	VBIF_1,
+	VBIF_MAX,
+	VBIF_RT = VBIF_0,
+	VBIF_NRT = VBIF_1
+};
+
+enum dpu_iommu_domain {
+	DPU_IOMMU_DOMAIN_UNSECURE,
+	DPU_IOMMU_DOMAIN_SECURE,
+	DPU_IOMMU_DOMAIN_MAX
+};
+
+/**
+ * DPU HW / component order color map
+ */
+enum {
+	C0_G_Y = 0,
+	C1_B_Cb = 1,
+	C2_R_Cr = 2,
+	C3_ALPHA = 3
+};
+
+/**
+ * enum dpu_plane_type - defines the color component pixel packing
+ * @DPU_PLANE_INTERLEAVED   : Color components in single plane
+ * @DPU_PLANE_PLANAR        : Color component in separate planes
+ * @DPU_PLANE_PSEUDO_PLANAR : Chroma components interleaved in separate plane
+ */
+enum dpu_plane_type {
+	DPU_PLANE_INTERLEAVED,
+	DPU_PLANE_PLANAR,
+	DPU_PLANE_PSEUDO_PLANAR,
+};
+
+/**
+ * enum dpu_chroma_samp_type - chroma sub-sampling type
+ * @DPU_CHROMA_RGB   : No chroma subsampling
+ * @DPU_CHROMA_H2V1  : Chroma pixels are horizontally subsampled
+ * @DPU_CHROMA_H1V2  : Chroma pixels are vertically subsampled
+ * @DPU_CHROMA_420   : 420 subsampling
+ */
+enum dpu_chroma_samp_type {
+	DPU_CHROMA_RGB,
+	DPU_CHROMA_H2V1,
+	DPU_CHROMA_H1V2,
+	DPU_CHROMA_420
+};
+
+/**
+ * enum dpu_fetch_type - defines how DPU HW fetches data
+ * @DPU_FETCH_LINEAR   : fetch is line by line
+ * @DPU_FETCH_TILE     : fetches data in Z order from a tile
+ * @DPU_FETCH_UBWC     : fetch and decompress data
+ */
+enum dpu_fetch_type {
+	DPU_FETCH_LINEAR,
+	DPU_FETCH_TILE,
+	DPU_FETCH_UBWC
+};
+
+/**
+ * Enum values are chosen to match the bit encodings
+ * expected by the HW programming.
+ */
+enum {
+	COLOR_ALPHA_1BIT = 0,
+	COLOR_ALPHA_4BIT = 1,
+	COLOR_4BIT = 0,
+	COLOR_5BIT = 1, /* No 5-bit Alpha */
+	COLOR_6BIT = 2, /* 6-Bit Alpha also = 2 */
+	COLOR_8BIT = 3, /* 8-Bit Alpha also = 3 */
+};
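+
+/* e.g. an RGB565 format would use COLOR_5BIT for R and B and COLOR_6BIT for G */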
+
+/**
+ * enum dpu_3d_blend_mode
+ * Describes how the 3d data is blended
+ * @BLEND_3D_NONE      : 3d blending not enabled
+ * @BLEND_3D_FRAME_INT : Frame interleaving
+ * @BLEND_3D_H_ROW_INT : Horizontal row interleaving
+ * @BLEND_3D_V_ROW_INT : Vertical row interleaving
+ * @BLEND_3D_COL_INT   : Column interleaving
+ * @BLEND_3D_MAX       : Sentinel value
+ */
+enum dpu_3d_blend_mode {
+	BLEND_3D_NONE = 0,
+	BLEND_3D_FRAME_INT,
+	BLEND_3D_H_ROW_INT,
+	BLEND_3D_V_ROW_INT,
+	BLEND_3D_COL_INT,
+	BLEND_3D_MAX
+};
+
+/**
+ * struct dpu_format - defines the format configuration which
+ * allows DPU HW to correctly fetch and decode the format
+ * @base: base msm_format structure containing fourcc code
+ * @fetch_planes: how the color components are packed in pixel format
+ * @element: element color ordering
+ * @bits: element bit widths
+ * @chroma_sample: chroma sub-sampling type
+ * @unpack_align_msb: unpack aligned, 0 to LSB, 1 to MSB
+ * @unpack_tight: 0 for loose, 1 for tight
+ * @unpack_count: 0 = 1 component, 1 = 2 components
+ * @bpp: bytes per pixel
+ * @alpha_enable: whether the format has an alpha channel
+ * @num_planes: number of planes (including meta data planes)
+ * @fetch_mode: linear, tiled, or ubwc hw fetch behavior
+ * @flag: usage bit flags
+ * @tile_width: format tile width
+ * @tile_height: format tile height
+ */
+struct dpu_format {
+	struct msm_format base;
+	enum dpu_plane_type fetch_planes;
+	u8 element[DPU_MAX_PLANES];
+	u8 bits[DPU_MAX_PLANES];
+	enum dpu_chroma_samp_type chroma_sample;
+	u8 unpack_align_msb;
+	u8 unpack_tight;
+	u8 unpack_count;
+	u8 bpp;
+	u8 alpha_enable;
+	u8 num_planes;
+	enum dpu_fetch_type fetch_mode;
+	DECLARE_BITMAP(flag, DPU_FORMAT_FLAG_BIT_MAX);
+	u16 tile_width;
+	u16 tile_height;
+};
+#define to_dpu_format(x) container_of(x, struct dpu_format, base)
+
+/**
+ * struct dpu_hw_fmt_layout - format information of the source pixel data
+ * @format: pixel format parameters
+ * @num_planes: number of planes (including meta data planes)
+ * @width: image width
+ * @height: image height
+ * @total_size: total size in bytes
+ * @plane_addr: address of each plane
+ * @plane_size: length of each plane
+ * @plane_pitch: pitch of each plane
+ */
+struct dpu_hw_fmt_layout {
+	const struct dpu_format *format;
+	uint32_t num_planes;
+	uint32_t width;
+	uint32_t height;
+	uint32_t total_size;
+	uint32_t plane_addr[DPU_MAX_PLANES];
+	uint32_t plane_size[DPU_MAX_PLANES];
+	uint32_t plane_pitch[DPU_MAX_PLANES];
+};
+
+struct dpu_csc_cfg {
+	/* matrix coefficients in S15.16 format */
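+	/* e.g. unity (1.0) is encoded as 0x00010000 */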
+	uint32_t csc_mv[DPU_CSC_MATRIX_COEFF_SIZE];
+	uint32_t csc_pre_bv[DPU_CSC_BIAS_SIZE];
+	uint32_t csc_post_bv[DPU_CSC_BIAS_SIZE];
+	uint32_t csc_pre_lv[DPU_CSC_CLAMP_SIZE];
+	uint32_t csc_post_lv[DPU_CSC_CLAMP_SIZE];
+};
+
+/**
+ * struct dpu_mdss_color - mdss color description
+ * color 0 : green
+ * color 1 : blue
+ * color 2 : red
+ * color 3 : alpha
+ */
+struct dpu_mdss_color {
+	u32 color_0;
+	u32 color_1;
+	u32 color_2;
+	u32 color_3;
+};
+
+/*
+ * Define bit masks for h/w logging.
+ */
+#define DPU_DBG_MASK_NONE     (1 << 0)
+#define DPU_DBG_MASK_CDM      (1 << 1)
+#define DPU_DBG_MASK_INTF     (1 << 2)
+#define DPU_DBG_MASK_LM       (1 << 3)
+#define DPU_DBG_MASK_CTL      (1 << 4)
+#define DPU_DBG_MASK_PINGPONG (1 << 5)
+#define DPU_DBG_MASK_SSPP     (1 << 6)
+#define DPU_DBG_MASK_WB       (1 << 7)
+#define DPU_DBG_MASK_TOP      (1 << 8)
+#define DPU_DBG_MASK_VBIF     (1 << 9)
+#define DPU_DBG_MASK_ROT      (1 << 10)
+
+#endif  /* _DPU_HW_MDSS_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
new file mode 100644
index 000000000000..cc3a623903f4
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -0,0 +1,250 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/iopoll.h>
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+#include "dpu_trace.h"
+
+#define PP_TEAR_CHECK_EN                0x000
+#define PP_SYNC_CONFIG_VSYNC            0x004
+#define PP_SYNC_CONFIG_HEIGHT           0x008
+#define PP_SYNC_WRCOUNT                 0x00C
+#define PP_VSYNC_INIT_VAL               0x010
+#define PP_INT_COUNT_VAL                0x014
+#define PP_SYNC_THRESH                  0x018
+#define PP_START_POS                    0x01C
+#define PP_RD_PTR_IRQ                   0x020
+#define PP_WR_PTR_IRQ                   0x024
+#define PP_OUT_LINE_COUNT               0x028
+#define PP_LINE_COUNT                   0x02C
+
+#define PP_FBC_MODE                     0x034
+#define PP_FBC_BUDGET_CTL               0x038
+#define PP_FBC_LOSSY_MODE               0x03C
+
+static struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp,
+		struct dpu_mdss_cfg *m,
+		void __iomem *addr,
+		struct dpu_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->pingpong_count; i++) {
+		if (pp == m->pingpong[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->pingpong[i].base;
+			b->length = m->pingpong[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = DPU_DBG_MASK_PINGPONG;
+			return &m->pingpong[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+static int dpu_hw_pp_setup_te_config(struct dpu_hw_pingpong *pp,
+		struct dpu_hw_tear_check *te)
+{
+	struct dpu_hw_blk_reg_map *c;
+	int cfg;
+
+	if (!pp || !te)
+		return -EINVAL;
+	c = &pp->hw;
+
+	cfg = BIT(19); /* VSYNC_COUNTER_EN */
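+	/* BIT(20) selects the external TE / HW vsync input (see connect_external_te) */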
+	if (te->hw_vsync_mode)
+		cfg |= BIT(20);
+
+	cfg |= te->vsync_count;
+
+	DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
+	DPU_REG_WRITE(c, PP_SYNC_CONFIG_HEIGHT, te->sync_cfg_height);
+	DPU_REG_WRITE(c, PP_VSYNC_INIT_VAL, te->vsync_init_val);
+	DPU_REG_WRITE(c, PP_RD_PTR_IRQ, te->rd_ptr_irq);
+	DPU_REG_WRITE(c, PP_START_POS, te->start_pos);
+	DPU_REG_WRITE(c, PP_SYNC_THRESH,
+			((te->sync_threshold_continue << 16) |
+			 te->sync_threshold_start));
+	DPU_REG_WRITE(c, PP_SYNC_WRCOUNT,
+			(te->start_pos + te->sync_threshold_start + 1));
+
+	return 0;
+}
+
+static int dpu_hw_pp_poll_timeout_wr_ptr(struct dpu_hw_pingpong *pp,
+		u32 timeout_us)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 val;
+	int rc;
+
+	if (!pp)
+		return -EINVAL;
+
+	c = &pp->hw;
+	rc = readl_poll_timeout(c->base_off + c->blk_off + PP_LINE_COUNT,
+			val, (val & 0xffff) >= 1, 10, timeout_us);
+
+	return rc;
+}
+
+static int dpu_hw_pp_enable_te(struct dpu_hw_pingpong *pp, bool enable)
+{
+	struct dpu_hw_blk_reg_map *c;
+
+	if (!pp)
+		return -EINVAL;
+	c = &pp->hw;
+
+	DPU_REG_WRITE(c, PP_TEAR_CHECK_EN, enable);
+	return 0;
+}
+
+static int dpu_hw_pp_connect_external_te(struct dpu_hw_pingpong *pp,
+		bool enable_external_te)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 cfg;
+	int orig;
+
+	if (!pp)
+		return -EINVAL;
+
+	c = &pp->hw;
+	cfg = DPU_REG_READ(c, PP_SYNC_CONFIG_VSYNC);
+	orig = (bool)(cfg & BIT(20));
+	if (enable_external_te)
+		cfg |= BIT(20);
+	else
+		cfg &= ~BIT(20);
+	DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
+	trace_dpu_pp_connect_ext_te(pp->idx - PINGPONG_0, cfg);
+
+	return orig;
+}
+
+static int dpu_hw_pp_get_vsync_info(struct dpu_hw_pingpong *pp,
+		struct dpu_hw_pp_vsync_info *info)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 val;
+
+	if (!pp || !info)
+		return -EINVAL;
+	c = &pp->hw;
+
+	val = DPU_REG_READ(c, PP_VSYNC_INIT_VAL);
+	info->rd_ptr_init_val = val & 0xffff;
+
+	val = DPU_REG_READ(c, PP_INT_COUNT_VAL);
+	info->rd_ptr_frame_count = (val & 0xffff0000) >> 16;
+	info->rd_ptr_line_count = val & 0xffff;
+
+	val = DPU_REG_READ(c, PP_LINE_COUNT);
+	info->wr_ptr_line_count = val & 0xffff;
+
+	return 0;
+}
+
+static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 height, init;
+	u32 line = 0xFFFF;
+
+	if (!pp)
+		return 0;
+	c = &pp->hw;
+
+	init = DPU_REG_READ(c, PP_VSYNC_INIT_VAL) & 0xFFFF;
+	height = DPU_REG_READ(c, PP_SYNC_CONFIG_HEIGHT) & 0xFFFF;
+
+	if (height < init)
+		goto line_count_exit;
+
+	line = DPU_REG_READ(c, PP_INT_COUNT_VAL) & 0xFFFF;
+
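+	/* normalize against the programmed init value, handling 16-bit wrap */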
+	if (line < init)
+		line += (0xFFFF - init);
+	else
+		line -= init;
+
+line_count_exit:
+	return line;
+}
+
+static void _setup_pingpong_ops(struct dpu_hw_pingpong_ops *ops,
+	const struct dpu_pingpong_cfg *hw_cap)
+{
+	ops->setup_tearcheck = dpu_hw_pp_setup_te_config;
+	ops->enable_tearcheck = dpu_hw_pp_enable_te;
+	ops->connect_external_te = dpu_hw_pp_connect_external_te;
+	ops->get_vsync_info = dpu_hw_pp_get_vsync_info;
+	ops->poll_timeout_wr_ptr = dpu_hw_pp_poll_timeout_wr_ptr;
+	ops->get_line_count = dpu_hw_pp_get_line_count;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
+struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m)
+{
+	struct dpu_hw_pingpong *c;
+	struct dpu_pingpong_cfg *cfg;
+	int rc;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _pingpong_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	c->idx = idx;
+	c->caps = cfg;
+	_setup_pingpong_ops(&c->ops, c->caps);
+
+	rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_PINGPONG, idx, &dpu_hw_ops);
+	if (rc) {
+		DPU_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
+	return c;
+
+blk_init_error:
+	kfree(c);
+
+	return ERR_PTR(rc);
+}
+
+void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp)
+{
+	if (pp)
+		dpu_hw_blk_destroy(&pp->base);
+	kfree(pp);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
new file mode 100644
index 000000000000..3caccd7d6a3e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -0,0 +1,136 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_PINGPONG_H
+#define _DPU_HW_PINGPONG_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_pingpong;
+
+struct dpu_hw_tear_check {
+	/*
+	 * Ratio of MDP VSYNC clock frequency (Hz) to the
+	 * refresh rate, divided by the number of lines
+	 */
+	u32 vsync_count;
+	u32 sync_cfg_height;
+	u32 vsync_init_val;
+	u32 sync_threshold_start;
+	u32 sync_threshold_continue;
+	u32 start_pos;
+	u32 rd_ptr_irq;
+	u8 hw_vsync_mode;
+};
+
+struct dpu_hw_pp_vsync_info {
+	u32 rd_ptr_init_val;	/* value of rd pointer at vsync edge */
+	u32 rd_ptr_frame_count;	/* num frames sent since enabling interface */
+	u32 rd_ptr_line_count;	/* current line on panel (rd ptr) */
+	u32 wr_ptr_line_count;	/* current line within pp fifo (wr ptr) */
+};
+
+/**
+ * struct dpu_hw_pingpong_ops : Interface to the pingpong Hw driver functions
+ *  Assumption is these functions will be called after clocks are enabled
+ *  @setup_tearcheck : program tear check values
+ *  @enable_tearcheck : enables tear check
+ *  @connect_external_te : connect/disconnect the external TE source
+ *  @get_vsync_info : retrieves timing info of the panel
+ *  @poll_timeout_wr_ptr : poll until write pointer transmission starts
+ *  @get_line_count : obtain current vertical line counter
+ */
+struct dpu_hw_pingpong_ops {
+	/**
+	 * enables vsync generation, sets up the initial value of the
+	 * read pointer and programs the tear check configuration
+	 */
+	int (*setup_tearcheck)(struct dpu_hw_pingpong *pp,
+			struct dpu_hw_tear_check *cfg);
+
+	/**
+	 * enables tear check block
+	 */
+	int (*enable_tearcheck)(struct dpu_hw_pingpong *pp,
+			bool enable);
+
+	/**
+	 * read, modify, write to either set or clear listening to external TE
+	 * @Return: 1 if TE was originally connected, 0 if not, or -ERROR
+	 */
+	int (*connect_external_te)(struct dpu_hw_pingpong *pp,
+			bool enable_external_te);
+
+	/**
+	 * provides the programmed and current
+	 * line_count
+	 */
+	int (*get_vsync_info)(struct dpu_hw_pingpong *pp,
+			struct dpu_hw_pp_vsync_info  *info);
+
+	/**
+	 * poll until write pointer transmission starts
+	 * @Return: 0 on success, -ETIMEDOUT on timeout
+	 */
+	int (*poll_timeout_wr_ptr)(struct dpu_hw_pingpong *pp, u32 timeout_us);
+
+	/**
+	 * Obtain current vertical line counter
+	 */
+	u32 (*get_line_count)(struct dpu_hw_pingpong *pp);
+};
+
+struct dpu_hw_pingpong {
+	struct dpu_hw_blk base;
+	struct dpu_hw_blk_reg_map hw;
+
+	/* pingpong */
+	enum dpu_pingpong idx;
+	const struct dpu_pingpong_cfg *caps;
+
+	/* ops */
+	struct dpu_hw_pingpong_ops ops;
+};
+
+/**
+ * dpu_hw_pingpong - convert base object dpu_hw_base to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
+{
+	return container_of(hw, struct dpu_hw_pingpong, base);
+}
+
+/**
+ * dpu_hw_pingpong_init - initializes the pingpong driver for the passed
+ *	pingpong idx.
+ * @idx:  Pingpong index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m:    Pointer to mdss catalog data
+ * Returns: Error code or allocated dpu_hw_pingpong context
+ */
+struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_pingpong_destroy - destroys pingpong driver context
+ *	should be called to free the context
+ * @pp:   Pointer to PP driver context returned by dpu_hw_pingpong_init
+ */
+void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp);
+
+#endif /*_DPU_HW_PINGPONG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
new file mode 100644
index 000000000000..c25b52a6b219
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -0,0 +1,753 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define DPU_FETCH_CONFIG_RESET_VALUE   0x00000087
+
+/* DPU_SSPP_SRC */
+#define SSPP_SRC_SIZE                      0x00
+#define SSPP_SRC_XY                        0x08
+#define SSPP_OUT_SIZE                      0x0c
+#define SSPP_OUT_XY                        0x10
+#define SSPP_SRC0_ADDR                     0x14
+#define SSPP_SRC1_ADDR                     0x18
+#define SSPP_SRC2_ADDR                     0x1C
+#define SSPP_SRC3_ADDR                     0x20
+#define SSPP_SRC_YSTRIDE0                  0x24
+#define SSPP_SRC_YSTRIDE1                  0x28
+#define SSPP_SRC_FORMAT                    0x30
+#define SSPP_SRC_UNPACK_PATTERN            0x34
+#define SSPP_SRC_OP_MODE                   0x38
+
+/* SSPP_MULTIRECT*/
+#define SSPP_SRC_SIZE_REC1                 0x16C
+#define SSPP_SRC_XY_REC1                   0x168
+#define SSPP_OUT_SIZE_REC1                 0x160
+#define SSPP_OUT_XY_REC1                   0x164
+#define SSPP_SRC_FORMAT_REC1               0x174
+#define SSPP_SRC_UNPACK_PATTERN_REC1       0x178
+#define SSPP_SRC_OP_MODE_REC1              0x17C
+#define SSPP_MULTIRECT_OPMODE              0x170
+#define SSPP_SRC_CONSTANT_COLOR_REC1       0x180
+#define SSPP_EXCL_REC_SIZE_REC1            0x184
+#define SSPP_EXCL_REC_XY_REC1              0x188
+
+#define MDSS_MDP_OP_DEINTERLACE            BIT(22)
+#define MDSS_MDP_OP_DEINTERLACE_ODD        BIT(23)
+#define MDSS_MDP_OP_IGC_ROM_1              BIT(18)
+#define MDSS_MDP_OP_IGC_ROM_0              BIT(17)
+#define MDSS_MDP_OP_IGC_EN                 BIT(16)
+#define MDSS_MDP_OP_FLIP_UD                BIT(14)
+#define MDSS_MDP_OP_FLIP_LR                BIT(13)
+#define MDSS_MDP_OP_BWC_EN                 BIT(0)
+#define MDSS_MDP_OP_PE_OVERRIDE            BIT(31)
+#define MDSS_MDP_OP_BWC_LOSSLESS           (0 << 1)
+#define MDSS_MDP_OP_BWC_Q_HIGH             (1 << 1)
+#define MDSS_MDP_OP_BWC_Q_MED              (2 << 1)
+
+#define SSPP_SRC_CONSTANT_COLOR            0x3c
+#define SSPP_EXCL_REC_CTL                  0x40
+#define SSPP_UBWC_STATIC_CTRL              0x44
+#define SSPP_FETCH_CONFIG                  0x048
+#define SSPP_DANGER_LUT                    0x60
+#define SSPP_SAFE_LUT                      0x64
+#define SSPP_CREQ_LUT                      0x68
+#define SSPP_QOS_CTRL                      0x6C
+#define SSPP_DECIMATION_CONFIG             0xB4
+#define SSPP_SRC_ADDR_SW_STATUS            0x70
+#define SSPP_CREQ_LUT_0                    0x74
+#define SSPP_CREQ_LUT_1                    0x78
+#define SSPP_SW_PIX_EXT_C0_LR              0x100
+#define SSPP_SW_PIX_EXT_C0_TB              0x104
+#define SSPP_SW_PIX_EXT_C0_REQ_PIXELS      0x108
+#define SSPP_SW_PIX_EXT_C1C2_LR            0x110
+#define SSPP_SW_PIX_EXT_C1C2_TB            0x114
+#define SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS    0x118
+#define SSPP_SW_PIX_EXT_C3_LR              0x120
+#define SSPP_SW_PIX_EXT_C3_TB              0x124
+#define SSPP_SW_PIX_EXT_C3_REQ_PIXELS      0x128
+#define SSPP_TRAFFIC_SHAPER                0x130
+#define SSPP_CDP_CNTL                      0x134
+#define SSPP_UBWC_ERROR_STATUS             0x138
+#define SSPP_TRAFFIC_SHAPER_PREFILL        0x150
+#define SSPP_TRAFFIC_SHAPER_REC1_PREFILL   0x154
+#define SSPP_TRAFFIC_SHAPER_REC1           0x158
+#define SSPP_EXCL_REC_SIZE                 0x1B4
+#define SSPP_EXCL_REC_XY                   0x1B8
+#define SSPP_VIG_OP_MODE                   0x0
+#define SSPP_VIG_CSC_10_OP_MODE            0x0
+#define SSPP_TRAFFIC_SHAPER_BPC_MAX        0xFF
+
+/* SSPP_QOS_CTRL */
+#define SSPP_QOS_CTRL_VBLANK_EN            BIT(16)
+#define SSPP_QOS_CTRL_DANGER_SAFE_EN       BIT(0)
+#define SSPP_QOS_CTRL_DANGER_VBLANK_MASK   0x3
+#define SSPP_QOS_CTRL_DANGER_VBLANK_OFF    4
+#define SSPP_QOS_CTRL_CREQ_VBLANK_MASK     0x3
+#define SSPP_QOS_CTRL_CREQ_VBLANK_OFF      20
+
+/* DPU_SSPP_SCALER_QSEED2 */
+#define SCALE_CONFIG                       0x04
+#define COMP0_3_PHASE_STEP_X               0x10
+#define COMP0_3_PHASE_STEP_Y               0x14
+#define COMP1_2_PHASE_STEP_X               0x18
+#define COMP1_2_PHASE_STEP_Y               0x1c
+#define COMP0_3_INIT_PHASE_X               0x20
+#define COMP0_3_INIT_PHASE_Y               0x24
+#define COMP1_2_INIT_PHASE_X               0x28
+#define COMP1_2_INIT_PHASE_Y               0x2C
+#define VIG_0_QSEED2_SHARP                 0x30
+
+/*
+ * Definitions for ViG op modes
+ */
+#define VIG_OP_CSC_DST_DATAFMT BIT(19)
+#define VIG_OP_CSC_SRC_DATAFMT BIT(18)
+#define VIG_OP_CSC_EN          BIT(17)
+#define VIG_OP_MEM_PROT_CONT   BIT(15)
+#define VIG_OP_MEM_PROT_VAL    BIT(14)
+#define VIG_OP_MEM_PROT_SAT    BIT(13)
+#define VIG_OP_MEM_PROT_HUE    BIT(12)
+#define VIG_OP_HIST            BIT(8)
+#define VIG_OP_SKY_COL         BIT(7)
+#define VIG_OP_FOIL            BIT(6)
+#define VIG_OP_SKIN_COL        BIT(5)
+#define VIG_OP_PA_EN           BIT(4)
+#define VIG_OP_PA_SAT_ZERO_EXP BIT(2)
+#define VIG_OP_MEM_PROT_BLEND  BIT(1)
+
+/*
+ * Definitions for CSC 10 op modes
+ */
+#define VIG_CSC_10_SRC_DATAFMT BIT(1)
+#define VIG_CSC_10_EN          BIT(0)
+#define CSC_10BIT_OFFSET       4
+
+/* traffic shaper clock in Hz */
+#define TS_CLK			19200000
+
+static inline int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
+		int s_id,
+		u32 *idx)
+{
+	int rc = 0;
+	const struct dpu_sspp_sub_blks *sblk;
+
+	if (!ctx)
+		return -EINVAL;
+
+	sblk = ctx->cap->sblk;
+
+	switch (s_id) {
+	case DPU_SSPP_SRC:
+		*idx = sblk->src_blk.base;
+		break;
+	case DPU_SSPP_SCALER_QSEED2:
+	case DPU_SSPP_SCALER_QSEED3:
+	case DPU_SSPP_SCALER_RGB:
+		*idx = sblk->scaler_blk.base;
+		break;
+	case DPU_SSPP_CSC:
+	case DPU_SSPP_CSC_10BIT:
+		*idx = sblk->csc_blk.base;
+		break;
+	default:
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static void dpu_hw_sspp_setup_multirect(struct dpu_hw_pipe *ctx,
+		enum dpu_sspp_multirect_index index,
+		enum dpu_sspp_multirect_mode mode)
+{
+	u32 mode_mask;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+		return;
+
+	if (index == DPU_SSPP_RECT_SOLO) {
+		/**
+		 * if rect index is RECT_SOLO, we cannot expect a
+		 * virtual plane sharing the same SSPP id. So we go
+		 * and disable multirect
+		 */
+		mode_mask = 0;
+	} else {
+		mode_mask = DPU_REG_READ(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx);
+		mode_mask |= index;
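+		/* BIT(2) selects time-multiplexed fetch; cleared selects parallel */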
+		if (mode == DPU_SSPP_MULTIRECT_TIME_MX)
+			mode_mask |= BIT(2);
+		else
+			mode_mask &= ~BIT(2);
+	}
+
+	DPU_REG_WRITE(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx, mode_mask);
+}
+
+static void _sspp_setup_opmode(struct dpu_hw_pipe *ctx,
+		u32 mask, u8 en)
+{
+	u32 idx;
+	u32 opmode;
+
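+	/*
+	 * The VIG op mode register is only present on pipes that have both
+	 * the QSEED2 scaler and CSC sub-blocks.
+	 */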
+	if (!test_bit(DPU_SSPP_SCALER_QSEED2, &ctx->cap->features) ||
+		_sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED2, &idx) ||
+		!test_bit(DPU_SSPP_CSC, &ctx->cap->features))
+		return;
+
+	opmode = DPU_REG_READ(&ctx->hw, SSPP_VIG_OP_MODE + idx);
+
+	if (en)
+		opmode |= mask;
+	else
+		opmode &= ~mask;
+
+	DPU_REG_WRITE(&ctx->hw, SSPP_VIG_OP_MODE + idx, opmode);
+}
+
+static void _sspp_setup_csc10_opmode(struct dpu_hw_pipe *ctx,
+		u32 mask, u8 en)
+{
+	u32 idx;
+	u32 opmode;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_CSC_10BIT, &idx))
+		return;
+
+	opmode = DPU_REG_READ(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx);
+	if (en)
+		opmode |= mask;
+	else
+		opmode &= ~mask;
+
+	DPU_REG_WRITE(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx, opmode);
+}
+
+/**
+ * Setup source pixel format and flip configuration
+ */
+static void dpu_hw_sspp_setup_format(struct dpu_hw_pipe *ctx,
+		const struct dpu_format *fmt, u32 flags,
+		enum dpu_sspp_multirect_index rect_mode)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 chroma_samp, unpack, src_format;
+	u32 opmode = 0;
+	u32 fast_clear = 0;
+	u32 op_mode_off, unpack_pat_off, format_off;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !fmt)
+		return;
+
+	if (rect_mode == DPU_SSPP_RECT_SOLO || rect_mode == DPU_SSPP_RECT_0) {
+		op_mode_off = SSPP_SRC_OP_MODE;
+		unpack_pat_off = SSPP_SRC_UNPACK_PATTERN;
+		format_off = SSPP_SRC_FORMAT;
+	} else {
+		op_mode_off = SSPP_SRC_OP_MODE_REC1;
+		unpack_pat_off = SSPP_SRC_UNPACK_PATTERN_REC1;
+		format_off = SSPP_SRC_FORMAT_REC1;
+	}
+
+	c = &ctx->hw;
+	opmode = DPU_REG_READ(c, op_mode_off + idx);
+	opmode &= ~(MDSS_MDP_OP_FLIP_LR | MDSS_MDP_OP_FLIP_UD |
+			MDSS_MDP_OP_BWC_EN | MDSS_MDP_OP_PE_OVERRIDE);
+
+	if (flags & DPU_SSPP_FLIP_LR)
+		opmode |= MDSS_MDP_OP_FLIP_LR;
+	if (flags & DPU_SSPP_FLIP_UD)
+		opmode |= MDSS_MDP_OP_FLIP_UD;
+
+	chroma_samp = fmt->chroma_sample;
+	if (flags & DPU_SSPP_SOURCE_ROTATED_90) {
+		if (chroma_samp == DPU_CHROMA_H2V1)
+			chroma_samp = DPU_CHROMA_H1V2;
+		else if (chroma_samp == DPU_CHROMA_H1V2)
+			chroma_samp = DPU_CHROMA_H2V1;
+	}
+
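+	/*
+	 * SRC_FORMAT packing, per the shifts below: chroma sample at bits
+	 * [24:23], fetch planes at [20:19], and per-component bit depths
+	 * for alpha/R/B/G at [7:6], [5:4], [3:2] and [1:0].
+	 */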
+	src_format = (chroma_samp << 23) | (fmt->fetch_planes << 19) |
+		(fmt->bits[C3_ALPHA] << 6) | (fmt->bits[C2_R_Cr] << 4) |
+		(fmt->bits[C1_B_Cb] << 2) | (fmt->bits[C0_G_Y] << 0);
+
+	if (flags & DPU_SSPP_ROT_90)
+		src_format |= BIT(11); /* ROT90 */
+
+	if (fmt->alpha_enable && fmt->fetch_planes == DPU_PLANE_INTERLEAVED)
+		src_format |= BIT(8); /* SRCC3_EN */
+
+	if (flags & DPU_SSPP_SOLID_FILL)
+		src_format |= BIT(22);
+
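+	/* unpack pattern: one component index per byte, element[0] in the LSB */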
+	unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
+		(fmt->element[1] << 8) | (fmt->element[0] << 0);
+	src_format |= ((fmt->unpack_count - 1) << 12) |
+		(fmt->unpack_tight << 17) |
+		(fmt->unpack_align_msb << 18) |
+		((fmt->bpp - 1) << 9);
+
+	if (fmt->fetch_mode != DPU_FETCH_LINEAR) {
+		if (DPU_FORMAT_IS_UBWC(fmt))
+			opmode |= MDSS_MDP_OP_BWC_EN;
+		src_format |= (fmt->fetch_mode & 3) << 30; /*FRAME_FORMAT */
+		DPU_REG_WRITE(c, SSPP_FETCH_CONFIG,
+			DPU_FETCH_CONFIG_RESET_VALUE |
+			ctx->mdp->highest_bank_bit << 18);
+		if (IS_UBWC_20_SUPPORTED(ctx->catalog->caps->ubwc_version)) {
+			fast_clear = fmt->alpha_enable ? BIT(31) : 0;
+			DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
+					fast_clear | (ctx->mdp->ubwc_swizzle) |
+					(ctx->mdp->highest_bank_bit << 4));
+		}
+	}
+
+	opmode |= MDSS_MDP_OP_PE_OVERRIDE;
+
+	/* if this is YUV pixel format, enable CSC */
+	if (DPU_FORMAT_IS_YUV(fmt))
+		src_format |= BIT(15);
+
+	if (DPU_FORMAT_IS_DX(fmt))
+		src_format |= BIT(14);
+
+	/* update scaler opmode, if appropriate */
+	if (test_bit(DPU_SSPP_CSC, &ctx->cap->features))
+		_sspp_setup_opmode(ctx, VIG_OP_CSC_EN | VIG_OP_CSC_SRC_DATAFMT,
+			DPU_FORMAT_IS_YUV(fmt));
+	else if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features))
+		_sspp_setup_csc10_opmode(ctx,
+			VIG_CSC_10_EN | VIG_CSC_10_SRC_DATAFMT,
+			DPU_FORMAT_IS_YUV(fmt));
+
+	DPU_REG_WRITE(c, format_off + idx, src_format);
+	DPU_REG_WRITE(c, unpack_pat_off + idx, unpack);
+	DPU_REG_WRITE(c, op_mode_off + idx, opmode);
+
+	/* clear previous UBWC error */
+	DPU_REG_WRITE(c, SSPP_UBWC_ERROR_STATUS + idx, BIT(31));
+}
+
+static void dpu_hw_sspp_setup_pe_config(struct dpu_hw_pipe *ctx,
+		struct dpu_hw_pixel_ext *pe_ext)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u8 color;
+	u32 lr_pe[4], tb_pe[4], tot_req_pixels[4];
+	const u32 bytemask = 0xff;
+	const u32 shortmask = 0xffff;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !pe_ext)
+		return;
+
+	c = &ctx->hw;
+
+	/* program SW pixel extension override for all pipes */
+	for (color = 0; color < DPU_MAX_PLANES; color++) {
+		/* color 2 has the same set of registers as color 1 */
+		if (color == 2)
+			continue;
+
+		lr_pe[color] = ((pe_ext->right_ftch[color] & bytemask) << 24)|
+			((pe_ext->right_rpt[color] & bytemask) << 16)|
+			((pe_ext->left_ftch[color] & bytemask) << 8)|
+			(pe_ext->left_rpt[color] & bytemask);
+
+		tb_pe[color] = ((pe_ext->btm_ftch[color] & bytemask) << 24)|
+			((pe_ext->btm_rpt[color] & bytemask) << 16)|
+			((pe_ext->top_ftch[color] & bytemask) << 8)|
+			(pe_ext->top_rpt[color] & bytemask);
+
+		tot_req_pixels[color] = (((pe_ext->roi_h[color] +
+			pe_ext->num_ext_pxls_top[color] +
+			pe_ext->num_ext_pxls_btm[color]) & shortmask) << 16) |
+			((pe_ext->roi_w[color] +
+			pe_ext->num_ext_pxls_left[color] +
+			pe_ext->num_ext_pxls_right[color]) & shortmask);
+	}
+
+	/* color 0 */
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_LR + idx, lr_pe[0]);
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_TB + idx, tb_pe[0]);
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_REQ_PIXELS + idx,
+			tot_req_pixels[0]);
+
+	/* color 1 and color 2 */
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_LR + idx, lr_pe[1]);
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_TB + idx, tb_pe[1]);
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS + idx,
+			tot_req_pixels[1]);
+
+	/* color 3 */
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_LR + idx, lr_pe[3]);
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_TB + idx, tb_pe[3]);
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_REQ_PIXELS + idx,
+			tot_req_pixels[3]);
+}
+
+static void _dpu_hw_sspp_setup_scaler3(struct dpu_hw_pipe *ctx,
+		struct dpu_hw_pipe_cfg *sspp,
+		struct dpu_hw_pixel_ext *pe,
+		void *scaler_cfg)
+{
+	u32 idx;
+	struct dpu_hw_scaler3_cfg *scaler3_cfg = scaler_cfg;
+
+	(void)pe;
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx) || !sspp
+		|| !scaler3_cfg || !ctx || !ctx->cap || !ctx->cap->sblk)
+		return;
+
+	dpu_hw_setup_scaler3(&ctx->hw, scaler3_cfg, idx,
+			ctx->cap->sblk->scaler_blk.version,
+			sspp->layout.format);
+}
+
+static u32 _dpu_hw_sspp_get_scaler3_ver(struct dpu_hw_pipe *ctx)
+{
+	u32 idx;
+
+	if (!ctx || _sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx))
+		return 0;
+
+	return dpu_hw_get_scaler3_ver(&ctx->hw, idx);
+}
+
+/**
+ * dpu_hw_sspp_setup_rects() - program source/destination rectangles and strides
+ */
+static void dpu_hw_sspp_setup_rects(struct dpu_hw_pipe *ctx,
+		struct dpu_hw_pipe_cfg *cfg,
+		enum dpu_sspp_multirect_index rect_index)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 src_size, src_xy, dst_size, dst_xy, ystride0, ystride1;
+	u32 src_size_off, src_xy_off, out_size_off, out_xy_off;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !cfg)
+		return;
+
+	c = &ctx->hw;
+
+	if (rect_index == DPU_SSPP_RECT_SOLO || rect_index == DPU_SSPP_RECT_0) {
+		src_size_off = SSPP_SRC_SIZE;
+		src_xy_off = SSPP_SRC_XY;
+		out_size_off = SSPP_OUT_SIZE;
+		out_xy_off = SSPP_OUT_XY;
+	} else {
+		src_size_off = SSPP_SRC_SIZE_REC1;
+		src_xy_off = SSPP_SRC_XY_REC1;
+		out_size_off = SSPP_OUT_SIZE_REC1;
+		out_xy_off = SSPP_OUT_XY_REC1;
+	}
+
+
+	/* src and dest rect programming */
+	src_xy = (cfg->src_rect.y1 << 16) | cfg->src_rect.x1;
+	src_size = (drm_rect_height(&cfg->src_rect) << 16) |
+		   drm_rect_width(&cfg->src_rect);
+	dst_xy = (cfg->dst_rect.y1 << 16) | cfg->dst_rect.x1;
+	dst_size = (drm_rect_height(&cfg->dst_rect) << 16) |
+		drm_rect_width(&cfg->dst_rect);
+
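+	/*
+	 * In multirect modes each rect owns one 16-bit half of the shared
+	 * YSTRIDE registers, so RECT0/RECT1 read-modify-write their half.
+	 */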
+	if (rect_index == DPU_SSPP_RECT_SOLO) {
+		ystride0 = (cfg->layout.plane_pitch[0]) |
+			(cfg->layout.plane_pitch[1] << 16);
+		ystride1 = (cfg->layout.plane_pitch[2]) |
+			(cfg->layout.plane_pitch[3] << 16);
+	} else {
+		ystride0 = DPU_REG_READ(c, SSPP_SRC_YSTRIDE0 + idx);
+		ystride1 = DPU_REG_READ(c, SSPP_SRC_YSTRIDE1 + idx);
+
+		if (rect_index == DPU_SSPP_RECT_0) {
+			ystride0 = (ystride0 & 0xFFFF0000) |
+				(cfg->layout.plane_pitch[0] & 0x0000FFFF);
+			ystride1 = (ystride1 & 0xFFFF0000)|
+				(cfg->layout.plane_pitch[2] & 0x0000FFFF);
+		} else {
+			ystride0 = (ystride0 & 0x0000FFFF) |
+				((cfg->layout.plane_pitch[0] << 16) &
+				 0xFFFF0000);
+			ystride1 = (ystride1 & 0x0000FFFF) |
+				((cfg->layout.plane_pitch[2] << 16) &
+				 0xFFFF0000);
+		}
+	}
+
+	/* rectangle register programming */
+	DPU_REG_WRITE(c, src_size_off + idx, src_size);
+	DPU_REG_WRITE(c, src_xy_off + idx, src_xy);
+	DPU_REG_WRITE(c, out_size_off + idx, dst_size);
+	DPU_REG_WRITE(c, out_xy_off + idx, dst_xy);
+
+	DPU_REG_WRITE(c, SSPP_SRC_YSTRIDE0 + idx, ystride0);
+	DPU_REG_WRITE(c, SSPP_SRC_YSTRIDE1 + idx, ystride1);
+}
+
+static void dpu_hw_sspp_setup_sourceaddress(struct dpu_hw_pipe *ctx,
+		struct dpu_hw_pipe_cfg *cfg,
+		enum dpu_sspp_multirect_index rect_mode)
+{
+	int i;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+		return;
+
+	if (rect_mode == DPU_SSPP_RECT_SOLO) {
+		for (i = 0; i < ARRAY_SIZE(cfg->layout.plane_addr); i++)
+			DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx + i * 0x4,
+					cfg->layout.plane_addr[i]);
+	} else if (rect_mode == DPU_SSPP_RECT_0) {
+		DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx,
+				cfg->layout.plane_addr[0]);
+		DPU_REG_WRITE(&ctx->hw, SSPP_SRC2_ADDR + idx,
+				cfg->layout.plane_addr[2]);
+	} else {
+		DPU_REG_WRITE(&ctx->hw, SSPP_SRC1_ADDR + idx,
+				cfg->layout.plane_addr[0]);
+		DPU_REG_WRITE(&ctx->hw, SSPP_SRC3_ADDR + idx,
+				cfg->layout.plane_addr[2]);
+	}
+}
+
+static void dpu_hw_sspp_setup_csc(struct dpu_hw_pipe *ctx,
+		struct dpu_csc_cfg *data)
+{
+	u32 idx;
+	bool csc10 = false;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_CSC, &idx) || !data)
+		return;
+
+	if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features)) {
+		idx += CSC_10BIT_OFFSET;
+		csc10 = true;
+	}
+
+	dpu_hw_csc_setup(&ctx->hw, idx, data, csc10);
+}
+
+static void dpu_hw_sspp_setup_solidfill(struct dpu_hw_pipe *ctx, u32 color,
+		enum dpu_sspp_multirect_index rect_index)
+{
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+		return;
+
+	if (rect_index == DPU_SSPP_RECT_SOLO || rect_index == DPU_SSPP_RECT_0)
+		DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR + idx, color);
+	else
+		DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR_REC1 + idx,
+				color);
+}
+
+static void dpu_hw_sspp_setup_danger_safe_lut(struct dpu_hw_pipe *ctx,
+		struct dpu_hw_pipe_qos_cfg *cfg)
+{
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+		return;
+
+	DPU_REG_WRITE(&ctx->hw, SSPP_DANGER_LUT + idx, cfg->danger_lut);
+	DPU_REG_WRITE(&ctx->hw, SSPP_SAFE_LUT + idx, cfg->safe_lut);
+}
+
+static void dpu_hw_sspp_setup_creq_lut(struct dpu_hw_pipe *ctx,
+		struct dpu_hw_pipe_qos_cfg *cfg)
+{
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+		return;
+
+	if (ctx->cap && test_bit(DPU_SSPP_QOS_8LVL, &ctx->cap->features)) {
+		DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_0 + idx, cfg->creq_lut);
+		DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_1 + idx,
+				cfg->creq_lut >> 32);
+	} else {
+		DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT + idx, cfg->creq_lut);
+	}
+}
+
+static void dpu_hw_sspp_setup_qos_ctrl(struct dpu_hw_pipe *ctx,
+		struct dpu_hw_pipe_qos_cfg *cfg)
+{
+	u32 idx;
+	u32 qos_ctrl = 0;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+		return;
+
+	if (cfg->vblank_en) {
+		qos_ctrl |= ((cfg->creq_vblank &
+				SSPP_QOS_CTRL_CREQ_VBLANK_MASK) <<
+				SSPP_QOS_CTRL_CREQ_VBLANK_OFF);
+		qos_ctrl |= ((cfg->danger_vblank &
+				SSPP_QOS_CTRL_DANGER_VBLANK_MASK) <<
+				SSPP_QOS_CTRL_DANGER_VBLANK_OFF);
+		qos_ctrl |= SSPP_QOS_CTRL_VBLANK_EN;
+	}
+
+	if (cfg->danger_safe_en)
+		qos_ctrl |= SSPP_QOS_CTRL_DANGER_SAFE_EN;
+
+	DPU_REG_WRITE(&ctx->hw, SSPP_QOS_CTRL + idx, qos_ctrl);
+}
+
+static void dpu_hw_sspp_setup_cdp(struct dpu_hw_pipe *ctx,
+		struct dpu_hw_pipe_cdp_cfg *cfg)
+{
+	u32 idx;
+	u32 cdp_cntl = 0;
+
+	if (!ctx || !cfg)
+		return;
+
+	if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+		return;
+
+	if (cfg->enable)
+		cdp_cntl |= BIT(0);
+	if (cfg->ubwc_meta_enable)
+		cdp_cntl |= BIT(1);
+	if (cfg->tile_amortize_enable)
+		cdp_cntl |= BIT(2);
+	if (cfg->preload_ahead == DPU_SSPP_CDP_PRELOAD_AHEAD_64)
+		cdp_cntl |= BIT(3);
+
+	DPU_REG_WRITE(&ctx->hw, SSPP_CDP_CNTL, cdp_cntl);
+}
+
+static void _setup_layer_ops(struct dpu_hw_pipe *c,
+		unsigned long features)
+{
+	if (test_bit(DPU_SSPP_SRC, &features)) {
+		c->ops.setup_format = dpu_hw_sspp_setup_format;
+		c->ops.setup_rects = dpu_hw_sspp_setup_rects;
+		c->ops.setup_sourceaddress = dpu_hw_sspp_setup_sourceaddress;
+		c->ops.setup_solidfill = dpu_hw_sspp_setup_solidfill;
+		c->ops.setup_pe = dpu_hw_sspp_setup_pe_config;
+	}
+
+	if (test_bit(DPU_SSPP_QOS, &features)) {
+		c->ops.setup_danger_safe_lut =
+			dpu_hw_sspp_setup_danger_safe_lut;
+		c->ops.setup_creq_lut = dpu_hw_sspp_setup_creq_lut;
+		c->ops.setup_qos_ctrl = dpu_hw_sspp_setup_qos_ctrl;
+	}
+
+	if (test_bit(DPU_SSPP_CSC, &features) ||
+		test_bit(DPU_SSPP_CSC_10BIT, &features))
+		c->ops.setup_csc = dpu_hw_sspp_setup_csc;
+
+	if (dpu_hw_sspp_multirect_enabled(c->cap))
+		c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
+
+	if (test_bit(DPU_SSPP_SCALER_QSEED3, &features)) {
+		c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3;
+		c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver;
+	}
+
+	if (test_bit(DPU_SSPP_CDP, &features))
+		c->ops.setup_cdp = dpu_hw_sspp_setup_cdp;
+}
+
+static struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
+		void __iomem *addr,
+		struct dpu_mdss_cfg *catalog,
+		struct dpu_hw_blk_reg_map *b)
+{
+	int i;
+
+	if ((sspp < SSPP_MAX) && catalog && addr && b) {
+		for (i = 0; i < catalog->sspp_count; i++) {
+			if (sspp == catalog->sspp[i].id) {
+				b->base_off = addr;
+				b->blk_off = catalog->sspp[i].base;
+				b->length = catalog->sspp[i].len;
+				b->hwversion = catalog->hwversion;
+				b->log_mask = DPU_DBG_MASK_SSPP;
+				return &catalog->sspp[i];
+			}
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
+struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
+		void __iomem *addr, struct dpu_mdss_cfg *catalog,
+		bool is_virtual_pipe)
+{
+	struct dpu_hw_pipe *hw_pipe;
+	struct dpu_sspp_cfg *cfg;
+	int rc;
+
+	if (!addr || !catalog)
+		return ERR_PTR(-EINVAL);
+
+	hw_pipe = kzalloc(sizeof(*hw_pipe), GFP_KERNEL);
+	if (!hw_pipe)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _sspp_offset(idx, addr, catalog, &hw_pipe->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(hw_pipe);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	hw_pipe->catalog = catalog;
+	hw_pipe->mdp = &catalog->mdp[0];
+	hw_pipe->idx = idx;
+	hw_pipe->cap = cfg;
+	_setup_layer_ops(hw_pipe, hw_pipe->cap->features);
+
+	rc = dpu_hw_blk_init(&hw_pipe->base, DPU_HW_BLK_SSPP, idx, &dpu_hw_ops);
+	if (rc) {
+		DPU_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
+	return hw_pipe;
+
+blk_init_error:
+	kfree(hw_pipe);
+
+	return ERR_PTR(rc);
+}
+
+void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx)
+{
+	if (ctx)
+		dpu_hw_blk_destroy(&ctx->base);
+	kfree(ctx);
+}
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
new file mode 100644
index 000000000000..4d81e5f5ce1b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -0,0 +1,424 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_SSPP_H
+#define _DPU_HW_SSPP_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+#include "dpu_formats.h"
+
+struct dpu_hw_pipe;
+
+/**
+ * Flags
+ */
+#define DPU_SSPP_FLIP_LR		BIT(0)
+#define DPU_SSPP_FLIP_UD		BIT(1)
+#define DPU_SSPP_SOURCE_ROTATED_90	BIT(2)
+#define DPU_SSPP_ROT_90			BIT(3)
+#define DPU_SSPP_SOLID_FILL		BIT(4)
+
+/**
+ * Define all scaler feature bits in catalog
+ */
+#define DPU_SSPP_SCALER ((1UL << DPU_SSPP_SCALER_RGB) | \
+	(1UL << DPU_SSPP_SCALER_QSEED2) | \
+	(1UL << DPU_SSPP_SCALER_QSEED3))
+
+/**
+ * Component indices
+ */
+enum {
+	DPU_SSPP_COMP_0,
+	DPU_SSPP_COMP_1_2,
+	DPU_SSPP_COMP_2,
+	DPU_SSPP_COMP_3,
+
+	DPU_SSPP_COMP_MAX
+};
+
+/**
+ * DPU_SSPP_RECT_SOLO - multirect disabled
+ * DPU_SSPP_RECT_0 - rect0 of a multirect pipe
+ * DPU_SSPP_RECT_1 - rect1 of a multirect pipe
+ *
+ * Note: HW supports multirect with either RECT0 or
+ * RECT1. Considering no benefit of such configs over
+ * SOLO mode and to keep the plane management simple,
+ * we don't support single rect multirect configs.
+ */
+enum dpu_sspp_multirect_index {
+	DPU_SSPP_RECT_SOLO = 0,
+	DPU_SSPP_RECT_0,
+	DPU_SSPP_RECT_1,
+};
+
+enum dpu_sspp_multirect_mode {
+	DPU_SSPP_MULTIRECT_NONE = 0,
+	DPU_SSPP_MULTIRECT_PARALLEL,
+	DPU_SSPP_MULTIRECT_TIME_MX,
+};
+
+enum {
+	DPU_FRAME_LINEAR,
+	DPU_FRAME_TILE_A4X,
+	DPU_FRAME_TILE_A5X,
+};
+
+enum dpu_hw_filter {
+	DPU_SCALE_FILTER_NEAREST = 0,
+	DPU_SCALE_FILTER_BIL,
+	DPU_SCALE_FILTER_PCMN,
+	DPU_SCALE_FILTER_CA,
+	DPU_SCALE_FILTER_MAX
+};
+
+enum dpu_hw_filter_alpa {
+	DPU_SCALE_ALPHA_PIXEL_REP,
+	DPU_SCALE_ALPHA_BIL
+};
+
+enum dpu_hw_filter_yuv {
+	DPU_SCALE_2D_4X4,
+	DPU_SCALE_2D_CIR,
+	DPU_SCALE_1D_SEP,
+	DPU_SCALE_BIL
+};
+
+struct dpu_hw_sharp_cfg {
+	u32 strength;
+	u32 edge_thr;
+	u32 smooth_thr;
+	u32 noise_thr;
+};
+
+struct dpu_hw_pixel_ext {
+	/* scaling factors are enabled for this input layer */
+	uint8_t enable_pxl_ext;
+
+	int init_phase_x[DPU_MAX_PLANES];
+	int phase_step_x[DPU_MAX_PLANES];
+	int init_phase_y[DPU_MAX_PLANES];
+	int phase_step_y[DPU_MAX_PLANES];
+
+	/*
+	 * Number of pixels extension in left, right, top and bottom direction
+	 * for all color components. This pixel value for each color component
+	 * should be sum of fetch + repeat pixels.
+	 */
+	int num_ext_pxls_left[DPU_MAX_PLANES];
+	int num_ext_pxls_right[DPU_MAX_PLANES];
+	int num_ext_pxls_top[DPU_MAX_PLANES];
+	int num_ext_pxls_btm[DPU_MAX_PLANES];
+
+	/*
+	 * Number of pixels that need to be overfetched in left, right, top
+	 * and bottom directions from the source image for scaling.
+	 */
+	int left_ftch[DPU_MAX_PLANES];
+	int right_ftch[DPU_MAX_PLANES];
+	int top_ftch[DPU_MAX_PLANES];
+	int btm_ftch[DPU_MAX_PLANES];
+
+	/*
+	 * Number of pixels that need to be repeated in left, right, top
+	 * and bottom directions for scaling.
+	 */
+	int left_rpt[DPU_MAX_PLANES];
+	int right_rpt[DPU_MAX_PLANES];
+	int top_rpt[DPU_MAX_PLANES];
+	int btm_rpt[DPU_MAX_PLANES];
+
+	uint32_t roi_w[DPU_MAX_PLANES];
+	uint32_t roi_h[DPU_MAX_PLANES];
+
+	/*
+	 * Filter type to be used for scaling in horizontal and vertical
+	 * directions
+	 */
+	enum dpu_hw_filter horz_filter[DPU_MAX_PLANES];
+	enum dpu_hw_filter vert_filter[DPU_MAX_PLANES];
+
+};
+
+/**
+ * struct dpu_hw_pipe_cfg : Pipe description
+ * @layout:    format layout information for programming buffer to hardware
+ * @src_rect:  src ROI, the caller accounts for operations such as
+ *             decimation and flip when programming this field
+ * @dst_rect:  destination ROI
+ * @index:     index of the rectangle of SSPP
+ * @mode:      parallel or time multiplex multirect mode
+ */
+struct dpu_hw_pipe_cfg {
+	struct dpu_hw_fmt_layout layout;
+	struct drm_rect src_rect;
+	struct drm_rect dst_rect;
+	enum dpu_sspp_multirect_index index;
+	enum dpu_sspp_multirect_mode mode;
+};
+
+/**
+ * struct dpu_hw_pipe_qos_cfg : Source pipe QoS configuration
+ * @danger_lut: LUT for generating danger level based on fill level
+ * @safe_lut: LUT for generating safe level based on fill level
+ * @creq_lut: LUT for generating creq level based on fill level
+ * @creq_vblank: creq value generated to vbif during vertical blanking
+ * @danger_vblank: danger value generated during vertical blanking
+ * @vblank_en: enable creq_vblank and danger_vblank during vblank
+ * @danger_safe_en: enable danger safe generation
+ */
+struct dpu_hw_pipe_qos_cfg {
+	u32 danger_lut;
+	u32 safe_lut;
+	u64 creq_lut;
+	u32 creq_vblank;
+	u32 danger_vblank;
+	bool vblank_en;
+	bool danger_safe_en;
+};
+
+/**
+ * CDP preload-ahead address sizes
+ */
+enum {
+	DPU_SSPP_CDP_PRELOAD_AHEAD_32,
+	DPU_SSPP_CDP_PRELOAD_AHEAD_64
+};
+
+/**
+ * struct dpu_hw_pipe_cdp_cfg : CDP configuration
+ * @enable: true to enable CDP
+ * @ubwc_meta_enable: true to enable ubwc metadata preload
+ * @tile_amortize_enable: true to enable amortization control for tile format
+ * @preload_ahead: number of requests to preload ahead
+ *	DPU_SSPP_CDP_PRELOAD_AHEAD_32,
+ *	DPU_SSPP_CDP_PRELOAD_AHEAD_64
+ */
+struct dpu_hw_pipe_cdp_cfg {
+	bool enable;
+	bool ubwc_meta_enable;
+	bool tile_amortize_enable;
+	u32 preload_ahead;
+};
+
+/**
+ * struct dpu_hw_pipe_ts_cfg - traffic shaper configuration
+ * @size: size to prefill in bytes, or zero to disable
+ * @time: time to prefill in usec, or zero to disable
+ */
+struct dpu_hw_pipe_ts_cfg {
+	u64 size;
+	u64 time;
+};
+
+/**
+ * struct dpu_hw_sspp_ops - interface to the SSPP Hw driver functions
+ * Caller must call the init function to get the pipe context for each pipe
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_sspp_ops {
+	/**
+	 * setup_format - setup pixel format, cropping rectangle, flip
+	 * @ctx: Pointer to pipe context
+	 * @fmt: Pointer to dpu format description
+	 * @flags: Extra flags for format config
+	 * @index: rectangle index in multirect
+	 */
+	void (*setup_format)(struct dpu_hw_pipe *ctx,
+			const struct dpu_format *fmt, u32 flags,
+			enum dpu_sspp_multirect_index index);
+
+	/**
+	 * setup_rects - setup pipe ROI rectangles
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe config structure
+	 * @index: rectangle index in multirect
+	 */
+	void (*setup_rects)(struct dpu_hw_pipe *ctx,
+			struct dpu_hw_pipe_cfg *cfg,
+			enum dpu_sspp_multirect_index index);
+
+	/**
+	 * setup_pe - setup pipe pixel extension
+	 * @ctx: Pointer to pipe context
+	 * @pe_ext: Pointer to pixel ext settings
+	 */
+	void (*setup_pe)(struct dpu_hw_pipe *ctx,
+			struct dpu_hw_pixel_ext *pe_ext);
+
+	/**
+	 * setup_sourceaddress - setup pipe source addresses
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe config structure
+	 * @index: rectangle index in multirect
+	 */
+	void (*setup_sourceaddress)(struct dpu_hw_pipe *ctx,
+			struct dpu_hw_pipe_cfg *cfg,
+			enum dpu_sspp_multirect_index index);
+
+	/**
+	 * setup_csc - setup color space conversion
+	 * @ctx: Pointer to pipe context
+	 * @data: Pointer to config structure
+	 */
+	void (*setup_csc)(struct dpu_hw_pipe *ctx, struct dpu_csc_cfg *data);
+
+	/**
+	 * setup_solidfill - enable/disable colorfill
+	 * @ctx: Pointer to pipe context
+	 * @color: Fill color value
+	 * @index: rectangle index in multirect
+	 */
+	void (*setup_solidfill)(struct dpu_hw_pipe *ctx, u32 color,
+			enum dpu_sspp_multirect_index index);
+
+	/**
+	 * setup_multirect - setup multirect configuration
+	 * @ctx: Pointer to pipe context
+	 * @index: rectangle index in multirect
+	 * @mode: parallel fetch / time multiplex multirect mode
+	 */
+	void (*setup_multirect)(struct dpu_hw_pipe *ctx,
+			enum dpu_sspp_multirect_index index,
+			enum dpu_sspp_multirect_mode mode);
+
+	/**
+	 * setup_sharpening - setup sharpening
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to config structure
+	 */
+	void (*setup_sharpening)(struct dpu_hw_pipe *ctx,
+			struct dpu_hw_sharp_cfg *cfg);
+
+	/**
+	 * setup_danger_safe_lut - setup danger safe LUTs
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe QoS configuration
+	 *
+	 */
+	void (*setup_danger_safe_lut)(struct dpu_hw_pipe *ctx,
+			struct dpu_hw_pipe_qos_cfg *cfg);
+
+	/**
+	 * setup_creq_lut - setup CREQ LUT
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe QoS configuration
+	 *
+	 */
+	void (*setup_creq_lut)(struct dpu_hw_pipe *ctx,
+			struct dpu_hw_pipe_qos_cfg *cfg);
+
+	/**
+	 * setup_qos_ctrl - setup QoS control
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe QoS configuration
+	 *
+	 */
+	void (*setup_qos_ctrl)(struct dpu_hw_pipe *ctx,
+			struct dpu_hw_pipe_qos_cfg *cfg);
+
+	/**
+	 * setup_histogram - setup histograms
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to histogram configuration
+	 */
+	void (*setup_histogram)(struct dpu_hw_pipe *ctx,
+			void *cfg);
+
+	/**
+	 * setup_scaler - setup scaler
+	 * @ctx: Pointer to pipe context
+	 * @pipe_cfg: Pointer to pipe configuration
+	 * @pe_cfg: Pointer to pixel extension configuration
+	 * @scaler_cfg: Pointer to scaler configuration
+	 */
+	void (*setup_scaler)(struct dpu_hw_pipe *ctx,
+		struct dpu_hw_pipe_cfg *pipe_cfg,
+		struct dpu_hw_pixel_ext *pe_cfg,
+		void *scaler_cfg);
+
+	/**
+	 * get_scaler_ver - get scaler h/w version
+	 * @ctx: Pointer to pipe context
+	 */
+	u32 (*get_scaler_ver)(struct dpu_hw_pipe *ctx);
+
+	/**
+	 * setup_cdp - setup client driven prefetch
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to cdp configuration
+	 */
+	void (*setup_cdp)(struct dpu_hw_pipe *ctx,
+			struct dpu_hw_pipe_cdp_cfg *cfg);
+};
+
+/**
+ * struct dpu_hw_pipe - pipe description
+ * @base: hardware block base structure
+ * @hw: block hardware details
+ * @catalog: back pointer to catalog
+ * @mdp: pointer to associated mdp portion of the catalog
+ * @idx: pipe index
+ * @cap: pointer to layer_cfg
+ * @ops: pointer to operations possible for this pipe
+ */
+struct dpu_hw_pipe {
+	struct dpu_hw_blk base;
+	struct dpu_hw_blk_reg_map hw;
+	struct dpu_mdss_cfg *catalog;
+	struct dpu_mdp_cfg *mdp;
+
+	/* Pipe */
+	enum dpu_sspp idx;
+	const struct dpu_sspp_cfg *cap;
+
+	/* Ops */
+	struct dpu_hw_sspp_ops ops;
+};
+
+/**
+ * dpu_hw_pipe - convert base object dpu_hw_base to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_pipe *to_dpu_hw_pipe(struct dpu_hw_blk *hw)
+{
+	return container_of(hw, struct dpu_hw_pipe, base);
+}
+
+/**
+ * dpu_hw_sspp_init - initializes the sspp hw driver object.
+ * Should be called once per pipe, before the pipe is accessed.
+ * @idx:  Pipe index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @catalog : Pointer to mdss catalog data
+ * @is_virtual_pipe: is this pipe virtual pipe
+ */
+struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
+		void __iomem *addr, struct dpu_mdss_cfg *catalog,
+		bool is_virtual_pipe);
+
+/**
+ * dpu_hw_sspp_destroy(): Destroys SSPP driver context
+ * should be called during Hw pipe cleanup.
+ * @ctx:  Pointer to SSPP driver context returned by dpu_hw_sspp_init
+ */
+void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx);
+
+#endif /*_DPU_HW_SSPP_H */
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
new file mode 100644
index 000000000000..42fc72cf48dd
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -0,0 +1,398 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_top.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define SSPP_SPARE                        0x28
+#define UBWC_STATIC                       0x144
+
+#define FLD_SPLIT_DISPLAY_CMD             BIT(1)
+#define FLD_SMART_PANEL_FREE_RUN          BIT(2)
+#define FLD_INTF_1_SW_TRG_MUX             BIT(4)
+#define FLD_INTF_2_SW_TRG_MUX             BIT(8)
+#define FLD_TE_LINE_INTER_WATERLEVEL_MASK 0xFFFF
+
+#define DANGER_STATUS                     0x360
+#define SAFE_STATUS                       0x364
+
+#define TE_LINE_INTERVAL                  0x3F4
+
+#define TRAFFIC_SHAPER_EN                 BIT(31)
+#define TRAFFIC_SHAPER_RD_CLIENT(num)     (0x030 + (num * 4))
+#define TRAFFIC_SHAPER_WR_CLIENT(num)     (0x060 + (num * 4))
+#define TRAFFIC_SHAPER_FIXPOINT_FACTOR    4
+
+#define MDP_WD_TIMER_0_CTL                0x380
+#define MDP_WD_TIMER_0_CTL2               0x384
+#define MDP_WD_TIMER_0_LOAD_VALUE         0x388
+#define MDP_WD_TIMER_1_CTL                0x390
+#define MDP_WD_TIMER_1_CTL2               0x394
+#define MDP_WD_TIMER_1_LOAD_VALUE         0x398
+#define MDP_WD_TIMER_2_CTL                0x420
+#define MDP_WD_TIMER_2_CTL2               0x424
+#define MDP_WD_TIMER_2_LOAD_VALUE         0x428
+#define MDP_WD_TIMER_3_CTL                0x430
+#define MDP_WD_TIMER_3_CTL2               0x434
+#define MDP_WD_TIMER_3_LOAD_VALUE         0x438
+#define MDP_WD_TIMER_4_CTL                0x440
+#define MDP_WD_TIMER_4_CTL2               0x444
+#define MDP_WD_TIMER_4_LOAD_VALUE         0x448
+
+#define MDP_TICK_COUNT                    16
+#define XO_CLK_RATE                       19200
+#define MS_TICKS_IN_SEC                   1000
+
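+/*
+ * Watchdog timer load value: XO clock ticks per frame divided by the MDP
+ * tick prescaler (XO_CLK_RATE is in kHz, so MS_TICKS_IN_SEC * XO_CLK_RATE
+ * yields ticks per second).
+ */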
+#define CALCULATE_WD_LOAD_VALUE(fps) \
+	((uint32_t)((MS_TICKS_IN_SEC * XO_CLK_RATE)/(MDP_TICK_COUNT * fps)))
+
+#define DCE_SEL                           0x450
+
+static void dpu_hw_setup_split_pipe(struct dpu_hw_mdp *mdp,
+		struct split_pipe_cfg *cfg)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 upper_pipe = 0;
+	u32 lower_pipe = 0;
+
+	if (!mdp || !cfg)
+		return;
+
+	c = &mdp->hw;
+
+	if (cfg->en) {
+		if (cfg->mode == INTF_MODE_CMD) {
+			lower_pipe = FLD_SPLIT_DISPLAY_CMD;
+			/* interface controlling sw trigger */
+			if (cfg->intf == INTF_2)
+				lower_pipe |= FLD_INTF_1_SW_TRG_MUX;
+			else
+				lower_pipe |= FLD_INTF_2_SW_TRG_MUX;
+			upper_pipe = lower_pipe;
+		} else {
+			if (cfg->intf == INTF_2) {
+				lower_pipe = FLD_INTF_1_SW_TRG_MUX;
+				upper_pipe = FLD_INTF_2_SW_TRG_MUX;
+			} else {
+				lower_pipe = FLD_INTF_2_SW_TRG_MUX;
+				upper_pipe = FLD_INTF_1_SW_TRG_MUX;
+			}
+		}
+	}
+
+	DPU_REG_WRITE(c, SSPP_SPARE, cfg->split_flush_en ? 0x1 : 0x0);
+	DPU_REG_WRITE(c, SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower_pipe);
+	DPU_REG_WRITE(c, SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper_pipe);
+	DPU_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
+}
+
+static void dpu_hw_setup_cdm_output(struct dpu_hw_mdp *mdp,
+		struct cdm_output_cfg *cfg)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 out_ctl = 0;
+
+	if (!mdp || !cfg)
+		return;
+
+	c = &mdp->hw;
+
+	if (cfg->intf_en)
+		out_ctl |= BIT(19);
+
+	DPU_REG_WRITE(c, MDP_OUT_CTL_0, out_ctl);
+}
+
+static bool dpu_hw_setup_clk_force_ctrl(struct dpu_hw_mdp *mdp,
+		enum dpu_clk_ctrl_type clk_ctrl, bool enable)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 reg_off, bit_off;
+	u32 reg_val, new_val;
+	bool clk_forced_on;
+
+	if (!mdp)
+		return false;
+
+	c = &mdp->hw;
+
+	if (clk_ctrl <= DPU_CLK_CTRL_NONE || clk_ctrl >= DPU_CLK_CTRL_MAX)
+		return false;
+
+	reg_off = mdp->caps->clk_ctrls[clk_ctrl].reg_off;
+	bit_off = mdp->caps->clk_ctrls[clk_ctrl].bit_off;
+
+	reg_val = DPU_REG_READ(c, reg_off);
+
+	if (enable)
+		new_val = reg_val | BIT(bit_off);
+	else
+		new_val = reg_val & ~BIT(bit_off);
+
+	DPU_REG_WRITE(c, reg_off, new_val);
+
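+	/* report whether the clock was not already forced on before this write */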
+	clk_forced_on = !(reg_val & BIT(bit_off));
+
+	return clk_forced_on;
+}
+
+
+static void dpu_hw_get_danger_status(struct dpu_hw_mdp *mdp,
+		struct dpu_danger_safe_status *status)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 value;
+
+	if (!mdp || !status)
+		return;
+
+	c = &mdp->hw;
+
+	value = DPU_REG_READ(c, DANGER_STATUS);
+	status->mdp = (value >> 0) & 0x3;
+	status->sspp[SSPP_VIG0] = (value >> 4) & 0x3;
+	status->sspp[SSPP_VIG1] = (value >> 6) & 0x3;
+	status->sspp[SSPP_VIG2] = (value >> 8) & 0x3;
+	status->sspp[SSPP_VIG3] = (value >> 10) & 0x3;
+	status->sspp[SSPP_RGB0] = (value >> 12) & 0x3;
+	status->sspp[SSPP_RGB1] = (value >> 14) & 0x3;
+	status->sspp[SSPP_RGB2] = (value >> 16) & 0x3;
+	status->sspp[SSPP_RGB3] = (value >> 18) & 0x3;
+	status->sspp[SSPP_DMA0] = (value >> 20) & 0x3;
+	status->sspp[SSPP_DMA1] = (value >> 22) & 0x3;
+	status->sspp[SSPP_DMA2] = (value >> 28) & 0x3;
+	status->sspp[SSPP_DMA3] = (value >> 30) & 0x3;
+	status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x3;
+	status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x3;
+}
+
+static void dpu_hw_setup_vsync_source(struct dpu_hw_mdp *mdp,
+		struct dpu_vsync_source_cfg *cfg)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 reg, wd_load_value, wd_ctl, wd_ctl2, i;
+	static const u32 pp_offset[PINGPONG_MAX] = {0xC, 0x8, 0x4, 0x13, 0x18};
+
+	if (!mdp || !cfg || (cfg->pp_count > ARRAY_SIZE(cfg->ppnumber)))
+		return;
+
+	c = &mdp->hw;
+	reg = DPU_REG_READ(c, MDP_VSYNC_SEL);
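+	/* one 4-bit vsync source select field per ping-pong */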
+	for (i = 0; i < cfg->pp_count; i++) {
+		int pp_idx = cfg->ppnumber[i] - PINGPONG_0;
+
+		if (pp_idx >= ARRAY_SIZE(pp_offset))
+			continue;
+
+		reg &= ~(0xf << pp_offset[pp_idx]);
+		reg |= (cfg->vsync_source & 0xf) << pp_offset[pp_idx];
+	}
+	DPU_REG_WRITE(c, MDP_VSYNC_SEL, reg);
+
+	if (cfg->vsync_source >= DPU_VSYNC_SOURCE_WD_TIMER_4 &&
+			cfg->vsync_source <= DPU_VSYNC_SOURCE_WD_TIMER_0) {
+		switch (cfg->vsync_source) {
+		case DPU_VSYNC_SOURCE_WD_TIMER_4:
+			wd_load_value = MDP_WD_TIMER_4_LOAD_VALUE;
+			wd_ctl = MDP_WD_TIMER_4_CTL;
+			wd_ctl2 = MDP_WD_TIMER_4_CTL2;
+			break;
+		case DPU_VSYNC_SOURCE_WD_TIMER_3:
+			wd_load_value = MDP_WD_TIMER_3_LOAD_VALUE;
+			wd_ctl = MDP_WD_TIMER_3_CTL;
+			wd_ctl2 = MDP_WD_TIMER_3_CTL2;
+			break;
+		case DPU_VSYNC_SOURCE_WD_TIMER_2:
+			wd_load_value = MDP_WD_TIMER_2_LOAD_VALUE;
+			wd_ctl = MDP_WD_TIMER_2_CTL;
+			wd_ctl2 = MDP_WD_TIMER_2_CTL2;
+			break;
+		case DPU_VSYNC_SOURCE_WD_TIMER_1:
+			wd_load_value = MDP_WD_TIMER_1_LOAD_VALUE;
+			wd_ctl = MDP_WD_TIMER_1_CTL;
+			wd_ctl2 = MDP_WD_TIMER_1_CTL2;
+			break;
+		case DPU_VSYNC_SOURCE_WD_TIMER_0:
+		default:
+			wd_load_value = MDP_WD_TIMER_0_LOAD_VALUE;
+			wd_ctl = MDP_WD_TIMER_0_CTL;
+			wd_ctl2 = MDP_WD_TIMER_0_CTL2;
+			break;
+		}
+
+		DPU_REG_WRITE(c, wd_load_value,
+			CALCULATE_WD_LOAD_VALUE(cfg->frame_rate));
+
+		DPU_REG_WRITE(c, wd_ctl, BIT(0)); /* clear timer */
+		reg = DPU_REG_READ(c, wd_ctl2);
+		reg |= BIT(8);		/* enable heartbeat timer */
+		reg |= BIT(0);		/* enable WD timer */
+		DPU_REG_WRITE(c, wd_ctl2, reg);
+
+		/* make sure that timers are enabled/disabled for vsync state */
+		wmb();
+	}
+}
+
+static void dpu_hw_get_safe_status(struct dpu_hw_mdp *mdp,
+		struct dpu_danger_safe_status *status)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 value;
+
+	if (!mdp || !status)
+		return;
+
+	c = &mdp->hw;
+
+	value = DPU_REG_READ(c, SAFE_STATUS);
+	status->mdp = (value >> 0) & 0x1;
+	status->sspp[SSPP_VIG0] = (value >> 4) & 0x1;
+	status->sspp[SSPP_VIG1] = (value >> 6) & 0x1;
+	status->sspp[SSPP_VIG2] = (value >> 8) & 0x1;
+	status->sspp[SSPP_VIG3] = (value >> 10) & 0x1;
+	status->sspp[SSPP_RGB0] = (value >> 12) & 0x1;
+	status->sspp[SSPP_RGB1] = (value >> 14) & 0x1;
+	status->sspp[SSPP_RGB2] = (value >> 16) & 0x1;
+	status->sspp[SSPP_RGB3] = (value >> 18) & 0x1;
+	status->sspp[SSPP_DMA0] = (value >> 20) & 0x1;
+	status->sspp[SSPP_DMA1] = (value >> 22) & 0x1;
+	status->sspp[SSPP_DMA2] = (value >> 28) & 0x1;
+	status->sspp[SSPP_DMA3] = (value >> 30) & 0x1;
+	status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x1;
+	status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x1;
+}
+
+void dpu_hw_reset_ubwc(struct dpu_hw_mdp *mdp, struct dpu_mdss_cfg *m)
+{
+	struct dpu_hw_blk_reg_map c;
+
+	if (!mdp || !m)
+		return;
+
+	if (!IS_UBWC_20_SUPPORTED(m->caps->ubwc_version))
+		return;
+
+	/* force blk offset to zero to access beginning of register region */
+	c = mdp->hw;
+	c.blk_off = 0x0;
+	DPU_REG_WRITE(&c, UBWC_STATIC, m->mdp[0].ubwc_static);
+}
+
+static void dpu_hw_intf_audio_select(struct dpu_hw_mdp *mdp)
+{
+	struct dpu_hw_blk_reg_map *c;
+
+	if (!mdp)
+		return;
+
+	c = &mdp->hw;
+
+	DPU_REG_WRITE(c, HDMI_DP_CORE_SELECT, 0x1);
+}
+
+static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
+		unsigned long cap)
+{
+	ops->setup_split_pipe = dpu_hw_setup_split_pipe;
+	ops->setup_cdm_output = dpu_hw_setup_cdm_output;
+	ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl;
+	ops->get_danger_status = dpu_hw_get_danger_status;
+	ops->setup_vsync_source = dpu_hw_setup_vsync_source;
+	ops->get_safe_status = dpu_hw_get_safe_status;
+	ops->reset_ubwc = dpu_hw_reset_ubwc;
+	ops->intf_audio_select = dpu_hw_intf_audio_select;
+}
+
+static const struct dpu_mdp_cfg *_top_offset(enum dpu_mdp mdp,
+		const struct dpu_mdss_cfg *m,
+		void __iomem *addr,
+		struct dpu_hw_blk_reg_map *b)
+{
+	int i;
+
+	if (!m || !addr || !b)
+		return ERR_PTR(-EINVAL);
+
+	for (i = 0; i < m->mdp_count; i++) {
+		if (mdp == m->mdp[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->mdp[i].base;
+			b->length = m->mdp[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = DPU_DBG_MASK_TOP;
+			return &m->mdp[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
+struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
+		void __iomem *addr,
+		const struct dpu_mdss_cfg *m)
+{
+	struct dpu_hw_mdp *mdp;
+	const struct dpu_mdp_cfg *cfg;
+	int rc;
+
+	if (!addr || !m)
+		return ERR_PTR(-EINVAL);
+
+	mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
+	if (!mdp)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _top_offset(idx, m, addr, &mdp->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(mdp);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/*
+	 * Assign ops
+	 */
+	mdp->idx = idx;
+	mdp->caps = cfg;
+	_setup_mdp_ops(&mdp->ops, mdp->caps->features);
+
+	rc = dpu_hw_blk_init(&mdp->base, DPU_HW_BLK_TOP, idx, &dpu_hw_ops);
+	if (rc) {
+		DPU_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
+	dpu_dbg_set_dpu_top_offset(mdp->hw.blk_off);
+
+	return mdp;
+
+blk_init_error:
+	kfree(mdp);
+
+	return ERR_PTR(rc);
+}
+
+void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp)
+{
+	if (mdp)
+		dpu_hw_blk_destroy(&mdp->base);
+	kfree(mdp);
+}
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
new file mode 100644
index 000000000000..899925aaa6d7
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
@@ -0,0 +1,202 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_TOP_H
+#define _DPU_HW_TOP_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_mdp;
+
+/**
+ * struct traffic_shaper_cfg: traffic shaper configuration
+ * @en        : enable/disable traffic shaper
+ * @rd_client : true if read client; false if write client
+ * @client_id : client identifier
+ * @bpc_denom : denominator of bytes per clk
+ * @bpc_numer : numerator of bytes per clk
+ */
+struct traffic_shaper_cfg {
+	bool en;
+	bool rd_client;
+	u32 client_id;
+	u32 bpc_denom;
+	u64 bpc_numer;
+};
+
+/**
+ * struct split_pipe_cfg - pipe configuration for dual display panels
+ * @en        : Enable/disable dual pipe configuration
+ * @mode      : Panel interface mode
+ * @intf      : Interface id for main control path
+ * @split_flush_en: Allows both paths to be flushed when the master path is
+ *              flushed
+ */
+struct split_pipe_cfg {
+	bool en;
+	enum dpu_intf_mode mode;
+	enum dpu_intf intf;
+	bool split_flush_en;
+};
+
+/**
+ * struct cdm_output_cfg: output configuration for cdm
+ * @intf_en   : enable/disable interface output
+ */
+struct cdm_output_cfg {
+	bool intf_en;
+};
+
+/**
+ * struct dpu_danger_safe_status: danger and safe status signals
+ * @mdp: top level status
+ * @sspp: source pipe status
+ */
+struct dpu_danger_safe_status {
+	u8 mdp;
+	u8 sspp[SSPP_MAX];
+};
+
+/**
+ * struct dpu_vsync_source_cfg - configure the vsync source and the
+ *                                    watchdog timers if required.
+ * @pp_count: number of ping pongs active
+ * @frame_rate: Display frame rate
+ * @ppnumber: ping pong index array
+ * @vsync_source: vsync source selection
+ */
+struct dpu_vsync_source_cfg {
+	u32 pp_count;
+	u32 frame_rate;
+	u32 ppnumber[PINGPONG_MAX];
+	u32 vsync_source;
+};
+
+/**
+ * struct dpu_hw_mdp_ops - interface to the MDP TOP HW driver functions
+ * Assumption is these functions will be called after clocks are enabled.
+ * @setup_split_pipe : Programs the pipe control registers
+ * @setup_cdm_output : programs cdm control
+ * @setup_traffic_shaper : programs traffic shaper control
+ */
+struct dpu_hw_mdp_ops {
+	/**
+	 * setup_split_pipe() : Registers are not double buffered, this
+	 * function should be called before timing control enable
+	 * @mdp  : mdp top context driver
+	 * @cfg  : upper and lower part of pipe configuration
+	 */
+	void (*setup_split_pipe)(struct dpu_hw_mdp *mdp,
+			struct split_pipe_cfg *p);
+
+	/**
+	 * setup_cdm_output() : Setup selection control of the cdm data path
+	 * @mdp  : mdp top context driver
+	 * @cfg  : cdm output configuration
+	 */
+	void (*setup_cdm_output)(struct dpu_hw_mdp *mdp,
+			struct cdm_output_cfg *cfg);
+
+	/**
+	 * setup_traffic_shaper() : Setup traffic shaper control
+	 * @mdp  : mdp top context driver
+	 * @cfg  : traffic shaper configuration
+	 */
+	void (*setup_traffic_shaper)(struct dpu_hw_mdp *mdp,
+			struct traffic_shaper_cfg *cfg);
+
+	/**
+	 * setup_clk_force_ctrl - set clock force control
+	 * @mdp: mdp top context driver
+	 * @clk_ctrl: clock to be controlled
+	 * @enable: force on enable
+	 * @return: if the clock is forced-on by this function
+	 */
+	bool (*setup_clk_force_ctrl)(struct dpu_hw_mdp *mdp,
+			enum dpu_clk_ctrl_type clk_ctrl, bool enable);
+
+	/**
+	 * get_danger_status - get danger status
+	 * @mdp: mdp top context driver
+	 * @status: Pointer to danger safe status
+	 */
+	void (*get_danger_status)(struct dpu_hw_mdp *mdp,
+			struct dpu_danger_safe_status *status);
+
+	/**
+	 * setup_vsync_source - setup vsync source configuration details
+	 * @mdp: mdp top context driver
+	 * @cfg: vsync source selection configuration
+	 */
+	void (*setup_vsync_source)(struct dpu_hw_mdp *mdp,
+				struct dpu_vsync_source_cfg *cfg);
+
+	/**
+	 * get_safe_status - get safe status
+	 * @mdp: mdp top context driver
+	 * @status: Pointer to danger safe status
+	 */
+	void (*get_safe_status)(struct dpu_hw_mdp *mdp,
+			struct dpu_danger_safe_status *status);
+
+	/**
+	 * reset_ubwc - reset top level UBWC configuration
+	 * @mdp: mdp top context driver
+	 * @m: pointer to mdss catalog data
+	 */
+	void (*reset_ubwc)(struct dpu_hw_mdp *mdp, struct dpu_mdss_cfg *m);
+
+	/**
+	 * intf_audio_select - select the external interface for audio
+	 * @mdp: mdp top context driver
+	 */
+	void (*intf_audio_select)(struct dpu_hw_mdp *mdp);
+};
+
+struct dpu_hw_mdp {
+	struct dpu_hw_blk base;
+	struct dpu_hw_blk_reg_map hw;
+
+	/* top */
+	enum dpu_mdp idx;
+	const struct dpu_mdp_cfg *caps;
+
+	/* ops */
+	struct dpu_hw_mdp_ops ops;
+};
+
+/**
+ * to_dpu_hw_mdp - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_mdp *to_dpu_hw_mdp(struct dpu_hw_blk *hw)
+{
+	return container_of(hw, struct dpu_hw_mdp, base);
+}
+
+/**
+ * dpu_hw_mdptop_init - initializes the top driver for the passed idx
+ * @idx:  Interface index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m:    Pointer to mdss catalog data
+ */
+struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
+		void __iomem *addr,
+		const struct dpu_mdss_cfg *m);
+
+void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp);
+
+#endif /*_DPU_HW_TOP_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
new file mode 100644
index 000000000000..1ba571e94b32
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
@@ -0,0 +1,452 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+/* using a file static variable for debugfs access */
+static u32 dpu_hw_util_log_mask = DPU_DBG_MASK_NONE;
+
+/* DPU_SCALER_QSEED3 */
+#define QSEED3_HW_VERSION                  0x00
+#define QSEED3_OP_MODE                     0x04
+#define QSEED3_RGB2Y_COEFF                 0x08
+#define QSEED3_PHASE_INIT                  0x0C
+#define QSEED3_PHASE_STEP_Y_H              0x10
+#define QSEED3_PHASE_STEP_Y_V              0x14
+#define QSEED3_PHASE_STEP_UV_H             0x18
+#define QSEED3_PHASE_STEP_UV_V             0x1C
+#define QSEED3_PRELOAD                     0x20
+#define QSEED3_DE_SHARPEN                  0x24
+#define QSEED3_DE_SHARPEN_CTL              0x28
+#define QSEED3_DE_SHAPE_CTL                0x2C
+#define QSEED3_DE_THRESHOLD                0x30
+#define QSEED3_DE_ADJUST_DATA_0            0x34
+#define QSEED3_DE_ADJUST_DATA_1            0x38
+#define QSEED3_DE_ADJUST_DATA_2            0x3C
+#define QSEED3_SRC_SIZE_Y_RGB_A            0x40
+#define QSEED3_SRC_SIZE_UV                 0x44
+#define QSEED3_DST_SIZE                    0x48
+#define QSEED3_COEF_LUT_CTRL               0x4C
+#define QSEED3_COEF_LUT_SWAP_BIT           0
+#define QSEED3_COEF_LUT_DIR_BIT            1
+#define QSEED3_COEF_LUT_Y_CIR_BIT          2
+#define QSEED3_COEF_LUT_UV_CIR_BIT         3
+#define QSEED3_COEF_LUT_Y_SEP_BIT          4
+#define QSEED3_COEF_LUT_UV_SEP_BIT         5
+#define QSEED3_BUFFER_CTRL                 0x50
+#define QSEED3_CLK_CTRL0                   0x54
+#define QSEED3_CLK_CTRL1                   0x58
+#define QSEED3_CLK_STATUS                  0x5C
+#define QSEED3_MISR_CTRL                   0x70
+#define QSEED3_MISR_SIGNATURE_0            0x74
+#define QSEED3_MISR_SIGNATURE_1            0x78
+#define QSEED3_PHASE_INIT_Y_H              0x90
+#define QSEED3_PHASE_INIT_Y_V              0x94
+#define QSEED3_PHASE_INIT_UV_H             0x98
+#define QSEED3_PHASE_INIT_UV_V             0x9C
+#define QSEED3_COEF_LUT                    0x100
+#define QSEED3_FILTERS                     5
+#define QSEED3_LUT_REGIONS                 4
+#define QSEED3_CIRCULAR_LUTS               9
+#define QSEED3_SEPARABLE_LUTS              10
+#define QSEED3_LUT_SIZE                    60
+#define QSEED3_ENABLE                      2
+#define QSEED3_DIR_LUT_SIZE                (200 * sizeof(u32))
+#define QSEED3_CIR_LUT_SIZE \
+	(QSEED3_LUT_SIZE * QSEED3_CIRCULAR_LUTS * sizeof(u32))
+#define QSEED3_SEP_LUT_SIZE \
+	(QSEED3_LUT_SIZE * QSEED3_SEPARABLE_LUTS * sizeof(u32))
+
+void dpu_reg_write(struct dpu_hw_blk_reg_map *c,
+		u32 reg_off,
+		u32 val,
+		const char *name)
+{
+	/* don't need to mutex protect this */
+	if (c->log_mask & dpu_hw_util_log_mask)
+		DPU_DEBUG_DRIVER("[%s:0x%X] <= 0x%X\n",
+				name, c->blk_off + reg_off, val);
+	writel_relaxed(val, c->base_off + c->blk_off + reg_off);
+}
+
+int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off)
+{
+	return readl_relaxed(c->base_off + c->blk_off + reg_off);
+}
+
+u32 *dpu_hw_util_get_log_mask_ptr(void)
+{
+	return &dpu_hw_util_log_mask;
+}
+
+void dpu_set_scaler_v2(struct dpu_hw_scaler3_cfg *cfg,
+		const struct dpu_drm_scaler_v2 *scale_v2)
+{
+	int i;
+
+	cfg->enable = scale_v2->enable;
+	cfg->dir_en = scale_v2->dir_en;
+
+	for (i = 0; i < DPU_MAX_PLANES; i++) {
+		cfg->init_phase_x[i] = scale_v2->init_phase_x[i];
+		cfg->phase_step_x[i] = scale_v2->phase_step_x[i];
+		cfg->init_phase_y[i] = scale_v2->init_phase_y[i];
+		cfg->phase_step_y[i] = scale_v2->phase_step_y[i];
+
+		cfg->preload_x[i] = scale_v2->preload_x[i];
+		cfg->preload_y[i] = scale_v2->preload_y[i];
+		cfg->src_width[i] = scale_v2->src_width[i];
+		cfg->src_height[i] = scale_v2->src_height[i];
+	}
+
+	cfg->dst_width = scale_v2->dst_width;
+	cfg->dst_height = scale_v2->dst_height;
+
+	cfg->y_rgb_filter_cfg = scale_v2->y_rgb_filter_cfg;
+	cfg->uv_filter_cfg = scale_v2->uv_filter_cfg;
+	cfg->alpha_filter_cfg = scale_v2->alpha_filter_cfg;
+	cfg->blend_cfg = scale_v2->blend_cfg;
+
+	cfg->lut_flag = scale_v2->lut_flag;
+	cfg->dir_lut_idx = scale_v2->dir_lut_idx;
+	cfg->y_rgb_cir_lut_idx = scale_v2->y_rgb_cir_lut_idx;
+	cfg->uv_cir_lut_idx = scale_v2->uv_cir_lut_idx;
+	cfg->y_rgb_sep_lut_idx = scale_v2->y_rgb_sep_lut_idx;
+	cfg->uv_sep_lut_idx = scale_v2->uv_sep_lut_idx;
+
+	cfg->de.enable = scale_v2->de.enable;
+	cfg->de.sharpen_level1 = scale_v2->de.sharpen_level1;
+	cfg->de.sharpen_level2 = scale_v2->de.sharpen_level2;
+	cfg->de.clip = scale_v2->de.clip;
+	cfg->de.limit = scale_v2->de.limit;
+	cfg->de.thr_quiet = scale_v2->de.thr_quiet;
+	cfg->de.thr_dieout = scale_v2->de.thr_dieout;
+	cfg->de.thr_low = scale_v2->de.thr_low;
+	cfg->de.thr_high = scale_v2->de.thr_high;
+	cfg->de.prec_shift = scale_v2->de.prec_shift;
+
+	for (i = 0; i < DPU_MAX_DE_CURVES; i++) {
+		cfg->de.adjust_a[i] = scale_v2->de.adjust_a[i];
+		cfg->de.adjust_b[i] = scale_v2->de.adjust_b[i];
+		cfg->de.adjust_c[i] = scale_v2->de.adjust_c[i];
+	}
+}
+
+static void _dpu_hw_setup_scaler3_lut(struct dpu_hw_blk_reg_map *c,
+		struct dpu_hw_scaler3_cfg *scaler3_cfg, u32 offset)
+{
+	int i, j, filter;
+	int config_lut = 0x0;
+	unsigned long lut_flags;
+	u32 lut_addr, lut_offset, lut_len;
+	u32 *lut[QSEED3_FILTERS] = {NULL, NULL, NULL, NULL, NULL};
+	static const uint32_t off_tbl[QSEED3_FILTERS][QSEED3_LUT_REGIONS][2] = {
+		{{18, 0x000}, {12, 0x120}, {12, 0x1E0}, {8, 0x2A0} },
+		{{6, 0x320}, {3, 0x3E0}, {3, 0x440}, {3, 0x4A0} },
+		{{6, 0x500}, {3, 0x5c0}, {3, 0x620}, {3, 0x680} },
+		{{6, 0x380}, {3, 0x410}, {3, 0x470}, {3, 0x4d0} },
+		{{6, 0x560}, {3, 0x5f0}, {3, 0x650}, {3, 0x6b0} },
+	};
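+	/*
+	 * each off_tbl entry is { region length in 4-word units, region byte
+	 * offset }, hence the "<< 2" when computing lut_len below
+	 */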
+
+	lut_flags = (unsigned long) scaler3_cfg->lut_flag;
+	if (test_bit(QSEED3_COEF_LUT_DIR_BIT, &lut_flags) &&
+		(scaler3_cfg->dir_len == QSEED3_DIR_LUT_SIZE)) {
+		lut[0] = scaler3_cfg->dir_lut;
+		config_lut = 1;
+	}
+	if (test_bit(QSEED3_COEF_LUT_Y_CIR_BIT, &lut_flags) &&
+		(scaler3_cfg->y_rgb_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
+		(scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
+		lut[1] = scaler3_cfg->cir_lut +
+			scaler3_cfg->y_rgb_cir_lut_idx * QSEED3_LUT_SIZE;
+		config_lut = 1;
+	}
+	if (test_bit(QSEED3_COEF_LUT_UV_CIR_BIT, &lut_flags) &&
+		(scaler3_cfg->uv_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
+		(scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
+		lut[2] = scaler3_cfg->cir_lut +
+			scaler3_cfg->uv_cir_lut_idx * QSEED3_LUT_SIZE;
+		config_lut = 1;
+	}
+	if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) &&
+		(scaler3_cfg->y_rgb_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
+		(scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
+		lut[3] = scaler3_cfg->sep_lut +
+			scaler3_cfg->y_rgb_sep_lut_idx * QSEED3_LUT_SIZE;
+		config_lut = 1;
+	}
+	if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) &&
+		(scaler3_cfg->uv_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
+		(scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
+		lut[4] = scaler3_cfg->sep_lut +
+			scaler3_cfg->uv_sep_lut_idx * QSEED3_LUT_SIZE;
+		config_lut = 1;
+	}
+
+	if (config_lut) {
+		for (filter = 0; filter < QSEED3_FILTERS; filter++) {
+			if (!lut[filter])
+				continue;
+			lut_offset = 0;
+			for (i = 0; i < QSEED3_LUT_REGIONS; i++) {
+				lut_addr = QSEED3_COEF_LUT + offset
+					+ off_tbl[filter][i][1];
+				lut_len = off_tbl[filter][i][0] << 2;
+				for (j = 0; j < lut_len; j++) {
+					DPU_REG_WRITE(c,
+						lut_addr,
+						(lut[filter])[lut_offset++]);
+					lut_addr += 4;
+				}
+			}
+		}
+	}
+
+	if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags))
+		DPU_REG_WRITE(c, QSEED3_COEF_LUT_CTRL + offset, BIT(0));
+}
+
+static void _dpu_hw_setup_scaler3_de(struct dpu_hw_blk_reg_map *c,
+		struct dpu_hw_scaler3_de_cfg *de_cfg, u32 offset)
+{
+	u32 sharp_lvl, sharp_ctl, shape_ctl, de_thr;
+	u32 adjust_a, adjust_b, adjust_c;
+
+	if (!de_cfg->enable)
+		return;
+
+	sharp_lvl = (de_cfg->sharpen_level1 & 0x1FF) |
+		((de_cfg->sharpen_level2 & 0x1FF) << 16);
+
+	sharp_ctl = ((de_cfg->limit & 0xF) << 9) |
+		((de_cfg->prec_shift & 0x7) << 13) |
+		((de_cfg->clip & 0x7) << 16);
+
+	shape_ctl = (de_cfg->thr_quiet & 0xFF) |
+		((de_cfg->thr_dieout & 0x3FF) << 16);
+
+	de_thr = (de_cfg->thr_low & 0x3FF) |
+		((de_cfg->thr_high & 0x3FF) << 16);
+
+	adjust_a = (de_cfg->adjust_a[0] & 0x3FF) |
+		((de_cfg->adjust_a[1] & 0x3FF) << 10) |
+		((de_cfg->adjust_a[2] & 0x3FF) << 20);
+
+	adjust_b = (de_cfg->adjust_b[0] & 0x3FF) |
+		((de_cfg->adjust_b[1] & 0x3FF) << 10) |
+		((de_cfg->adjust_b[2] & 0x3FF) << 20);
+
+	adjust_c = (de_cfg->adjust_c[0] & 0x3FF) |
+		((de_cfg->adjust_c[1] & 0x3FF) << 10) |
+		((de_cfg->adjust_c[2] & 0x3FF) << 20);
+
+	DPU_REG_WRITE(c, QSEED3_DE_SHARPEN + offset, sharp_lvl);
+	DPU_REG_WRITE(c, QSEED3_DE_SHARPEN_CTL + offset, sharp_ctl);
+	DPU_REG_WRITE(c, QSEED3_DE_SHAPE_CTL + offset, shape_ctl);
+	DPU_REG_WRITE(c, QSEED3_DE_THRESHOLD + offset, de_thr);
+	DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_0 + offset, adjust_a);
+	DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_1 + offset, adjust_b);
+	DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_2 + offset, adjust_c);
+}
+
+void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
+		struct dpu_hw_scaler3_cfg *scaler3_cfg,
+		u32 scaler_offset, u32 scaler_version,
+		const struct dpu_format *format)
+{
+	u32 op_mode = 0;
+	u32 phase_init, preload, src_y_rgb, src_uv, dst;
+
+	if (!scaler3_cfg->enable)
+		goto end;
+
+	op_mode |= BIT(0);
+	op_mode |= (scaler3_cfg->y_rgb_filter_cfg & 0x3) << 16;
+
+	if (format && DPU_FORMAT_IS_YUV(format)) {
+		op_mode |= BIT(12);
+		op_mode |= (scaler3_cfg->uv_filter_cfg & 0x3) << 24;
+	}
+
+	op_mode |= (scaler3_cfg->blend_cfg & 1) << 31;
+	op_mode |= (scaler3_cfg->dir_en) ? BIT(4) : 0;
+
+	preload =
+		((scaler3_cfg->preload_x[0] & 0x7F) << 0) |
+		((scaler3_cfg->preload_y[0] & 0x7F) << 8) |
+		((scaler3_cfg->preload_x[1] & 0x7F) << 16) |
+		((scaler3_cfg->preload_y[1] & 0x7F) << 24);
+
+	src_y_rgb = (scaler3_cfg->src_width[0] & 0x1FFFF) |
+		((scaler3_cfg->src_height[0] & 0x1FFFF) << 16);
+
+	src_uv = (scaler3_cfg->src_width[1] & 0x1FFFF) |
+		((scaler3_cfg->src_height[1] & 0x1FFFF) << 16);
+
+	dst = (scaler3_cfg->dst_width & 0x1FFFF) |
+		((scaler3_cfg->dst_height & 0x1FFFF) << 16);
+
+	if (scaler3_cfg->de.enable) {
+		_dpu_hw_setup_scaler3_de(c, &scaler3_cfg->de, scaler_offset);
+		op_mode |= BIT(8);
+	}
+
+	if (scaler3_cfg->lut_flag)
+		_dpu_hw_setup_scaler3_lut(c, scaler3_cfg,
+								scaler_offset);
+
+	if (scaler_version == 0x1002) {
+		phase_init =
+			((scaler3_cfg->init_phase_x[0] & 0x3F) << 0) |
+			((scaler3_cfg->init_phase_y[0] & 0x3F) << 8) |
+			((scaler3_cfg->init_phase_x[1] & 0x3F) << 16) |
+			((scaler3_cfg->init_phase_y[1] & 0x3F) << 24);
+		DPU_REG_WRITE(c, QSEED3_PHASE_INIT + scaler_offset, phase_init);
+	} else {
+		DPU_REG_WRITE(c, QSEED3_PHASE_INIT_Y_H + scaler_offset,
+			scaler3_cfg->init_phase_x[0] & 0x1FFFFF);
+		DPU_REG_WRITE(c, QSEED3_PHASE_INIT_Y_V + scaler_offset,
+			scaler3_cfg->init_phase_y[0] & 0x1FFFFF);
+		DPU_REG_WRITE(c, QSEED3_PHASE_INIT_UV_H + scaler_offset,
+			scaler3_cfg->init_phase_x[1] & 0x1FFFFF);
+		DPU_REG_WRITE(c, QSEED3_PHASE_INIT_UV_V + scaler_offset,
+			scaler3_cfg->init_phase_y[1] & 0x1FFFFF);
+	}
+
+	DPU_REG_WRITE(c, QSEED3_PHASE_STEP_Y_H + scaler_offset,
+		scaler3_cfg->phase_step_x[0] & 0xFFFFFF);
+
+	DPU_REG_WRITE(c, QSEED3_PHASE_STEP_Y_V + scaler_offset,
+		scaler3_cfg->phase_step_y[0] & 0xFFFFFF);
+
+	DPU_REG_WRITE(c, QSEED3_PHASE_STEP_UV_H + scaler_offset,
+		scaler3_cfg->phase_step_x[1] & 0xFFFFFF);
+
+	DPU_REG_WRITE(c, QSEED3_PHASE_STEP_UV_V + scaler_offset,
+		scaler3_cfg->phase_step_y[1] & 0xFFFFFF);
+
+	DPU_REG_WRITE(c, QSEED3_PRELOAD + scaler_offset, preload);
+
+	DPU_REG_WRITE(c, QSEED3_SRC_SIZE_Y_RGB_A + scaler_offset, src_y_rgb);
+
+	DPU_REG_WRITE(c, QSEED3_SRC_SIZE_UV + scaler_offset, src_uv);
+
+	DPU_REG_WRITE(c, QSEED3_DST_SIZE + scaler_offset, dst);
+
+end:
+	if (format && !DPU_FORMAT_IS_DX(format))
+		op_mode |= BIT(14);
+
+	if (format && format->alpha_enable) {
+		op_mode |= BIT(10);
+		if (scaler_version == 0x1002)
+			op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x1) << 30;
+		else
+			op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x3) << 29;
+	}
+
+	DPU_REG_WRITE(c, QSEED3_OP_MODE + scaler_offset, op_mode);
+}
+
+u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
+			u32 scaler_offset)
+{
+	return DPU_REG_READ(c, QSEED3_HW_VERSION + scaler_offset);
+}
+
+void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
+		u32 csc_reg_off,
+		struct dpu_csc_cfg *data, bool csc10)
+{
+	static const u32 matrix_shift = 7;
+	u32 clamp_shift = csc10 ? 16 : 8;
+	u32 val;
+
+	/* matrix coeff - convert S15.16 to S4.9 */
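+	/* e.g. 1.0 is 0x10000 in S15.16; >> 7 gives 0x200, 1.0 in S4.9 */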
+	val = ((data->csc_mv[0] >> matrix_shift) & 0x1FFF) |
+		(((data->csc_mv[1] >> matrix_shift) & 0x1FFF) << 16);
+	DPU_REG_WRITE(c, csc_reg_off, val);
+	val = ((data->csc_mv[2] >> matrix_shift) & 0x1FFF) |
+		(((data->csc_mv[3] >> matrix_shift) & 0x1FFF) << 16);
+	DPU_REG_WRITE(c, csc_reg_off + 0x4, val);
+	val = ((data->csc_mv[4] >> matrix_shift) & 0x1FFF) |
+		(((data->csc_mv[5] >> matrix_shift) & 0x1FFF) << 16);
+	DPU_REG_WRITE(c, csc_reg_off + 0x8, val);
+	val = ((data->csc_mv[6] >> matrix_shift) & 0x1FFF) |
+		(((data->csc_mv[7] >> matrix_shift) & 0x1FFF) << 16);
+	DPU_REG_WRITE(c, csc_reg_off + 0xc, val);
+	val = (data->csc_mv[8] >> matrix_shift) & 0x1FFF;
+	DPU_REG_WRITE(c, csc_reg_off + 0x10, val);
+
+	/* Pre clamp */
+	val = (data->csc_pre_lv[0] << clamp_shift) | data->csc_pre_lv[1];
+	DPU_REG_WRITE(c, csc_reg_off + 0x14, val);
+	val = (data->csc_pre_lv[2] << clamp_shift) | data->csc_pre_lv[3];
+	DPU_REG_WRITE(c, csc_reg_off + 0x18, val);
+	val = (data->csc_pre_lv[4] << clamp_shift) | data->csc_pre_lv[5];
+	DPU_REG_WRITE(c, csc_reg_off + 0x1c, val);
+
+	/* Post clamp */
+	val = (data->csc_post_lv[0] << clamp_shift) | data->csc_post_lv[1];
+	DPU_REG_WRITE(c, csc_reg_off + 0x20, val);
+	val = (data->csc_post_lv[2] << clamp_shift) | data->csc_post_lv[3];
+	DPU_REG_WRITE(c, csc_reg_off + 0x24, val);
+	val = (data->csc_post_lv[4] << clamp_shift) | data->csc_post_lv[5];
+	DPU_REG_WRITE(c, csc_reg_off + 0x28, val);
+
+	/* Pre-Bias */
+	DPU_REG_WRITE(c, csc_reg_off + 0x2c, data->csc_pre_bv[0]);
+	DPU_REG_WRITE(c, csc_reg_off + 0x30, data->csc_pre_bv[1]);
+	DPU_REG_WRITE(c, csc_reg_off + 0x34, data->csc_pre_bv[2]);
+
+	/* Post-Bias */
+	DPU_REG_WRITE(c, csc_reg_off + 0x38, data->csc_post_bv[0]);
+	DPU_REG_WRITE(c, csc_reg_off + 0x3c, data->csc_post_bv[1]);
+	DPU_REG_WRITE(c, csc_reg_off + 0x40, data->csc_post_bv[2]);
+}
+
+/**
+ * dpu_copy_formats - copy formats from src_list to dst_list
+ * @dst_list:          pointer to destination list to copy formats into
+ * @dst_list_size:     size of destination list
+ * @dst_list_pos:      starting position in the destination list
+ * @src_list:          pointer to source list to copy formats from
+ * @src_list_size:     size of source list
+ * Return: number of elements populated
+ */
+uint32_t dpu_copy_formats(
+		struct dpu_format_extended *dst_list,
+		uint32_t dst_list_size,
+		uint32_t dst_list_pos,
+		const struct dpu_format_extended *src_list,
+		uint32_t src_list_size)
+{
+	uint32_t cur_pos, i;
+
+	if (!dst_list || !src_list || (dst_list_pos >= (dst_list_size - 1)))
+		return 0;
+
+	for (i = 0, cur_pos = dst_list_pos;
+		(cur_pos < (dst_list_size - 1)) && (i < src_list_size)
+		&& src_list[i].fourcc_format; ++i, ++cur_pos)
+		dst_list[cur_pos] = src_list[i];
+
+	dst_list[cur_pos].fourcc_format = 0;
+
+	return i;
+}
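+
+/*
+ * For example (hypothetical lists): copying a 3-entry src_list into an
+ * empty 10-entry dst_list at position 0 returns 3 and writes
+ * dst_list[3].fourcc_format = 0 as the list terminator.
+ */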
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
new file mode 100644
index 000000000000..42f1b228d342
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -0,0 +1,358 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_UTIL_H
+#define _DPU_HW_UTIL_H
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "dpu_hw_mdss.h"
+
+#define REG_MASK(n)                     ((BIT(n)) - 1)
+struct dpu_format_extended;
+
+/*
+ * This is the common struct maintained by each sub block
+ * for mapping the register offsets in this block to the
+ * absolute IO address
+ * @base_off:     mdp register mapped offset
+ * @blk_off:      pipe offset relative to mdss offset
+ * @length:       length of register block offset
+ * @xin_id:       xin id
+ * @hwversion:    mdss hw version number
+ * @log_mask:     log mask for this block
+ */
+struct dpu_hw_blk_reg_map {
+	void __iomem *base_off;
+	u32 blk_off;
+	u32 length;
+	u32 xin_id;
+	u32 hwversion;
+	u32 log_mask;
+};
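+
+/*
+ * A register access through this map resolves to the address
+ * base_off + blk_off + reg_off (see dpu_reg_write() and dpu_reg_read()).
+ */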
+
+/**
+ * struct dpu_hw_scaler3_de_cfg : QSEEDv3 detail enhancer configuration
+ * @enable:         detail enhancer enable/disable
+ * @sharpen_level1: sharpening strength for noise
+ * @sharpen_level2: sharpening strength for signal
+ * @clip:           clip shift
+ * @limit:          limit value
+ * @thr_quiet:      quiet threshold
+ * @thr_dieout:     dieout threshold
+ * @thr_low:        low threshold
+ * @thr_high:       high threshold
+ * @prec_shift:     precision shift
+ * @adjust_a:       A-coefficients for mapping curve
+ * @adjust_b:       B-coefficients for mapping curve
+ * @adjust_c:       C-coefficients for mapping curve
+ */
+struct dpu_hw_scaler3_de_cfg {
+	u32 enable;
+	int16_t sharpen_level1;
+	int16_t sharpen_level2;
+	uint16_t clip;
+	uint16_t limit;
+	uint16_t thr_quiet;
+	uint16_t thr_dieout;
+	uint16_t thr_low;
+	uint16_t thr_high;
+	uint16_t prec_shift;
+	int16_t adjust_a[DPU_MAX_DE_CURVES];
+	int16_t adjust_b[DPU_MAX_DE_CURVES];
+	int16_t adjust_c[DPU_MAX_DE_CURVES];
+};
+
+
+/**
+ * struct dpu_hw_scaler3_cfg : QSEEDv3 configuration
+ * @enable:        scaler enable
+ * @dir_en:        direction detection block enable
+ * @init_phase_x:  horizontal initial phase
+ * @phase_step_x:  horizontal phase step
+ * @init_phase_y:  vertical initial phase
+ * @phase_step_y:  vertical phase step
+ * @preload_x:     horizontal preload value
+ * @preload_y:     vertical preload value
+ * @src_width:     source width
+ * @src_height:    source height
+ * @dst_width:     destination width
+ * @dst_height:    destination height
+ * @y_rgb_filter_cfg: y/rgb plane filter configuration
+ * @uv_filter_cfg: uv plane filter configuration
+ * @alpha_filter_cfg: alpha filter configuration
+ * @blend_cfg:     blend coefficients configuration
+ * @lut_flag:      scaler LUT update flags
+ *                 0x1 swap LUT bank
+ *                 0x2 update 2D filter LUT
+ *                 0x4 update y circular filter LUT
+ *                 0x8 update uv circular filter LUT
+ *                 0x10 update y separable filter LUT
+ *                 0x20 update uv separable filter LUT
+ * @dir_lut_idx:   2D filter LUT index
+ * @y_rgb_cir_lut_idx: y circular filter LUT index
+ * @uv_cir_lut_idx: uv circular filter LUT index
+ * @y_rgb_sep_lut_idx: y separable filter LUT index
+ * @uv_sep_lut_idx: uv separable filter LUT index
+ * @dir_lut:       pointer to 2D LUT
+ * @dir_len:       size of the 2D LUT in bytes
+ * @cir_lut:       pointer to circular filter LUT
+ * @cir_len:       size of the circular filter LUT in bytes
+ * @sep_lut:       pointer to separable filter LUT
+ * @sep_len:       size of the separable filter LUT in bytes
+ * @de:            detail enhancer configuration
+ */
+struct dpu_hw_scaler3_cfg {
+	u32 enable;
+	u32 dir_en;
+	int32_t init_phase_x[DPU_MAX_PLANES];
+	int32_t phase_step_x[DPU_MAX_PLANES];
+	int32_t init_phase_y[DPU_MAX_PLANES];
+	int32_t phase_step_y[DPU_MAX_PLANES];
+
+	u32 preload_x[DPU_MAX_PLANES];
+	u32 preload_y[DPU_MAX_PLANES];
+	u32 src_width[DPU_MAX_PLANES];
+	u32 src_height[DPU_MAX_PLANES];
+
+	u32 dst_width;
+	u32 dst_height;
+
+	u32 y_rgb_filter_cfg;
+	u32 uv_filter_cfg;
+	u32 alpha_filter_cfg;
+	u32 blend_cfg;
+
+	u32 lut_flag;
+	u32 dir_lut_idx;
+
+	u32 y_rgb_cir_lut_idx;
+	u32 uv_cir_lut_idx;
+	u32 y_rgb_sep_lut_idx;
+	u32 uv_sep_lut_idx;
+	u32 *dir_lut;
+	size_t dir_len;
+	u32 *cir_lut;
+	size_t cir_len;
+	u32 *sep_lut;
+	size_t sep_len;
+
+	/*
+	 * Detail enhancer settings
+	 */
+	struct dpu_hw_scaler3_de_cfg de;
+};
+
+struct dpu_hw_scaler3_lut_cfg {
+	bool is_configured;
+	u32 *dir_lut;
+	size_t dir_len;
+	u32 *cir_lut;
+	size_t cir_len;
+	u32 *sep_lut;
+	size_t sep_len;
+};
+
+/**
+ * struct dpu_drm_pix_ext_v1 - version 1 of pixel ext structure
+ * @num_ext_pxls_lr: Number of total horizontal pixels
+ * @num_ext_pxls_tb: Number of total vertical lines
+ * @left_ftch:       Number of extra pixels to overfetch from left
+ * @right_ftch:      Number of extra pixels to overfetch from right
+ * @top_ftch:        Number of extra lines to overfetch from top
+ * @btm_ftch:        Number of extra lines to overfetch from bottom
+ * @left_rpt:        Number of extra pixels to repeat from left
+ * @right_rpt:       Number of extra pixels to repeat from right
+ * @top_rpt:         Number of extra lines to repeat from top
+ * @btm_rpt:         Number of extra lines to repeat from bottom
+ */
+struct dpu_drm_pix_ext_v1 {
+	/*
+	 * Number of pixels ext in left, right, top and bottom direction
+	 * for all color components.
+	 */
+	int32_t num_ext_pxls_lr[DPU_MAX_PLANES];
+	int32_t num_ext_pxls_tb[DPU_MAX_PLANES];
+
+	/*
+	 * Number of pixels that need to be overfetched in left, right, top
+	 * and bottom directions from the source image for scaling.
+	 */
+	int32_t left_ftch[DPU_MAX_PLANES];
+	int32_t right_ftch[DPU_MAX_PLANES];
+	int32_t top_ftch[DPU_MAX_PLANES];
+	int32_t btm_ftch[DPU_MAX_PLANES];
+	/*
+	 * Number of pixels that need to be repeated in left, right, top and
+	 * bottom directions for scaling.
+	 */
+	int32_t left_rpt[DPU_MAX_PLANES];
+	int32_t right_rpt[DPU_MAX_PLANES];
+	int32_t top_rpt[DPU_MAX_PLANES];
+	int32_t btm_rpt[DPU_MAX_PLANES];
+};
+
+/**
+ * struct dpu_drm_de_v1 - version 1 of detail enhancer structure
+ * @enable:         Enables/disables detail enhancer
+ * @sharpen_level1: Sharpening strength for noise
+ * @sharpen_level2: Sharpening strength for context
+ * @clip:           Clip coefficient
+ * @limit:          Detail enhancer limit factor
+ * @thr_quiet:      Quiet zone threshold
+ * @thr_dieout:     Die-out zone threshold
+ * @thr_low:        Linear zone left threshold
+ * @thr_high:       Linear zone right threshold
+ * @prec_shift:     Detail enhancer precision
+ * @adjust_a:       Mapping curves A coefficients
+ * @adjust_b:       Mapping curves B coefficients
+ * @adjust_c:       Mapping curves C coefficients
+ */
+struct dpu_drm_de_v1 {
+	uint32_t enable;
+	int16_t sharpen_level1;
+	int16_t sharpen_level2;
+	uint16_t clip;
+	uint16_t limit;
+	uint16_t thr_quiet;
+	uint16_t thr_dieout;
+	uint16_t thr_low;
+	uint16_t thr_high;
+	uint16_t prec_shift;
+	int16_t adjust_a[DPU_MAX_DE_CURVES];
+	int16_t adjust_b[DPU_MAX_DE_CURVES];
+	int16_t adjust_c[DPU_MAX_DE_CURVES];
+};
+
+/**
+ * struct dpu_drm_scaler_v2 - version 2 of struct dpu_drm_scaler
+ * @enable:            Scaler enable
+ * @dir_en:            Direction detection enable
+ * @pe:                Pixel extension settings
+ * @horz_decimate:     Horizontal decimation factor
+ * @vert_decimate:     Vertical decimation factor
+ * @init_phase_x:      Initial scaler phase values for x
+ * @phase_step_x:      Phase step values for x
+ * @init_phase_y:      Initial scaler phase values for y
+ * @phase_step_y:      Phase step values for y
+ * @preload_x:         Horizontal preload value
+ * @preload_y:         Vertical preload value
+ * @src_width:         Source width
+ * @src_height:        Source height
+ * @dst_width:         Destination width
+ * @dst_height:        Destination height
+ * @y_rgb_filter_cfg:  Y/RGB plane filter configuration
+ * @uv_filter_cfg:     UV plane filter configuration
+ * @alpha_filter_cfg:  Alpha filter configuration
+ * @blend_cfg:         Selection of blend coefficients
+ * @lut_flag:          LUT configuration flags
+ * @dir_lut_idx:       2d 4x4 LUT index
+ * @y_rgb_cir_lut_idx: Y/RGB circular LUT index
+ * @uv_cir_lut_idx:    UV circular LUT index
+ * @y_rgb_sep_lut_idx: Y/RGB separable LUT index
+ * @uv_sep_lut_idx:    UV separable LUT index
+ * @de:                Detail enhancer settings
+ */
+struct dpu_drm_scaler_v2 {
+	/*
+	 * General definitions
+	 */
+	uint32_t enable;
+	uint32_t dir_en;
+
+	/*
+	 * Pix ext settings
+	 */
+	struct dpu_drm_pix_ext_v1 pe;
+
+	/*
+	 * Decimation settings
+	 */
+	uint32_t horz_decimate;
+	uint32_t vert_decimate;
+
+	/*
+	 * Phase settings
+	 */
+	int32_t init_phase_x[DPU_MAX_PLANES];
+	int32_t phase_step_x[DPU_MAX_PLANES];
+	int32_t init_phase_y[DPU_MAX_PLANES];
+	int32_t phase_step_y[DPU_MAX_PLANES];
+
+	uint32_t preload_x[DPU_MAX_PLANES];
+	uint32_t preload_y[DPU_MAX_PLANES];
+	uint32_t src_width[DPU_MAX_PLANES];
+	uint32_t src_height[DPU_MAX_PLANES];
+
+	uint32_t dst_width;
+	uint32_t dst_height;
+
+	uint32_t y_rgb_filter_cfg;
+	uint32_t uv_filter_cfg;
+	uint32_t alpha_filter_cfg;
+	uint32_t blend_cfg;
+
+	uint32_t lut_flag;
+	uint32_t dir_lut_idx;
+
+	/* for Y(RGB) and UV planes */
+	uint32_t y_rgb_cir_lut_idx;
+	uint32_t uv_cir_lut_idx;
+	uint32_t y_rgb_sep_lut_idx;
+	uint32_t uv_sep_lut_idx;
+
+	/*
+	 * Detail enhancer settings
+	 */
+	struct dpu_drm_de_v1 de;
+};
+
+u32 *dpu_hw_util_get_log_mask_ptr(void);
+
+void dpu_reg_write(struct dpu_hw_blk_reg_map *c,
+		u32 reg_off,
+		u32 val,
+		const char *name);
+int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off);
+
+#define DPU_REG_WRITE(c, off, val) dpu_reg_write(c, off, val, #off)
+#define DPU_REG_READ(c, off) dpu_reg_read(c, off)
+
+#define MISR_FRAME_COUNT_MASK		0xFF
+#define MISR_CTRL_ENABLE		BIT(8)
+#define MISR_CTRL_STATUS		BIT(9)
+#define MISR_CTRL_STATUS_CLEAR		BIT(10)
+#define INTF_MISR_CTRL_FREE_RUN_MASK	BIT(31)
+
+void *dpu_hw_util_get_dir(void);
+
+void dpu_set_scaler_v2(struct dpu_hw_scaler3_cfg *cfg,
+		const struct dpu_drm_scaler_v2 *scale_v2);
+
+void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
+		struct dpu_hw_scaler3_cfg *scaler3_cfg,
+		u32 scaler_offset, u32 scaler_version,
+		const struct dpu_format *format);
+
+u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
+		u32 scaler_offset);
+
+void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map  *c,
+		u32 csc_reg_off,
+		struct dpu_csc_cfg *data, bool csc10);
+
+uint32_t dpu_copy_formats(
+		struct dpu_format_extended *dst_list,
+		uint32_t dst_list_size,
+		uint32_t dst_list_pos,
+		const struct dpu_format_extended *src_list,
+		uint32_t src_list_size);
+
+#endif /* _DPU_HW_UTIL_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
new file mode 100644
index 000000000000..d43905525f92
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
@@ -0,0 +1,275 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_dbg.h"
+
+#define VBIF_VERSION			0x0000
+#define VBIF_CLK_FORCE_CTRL0		0x0008
+#define VBIF_CLK_FORCE_CTRL1		0x000C
+#define VBIF_QOS_REMAP_00		0x0020
+#define VBIF_QOS_REMAP_01		0x0024
+#define VBIF_QOS_REMAP_10		0x0028
+#define VBIF_QOS_REMAP_11		0x002C
+#define VBIF_WRITE_GATHER_EN		0x00AC
+#define VBIF_IN_RD_LIM_CONF0		0x00B0
+#define VBIF_IN_RD_LIM_CONF1		0x00B4
+#define VBIF_IN_RD_LIM_CONF2		0x00B8
+#define VBIF_IN_WR_LIM_CONF0		0x00C0
+#define VBIF_IN_WR_LIM_CONF1		0x00C4
+#define VBIF_IN_WR_LIM_CONF2		0x00C8
+#define VBIF_OUT_RD_LIM_CONF0		0x00D0
+#define VBIF_OUT_WR_LIM_CONF0		0x00D4
+#define VBIF_OUT_AXI_AMEMTYPE_CONF0	0x0160
+#define VBIF_OUT_AXI_AMEMTYPE_CONF1	0x0164
+#define VBIF_XIN_PND_ERR		0x0190
+#define VBIF_XIN_SRC_ERR		0x0194
+#define VBIF_XIN_CLR_ERR		0x019C
+#define VBIF_XIN_HALT_CTRL0		0x0200
+#define VBIF_XIN_HALT_CTRL1		0x0204
+#define VBIF_XINL_QOS_RP_REMAP_000	0x0550
+#define VBIF_XINL_QOS_LVL_REMAP_000	0x0590
+
+static void dpu_hw_clear_errors(struct dpu_hw_vbif *vbif,
+		u32 *pnd_errors, u32 *src_errors)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 pnd, src;
+
+	if (!vbif)
+		return;
+	c = &vbif->hw;
+	pnd = DPU_REG_READ(c, VBIF_XIN_PND_ERR);
+	src = DPU_REG_READ(c, VBIF_XIN_SRC_ERR);
+
+	if (pnd_errors)
+		*pnd_errors = pnd;
+	if (src_errors)
+		*src_errors = src;
+
+	DPU_REG_WRITE(c, VBIF_XIN_CLR_ERR, pnd | src);
+}
+
+static void dpu_hw_set_mem_type(struct dpu_hw_vbif *vbif,
+		u32 xin_id, u32 value)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 reg_off;
+	u32 bit_off;
+	u32 reg_val;
+
+	/*
+	 * Assume 4 bits per field and 8 fields per 32-bit register, so a
+	 * maximum of 16 fields across the two registers
+	 */
+	if (!vbif || xin_id >= MAX_XIN_COUNT || xin_id >= 16)
+		return;
+
+	c = &vbif->hw;
+
+	if (xin_id >= 8) {
+		xin_id -= 8;
+		reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF1;
+	} else {
+		reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF0;
+	}
+	bit_off = (xin_id & 0x7) * 4;
+	reg_val = DPU_REG_READ(c, reg_off);
+	reg_val &= ~(0x7 << bit_off);
+	reg_val |= (value & 0x7) << bit_off;
+	DPU_REG_WRITE(c, reg_off, reg_val);
+}
+
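+/*
+ * Transaction limits are packed one byte per xin client, four clients per
+ * 32-bit register: e.g. xin_id 5 lands in *_LIM_CONF1 bits [15:8].
+ */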
+static void dpu_hw_set_limit_conf(struct dpu_hw_vbif *vbif,
+		u32 xin_id, bool rd, u32 limit)
+{
+	struct dpu_hw_blk_reg_map *c = &vbif->hw;
+	u32 reg_val;
+	u32 reg_off;
+	u32 bit_off;
+
+	if (rd)
+		reg_off = VBIF_IN_RD_LIM_CONF0;
+	else
+		reg_off = VBIF_IN_WR_LIM_CONF0;
+
+	reg_off += (xin_id / 4) * 4;
+	bit_off = (xin_id % 4) * 8;
+	reg_val = DPU_REG_READ(c, reg_off);
+	reg_val &= ~(0xFF << bit_off);
+	reg_val |= (limit) << bit_off;
+	DPU_REG_WRITE(c, reg_off, reg_val);
+}
+
+static u32 dpu_hw_get_limit_conf(struct dpu_hw_vbif *vbif,
+		u32 xin_id, bool rd)
+{
+	struct dpu_hw_blk_reg_map *c = &vbif->hw;
+	u32 reg_val;
+	u32 reg_off;
+	u32 bit_off;
+	u32 limit;
+
+	if (rd)
+		reg_off = VBIF_IN_RD_LIM_CONF0;
+	else
+		reg_off = VBIF_IN_WR_LIM_CONF0;
+
+	reg_off += (xin_id / 4) * 4;
+	bit_off = (xin_id % 4) * 8;
+	reg_val = DPU_REG_READ(c, reg_off);
+	limit = (reg_val >> bit_off) & 0xFF;
+
+	return limit;
+}
+
+static void dpu_hw_set_halt_ctrl(struct dpu_hw_vbif *vbif,
+		u32 xin_id, bool enable)
+{
+	struct dpu_hw_blk_reg_map *c = &vbif->hw;
+	u32 reg_val;
+
+	reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL0);
+
+	if (enable)
+		reg_val |= BIT(xin_id);
+	else
+		reg_val &= ~BIT(xin_id);
+
+	DPU_REG_WRITE(c, VBIF_XIN_HALT_CTRL0, reg_val);
+}
+
+static bool dpu_hw_get_halt_ctrl(struct dpu_hw_vbif *vbif,
+		u32 xin_id)
+{
+	struct dpu_hw_blk_reg_map *c = &vbif->hw;
+	u32 reg_val;
+
+	reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL1);
+
+	return (reg_val & BIT(xin_id)) ? true : false;
+}
+
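+/*
+ * QoS remap registers pack eight 4-bit fields (3 bits used) per 32-bit
+ * register, with separate low/high registers per priority level: e.g.
+ * xin_id 7 at level 0 maps to VBIF_XINL_QOS_RP_REMAP_000 bits [30:28].
+ */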
+static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif,
+		u32 xin_id, u32 level, u32 remap_level)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 reg_val, reg_val_lvl, mask, reg_high, reg_shift;
+
+	if (!vbif)
+		return;
+
+	c = &vbif->hw;
+
+	reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
+	reg_shift = (xin_id & 0x7) * 4;
+
+	reg_val = DPU_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high);
+	reg_val_lvl = DPU_REG_READ(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high);
+
+	mask = 0x7 << reg_shift;
+
+	reg_val &= ~mask;
+	reg_val |= (remap_level << reg_shift) & mask;
+
+	reg_val_lvl &= ~mask;
+	reg_val_lvl |= (remap_level << reg_shift) & mask;
+
+	DPU_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val);
+	DPU_REG_WRITE(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_val_lvl);
+}
+
+static void dpu_hw_set_write_gather_en(struct dpu_hw_vbif *vbif, u32 xin_id)
+{
+	struct dpu_hw_blk_reg_map *c;
+	u32 reg_val;
+
+	if (!vbif || xin_id >= MAX_XIN_COUNT)
+		return;
+
+	c = &vbif->hw;
+
+	reg_val = DPU_REG_READ(c, VBIF_WRITE_GATHER_EN);
+	reg_val |= BIT(xin_id);
+	DPU_REG_WRITE(c, VBIF_WRITE_GATHER_EN, reg_val);
+}
+
+static void _setup_vbif_ops(struct dpu_hw_vbif_ops *ops,
+		unsigned long cap)
+{
+	ops->set_limit_conf = dpu_hw_set_limit_conf;
+	ops->get_limit_conf = dpu_hw_get_limit_conf;
+	ops->set_halt_ctrl = dpu_hw_set_halt_ctrl;
+	ops->get_halt_ctrl = dpu_hw_get_halt_ctrl;
+	if (test_bit(DPU_VBIF_QOS_REMAP, &cap))
+		ops->set_qos_remap = dpu_hw_set_qos_remap;
+	ops->set_mem_type = dpu_hw_set_mem_type;
+	ops->clear_errors = dpu_hw_clear_errors;
+	ops->set_write_gather_en = dpu_hw_set_write_gather_en;
+}
+
+static const struct dpu_vbif_cfg *_top_offset(enum dpu_vbif vbif,
+		const struct dpu_mdss_cfg *m,
+		void __iomem *addr,
+		struct dpu_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->vbif_count; i++) {
+		if (vbif == m->vbif[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->vbif[i].base;
+			b->length = m->vbif[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = DPU_DBG_MASK_VBIF;
+			return &m->vbif[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx,
+		void __iomem *addr,
+		const struct dpu_mdss_cfg *m)
+{
+	struct dpu_hw_vbif *c;
+	const struct dpu_vbif_cfg *cfg;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _top_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/*
+	 * Assign ops
+	 */
+	c->idx = idx;
+	c->cap = cfg;
+	_setup_vbif_ops(&c->ops, c->cap->features);
+
+	/* no need to register sub-range in dpu dbg, dump entire vbif io base */
+
+	return c;
+}
+
+void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif)
+{
+	kfree(vbif);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
new file mode 100644
index 000000000000..471ff673c045
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
@@ -0,0 +1,128 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_VBIF_H
+#define _DPU_HW_VBIF_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+struct dpu_hw_vbif;
+
+/**
+ * struct dpu_hw_vbif_ops : Interface to the VBIF hardware driver functions
+ *  Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_vbif_ops {
+	/**
+	 * set_limit_conf - set transaction limit config
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 * @rd: true for read limit; false for write limit
+	 * @limit: outstanding transaction limit
+	 */
+	void (*set_limit_conf)(struct dpu_hw_vbif *vbif,
+			u32 xin_id, bool rd, u32 limit);
+
+	/**
+	 * get_limit_conf - get transaction limit config
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 * @rd: true for read limit; false for write limit
+	 * @return: outstanding transaction limit
+	 */
+	u32 (*get_limit_conf)(struct dpu_hw_vbif *vbif,
+			u32 xin_id, bool rd);
+
+	/**
+	 * set_halt_ctrl - set halt control
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 * @enable: halt control enable
+	 */
+	void (*set_halt_ctrl)(struct dpu_hw_vbif *vbif,
+			u32 xin_id, bool enable);
+
+	/**
+	 * get_halt_ctrl - get halt control
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 * @return: halt control enable
+	 */
+	bool (*get_halt_ctrl)(struct dpu_hw_vbif *vbif,
+			u32 xin_id);
+
+	/**
+	 * set_qos_remap - set QoS priority remap
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 * @level: priority level
+	 * @remap_level: remapped level
+	 */
+	void (*set_qos_remap)(struct dpu_hw_vbif *vbif,
+			u32 xin_id, u32 level, u32 remap_level);
+
+	/**
+	 * set_mem_type - set memory type
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 * @value: memory type value
+	 */
+	void (*set_mem_type)(struct dpu_hw_vbif *vbif,
+			u32 xin_id, u32 value);
+
+	/**
+	 * clear_errors - clear any vbif errors
+	 *	This function clears any detected pending/source errors
+	 *	on the VBIF interface, and optionally returns the detected
+	 *	error mask(s).
+	 * @vbif: vbif context driver
+	 * @pnd_errors: pointer to pending error reporting variable
+	 * @src_errors: pointer to source error reporting variable
+	 */
+	void (*clear_errors)(struct dpu_hw_vbif *vbif,
+		u32 *pnd_errors, u32 *src_errors);
+
+	/**
+	 * set_write_gather_en - set write_gather enable
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 */
+	void (*set_write_gather_en)(struct dpu_hw_vbif *vbif, u32 xin_id);
+};
+
+struct dpu_hw_vbif {
+	/* base */
+	struct dpu_hw_blk_reg_map hw;
+
+	/* vbif */
+	enum dpu_vbif idx;
+	const struct dpu_vbif_cfg *cap;
+
+	/* ops */
+	struct dpu_hw_vbif_ops ops;
+};
+
+/**
+ * dpu_hw_vbif_init - initializes the vbif driver for the passed interface idx
+ * @idx:  Interface index for which driver object is required
+ * @addr: Mapped register io address of MDSS
+ * @m:    Pointer to mdss catalog data
+ */
+struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx,
+		void __iomem *addr,
+		const struct dpu_mdss_cfg *m);
+
+void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif);
+
+#endif /*_DPU_HW_VBIF_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
new file mode 100644
index 000000000000..5b2bc9b65b15
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
@@ -0,0 +1,56 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HWIO_H
+#define _DPU_HWIO_H
+
+#include "dpu_hw_util.h"
+
+/**
+ * MDP TOP block register offsets and bit field defines
+ */
+#define DISP_INTF_SEL                   0x004
+#define INTR_EN                         0x010
+#define INTR_STATUS                     0x014
+#define INTR_CLEAR                      0x018
+#define INTR2_EN                        0x008
+#define INTR2_STATUS                    0x00c
+#define INTR2_CLEAR                     0x02c
+#define HIST_INTR_EN                    0x01c
+#define HIST_INTR_STATUS                0x020
+#define HIST_INTR_CLEAR                 0x024
+#define INTF_INTR_EN                    0x1C0
+#define INTF_INTR_STATUS                0x1C4
+#define INTF_INTR_CLEAR                 0x1C8
+#define SPLIT_DISPLAY_EN                0x2F4
+#define SPLIT_DISPLAY_UPPER_PIPE_CTRL   0x2F8
+#define DSPP_IGC_COLOR0_RAM_LUTN        0x300
+#define DSPP_IGC_COLOR1_RAM_LUTN        0x304
+#define DSPP_IGC_COLOR2_RAM_LUTN        0x308
+#define HW_EVENTS_CTL                   0x37C
+#define CLK_CTRL3                       0x3A8
+#define CLK_STATUS3                     0x3AC
+#define CLK_CTRL4                       0x3B0
+#define CLK_STATUS4                     0x3B4
+#define CLK_CTRL5                       0x3B8
+#define CLK_STATUS5                     0x3BC
+#define CLK_CTRL7                       0x3D0
+#define CLK_STATUS7                     0x3D4
+#define SPLIT_DISPLAY_LOWER_PIPE_CTRL   0x3F0
+#define SPLIT_DISPLAY_TE_LINE_INTERVAL  0x3F4
+#define INTF_SW_RESET_MASK              0x3FC
+#define HDMI_DP_CORE_SELECT             0x408
+#define MDP_OUT_CTL_0                   0x410
+#define MDP_VSYNC_SEL                   0x414
+#define DCE_SEL                         0x450
+
+#endif /*_DPU_HWIO_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
new file mode 100644
index 000000000000..f7caec3033b6
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
@@ -0,0 +1,186 @@
+/* Copyright (c) 2012-2015, 2017-2018, The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+
+#include "dpu_io_util.h"
+
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
+{
+	int i;
+
+	for (i = num_clk - 1; i >= 0; i--) {
+		if (clk_arry[i].clk)
+			clk_put(clk_arry[i].clk);
+		clk_arry[i].clk = NULL;
+	}
+}
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < num_clk; i++) {
+		clk_arry[i].clk = clk_get(dev, clk_arry[i].clk_name);
+		rc = PTR_RET(clk_arry[i].clk);
+		if (rc) {
+			DEV_ERR("%pS->%s: '%s' get failed. rc=%d\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name, rc);
+			goto error;
+		}
+	}
+
+	return rc;
+
+error:
+	for (i--; i >= 0; i--) {
+		if (clk_arry[i].clk)
+			clk_put(clk_arry[i].clk);
+		clk_arry[i].clk = NULL;
+	}
+
+	return rc;
+}
+
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < num_clk; i++) {
+		if (clk_arry[i].clk) {
+			if (clk_arry[i].type != DSS_CLK_AHB) {
+				DEV_DBG("%pS->%s: '%s' rate %ld\n",
+					__builtin_return_address(0), __func__,
+					clk_arry[i].clk_name,
+					clk_arry[i].rate);
+				rc = clk_set_rate(clk_arry[i].clk,
+					clk_arry[i].rate);
+				if (rc) {
+					DEV_ERR("%pS->%s: %s failed. rc=%d\n",
+						__builtin_return_address(0),
+						__func__,
+						clk_arry[i].clk_name, rc);
+					break;
+				}
+			}
+		} else {
+			DEV_ERR("%pS->%s: '%s' is not available\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name);
+			rc = -EPERM;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
+{
+	int i, rc = 0;
+
+	if (enable) {
+		for (i = 0; i < num_clk; i++) {
+			DEV_DBG("%pS->%s: enable '%s'\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name);
+			if (clk_arry[i].clk) {
+				rc = clk_prepare_enable(clk_arry[i].clk);
+				if (rc)
+					DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
+						__builtin_return_address(0),
+						__func__,
+						clk_arry[i].clk_name, rc);
+			} else {
+				DEV_ERR("%pS->%s: '%s' is not available\n",
+					__builtin_return_address(0), __func__,
+					clk_arry[i].clk_name);
+				rc = -EPERM;
+			}
+
+			if (rc) {
+				msm_dss_enable_clk(&clk_arry[i],
+					i, false);
+				break;
+			}
+		}
+	} else {
+		for (i = num_clk - 1; i >= 0; i--) {
+			DEV_DBG("%pS->%s: disable '%s'\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name);
+
+			if (clk_arry[i].clk)
+				clk_disable_unprepare(clk_arry[i].clk);
+			else
+				DEV_ERR("%pS->%s: '%s' is not available\n",
+					__builtin_return_address(0), __func__,
+					clk_arry[i].clk_name);
+		}
+	}
+
+	return rc;
+}
+
+int msm_dss_parse_clock(struct platform_device *pdev,
+		struct dss_module_power *mp)
+{
+	int i, rc = 0;
+	const char *clock_name;
+	u32 rate = 0;
+	int num_clk = 0;
+
+	if (!pdev || !mp)
+		return -EINVAL;
+
+	mp->num_clk = 0;
+	num_clk = of_property_count_strings(pdev->dev.of_node, "clock-names");
+	if (num_clk <= 0) {
+		pr_debug("clocks are not defined\n");
+		return 0;
+	}
+
+	mp->clk_config = devm_kcalloc(&pdev->dev, num_clk,
+				      sizeof(struct dss_clk), GFP_KERNEL);
+	if (!mp->clk_config)
+		return -ENOMEM;
+
+	for (i = 0; i < num_clk; i++) {
+		rc = of_property_read_string_index(pdev->dev.of_node,
+						   "clock-names", i,
+						   &clock_name);
+		if (rc)
+			break;
+		strlcpy(mp->clk_config[i].clk_name, clock_name,
+			sizeof(mp->clk_config[i].clk_name));
+
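+		/*
+		 * Default to AHB (no rate control); a non-zero
+		 * "clock-frequency" entry promotes the clock to PCLK below.
+		 */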
+		mp->clk_config[i].type = DSS_CLK_AHB;
+		rc = of_property_read_u32_index(pdev->dev.of_node,
+						"clock-frequency", i,
+						&rate);
+		if (rc)
+			continue;
+		mp->clk_config[i].rate = rate;
+		if (rate)
+			mp->clk_config[i].type = DSS_CLK_PCLK;
+	}
+
+	if (!rc)
+		mp->num_clk = num_clk;
+
+	return rc;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h
new file mode 100644
index 000000000000..bc07381d7429
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2012, 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_IO_UTIL_H__
+#define __DPU_IO_UTIL_H__
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define DEV_DBG(fmt, args...)   pr_debug(fmt, ##args)
+#define DEV_INFO(fmt, args...)  pr_info(fmt, ##args)
+#define DEV_WARN(fmt, args...)  pr_warn(fmt, ##args)
+#define DEV_ERR(fmt, args...)   pr_err(fmt, ##args)
+
+struct dss_gpio {
+	unsigned int gpio;
+	unsigned int value;
+	char gpio_name[32];
+};
+
+enum dss_clk_type {
+	DSS_CLK_AHB, /* no set rate. rate controlled through rpm */
+	DSS_CLK_PCLK,
+};
+
+struct dss_clk {
+	struct clk *clk; /* clk handle */
+	char clk_name[32];
+	enum dss_clk_type type;
+	unsigned long rate;
+	unsigned long max_rate;
+};
+
+struct dss_module_power {
+	unsigned int num_gpio;
+	struct dss_gpio *gpio_config;
+	unsigned int num_clk;
+	struct dss_clk *clk_config;
+};
+
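+/*
+ * Typical call sequence for the helpers below (illustrative sketch,
+ * mirroring the DPU bind path):
+ *
+ *	struct dss_module_power mp = { 0 };
+ *
+ *	msm_dss_parse_clock(pdev, &mp);
+ *	msm_dss_get_clk(&pdev->dev, mp.clk_config, mp.num_clk);
+ *	msm_dss_clk_set_rate(mp.clk_config, mp.num_clk);
+ *	msm_dss_enable_clk(mp.clk_config, mp.num_clk, true);
+ */
+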
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk);
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable);
+int msm_dss_parse_clock(struct platform_device *pdev,
+		struct dss_module_power *mp);
+#endif /* __DPU_IO_UTIL_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c
new file mode 100644
index 000000000000..d5e6ce0140cf
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c
@@ -0,0 +1,66 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+
+#include "dpu_irq.h"
+#include "dpu_core_irq.h"
+
+irqreturn_t dpu_irq(struct msm_kms *kms)
+{
+	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+	return dpu_core_irq(dpu_kms);
+}
+
+void dpu_irq_preinstall(struct msm_kms *kms)
+{
+	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+	if (!dpu_kms->dev || !dpu_kms->dev->dev) {
+		pr_err("invalid device handles\n");
+		return;
+	}
+
+	dpu_core_irq_preinstall(dpu_kms);
+}
+
+int dpu_irq_postinstall(struct msm_kms *kms)
+{
+	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+	int rc;
+
+	if (!kms) {
+		DPU_ERROR("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	rc = dpu_core_irq_postinstall(dpu_kms);
+
+	return rc;
+}
+
+void dpu_irq_uninstall(struct msm_kms *kms)
+{
+	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+	if (!kms) {
+		DPU_ERROR("invalid parameters\n");
+		return;
+	}
+
+	dpu_core_irq_uninstall(dpu_kms);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h
new file mode 100644
index 000000000000..3e147f7176e2
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_IRQ_H__
+#define __DPU_IRQ_H__
+
+#include <linux/kernel.h>
+#include <linux/irqdomain.h>
+
+#include "msm_kms.h"
+
+/**
+ * dpu_irq_controller - define MDSS level interrupt controller context
+ * @enabled_mask:	enable status of MDSS level interrupt
+ * @domain:		interrupt domain of this controller
+ */
+struct dpu_irq_controller {
+	unsigned long enabled_mask;
+	struct irq_domain *domain;
+};
+
+/**
+ * dpu_irq_preinstall - perform pre-installation of MDSS IRQ handler
+ * @kms:		pointer to kms context
+ * @return:		none
+ */
+void dpu_irq_preinstall(struct msm_kms *kms);
+
+/**
+ * dpu_irq_postinstall - perform post-installation of MDSS IRQ handler
+ * @kms:		pointer to kms context
+ * @return:		0 if success; error code otherwise
+ */
+int dpu_irq_postinstall(struct msm_kms *kms);
+
+/**
+ * dpu_irq_uninstall - uninstall MDSS IRQ handler
+ * @kms:		pointer to kms context
+ * @return:		none
+ */
+void dpu_irq_uninstall(struct msm_kms *kms);
+
+/**
+ * dpu_irq - MDSS level IRQ handler
+ * @kms:		pointer to kms context
+ * @return:		interrupt handling status
+ */
+irqreturn_t dpu_irq(struct msm_kms *kms);
+
+#endif /* __DPU_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
new file mode 100644
index 000000000000..791122f0e9a9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -0,0 +1,1380 @@
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark at gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <drm/drm_crtc.h>
+#include <linux/debugfs.h>
+#include <linux/of_irq.h>
+#include <linux/dma-buf.h>
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "msm_gem.h"
+
+#include "dpu_kms.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_vbif.h"
+#include "dpu_encoder.h"
+#include "dpu_plane.h"
+#include "dpu_crtc.h"
+
+#define CREATE_TRACE_POINTS
+#include "dpu_trace.h"
+
+static const char * const iommu_ports[] = {
+		"mdp_0",
+};
+
+/*
+ * To enable overall DRM driver logging
+ * # echo 0x2 > /sys/module/drm/parameters/debug
+ *
+ * To enable DRM driver h/w logging
+ * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
+ *
+ * See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
+ */
+#define DPU_DEBUGFS_DIR "msm_dpu"
+#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
+
+static int dpu_kms_hw_init(struct msm_kms *kms);
+static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
+
+static unsigned long dpu_iomap_size(struct platform_device *pdev,
+				    const char *name)
+{
+	struct resource *res;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+	if (!res) {
+		DRM_ERROR("failed to get memory resource: %s\n", name);
+		return 0;
+	}
+
+	return resource_size(res);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_danger_signal_status(struct seq_file *s,
+		bool danger_status)
+{
+	struct dpu_kms *kms = (struct dpu_kms *)s->private;
+	struct msm_drm_private *priv;
+	struct dpu_danger_safe_status status;
+	int i;
+
+	if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
+		DPU_ERROR("invalid arg(s)\n");
+		return 0;
+	}
+
+	priv = kms->dev->dev_private;
+	memset(&status, 0, sizeof(struct dpu_danger_safe_status));
+
+	pm_runtime_get_sync(&kms->pdev->dev);
+	if (danger_status) {
+		seq_puts(s, "\nDanger signal status:\n");
+		if (kms->hw_mdp->ops.get_danger_status)
+			kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
+					&status);
+	} else {
+		seq_puts(s, "\nSafe signal status:\n");
+		if (kms->hw_mdp->ops.get_safe_status)
+			kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
+					&status);
+	}
+	pm_runtime_put_sync(&kms->pdev->dev);
+
+	seq_printf(s, "MDP     :  0x%x\n", status.mdp);
+
+	for (i = SSPP_VIG0; i < SSPP_MAX; i++)
+		seq_printf(s, "SSPP%d   :  0x%x  \t", i - SSPP_VIG0,
+				status.sspp[i]);
+	seq_puts(s, "\n");
+
+	return 0;
+}
+
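+/* stamp out single_open()-style debugfs fops for a given _show function */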
+#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)				\
+static int __prefix ## _open(struct inode *inode, struct file *file)	\
+{									\
+	return single_open(file, __prefix ## _show, inode->i_private);	\
+}									\
+static const struct file_operations __prefix ## _fops = {		\
+	.owner = THIS_MODULE,						\
+	.open = __prefix ## _open,					\
+	.release = single_release,					\
+	.read = seq_read,						\
+	.llseek = seq_lseek,						\
+}
+
+static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
+{
+	return _dpu_danger_signal_status(s, true);
+}
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_danger_stats);
+
+static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
+{
+	return _dpu_danger_signal_status(s, false);
+}
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats);
+
+static void dpu_debugfs_danger_destroy(struct dpu_kms *dpu_kms)
+{
+	debugfs_remove_recursive(dpu_kms->debugfs_danger);
+	dpu_kms->debugfs_danger = NULL;
+}
+
+static int dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
+		struct dentry *parent)
+{
+	dpu_kms->debugfs_danger = debugfs_create_dir("danger",
+			parent);
+	if (!dpu_kms->debugfs_danger) {
+		DPU_ERROR("failed to create danger debugfs\n");
+		return -EINVAL;
+	}
+
+	debugfs_create_file("danger_status", 0600, dpu_kms->debugfs_danger,
+			dpu_kms, &dpu_debugfs_danger_stats_fops);
+	debugfs_create_file("safe_status", 0600, dpu_kms->debugfs_danger,
+			dpu_kms, &dpu_debugfs_safe_stats_fops);
+
+	return 0;
+}
+
+static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
+{
+	struct dpu_debugfs_regset32 *regset;
+	struct dpu_kms *dpu_kms;
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	void __iomem *base;
+	uint32_t i, addr;
+
+	if (!s || !s->private)
+		return 0;
+
+	regset = s->private;
+
+	dpu_kms = regset->dpu_kms;
+	if (!dpu_kms || !dpu_kms->mmio)
+		return 0;
+
+	dev = dpu_kms->dev;
+	if (!dev)
+		return 0;
+
+	priv = dev->dev_private;
+	if (!priv)
+		return 0;
+
+	base = dpu_kms->mmio + regset->offset;
+
+	/* insert padding spaces, if needed */
+	if (regset->offset & 0xF) {
+		seq_printf(s, "[%x]", regset->offset & ~0xF);
+		for (i = 0; i < (regset->offset & 0xF); i += 4)
+			seq_puts(s, "         ");
+	}
+
+	pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+	/* main register output */
+	for (i = 0; i < regset->blk_len; i += 4) {
+		addr = regset->offset + i;
+		if ((addr & 0xF) == 0x0)
+			seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
+		seq_printf(s, " %08x", readl_relaxed(base + i));
+	}
+	seq_puts(s, "\n");
+	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+	return 0;
+}
+
+static int dpu_debugfs_open_regset32(struct inode *inode,
+		struct file *file)
+{
+	return single_open(file, _dpu_debugfs_show_regset32, inode->i_private);
+}
+
+static const struct file_operations dpu_fops_regset32 = {
+	.open =		dpu_debugfs_open_regset32,
+	.read =		seq_read,
+	.llseek =	seq_lseek,
+	.release =	single_release,
+};
+
+void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
+		uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
+{
+	if (regset) {
+		regset->offset = offset;
+		regset->blk_len = length;
+		regset->dpu_kms = dpu_kms;
+	}
+}
+
+void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
+		void *parent, struct dpu_debugfs_regset32 *regset)
+{
+	if (!name || !regset || !regset->dpu_kms || !regset->blk_len)
+		return NULL;
+
+	/* make sure offset is a multiple of 4 */
+	regset->offset = round_down(regset->offset, 4);
+
+	return debugfs_create_file(name, mode, parent,
+			regset, &dpu_fops_regset32);
+}
+
+static int _dpu_debugfs_init(struct dpu_kms *dpu_kms)
+{
+	void *p;
+	int rc;
+
+	p = dpu_hw_util_get_log_mask_ptr();
+
+	if (!dpu_kms || !p)
+		return -EINVAL;
+
+	dpu_kms->debugfs_root = debugfs_create_dir("debug",
+					   dpu_kms->dev->primary->debugfs_root);
+	if (IS_ERR_OR_NULL(dpu_kms->debugfs_root)) {
+		DRM_ERROR("debugfs create_dir failed %ld\n",
+			  PTR_ERR(dpu_kms->debugfs_root));
+		return dpu_kms->debugfs_root ?
+			PTR_ERR(dpu_kms->debugfs_root) : -EINVAL;
+	}
+
+	rc = dpu_dbg_debugfs_register(dpu_kms->debugfs_root);
+	if (rc) {
+		DRM_ERROR("failed to reg dpu dbg debugfs: %d\n", rc);
+		return rc;
+	}
+
+	/* allow root to be NULL */
+	debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, dpu_kms->debugfs_root, p);
+
+	(void) dpu_debugfs_danger_init(dpu_kms, dpu_kms->debugfs_root);
+	(void) dpu_debugfs_vbif_init(dpu_kms, dpu_kms->debugfs_root);
+	(void) dpu_debugfs_core_irq_init(dpu_kms, dpu_kms->debugfs_root);
+
+	rc = dpu_core_perf_debugfs_init(&dpu_kms->perf, dpu_kms->debugfs_root);
+	if (rc) {
+		DPU_ERROR("failed to init perf %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
+{
+	/* don't need to NULL check debugfs_root */
+	if (dpu_kms) {
+		dpu_debugfs_vbif_destroy(dpu_kms);
+		dpu_debugfs_danger_destroy(dpu_kms);
+		dpu_debugfs_core_irq_destroy(dpu_kms);
+		debugfs_remove_recursive(dpu_kms->debugfs_root);
+	}
+}
+#else
+static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
+{
+}
+#endif
+
+static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+	return dpu_crtc_vblank(crtc, true);
+}
+
+static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+	dpu_crtc_vblank(crtc, false);
+}
+
+static void dpu_kms_prepare_commit(struct msm_kms *kms,
+		struct drm_atomic_state *state)
+{
+	struct dpu_kms *dpu_kms;
+	struct msm_drm_private *priv;
+	struct drm_device *dev;
+	struct drm_encoder *encoder;
+
+	if (!kms)
+		return;
+	dpu_kms = to_dpu_kms(kms);
+	dev = dpu_kms->dev;
+
+	if (!dev || !dev->dev_private)
+		return;
+	priv = dev->dev_private;
+	pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+		if (encoder->crtc != NULL)
+			dpu_encoder_prepare_commit(encoder);
+}
+
+/*
+ * Override the encoder enable since we need to setup the inline rotator and do
+ * some crtc magic before enabling any bridge that might be present.
+ */
+void dpu_kms_encoder_enable(struct drm_encoder *encoder)
+{
+	const struct drm_encoder_helper_funcs *funcs = encoder->helper_private;
+	struct drm_crtc *crtc = encoder->crtc;
+
+	/* Forward this enable call to the commit hook */
+	if (funcs && funcs->commit)
+		funcs->commit(encoder);
+
+	if (crtc && crtc->state->active) {
+		trace_dpu_kms_enc_enable(DRMID(crtc));
+		dpu_crtc_commit_kickoff(crtc);
+	}
+}
+
+static void dpu_kms_commit(struct msm_kms *kms, struct drm_atomic_state *state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	int i;
+
+	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+		/* If modeset is required, kickoff is run in encoder_enable */
+		if (drm_atomic_crtc_needs_modeset(crtc_state))
+			continue;
+
+		if (crtc->state->active) {
+			trace_dpu_kms_commit(DRMID(crtc));
+			dpu_crtc_commit_kickoff(crtc);
+		}
+	}
+}
+
+static void dpu_kms_complete_commit(struct msm_kms *kms,
+		struct drm_atomic_state *old_state)
+{
+	struct dpu_kms *dpu_kms;
+	struct msm_drm_private *priv;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state;
+	int i;
+
+	if (!kms || !old_state)
+		return;
+	dpu_kms = to_dpu_kms(kms);
+
+	if (!dpu_kms->dev || !dpu_kms->dev->dev_private)
+		return;
+	priv = dpu_kms->dev->dev_private;
+
+	DPU_ATRACE_BEGIN("kms_complete_commit");
+
+	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
+		dpu_crtc_complete_commit(crtc, old_crtc_state);
+
+	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+	DPU_ATRACE_END("kms_complete_commit");
+}
+
+static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
+		struct drm_crtc *crtc)
+{
+	struct drm_encoder *encoder;
+	struct drm_device *dev;
+	int ret;
+
+	if (!kms || !crtc || !crtc->state) {
+		DPU_ERROR("invalid params\n");
+		return;
+	}
+
+	dev = crtc->dev;
+
+	if (!crtc->state->enable) {
+		DPU_DEBUG("[crtc:%d] not enabled\n", crtc->base.id);
+		return;
+	}
+
+	if (!crtc->state->active) {
+		DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id);
+		return;
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+		/*
+		 * Wait for post-flush if necessary to delay before
+		 * plane_cleanup. For example, wait for vsync in case of video
+		 * mode panels. This may be a no-op for command mode panels.
+		 */
+		trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
+		ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
+		if (ret && ret != -EWOULDBLOCK) {
+			DPU_ERROR("wait for commit done returned %d\n", ret);
+			break;
+		}
+	}
+}
+
+static void _dpu_kms_initialize_dsi(struct drm_device *dev,
+				    struct msm_drm_private *priv,
+				    struct dpu_kms *dpu_kms)
+{
+	struct drm_encoder *encoder = NULL;
+	int i, rc;
+
+	/* TODO: Support two independent DSI connectors */
+	encoder = dpu_encoder_init(dev, DRM_MODE_CONNECTOR_DSI);
+	if (IS_ERR_OR_NULL(encoder)) {
+		DPU_ERROR("encoder init failed for dsi display\n");
+		return;
+	}
+
+	priv->encoders[priv->num_encoders++] = encoder;
+
+	for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+		if (!priv->dsi[i]) {
+			DPU_DEBUG("invalid msm_dsi for ctrl %d\n", i);
+			return;
+		}
+
+		rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
+		if (rc) {
+			DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
+				i, rc);
+			continue;
+		}
+	}
+}
+
+/**
+ * _dpu_kms_setup_displays - create encoders, bridges and connectors
+ *                           for underlying displays
+ * @dev:        Pointer to drm device structure
+ * @priv:       Pointer to private drm device data
+ * @dpu_kms:    Pointer to dpu kms structure
+ * Returns:     none
+ */
+static void _dpu_kms_setup_displays(struct drm_device *dev,
+				    struct msm_drm_private *priv,
+				    struct dpu_kms *dpu_kms)
+{
+	_dpu_kms_initialize_dsi(dev, priv, dpu_kms);
+
+	/*
+	 * Extend this function to initialize other
+	 * types of displays.
+	 */
+}
+
+static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
+{
+	struct msm_drm_private *priv;
+	int i;
+
+	if (!dpu_kms) {
+		DPU_ERROR("invalid dpu_kms\n");
+		return;
+	} else if (!dpu_kms->dev) {
+		DPU_ERROR("invalid dev\n");
+		return;
+	} else if (!dpu_kms->dev->dev_private) {
+		DPU_ERROR("invalid dev_private\n");
+		return;
+	}
+	priv = dpu_kms->dev->dev_private;
+
+	for (i = 0; i < priv->num_crtcs; i++)
+		priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
+	priv->num_crtcs = 0;
+
+	for (i = 0; i < priv->num_planes; i++)
+		priv->planes[i]->funcs->destroy(priv->planes[i]);
+	priv->num_planes = 0;
+
+	for (i = 0; i < priv->num_connectors; i++)
+		priv->connectors[i]->funcs->destroy(priv->connectors[i]);
+	priv->num_connectors = 0;
+
+	for (i = 0; i < priv->num_encoders; i++)
+		priv->encoders[i]->funcs->destroy(priv->encoders[i]);
+	priv->num_encoders = 0;
+}
+
+static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
+{
+	struct drm_device *dev;
+	struct drm_plane *primary_planes[MAX_PLANES], *plane;
+	struct drm_crtc *crtc;
+
+	struct msm_drm_private *priv;
+	struct dpu_mdss_cfg *catalog;
+
+	int primary_planes_idx = 0, i, ret;
+	int max_crtc_count;
+
+	if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
+		DPU_ERROR("invalid dpu_kms\n");
+		return -EINVAL;
+	}
+
+	dev = dpu_kms->dev;
+	priv = dev->dev_private;
+	catalog = dpu_kms->catalog;
+
+	/*
+	 * Create encoder and query display drivers to create
+	 * bridges and connectors
+	 */
+	_dpu_kms_setup_displays(dev, priv, dpu_kms);
+
+	max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
+
+	/* Create the planes */
+	for (i = 0; i < catalog->sspp_count; i++) {
+		bool primary = true;
+
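+		/* cursor pipes and any SSPPs beyond the CRTC count are overlays */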
+		if (catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR)
+			|| primary_planes_idx >= max_crtc_count)
+			primary = false;
+
+		plane = dpu_plane_init(dev, catalog->sspp[i].id, primary,
+				(1UL << max_crtc_count) - 1, 0);
+		if (IS_ERR(plane)) {
+			DPU_ERROR("dpu_plane_init failed\n");
+			ret = PTR_ERR(plane);
+			goto fail;
+		}
+		priv->planes[priv->num_planes++] = plane;
+
+		if (primary)
+			primary_planes[primary_planes_idx++] = plane;
+	}
+
+	max_crtc_count = min(max_crtc_count, primary_planes_idx);
+
+	/* Create one CRTC per encoder */
+	for (i = 0; i < max_crtc_count; i++) {
+		crtc = dpu_crtc_init(dev, primary_planes[i]);
+		if (IS_ERR(crtc)) {
+			ret = PTR_ERR(crtc);
+			goto fail;
+		}
+		priv->crtcs[priv->num_crtcs++] = crtc;
+	}
+
+	/* All CRTCs are compatible with all encoders */
+	for (i = 0; i < priv->num_encoders; i++)
+		priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;
+
+	return 0;
+fail:
+	_dpu_kms_drm_obj_destroy(dpu_kms);
+	return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
+{
+	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+	struct drm_device *dev;
+	int rc;
+
+	if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
+		DPU_ERROR("invalid dpu_kms\n");
+		return -EINVAL;
+	}
+
+	dev = dpu_kms->dev;
+
+	rc = _dpu_debugfs_init(dpu_kms);
+	if (rc)
+		DPU_ERROR("dpu_debugfs init failed: %d\n", rc);
+
+	return rc;
+}
+#endif
+
+static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
+		struct drm_encoder *encoder)
+{
+	return rate;
+}
+
+static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
+{
+	struct drm_device *dev;
+	int i;
+
+	dev = dpu_kms->dev;
+	if (!dev)
+		return;
+
+	if (dpu_kms->hw_intr)
+		dpu_hw_intr_destroy(dpu_kms->hw_intr);
+	dpu_kms->hw_intr = NULL;
+
+	if (dpu_kms->power_event)
+		dpu_power_handle_unregister_event(
+				&dpu_kms->phandle, dpu_kms->power_event);
+
+	/* safe to call these more than once during shutdown */
+	_dpu_debugfs_destroy(dpu_kms);
+	_dpu_kms_mmu_destroy(dpu_kms);
+
+	if (dpu_kms->catalog) {
+		for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+			u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
+
+			if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx])
+				dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
+		}
+	}
+
+	if (dpu_kms->rm_init)
+		dpu_rm_destroy(&dpu_kms->rm);
+	dpu_kms->rm_init = false;
+
+	if (dpu_kms->catalog)
+		dpu_hw_catalog_deinit(dpu_kms->catalog);
+	dpu_kms->catalog = NULL;
+
+	if (dpu_kms->core_client)
+		dpu_power_client_destroy(&dpu_kms->phandle,
+			dpu_kms->core_client);
+	dpu_kms->core_client = NULL;
+
+	if (dpu_kms->vbif[VBIF_NRT])
+		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
+	dpu_kms->vbif[VBIF_NRT] = NULL;
+
+	if (dpu_kms->vbif[VBIF_RT])
+		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
+	dpu_kms->vbif[VBIF_RT] = NULL;
+
+	if (dpu_kms->mmio)
+		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
+	dpu_kms->mmio = NULL;
+}
+
+static void dpu_kms_destroy(struct msm_kms *kms)
+{
+	struct dpu_kms *dpu_kms;
+
+	if (!kms) {
+		DPU_ERROR("invalid kms\n");
+		return;
+	}
+
+	dpu_kms = to_dpu_kms(kms);
+
+	dpu_dbg_destroy();
+	_dpu_kms_hw_destroy(dpu_kms);
+}
+
+static void dpu_kms_preclose(struct msm_kms *kms, struct drm_file *file)
+{
+	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+	struct drm_device *dev = dpu_kms->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	unsigned int i;
+
+	for (i = 0; i < priv->num_crtcs; i++)
+		dpu_crtc_cancel_pending_flip(priv->crtcs[i], file);
+}
+
+static int dpu_kms_pm_suspend(struct device *dev)
+{
+	struct drm_device *ddev;
+	struct drm_modeset_acquire_ctx ctx;
+	struct drm_atomic_state *state;
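+	struct drm_crtc *crtc;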
+	struct dpu_kms *dpu_kms;
+	int ret = 0, num_crtcs = 0;
+
+	if (!dev)
+		return -EINVAL;
+
+	ddev = dev_get_drvdata(dev);
+	if (!ddev || !ddev_to_msm_kms(ddev))
+		return -EINVAL;
+
+	dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
+
+	/* disable hot-plug polling */
+	drm_kms_helper_poll_disable(ddev);
+
+	/* acquire modeset lock(s) */
+	drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+	DPU_ATRACE_BEGIN("kms_pm_suspend");
+
+	ret = drm_modeset_lock_all_ctx(ddev, &ctx);
+	if (ret)
+		goto unlock;
+
+	/* save current state for resume */
+	if (dpu_kms->suspend_state)
+		drm_atomic_state_put(dpu_kms->suspend_state);
+	dpu_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
+	if (IS_ERR_OR_NULL(dpu_kms->suspend_state)) {
+		DRM_ERROR("failed to back up suspend state\n");
+		dpu_kms->suspend_state = NULL;
+		goto unlock;
+	}
+
+	/* create atomic state to disable all CRTCs */
+	state = drm_atomic_state_alloc(ddev);
+	if (IS_ERR_OR_NULL(state)) {
+		DRM_ERROR("failed to allocate crtc disable state\n");
+		goto unlock;
+	}
+
+	state->acquire_ctx = &ctx;
+
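+	/*
+	 * Mark every currently-active CRTC inactive in the new state so that
+	 * the commit below turns all displays off (a sketch using the
+	 * generic atomic helpers); num_crtcs counts how many need disabling.
+	 */
+	drm_for_each_crtc(crtc, ddev) {
+		struct drm_crtc_state *crtc_state;
+
+		if (!crtc->state || !crtc->state->active)
+			continue;
+
+		crtc_state = drm_atomic_get_crtc_state(state, crtc);
+		if (IS_ERR(crtc_state)) {
+			ret = PTR_ERR(crtc_state);
+			drm_atomic_state_put(state);
+			goto unlock;
+		}
+
+		crtc_state->active = false;
+		num_crtcs++;
+	}
+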
+	/* check for nothing to do */
+	if (num_crtcs == 0) {
+		DRM_DEBUG("all crtcs are already in the off state\n");
+		drm_atomic_state_put(state);
+		goto suspended;
+	}
+
+	/* commit the "disable all" state */
+	ret = drm_atomic_commit(state);
+	if (ret < 0) {
+		DRM_ERROR("failed to disable crtcs, %d\n", ret);
+		drm_atomic_state_put(state);
+		goto unlock;
+	}
+
+suspended:
+	dpu_kms->suspend_block = true;
+
+unlock:
+	if (ret == -EDEADLK) {
+		drm_modeset_backoff(&ctx);
+		goto retry;
+	}
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+
+	DPU_ATRACE_END("kms_pm_suspend");
+	return 0;
+}
+
+static int dpu_kms_pm_resume(struct device *dev)
+{
+	struct drm_device *ddev;
+	struct dpu_kms *dpu_kms;
+	int ret;
+
+	if (!dev)
+		return -EINVAL;
+
+	ddev = dev_get_drvdata(dev);
+	if (!ddev || !ddev_to_msm_kms(ddev))
+		return -EINVAL;
+
+	dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
+
+	DPU_ATRACE_BEGIN("kms_pm_resume");
+
+	drm_mode_config_reset(ddev);
+
+	drm_modeset_lock_all(ddev);
+
+	dpu_kms->suspend_block = false;
+
+	if (dpu_kms->suspend_state) {
+		dpu_kms->suspend_state->acquire_ctx =
+			ddev->mode_config.acquire_ctx;
+		ret = drm_atomic_commit(dpu_kms->suspend_state);
+		if (ret < 0) {
+			DRM_ERROR("failed to restore state, %d\n", ret);
+			drm_atomic_state_put(dpu_kms->suspend_state);
+		}
+		dpu_kms->suspend_state = NULL;
+	}
+	drm_modeset_unlock_all(ddev);
+
+	/* enable hot-plug polling */
+	drm_kms_helper_poll_enable(ddev);
+
+	DPU_ATRACE_END("kms_pm_resume");
+	return 0;
+}
+
+void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
+				 struct drm_encoder *encoder,
+				 bool cmd_mode)
+{
+	struct msm_display_info info;
+	struct msm_drm_private *priv = encoder->dev->dev_private;
+	int i, rc = 0;
+
+	memset(&info, 0, sizeof(info));
+
+	info.intf_type = encoder->encoder_type;
+	info.capabilities = cmd_mode ? MSM_DISPLAY_CAP_CMD_MODE :
+			MSM_DISPLAY_CAP_VID_MODE;
+
+	/* TODO: No support for DSI swap */
+	for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+		if (priv->dsi[i]) {
+			info.h_tile_instance[info.num_of_h_tiles] = i;
+			info.num_of_h_tiles++;
+		}
+	}
+
+	rc = dpu_encoder_setup(encoder->dev, encoder, &info);
+	if (rc)
+		DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
+			encoder->base.id, rc);
+}
+
+static const struct msm_kms_funcs kms_funcs = {
+	.hw_init         = dpu_kms_hw_init,
+	.irq_preinstall  = dpu_irq_preinstall,
+	.irq_postinstall = dpu_irq_postinstall,
+	.irq_uninstall   = dpu_irq_uninstall,
+	.irq             = dpu_irq,
+	.preclose        = dpu_kms_preclose,
+	.prepare_commit  = dpu_kms_prepare_commit,
+	.commit          = dpu_kms_commit,
+	.complete_commit = dpu_kms_complete_commit,
+	.wait_for_crtc_commit_done = dpu_kms_wait_for_commit_done,
+	.enable_vblank   = dpu_kms_enable_vblank,
+	.disable_vblank  = dpu_kms_disable_vblank,
+	.check_modified_format = dpu_format_check_modified_format,
+	.get_format      = dpu_get_msm_format,
+	.round_pixclk    = dpu_kms_round_pixclk,
+	.pm_suspend      = dpu_kms_pm_suspend,
+	.pm_resume       = dpu_kms_pm_resume,
+	.destroy         = dpu_kms_destroy,
+	.set_encoder_mode = _dpu_kms_set_encoder_mode,
+#ifdef CONFIG_DEBUG_FS
+	.debugfs_init    = dpu_kms_debugfs_init,
+#endif
+};
+
+/* the caller needs to enable the clocks before calling this function */
+static inline void _dpu_kms_core_hw_rev_init(struct dpu_kms *dpu_kms)
+{
+	dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
+}
+
+static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
+{
+	struct msm_mmu *mmu;
+
+	mmu = dpu_kms->base.aspace->mmu;
+
+	mmu->funcs->detach(mmu, (const char **)iommu_ports,
+			ARRAY_SIZE(iommu_ports));
+	msm_gem_address_space_put(dpu_kms->base.aspace);
+
+	return 0;
+}
+
+static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
+{
+	struct iommu_domain *domain;
+	struct msm_gem_address_space *aspace;
+	int ret;
+
+	domain = iommu_domain_alloc(&platform_bus_type);
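+	/* a missing IOMMU is not fatal: simply skip aspace setup */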
+	if (!domain)
+		return 0;
+
+	aspace = msm_gem_address_space_create(dpu_kms->dev->dev,
+			domain, "dpu1");
+	if (IS_ERR(aspace)) {
+		ret = PTR_ERR(aspace);
+		goto fail;
+	}
+
+	dpu_kms->base.aspace = aspace;
+
+	ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
+			ARRAY_SIZE(iommu_ports));
+	if (ret) {
+		DPU_ERROR("failed to attach iommu %d\n", ret);
+		msm_gem_address_space_put(aspace);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	_dpu_kms_mmu_destroy(dpu_kms);
+
+	return ret;
+}
+
+static struct dss_clk *_dpu_kms_get_clk(struct dpu_kms *dpu_kms,
+		char *clock_name)
+{
+	struct dss_module_power *mp = &dpu_kms->mp;
+	int i;
+
+	for (i = 0; i < mp->num_clk; i++) {
+		if (!strcmp(mp->clk_config[i].clk_name, clock_name))
+			return &mp->clk_config[i];
+	}
+
+	return NULL;
+}
+
+u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
+{
+	struct dss_clk *clk;
+
+	clk = _dpu_kms_get_clk(dpu_kms, clock_name);
+	if (!clk)
+		return -EINVAL;
+
+	return clk_get_rate(clk->clk);
+}
+
+static void dpu_kms_handle_power_event(u32 event_type, void *usr)
+{
+	struct dpu_kms *dpu_kms = usr;
+
+	if (!dpu_kms)
+		return;
+
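+	/* reprogram VBIF memory types whenever the power rail comes back up */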
+	if (event_type == DPU_POWER_EVENT_POST_ENABLE)
+		dpu_vbif_init_memtypes(dpu_kms);
+}
+
+static int dpu_kms_hw_init(struct msm_kms *kms)
+{
+	struct dpu_kms *dpu_kms;
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	int i, rc = -EINVAL;
+
+	if (!kms) {
+		DPU_ERROR("invalid kms\n");
+		goto end;
+	}
+
+	dpu_kms = to_dpu_kms(kms);
+	dev = dpu_kms->dev;
+	if (!dev) {
+		DPU_ERROR("invalid device\n");
+		goto end;
+	}
+
+	rc = dpu_dbg_init(&dpu_kms->pdev->dev);
+	if (rc) {
+		DRM_ERROR("failed to init dpu dbg: %d\n", rc);
+		goto end;
+	}
+
+	priv = dev->dev_private;
+	if (!priv) {
+		DPU_ERROR("invalid private data\n");
+		goto dbg_destroy;
+	}
+
+	dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp_phys", "mdp_phys");
+	if (IS_ERR(dpu_kms->mmio)) {
+		rc = PTR_ERR(dpu_kms->mmio);
+		DPU_ERROR("mdp register memory map failed: %d\n", rc);
+		dpu_kms->mmio = NULL;
+		goto error;
+	}
+	DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
+	dpu_kms->mmio_len = dpu_iomap_size(dpu_kms->pdev, "mdp_phys");
+
+	dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif_phys",
+								"vbif_phys");
+	if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
+		rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
+		DPU_ERROR("vbif register memory map failed: %d\n", rc);
+		dpu_kms->vbif[VBIF_RT] = NULL;
+		goto error;
+	}
+	dpu_kms->vbif_len[VBIF_RT] = dpu_iomap_size(dpu_kms->pdev, "vbif_phys");
+	dpu_kms->vbif[VBIF_NRT] = msm_ioremap(dpu_kms->pdev, "vbif_nrt_phys",
+								"vbif_nrt_phys");
+	if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
+		dpu_kms->vbif[VBIF_NRT] = NULL;
+		DPU_DEBUG("VBIF NRT is not defined\n");
+	} else {
+		dpu_kms->vbif_len[VBIF_NRT] = dpu_iomap_size(dpu_kms->pdev,
+							     "vbif_nrt_phys");
+	}
+
+	dpu_kms->reg_dma = msm_ioremap(dpu_kms->pdev, "regdma_phys",
+								"regdma_phys");
+	if (IS_ERR(dpu_kms->reg_dma)) {
+		dpu_kms->reg_dma = NULL;
+		DPU_DEBUG("REG_DMA is not defined\n");
+	} else {
+		dpu_kms->reg_dma_len = dpu_iomap_size(dpu_kms->pdev,
+						      "regdma_phys");
+	}
+
+	dpu_kms->core_client = dpu_power_client_create(&dpu_kms->phandle,
+					"core");
+	if (IS_ERR_OR_NULL(dpu_kms->core_client)) {
+		rc = PTR_ERR(dpu_kms->core_client);
+		if (!dpu_kms->core_client)
+			rc = -EINVAL;
+		DPU_ERROR("dpu power client create failed: %d\n", rc);
+		dpu_kms->core_client = NULL;
+		goto error;
+	}
+
+	pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+	_dpu_kms_core_hw_rev_init(dpu_kms);
+
+	pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev);
+
+	dpu_kms->catalog = dpu_hw_catalog_init(dpu_kms->core_rev);
+	if (IS_ERR_OR_NULL(dpu_kms->catalog)) {
+		rc = PTR_ERR(dpu_kms->catalog);
+		if (!dpu_kms->catalog)
+			rc = -EINVAL;
+		DPU_ERROR("catalog init failed: %d\n", rc);
+		dpu_kms->catalog = NULL;
+		goto power_error;
+	}
+
+	dpu_dbg_init_dbg_buses(dpu_kms->core_rev);
+
+	/*
+	 * Now we need to read the HW catalog and initialize resources such as
+	 * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
+	 */
+	rc = _dpu_kms_mmu_init(dpu_kms);
+	if (rc) {
+		DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
+		goto power_error;
+	}
+
+	rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio,
+			dpu_kms->dev);
+	if (rc) {
+		DPU_ERROR("rm init failed: %d\n", rc);
+		goto power_error;
+	}
+
+	dpu_kms->rm_init = true;
+
+	dpu_kms->hw_mdp = dpu_rm_get_mdp(&dpu_kms->rm);
+	if (IS_ERR_OR_NULL(dpu_kms->hw_mdp)) {
+		rc = PTR_ERR(dpu_kms->hw_mdp);
+		if (!dpu_kms->hw_mdp)
+			rc = -EINVAL;
+		DPU_ERROR("failed to get hw_mdp: %d\n", rc);
+		dpu_kms->hw_mdp = NULL;
+		goto power_error;
+	}
+
+	for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+		u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
+
+		dpu_kms->hw_vbif[vbif_idx] = dpu_hw_vbif_init(vbif_idx,
+				dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
+		if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) {
+			rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
+			if (!dpu_kms->hw_vbif[vbif_idx])
+				rc = -EINVAL;
+			DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
+			dpu_kms->hw_vbif[vbif_idx] = NULL;
+			goto power_error;
+		}
+	}
+
+	rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
+			&dpu_kms->phandle,
+			_dpu_kms_get_clk(dpu_kms, "core"));
+	if (rc) {
+		DPU_ERROR("failed to init perf %d\n", rc);
+		goto perf_err;
+	}
+
+	dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
+	if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
+		rc = PTR_ERR(dpu_kms->hw_intr);
+		DPU_ERROR("hw_intr init failed: %d\n", rc);
+		dpu_kms->hw_intr = NULL;
+		goto hw_intr_init_err;
+	}
+
+	/*
+	 * _dpu_kms_drm_obj_init should create the DRM related objects
+	 * i.e. CRTCs, planes, encoders, connectors and so forth
+	 */
+	rc = _dpu_kms_drm_obj_init(dpu_kms);
+	if (rc) {
+		DPU_ERROR("modeset init failed: %d\n", rc);
+		goto drm_obj_init_err;
+	}
+
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+
+	/*
+	 * max crtc width is equal to the max mixer width * 2 and max height
+	 * is 4K
+	 */
+	dev->mode_config.max_width =
+			dpu_kms->catalog->caps->max_mixer_width * 2;
+	dev->mode_config.max_height = 4096;
+
+	/*
+	 * Support format modifiers for compression etc.
+	 */
+	dev->mode_config.allow_fb_modifiers = true;
+
+	/*
+	 * Handle (re)initializations during power enable
+	 */
+	dpu_kms_handle_power_event(DPU_POWER_EVENT_POST_ENABLE, dpu_kms);
+	dpu_kms->power_event = dpu_power_handle_register_event(
+			&dpu_kms->phandle,
+			DPU_POWER_EVENT_POST_ENABLE,
+			dpu_kms_handle_power_event, dpu_kms, "kms");
+
+	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+	return 0;
+
+drm_obj_init_err:
+	dpu_core_perf_destroy(&dpu_kms->perf);
+hw_intr_init_err:
+perf_err:
+power_error:
+	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+error:
+	_dpu_kms_hw_destroy(dpu_kms);
+dbg_destroy:
+	dpu_dbg_destroy();
+end:
+	return rc;
+}
+
+struct msm_kms *dpu_kms_init(struct drm_device *dev)
+{
+	struct msm_drm_private *priv;
+	struct dpu_kms *dpu_kms;
+	int irq;
+
+	if (!dev || !dev->dev_private) {
+		DPU_ERROR("drm device node invalid\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	priv = dev->dev_private;
+	dpu_kms = to_dpu_kms(priv->kms);
+
+	irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
+	if (!irq) {
+		/* irq_of_parse_and_map() signals failure by returning 0 */
+		DPU_ERROR("failed to get irq\n");
+		return ERR_PTR(-EINVAL);
+	}
+	dpu_kms->base.irq = irq;
+
+	return &dpu_kms->base;
+}
+
+static int dpu_bind(struct device *dev, struct device *master, void *data)
+{
+	struct drm_device *ddev = dev_get_drvdata(master);
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_drm_private *priv = ddev->dev_private;
+	struct dpu_kms *dpu_kms;
+	struct dss_module_power *mp;
+	int ret = 0;
+
+	dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
+	if (!dpu_kms)
+		return -ENOMEM;
+
+	mp = &dpu_kms->mp;
+	ret = msm_dss_parse_clock(pdev, mp);
+	if (ret) {
+		DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
+		return ret;
+	}
+
+	ret = msm_dss_get_clk(&pdev->dev, mp->clk_config, mp->num_clk);
+	if (ret) {
+		pr_err("failed to get clocks, ret=%d\n", ret);
+		goto clk_get_error;
+	}
+
+	ret = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
+	if (ret) {
+		pr_err("failed to set clock rate, ret=%d\n", ret);
+		goto clk_rate_error;
+	}
+
+	dpu_power_resource_init(pdev, &dpu_kms->phandle);
+
+	platform_set_drvdata(pdev, dpu_kms);
+
+	msm_kms_init(&dpu_kms->base, &kms_funcs);
+	dpu_kms->dev = ddev;
+	dpu_kms->pdev = pdev;
+
+	pm_runtime_enable(&pdev->dev);
+	dpu_kms->rpm_enabled = true;
+
+	priv->kms = &dpu_kms->base;
+	return ret;
+
+clk_rate_error:
+	msm_dss_put_clk(mp->clk_config, mp->num_clk);
+clk_get_error:
+	devm_kfree(&pdev->dev, mp->clk_config);
+	mp->num_clk = 0;
+	return ret;
+}
+
+static void dpu_unbind(struct device *dev, struct device *master, void *data)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+	struct dss_module_power *mp = &dpu_kms->mp;
+
+	dpu_power_resource_deinit(pdev, &dpu_kms->phandle);
+	msm_dss_put_clk(mp->clk_config, mp->num_clk);
+	devm_kfree(&pdev->dev, mp->clk_config);
+	mp->num_clk = 0;
+
+	if (dpu_kms->rpm_enabled)
+		pm_runtime_disable(&pdev->dev);
+}
+
+static const struct component_ops dpu_ops = {
+	.bind   = dpu_bind,
+	.unbind = dpu_unbind,
+};
+
+static int dpu_dev_probe(struct platform_device *pdev)
+{
+	return component_add(&pdev->dev, &dpu_ops);
+}
+
+static int dpu_dev_remove(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &dpu_ops);
+	return 0;
+}
+
+static int dpu_runtime_suspend(struct device *dev)
+{
+	int rc = -1;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+	struct drm_device *ddev;
+	struct dss_module_power *mp = &dpu_kms->mp;
+
+	ddev = dpu_kms->dev;
+	if (!ddev) {
+		DPU_ERROR("invalid drm_device\n");
+		goto exit;
+	}
+
+	rc = dpu_power_resource_enable(&dpu_kms->phandle,
+			dpu_kms->core_client, false);
+	if (rc)
+		DPU_ERROR("resource disable failed: %d\n", rc);
+
+	rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
+	if (rc)
+		DPU_ERROR("clock disable failed rc:%d\n", rc);
+
+exit:
+	return rc;
+}
+
+static int dpu_runtime_resume(struct device *dev)
+{
+	int rc = -1;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+	struct drm_device *ddev;
+	struct dss_module_power *mp = &dpu_kms->mp;
+
+	ddev = dpu_kms->dev;
+	if (!ddev) {
+		DPU_ERROR("invalid drm_device\n");
+		goto exit;
+	}
+
+	rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
+	if (rc) {
+		DPU_ERROR("clock enable failed rc:%d\n", rc);
+		goto exit;
+	}
+
+	rc = dpu_power_resource_enable(&dpu_kms->phandle,
+			dpu_kms->core_client, true);
+	if (rc)
+		DPU_ERROR("resource enable failed: %d\n", rc);
+
+exit:
+	return rc;
+}
+
+static const struct dev_pm_ops dpu_pm_ops = {
+	SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
+};
+
+static const struct of_device_id dpu_dt_match[] = {
+	{ .compatible = "qcom,dpu", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, dpu_dt_match);
+
+static struct platform_driver dpu_driver = {
+	.probe = dpu_dev_probe,
+	.remove = dpu_dev_remove,
+	.driver = {
+		.name = "msm_dpu",
+		.of_match_table = dpu_dt_match,
+		.pm = &dpu_pm_ops,
+	},
+};
+
+void __init msm_dpu_register(void)
+{
+	platform_driver_register(&dpu_driver);
+}
+
+void __exit msm_dpu_unregister(void)
+{
+	platform_driver_unregister(&dpu_driver);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
new file mode 100644
index 000000000000..407c1ed27fe6
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -0,0 +1,402 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark at gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DPU_KMS_H__
+#define __DPU_KMS_H__
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "msm_mmu.h"
+#include "msm_gem.h"
+#include "dpu_dbg.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_top.h"
+#include "dpu_rm.h"
+#include "dpu_power_handle.h"
+#include "dpu_irq.h"
+#include "dpu_core_perf.h"
+
+#define DRMID(x) ((x) ? (x)->base.id : -1)
+
+/**
+ * DPU_DEBUG - macro for kms/plane/crtc/encoder/connector logs
+ * @fmt: Pointer to format string
+ */
+#define DPU_DEBUG(fmt, ...)                                                \
+	do {                                                               \
+		if (unlikely(drm_debug & DRM_UT_KMS))                      \
+			DRM_DEBUG(fmt, ##__VA_ARGS__); \
+		else                                                       \
+			pr_debug(fmt, ##__VA_ARGS__);                      \
+	} while (0)
+
+/**
+ * DPU_DEBUG_DRIVER - macro for hardware driver logging
+ * @fmt: Pointer to format string
+ */
+#define DPU_DEBUG_DRIVER(fmt, ...)                                         \
+	do {                                                               \
+		if (unlikely(drm_debug & DRM_UT_DRIVER))                   \
+			DRM_ERROR(fmt, ##__VA_ARGS__); \
+		else                                                       \
+			pr_debug(fmt, ##__VA_ARGS__);                      \
+	} while (0)
+
+#define DPU_ERROR(fmt, ...) pr_err("[dpu error]" fmt, ##__VA_ARGS__)
+
+/**
+ * ktime_compare_safe - compare two ktime structures
+ *	This macro is similar to the standard ktime_compare() function, but
+ *	attempts to also handle ktime overflows.
+ * @A: First ktime value
+ * @B: Second ktime value
+ * Returns: -1 if A < B, 0 if A == B, 1 if A > B
+ */
+#define ktime_compare_safe(A, B) \
+	ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0))
+
+#define DPU_NAME_SIZE  12
+
+/* timeout in frames waiting for frame done */
+#define DPU_FRAME_DONE_TIMEOUT	60
+
+/**
+ * struct dpu_irq_callback - IRQ callback handlers
+ * @list: list to callback
+ * @func: intr handler
+ * @arg: argument for the handler
+ */
+struct dpu_irq_callback {
+	struct list_head list;
+	void (*func)(void *arg, int irq_idx);
+	void *arg;
+};
+
+/**
+ * struct dpu_irq - IRQ structure containing callback registration info
+ * @total_irqs:   total number of irq_idx obtained from HW interrupts mapping
+ * @irq_cb_tbl:   array of IRQ callback lists
+ * @enable_counts: array of IRQ enable counts
+ * @irq_counts:   array of IRQ interrupt counts
+ * @cb_lock:      callback lock
+ * @debugfs_file: debugfs file for irq statistics
+ */
+struct dpu_irq {
+	u32 total_irqs;
+	struct list_head *irq_cb_tbl;
+	atomic_t *enable_counts;
+	atomic_t *irq_counts;
+	spinlock_t cb_lock;
+	struct dentry *debugfs_file;
+};
+
+struct dpu_kms {
+	struct msm_kms base;
+	struct drm_device *dev;
+	int core_rev;
+	struct dpu_mdss_cfg *catalog;
+
+	struct dpu_power_handle phandle;
+	struct dpu_power_client *core_client;
+	struct dpu_power_event *power_event;
+
+	/* directory entry for debugfs */
+	struct dentry *debugfs_root;
+	struct dentry *debugfs_danger;
+	struct dentry *debugfs_vbif;
+
+	/* io/register spaces: */
+	void __iomem *mmio, *vbif[VBIF_MAX], *reg_dma;
+	unsigned long mmio_len, vbif_len[VBIF_MAX], reg_dma_len;
+
+	struct regulator *vdd;
+	struct regulator *mmagic;
+	struct regulator *venus;
+
+	struct dpu_hw_intr *hw_intr;
+	struct dpu_irq irq_obj;
+
+	struct dpu_core_perf perf;
+
+	/* saved atomic state during system suspend */
+	struct drm_atomic_state *suspend_state;
+	bool suspend_block;
+
+	struct dpu_rm rm;
+	bool rm_init;
+
+	struct dpu_hw_vbif *hw_vbif[VBIF_MAX];
+	struct dpu_hw_mdp *hw_mdp;
+
+	bool has_danger_ctrl;
+
+	struct platform_device *pdev;
+	bool rpm_enabled;
+	struct dss_module_power mp;
+};
+
+struct vsync_info {
+	u32 frame_count;
+	u32 line_count;
+};
+
+#define to_dpu_kms(x) container_of(x, struct dpu_kms, base)
+
+/* get struct msm_kms * from drm_device * */
+#define ddev_to_msm_kms(D) ((D) && (D)->dev_private ? \
+		((struct msm_drm_private *)((D)->dev_private))->kms : NULL)
+
+/**
+ * dpu_kms_is_suspend_state - whether or not the system is pm suspended
+ * @dev: Pointer to drm device
+ * Return: Suspend status
+ */
+static inline bool dpu_kms_is_suspend_state(struct drm_device *dev)
+{
+	if (!ddev_to_msm_kms(dev))
+		return false;
+
+	return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_state != NULL;
+}
+
+/**
+ * dpu_kms_is_suspend_blocked - whether or not commits are blocked due to pm
+ *				suspend status
+ * @dev: Pointer to drm device
+ * Return: True if commits should be rejected due to pm suspend
+ */
+static inline bool dpu_kms_is_suspend_blocked(struct drm_device *dev)
+{
+	if (!dpu_kms_is_suspend_state(dev))
+		return false;
+
+	return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_block;
+}
+
+/**
+ * Debugfs functions - extra helper functions for debugfs support
+ *
+ * Main debugfs documentation is located at,
+ *
+ * Documentation/filesystems/debugfs.txt
+ *
+ * @dpu_debugfs_setup_regset32: Initialize data for dpu_debugfs_create_regset32
+ * @dpu_debugfs_create_regset32: Create 32-bit register dump file
+ * @dpu_debugfs_get_root: Get root dentry for DPU_KMS's debugfs node
+ */
+
+/**
+ * Companion structure for dpu_debugfs_create_regset32. Do not initialize the
+ * members of this structure explicitly; use dpu_debugfs_setup_regset32 instead.
+ */
+struct dpu_debugfs_regset32 {
+	uint32_t offset;
+	uint32_t blk_len;
+	struct dpu_kms *dpu_kms;
+};
+
+/**
+ * dpu_debugfs_setup_regset32 - Initialize register block definition for debugfs
+ * This function is meant to initialize dpu_debugfs_regset32 structures for use
+ * with dpu_debugfs_create_regset32.
+ * @regset: opaque register definition structure
+ * @offset: sub-block offset
+ * @length: sub-block length, in bytes
+ * @dpu_kms: pointer to dpu kms structure
+ */
+void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
+		uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_debugfs_create_regset32 - Create register read back file for debugfs
+ *
+ * This function is almost identical to the standard debugfs_create_regset32()
+ * function, with the main difference being that a list of register
+ * names/offsets do not need to be provided. The 'read' function simply outputs
+ * sequential register values over a specified range.
+ *
+ * Similar to the related debugfs_create_regset32 API, the structure pointed to
+ * by regset needs to persist for the lifetime of the created file. The calling
+ * code is responsible for initialization/management of this structure.
+ *
+ * The structure pointed to by regset is meant to be opaque. Please use
+ * dpu_debugfs_setup_regset32 to initialize it.
+ *
+ * @name:   File name within debugfs
+ * @mode:   File mode within debugfs
+ * @parent: Parent directory entry within debugfs, can be NULL
+ * @regset: Pointer to persistent register block definition
+ *
+ * Return: dentry pointer for newly created file, use either debugfs_remove()
+ *         or debugfs_remove_recursive() (on a parent directory) to remove the
+ *         file
+ */
+void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
+		void *parent, struct dpu_debugfs_regset32 *regset);
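+
+/*
+ * Example (sketch; the file name, offset and length are illustrative only):
+ *
+ *	static struct dpu_debugfs_regset32 regset;
+ *
+ *	dpu_debugfs_setup_regset32(&regset, 0x1000, 0x100, dpu_kms);
+ *	dpu_debugfs_create_regset32("ctl_0", 0400, parent, &regset);
+ *
+ * The regset is static because it must outlive the debugfs file.
+ */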
+
+/**
+ * dpu_debugfs_get_root - Return root directory entry for KMS's debugfs
+ *
+ * The return value should be passed as the 'parent' argument to subsequent
+ * debugfs create calls.
+ *
+ * @dpu_kms: Pointer to DPU's KMS structure
+ *
+ * Return: dentry pointer for DPU's debugfs location
+ */
+void *dpu_debugfs_get_root(struct dpu_kms *dpu_kms);
+
+/**
+ * DPU info management functions
+ * These functions/definitions allow for building up a 'dpu_info' structure
+ * containing one or more "key=value\n" entries.
+ */
+#define DPU_KMS_INFO_MAX_SIZE	4096
+
+/**
+ * struct dpu_kms_info - connector information structure container
+ * @data: Array of information character data
+ * @len: Current length of information data
+ * @staged_len: Temporary data buffer length, commit to
+ *              len using dpu_kms_info_stop
+ * @start: Whether or not a partial data entry was just started
+ */
+struct dpu_kms_info {
+	char data[DPU_KMS_INFO_MAX_SIZE];
+	uint32_t len;
+	uint32_t staged_len;
+	bool start;
+};
+
+/**
+ * DPU_KMS_INFO_DATA - Macro for accessing dpu_kms_info data bytes
+ * @S: Pointer to dpu_kms_info structure
+ * Returns: Pointer to byte data
+ */
+#define DPU_KMS_INFO_DATA(S)    ((S) ? ((struct dpu_kms_info *)(S))->data : 0)
+
+/**
+ * DPU_KMS_INFO_DATALEN - Macro for accessing dpu_kms_info data length;
+ *			adds one to the length to include the NUL terminator.
+ * @S: Pointer to dpu_kms_info structure
+ * Returns: Size of available byte data
+ */
+#define DPU_KMS_INFO_DATALEN(S) ((S) ? ((struct dpu_kms_info *)(S))->len + 1 \
+							: 0)
+
+/**
+ * dpu_kms_info_reset - reset dpu_kms_info structure
+ * @info: Pointer to dpu_kms_info structure
+ */
+void dpu_kms_info_reset(struct dpu_kms_info *info);
+
+/**
+ * dpu_kms_info_add_keyint - add integer value to 'dpu_kms_info'
+ * @info: Pointer to dpu_kms_info structure
+ * @key: Pointer to key string
+ * @value: Signed 64-bit integer value
+ */
+void dpu_kms_info_add_keyint(struct dpu_kms_info *info,
+		const char *key,
+		int64_t value);
+
+/**
+ * dpu_kms_info_add_keystr - add string value to 'dpu_kms_info'
+ * @info: Pointer to dpu_kms_info structure
+ * @key: Pointer to key string
+ * @value: Pointer to string value
+ */
+void dpu_kms_info_add_keystr(struct dpu_kms_info *info,
+		const char *key,
+		const char *value);
+
+/**
+ * dpu_kms_info_start - begin adding key to 'dpu_kms_info'
+ * Usage:
+ *      dpu_kms_info_start(key)
+ *      dpu_kms_info_append(val_1)
+ *      ...
+ *      dpu_kms_info_append(val_n)
+ *      dpu_kms_info_stop
+ * @info: Pointer to dpu_kms_info structure
+ * @key: Pointer to key string
+ */
+void dpu_kms_info_start(struct dpu_kms_info *info,
+		const char *key);
+
+/**
+ * dpu_kms_info_append - append value string to 'dpu_kms_info'
+ * Usage:
+ *      dpu_kms_info_start(key)
+ *      dpu_kms_info_append(val_1)
+ *      ...
+ *      dpu_kms_info_append(val_n)
+ *      dpu_kms_info_stop
+ * @info: Pointer to dpu_kms_info structure
+ * @str: Pointer to partial value string
+ */
+void dpu_kms_info_append(struct dpu_kms_info *info,
+		const char *str);
+
+/**
+ * dpu_kms_info_append_format - append format code string to 'dpu_kms_info'
+ * Usage:
+ *      dpu_kms_info_start(key)
+ *      dpu_kms_info_append_format(fourcc, modifier)
+ *      ...
+ *      dpu_kms_info_stop
+ * @info: Pointer to dpu_kms_info structure
+ * @pixel_format: FOURCC format code
+ * @modifier: 64-bit drm format modifier
+ */
+void dpu_kms_info_append_format(struct dpu_kms_info *info,
+		uint32_t pixel_format,
+		uint64_t modifier);
+
+/**
+ * dpu_kms_info_stop - finish adding key to 'dpu_kms_info'
+ * Usage:
+ *      dpu_kms_info_start(key)
+ *      dpu_kms_info_append(val_1)
+ *      ...
+ *      dpu_kms_info_append(val_n)
+ *      dpu_kms_info_stop
+ * @info: Pointer to dpu_kms_info structure
+ */
+void dpu_kms_info_stop(struct dpu_kms_info *info);
+
+/**
+ * Vblank enable/disable functions
+ */
+int dpu_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+void dpu_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+
+void dpu_kms_encoder_enable(struct drm_encoder *encoder);
+
+/**
+ * dpu_kms_get_clk_rate() - get the clock rate
+ * @dpu_kms:  pointer to dpu_kms structure
+ * @clock_name: clock name to get the rate
+ *
+ * Return: current clock rate
+ */
+u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name);
+
+#endif /* __DPU_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms_utils.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms_utils.c
new file mode 100644
index 000000000000..a80b3da5a9fe
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms_utils.c
@@ -0,0 +1,153 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"dpu-kms_utils:[%s] " fmt, __func__
+
+#include "dpu_kms.h"
+
+void dpu_kms_info_reset(struct dpu_kms_info *info)
+{
+	if (info) {
+		info->len = 0;
+		info->staged_len = 0;
+	}
+}
+
+void dpu_kms_info_add_keyint(struct dpu_kms_info *info,
+		const char *key,
+		int64_t value)
+{
+	uint32_t len;
+
+	if (info && key) {
+		len = snprintf(info->data + info->len,
+				DPU_KMS_INFO_MAX_SIZE - info->len,
+				"%s=%lld\n",
+				key,
+				value);
+
+		/* check if snprintf truncated the string */
+		if ((info->len + len) < DPU_KMS_INFO_MAX_SIZE)
+			info->len += len;
+	}
+}
+
+void dpu_kms_info_add_keystr(struct dpu_kms_info *info,
+		const char *key,
+		const char *value)
+{
+	uint32_t len;
+
+	if (info && key && value) {
+		len = snprintf(info->data + info->len,
+				DPU_KMS_INFO_MAX_SIZE - info->len,
+				"%s=%s\n",
+				key,
+				value);
+
+		/* check if snprintf truncated the string */
+		if ((info->len + len) < DPU_KMS_INFO_MAX_SIZE)
+			info->len += len;
+	}
+}
+
+void dpu_kms_info_start(struct dpu_kms_info *info,
+		const char *key)
+{
+	uint32_t len;
+
+	if (info && key) {
+		len = snprintf(info->data + info->len,
+				DPU_KMS_INFO_MAX_SIZE - info->len,
+				"%s=",
+				key);
+
+		info->start = true;
+
+		/* check if snprintf truncated the string */
+		if ((info->len + len) < DPU_KMS_INFO_MAX_SIZE)
+			info->staged_len = info->len + len;
+	}
+}
+
+void dpu_kms_info_append(struct dpu_kms_info *info,
+		const char *str)
+{
+	uint32_t len;
+
+	if (info) {
+		len = snprintf(info->data + info->staged_len,
+				DPU_KMS_INFO_MAX_SIZE - info->staged_len,
+				"%s",
+				str);
+
+		/* check if snprintf truncated the string */
+		if ((info->staged_len + len) < DPU_KMS_INFO_MAX_SIZE) {
+			info->staged_len += len;
+			info->start = false;
+		}
+	}
+}
+
+void dpu_kms_info_append_format(struct dpu_kms_info *info,
+		uint32_t pixel_format,
+		uint64_t modifier)
+{
+	uint32_t len;
+
+	if (!info)
+		return;
+
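+	/*
+	 * print the fourcc as four ASCII characters; a non-zero modifier is
+	 * split into its 8-bit vendor ID (bits 63:56) and the 56-bit
+	 * vendor-specific code, matching the drm_fourcc.h encoding
+	 */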
+	if (modifier) {
+		len = snprintf(info->data + info->staged_len,
+				DPU_KMS_INFO_MAX_SIZE - info->staged_len,
+				info->start ?
+				"%c%c%c%c/%llX/%llX" : " %c%c%c%c/%llX/%llX",
+				(pixel_format >> 0) & 0xFF,
+				(pixel_format >> 8) & 0xFF,
+				(pixel_format >> 16) & 0xFF,
+				(pixel_format >> 24) & 0xFF,
+				(modifier >> 56) & 0xFF,
+				modifier & ((1ULL << 56) - 1));
+	} else {
+		len = snprintf(info->data + info->staged_len,
+				DPU_KMS_INFO_MAX_SIZE - info->staged_len,
+				info->start ?
+				"%c%c%c%c" : " %c%c%c%c",
+				(pixel_format >> 0) & 0xFF,
+				(pixel_format >> 8) & 0xFF,
+				(pixel_format >> 16) & 0xFF,
+				(pixel_format >> 24) & 0xFF);
+	}
+
+	/* check if snprintf truncated the string */
+	if ((info->staged_len + len) < DPU_KMS_INFO_MAX_SIZE) {
+		info->staged_len += len;
+		info->start = false;
+	}
+}
+
+void dpu_kms_info_stop(struct dpu_kms_info *info)
+{
+	uint32_t len;
+
+	if (info) {
+		/* insert final delimiter */
+		len = snprintf(info->data + info->staged_len,
+				DPU_KMS_INFO_MAX_SIZE - info->staged_len,
+				"\n");
+
+		/* check if snprintf truncated the string */
+		if ((info->staged_len + len) < DPU_KMS_INFO_MAX_SIZE)
+			info->len = info->staged_len + len;
+	}
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
new file mode 100644
index 000000000000..5191c77cd907
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -0,0 +1,259 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include "dpu_kms.h"
+
+#define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base)
+
+#define HW_INTR_STATUS			0x0010
+
+struct dpu_mdss {
+	struct msm_mdss base;
+	void __iomem *mmio;
+	unsigned long mmio_len;
+	u32 hwversion;
+	struct dss_module_power mp;
+	struct dpu_irq_controller irq_controller;
+};
+
+static irqreturn_t dpu_mdss_irq(int irq, void *arg)
+{
+	struct dpu_mdss *dpu_mdss = arg;
+	u32 interrupts;
+
+	interrupts = readl_relaxed(dpu_mdss->mmio + HW_INTR_STATUS);
+
+	while (interrupts) {
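+		/* fls() picks the highest pending bit as the hw irq number */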
+		irq_hw_number_t hwirq = fls(interrupts) - 1;
+		unsigned int mapping;
+		int rc;
+
+		mapping = irq_find_mapping(dpu_mdss->irq_controller.domain,
+					   hwirq);
+		if (mapping == 0) {
+			DRM_ERROR("couldn't find irq mapping for %lu\n", hwirq);
+			return IRQ_NONE;
+		}
+
+		rc = generic_handle_irq(mapping);
+		if (rc < 0) {
+			DRM_ERROR("handle irq fail: irq=%lu mapping=%u rc=%d\n",
+				  hwirq, mapping, rc);
+			return IRQ_NONE;
+		}
+
+		interrupts &= ~(1 << hwirq);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void dpu_mdss_irq_mask(struct irq_data *irqd)
+{
+	struct dpu_mdss *dpu_mdss = irq_data_get_irq_chip_data(irqd);
+
+	/* memory barrier: order prior accesses before the mask update */
+	smp_mb__before_atomic();
+	clear_bit(irqd->hwirq, &dpu_mdss->irq_controller.enabled_mask);
+	/* memory barrier: make the cleared bit visible to the irq handler */
+	smp_mb__after_atomic();
+}
+
+static void dpu_mdss_irq_unmask(struct irq_data *irqd)
+{
+	struct dpu_mdss *dpu_mdss = irq_data_get_irq_chip_data(irqd);
+
+	/* memory barrier: order prior accesses before the mask update */
+	smp_mb__before_atomic();
+	set_bit(irqd->hwirq, &dpu_mdss->irq_controller.enabled_mask);
+	/* memory barrier: make the set bit visible to the irq handler */
+	smp_mb__after_atomic();
+}
+
+static struct irq_chip dpu_mdss_irq_chip = {
+	.name = "dpu_mdss",
+	.irq_mask = dpu_mdss_irq_mask,
+	.irq_unmask = dpu_mdss_irq_unmask,
+};
+
+static int dpu_mdss_irqdomain_map(struct irq_domain *domain,
+		unsigned int irq, irq_hw_number_t hwirq)
+{
+	struct dpu_mdss *dpu_mdss = domain->host_data;
+	int ret;
+
+	irq_set_chip_and_handler(irq, &dpu_mdss_irq_chip, handle_level_irq);
+	ret = irq_set_chip_data(irq, dpu_mdss);
+
+	return ret;
+}
+
+static const struct irq_domain_ops dpu_mdss_irqdomain_ops = {
+	.map = dpu_mdss_irqdomain_map,
+	.xlate = irq_domain_xlate_onecell,
+};
+
+static int _dpu_mdss_irq_domain_add(struct dpu_mdss *dpu_mdss)
+{
+	struct device *dev;
+	struct irq_domain *domain;
+
+	dev = dpu_mdss->base.dev->dev;
+
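+	/* one linear domain slot for each bit of the 32-bit HW_INTR_STATUS */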
+	domain = irq_domain_add_linear(dev->of_node, 32,
+			&dpu_mdss_irqdomain_ops, dpu_mdss);
+	if (!domain) {
+		DPU_ERROR("failed to add irq_domain\n");
+		return -EINVAL;
+	}
+
+	dpu_mdss->irq_controller.enabled_mask = 0;
+	dpu_mdss->irq_controller.domain = domain;
+
+	return 0;
+}
+
+static int _dpu_mdss_irq_domain_fini(struct dpu_mdss *dpu_mdss)
+{
+	if (dpu_mdss->irq_controller.domain) {
+		irq_domain_remove(dpu_mdss->irq_controller.domain);
+		dpu_mdss->irq_controller.domain = NULL;
+	}
+	return 0;
+}
+
+static int dpu_mdss_enable(struct msm_mdss *mdss)
+{
+	struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
+	struct dss_module_power *mp = &dpu_mdss->mp;
+	int ret;
+
+	ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
+	if (ret)
+		DPU_ERROR("clock enable failed, ret:%d\n", ret);
+
+	return ret;
+}
+
+static int dpu_mdss_disable(struct msm_mdss *mdss)
+{
+	struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
+	struct dss_module_power *mp = &dpu_mdss->mp;
+	int ret;
+
+	ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
+	if (ret)
+		DPU_ERROR("clock disable failed, ret:%d\n", ret);
+
+	return ret;
+}
+
+static void dpu_mdss_destroy(struct drm_device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev->dev);
+	struct msm_drm_private *priv = dev->dev_private;
+	struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
+	struct dss_module_power *mp = &dpu_mdss->mp;
+
+	_dpu_mdss_irq_domain_fini(dpu_mdss);
+
+	msm_dss_put_clk(mp->clk_config, mp->num_clk);
+	devm_kfree(&pdev->dev, mp->clk_config);
+
+	if (dpu_mdss->mmio)
+		devm_iounmap(&pdev->dev, dpu_mdss->mmio);
+	dpu_mdss->mmio = NULL;
+
+	pm_runtime_disable(dev->dev);
+	priv->mdss = NULL;
+}
+
+static const struct msm_mdss_funcs mdss_funcs = {
+	.enable	= dpu_mdss_enable,
+	.disable = dpu_mdss_disable,
+	.destroy = dpu_mdss_destroy,
+};
+
+int dpu_mdss_init(struct drm_device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev->dev);
+	struct msm_drm_private *priv = dev->dev_private;
+	struct resource *res;
+	struct dpu_mdss *dpu_mdss;
+	struct dss_module_power *mp;
+	int ret = 0;
+
+	dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
+	if (!dpu_mdss)
+		return -ENOMEM;
+
+	dpu_mdss->mmio = msm_ioremap(pdev, "mdss_phys", "mdss_phys");
+	if (IS_ERR(dpu_mdss->mmio))
+		return PTR_ERR(dpu_mdss->mmio);
+
+	DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mdss_phys");
+	if (!res) {
+		DRM_ERROR("failed to get memory resource for mdss_phys\n");
+		return -ENOMEM;
+	}
+	dpu_mdss->mmio_len = resource_size(res);
+
+	mp = &dpu_mdss->mp;
+	ret = msm_dss_parse_clock(pdev, mp);
+	if (ret) {
+		DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
+		goto clk_parse_err;
+	}
+
+	ret = msm_dss_get_clk(&pdev->dev, mp->clk_config, mp->num_clk);
+	if (ret) {
+		DPU_ERROR("failed to get clocks, ret=%d\n", ret);
+		goto clk_get_error;
+	}
+
+	ret = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
+	if (ret) {
+		DPU_ERROR("failed to set clock rate, ret=%d\n", ret);
+		goto clk_rate_error;
+	}
+
+	dpu_mdss->base.dev = dev;
+	dpu_mdss->base.funcs = &mdss_funcs;
+
+	ret = _dpu_mdss_irq_domain_add(dpu_mdss);
+	if (ret)
+		goto irq_domain_error;
+
+	ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
+			dpu_mdss_irq, 0, "dpu_mdss_isr", dpu_mdss);
+	if (ret) {
+		DPU_ERROR("failed to init irq: %d\n", ret);
+		goto irq_error;
+	}
+
+	pm_runtime_enable(dev->dev);
+
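+	/* the hardware revision is the first register of the mdss region */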
+	pm_runtime_get_sync(dev->dev);
+	dpu_mdss->hwversion = readl_relaxed(dpu_mdss->mmio);
+	pm_runtime_put_sync(dev->dev);
+
+	priv->mdss = &dpu_mdss->base;
+
+	return ret;
+
+irq_error:
+	_dpu_mdss_irq_domain_fini(dpu_mdss);
+irq_domain_error:
+clk_rate_error:
+	msm_dss_put_clk(mp->clk_config, mp->num_clk);
+clk_get_error:
+	devm_kfree(&pdev->dev, mp->clk_config);
+clk_parse_err:
+	if (dpu_mdss->mmio)
+		devm_iounmap(&pdev->dev, dpu_mdss->mmio);
+	dpu_mdss->mmio = NULL;
+	return ret;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
new file mode 100644
index 000000000000..d3d7ebf0c394
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -0,0 +1,1963 @@
+/*
+ * Copyright (C) 2014-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark at gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include "dpu_formats.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_hw_catalog_format.h"
+#include "dpu_trace.h"
+#include "dpu_crtc.h"
+#include "dpu_vbif.h"
+#include "dpu_plane.h"
+
+#define DPU_DEBUG_PLANE(pl, fmt, ...) DPU_DEBUG("plane%d " fmt,\
+		(pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_PLANE(pl, fmt, ...) DPU_ERROR("plane%d " fmt,\
+		(pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
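+/* dimension after decimation by a factor of 2^deci, rounded up */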
+#define DECIMATED_DIMENSION(dim, deci) (((dim) + ((1 << (deci)) - 1)) >> (deci))
+#define PHASE_STEP_SHIFT	21
+#define PHASE_STEP_UNIT_SCALE   ((int) (1 << PHASE_STEP_SHIFT))
+#define PHASE_RESIDUAL		15
+
+#define SHARP_STRENGTH_DEFAULT	32
+#define SHARP_EDGE_THR_DEFAULT	112
+#define SHARP_SMOOTH_THR_DEFAULT	8
+#define SHARP_NOISE_THR_DEFAULT	2
+
+#define DPU_NAME_SIZE  12
+
+#define DPU_PLANE_COLOR_FILL_FLAG	BIT(31)
+#define DPU_ZPOS_MAX 255
+
+/* multirect rect index */
+enum {
+	R0,
+	R1,
+	R_MAX
+};
+
+#define DPU_QSEED3_DEFAULT_PRELOAD_H 0x4
+#define DPU_QSEED3_DEFAULT_PRELOAD_V 0x3
+
+#define DEFAULT_REFRESH_RATE	60
+
+/**
+ * enum dpu_plane_qos - Different qos configurations for each pipe
+ *
+ * @DPU_PLANE_QOS_VBLANK_CTRL: Setup VBLANK qos for the pipe.
+ * @DPU_PLANE_QOS_VBLANK_AMORTIZE: Enables Amortization within pipe.
+ *	this configuration is mutually exclusive from VBLANK_CTRL.
+ * @DPU_PLANE_QOS_PANIC_CTRL: Setup panic for the pipe.
+ */
+enum dpu_plane_qos {
+	DPU_PLANE_QOS_VBLANK_CTRL = BIT(0),
+	DPU_PLANE_QOS_VBLANK_AMORTIZE = BIT(1),
+	DPU_PLANE_QOS_PANIC_CTRL = BIT(2),
+};
+
+/*
+ * struct dpu_plane - local dpu plane structure
+ * @aspace: address space pointer
+ * @csc_ptr: Points to dpu_csc_cfg structure to use for current
+ * @mplane_list: List of multirect planes of the same pipe
+ * @catalog: Points to dpu catalog structure
+ * @revalidate: force revalidation of all the plane properties
+ */
+struct dpu_plane {
+	struct drm_plane base;
+
+	struct mutex lock;
+
+	enum dpu_sspp pipe;
+	uint32_t features;      /* capabilities from catalog */
+	uint32_t nformats;
+	uint32_t formats[64];
+
+	struct dpu_hw_pipe *pipe_hw;
+	struct dpu_hw_pipe_cfg pipe_cfg;
+	struct dpu_hw_pipe_qos_cfg pipe_qos_cfg;
+	uint32_t color_fill;
+	bool is_error;
+	bool is_rt_pipe;
+	bool is_virtual;
+	struct list_head mplane_list;
+	struct dpu_mdss_cfg *catalog;
+
+	struct dpu_csc_cfg *csc_ptr;
+
+	const struct dpu_sspp_sub_blks *pipe_sblk;
+	char pipe_name[DPU_NAME_SIZE];
+
+	/* debugfs related stuff */
+	struct dentry *debugfs_root;
+	struct dpu_debugfs_regset32 debugfs_src;
+	struct dpu_debugfs_regset32 debugfs_scaler;
+	struct dpu_debugfs_regset32 debugfs_csc;
+	bool debugfs_default_scale;
+};
+
+#define to_dpu_plane(x) container_of(x, struct dpu_plane, base)
+
+static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
+{
+	struct msm_drm_private *priv;
+
+	if (!plane || !plane->dev)
+		return NULL;
+	priv = plane->dev->dev_private;
+	if (!priv)
+		return NULL;
+	return to_dpu_kms(priv->kms);
+}
+
+static bool dpu_plane_enabled(struct drm_plane_state *state)
+{
+	return state && state->fb && state->crtc;
+}
+
+static bool dpu_plane_sspp_enabled(struct drm_plane_state *state)
+{
+	return state && state->crtc;
+}
+
+/**
+ * _dpu_plane_calc_fill_level - calculate fill level of the given source format
+ * @plane:		Pointer to drm plane
+ * @fmt:		Pointer to source buffer format
+ * @src_width:		width of source buffer
+ * Return: fill level corresponding to the source buffer/format or 0 if error
+ */
+static inline int _dpu_plane_calc_fill_level(struct drm_plane *plane,
+		const struct dpu_format *fmt, u32 src_width)
+{
+	struct dpu_plane *pdpu, *tmp;
+	struct dpu_plane_state *pstate;
+	u32 fixed_buff_size;
+	u32 total_fl;
+
+	if (!plane || !fmt || !plane->state || !src_width || !fmt->bpp) {
+		DPU_ERROR("invalid arguments\n");
+		return 0;
+	}
+
+	pdpu = to_dpu_plane(plane);
+	pstate = to_dpu_plane_state(plane->state);
+	fixed_buff_size = pdpu->pipe_sblk->common->pixel_ram_size;
+
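+	/* size the fill level for the widest rect sharing this pipe */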
+	list_for_each_entry(tmp, &pdpu->mplane_list, mplane_list) {
+		if (!dpu_plane_enabled(tmp->base.state))
+			continue;
+		DPU_DEBUG("plane%d/%d src_width:%d/%d\n",
+				pdpu->base.base.id, tmp->base.base.id,
+				src_width,
+				drm_rect_width(&tmp->pipe_cfg.src_rect));
+		src_width = max_t(u32, src_width,
+				  drm_rect_width(&tmp->pipe_cfg.src_rect));
+	}
+
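+	/*
+	 * fill level approximates how many padded source lines fit in the
+	 * pipe's share of pixel RAM: ram_size / ((src_width + 32) * bpp)
+	 */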
+	if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
+		if (fmt->chroma_sample == DPU_CHROMA_420) {
+			/* NV12 */
+			total_fl = (fixed_buff_size / 2) /
+				((src_width + 32) * fmt->bpp);
+		} else {
+			/* non NV12 */
+			total_fl = (fixed_buff_size / 2) * 2 /
+				((src_width + 32) * fmt->bpp);
+		}
+	} else {
+		if (pstate->multirect_mode == DPU_SSPP_MULTIRECT_PARALLEL) {
+			total_fl = (fixed_buff_size / 2) * 2 /
+				((src_width + 32) * fmt->bpp);
+		} else {
+			total_fl = (fixed_buff_size) * 2 /
+				((src_width + 32) * fmt->bpp);
+		}
+	}
+
+	DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s w:%u fl:%u\n",
+			plane->base.id, pdpu->pipe - SSPP_VIG0,
+			(char *)&fmt->base.pixel_format,
+			src_width, total_fl);
+
+	return total_fl;
+}
+
+/**
+ * _dpu_plane_get_qos_lut - get LUT mapping based on fill level
+ * @tbl:		Pointer to LUT table
+ * @total_fl:		fill level
+ * Return: LUT setting corresponding to the fill level
+ */
+static u64 _dpu_plane_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
+		u32 total_fl)
+{
+	int i;
+
+	if (!tbl || !tbl->nentry || !tbl->entries)
+		return 0;
+
+	for (i = 0; i < tbl->nentry; i++)
+		if (total_fl <= tbl->entries[i].fl)
+			return tbl->entries[i].lut;
+
+	/* if last fl is zero, use as default */
+	if (!tbl->entries[i-1].fl)
+		return tbl->entries[i-1].lut;
+
+	return 0;
+}
+
+/**
+ * _dpu_plane_set_qos_lut - set QoS LUT of the given plane
+ * @plane:		Pointer to drm plane
+ * @fb:			Pointer to framebuffer associated with the given plane
+ */
+static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
+		struct drm_framebuffer *fb)
+{
+	struct dpu_plane *pdpu;
+	const struct dpu_format *fmt = NULL;
+	u64 qos_lut;
+	u32 total_fl = 0, lut_usage;
+
+	if (!plane || !fb) {
+		DPU_ERROR("invalid arguments plane %d fb %d\n",
+				plane != 0, fb != 0);
+		return;
+	}
+
+	pdpu = to_dpu_plane(plane);
+
+	if (!pdpu->pipe_hw || !pdpu->pipe_sblk || !pdpu->catalog) {
+		DPU_ERROR("invalid arguments\n");
+		return;
+	} else if (!pdpu->pipe_hw->ops.setup_creq_lut) {
+		return;
+	}
+
+	if (!pdpu->is_rt_pipe) {
+		lut_usage = DPU_QOS_LUT_USAGE_NRT;
+	} else {
+		fmt = dpu_get_dpu_format_ext(
+				fb->format->format,
+				fb->modifier);
+		total_fl = _dpu_plane_calc_fill_level(plane, fmt,
+				drm_rect_width(&pdpu->pipe_cfg.src_rect));
+
+		if (fmt && DPU_FORMAT_IS_LINEAR(fmt))
+			lut_usage = DPU_QOS_LUT_USAGE_LINEAR;
+		else
+			lut_usage = DPU_QOS_LUT_USAGE_MACROTILE;
+	}
+
+	qos_lut = _dpu_plane_get_qos_lut(
+			&pdpu->catalog->perf.qos_lut_tbl[lut_usage], total_fl);
+
+	pdpu->pipe_qos_cfg.creq_lut = qos_lut;
+
+	trace_dpu_perf_set_qos_luts(pdpu->pipe - SSPP_VIG0,
+			(fmt) ? fmt->base.pixel_format : 0,
+			pdpu->is_rt_pipe, total_fl, qos_lut, lut_usage);
+
+	DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s rt:%d fl:%u lut:0x%llx\n",
+			plane->base.id,
+			pdpu->pipe - SSPP_VIG0,
+			fmt ? (char *)&fmt->base.pixel_format : NULL,
+			pdpu->is_rt_pipe, total_fl, qos_lut);
+
+	pdpu->pipe_hw->ops.setup_creq_lut(pdpu->pipe_hw, &pdpu->pipe_qos_cfg);
+}
+
+/**
+ * _dpu_plane_set_panic_lut - set danger/safe LUT of the given plane
+ * @plane:		Pointer to drm plane
+ * @fb:			Pointer to framebuffer associated with the given plane
+ */
+static void _dpu_plane_set_danger_lut(struct drm_plane *plane,
+		struct drm_framebuffer *fb)
+{
+	struct dpu_plane *pdpu;
+	const struct dpu_format *fmt = NULL;
+	u32 danger_lut, safe_lut;
+
+	if (!plane || !fb) {
+		DPU_ERROR("invalid arguments\n");
+		return;
+	}
+
+	pdpu = to_dpu_plane(plane);
+
+	if (!pdpu->pipe_hw || !pdpu->pipe_sblk || !pdpu->catalog) {
+		DPU_ERROR("invalid arguments\n");
+		return;
+	} else if (!pdpu->pipe_hw->ops.setup_danger_safe_lut) {
+		return;
+	}
+
+	if (!pdpu->is_rt_pipe) {
+		danger_lut = pdpu->catalog->perf.danger_lut_tbl
+				[DPU_QOS_LUT_USAGE_NRT];
+		safe_lut = pdpu->catalog->perf.safe_lut_tbl
+				[DPU_QOS_LUT_USAGE_NRT];
+	} else {
+		fmt = dpu_get_dpu_format_ext(
+				fb->format->format,
+				fb->modifier);
+
+		if (fmt && DPU_FORMAT_IS_LINEAR(fmt)) {
+			danger_lut = pdpu->catalog->perf.danger_lut_tbl
+					[DPU_QOS_LUT_USAGE_LINEAR];
+			safe_lut = pdpu->catalog->perf.safe_lut_tbl
+					[DPU_QOS_LUT_USAGE_LINEAR];
+		} else {
+			danger_lut = pdpu->catalog->perf.danger_lut_tbl
+					[DPU_QOS_LUT_USAGE_MACROTILE];
+			safe_lut = pdpu->catalog->perf.safe_lut_tbl
+					[DPU_QOS_LUT_USAGE_MACROTILE];
+		}
+	}
+
+	pdpu->pipe_qos_cfg.danger_lut = danger_lut;
+	pdpu->pipe_qos_cfg.safe_lut = safe_lut;
+
+	trace_dpu_perf_set_danger_luts(pdpu->pipe - SSPP_VIG0,
+			(fmt) ? fmt->base.pixel_format : 0,
+			(fmt) ? fmt->fetch_mode : 0,
+			pdpu->pipe_qos_cfg.danger_lut,
+			pdpu->pipe_qos_cfg.safe_lut);
+
+	DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s mode:%d luts[0x%x, 0x%x]\n",
+		plane->base.id,
+		pdpu->pipe - SSPP_VIG0,
+		fmt ? (char *)&fmt->base.pixel_format : NULL,
+		fmt ? fmt->fetch_mode : -1,
+		pdpu->pipe_qos_cfg.danger_lut,
+		pdpu->pipe_qos_cfg.safe_lut);
+
+	pdpu->pipe_hw->ops.setup_danger_safe_lut(pdpu->pipe_hw,
+			&pdpu->pipe_qos_cfg);
+}
+
+/**
+ * _dpu_plane_set_qos_ctrl - set QoS control of the given plane
+ * @plane:		Pointer to drm plane
+ * @enable:		true to enable QoS control
+ * @flags:		QoS control mode (enum dpu_plane_qos)
+ */
+static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
+	bool enable, u32 flags)
+{
+	struct dpu_plane *pdpu;
+
+	if (!plane) {
+		DPU_ERROR("invalid arguments\n");
+		return;
+	}
+
+	pdpu = to_dpu_plane(plane);
+
+	if (!pdpu->pipe_hw || !pdpu->pipe_sblk) {
+		DPU_ERROR("invalid arguments\n");
+		return;
+	} else if (!pdpu->pipe_hw->ops.setup_qos_ctrl) {
+		return;
+	}
+
+	if (flags & DPU_PLANE_QOS_VBLANK_CTRL) {
+		pdpu->pipe_qos_cfg.creq_vblank = pdpu->pipe_sblk->creq_vblank;
+		pdpu->pipe_qos_cfg.danger_vblank =
+				pdpu->pipe_sblk->danger_vblank;
+		pdpu->pipe_qos_cfg.vblank_en = enable;
+	}
+
+	if (flags & DPU_PLANE_QOS_VBLANK_AMORTIZE) {
+		/* this feature overrules previous VBLANK_CTRL */
+		pdpu->pipe_qos_cfg.vblank_en = false;
+		pdpu->pipe_qos_cfg.creq_vblank = 0; /* clear vblank bits */
+	}
+
+	if (flags & DPU_PLANE_QOS_PANIC_CTRL)
+		pdpu->pipe_qos_cfg.danger_safe_en = enable;
+
+	if (!pdpu->is_rt_pipe) {
+		pdpu->pipe_qos_cfg.vblank_en = false;
+		pdpu->pipe_qos_cfg.danger_safe_en = false;
+	}
+
+	DPU_DEBUG("plane%u: pnum:%d ds:%d vb:%d pri[0x%x, 0x%x] is_rt:%d\n",
+		plane->base.id,
+		pdpu->pipe - SSPP_VIG0,
+		pdpu->pipe_qos_cfg.danger_safe_en,
+		pdpu->pipe_qos_cfg.vblank_en,
+		pdpu->pipe_qos_cfg.creq_vblank,
+		pdpu->pipe_qos_cfg.danger_vblank,
+		pdpu->is_rt_pipe);
+
+	pdpu->pipe_hw->ops.setup_qos_ctrl(pdpu->pipe_hw,
+			&pdpu->pipe_qos_cfg);
+}
+
+int dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
+{
+	struct dpu_plane *pdpu;
+	struct msm_drm_private *priv;
+	struct dpu_kms *dpu_kms;
+
+	if (!plane || !plane->dev) {
+		DPU_ERROR("invalid arguments\n");
+		return -EINVAL;
+	}
+
+	priv = plane->dev->dev_private;
+	if (!priv || !priv->kms) {
+		DPU_ERROR("invalid KMS reference\n");
+		return -EINVAL;
+	}
+
+	dpu_kms = to_dpu_kms(priv->kms);
+	pdpu = to_dpu_plane(plane);
+
+	if (!pdpu->is_rt_pipe)
+		goto end;
+
+	pm_runtime_get_sync(&dpu_kms->pdev->dev);
+	_dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL);
+	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+end:
+	return 0;
+}
+
+/**
+ * _dpu_plane_set_ot_limit - set OT limit for the given plane
+ * @plane:		Pointer to drm plane
+ * @crtc:		Pointer to drm crtc
+ */
+static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
+		struct drm_crtc *crtc)
+{
+	struct dpu_plane *pdpu;
+	struct dpu_vbif_set_ot_params ot_params;
+	struct msm_drm_private *priv;
+	struct dpu_kms *dpu_kms;
+
+	if (!plane || !plane->dev || !crtc) {
+		DPU_ERROR("invalid arguments plane %d crtc %d\n",
+				plane != 0, crtc != 0);
+		return;
+	}
+
+	priv = plane->dev->dev_private;
+	if (!priv || !priv->kms) {
+		DPU_ERROR("invalid KMS reference\n");
+		return;
+	}
+
+	dpu_kms = to_dpu_kms(priv->kms);
+	pdpu = to_dpu_plane(plane);
+	if (!pdpu->pipe_hw) {
+		DPU_ERROR("invalid pipe reference\n");
+		return;
+	}
+
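+	/* SSPPs fetch through the realtime VBIF, so program a read OT limit */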
+	memset(&ot_params, 0, sizeof(ot_params));
+	ot_params.xin_id = pdpu->pipe_hw->cap->xin_id;
+	ot_params.num = pdpu->pipe_hw->idx - SSPP_NONE;
+	ot_params.width = drm_rect_width(&pdpu->pipe_cfg.src_rect);
+	ot_params.height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
+	ot_params.is_wfd = !pdpu->is_rt_pipe;
+	ot_params.frame_rate = crtc->mode.vrefresh;
+	ot_params.vbif_idx = VBIF_RT;
+	ot_params.clk_ctrl = pdpu->pipe_hw->cap->clk_ctrl;
+	ot_params.rd = true;
+
+	dpu_vbif_set_ot_limit(dpu_kms, &ot_params);
+}
+
+/**
+ * _dpu_plane_set_qos_remap - set vbif QoS remap for the given plane
+ * @plane:		Pointer to drm plane
+ */
+static void _dpu_plane_set_qos_remap(struct drm_plane *plane)
+{
+	struct dpu_plane *pdpu;
+	struct dpu_vbif_set_qos_params qos_params;
+	struct msm_drm_private *priv;
+	struct dpu_kms *dpu_kms;
+
+	if (!plane || !plane->dev) {
+		DPU_ERROR("invalid arguments\n");
+		return;
+	}
+
+	priv = plane->dev->dev_private;
+	if (!priv || !priv->kms) {
+		DPU_ERROR("invalid KMS reference\n");
+		return;
+	}
+
+	dpu_kms = to_dpu_kms(priv->kms);
+	pdpu = to_dpu_plane(plane);
+	if (!pdpu->pipe_hw) {
+		DPU_ERROR("invalid pipe reference\n");
+		return;
+	}
+
+	memset(&qos_params, 0, sizeof(qos_params));
+	qos_params.vbif_idx = VBIF_RT;
+	qos_params.clk_ctrl = pdpu->pipe_hw->cap->clk_ctrl;
+	qos_params.xin_id = pdpu->pipe_hw->cap->xin_id;
+	qos_params.num = pdpu->pipe_hw->idx - SSPP_VIG0;
+	qos_params.is_rt = pdpu->is_rt_pipe;
+
+	DPU_DEBUG("plane%d pipe:%d vbif:%d xin:%d rt:%d, clk_ctrl:%d\n",
+			plane->base.id, qos_params.num,
+			qos_params.vbif_idx,
+			qos_params.xin_id, qos_params.is_rt,
+			qos_params.clk_ctrl);
+
+	dpu_vbif_set_qos_remap(dpu_kms, &qos_params);
+}
+
+/**
+ * _dpu_plane_get_aspace - gets the address space for the given plane
+ * @pdpu: Pointer to DPU plane object
+ * @pstate: Pointer to DPU plane state
+ * @aspace: Pointer to address space pointer to populate
+ * Return: 0 on success, error code otherwise
+ */
+static int _dpu_plane_get_aspace(
+		struct dpu_plane *pdpu,
+		struct dpu_plane_state *pstate,
+		struct msm_gem_address_space **aspace)
+{
+	struct dpu_kms *kms;
+
+	if (!pdpu || !pstate || !aspace) {
+		DPU_ERROR("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	kms = _dpu_plane_get_kms(&pdpu->base);
+	if (!kms) {
+		DPU_ERROR("invalid kms\n");
+		return -EINVAL;
+	}
+
+	*aspace = kms->base.aspace;
+
+	return 0;
+}
+
+static inline void _dpu_plane_set_scanout(struct drm_plane *plane,
+		struct dpu_plane_state *pstate,
+		struct dpu_hw_pipe_cfg *pipe_cfg,
+		struct drm_framebuffer *fb)
+{
+	struct dpu_plane *pdpu;
+	struct msm_gem_address_space *aspace = NULL;
+	int ret;
+
+	if (!plane || !pstate || !pipe_cfg || !fb) {
+		DPU_ERROR(
+			"invalid arg(s), plane %d state %d cfg %d fb %d\n",
+			plane != 0, pstate != 0, pipe_cfg != 0, fb != 0);
+		return;
+	}
+
+	pdpu = to_dpu_plane(plane);
+	if (!pdpu->pipe_hw) {
+		DPU_ERROR_PLANE(pdpu, "invalid pipe_hw\n");
+		return;
+	}
+
+	ret = _dpu_plane_get_aspace(pdpu, pstate, &aspace);
+	if (ret) {
+		DPU_ERROR_PLANE(pdpu, "Failed to get aspace %d\n", ret);
+		return;
+	}
+
+	ret = dpu_format_populate_layout(aspace, fb, &pipe_cfg->layout);
+	if (ret == -EAGAIN)
+		DPU_DEBUG_PLANE(pdpu, "not updating same src addrs\n");
+	else if (ret)
+		DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
+	else if (pdpu->pipe_hw->ops.setup_sourceaddress) {
+		trace_dpu_plane_set_scanout(pdpu->pipe_hw->idx,
+					    &pipe_cfg->layout,
+					    pstate->multirect_index);
+		pdpu->pipe_hw->ops.setup_sourceaddress(pdpu->pipe_hw, pipe_cfg,
+						pstate->multirect_index);
+	}
+}
+
+static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
+		struct dpu_plane_state *pstate,
+		uint32_t src_w, uint32_t src_h, uint32_t dst_w, uint32_t dst_h,
+		struct dpu_hw_scaler3_cfg *scale_cfg,
+		const struct dpu_format *fmt,
+		uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v)
+{
+	uint32_t i;
+
+	if (!pdpu || !pstate || !scale_cfg || !fmt || !chroma_subsmpl_h ||
+			!chroma_subsmpl_v) {
+		DPU_ERROR(
+			"pdpu %d pstate %d scale_cfg %d fmt %d smp_h %d smp_v %d\n",
+			!!pdpu, !!pstate, !!scale_cfg, !!fmt, chroma_subsmpl_h,
+			chroma_subsmpl_v);
+		return;
+	}
+
+	memset(scale_cfg, 0, sizeof(*scale_cfg));
+	memset(&pstate->pixel_ext, 0, sizeof(struct dpu_hw_pixel_ext));
+
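+	/*
+	 * the phase step is the src/dst scaling ratio in fixed point,
+	 * (src << PHASE_STEP_SHIFT) / dst
+	 */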
+	scale_cfg->phase_step_x[DPU_SSPP_COMP_0] =
+		mult_frac((1 << PHASE_STEP_SHIFT), src_w, dst_w);
+	scale_cfg->phase_step_y[DPU_SSPP_COMP_0] =
+		mult_frac((1 << PHASE_STEP_SHIFT), src_h, dst_h);
+
+
+	scale_cfg->phase_step_y[DPU_SSPP_COMP_1_2] =
+		scale_cfg->phase_step_y[DPU_SSPP_COMP_0] / chroma_subsmpl_v;
+	scale_cfg->phase_step_x[DPU_SSPP_COMP_1_2] =
+		scale_cfg->phase_step_x[DPU_SSPP_COMP_0] / chroma_subsmpl_h;
+
+	scale_cfg->phase_step_x[DPU_SSPP_COMP_2] =
+		scale_cfg->phase_step_x[DPU_SSPP_COMP_1_2];
+	scale_cfg->phase_step_y[DPU_SSPP_COMP_2] =
+		scale_cfg->phase_step_y[DPU_SSPP_COMP_1_2];
+
+	scale_cfg->phase_step_x[DPU_SSPP_COMP_3] =
+		scale_cfg->phase_step_x[DPU_SSPP_COMP_0];
+	scale_cfg->phase_step_y[DPU_SSPP_COMP_3] =
+		scale_cfg->phase_step_y[DPU_SSPP_COMP_0];
+
+	for (i = 0; i < DPU_MAX_PLANES; i++) {
+		scale_cfg->src_width[i] = src_w;
+		scale_cfg->src_height[i] = src_h;
+		if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) {
+			scale_cfg->src_width[i] /= chroma_subsmpl_h;
+			scale_cfg->src_height[i] /= chroma_subsmpl_v;
+		}
+		scale_cfg->preload_x[i] = DPU_QSEED3_DEFAULT_PRELOAD_H;
+		scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V;
+		pstate->pixel_ext.num_ext_pxls_top[i] =
+			scale_cfg->src_height[i];
+		pstate->pixel_ext.num_ext_pxls_left[i] =
+			scale_cfg->src_width[i];
+	}
+	if (!(DPU_FORMAT_IS_YUV(fmt)) && (src_h == dst_h)
+		&& (src_w == dst_w))
+		return;
+
+	scale_cfg->dst_width = dst_w;
+	scale_cfg->dst_height = dst_h;
+	scale_cfg->y_rgb_filter_cfg = DPU_SCALE_BIL;
+	scale_cfg->uv_filter_cfg = DPU_SCALE_BIL;
+	scale_cfg->alpha_filter_cfg = DPU_SCALE_ALPHA_BIL;
+	scale_cfg->lut_flag = 0;
+	scale_cfg->blend_cfg = 1;
+	scale_cfg->enable = 1;
+}
+
+static inline void _dpu_plane_setup_csc(struct dpu_plane *pdpu)
+{
+	static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
+		{
+			/* S15.16 format */
+			0x00012A00, 0x00000000, 0x00019880,
+			0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+			0x00012A00, 0x00020480, 0x00000000,
+		},
+		/* signed bias */
+		{ 0xfff0, 0xff80, 0xff80,},
+		{ 0x0, 0x0, 0x0,},
+		/* unsigned clamp */
+		{ 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
+		{ 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,},
+	};
+	static const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L = {
+		{
+			/* S15.16 format */
+			0x00012A00, 0x00000000, 0x00019880,
+			0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+			0x00012A00, 0x00020480, 0x00000000,
+		},
+		/* signed bias */
+		{ 0xffc0, 0xfe00, 0xfe00,},
+		{ 0x0, 0x0, 0x0,},
+		/* unsigned clamp */
+		{ 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
+		{ 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,},
+	};
+
+	if (!pdpu) {
+		DPU_ERROR("invalid plane\n");
+		return;
+	}
+
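+	/* the 10-bit table differs only in its bias and clamp ranges */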
+	if (BIT(DPU_SSPP_CSC_10BIT) & pdpu->features)
+		pdpu->csc_ptr = (struct dpu_csc_cfg *)&dpu_csc10_YUV2RGB_601L;
+	else
+		pdpu->csc_ptr = (struct dpu_csc_cfg *)&dpu_csc_YUV2RGB_601L;
+
+	DPU_DEBUG_PLANE(pdpu, "using 0x%X 0x%X 0x%X...\n",
+			pdpu->csc_ptr->csc_mv[0],
+			pdpu->csc_ptr->csc_mv[1],
+			pdpu->csc_ptr->csc_mv[2]);
+}
+
+static void _dpu_plane_setup_scaler(struct dpu_plane *pdpu,
+		struct dpu_plane_state *pstate,
+		const struct dpu_format *fmt, bool color_fill)
+{
+	struct dpu_hw_pixel_ext *pe;
+	uint32_t chroma_subsmpl_h, chroma_subsmpl_v;
+
+	if (!pdpu || !fmt || !pstate) {
+		DPU_ERROR("invalid arg(s), plane %d fmt %d state %d\n",
+				pdpu != 0, fmt != 0, pstate != 0);
+		return;
+	}
+
+	pe = &pstate->pixel_ext;
+
+	/* query the format's chroma subsampling factors */
+	chroma_subsmpl_h =
+		drm_format_horz_chroma_subsampling(fmt->base.pixel_format);
+	chroma_subsmpl_v =
+		drm_format_vert_chroma_subsampling(fmt->base.pixel_format);
+
+	/* update scaler. calculate default config for QSEED3 */
+	_dpu_plane_setup_scaler3(pdpu, pstate,
+			drm_rect_width(&pdpu->pipe_cfg.src_rect),
+			drm_rect_height(&pdpu->pipe_cfg.src_rect),
+			drm_rect_width(&pdpu->pipe_cfg.dst_rect),
+			drm_rect_height(&pdpu->pipe_cfg.dst_rect),
+			&pstate->scaler3_cfg, fmt,
+			chroma_subsmpl_h, chroma_subsmpl_v);
+}
+
+/**
+ * _dpu_plane_color_fill - enables color fill on plane
+ * @pdpu:   Pointer to DPU plane object
+ * @color:  RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
+ * @alpha:  8-bit fill alpha value, 255 selects 100% alpha
+ * Returns: 0 on success
+ */
+static int _dpu_plane_color_fill(struct dpu_plane *pdpu,
+		uint32_t color, uint32_t alpha)
+{
+	const struct dpu_format *fmt;
+	const struct drm_plane *plane;
+	struct dpu_plane_state *pstate;
+
+	if (!pdpu || !pdpu->base.state) {
+		DPU_ERROR("invalid plane\n");
+		return -EINVAL;
+	}
+
+	if (!pdpu->pipe_hw) {
+		DPU_ERROR_PLANE(pdpu, "invalid plane h/w pointer\n");
+		return -EINVAL;
+	}
+
+	plane = &pdpu->base;
+	pstate = to_dpu_plane_state(plane->state);
+
+	DPU_DEBUG_PLANE(pdpu, "\n");
+
+	/*
+	 * select fill format to match user property expectation,
+	 * h/w only supports RGB variants
+	 */
+	fmt = dpu_get_dpu_format(DRM_FORMAT_ABGR8888);
+
+	/* update sspp */
+	if (fmt && pdpu->pipe_hw->ops.setup_solidfill) {
+		pdpu->pipe_hw->ops.setup_solidfill(pdpu->pipe_hw,
+				(color & 0xFFFFFF) | ((alpha & 0xFF) << 24),
+				pstate->multirect_index);
+
+		/* override scaler/decimation if solid fill */
+		pdpu->pipe_cfg.src_rect.x1 = 0;
+		pdpu->pipe_cfg.src_rect.y1 = 0;
+		pdpu->pipe_cfg.src_rect.x2 =
+			drm_rect_width(&pdpu->pipe_cfg.dst_rect);
+		pdpu->pipe_cfg.src_rect.y2 =
+			drm_rect_height(&pdpu->pipe_cfg.dst_rect);
+		_dpu_plane_setup_scaler(pdpu, pstate, fmt, true);
+
+		if (pdpu->pipe_hw->ops.setup_format)
+			pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw,
+					fmt, DPU_SSPP_SOLID_FILL,
+					pstate->multirect_index);
+
+		if (pdpu->pipe_hw->ops.setup_rects)
+			pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw,
+					&pdpu->pipe_cfg,
+					pstate->multirect_index);
+
+		if (pdpu->pipe_hw->ops.setup_pe)
+			pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw,
+					&pstate->pixel_ext);
+
+		if (pdpu->pipe_hw->ops.setup_scaler &&
+				pstate->multirect_index != DPU_SSPP_RECT_1)
+			pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw,
+					&pdpu->pipe_cfg, &pstate->pixel_ext,
+					&pstate->scaler3_cfg);
+	}
+
+	return 0;
+}
+
+void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state)
+{
+	struct dpu_plane_state *pstate;
+
+	if (!drm_state)
+		return;
+
+	pstate = to_dpu_plane_state(drm_state);
+
+	pstate->multirect_index = DPU_SSPP_RECT_SOLO;
+	pstate->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+}
+
+int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane)
+{
+	struct dpu_plane_state *pstate[R_MAX];
+	const struct drm_plane_state *drm_state[R_MAX];
+	struct drm_rect src[R_MAX], dst[R_MAX];
+	struct dpu_plane *dpu_plane[R_MAX];
+	const struct dpu_format *fmt[R_MAX];
+	int i, buffer_lines;
+	unsigned int max_tile_height = 1;
+	bool parallel_fetch_qualified = true;
+	bool has_tiled_rect = false;
+
+	for (i = 0; i < R_MAX; i++) {
+		const struct msm_format *msm_fmt;
+
+		drm_state[i] = i ? plane->r1 : plane->r0;
+		msm_fmt = msm_framebuffer_format(drm_state[i]->fb);
+		fmt[i] = to_dpu_format(msm_fmt);
+
+		if (DPU_FORMAT_IS_UBWC(fmt[i])) {
+			has_tiled_rect = true;
+			if (fmt[i]->tile_height > max_tile_height)
+				max_tile_height = fmt[i]->tile_height;
+		}
+	}
+
+	for (i = 0; i < R_MAX; i++) {
+		int width_threshold;
+
+		pstate[i] = to_dpu_plane_state(drm_state[i]);
+		dpu_plane[i] = to_dpu_plane(drm_state[i]->plane);
+
+		if (pstate[i] == NULL) {
+			DPU_ERROR("DPU plane state of plane id %d is NULL\n",
+				drm_state[i]->plane->base.id);
+			return -EINVAL;
+		}
+
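+		/* plane src coordinates are Q16.16 fixed point */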
+		src[i].x1 = drm_state[i]->src_x >> 16;
+		src[i].y1 = drm_state[i]->src_y >> 16;
+		src[i].x2 = src[i].x1 + (drm_state[i]->src_w >> 16);
+		src[i].y2 = src[i].y1 + (drm_state[i]->src_h >> 16);
+
+		dst[i] = drm_plane_state_dest(drm_state[i]);
+
+		if (drm_rect_calc_hscale(&src[i], &dst[i], 1, 1) != 1 ||
+		    drm_rect_calc_vscale(&src[i], &dst[i], 1, 1) != 1) {
+			DPU_ERROR_PLANE(dpu_plane[i],
+				"scaling is not supported in multirect mode\n");
+			return -EINVAL;
+		}
+
+		if (DPU_FORMAT_IS_YUV(fmt[i])) {
+			DPU_ERROR_PLANE(dpu_plane[i],
+				"Unsupported format for multirect mode\n");
+			return -EINVAL;
+		}
+
+		/*
+		 * SSPP PD_MEM is split in half - one for each RECT.
+		 * Tiled formats need 5 lines of buffering while fetching
+		 * whereas linear formats need only 2 lines.
+		 * So we cannot support more than half of the supported SSPP
+		 * width for tiled formats.
+		 */
+		width_threshold = dpu_plane[i]->pipe_sblk->common->maxlinewidth;
+		if (has_tiled_rect)
+			width_threshold /= 2;
+
+		if (parallel_fetch_qualified &&
+		    drm_rect_width(&src[i]) > width_threshold)
+			parallel_fetch_qualified = false;
+
+	}
+
+	/* Validate RECT's and set the mode */
+
+	/* Prefer PARALLEL FETCH Mode over TIME_MX Mode */
+	if (parallel_fetch_qualified) {
+		pstate[R0]->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+		pstate[R1]->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+
+		goto done;
+	}
+
+	/* TIME_MX Mode */
+	buffer_lines = 2 * max_tile_height;
+
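+	/* dst rects must be vertically separated by the buffered line count */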
+	if (dst[R1].y1 >= dst[R0].y2 + buffer_lines ||
+	    dst[R0].y1 >= dst[R1].y2 + buffer_lines) {
+		pstate[R0]->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+		pstate[R1]->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+	} else {
+		DPU_ERROR(
+			"No multirect mode possible for the planes (%d - %d)\n",
+			drm_state[R0]->plane->base.id,
+			drm_state[R1]->plane->base.id);
+		return -EINVAL;
+	}
+
+done:
+	if (dpu_plane[R0]->is_virtual) {
+		pstate[R0]->multirect_index = DPU_SSPP_RECT_1;
+		pstate[R1]->multirect_index = DPU_SSPP_RECT_0;
+	} else {
+		pstate[R0]->multirect_index = DPU_SSPP_RECT_0;
+		pstate[R1]->multirect_index = DPU_SSPP_RECT_1;
+	}
+
+	DPU_DEBUG_PLANE(dpu_plane[R0], "R0: %d - %d\n",
+		pstate[R0]->multirect_mode, pstate[R0]->multirect_index);
+	DPU_DEBUG_PLANE(dpu_plane[R1], "R1: %d - %d\n",
+		pstate[R1]->multirect_mode, pstate[R1]->multirect_index);
+	return 0;
+}
+
+/**
+ * dpu_plane_get_ctl_flush - get control flush for the given plane
+ * @plane: Pointer to drm plane structure
+ * @ctl: Pointer to hardware control driver
+ * @flush_sspp: Pointer to sspp flush control word
+ */
+void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
+		u32 *flush_sspp)
+{
+	struct dpu_plane_state *pstate;
+
+	if (!plane || !flush_sspp) {
+		DPU_ERROR("invalid parameters\n");
+		return;
+	}
+
+	pstate = to_dpu_plane_state(plane->state);
+
+	*flush_sspp = ctl->ops.get_bitmask_sspp(ctl, dpu_plane_pipe(plane));
+}
+
+static int dpu_plane_prepare_fb(struct drm_plane *plane,
+		struct drm_plane_state *new_state)
+{
+	struct drm_framebuffer *fb = new_state->fb;
+	struct dpu_plane *pdpu = to_dpu_plane(plane);
+	struct dpu_plane_state *pstate = to_dpu_plane_state(new_state);
+	struct dpu_hw_fmt_layout layout;
+	struct drm_gem_object *obj;
+	struct msm_gem_object *msm_obj;
+	struct dma_fence *fence;
+	struct msm_gem_address_space *aspace;
+	int ret;
+
+	if (!new_state->fb)
+		return 0;
+
+	DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", fb->base.id);
+
+	ret = _dpu_plane_get_aspace(pdpu, pstate, &aspace);
+	if (ret) {
+		DPU_ERROR_PLANE(pdpu, "Failed to get aspace\n");
+		return ret;
+	}
+
+	/* cache aspace */
+	pstate->aspace = aspace;
+
+	/*
+	 * TODO: Need to sort out the msm_framebuffer_prepare() call below so
+	 *       we can use msm_atomic_prepare_fb() instead of doing the
+	 *       implicit fence and fb prepare by hand here.
+	 */
+	obj = msm_framebuffer_bo(new_state->fb, 0);
+	msm_obj = to_msm_bo(obj);
+	fence = reservation_object_get_excl_rcu(msm_obj->resv);
+	if (fence)
+		drm_atomic_set_fence_for_plane(new_state, fence);
+
+	if (pstate->aspace) {
+		ret = msm_framebuffer_prepare(new_state->fb,
+				pstate->aspace);
+		if (ret) {
+			DPU_ERROR("failed to prepare framebuffer\n");
+			return ret;
+		}
+	}
+
+	/* validate framebuffer layout before commit */
+	ret = dpu_format_populate_layout(pstate->aspace,
+			new_state->fb, &layout);
+	if (ret) {
+		DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void dpu_plane_cleanup_fb(struct drm_plane *plane,
+		struct drm_plane_state *old_state)
+{
+	struct dpu_plane *pdpu = to_dpu_plane(plane);
+	struct dpu_plane_state *old_pstate;
+
+	if (!old_state || !old_state->fb)
+		return;
+
+	old_pstate = to_dpu_plane_state(old_state);
+
+	DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", old_state->fb->base.id);
+
+	msm_framebuffer_cleanup(old_state->fb, old_pstate->aspace);
+}
+
+static bool dpu_plane_validate_src(struct drm_rect *src,
+				   struct drm_rect *fb_rect,
+				   uint32_t min_src_size)
+{
+	/* Ensure fb size is supported */
+	if (drm_rect_width(fb_rect) > MAX_IMG_WIDTH ||
+	    drm_rect_height(fb_rect) > MAX_IMG_HEIGHT)
+		return false;
+
+	/* Ensure src rect is above the minimum size */
+	if (drm_rect_width(src) < min_src_size ||
+	    drm_rect_height(src) < min_src_size)
+		return false;
+
+	/* Ensure src is fully encapsulated in fb */
+	return drm_rect_intersect(fb_rect, src) &&
+		drm_rect_equals(fb_rect, src);
+}
+
+static int dpu_plane_sspp_atomic_check(struct drm_plane *plane,
+		struct drm_plane_state *state)
+{
+	int ret = 0;
+	struct dpu_plane *pdpu;
+	struct dpu_plane_state *pstate;
+	const struct dpu_format *fmt;
+	struct drm_rect src, dst, fb_rect = { 0 };
+	uint32_t max_upscale = 1, max_downscale = 1;
+	uint32_t min_src_size, max_linewidth;
+	int hscale = 1, vscale = 1;
+
+	if (!plane || !state) {
+		DPU_ERROR("invalid arg(s), plane %d state %d\n",
+				plane != 0, state != 0);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	pdpu = to_dpu_plane(plane);
+	pstate = to_dpu_plane_state(state);
+
+	if (!pdpu->pipe_sblk) {
+		DPU_ERROR_PLANE(pdpu, "invalid catalog\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	src.x1 = state->src_x >> 16;
+	src.y1 = state->src_y >> 16;
+	src.x2 = src.x1 + (state->src_w >> 16);
+	src.y2 = src.y1 + (state->src_h >> 16);
+
+	dst = drm_plane_state_dest(state);
+
+	fb_rect.x2 = state->fb->width;
+	fb_rect.y2 = state->fb->height;
+
+	max_linewidth = pdpu->pipe_sblk->common->maxlinewidth;
+
+	if (pdpu->features & DPU_SSPP_SCALER) {
+		max_downscale = pdpu->pipe_sblk->maxdwnscale;
+		max_upscale = pdpu->pipe_sblk->maxupscale;
+	}
+	if (drm_rect_width(&src) < drm_rect_width(&dst))
+		hscale = drm_rect_calc_hscale(&src, &dst, 1, max_upscale);
+	else
+		hscale = drm_rect_calc_hscale(&dst, &src, 1, max_downscale);
+	if (drm_rect_height(&src) < drm_rect_height(&dst))
+		vscale = drm_rect_calc_vscale(&src, &dst, 1, max_upscale);
+	else
+		vscale = drm_rect_calc_vscale(&dst, &src, 1, max_downscale);
+
+	DPU_DEBUG_PLANE(pdpu, "check %d -> %d\n",
+		dpu_plane_enabled(plane->state), dpu_plane_enabled(state));
+
+	if (!dpu_plane_enabled(state))
+		goto exit;
+
+	fmt = to_dpu_format(msm_framebuffer_format(state->fb));
+
+	min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
+
+	if (DPU_FORMAT_IS_YUV(fmt) &&
+		(!(pdpu->features & DPU_SSPP_SCALER) ||
+		 !(pdpu->features & (BIT(DPU_SSPP_CSC)
+		 | BIT(DPU_SSPP_CSC_10BIT))))) {
+		DPU_ERROR_PLANE(pdpu,
+				"plane doesn't have scaler/csc for yuv\n");
+		ret = -EINVAL;
+
+	/* check src bounds */
+	} else if (!dpu_plane_validate_src(&src, &fb_rect, min_src_size)) {
+		DPU_ERROR_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n",
+				DRM_RECT_ARG(&src));
+		ret = -E2BIG;
+
+	/* valid yuv image */
+	} else if (DPU_FORMAT_IS_YUV(fmt) &&
+		   (src.x1 & 0x1 || src.y1 & 0x1 ||
+		    drm_rect_width(&src) & 0x1 ||
+		    drm_rect_height(&src) & 0x1)) {
+		DPU_ERROR_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n",
+				DRM_RECT_ARG(&src));
+		ret = -EINVAL;
+
+	/* min dst support */
+	} else if (drm_rect_width(&dst) < 0x1 || drm_rect_height(&dst) < 0x1) {
+		DPU_ERROR_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n",
+				DRM_RECT_ARG(&dst));
+		ret = -EINVAL;
+
+	/* check source width against the maximum line width */
+	} else if (drm_rect_width(&src) > max_linewidth) {
+		DPU_ERROR_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
+				DRM_RECT_ARG(&src), max_linewidth);
+		ret = -E2BIG;
+
+	/* check scaler capability */
+	} else if (hscale < 0 || vscale < 0) {
+		DPU_ERROR_PLANE(pdpu, "invalid scaling requested src="
+				DRM_RECT_FMT " dst=" DRM_RECT_FMT "\n",
+				DRM_RECT_ARG(&src), DRM_RECT_ARG(&dst));
+		ret = -E2BIG;
+	}
+
+exit:
+	return ret;
+}
+
+static int dpu_plane_atomic_check(struct drm_plane *plane,
+		struct drm_plane_state *state)
+{
+	if (!state->fb)
+		return 0;
+
+	DPU_DEBUG_PLANE(to_dpu_plane(plane), "\n");
+
+	return dpu_plane_sspp_atomic_check(plane, state);
+}
+
+void dpu_plane_flush(struct drm_plane *plane)
+{
+	struct dpu_plane *pdpu;
+	struct dpu_plane_state *pstate;
+
+	if (!plane || !plane->state) {
+		DPU_ERROR("invalid plane\n");
+		return;
+	}
+
+	pdpu = to_dpu_plane(plane);
+	pstate = to_dpu_plane_state(plane->state);
+
+	/*
+	 * These updates have to be done immediately before the plane flush
+	 * timing, and may not be moved to the atomic_update/mode_set functions.
+	 */
+	if (pdpu->is_error)
+		/* force white frame with 100% alpha pipe output on error */
+		_dpu_plane_color_fill(pdpu, 0xFFFFFF, 0xFF);
+	else if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG)
+		/* force 100% alpha */
+		_dpu_plane_color_fill(pdpu, pdpu->color_fill, 0xFF);
+	else if (pdpu->pipe_hw && pdpu->csc_ptr && pdpu->pipe_hw->ops.setup_csc)
+		pdpu->pipe_hw->ops.setup_csc(pdpu->pipe_hw, pdpu->csc_ptr);
+
+	/* flag h/w flush complete */
+	if (plane->state)
+		pstate->pending = false;
+}
+
+/**
+ * dpu_plane_set_error - enable/disable error condition
+ * @plane: pointer to drm_plane structure
+ * @error: error status to set
+ */
+void dpu_plane_set_error(struct drm_plane *plane, bool error)
+{
+	struct dpu_plane *pdpu;
+
+	if (!plane)
+		return;
+
+	pdpu = to_dpu_plane(plane);
+	pdpu->is_error = error;
+}
+
+static int dpu_plane_sspp_atomic_update(struct drm_plane *plane,
+				struct drm_plane_state *old_state)
+{
+	uint32_t nplanes, src_flags;
+	struct dpu_plane *pdpu;
+	struct drm_plane_state *state;
+	struct dpu_plane_state *pstate;
+	struct dpu_plane_state *old_pstate;
+	const struct dpu_format *fmt;
+	struct drm_crtc *crtc;
+	struct drm_framebuffer *fb;
+	struct drm_rect src, dst;
+
+	if (!plane) {
+		DPU_ERROR("invalid plane\n");
+		return -EINVAL;
+	} else if (!plane->state) {
+		DPU_ERROR("invalid plane state\n");
+		return -EINVAL;
+	} else if (!old_state) {
+		DPU_ERROR("invalid old state\n");
+		return -EINVAL;
+	}
+
+	pdpu = to_dpu_plane(plane);
+	state = plane->state;
+
+	pstate = to_dpu_plane_state(state);
+
+	old_pstate = to_dpu_plane_state(old_state);
+
+	crtc = state->crtc;
+	fb = state->fb;
+	if (!crtc || !fb) {
+		DPU_ERROR_PLANE(pdpu, "invalid crtc %d or fb %d\n",
+				crtc != 0, fb != 0);
+		return -EINVAL;
+	}
+	fmt = to_dpu_format(msm_framebuffer_format(fb));
+	nplanes = fmt->num_planes;
+
+	memset(&(pdpu->pipe_cfg), 0, sizeof(struct dpu_hw_pipe_cfg));
+
+	_dpu_plane_set_scanout(plane, pstate, &pdpu->pipe_cfg, fb);
+
+	pstate->pending = true;
+
+	pdpu->is_rt_pipe = (dpu_crtc_get_client_type(crtc) != NRT_CLIENT);
+	_dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL);
+
+	src.x1 = state->src_x >> 16;
+	src.y1 = state->src_y >> 16;
+	src.x2 = src.x1 + (state->src_w >> 16);
+	src.y2 = src.y1 + (state->src_h >> 16);
+
+	dst = drm_plane_state_dest(state);
+
+	DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FMT "->crtc%u " DRM_RECT_FMT
+			", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_ARG(&src),
+			crtc->base.id, DRM_RECT_ARG(&dst),
+			(char *)&fmt->base.pixel_format,
+			DPU_FORMAT_IS_UBWC(fmt));
+
+	pdpu->pipe_cfg.src_rect = src;
+	pdpu->pipe_cfg.dst_rect = dst;
+
+	_dpu_plane_setup_scaler(pdpu, pstate, fmt, false);
+
+	/* override for color fill */
+	if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG) {
+		/* skip remaining processing on color fill */
+		return 0;
+	}
+
+	if (pdpu->pipe_hw->ops.setup_rects) {
+		pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw,
+				&pdpu->pipe_cfg,
+				pstate->multirect_index);
+	}
+
+	if (pdpu->pipe_hw->ops.setup_pe &&
+			(pstate->multirect_index != DPU_SSPP_RECT_1))
+		pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw,
+				&pstate->pixel_ext);
+
+	/*
+	 * when programmed in multirect mode, the scaler block will be
+	 * bypassed. Still we need to update alpha and bitwidth
+	 * ONLY for RECT0
+	 */
+	if (pdpu->pipe_hw->ops.setup_scaler &&
+			pstate->multirect_index != DPU_SSPP_RECT_1)
+		pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw,
+				&pdpu->pipe_cfg, &pstate->pixel_ext,
+				&pstate->scaler3_cfg);
+
+	if (pdpu->pipe_hw->ops.setup_multirect)
+		pdpu->pipe_hw->ops.setup_multirect(
+				pdpu->pipe_hw,
+				pstate->multirect_index,
+				pstate->multirect_mode);
+
+	if (pdpu->pipe_hw->ops.setup_format) {
+		src_flags = 0x0;
+
+		/* update format */
+		pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw, fmt, src_flags,
+				pstate->multirect_index);
+
+		if (pdpu->pipe_hw->ops.setup_cdp) {
+			struct dpu_hw_pipe_cdp_cfg *cdp_cfg = &pstate->cdp_cfg;
+
+			memset(cdp_cfg, 0, sizeof(struct dpu_hw_pipe_cdp_cfg));
+
+			cdp_cfg->enable = pdpu->catalog->perf.cdp_cfg
+					[DPU_PERF_CDP_USAGE_RT].rd_enable;
+			cdp_cfg->ubwc_meta_enable =
+					DPU_FORMAT_IS_UBWC(fmt);
+			cdp_cfg->tile_amortize_enable =
+					DPU_FORMAT_IS_UBWC(fmt) ||
+					DPU_FORMAT_IS_TILE(fmt);
+			cdp_cfg->preload_ahead = DPU_SSPP_CDP_PRELOAD_AHEAD_64;
+
+			pdpu->pipe_hw->ops.setup_cdp(pdpu->pipe_hw, cdp_cfg);
+		}
+
+		/* update csc */
+		if (DPU_FORMAT_IS_YUV(fmt))
+			_dpu_plane_setup_csc(pdpu);
+		else
+			pdpu->csc_ptr = NULL;
+	}
+
+	_dpu_plane_set_qos_lut(plane, fb);
+	_dpu_plane_set_danger_lut(plane, fb);
+
+	if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+		_dpu_plane_set_qos_ctrl(plane, true, DPU_PLANE_QOS_PANIC_CTRL);
+		_dpu_plane_set_ot_limit(plane, crtc);
+	}
+
+	_dpu_plane_set_qos_remap(plane);
+	return 0;
+}
+
+static void _dpu_plane_atomic_disable(struct drm_plane *plane,
+				struct drm_plane_state *old_state)
+{
+	struct dpu_plane *pdpu;
+	struct drm_plane_state *state;
+	struct dpu_plane_state *pstate;
+
+	if (!plane) {
+		DPU_ERROR("invalid plane\n");
+		return;
+	} else if (!plane->state) {
+		DPU_ERROR("invalid plane state\n");
+		return;
+	} else if (!old_state) {
+		DPU_ERROR("invalid old state\n");
+		return;
+	}
+
+	pdpu = to_dpu_plane(plane);
+	state = plane->state;
+	pstate = to_dpu_plane_state(state);
+
+	trace_dpu_plane_disable(DRMID(plane), is_dpu_plane_virtual(plane),
+				pstate->multirect_mode);
+
+	pstate->pending = true;
+
+	if (is_dpu_plane_virtual(plane) &&
+			pdpu->pipe_hw && pdpu->pipe_hw->ops.setup_multirect)
+		pdpu->pipe_hw->ops.setup_multirect(pdpu->pipe_hw,
+				DPU_SSPP_RECT_SOLO, DPU_SSPP_MULTIRECT_NONE);
+}
+
+static void dpu_plane_atomic_update(struct drm_plane *plane,
+				struct drm_plane_state *old_state)
+{
+	struct dpu_plane *pdpu;
+	struct drm_plane_state *state;
+
+	if (!plane) {
+		DPU_ERROR("invalid plane\n");
+		return;
+	} else if (!plane->state) {
+		DPU_ERROR("invalid plane state\n");
+		return;
+	}
+
+	pdpu = to_dpu_plane(plane);
+	pdpu->is_error = false;
+	state = plane->state;
+
+	DPU_DEBUG_PLANE(pdpu, "\n");
+
+	if (!dpu_plane_sspp_enabled(state)) {
+		_dpu_plane_atomic_disable(plane, old_state);
+	} else {
+		int ret;
+
+		ret = dpu_plane_sspp_atomic_update(plane, old_state);
+		/* atomic_check should have ensured that this doesn't fail */
+		WARN_ON(ret < 0);
+	}
+}
+
+void dpu_plane_restore(struct drm_plane *plane)
+{
+	struct dpu_plane *pdpu;
+
+	if (!plane || !plane->state) {
+		DPU_ERROR("invalid plane\n");
+		return;
+	}
+
+	pdpu = to_dpu_plane(plane);
+
+	DPU_DEBUG_PLANE(pdpu, "\n");
+
+	/* last plane state is same as current state */
+	dpu_plane_atomic_update(plane, plane->state);
+}
+
+static void dpu_plane_destroy(struct drm_plane *plane)
+{
+	struct dpu_plane *pdpu = plane ? to_dpu_plane(plane) : NULL;
+
+	DPU_DEBUG_PLANE(pdpu, "\n");
+
+	if (pdpu) {
+		_dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL);
+
+		mutex_destroy(&pdpu->lock);
+
+		drm_plane_helper_disable(plane);
+
+		/* this will destroy the states as well */
+		drm_plane_cleanup(plane);
+
+		if (pdpu->pipe_hw)
+			dpu_hw_sspp_destroy(pdpu->pipe_hw);
+
+		kfree(pdpu);
+	}
+}
+
+static void dpu_plane_destroy_state(struct drm_plane *plane,
+		struct drm_plane_state *state)
+{
+	struct dpu_plane_state *pstate;
+
+	if (!plane || !state) {
+		DPU_ERROR("invalid arg(s), plane %d state %d\n",
+				plane != 0, state != 0);
+		return;
+	}
+
+	pstate = to_dpu_plane_state(state);
+
+	/* remove ref count for frame buffers */
+	if (state->fb)
+		drm_framebuffer_put(state->fb);
+
+	kfree(pstate);
+}
+
+static struct drm_plane_state *
+dpu_plane_duplicate_state(struct drm_plane *plane)
+{
+	struct dpu_plane *pdpu;
+	struct dpu_plane_state *pstate;
+	struct dpu_plane_state *old_state;
+
+	if (!plane) {
+		DPU_ERROR("invalid plane\n");
+		return NULL;
+	} else if (!plane->state) {
+		DPU_ERROR("invalid plane state\n");
+		return NULL;
+	}
+
+	old_state = to_dpu_plane_state(plane->state);
+	pdpu = to_dpu_plane(plane);
+	pstate = kmemdup(old_state, sizeof(*old_state), GFP_KERNEL);
+	if (!pstate) {
+		DPU_ERROR_PLANE(pdpu, "failed to allocate state\n");
+		return NULL;
+	}
+
+	DPU_DEBUG_PLANE(pdpu, "\n");
+
+	pstate->pending = false;
+
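+	/* the helper takes fresh references (e.g. on the fb) for the copy */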
+	__drm_atomic_helper_plane_duplicate_state(plane, &pstate->base);
+
+	return &pstate->base;
+}
+
+static void dpu_plane_reset(struct drm_plane *plane)
+{
+	struct dpu_plane *pdpu;
+	struct dpu_plane_state *pstate;
+
+	if (!plane) {
+		DPU_ERROR("invalid plane\n");
+		return;
+	}
+
+	pdpu = to_dpu_plane(plane);
+	DPU_DEBUG_PLANE(pdpu, "\n");
+
+	/* remove previous state, if present */
+	if (plane->state) {
+		dpu_plane_destroy_state(plane, plane->state);
+		plane->state = NULL;
+	}
+
+	pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
+	if (!pstate) {
+		DPU_ERROR_PLANE(pdpu, "failed to allocate state\n");
+		return;
+	}
+
+	pstate->base.plane = plane;
+
+	plane->state = &pstate->base;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static ssize_t _dpu_plane_danger_read(struct file *file,
+			char __user *buff, size_t count, loff_t *ppos)
+{
+	struct dpu_kms *kms = file->private_data;
+	struct dpu_mdss_cfg *cfg = kms->catalog;
+	int len = 0;
+	char buf[40] = {'\0'};
+
+	if (!cfg)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0; /* the end */
+
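+	/* the file exposes "disable_danger", hence the inverted value */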
+	len = snprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
+	if (len < 0 || len >= sizeof(buf))
+		return 0;
+
+	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;   /* increase offset */
+
+	return len;
+}
+
+static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
+{
+	struct drm_plane *plane;
+
+	drm_for_each_plane(plane, kms->dev) {
+		if (plane->fb && plane->state) {
+			dpu_plane_danger_signal_ctrl(plane, enable);
+			DPU_DEBUG("plane:%d img:%dx%d ",
+				plane->base.id, plane->fb->width,
+				plane->fb->height);
+			DPU_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
+				plane->state->src_x >> 16,
+				plane->state->src_y >> 16,
+				plane->state->src_w >> 16,
+				plane->state->src_h >> 16,
+				plane->state->crtc_x, plane->state->crtc_y,
+				plane->state->crtc_w, plane->state->crtc_h);
+		} else {
+			DPU_DEBUG("Inactive plane:%d\n", plane->base.id);
+		}
+	}
+}
+
+static ssize_t _dpu_plane_danger_write(struct file *file,
+		    const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct dpu_kms *kms = file->private_data;
+	struct dpu_mdss_cfg *cfg = kms->catalog;
+	int disable_panic;
+	char buf[10];
+
+	if (!cfg)
+		return -EFAULT;
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0;	/* end of string */
+
+	if (kstrtoint(buf, 0, &disable_panic))
+		return -EFAULT;
+
+	if (disable_panic) {
+		/* Disable panic signal for all active pipes */
+		DPU_DEBUG("Disabling danger:\n");
+		_dpu_plane_set_danger_state(kms, false);
+		kms->has_danger_ctrl = false;
+	} else {
+		/* Enable panic signal for all active pipes */
+		DPU_DEBUG("Enabling danger:\n");
+		kms->has_danger_ctrl = true;
+		_dpu_plane_set_danger_state(kms, true);
+	}
+
+	return count;
+}
+
+static const struct file_operations dpu_plane_danger_enable = {
+	.open = simple_open,
+	.read = _dpu_plane_danger_read,
+	.write = _dpu_plane_danger_write,
+};
+
+static int _dpu_plane_init_debugfs(struct drm_plane *plane)
+{
+	struct dpu_plane *pdpu;
+	struct dpu_kms *kms;
+	struct msm_drm_private *priv;
+	const struct dpu_sspp_sub_blks *sblk = NULL;
+	const struct dpu_sspp_cfg *cfg = NULL;
+
+	if (!plane || !plane->dev) {
+		DPU_ERROR("invalid arguments\n");
+		return -EINVAL;
+	}
+
+	priv = plane->dev->dev_private;
+	if (!priv || !priv->kms) {
+		DPU_ERROR("invalid KMS reference\n");
+		return -EINVAL;
+	}
+
+	kms = to_dpu_kms(priv->kms);
+	pdpu = to_dpu_plane(plane);
+
+	if (pdpu && pdpu->pipe_hw)
+		cfg = pdpu->pipe_hw->cap;
+	if (cfg)
+		sblk = cfg->sblk;
+
+	if (!sblk)
+		return 0;
+
+	/* create overall sub-directory for the pipe */
+	pdpu->debugfs_root =
+		debugfs_create_dir(pdpu->pipe_name,
+				plane->dev->primary->debugfs_root);
+
+	if (!pdpu->debugfs_root)
+		return -ENOMEM;
+
+	/* don't error check these */
+	debugfs_create_x32("features", 0600,
+			pdpu->debugfs_root, &pdpu->features);
+
+	/* add register dump support */
+	dpu_debugfs_setup_regset32(&pdpu->debugfs_src,
+			sblk->src_blk.base + cfg->base,
+			sblk->src_blk.len,
+			kms);
+	dpu_debugfs_create_regset32("src_blk", 0400,
+			pdpu->debugfs_root, &pdpu->debugfs_src);
+
+	if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
+			cfg->features & BIT(DPU_SSPP_SCALER_QSEED2)) {
+		dpu_debugfs_setup_regset32(&pdpu->debugfs_scaler,
+				sblk->scaler_blk.base + cfg->base,
+				sblk->scaler_blk.len,
+				kms);
+		dpu_debugfs_create_regset32("scaler_blk", 0400,
+				pdpu->debugfs_root,
+				&pdpu->debugfs_scaler);
+		debugfs_create_bool("default_scaling",
+				0600,
+				pdpu->debugfs_root,
+				&pdpu->debugfs_default_scale);
+	}
+
+	if (cfg->features & BIT(DPU_SSPP_CSC) ||
+			cfg->features & BIT(DPU_SSPP_CSC_10BIT)) {
+		dpu_debugfs_setup_regset32(&pdpu->debugfs_csc,
+				sblk->csc_blk.base + cfg->base,
+				sblk->csc_blk.len,
+				kms);
+		dpu_debugfs_create_regset32("csc_blk", 0400,
+				pdpu->debugfs_root, &pdpu->debugfs_csc);
+	}
+
+	debugfs_create_u32("xin_id",
+			0400,
+			pdpu->debugfs_root,
+			(u32 *) &cfg->xin_id);
+	debugfs_create_u32("clk_ctrl",
+			0400,
+			pdpu->debugfs_root,
+			(u32 *) &cfg->clk_ctrl);
+	debugfs_create_x32("creq_vblank",
+			0600,
+			pdpu->debugfs_root,
+			(u32 *) &sblk->creq_vblank);
+	debugfs_create_x32("danger_vblank",
+			0600,
+			pdpu->debugfs_root,
+			(u32 *) &sblk->danger_vblank);
+
+	debugfs_create_file("disable_danger",
+			0600,
+			pdpu->debugfs_root,
+			kms, &dpu_plane_danger_enable);
+
+	return 0;
+}
+
+static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
+{
+	struct dpu_plane *pdpu;
+
+	if (!plane)
+		return;
+	pdpu = to_dpu_plane(plane);
+
+	debugfs_remove_recursive(pdpu->debugfs_root);
+}
+#else
+static int _dpu_plane_init_debugfs(struct drm_plane *plane)
+{
+	return 0;
+}
+static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
+{
+}
+#endif
+
+static int dpu_plane_late_register(struct drm_plane *plane)
+{
+	return _dpu_plane_init_debugfs(plane);
+}
+
+static void dpu_plane_early_unregister(struct drm_plane *plane)
+{
+	_dpu_plane_destroy_debugfs(plane);
+}
+
+static const struct drm_plane_funcs dpu_plane_funcs = {
+		.update_plane = drm_atomic_helper_update_plane,
+		.disable_plane = drm_atomic_helper_disable_plane,
+		.destroy = dpu_plane_destroy,
+		.reset = dpu_plane_reset,
+		.atomic_duplicate_state = dpu_plane_duplicate_state,
+		.atomic_destroy_state = dpu_plane_destroy_state,
+		.late_register = dpu_plane_late_register,
+		.early_unregister = dpu_plane_early_unregister,
+};
+
+static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = {
+		.prepare_fb = dpu_plane_prepare_fb,
+		.cleanup_fb = dpu_plane_cleanup_fb,
+		.atomic_check = dpu_plane_atomic_check,
+		.atomic_update = dpu_plane_atomic_update,
+};
+
+enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane)
+{
+	return plane ? to_dpu_plane(plane)->pipe : SSPP_NONE;
+}
+
+bool is_dpu_plane_virtual(struct drm_plane *plane)
+{
+	return plane ? to_dpu_plane(plane)->is_virtual : false;
+}
+
+/* initialize plane */
+struct drm_plane *dpu_plane_init(struct drm_device *dev,
+		uint32_t pipe, bool primary_plane,
+		unsigned long possible_crtcs, u32 master_plane_id)
+{
+	struct drm_plane *plane = NULL, *master_plane = NULL;
+	const struct dpu_format_extended *format_list;
+	struct dpu_plane *pdpu;
+	struct msm_drm_private *priv;
+	struct dpu_kms *kms;
+	enum drm_plane_type type;
+	int zpos_max = DPU_ZPOS_MAX;
+	int ret = -EINVAL;
+
+	if (!dev) {
+		DPU_ERROR("[%u]device is NULL\n", pipe);
+		goto exit;
+	}
+
+	priv = dev->dev_private;
+	if (!priv) {
+		DPU_ERROR("[%u]private data is NULL\n", pipe);
+		goto exit;
+	}
+
+	if (!priv->kms) {
+		DPU_ERROR("[%u]invalid KMS reference\n", pipe);
+		goto exit;
+	}
+	kms = to_dpu_kms(priv->kms);
+
+	if (!kms->catalog) {
+		DPU_ERROR("[%u]invalid catalog reference\n", pipe);
+		goto exit;
+	}
+
+	/* create and zero local structure */
+	pdpu = kzalloc(sizeof(*pdpu), GFP_KERNEL);
+	if (!pdpu) {
+		DPU_ERROR("[%u]failed to allocate local plane struct\n", pipe);
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	/* cache local stuff for later */
+	plane = &pdpu->base;
+	pdpu->pipe = pipe;
+	pdpu->is_virtual = (master_plane_id != 0);
+	INIT_LIST_HEAD(&pdpu->mplane_list);
+	master_plane = drm_plane_find(dev, NULL, master_plane_id);
+	if (master_plane) {
+		struct dpu_plane *mpdpu = to_dpu_plane(master_plane);
+
+		list_add_tail(&pdpu->mplane_list, &mpdpu->mplane_list);
+	}
+
+	/* initialize underlying h/w driver */
+	pdpu->pipe_hw = dpu_hw_sspp_init(pipe, kms->mmio, kms->catalog,
+							master_plane_id != 0);
+	if (IS_ERR(pdpu->pipe_hw)) {
+		DPU_ERROR("[%u]SSPP init failed\n", pipe);
+		ret = PTR_ERR(pdpu->pipe_hw);
+		goto clean_plane;
+	} else if (!pdpu->pipe_hw->cap || !pdpu->pipe_hw->cap->sblk) {
+		DPU_ERROR("[%u]SSPP init returned invalid cfg\n", pipe);
+		goto clean_sspp;
+	}
+
+	/* cache features mask for later */
+	pdpu->features = pdpu->pipe_hw->cap->features;
+	pdpu->pipe_sblk = pdpu->pipe_hw->cap->sblk;
+	if (!pdpu->pipe_sblk) {
+		DPU_ERROR("[%u]invalid sblk\n", pipe);
+		goto clean_sspp;
+	}
+
+	if (!master_plane_id)
+		format_list = pdpu->pipe_sblk->format_list;
+	else
+		format_list = pdpu->pipe_sblk->virt_format_list;
+
+	pdpu->nformats = dpu_populate_formats(format_list,
+				pdpu->formats,
+				0,
+				ARRAY_SIZE(pdpu->formats));
+
+	if (!pdpu->nformats) {
+		DPU_ERROR("[%u]no valid formats for plane\n", pipe);
+		goto clean_sspp;
+	}
+
+	if (pdpu->features & BIT(DPU_SSPP_CURSOR))
+		type = DRM_PLANE_TYPE_CURSOR;
+	else if (primary_plane)
+		type = DRM_PLANE_TYPE_PRIMARY;
+	else
+		type = DRM_PLANE_TYPE_OVERLAY;
+	ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs,
+				pdpu->formats, pdpu->nformats,
+				NULL, type, NULL);
+	if (ret)
+		goto clean_sspp;
+
+	pdpu->catalog = kms->catalog;
+
+	if (kms->catalog->mixer_count &&
+		kms->catalog->mixer[0].sblk->maxblendstages) {
+		zpos_max = kms->catalog->mixer[0].sblk->maxblendstages - 1;
+		if (zpos_max > DPU_STAGE_MAX - DPU_STAGE_0 - 1)
+			zpos_max = DPU_STAGE_MAX - DPU_STAGE_0 - 1;
+	}
+
+	ret = drm_plane_create_zpos_property(plane, 0, 0, zpos_max);
+	if (ret)
+		DPU_ERROR("failed to install zpos property, rc = %d\n", ret);
+
+	/* success! finalize initialization */
+	drm_plane_helper_add(plane, &dpu_plane_helper_funcs);
+
+	/* save user friendly pipe name for later */
+	snprintf(pdpu->pipe_name, DPU_NAME_SIZE, "plane%u", plane->base.id);
+
+	mutex_init(&pdpu->lock);
+
+	DPU_DEBUG("%s created for pipe:%u id:%u virtual:%u\n", pdpu->pipe_name,
+					pipe, plane->base.id, master_plane_id);
+	return plane;
+
+clean_sspp:
+	if (pdpu && pdpu->pipe_hw)
+		dpu_hw_sspp_destroy(pdpu->pipe_hw);
+clean_plane:
+	kfree(pdpu);
+exit:
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
new file mode 100644
index 000000000000..f6fe6ddc7a3a
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark at gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DPU_PLANE_H_
+#define _DPU_PLANE_H_
+
+#include <drm/drm_crtc.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_sspp.h"
+
+/**
+ * struct dpu_plane_state: Defines dpu extension of drm plane state object
+ * @base:	base drm plane state object
+ * @aspace:	pointer to address space for input/output buffers
+ * @input_fence:	dereferenced input fence pointer
+ * @stage:	assigned by crtc blender
+ * @multirect_index: index of the rectangle of SSPP
+ * @multirect_mode: parallel or time multiplex multirect mode
+ * @pending:	whether the current update is still pending
+ * @scaler3_cfg: configuration data for scaler3
+ * @pixel_ext: configuration data for pixel extensions
+ * @cdp_cfg:	CDP configuration
+ */
+struct dpu_plane_state {
+	struct drm_plane_state base;
+	struct msm_gem_address_space *aspace;
+	void *input_fence;
+	enum dpu_stage stage;
+	uint32_t multirect_index;
+	uint32_t multirect_mode;
+	bool pending;
+
+	/* scaler configuration */
+	struct dpu_hw_scaler3_cfg scaler3_cfg;
+	struct dpu_hw_pixel_ext pixel_ext;
+
+	struct dpu_hw_pipe_cdp_cfg cdp_cfg;
+};
+
+/**
+ * struct dpu_multirect_plane_states: Defines multirect pair of drm plane states
+ * @r0: drm plane configured on rect 0
+ * @r1: drm plane configured on rect 1
+ */
+struct dpu_multirect_plane_states {
+	const struct drm_plane_state *r0;
+	const struct drm_plane_state *r1;
+};
+
+#define to_dpu_plane_state(x) \
+	container_of(x, struct dpu_plane_state, base)
+
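+/*
+ * A minimal usage sketch (editor's illustration, not part of this patch):
+ * helpers that receive a base drm_plane_state can recover the dpu state
+ * with this macro, e.g.:
+ *
+ *	struct dpu_plane_state *pstate = to_dpu_plane_state(plane->state);
+ *
+ *	if (pstate->pending)
+ *		...;
+ */
+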
+/**
+ * dpu_plane_pipe - return sspp identifier for the given plane
+ * @plane:   Pointer to DRM plane object
+ * Returns: sspp identifier of the given plane
+ */
+enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane);
+
+/**
+ * is_dpu_plane_virtual - check for virtual plane
+ * @plane: Pointer to DRM plane object
+ * returns: true if the plane is virtual, false otherwise
+ */
+bool is_dpu_plane_virtual(struct drm_plane *plane);
+
+/**
+ * dpu_plane_get_ctl_flush - get control flush mask
+ * @plane:   Pointer to DRM plane object
+ * @ctl: Pointer to control hardware
+ * @flush_sspp: Pointer to sspp flush control word
+ */
+void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
+		u32 *flush_sspp);
+
+/**
+ * dpu_plane_restore - restore hw state if previously power collapsed
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_restore(struct drm_plane *plane);
+
+/**
+ * dpu_plane_flush - final plane operations before commit flush
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_flush(struct drm_plane *plane);
+
+/**
+ * dpu_plane_kickoff - final plane operations before commit kickoff
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_kickoff(struct drm_plane *plane);
+
+/**
+ * dpu_plane_set_error: enable/disable error condition
+ * @plane: pointer to drm_plane structure
+ */
+void dpu_plane_set_error(struct drm_plane *plane, bool error);
+
+/**
+ * dpu_plane_init - create new dpu plane for the given pipe
+ * @dev:   Pointer to DRM device
+ * @pipe:  dpu hardware pipe identifier
+ * @primary_plane: true if this pipe is primary plane for crtc
+ * @possible_crtcs: bitmask of crtcs that can be attached to the given pipe
+ * @master_plane_id: primary plane id of a multirect pipe. 0 value passed for
+ *                   a regular plane initialization. A non-zero primary plane
+ *                   id will be passed for a virtual pipe initialization.
+ */
+struct drm_plane *dpu_plane_init(struct drm_device *dev,
+		uint32_t pipe, bool primary_plane,
+		unsigned long possible_crtcs, u32 master_plane_id);
+
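+/*
+ * Illustrative sketch only (not part of this patch): KMS setup code would
+ * typically create one plane per catalog SSPP entry. The catalog field
+ * names and the "primary if first" convention below are assumptions for
+ * the example:
+ *
+ *	struct drm_plane *plane;
+ *
+ *	plane = dpu_plane_init(dev, catalog->sspp[i].id, i == 0,
+ *			       (1UL << max_crtc_count) - 1, 0);
+ *	if (IS_ERR(plane))
+ *		return PTR_ERR(plane);
+ */
+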
+/**
+ * dpu_plane_validate_multirect_v2 - validate the multirect planes
+ *				      against hw limitations
+ * @plane: drm plane states of the multirect pair
+ */
+int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane);
+
+/**
+ * dpu_plane_clear_multirect - clear multirect bits for the given pipe
+ * @drm_state: Pointer to DRM plane state
+ */
+void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state);
+
+/**
+ * dpu_plane_wait_input_fence - wait for input fence object
+ * @plane:   Pointer to DRM plane object
+ * @wait_ms: Wait timeout value
+ * Returns: Zero on success
+ */
+int dpu_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms);
+
+/**
+ * dpu_plane_color_fill - enables color fill on plane
+ * @plane:  Pointer to DRM plane object
+ * @color:  RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
+ * @alpha:  8-bit fill alpha value, 255 selects 100% alpha
+ * Returns: 0 on success
+ */
+int dpu_plane_color_fill(struct drm_plane *plane,
+		uint32_t color, uint32_t alpha);
+
+/**
+ * dpu_plane_set_revalidate - sets revalidate flag which forces a full
+ *	validation of the plane properties in the next atomic check
+ * @plane: Pointer to DRM plane object
+ * @enable: Boolean to set/unset the flag
+ */
+void dpu_plane_set_revalidate(struct drm_plane *plane, bool enable);
+
+#endif /* _DPU_PLANE_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
new file mode 100644
index 000000000000..a68f1249388c
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
@@ -0,0 +1,249 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d]: " fmt, __func__, __LINE__
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/string.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+
+#include "dpu_power_handle.h"
+#include "dpu_trace.h"
+
+static const char *data_bus_name[DPU_POWER_HANDLE_DBUS_ID_MAX] = {
+	[DPU_POWER_HANDLE_DBUS_ID_MNOC] = "qcom,dpu-data-bus",
+	[DPU_POWER_HANDLE_DBUS_ID_LLCC] = "qcom,dpu-llcc-bus",
+	[DPU_POWER_HANDLE_DBUS_ID_EBI] = "qcom,dpu-ebi-bus",
+};
+
+const char *dpu_power_handle_get_dbus_name(u32 bus_id)
+{
+	if (bus_id < DPU_POWER_HANDLE_DBUS_ID_MAX)
+		return data_bus_name[bus_id];
+
+	return NULL;
+}
+
+static void dpu_power_event_trigger_locked(struct dpu_power_handle *phandle,
+		u32 event_type)
+{
+	struct dpu_power_event *event;
+
+	list_for_each_entry(event, &phandle->event_list, list) {
+		if (event->event_type & event_type)
+			event->cb_fnc(event_type, event->usr);
+	}
+}
+
+struct dpu_power_client *dpu_power_client_create(
+	struct dpu_power_handle *phandle, char *client_name)
+{
+	struct dpu_power_client *client;
+	static u32 id;
+
+	if (!client_name || !phandle) {
+		pr_err("client name is null or invalid power data\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	client = kzalloc(sizeof(struct dpu_power_client), GFP_KERNEL);
+	if (!client)
+		return ERR_PTR(-ENOMEM);
+
+	mutex_lock(&phandle->phandle_lock);
+	strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
+	client->usecase_ndx = VOTE_INDEX_DISABLE;
+	client->id = id;
+	client->active = true;
+	pr_debug("client %s created:%pK id :%d\n", client_name,
+		client, id);
+	id++;
+	list_add(&client->list, &phandle->power_client_clist);
+	mutex_unlock(&phandle->phandle_lock);
+
+	return client;
+}
+
+void dpu_power_client_destroy(struct dpu_power_handle *phandle,
+	struct dpu_power_client *client)
+{
+	if (!client  || !phandle) {
+		pr_err("reg bus vote: invalid client handle\n");
+	} else if (!client->active) {
+		pr_err("dpu power deinit already done\n");
+		kfree(client);
+	} else {
+		pr_debug("bus vote client %s destroyed:%pK id:%u\n",
+			client->name, client, client->id);
+		mutex_lock(&phandle->phandle_lock);
+		list_del_init(&client->list);
+		mutex_unlock(&phandle->phandle_lock);
+		kfree(client);
+	}
+}
+
+void dpu_power_resource_init(struct platform_device *pdev,
+	struct dpu_power_handle *phandle)
+{
+	phandle->dev = &pdev->dev;
+
+	INIT_LIST_HEAD(&phandle->power_client_clist);
+	INIT_LIST_HEAD(&phandle->event_list);
+
+	mutex_init(&phandle->phandle_lock);
+}
+
+void dpu_power_resource_deinit(struct platform_device *pdev,
+	struct dpu_power_handle *phandle)
+{
+	struct dpu_power_client *curr_client, *next_client;
+	struct dpu_power_event *curr_event, *next_event;
+
+	if (!phandle || !pdev) {
+		pr_err("invalid input param\n");
+		return;
+	}
+
+	mutex_lock(&phandle->phandle_lock);
+	list_for_each_entry_safe(curr_client, next_client,
+			&phandle->power_client_clist, list) {
+		pr_err("client:%s-%d still registered with refcount:%d\n",
+				curr_client->name, curr_client->id,
+				curr_client->refcount);
+		curr_client->active = false;
+		list_del(&curr_client->list);
+	}
+
+	list_for_each_entry_safe(curr_event, next_event,
+			&phandle->event_list, list) {
+		pr_err("event:%d, client:%s still registered\n",
+				curr_event->event_type,
+				curr_event->client_name);
+		curr_event->active = false;
+		list_del(&curr_event->list);
+	}
+	mutex_unlock(&phandle->phandle_lock);
+}
+
+int dpu_power_resource_enable(struct dpu_power_handle *phandle,
+	struct dpu_power_client *pclient, bool enable)
+{
+	bool changed = false;
+	u32 max_usecase_ndx = VOTE_INDEX_DISABLE, prev_usecase_ndx;
+	struct dpu_power_client *client;
+
+	if (!phandle || !pclient) {
+		pr_err("invalid input argument\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&phandle->phandle_lock);
+	if (enable)
+		pclient->refcount++;
+	else if (pclient->refcount)
+		pclient->refcount--;
+
+	if (pclient->refcount)
+		pclient->usecase_ndx = VOTE_INDEX_LOW;
+	else
+		pclient->usecase_ndx = VOTE_INDEX_DISABLE;
+
+	list_for_each_entry(client, &phandle->power_client_clist, list) {
+		if (client->usecase_ndx < VOTE_INDEX_MAX &&
+		    client->usecase_ndx > max_usecase_ndx)
+			max_usecase_ndx = client->usecase_ndx;
+	}
+
+	if (phandle->current_usecase_ndx != max_usecase_ndx) {
+		changed = true;
+		prev_usecase_ndx = phandle->current_usecase_ndx;
+		phandle->current_usecase_ndx = max_usecase_ndx;
+	}
+
+	pr_debug("%pS: changed=%d current idx=%d request client %s id:%u enable:%d refcount:%d\n",
+		__builtin_return_address(0), changed, max_usecase_ndx,
+		pclient->name, pclient->id, enable, pclient->refcount);
+
+	if (!changed)
+		goto end;
+
+	if (enable) {
+		dpu_power_event_trigger_locked(phandle,
+				DPU_POWER_EVENT_PRE_ENABLE);
+		dpu_power_event_trigger_locked(phandle,
+				DPU_POWER_EVENT_POST_ENABLE);
+
+	} else {
+		dpu_power_event_trigger_locked(phandle,
+				DPU_POWER_EVENT_PRE_DISABLE);
+		dpu_power_event_trigger_locked(phandle,
+				DPU_POWER_EVENT_POST_DISABLE);
+	}
+
+end:
+	mutex_unlock(&phandle->phandle_lock);
+	return 0;
+}
+
+struct dpu_power_event *dpu_power_handle_register_event(
+		struct dpu_power_handle *phandle,
+		u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
+		void *usr, char *client_name)
+{
+	struct dpu_power_event *event;
+
+	if (!phandle) {
+		pr_err("invalid power handle\n");
+		return ERR_PTR(-EINVAL);
+	} else if (!cb_fnc || !event_type) {
+		pr_err("no callback fnc or event type\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	event = kzalloc(sizeof(struct dpu_power_event), GFP_KERNEL);
+	if (!event)
+		return ERR_PTR(-ENOMEM);
+
+	event->event_type = event_type;
+	event->cb_fnc = cb_fnc;
+	event->usr = usr;
+	strlcpy(event->client_name, client_name, MAX_CLIENT_NAME_LEN);
+	event->active = true;
+
+	mutex_lock(&phandle->phandle_lock);
+	list_add(&event->list, &phandle->event_list);
+	mutex_unlock(&phandle->phandle_lock);
+
+	return event;
+}
+
+void dpu_power_handle_unregister_event(
+		struct dpu_power_handle *phandle,
+		struct dpu_power_event *event)
+{
+	if (!phandle || !event) {
+		pr_err("invalid phandle or event\n");
+	} else if (!event->active) {
+		pr_err("power handle deinit already done\n");
+		kfree(event);
+	} else {
+		mutex_lock(&phandle->phandle_lock);
+		list_del_init(&event->list);
+		mutex_unlock(&phandle->phandle_lock);
+		kfree(event);
+	}
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
new file mode 100644
index 000000000000..344f74464eca
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
@@ -0,0 +1,225 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DPU_POWER_HANDLE_H_
+#define _DPU_POWER_HANDLE_H_
+
+#define MAX_CLIENT_NAME_LEN 128
+
+#define DPU_POWER_HANDLE_ENABLE_BUS_AB_QUOTA	0
+#define DPU_POWER_HANDLE_DISABLE_BUS_AB_QUOTA	0
+#define DPU_POWER_HANDLE_ENABLE_BUS_IB_QUOTA	1600000000
+#define DPU_POWER_HANDLE_DISABLE_BUS_IB_QUOTA	0
+
+#include "dpu_io_util.h"
+
+/* event will be triggered before power handler disable */
+#define DPU_POWER_EVENT_PRE_DISABLE	0x1
+
+/* event will be triggered after power handler disable */
+#define DPU_POWER_EVENT_POST_DISABLE	0x2
+
+/* event will be triggered before power handler enable */
+#define DPU_POWER_EVENT_PRE_ENABLE	0x4
+
+/* event will be triggered after power handler enable */
+#define DPU_POWER_EVENT_POST_ENABLE	0x8
+
+/**
+ * mdss_bus_vote_type: register bus vote type
+ * VOTE_INDEX_DISABLE: removes the client vote
+ * VOTE_INDEX_LOW: keeps the lowest vote for register bus
+ * VOTE_INDEX_MAX: invalid
+ */
+enum mdss_bus_vote_type {
+	VOTE_INDEX_DISABLE,
+	VOTE_INDEX_LOW,
+	VOTE_INDEX_MAX,
+};
+
+/**
+ * enum dpu_power_handle_data_bus_client - type of axi bus clients
+ * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT: core real-time bus client
+ * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT: core non-real-time bus client
+ * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX: maximum number of bus client type
+ */
+enum dpu_power_handle_data_bus_client {
+	DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT,
+	DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
+	DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX
+};
+
+/**
+ * enum DPU_POWER_HANDLE_DBUS_ID - data bus identifier
+ * @DPU_POWER_HANDLE_DBUS_ID_MNOC: DPU/MNOC data bus
+ * @DPU_POWER_HANDLE_DBUS_ID_LLCC: MNOC/LLCC data bus
+ * @DPU_POWER_HANDLE_DBUS_ID_EBI: LLCC/EBI data bus
+ */
+enum DPU_POWER_HANDLE_DBUS_ID {
+	DPU_POWER_HANDLE_DBUS_ID_MNOC,
+	DPU_POWER_HANDLE_DBUS_ID_LLCC,
+	DPU_POWER_HANDLE_DBUS_ID_EBI,
+	DPU_POWER_HANDLE_DBUS_ID_MAX,
+};
+
+/**
+ * struct dpu_power_client: stores the power client for dpu driver
+ * @name:	name of the client
+ * @usecase_ndx: current regs bus vote type
+ * @refcount:	current refcount if multiple modules are using the
+ *              same client for enable/disable. Power module will
+ *              aggregate the refcount and vote accordingly for this
+ *              client.
+ * @id:		assigned during create; helps with debugging
+ * @list:	list to attach power handle master list
+ * @ab:         arbitrated bandwidth for each bus client
+ * @ib:         instantaneous bandwidth for each bus client
+ * @active:	indicates the state of dpu power handle
+ */
+struct dpu_power_client {
+	char name[MAX_CLIENT_NAME_LEN];
+	short usecase_ndx;
+	short refcount;
+	u32 id;
+	struct list_head list;
+	u64 ab[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
+	u64 ib[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
+	bool active;
+};
+
+/*
+ * struct dpu_power_event - local event registration structure
+ * @client_name: name of the client registering
+ * @cb_fnc: pointer to desired callback function
+ * @usr: user pointer to pass to callback event trigger
+ * @event_type: refer to DPU_POWER_EVENT_*
+ * @list: list to attach event master list
+ * @active: indicates the state of dpu power handle
+ */
+struct dpu_power_event {
+	char client_name[MAX_CLIENT_NAME_LEN];
+	void (*cb_fnc)(u32 event_type, void *usr);
+	void *usr;
+	u32 event_type;
+	struct list_head list;
+	bool active;
+};
+
+/**
+ * struct dpu_power_handle: power handle main struct
+ * @power_client_clist: master list to store all clients
+ * @phandle_lock: lock to synchronize the enable/disable
+ * @dev: pointer to device structure
+ * @current_usecase_ndx: current usecase index
+ * @event_list: current power handle event list
+ */
+struct dpu_power_handle {
+	struct list_head power_client_clist;
+	struct mutex phandle_lock;
+	struct device *dev;
+	u32 current_usecase_ndx;
+	struct list_head event_list;
+};
+
+/**
+ * dpu_power_resource_init() - initializes the dpu power handle
+ * @pdev:   platform device to search the power resources
+ * @pdata:  power handle to store the power resources
+ */
+void dpu_power_resource_init(struct platform_device *pdev,
+	struct dpu_power_handle *pdata);
+
+/**
+ * dpu_power_resource_deinit() - release the dpu power handle
+ * @pdev:   platform device for power resources
+ * @pdata:  power handle containing the resources
+ *
+ * Return: none
+ */
+void dpu_power_resource_deinit(struct platform_device *pdev,
+	struct dpu_power_handle *pdata);
+
+/**
+ * dpu_power_client_create() - create the client on power handle
+ * @pdata:  power handle containing the resources
+ * @client_name: new client name for registration
+ *
+ * Return: pointer to the new client, or an error pointer on failure.
+ */
+struct dpu_power_client *dpu_power_client_create(struct dpu_power_handle *pdata,
+	char *client_name);
+
+/**
+ * dpu_power_client_destroy() - destroy the client on power handle
+ * @phandle:  power handle containing the resources
+ * @client: client handle to be destroyed
+ *
+ * Return: none
+ */
+void dpu_power_client_destroy(struct dpu_power_handle *phandle,
+	struct dpu_power_client *client);
+
+/**
+ * dpu_power_resource_enable() - enable/disable the power resources
+ * @pdata:  power handle containing the resources
+ * @client: client information to enable/disable its vote
+ * @enable: boolean request for enable/disable
+ *
+ * Return: error code.
+ */
+int dpu_power_resource_enable(struct dpu_power_handle *pdata,
+	struct dpu_power_client *pclient, bool enable);
+
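+/*
+ * Votes are refcounted per client, so callers are expected to balance
+ * enable/disable pairs. A minimal sketch (editor's illustration, not part
+ * of this patch):
+ *
+ *	dpu_power_resource_enable(phandle, client, true);
+ *	... program hardware ...
+ *	dpu_power_resource_enable(phandle, client, false);
+ */
+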
+/**
+ * dpu_power_data_bus_bandwidth_ctrl() - control data bus bandwidth enable
+ * @phandle:  power handle containing the resources
+ * @client: client information to bandwidth control
+ * @enable: true to enable bandwidth for data bus
+ *
+ * Return: none
+ */
+void dpu_power_data_bus_bandwidth_ctrl(struct dpu_power_handle *phandle,
+		struct dpu_power_client *pclient, int enable);
+
+/**
+ * dpu_power_handle_register_event - register a callback function for an event.
+ *	Clients can register for multiple events with a single registration.
+ *	Any block with access to phandle can register for the event
+ *	notification.
+ * @phandle:	power handle containing the resources
+ * @event_type:	event type to register; refer to DPU_POWER_EVENT_*
+ * @cb_fnc:	pointer to desired callback function
+ * @usr:	user pointer to pass to callback on event trigger
+ * @client_name: name of the client registering
+ * Return:	event pointer if success, or error code otherwise
+ */
+struct dpu_power_event *dpu_power_handle_register_event(
+		struct dpu_power_handle *phandle,
+		u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
+		void *usr, char *client_name);
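+
+/*
+ * Registration sketch (editor's illustration; "dpu_core" and
+ * _dpu_power_event_cb are made-up names). event_type is a bitmask, so a
+ * single call can cover several events:
+ *
+ *	static void _dpu_power_event_cb(u32 event_type, void *usr)
+ *	{
+ *		...
+ *	}
+ *
+ *	event = dpu_power_handle_register_event(phandle,
+ *			DPU_POWER_EVENT_PRE_ENABLE |
+ *			DPU_POWER_EVENT_POST_ENABLE,
+ *			_dpu_power_event_cb, priv, "dpu_core");
+ */
+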
+/**
+ * dpu_power_handle_unregister_event - unregister callback for event(s)
+ * @phandle:	power handle containing the resources
+ * @event:	event pointer returned after power handle register
+ */
+void dpu_power_handle_unregister_event(struct dpu_power_handle *phandle,
+		struct dpu_power_event *event);
+
+/**
+ * dpu_power_handle_get_dbus_name - get name of given data bus identifier
+ * @bus_id:	data bus identifier
+ * Return:	Pointer to name string if success; NULL otherwise
+ */
+const char *dpu_power_handle_get_dbus_name(u32 bus_id);
+
+#endif /* _DPU_POWER_HANDLE_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
new file mode 100644
index 000000000000..13c0a36d4ef9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -0,0 +1,1079 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm:%s] " fmt, __func__
+#include "dpu_kms.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_cdm.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_hw_intf.h"
+#include "dpu_encoder.h"
+#include "dpu_trace.h"
+
+#define RESERVED_BY_OTHER(h, r) \
+	((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))
+
+#define RM_RQ_LOCK(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_LOCK))
+#define RM_RQ_CLEAR(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_CLEAR))
+#define RM_RQ_DS(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_DS))
+#define RM_IS_TOPOLOGY_MATCH(t, r) ((t).num_lm == (r).num_lm && \
+				(t).num_comp_enc == (r).num_enc && \
+				(t).num_intf == (r).num_intf)
+
+struct dpu_rm_topology_def {
+	enum dpu_rm_topology_name top_name;
+	int num_lm;
+	int num_comp_enc;
+	int num_intf;
+	int num_ctl;
+	int needs_split_display;
+};
+
+static const struct dpu_rm_topology_def g_top_table[] = {
+	{   DPU_RM_TOPOLOGY_NONE,                 0, 0, 0, 0, false },
+	{   DPU_RM_TOPOLOGY_SINGLEPIPE,           1, 0, 1, 1, false },
+	{   DPU_RM_TOPOLOGY_DUALPIPE,             2, 0, 2, 2, true  },
+	{   DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE,     2, 0, 1, 1, false },
+};
+
+/**
+ * struct dpu_rm_requirements - Reservation requirements parameter bundle
+ * @top_ctrl:  topology control preference from kernel client
+ * @top:       selected topology for the display
+ * @hw_res:	   Hardware resources required as reported by the encoders
+ */
+struct dpu_rm_requirements {
+	uint64_t top_ctrl;
+	const struct dpu_rm_topology_def *topology;
+	struct dpu_encoder_hw_resources hw_res;
+};
+
+/**
+ * struct dpu_rm_rsvp - Use Case Reservation tagging structure
+ *	Used to tag HW blocks as reserved by a CRTC->Encoder->Connector chain
+ *	By using as a tag, rather than lists of pointers to HW blocks used
+ *	we can avoid some list management since we don't know how many blocks
+ *	of each type a given use case may require.
+ * @list:	List head for list of all reservations
+ * @seq:	Global RSVP sequence number for debugging, especially for
+ *		differentiating different allocations for the same encoder.
+ * @enc_id:	Reservations are tracked by Encoder DRM object ID.
+ *		CRTCs may be connected to multiple Encoders.
+ *		An encoder or connector id identifies the display path.
+ * @topology:	DRM<->HW topology use case
+ */
+struct dpu_rm_rsvp {
+	struct list_head list;
+	uint32_t seq;
+	uint32_t enc_id;
+	enum dpu_rm_topology_name topology;
+};
+
+/**
+ * struct dpu_rm_hw_blk - hardware block tracking list member
+ * @list:	List head for list of all hardware blocks tracking items
+ * @rsvp:	Pointer to use case reservation if reserved by a client
+ * @rsvp_nxt:	Temporary pointer used during reservation to the incoming
+ *		request. Will be swapped into rsvp if proposal is accepted
+ * @type:	Type of hardware block this structure tracks
+ * @id:		Hardware ID number, within its own space, i.e. LM_X
+ * @catalog:	Pointer to the hardware catalog entry for this block
+ * @hw:		Pointer to the hardware register access object for this block
+ */
+struct dpu_rm_hw_blk {
+	struct list_head list;
+	struct dpu_rm_rsvp *rsvp;
+	struct dpu_rm_rsvp *rsvp_nxt;
+	enum dpu_hw_blk_type type;
+	uint32_t id;
+	struct dpu_hw_blk *hw;
+};
+
+/**
+ * dpu_rm_dbg_rsvp_stage - enum of steps in making reservation for event logging
+ */
+enum dpu_rm_dbg_rsvp_stage {
+	DPU_RM_STAGE_BEGIN,
+	DPU_RM_STAGE_AFTER_CLEAR,
+	DPU_RM_STAGE_AFTER_RSVPNEXT,
+	DPU_RM_STAGE_FINAL
+};
+
+static void _dpu_rm_print_rsvps(
+		struct dpu_rm *rm,
+		enum dpu_rm_dbg_rsvp_stage stage)
+{
+	struct dpu_rm_rsvp *rsvp;
+	struct dpu_rm_hw_blk *blk;
+	enum dpu_hw_blk_type type;
+
+	DPU_DEBUG("%d\n", stage);
+
+	list_for_each_entry(rsvp, &rm->rsvps, list) {
+		DRM_DEBUG_KMS("%d rsvp[s%ue%u] topology %d\n", stage, rsvp->seq,
+			      rsvp->enc_id, rsvp->topology);
+	}
+
+	for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+		list_for_each_entry(blk, &rm->hw_blks[type], list) {
+			if (!blk->rsvp && !blk->rsvp_nxt)
+				continue;
+
+			DRM_DEBUG_KMS("%d rsvp[s%ue%u->s%ue%u] %d %d\n", stage,
+				(blk->rsvp) ? blk->rsvp->seq : 0,
+				(blk->rsvp) ? blk->rsvp->enc_id : 0,
+				(blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0,
+				(blk->rsvp_nxt) ? blk->rsvp_nxt->enc_id : 0,
+				blk->type, blk->id);
+		}
+	}
+}
+
+struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm)
+{
+	return rm->hw_mdp;
+}
+
+enum dpu_rm_topology_name
+dpu_rm_get_topology_name(struct msm_display_topology topology)
+{
+	int i;
+
+	for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++)
+		if (RM_IS_TOPOLOGY_MATCH(g_top_table[i], topology))
+			return g_top_table[i].top_name;
+
+	return DPU_RM_TOPOLOGY_NONE;
+}
+
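+/*
+ * For illustration only (not part of this patch): a dual-mixer request
+ * that merges down to a single interface resolves to DUALPIPE_3DMERGE:
+ *
+ *	struct msm_display_topology top = {
+ *		.num_lm = 2, .num_enc = 0, .num_intf = 1,
+ *	};
+ *
+ *	dpu_rm_get_topology_name(top) == DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE
+ */
+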
+void dpu_rm_init_hw_iter(
+		struct dpu_rm_hw_iter *iter,
+		uint32_t enc_id,
+		enum dpu_hw_blk_type type)
+{
+	memset(iter, 0, sizeof(*iter));
+	iter->enc_id = enc_id;
+	iter->type = type;
+}
+
+static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
+{
+	struct list_head *blk_list;
+
+	if (!rm || !i || i->type >= DPU_HW_BLK_MAX) {
+		DPU_ERROR("invalid rm\n");
+		return false;
+	}
+
+	i->hw = NULL;
+	blk_list = &rm->hw_blks[i->type];
+
+	if (i->blk && (&i->blk->list == blk_list)) {
+		DPU_DEBUG("attempt to resume iteration past last\n");
+		return false;
+	}
+
+	i->blk = list_prepare_entry(i->blk, blk_list, list);
+
+	list_for_each_entry_continue(i->blk, blk_list, list) {
+		struct dpu_rm_rsvp *rsvp = i->blk->rsvp;
+
+		if (i->blk->type != i->type) {
+			DPU_ERROR("found incorrect block type %d on %d list\n",
+					i->blk->type, i->type);
+			return false;
+		}
+
+		if ((i->enc_id == 0) || (rsvp && rsvp->enc_id == i->enc_id)) {
+			i->hw = i->blk->hw;
+			DPU_DEBUG("found type %d id %d for enc %d\n",
+					i->type, i->blk->id, i->enc_id);
+			return true;
+		}
+	}
+
+	DPU_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id);
+
+	return false;
+}
+
+bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
+{
+	bool ret;
+
+	mutex_lock(&rm->rm_lock);
+	ret = _dpu_rm_get_hw_locked(rm, i);
+	mutex_unlock(&rm->rm_lock);
+
+	return ret;
+}
+
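+/*
+ * Iteration sketch (editor's illustration, not part of this patch): a
+ * client such as an encoder can walk the CTL blocks tagged with its
+ * reservation; an enc_id of 0 would walk every block of that type:
+ *
+ *	struct dpu_rm_hw_iter iter;
+ *
+ *	dpu_rm_init_hw_iter(&iter, drm_enc->base.id, DPU_HW_BLK_CTL);
+ *	while (dpu_rm_get_hw(rm, &iter)) {
+ *		struct dpu_hw_ctl *ctl = (struct dpu_hw_ctl *)iter.hw;
+ *		...
+ *	}
+ */
+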
+static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
+{
+	switch (type) {
+	case DPU_HW_BLK_LM:
+		dpu_hw_lm_destroy(hw);
+		break;
+	case DPU_HW_BLK_CTL:
+		dpu_hw_ctl_destroy(hw);
+		break;
+	case DPU_HW_BLK_CDM:
+		dpu_hw_cdm_destroy(hw);
+		break;
+	case DPU_HW_BLK_PINGPONG:
+		dpu_hw_pingpong_destroy(hw);
+		break;
+	case DPU_HW_BLK_INTF:
+		dpu_hw_intf_destroy(hw);
+		break;
+	case DPU_HW_BLK_SSPP:
+		/* SSPPs are not managed by the resource manager */
+	case DPU_HW_BLK_TOP:
+		/* Top is a singleton, not managed in hw_blks list */
+	case DPU_HW_BLK_MAX:
+	default:
+		DPU_ERROR("unsupported block type %d\n", type);
+		break;
+	}
+}
+
+int dpu_rm_destroy(struct dpu_rm *rm)
+{
+
+	struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
+	struct dpu_rm_hw_blk *hw_cur, *hw_nxt;
+	enum dpu_hw_blk_type type;
+
+	if (!rm) {
+		DPU_ERROR("invalid rm\n");
+		return -EINVAL;
+	}
+
+	list_for_each_entry_safe(rsvp_cur, rsvp_nxt, &rm->rsvps, list) {
+		list_del(&rsvp_cur->list);
+		kfree(rsvp_cur);
+	}
+
+
+	for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+		list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
+				list) {
+			list_del(&hw_cur->list);
+			_dpu_rm_hw_destroy(hw_cur->type, hw_cur->hw);
+			kfree(hw_cur);
+		}
+	}
+
+	dpu_hw_mdp_destroy(rm->hw_mdp);
+	rm->hw_mdp = NULL;
+
+	mutex_destroy(&rm->rm_lock);
+
+	return 0;
+}
+
+static int _dpu_rm_hw_blk_create(
+		struct dpu_rm *rm,
+		struct dpu_mdss_cfg *cat,
+		void __iomem *mmio,
+		enum dpu_hw_blk_type type,
+		uint32_t id,
+		void *hw_catalog_info)
+{
+	struct dpu_rm_hw_blk *blk;
+	struct dpu_hw_mdp *hw_mdp;
+	void *hw;
+
+	hw_mdp = rm->hw_mdp;
+
+	switch (type) {
+	case DPU_HW_BLK_LM:
+		hw = dpu_hw_lm_init(id, mmio, cat);
+		break;
+	case DPU_HW_BLK_CTL:
+		hw = dpu_hw_ctl_init(id, mmio, cat);
+		break;
+	case DPU_HW_BLK_CDM:
+		hw = dpu_hw_cdm_init(id, mmio, cat, hw_mdp);
+		break;
+	case DPU_HW_BLK_PINGPONG:
+		hw = dpu_hw_pingpong_init(id, mmio, cat);
+		break;
+	case DPU_HW_BLK_INTF:
+		hw = dpu_hw_intf_init(id, mmio, cat);
+		break;
+	case DPU_HW_BLK_SSPP:
+		/* SSPPs are not managed by the resource manager */
+	case DPU_HW_BLK_TOP:
+		/* Top is a singleton, not managed in hw_blks list */
+	case DPU_HW_BLK_MAX:
+	default:
+		DPU_ERROR("unsupported block type %d\n", type);
+		return -EINVAL;
+	}
+
+	if (IS_ERR_OR_NULL(hw)) {
+		DPU_ERROR("failed hw object creation: type %d, err %ld\n",
+				type, PTR_ERR(hw));
+		return -EFAULT;
+	}
+
+	blk = kzalloc(sizeof(*blk), GFP_KERNEL);
+	if (!blk) {
+		_dpu_rm_hw_destroy(type, hw);
+		return -ENOMEM;
+	}
+
+	blk->type = type;
+	blk->id = id;
+	blk->hw = hw;
+	list_add_tail(&blk->list, &rm->hw_blks[type]);
+
+	return 0;
+}
+
+int dpu_rm_init(struct dpu_rm *rm,
+		struct dpu_mdss_cfg *cat,
+		void __iomem *mmio,
+		struct drm_device *dev)
+{
+	int rc, i;
+	enum dpu_hw_blk_type type;
+
+	if (!rm || !cat || !mmio || !dev) {
+		DPU_ERROR("invalid kms\n");
+		return -EINVAL;
+	}
+
+	/* Clear, setup lists */
+	memset(rm, 0, sizeof(*rm));
+
+	mutex_init(&rm->rm_lock);
+
+	INIT_LIST_HEAD(&rm->rsvps);
+	for (type = 0; type < DPU_HW_BLK_MAX; type++)
+		INIT_LIST_HEAD(&rm->hw_blks[type]);
+
+	rm->dev = dev;
+
+	/* Some of the sub-blocks require an mdptop to be created */
+	rm->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, mmio, cat);
+	if (IS_ERR_OR_NULL(rm->hw_mdp)) {
+		rc = PTR_ERR(rm->hw_mdp);
+		rm->hw_mdp = NULL;
+		DPU_ERROR("failed: mdp hw not available\n");
+		goto fail;
+	}
+
+	/* Interrogate HW catalog and create tracking items for hw blocks */
+	for (i = 0; i < cat->mixer_count; i++) {
+		struct dpu_lm_cfg *lm = &cat->mixer[i];
+
+		if (lm->pingpong == PINGPONG_MAX) {
+			DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
+			continue;
+		}
+
+		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_LM,
+				cat->mixer[i].id, &cat->mixer[i]);
+		if (rc) {
+			DPU_ERROR("failed: lm hw not available\n");
+			goto fail;
+		}
+
+		if (!rm->lm_max_width) {
+			rm->lm_max_width = lm->sblk->maxwidth;
+		} else if (rm->lm_max_width != lm->sblk->maxwidth) {
+			/*
+			 * Don't expect to have hw where lm max widths differ.
+			 * If found, take the min.
+			 */
+			DPU_ERROR("unsupported: lm maxwidth differs\n");
+			if (rm->lm_max_width > lm->sblk->maxwidth)
+				rm->lm_max_width = lm->sblk->maxwidth;
+		}
+	}
+
+	for (i = 0; i < cat->pingpong_count; i++) {
+		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_PINGPONG,
+				cat->pingpong[i].id, &cat->pingpong[i]);
+		if (rc) {
+			DPU_ERROR("failed: pp hw not available\n");
+			goto fail;
+		}
+	}
+
+	for (i = 0; i < cat->intf_count; i++) {
+		if (cat->intf[i].type == INTF_NONE) {
+			DPU_DEBUG("skip intf %d with type none\n", i);
+			continue;
+		}
+
+		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_INTF,
+				cat->intf[i].id, &cat->intf[i]);
+		if (rc) {
+			DPU_ERROR("failed: intf hw not available\n");
+			goto fail;
+		}
+	}
+
+	for (i = 0; i < cat->ctl_count; i++) {
+		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CTL,
+				cat->ctl[i].id, &cat->ctl[i]);
+		if (rc) {
+			DPU_ERROR("failed: ctl hw not available\n");
+			goto fail;
+		}
+	}
+
+	for (i = 0; i < cat->cdm_count; i++) {
+		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CDM,
+				cat->cdm[i].id, &cat->cdm[i]);
+		if (rc) {
+			DPU_ERROR("failed: cdm hw not available\n");
+			goto fail;
+		}
+	}
+
+	return 0;
+
+fail:
+	dpu_rm_destroy(rm);
+
+	return rc;
+}
+
+/**
+ * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
+ *	proposed use case requirements, incl. hardwired dependent blocks like
+ *	pingpong
+ * @rm: dpu resource manager handle
+ * @rsvp: reservation currently being created
+ * @reqs: proposed use case requirements
+ * @lm: proposed layer mixer, function checks if lm, and all other hardwired
+ *      blocks connected to the lm (pp), are available and appropriate
+ * @pp: output parameter, pingpong block attached to the layer mixer.
+ *      NULL if pp was not available, or not matching requirements.
+ * @primary_lm: if non-null, this function checks if lm is compatible with
+ *              primary_lm as well as satisfying all other requirements
+ * @Return: true if lm matches all requirements, false otherwise
+ */
+static bool _dpu_rm_check_lm_and_get_connected_blks(
+		struct dpu_rm *rm,
+		struct dpu_rm_rsvp *rsvp,
+		struct dpu_rm_requirements *reqs,
+		struct dpu_rm_hw_blk *lm,
+		struct dpu_rm_hw_blk **pp,
+		struct dpu_rm_hw_blk *primary_lm)
+{
+	const struct dpu_lm_cfg *lm_cfg = to_dpu_hw_mixer(lm->hw)->cap;
+	struct dpu_rm_hw_iter iter;
+
+	*pp = NULL;
+
+	DPU_DEBUG("check lm %d pp %d\n",
+			   lm_cfg->id, lm_cfg->pingpong);
+
+	/* Check if this layer mixer is a peer of the proposed primary LM */
+	if (primary_lm) {
+		const struct dpu_lm_cfg *prim_lm_cfg =
+				to_dpu_hw_mixer(primary_lm->hw)->cap;
+
+		if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
+			DPU_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id,
+					prim_lm_cfg->id);
+			return false;
+		}
+	}
+
+	/* Already reserved? */
+	if (RESERVED_BY_OTHER(lm, rsvp)) {
+		DPU_DEBUG("lm %d already reserved\n", lm_cfg->id);
+		return false;
+	}
+
+	dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG);
+	while (_dpu_rm_get_hw_locked(rm, &iter)) {
+		if (iter.blk->id == lm_cfg->pingpong) {
+			*pp = iter.blk;
+			break;
+		}
+	}
+
+	if (!*pp) {
+		DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
+		return false;
+	}
+
+	if (RESERVED_BY_OTHER(*pp, rsvp)) {
+		DPU_DEBUG("lm %d pp %d already reserved\n", lm->id,
+				(*pp)->id);
+		return false;
+	}
+
+	return true;
+}
+
+static int _dpu_rm_reserve_lms(
+		struct dpu_rm *rm,
+		struct dpu_rm_rsvp *rsvp,
+		struct dpu_rm_requirements *reqs)
+
+{
+	struct dpu_rm_hw_blk *lm[MAX_BLOCKS];
+	struct dpu_rm_hw_blk *pp[MAX_BLOCKS];
+	struct dpu_rm_hw_iter iter_i, iter_j;
+	int lm_count = 0;
+	int i, rc = 0;
+
+	if (!reqs->topology->num_lm) {
+		DPU_ERROR("invalid number of lm: %d\n", reqs->topology->num_lm);
+		return -EINVAL;
+	}
+
+	/* Find a primary mixer */
+	dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
+	while (lm_count != reqs->topology->num_lm &&
+			_dpu_rm_get_hw_locked(rm, &iter_i)) {
+		memset(&lm, 0, sizeof(lm));
+		memset(&pp, 0, sizeof(pp));
+
+		lm_count = 0;
+		lm[lm_count] = iter_i.blk;
+
+		if (!_dpu_rm_check_lm_and_get_connected_blks(
+				rm, rsvp, reqs, lm[lm_count],
+				&pp[lm_count], NULL))
+			continue;
+
+		++lm_count;
+
+		/* Valid primary mixer found, find matching peers */
+		dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);
+
+		while (lm_count != reqs->topology->num_lm &&
+				_dpu_rm_get_hw_locked(rm, &iter_j)) {
+			if (iter_i.blk == iter_j.blk)
+				continue;
+
+			if (!_dpu_rm_check_lm_and_get_connected_blks(
+					rm, rsvp, reqs, iter_j.blk,
+					&pp[lm_count], iter_i.blk))
+				continue;
+
+			lm[lm_count] = iter_j.blk;
+			++lm_count;
+		}
+	}
+
+	if (lm_count != reqs->topology->num_lm) {
+		DPU_DEBUG("unable to find appropriate mixers\n");
+		return -ENAVAIL;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(lm); i++) {
+		if (!lm[i])
+			break;
+
+		lm[i]->rsvp_nxt = rsvp;
+		pp[i]->rsvp_nxt = rsvp;
+
+		trace_dpu_rm_reserve_lms(lm[i]->id, lm[i]->type, rsvp->enc_id,
+					 pp[i]->id);
+	}
+
+	return rc;
+}
+
+static int _dpu_rm_reserve_ctls(
+		struct dpu_rm *rm,
+		struct dpu_rm_rsvp *rsvp,
+		const struct dpu_rm_topology_def *top)
+{
+	struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
+	struct dpu_rm_hw_iter iter;
+	int i = 0;
+
+	memset(&ctls, 0, sizeof(ctls));
+
+	dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
+	while (_dpu_rm_get_hw_locked(rm, &iter)) {
+		const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
+		unsigned long features = ctl->caps->features;
+		bool has_split_display;
+
+		if (RESERVED_BY_OTHER(iter.blk, rsvp))
+			continue;
+
+		has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;
+
+		DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);
+
+		if (top->needs_split_display != has_split_display)
+			continue;
+
+		ctls[i] = iter.blk;
+		DPU_DEBUG("ctl %d match\n", iter.blk->id);
+
+		if (++i == top->num_ctl)
+			break;
+	}
+
+	if (i != top->num_ctl)
+		return -ENAVAIL;
+
+	for (i = 0; i < ARRAY_SIZE(ctls) && i < top->num_ctl; i++) {
+		ctls[i]->rsvp_nxt = rsvp;
+		trace_dpu_rm_reserve_ctls(ctls[i]->id, ctls[i]->type,
+					  rsvp->enc_id);
+	}
+
+	return 0;
+}
+
+static int _dpu_rm_reserve_cdm(
+		struct dpu_rm *rm,
+		struct dpu_rm_rsvp *rsvp,
+		uint32_t id,
+		enum dpu_hw_blk_type type)
+{
+	struct dpu_rm_hw_iter iter;
+
+	DRM_DEBUG_KMS("type %d id %d\n", type, id);
+
+	dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CDM);
+	while (_dpu_rm_get_hw_locked(rm, &iter)) {
+		const struct dpu_hw_cdm *cdm = to_dpu_hw_cdm(iter.blk->hw);
+		const struct dpu_cdm_cfg *caps = cdm->caps;
+		bool match = false;
+
+		if (RESERVED_BY_OTHER(iter.blk, rsvp))
+			continue;
+
+		if (type == DPU_HW_BLK_INTF && id != INTF_MAX)
+			match = test_bit(id, &caps->intf_connect);
+
+		DRM_DEBUG_KMS("iter: type:%d id:%d enc:%d cdm:%lu match:%d\n",
+			      iter.blk->type, iter.blk->id, rsvp->enc_id,
+			      caps->intf_connect, match);
+
+		if (!match)
+			continue;
+
+		trace_dpu_rm_reserve_cdm(iter.blk->id, iter.blk->type,
+					 rsvp->enc_id);
+		iter.blk->rsvp_nxt = rsvp;
+		break;
+	}
+
+	if (!iter.hw) {
+		DPU_ERROR("couldn't reserve cdm for type %d id %d\n", type, id);
+		return -ENAVAIL;
+	}
+
+	return 0;
+}
+
+static int _dpu_rm_reserve_intf(
+		struct dpu_rm *rm,
+		struct dpu_rm_rsvp *rsvp,
+		uint32_t id,
+		enum dpu_hw_blk_type type,
+		bool needs_cdm)
+{
+	struct dpu_rm_hw_iter iter;
+	int ret = 0;
+
+	/* Find the block entry in the rm, and note the reservation */
+	dpu_rm_init_hw_iter(&iter, 0, type);
+	while (_dpu_rm_get_hw_locked(rm, &iter)) {
+		if (iter.blk->id != id)
+			continue;
+
+		if (RESERVED_BY_OTHER(iter.blk, rsvp)) {
+			DPU_ERROR("type %d id %d already reserved\n", type, id);
+			return -ENAVAIL;
+		}
+
+		iter.blk->rsvp_nxt = rsvp;
+		trace_dpu_rm_reserve_intf(iter.blk->id, iter.blk->type,
+					  rsvp->enc_id);
+		break;
+	}
+
+	/* Shouldn't happen since intfs are fixed at probe */
+	if (!iter.hw) {
+		DPU_ERROR("couldn't find type %d id %d\n", type, id);
+		return -EINVAL;
+	}
+
+	if (needs_cdm)
+		ret = _dpu_rm_reserve_cdm(rm, rsvp, id, type);
+
+	return ret;
+}
+
+static int _dpu_rm_reserve_intf_related_hw(
+		struct dpu_rm *rm,
+		struct dpu_rm_rsvp *rsvp,
+		struct dpu_encoder_hw_resources *hw_res)
+{
+	int i, ret = 0;
+	u32 id;
+
+	for (i = 0; i < ARRAY_SIZE(hw_res->intfs); i++) {
+		if (hw_res->intfs[i] == INTF_MODE_NONE)
+			continue;
+		id = i + INTF_0;
+		ret = _dpu_rm_reserve_intf(rm, rsvp, id,
+				DPU_HW_BLK_INTF, hw_res->needs_cdm);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+static int _dpu_rm_make_next_rsvp(
+		struct dpu_rm *rm,
+		struct drm_encoder *enc,
+		struct drm_crtc_state *crtc_state,
+		struct drm_connector_state *conn_state,
+		struct dpu_rm_rsvp *rsvp,
+		struct dpu_rm_requirements *reqs)
+{
+	int ret;
+	struct dpu_rm_topology_def topology;
+
+	/* Create reservation info, tag reserved blocks with it as we go */
+	rsvp->seq = ++rm->rsvp_next_seq;
+	rsvp->enc_id = enc->base.id;
+	rsvp->topology = reqs->topology->top_name;
+	list_add_tail(&rsvp->list, &rm->rsvps);
+
+	ret = _dpu_rm_reserve_lms(rm, rsvp, reqs);
+	if (ret) {
+		DPU_ERROR("unable to find appropriate mixers\n");
+		return ret;
+	}
+
+	/*
+	 * Do assignment preferring to give away low-resource CTLs first:
+	 * - Check CTLs without Split Display
+	 * - Only then allow to grab from CTLs with split display capability
+	 */
+	ret = _dpu_rm_reserve_ctls(rm, rsvp, reqs->topology);
+	if (ret && !reqs->topology->needs_split_display) {
+		memcpy(&topology, reqs->topology, sizeof(topology));
+		topology.needs_split_display = true;
+		ret = _dpu_rm_reserve_ctls(rm, rsvp, &topology);
+	}
+	if (ret) {
+		DPU_ERROR("unable to find appropriate CTL\n");
+		return ret;
+	}
+
+	/* Assign INTFs and blks whose usage is tied to them: CTL & CDM */
+	ret = _dpu_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res);
+	if (ret)
+		return ret;
+
+	return ret;
+}
+
+static int _dpu_rm_populate_requirements(
+		struct dpu_rm *rm,
+		struct drm_encoder *enc,
+		struct drm_crtc_state *crtc_state,
+		struct drm_connector_state *conn_state,
+		struct dpu_rm_requirements *reqs,
+		struct msm_display_topology req_topology)
+{
+	int i;
+
+	memset(reqs, 0, sizeof(*reqs));
+
+	dpu_encoder_get_hw_resources(enc, &reqs->hw_res, conn_state);
+
+	for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++) {
+		if (RM_IS_TOPOLOGY_MATCH(g_top_table[i],
+					req_topology)) {
+			reqs->topology = &g_top_table[i];
+			break;
+		}
+	}
+
+	if (!reqs->topology) {
+		DPU_ERROR("invalid topology for the display\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Set the requirement based on caps if not set from user space
+	 * This ensures an LM tied to a DS block gets selected
+	 * Currently, DS blocks are tied to LM 0 and LM 1 (primary display)
+	 */
+	if (!RM_RQ_DS(reqs) && rm->hw_mdp->caps->has_dest_scaler &&
+		conn_state->connector->connector_type == DRM_MODE_CONNECTOR_DSI)
+		reqs->top_ctrl |= BIT(DPU_RM_TOPCTL_DS);
+
+	DRM_DEBUG_KMS("top_ctrl: 0x%llX num_h_tiles: %d\n", reqs->top_ctrl,
+		      reqs->hw_res.display_num_of_h_tiles);
+	DRM_DEBUG_KMS("num_lm: %d num_ctl: %d topology: %d split_display: %d\n",
+		      reqs->topology->num_lm, reqs->topology->num_ctl,
+		      reqs->topology->top_name,
+		      reqs->topology->needs_split_display);
+
+	return 0;
+}
+
+static struct dpu_rm_rsvp *_dpu_rm_get_rsvp(
+		struct dpu_rm *rm,
+		struct drm_encoder *enc)
+{
+	struct dpu_rm_rsvp *i;
+
+	if (!rm || !enc) {
+		DPU_ERROR("invalid params\n");
+		return NULL;
+	}
+
+	if (list_empty(&rm->rsvps))
+		return NULL;
+
+	list_for_each_entry(i, &rm->rsvps, list)
+		if (i->enc_id == enc->base.id)
+			return i;
+
+	return NULL;
+}
+
+static struct drm_connector *_dpu_rm_get_connector(
+		struct drm_encoder *enc)
+{
+	struct drm_connector *conn = NULL;
+	struct list_head *connector_list =
+			&enc->dev->mode_config.connector_list;
+
+	list_for_each_entry(conn, connector_list, head)
+		if (conn->encoder == enc)
+			return conn;
+
+	return NULL;
+}
+
+/**
+ * _dpu_rm_release_rsvp - release resources and release a reservation
+ * @rm:	KMS handle
+ * @rsvp:	RSVP pointer to release and release resources for
+ */
+static void _dpu_rm_release_rsvp(
+		struct dpu_rm *rm,
+		struct dpu_rm_rsvp *rsvp,
+		struct drm_connector *conn)
+{
+	struct dpu_rm_rsvp *rsvp_c, *rsvp_n;
+	struct dpu_rm_hw_blk *blk;
+	enum dpu_hw_blk_type type;
+
+	if (!rsvp)
+		return;
+
+	DPU_DEBUG("rel rsvp %d enc %d\n", rsvp->seq, rsvp->enc_id);
+
+	list_for_each_entry_safe(rsvp_c, rsvp_n, &rm->rsvps, list) {
+		if (rsvp == rsvp_c) {
+			list_del(&rsvp_c->list);
+			break;
+		}
+	}
+
+	for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+		list_for_each_entry(blk, &rm->hw_blks[type], list) {
+			if (blk->rsvp == rsvp) {
+				blk->rsvp = NULL;
+				DPU_DEBUG("rel rsvp %d enc %d %d %d\n",
+						rsvp->seq, rsvp->enc_id,
+						blk->type, blk->id);
+			}
+			if (blk->rsvp_nxt == rsvp) {
+				blk->rsvp_nxt = NULL;
+				DPU_DEBUG("rel rsvp_nxt %d enc %d %d %d\n",
+						rsvp->seq, rsvp->enc_id,
+						blk->type, blk->id);
+			}
+		}
+	}
+
+	kfree(rsvp);
+}
+
+void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
+{
+	struct dpu_rm_rsvp *rsvp;
+	struct drm_connector *conn;
+
+	if (!rm || !enc) {
+		DPU_ERROR("invalid params\n");
+		return;
+	}
+
+	mutex_lock(&rm->rm_lock);
+
+	rsvp = _dpu_rm_get_rsvp(rm, enc);
+	if (!rsvp) {
+		DPU_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
+		goto end;
+	}
+
+	conn = _dpu_rm_get_connector(enc);
+	if (!conn) {
+		DPU_ERROR("failed to get connector for enc %d\n", enc->base.id);
+		goto end;
+	}
+
+	_dpu_rm_release_rsvp(rm, rsvp, conn);
+end:
+	mutex_unlock(&rm->rm_lock);
+}
+
+static int _dpu_rm_commit_rsvp(
+		struct dpu_rm *rm,
+		struct dpu_rm_rsvp *rsvp,
+		struct drm_connector_state *conn_state)
+{
+	struct dpu_rm_hw_blk *blk;
+	enum dpu_hw_blk_type type;
+	int ret = 0;
+
+	/* Swap next rsvp to be the active */
+	for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+		list_for_each_entry(blk, &rm->hw_blks[type], list) {
+			if (blk->rsvp_nxt) {
+				blk->rsvp = blk->rsvp_nxt;
+				blk->rsvp_nxt = NULL;
+			}
+		}
+	}
+
+	if (!ret)
+		DRM_DEBUG_KMS("rsrv enc %d topology %d\n", rsvp->enc_id,
+			      rsvp->topology);
+
+	return ret;
+}
+
+int dpu_rm_reserve(
+		struct dpu_rm *rm,
+		struct drm_encoder *enc,
+		struct drm_crtc_state *crtc_state,
+		struct drm_connector_state *conn_state,
+		struct msm_display_topology topology,
+		bool test_only)
+{
+	struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
+	struct dpu_rm_requirements reqs;
+	int ret;
+
+	if (!rm || !enc || !crtc_state || !conn_state) {
+		DPU_ERROR("invalid arguments\n");
+		return -EINVAL;
+	}
+
+	/* Check if this is just a page-flip */
+	if (!drm_atomic_crtc_needs_modeset(crtc_state))
+		return 0;
+
+	DRM_DEBUG_KMS("reserving hw for conn %d enc %d crtc %d test_only %d\n",
+		      conn_state->connector->base.id, enc->base.id,
+		      crtc_state->crtc->base.id, test_only);
+
+	mutex_lock(&rm->rm_lock);
+
+	_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_BEGIN);
+
+	ret = _dpu_rm_populate_requirements(rm, enc, crtc_state,
+			conn_state, &reqs, topology);
+	if (ret) {
+		DPU_ERROR("failed to populate hw requirements\n");
+		goto end;
+	}
+
+	/*
+	 * We only support one active reservation per-hw-block. But to implement
+	 * transactional semantics for test-only, and for allowing failure while
+	 * modifying your existing reservation, over the course of this
+	 * function we can have two reservations:
+	 * Current: Existing reservation
+	 * Next: Proposed reservation. The proposed reservation may fail, or may
+	 *       be discarded if in test-only mode.
+	 * If reservation is successful, and we're not in test-only, then we
+	 * replace the current with the next.
+	 */
+	rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL);
+	if (!rsvp_nxt) {
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	rsvp_cur = _dpu_rm_get_rsvp(rm, enc);
+
+	/*
+	 * User can request that we clear out any reservation during the
+	 * atomic_check phase by using this CLEAR bit
+	 */
+	if (rsvp_cur && test_only && RM_RQ_CLEAR(&reqs)) {
+		DPU_DEBUG("test_only & CLEAR: clear rsvp[s%de%d]\n",
+				rsvp_cur->seq, rsvp_cur->enc_id);
+		_dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
+		rsvp_cur = NULL;
+		_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_CLEAR);
+	}
+
+	/* Check the proposed reservation, store it in hw's "next" field */
+	ret = _dpu_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
+			rsvp_nxt, &reqs);
+
+	_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_RSVPNEXT);
+
+	if (ret) {
+		DPU_ERROR("failed to reserve hw resources: %d\n", ret);
+		_dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
+	} else if (test_only && !RM_RQ_LOCK(&reqs)) {
+		/*
+		 * Normally, if test_only, test the reservation and then undo
+		 * However, if the user requests LOCK, then keep the reservation
+		 * made during the atomic_check phase.
+		 */
+		DPU_DEBUG("test_only: discard test rsvp[s%de%d]\n",
+				rsvp_nxt->seq, rsvp_nxt->enc_id);
+		_dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
+	} else {
+		if (test_only && RM_RQ_LOCK(&reqs))
+			DPU_DEBUG("test_only & LOCK: lock rsvp[s%de%d]\n",
+					rsvp_nxt->seq, rsvp_nxt->enc_id);
+
+		_dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
+
+		ret = _dpu_rm_commit_rsvp(rm, rsvp_nxt, conn_state);
+	}
+
+	_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_FINAL);
+
+end:
+	mutex_unlock(&rm->rm_lock);
+
+	return ret;
+}
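+
+/*
+ * Expected flow sketch (editor's illustration, not part of this patch):
+ * the encoder's atomic_check path reserves with test_only = true so the
+ * proposal is validated and then discarded, and the modeset path repeats
+ * the call with test_only = false to commit the reservation:
+ *
+ *	ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state,
+ *			     conn_state, topology, true);
+ */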
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
new file mode 100644
index 000000000000..ef3f67bedaa0
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DPU_RM_H__
+#define __DPU_RM_H__
+
+#include <linux/list.h>
+
+#include "msm_kms.h"
+#include "dpu_hw_top.h"
+
+/**
+ * enum dpu_rm_topology_name - HW resource use case in use by connector
+ * @DPU_RM_TOPOLOGY_NONE:                 No topology in use currently
+ * @DPU_RM_TOPOLOGY_SINGLEPIPE:           1 LM, 1 PP, 1 INTF/WB
+ * @DPU_RM_TOPOLOGY_DUALPIPE:             2 LM, 2 PP, 2 INTF/WB
+ * @DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE:     2 LM, 2 PP, 3DMux, 1 INTF/WB
+ */
+enum dpu_rm_topology_name {
+	DPU_RM_TOPOLOGY_NONE = 0,
+	DPU_RM_TOPOLOGY_SINGLEPIPE,
+	DPU_RM_TOPOLOGY_DUALPIPE,
+	DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE,
+	DPU_RM_TOPOLOGY_MAX,
+};
+
+/**
+ * enum dpu_rm_topology_control - topology control preferences from the client
+ * @DPU_RM_TOPCTL_RESERVE_LOCK: If set, in AtomicTest phase, after a successful
+ *                              test, reserve the resources for this display.
+ *                              Normal behavior would not impact the reservation
+ *                              list during the AtomicTest phase.
+ * @DPU_RM_TOPCTL_RESERVE_CLEAR: If set, in AtomicTest phase, before testing,
+ *                               release any reservation held by this display.
+ *                               Normal behavior would not impact the
+ *                               reservation list during the AtomicTest phase.
+ * @DPU_RM_TOPCTL_DS: Require layer mixers with DS capabilities
+ */
+enum dpu_rm_topology_control {
+	DPU_RM_TOPCTL_RESERVE_LOCK,
+	DPU_RM_TOPCTL_RESERVE_CLEAR,
+	DPU_RM_TOPCTL_DS,
+};
+
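+/*
+ * These enumerators are bit positions: clients set them in a top_ctrl
+ * bitmask, which the resource manager tests with its RM_RQ_* macros.
+ * Sketch (editor's illustration, not part of this patch):
+ *
+ *	top_ctrl |= BIT(DPU_RM_TOPCTL_RESERVE_LOCK);
+ */
+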
+/**
+ * struct dpu_rm - DPU dynamic hardware resource manager
+ * @dev: device handle for event logging purposes
+ * @rsvps: list of hardware reservations by each crtc->encoder->connector
+ * @hw_blks: array of lists of hardware resources present in the system, one
+ *	list per type of hardware block
+ * @hw_mdp: hardware object for mdp_top
+ * @lm_max_width: cached layer mixer maximum width
+ * @rsvp_next_seq: sequence number for next reservation for debugging purposes
+ * @rm_lock: resource manager mutex
+ */
+struct dpu_rm {
+	struct drm_device *dev;
+	struct list_head rsvps;
+	struct list_head hw_blks[DPU_HW_BLK_MAX];
+	struct dpu_hw_mdp *hw_mdp;
+	uint32_t lm_max_width;
+	uint32_t rsvp_next_seq;
+	struct mutex rm_lock;
+};
+
+/**
+ * struct dpu_rm_hw_blk - resource manager internal structure
+ *	forward declared so the iterator below can be defined without a
+ *	void pointer
+ */
+struct dpu_rm_hw_blk;
+
+/**
+ * struct dpu_rm_hw_iter - iterator for use with dpu_rm
+ * @hw: dpu_hw object requested, or NULL on failure
+ * @blk: dpu_rm internal block representation; clients should ignore it, it
+ *	serves as the iterator cursor
+ * @enc_id: DRM ID of the encoder the client wishes to search for, or 0 for
+ *	any encoder
+ * @type: hardware block type the client wishes to search for
+ */
+struct dpu_rm_hw_iter {
+	void *hw;
+	struct dpu_rm_hw_blk *blk;
+	uint32_t enc_id;
+	enum dpu_hw_blk_type type;
+};
+
+/**
+ * dpu_rm_init - Read hardware catalog and create reservation tracking objects
+ *	for all HW blocks.
+ * @rm: DPU Resource Manager handle
+ * @cat: Pointer to hardware catalog
+ * @mmio: mapped register io address of MDP
+ * @dev: device handle for event logging purposes
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_init(struct dpu_rm *rm,
+		struct dpu_mdss_cfg *cat,
+		void *mmio,
+		struct drm_device *dev);
+
+/**
+ * dpu_rm_destroy - Free all memory allocated by dpu_rm_init
+ * @rm: DPU Resource Manager handle
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_destroy(struct dpu_rm *rm);
+
+/**
+ * dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze
+ *	the connected displays and user requirements, specified through the
+ *	related topology control properties, and reserve hardware blocks for
+ *	that display chain.
+ *	HW blocks can then be accessed through the dpu_rm_get_* functions.
+ *	HW reservations should be released via dpu_rm_release.
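 *
 *	For example, an encoder's atomic_check might test and reserve (an
 *	illustrative sketch):
 *
 *		ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state,
 *				conn_state, topology, true);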
+ * @rm: DPU Resource Manager handle
+ * @drm_enc: DRM Encoder handle
+ * @crtc_state: Proposed Atomic DRM CRTC State handle
+ * @conn_state: Proposed Atomic DRM Connector State handle
+ * @topology: Pointer to topology info for the display
+ * @test_only: Atomic-Test phase, discard results (unless property overrides)
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_reserve(struct dpu_rm *rm,
+		struct drm_encoder *drm_enc,
+		struct drm_crtc_state *crtc_state,
+		struct drm_connector_state *conn_state,
+		struct msm_display_topology topology,
+		bool test_only);
+
+/**
+ * dpu_rm_release - Given the encoder for the display chain, release any
+ *	HW blocks previously reserved for that use case.
+ * @rm: DPU Resource Manager handle
+ * @enc: DRM Encoder handle
+ */
+void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc);
+
+/**
+ * dpu_rm_get_mdp - Retrieve HW block for MDP TOP.
+ *	This is never reserved, and is usable by any display.
+ * @rm: DPU Resource Manager handle
+ * @Return: Pointer to hw block or NULL
+ */
+struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm);
+
+/**
+ * dpu_rm_init_hw_iter - setup given iterator for new iteration over hw list
+ *	using dpu_rm_get_hw
+ * @iter: iter object to initialize
+ * @enc_id: DRM ID of the encoder the client wishes to search for, or 0 for
+ *	any encoder
+ * @type: hardware block type the client wishes to search for
+ */
+void dpu_rm_init_hw_iter(
+		struct dpu_rm_hw_iter *iter,
+		uint32_t enc_id,
+		enum dpu_hw_blk_type type);
+
+/**
+ * dpu_rm_get_hw - retrieve reserved hw object given encoder and hw type
+ *	Meant to do a single pass through the hardware list to iteratively
+ *	retrieve hardware blocks of a given type for a given encoder.
+ *	Initialize the iterator with the hw block type of interest and the
+ *	encoder id of interest (0 for any encoder). The first call returns
+ *	the first reserved hw of that type for that encoder; subsequent
+ *	calls return the remaining reserved hw of that type, in order. The
+ *	iterator's hw pointer is NULL when no (further) matching hw is found.
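 *
 *	Example usage (an illustrative sketch; it assumes LM blocks were
 *	reserved for this encoder via dpu_rm_reserve):
 *
 *		struct dpu_rm_hw_iter iter;
 *		struct dpu_hw_mixer *lm;
 *
 *		dpu_rm_init_hw_iter(&iter, drm_enc->base.id, DPU_HW_BLK_LM);
 *		while (dpu_rm_get_hw(rm, &iter)) {
 *			lm = iter.hw;
 *			... program the mixer ...
 *		}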
+ * @rm: DPU Resource Manager handle
+ * @iter: iterator object
+ * @Return: true on match found, false on no match found
+ */
+bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter);
+
+/**
+ * dpu_rm_check_property_topctl - validate property bitmask before it is set
+ * @val: user's proposed topology control bitmask
+ * @Return: 0 on success or error
+ */
+int dpu_rm_check_property_topctl(uint64_t val);
+
+/**
+ * dpu_rm_get_topology_name - returns the name of the given topology
+ *                            definition
+ * @topology: topology definition
+ * @Return: name of the topology
+ */
+enum dpu_rm_topology_name
+dpu_rm_get_topology_name(struct msm_display_topology topology);
+
+#endif /* __DPU_RM_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
new file mode 100644
index 000000000000..76efc690cce3
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -0,0 +1,1011 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(_DPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _DPU_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drm_rect.h>
+#include "dpu_crtc.h"
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_plane.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dpu
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE dpu_trace
+
+TRACE_EVENT(dpu_perf_set_qos_luts,
+	TP_PROTO(u32 pnum, u32 fmt, bool rt, u32 fl,
+		u64 lut, u32 lut_usage),
+	TP_ARGS(pnum, fmt, rt, fl, lut, lut_usage),
+	TP_STRUCT__entry(
+			__field(u32, pnum)
+			__field(u32, fmt)
+			__field(bool, rt)
+			__field(u32, fl)
+			__field(u64, lut)
+			__field(u32, lut_usage)
+	),
+	TP_fast_assign(
+			__entry->pnum = pnum;
+			__entry->fmt = fmt;
+			__entry->rt = rt;
+			__entry->fl = fl;
+			__entry->lut = lut;
+			__entry->lut_usage = lut_usage;
+	),
+	TP_printk("pnum=%d fmt=%x rt=%d fl=%d lut=0x%llx lut_usage=%d",
+			__entry->pnum, __entry->fmt,
+			__entry->rt, __entry->fl,
+			__entry->lut, __entry->lut_usage)
+);
+
+TRACE_EVENT(dpu_perf_set_danger_luts,
+	TP_PROTO(u32 pnum, u32 fmt, u32 mode, u32 danger_lut,
+		u32 safe_lut),
+	TP_ARGS(pnum, fmt, mode, danger_lut, safe_lut),
+	TP_STRUCT__entry(
+			__field(u32, pnum)
+			__field(u32, fmt)
+			__field(u32, mode)
+			__field(u32, danger_lut)
+			__field(u32, safe_lut)
+	),
+	TP_fast_assign(
+			__entry->pnum = pnum;
+			__entry->fmt = fmt;
+			__entry->mode = mode;
+			__entry->danger_lut = danger_lut;
+			__entry->safe_lut = safe_lut;
+	),
+	TP_printk("pnum=%d fmt=%x mode=%d luts[0x%x, 0x%x]",
+			__entry->pnum, __entry->fmt,
+			__entry->mode, __entry->danger_lut,
+			__entry->safe_lut)
+);
+
+TRACE_EVENT(dpu_perf_set_ot,
+	TP_PROTO(u32 pnum, u32 xin_id, u32 rd_lim, u32 vbif_idx),
+	TP_ARGS(pnum, xin_id, rd_lim, vbif_idx),
+	TP_STRUCT__entry(
+			__field(u32, pnum)
+			__field(u32, xin_id)
+			__field(u32, rd_lim)
+			__field(u32, vbif_idx)
+	),
+	TP_fast_assign(
+			__entry->pnum = pnum;
+			__entry->xin_id = xin_id;
+			__entry->rd_lim = rd_lim;
+			__entry->vbif_idx = vbif_idx;
+	),
+	TP_printk("pnum:%d xin_id:%d ot:%d vbif:%d",
+			__entry->pnum, __entry->xin_id, __entry->rd_lim,
+			__entry->vbif_idx)
+)
+
+TRACE_EVENT(dpu_perf_update_bus,
+	TP_PROTO(int client, unsigned long long ab_quota,
+	unsigned long long ib_quota),
+	TP_ARGS(client, ab_quota, ib_quota),
+	TP_STRUCT__entry(
+			__field(int, client)
+			__field(u64, ab_quota)
+			__field(u64, ib_quota)
+	),
+	TP_fast_assign(
+			__entry->client = client;
+			__entry->ab_quota = ab_quota;
+			__entry->ib_quota = ib_quota;
+	),
+	TP_printk("Request client:%d ab=%llu ib=%llu",
+			__entry->client,
+			__entry->ab_quota,
+			__entry->ib_quota)
+)
+
+
+TRACE_EVENT(dpu_cmd_release_bw,
+	TP_PROTO(u32 crtc_id),
+	TP_ARGS(crtc_id),
+	TP_STRUCT__entry(
+			__field(u32, crtc_id)
+	),
+	TP_fast_assign(
+			__entry->crtc_id = crtc_id;
+	),
+	TP_printk("crtc:%d", __entry->crtc_id)
+);
+
+TRACE_EVENT(tracing_mark_write,
+	TP_PROTO(int pid, const char *name, bool trace_begin),
+	TP_ARGS(pid, name, trace_begin),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(trace_name, name)
+			__field(bool, trace_begin)
+	),
+	TP_fast_assign(
+			__entry->pid = pid;
+			__assign_str(trace_name, name);
+			__entry->trace_begin = trace_begin;
+	),
+	TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
+		__entry->pid, __get_str(trace_name))
+)
+
+TRACE_EVENT(dpu_trace_counter,
+	TP_PROTO(int pid, const char *name, int value),
+	TP_ARGS(pid, name, value),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(counter_name, name)
+			__field(int, value)
+	),
+	TP_fast_assign(
+			__entry->pid = pid;
+			__assign_str(counter_name, name);
+			__entry->value = value;
+	),
+	TP_printk("%d|%s|%d", __entry->pid,
+			__get_str(counter_name), __entry->value)
+)
+
+TRACE_EVENT(dpu_perf_crtc_update,
+	TP_PROTO(u32 crtc, u64 bw_ctl_mnoc, u64 bw_ctl_llcc,
+			u64 bw_ctl_ebi, u32 core_clk_rate,
+			bool stop_req, u32 update_bus, u32 update_clk),
+	TP_ARGS(crtc, bw_ctl_mnoc, bw_ctl_llcc, bw_ctl_ebi, core_clk_rate,
+		stop_req, update_bus, update_clk),
+	TP_STRUCT__entry(
+			__field(u32, crtc)
+			__field(u64, bw_ctl_mnoc)
+			__field(u64, bw_ctl_llcc)
+			__field(u64, bw_ctl_ebi)
+			__field(u32, core_clk_rate)
+			__field(bool, stop_req)
+			__field(u32, update_bus)
+			__field(u32, update_clk)
+	),
+	TP_fast_assign(
+			__entry->crtc = crtc;
+			__entry->bw_ctl_mnoc = bw_ctl_mnoc;
+			__entry->bw_ctl_llcc = bw_ctl_llcc;
+			__entry->bw_ctl_ebi = bw_ctl_ebi;
+			__entry->core_clk_rate = core_clk_rate;
+			__entry->stop_req = stop_req;
+			__entry->update_bus = update_bus;
+			__entry->update_clk = update_clk;
+	),
+	TP_printk(
+		"crtc=%d bw_mnoc=%llu bw_llcc=%llu bw_ebi=%llu clk_rate=%u stop_req=%d u_bus=%d u_clk=%d",
+			__entry->crtc,
+			__entry->bw_ctl_mnoc,
+			__entry->bw_ctl_llcc,
+			__entry->bw_ctl_ebi,
+			__entry->core_clk_rate,
+			__entry->stop_req,
+			__entry->update_bus,
+			__entry->update_clk)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_irq_template,
+	TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+		 int irq_idx),
+	TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	enum dpu_intr_idx,	intr_idx	)
+		__field(	int,			hw_idx		)
+		__field(	int,			irq_idx		)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->intr_idx = intr_idx;
+		__entry->hw_idx = hw_idx;
+		__entry->irq_idx = irq_idx;
+	),
+	TP_printk("id=%u, intr=%d, hw=%d, irq=%d",
+		  __entry->drm_id, __entry->intr_idx, __entry->hw_idx,
+		  __entry->irq_idx)
+);
+DEFINE_EVENT(dpu_enc_irq_template, dpu_enc_irq_register_success,
+	TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+		 int irq_idx),
+	TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx)
+);
+DEFINE_EVENT(dpu_enc_irq_template, dpu_enc_irq_unregister_success,
+	TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+		 int irq_idx),
+	TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx)
+);
+
+TRACE_EVENT(dpu_enc_irq_wait_success,
+	TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+		 int irq_idx, enum dpu_pingpong pp_idx, int atomic_cnt),
+	TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx, pp_idx, atomic_cnt),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	enum dpu_intr_idx,	intr_idx	)
+		__field(	int,			hw_idx		)
+		__field(	int,			irq_idx		)
+		__field(	enum dpu_pingpong,	pp_idx		)
+		__field(	int,			atomic_cnt	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->intr_idx = intr_idx;
+		__entry->hw_idx = hw_idx;
+		__entry->irq_idx = irq_idx;
+		__entry->pp_idx = pp_idx;
+		__entry->atomic_cnt = atomic_cnt;
+	),
+	TP_printk("id=%u, intr=%d, hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+		  __entry->drm_id, __entry->intr_idx, __entry->hw_idx,
+		  __entry->irq_idx, __entry->pp_idx, __entry->atomic_cnt)
+);
+
+DECLARE_EVENT_CLASS(dpu_drm_obj_template,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+	),
+	TP_printk("id=%u", __entry->drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_atomic_check,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_mode_set,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_disable,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_kickoff,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_prepare_kickoff,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_prepare_kickoff_reset,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_complete_flip,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_vblank_cb,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_complete_commit,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_enc_enable,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_commit,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_wait_for_commit_done,
+	TP_PROTO(uint32_t drm_id),
+	TP_ARGS(drm_id)
+);
+
+TRACE_EVENT(dpu_enc_enable,
+	TP_PROTO(uint32_t drm_id, int hdisplay, int vdisplay),
+	TP_ARGS(drm_id, hdisplay, vdisplay),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	int,			hdisplay	)
+		__field(	int,			vdisplay	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->hdisplay = hdisplay;
+		__entry->vdisplay = vdisplay;
+	),
+	TP_printk("id=%u, mode=%dx%d",
+		  __entry->drm_id, __entry->hdisplay, __entry->vdisplay)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_keyval_template,
+	TP_PROTO(uint32_t drm_id, int val),
+	TP_ARGS(drm_id, val),
+	TP_STRUCT__entry(
+		__field(	uint32_t,	drm_id	)
+		__field(	int,		val	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->val = val;
+	),
+	TP_printk("id=%u, val=%d", __entry->drm_id, __entry->val)
+);
+DEFINE_EVENT(dpu_enc_keyval_template, dpu_enc_underrun_cb,
+	TP_PROTO(uint32_t drm_id, int count),
+	TP_ARGS(drm_id, count)
+);
+DEFINE_EVENT(dpu_enc_keyval_template, dpu_enc_trigger_start,
+	TP_PROTO(uint32_t drm_id, int ctl_idx),
+	TP_ARGS(drm_id, ctl_idx)
+);
+
+TRACE_EVENT(dpu_enc_atomic_check_flags,
+	TP_PROTO(uint32_t drm_id, unsigned int flags, int private_flags),
+	TP_ARGS(drm_id, flags, private_flags),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	unsigned int,		flags		)
+		__field(	int,			private_flags	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->flags = flags;
+		__entry->private_flags = private_flags;
+	),
+	TP_printk("id=%u, flags=%u, private_flags=%d",
+		  __entry->drm_id, __entry->flags, __entry->private_flags)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_id_enable_template,
+	TP_PROTO(uint32_t drm_id, bool enable),
+	TP_ARGS(drm_id, enable),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	bool,			enable		)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->enable = enable;
+	),
+	TP_printk("id=%u, enable=%s",
+		  __entry->drm_id, __entry->enable ? "true" : "false")
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_rc_helper,
+	TP_PROTO(uint32_t drm_id, bool enable),
+	TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_vblank_cb,
+	TP_PROTO(uint32_t drm_id, bool enable),
+	TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_frame_event_cb,
+	TP_PROTO(uint32_t drm_id, bool enable),
+	TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_phys_cmd_connect_te,
+	TP_PROTO(uint32_t drm_id, bool enable),
+	TP_ARGS(drm_id, enable)
+);
+
+TRACE_EVENT(dpu_enc_rc,
+	TP_PROTO(uint32_t drm_id, u32 sw_event, bool idle_pc_supported,
+		 int rc_state, const char *stage),
+	TP_ARGS(drm_id, sw_event, idle_pc_supported, rc_state, stage),
+	TP_STRUCT__entry(
+		__field(	uint32_t,	drm_id			)
+		__field(	u32,		sw_event		)
+		__field(	bool,		idle_pc_supported	)
+		__field(	int,		rc_state		)
+		__string(	stage_str,	stage			)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->sw_event = sw_event;
+		__entry->idle_pc_supported = idle_pc_supported;
+		__entry->rc_state = rc_state;
+		__assign_str(stage_str, stage);
+	),
+	TP_printk("%s: id:%u, sw_event:%d, idle_pc_supported:%s, rc_state:%d\n",
+		  __get_str(stage_str), __entry->drm_id, __entry->sw_event,
+		  __entry->idle_pc_supported ? "true" : "false",
+		  __entry->rc_state)
+);
+
+TRACE_EVENT(dpu_enc_frame_done_cb_not_busy,
+	TP_PROTO(uint32_t drm_id, u32 event, enum dpu_intf intf_idx),
+	TP_ARGS(drm_id, event, intf_idx),
+	TP_STRUCT__entry(
+		__field(	uint32_t,	drm_id		)
+		__field(	u32,		event		)
+		__field(	enum dpu_intf,	intf_idx	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->event = event;
+		__entry->intf_idx = intf_idx;
+	),
+	TP_printk("id=%u, event=%u, intf=%d", __entry->drm_id, __entry->event,
+		  __entry->intf_idx)
+);
+
+TRACE_EVENT(dpu_enc_frame_done_cb,
+	TP_PROTO(uint32_t drm_id, unsigned int idx,
+		 unsigned long frame_busy_mask),
+	TP_ARGS(drm_id, idx, frame_busy_mask),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	unsigned int,		idx		)
+		__field(	unsigned long,		frame_busy_mask	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->idx = idx;
+		__entry->frame_busy_mask = frame_busy_mask;
+	),
+	TP_printk("id=%u, idx=%u, frame_busy_mask=%lx", __entry->drm_id,
+		  __entry->idx, __entry->frame_busy_mask)
+);
+
+TRACE_EVENT(dpu_enc_trigger_flush,
+	TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx,
+		 int pending_kickoff_cnt, int ctl_idx, u32 pending_flush_ret),
+	TP_ARGS(drm_id, intf_idx, pending_kickoff_cnt, ctl_idx,
+		pending_flush_ret),
+	TP_STRUCT__entry(
+		__field(	uint32_t,	drm_id			)
+		__field(	enum dpu_intf,	intf_idx		)
+		__field(	int,		pending_kickoff_cnt	)
+		__field(	int,		ctl_idx			)
+		__field(	u32,		pending_flush_ret	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->intf_idx = intf_idx;
+		__entry->pending_kickoff_cnt = pending_kickoff_cnt;
+		__entry->ctl_idx = ctl_idx;
+		__entry->pending_flush_ret = pending_flush_ret;
+	),
+	TP_printk("id=%u, intf_idx=%d, pending_kickoff_cnt=%d ctl_idx=%d "
+		  "pending_flush_ret=%u", __entry->drm_id,
+		  __entry->intf_idx, __entry->pending_kickoff_cnt,
+		  __entry->ctl_idx, __entry->pending_flush_ret)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_ktime_template,
+	TP_PROTO(uint32_t drm_id, ktime_t time),
+	TP_ARGS(drm_id, time),
+	TP_STRUCT__entry(
+		__field(	uint32_t,	drm_id	)
+		__field(	ktime_t,	time	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->time = time;
+	),
+	TP_printk("id=%u, time=%lld", __entry->drm_id,
+		  ktime_to_ms(__entry->time))
+);
+DEFINE_EVENT(dpu_enc_ktime_template, dpu_enc_vsync_event_work,
+	TP_PROTO(uint32_t drm_id, ktime_t time),
+	TP_ARGS(drm_id, time)
+);
+DEFINE_EVENT(dpu_enc_ktime_template, dpu_enc_early_kickoff,
+	TP_PROTO(uint32_t drm_id, ktime_t time),
+	TP_ARGS(drm_id, time)
+);
+
+DECLARE_EVENT_CLASS(dpu_id_event_template,
+	TP_PROTO(uint32_t drm_id, u32 event),
+	TP_ARGS(drm_id, event),
+	TP_STRUCT__entry(
+		__field(	uint32_t,	drm_id	)
+		__field(	u32,		event	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->event = event;
+	),
+	TP_printk("id=%u, event=%u", __entry->drm_id, __entry->event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_enc_frame_done_timeout,
+	TP_PROTO(uint32_t drm_id, u32 event),
+	TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_cb,
+	TP_PROTO(uint32_t drm_id, u32 event),
+	TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_handle_power_event,
+	TP_PROTO(uint32_t drm_id, u32 event),
+	TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_done,
+	TP_PROTO(uint32_t drm_id, u32 event),
+	TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_more_pending,
+	TP_PROTO(uint32_t drm_id, u32 event),
+	TP_ARGS(drm_id, event)
+);
+
+TRACE_EVENT(dpu_enc_wait_event_timeout,
+	TP_PROTO(uint32_t drm_id, int32_t hw_id, int rc, s64 time,
+		 s64 expected_time, int atomic_cnt),
+	TP_ARGS(drm_id, hw_id, rc, time, expected_time, atomic_cnt),
+	TP_STRUCT__entry(
+		__field(	uint32_t,	drm_id		)
+		__field(	int32_t,	hw_id		)
+		__field(	int,		rc		)
+		__field(	s64,		time		)
+		__field(	s64,		expected_time	)
+		__field(	int,		atomic_cnt	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->hw_id = hw_id;
+		__entry->rc = rc;
+		__entry->time = time;
+		__entry->expected_time = expected_time;
+		__entry->atomic_cnt = atomic_cnt;
+	),
+	TP_printk("id=%u, hw_id=%d, rc=%d, time=%lld, expected=%lld cnt=%d",
+		  __entry->drm_id, __entry->hw_id, __entry->rc, __entry->time,
+		  __entry->expected_time, __entry->atomic_cnt)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_irq_ctrl,
+	TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, bool enable,
+		 int refcnt),
+	TP_ARGS(drm_id, pp, enable, refcnt),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id	)
+		__field(	enum dpu_pingpong,	pp	)
+		__field(	bool,			enable	)
+		__field(	int,			refcnt	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->pp = pp;
+		__entry->enable = enable;
+		__entry->refcnt = refcnt;
+	),
+	TP_printk("id=%u, pp=%d, enable=%s, refcnt=%d", __entry->drm_id,
+		  __entry->pp, __entry->enable ? "true" : "false",
+		  __entry->refcnt)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_pp_tx_done,
+	TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, int new_count,
+		 u32 event),
+	TP_ARGS(drm_id, pp, new_count, event),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	enum dpu_pingpong,	pp		)
+		__field(	int,			new_count	)
+		__field(	u32,			event		)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->pp = pp;
+		__entry->new_count = new_count;
+		__entry->event = event;
+	),
+	TP_printk("id=%u, pp=%d, new_count=%d, event=%u", __entry->drm_id,
+		  __entry->pp, __entry->new_count, __entry->event)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_pdone_timeout,
+	TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, int timeout_count,
+		 int kickoff_count, u32 event),
+	TP_ARGS(drm_id, pp, timeout_count, kickoff_count, event),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	enum dpu_pingpong,	pp		)
+		__field(	int,			timeout_count	)
+		__field(	int,			kickoff_count	)
+		__field(	u32,			event		)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->pp = pp;
+		__entry->timeout_count = timeout_count;
+		__entry->kickoff_count = kickoff_count;
+		__entry->event = event;
+	),
+	TP_printk("id=%u, pp=%d, timeout_count=%d, kickoff_count=%d, event=%u",
+		  __entry->drm_id, __entry->pp, __entry->timeout_count,
+		  __entry->kickoff_count, __entry->event)
+);
+
+TRACE_EVENT(dpu_enc_phys_vid_post_kickoff,
+	TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx),
+	TP_ARGS(drm_id, intf_idx),
+	TP_STRUCT__entry(
+		__field(	uint32_t,	drm_id			)
+		__field(	enum dpu_intf,	intf_idx		)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->intf_idx = intf_idx;
+	),
+	TP_printk("id=%u, intf_idx=%d", __entry->drm_id, __entry->intf_idx)
+);
+
+TRACE_EVENT(dpu_enc_phys_vid_irq_ctrl,
+	TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx, bool enable,
+		 int refcnt),
+	TP_ARGS(drm_id, intf_idx, enable, refcnt),
+	TP_STRUCT__entry(
+		__field(	uint32_t,	drm_id		)
+		__field(	enum dpu_intf,	intf_idx	)
+		__field(	bool,		enable		)
+		__field(	int,		refcnt		)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->intf_idx = intf_idx;
+		__entry->enable = enable;
+		__entry->refcnt = refcnt;
+	),
+	TP_printk("id=%u, intf_idx=%d enable=%s refcnt=%d", __entry->drm_id,
+		  __entry->intf_idx, __entry->enable ? "true" : "false",
+		  __entry->drm_id)
+);
+
+TRACE_EVENT(dpu_crtc_setup_mixer,
+	TP_PROTO(uint32_t crtc_id, uint32_t plane_id,
+		 struct drm_plane_state *state, struct dpu_plane_state *pstate,
+		 uint32_t stage_idx, enum dpu_sspp sspp, uint32_t pixel_format,
+		 uint64_t modifier),
+	TP_ARGS(crtc_id, plane_id, state, pstate, stage_idx, sspp,
+		pixel_format, modifier),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		crtc_id		)
+		__field(	uint32_t,		plane_id	)
+		__field(	struct drm_plane_state*,state		)
+		__field(	struct dpu_plane_state*,pstate		)
+		__field(	uint32_t,		stage_idx	)
+		__field(	enum dpu_sspp,		sspp		)
+		__field(	uint32_t,		pixel_format	)
+		__field(	uint64_t,		modifier	)
+	),
+	TP_fast_assign(
+		__entry->crtc_id = crtc_id;
+		__entry->plane_id = plane_id;
+		__entry->state = state;
+		__entry->pstate = pstate;
+		__entry->stage_idx = stage_idx;
+		__entry->sspp = sspp;
+		__entry->pixel_format = pixel_format;
+		__entry->modifier = modifier;
+	),
+	TP_printk("crtc_id:%u plane_id:%u fb_id:%u src:{%ux%u+%ux%u} "
+		  "dst:{%ux%u+%ux%u} stage_idx:%u stage:%d, sspp:%d "
+		  "multirect_index:%d multirect_mode:%u pix_format:%u "
+		  "modifier:%llu",
+		  __entry->crtc_id, __entry->plane_id,
+		  __entry->state->fb ? __entry->state->fb->base.id : -1,
+		  __entry->state->src_w >> 16,  __entry->state->src_h >> 16,
+		  __entry->state->src_x >> 16,  __entry->state->src_y >> 16,
+		  __entry->state->crtc_w,  __entry->state->crtc_h,
+		  __entry->state->crtc_x,  __entry->state->crtc_y,
+		  __entry->stage_idx, __entry->pstate->stage, __entry->sspp,
+		  __entry->pstate->multirect_index,
+		  __entry->pstate->multirect_mode, __entry->pixel_format,
+		  __entry->modifier)
+);
+
+TRACE_EVENT(dpu_crtc_setup_lm_bounds,
+	TP_PROTO(uint32_t drm_id, int mixer, struct drm_rect *bounds),
+	TP_ARGS(drm_id, mixer, bounds),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id	)
+		__field(	int,			mixer	)
+		__field(	struct drm_rect *,	bounds	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->mixer = mixer;
+		__entry->bounds = bounds;
+	),
+	TP_printk("id:%u mixer:%d bounds:" DRM_RECT_FMT, __entry->drm_id,
+		  __entry->mixer, DRM_RECT_ARG(__entry->bounds))
+);
+
+TRACE_EVENT(dpu_crtc_vblank_enable,
+	TP_PROTO(uint32_t drm_id, uint32_t enc_id, bool enable,
+		 struct dpu_crtc *crtc),
+	TP_ARGS(drm_id, enc_id, enable, crtc),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id	)
+		__field(	uint32_t,		enc_id	)
+		__field(	bool,			enable	)
+		__field(	struct dpu_crtc *,	crtc	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->enc_id = enc_id;
+		__entry->enable = enable;
+		__entry->crtc = crtc;
+	),
+	TP_printk("id:%u encoder:%u enable:%s state{enabled:%s suspend:%s "
+		  "vblank_req:%s}",
+		  __entry->drm_id, __entry->enc_id,
+		  __entry->enable ? "true" : "false",
+		  __entry->crtc->enabled ? "true" : "false",
+		  __entry->crtc->suspend ? "true" : "false",
+		  __entry->crtc->vblank_requested ? "true" : "false")
+);
+
+DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
+	TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+	TP_ARGS(drm_id, enable, crtc),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id	)
+		__field(	bool,			enable	)
+		__field(	struct dpu_crtc *,	crtc	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->enable = enable;
+		__entry->crtc = crtc;
+	),
+	TP_printk("id:%u enable:%s state{enabled:%s suspend:%s vblank_req:%s}",
+		  __entry->drm_id, __entry->enable ? "true" : "false",
+		  __entry->crtc->enabled ? "true" : "false",
+		  __entry->crtc->suspend ? "true" : "false",
+		  __entry->crtc->vblank_requested ? "true" : "false")
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_set_suspend,
+	TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+	TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_enable,
+	TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+	TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_disable,
+	TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+	TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_vblank,
+	TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+	TP_ARGS(drm_id, enable, crtc)
+);
+
+TRACE_EVENT(dpu_crtc_disable_frame_pending,
+	TP_PROTO(uint32_t drm_id, int frame_pending),
+	TP_ARGS(drm_id, frame_pending),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	int,			frame_pending	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->frame_pending = frame_pending;
+	),
+	TP_printk("id:%u frame_pending:%d", __entry->drm_id,
+		  __entry->frame_pending)
+);
+
+TRACE_EVENT(dpu_plane_set_scanout,
+	TP_PROTO(enum dpu_sspp index, struct dpu_hw_fmt_layout *layout,
+		 enum dpu_sspp_multirect_index multirect_index),
+	TP_ARGS(index, layout, multirect_index),
+	TP_STRUCT__entry(
+		__field(	enum dpu_sspp,			index	)
+		__field(	struct dpu_hw_fmt_layout*,	layout	)
+		__field(	enum dpu_sspp_multirect_index,	multirect_index)
+	),
+	TP_fast_assign(
+		__entry->index = index;
+		__entry->layout = layout;
+		__entry->multirect_index = multirect_index;
+	),
+	TP_printk("index:%d layout:{%ux%u @ [%u/%u, %u/%u, %u/%u, %u/%u]} "
+		  "multirect_index:%d", __entry->index, __entry->layout->width,
+		  __entry->layout->height, __entry->layout->plane_addr[0],
+		  __entry->layout->plane_size[0],
+		  __entry->layout->plane_addr[1],
+		  __entry->layout->plane_size[1],
+		  __entry->layout->plane_addr[2],
+		  __entry->layout->plane_size[2],
+		  __entry->layout->plane_addr[3],
+		  __entry->layout->plane_size[3], __entry->multirect_index)
+);
+
+TRACE_EVENT(dpu_plane_disable,
+	TP_PROTO(uint32_t drm_id, bool is_virtual, uint32_t multirect_mode),
+	TP_ARGS(drm_id, is_virtual, multirect_mode),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		drm_id		)
+		__field(	bool,			is_virtual	)
+		__field(	uint32_t,		multirect_mode	)
+	),
+	TP_fast_assign(
+		__entry->drm_id = drm_id;
+		__entry->is_virtual = is_virtual;
+		__entry->multirect_mode = multirect_mode;
+	),
+	TP_printk("id:%u is_virtual:%s multirect_mode:%u", __entry->drm_id,
+		  __entry->is_virtual ? "true" : "false",
+		  __entry->multirect_mode)
+);
+
+DECLARE_EVENT_CLASS(dpu_rm_iter_template,
+	TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+	TP_ARGS(id, type, enc_id),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		id	)
+		__field(	enum dpu_hw_blk_type,	type	)
+		__field(	uint32_t,		enc_id	)
+	),
+	TP_fast_assign(
+		__entry->id = id;
+		__entry->type = type;
+		__entry->enc_id = enc_id;
+	),
+	TP_printk("id:%d type:%d enc_id:%u", __entry->id, __entry->type,
+		  __entry->enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_cdm,
+	TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+	TP_ARGS(id, type, enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_intf,
+	TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+	TP_ARGS(id, type, enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_ctls,
+	TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+	TP_ARGS(id, type, enc_id)
+);
+
+TRACE_EVENT(dpu_rm_reserve_lms,
+	TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id,
+		 uint32_t pp_id),
+	TP_ARGS(id, type, enc_id, pp_id),
+	TP_STRUCT__entry(
+		__field(	uint32_t,		id	)
+		__field(	enum dpu_hw_blk_type,	type	)
+		__field(	uint32_t,		enc_id	)
+		__field(	uint32_t,		pp_id	)
+	),
+	TP_fast_assign(
+		__entry->id = id;
+		__entry->type = type;
+		__entry->enc_id = enc_id;
+		__entry->pp_id = pp_id;
+	),
+	TP_printk("id:%d type:%d enc_id:%u pp_id:%u", __entry->id,
+		  __entry->type, __entry->enc_id, __entry->pp_id)
+);
+
+TRACE_EVENT(dpu_vbif_wait_xin_halt_fail,
+	TP_PROTO(enum dpu_vbif index, u32 xin_id),
+	TP_ARGS(index, xin_id),
+	TP_STRUCT__entry(
+		__field(	enum dpu_vbif,	index	)
+		__field(	u32,		xin_id	)
+	),
+	TP_fast_assign(
+		__entry->index = index;
+		__entry->xin_id = xin_id;
+	),
+	TP_printk("index:%d xin_id:%u", __entry->index, __entry->xin_id)
+);
+
+TRACE_EVENT(dpu_pp_connect_ext_te,
+	TP_PROTO(enum dpu_pingpong pp, u32 cfg),
+	TP_ARGS(pp, cfg),
+	TP_STRUCT__entry(
+		__field(	enum dpu_pingpong,	pp	)
+		__field(	u32,			cfg	)
+	),
+	TP_fast_assign(
+		__entry->pp = pp;
+		__entry->cfg = cfg;
+	),
+	TP_printk("pp:%d cfg:%u", __entry->pp, __entry->cfg)
+);
+
+DECLARE_EVENT_CLASS(dpu_core_irq_idx_cnt_template,
+	TP_PROTO(int irq_idx, int enable_count),
+	TP_ARGS(irq_idx, enable_count),
+	TP_STRUCT__entry(
+		__field(	int,	irq_idx		)
+		__field(	int,	enable_count	)
+	),
+	TP_fast_assign(
+		__entry->irq_idx = irq_idx;
+		__entry->enable_count = enable_count;
+	),
+	TP_printk("irq_idx:%d enable_count:%u", __entry->irq_idx,
+		  __entry->enable_count)
+);
+DEFINE_EVENT(dpu_core_irq_idx_cnt_template, dpu_core_irq_enable_idx,
+	TP_PROTO(int irq_idx, int enable_count),
+	TP_ARGS(irq_idx, enable_count)
+);
+DEFINE_EVENT(dpu_core_irq_idx_cnt_template, dpu_core_irq_disable_idx,
+	TP_PROTO(int irq_idx, int enable_count),
+	TP_ARGS(irq_idx, enable_count)
+);
+DEFINE_EVENT(dpu_core_irq_idx_cnt_template, dpu_core_irq_disable_nolock,
+	TP_PROTO(int irq_idx, int enable_count),
+	TP_ARGS(irq_idx, enable_count)
+);
+
+DECLARE_EVENT_CLASS(dpu_core_irq_callback_template,
+	TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+	TP_ARGS(irq_idx, callback),
+	TP_STRUCT__entry(
+		__field(	int,				irq_idx	)
+		__field(	struct dpu_irq_callback *,	callback)
+	),
+	TP_fast_assign(
+		__entry->irq_idx = irq_idx;
+		__entry->callback = callback;
+	),
+	TP_printk("irq_idx:%d callback:%pK", __entry->irq_idx,
+		  __entry->callback)
+);
+DEFINE_EVENT(dpu_core_irq_callback_template, dpu_core_irq_register_callback,
+	TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+	TP_ARGS(irq_idx, callback)
+);
+DEFINE_EVENT(dpu_core_irq_callback_template, dpu_core_irq_unregister_callback,
+	TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+	TP_ARGS(irq_idx, callback)
+);
+
+TRACE_EVENT(dpu_core_perf_update_clk,
+	TP_PROTO(struct drm_device *dev, bool stop_req, u64 clk_rate),
+	TP_ARGS(dev, stop_req, clk_rate),
+	TP_STRUCT__entry(
+		__field(	struct drm_device *,	dev		)
+		__field(	bool,			stop_req	)
+		__field(	u64,			clk_rate	)
+	),
+	TP_fast_assign(
+		__entry->dev = dev;
+		__entry->stop_req = stop_req;
+		__entry->clk_rate = clk_rate;
+	),
+	TP_printk("dev:%s stop_req:%s clk_rate:%llu", __entry->dev->unique,
+		  __entry->stop_req ? "true" : "false", __entry->clk_rate)
+);
+
+#define DPU_ATRACE_END(name) trace_tracing_mark_write(current->tgid, name, 0)
+#define DPU_ATRACE_BEGIN(name) trace_tracing_mark_write(current->tgid, name, 1)
+#define DPU_ATRACE_FUNC() DPU_ATRACE_BEGIN(__func__)
+
+#define DPU_ATRACE_INT(name, value) \
+	trace_dpu_trace_counter(current->tgid, name, value)
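+
+/*
+ * Example (illustrative): bracket a code path with the ATRACE markers so
+ * its duration shows up in systrace/atrace output:
+ *
+ *	DPU_ATRACE_BEGIN("crtc_commit");
+ *	... perform the commit ...
+ *	DPU_ATRACE_END("crtc_commit");
+ */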
+
+#endif /* _DPU_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
new file mode 100644
index 000000000000..295528292296
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -0,0 +1,384 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+
+#include "dpu_vbif.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_trace.h"
+
+/**
+ * _dpu_vbif_wait_for_xin_halt - wait for the xin to halt
+ * @vbif:	Pointer to hardware vbif driver
+ * @xin_id:	Client interface identifier
+ * @return:	0 if success; error code otherwise
+ */
+static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
+{
+	ktime_t timeout;
+	bool status;
+	int rc;
+
+	if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) {
+		DPU_ERROR("invalid arguments vbif %d\n", vbif != 0);
+		return -EINVAL;
+	}
+
+	timeout = ktime_add_us(ktime_get(), vbif->cap->xin_halt_timeout);
+	for (;;) {
+		status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+		if (status)
+			break;
+		if (ktime_compare_safe(ktime_get(), timeout) > 0) {
+			status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+			break;
+		}
+		usleep_range(501, 1000);
+	}
+
+	if (!status) {
+		rc = -ETIMEDOUT;
+		DPU_ERROR("VBIF %d client %d not halting. TIMEDOUT.\n",
+				vbif->idx - VBIF_0, xin_id);
+	} else {
+		rc = 0;
+		DPU_DEBUG("VBIF %d client %d is halted\n",
+				vbif->idx - VBIF_0, xin_id);
+	}
+
+	return rc;
+}
+
+/**
+ * _dpu_vbif_apply_dynamic_ot_limit - determine OT based on usecase parameters
+ * @vbif:	Pointer to hardware vbif driver
+ * @ot_lim:	Pointer to OT limit to be modified
+ * @params:	Pointer to usecase parameters
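 *
 * The dynamic limit is selected by pixel rate: pps = fps * width * height.
 * For example (illustrative numbers), a 1920x1080 WFD client at 60 fps
 * gives pps = 124416000, and the first table entry whose pps threshold is
 * >= that value supplies the OT limit.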
+ */
+static void _dpu_vbif_apply_dynamic_ot_limit(struct dpu_hw_vbif *vbif,
+		u32 *ot_lim, struct dpu_vbif_set_ot_params *params)
+{
+	u64 pps;
+	const struct dpu_vbif_dynamic_ot_tbl *tbl;
+	u32 i;
+
+	if (!vbif || !(vbif->cap->features & BIT(DPU_VBIF_QOS_OTLIM)))
+		return;
+
+	/* Dynamic OT setting done only for WFD */
+	if (!params->is_wfd)
+		return;
+
+	pps = params->frame_rate;
+	pps *= params->width;
+	pps *= params->height;
+
+	tbl = params->rd ? &vbif->cap->dynamic_ot_rd_tbl :
+			&vbif->cap->dynamic_ot_wr_tbl;
+
+	for (i = 0; i < tbl->count; i++) {
+		if (pps <= tbl->cfg[i].pps) {
+			*ot_lim = tbl->cfg[i].ot_limit;
+			break;
+		}
+	}
+
+	DPU_DEBUG("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
+			vbif->idx - VBIF_0, params->xin_id,
+			params->width, params->height, params->frame_rate,
+			pps, *ot_lim);
+}
+
+/**
+ * _dpu_vbif_get_ot_limit - get OT based on usecase & configuration parameters
+ * @vbif:	Pointer to hardware vbif driver
+ * @params:	Pointer to usecase parameters
+ * @return:	OT limit
+ */
+static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
+	struct dpu_vbif_set_ot_params *params)
+{
+	u32 ot_lim = 0;
+	u32 val;
+
+	if (!vbif || !vbif->cap) {
+		DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
+		/* an ot_lim of 0 tells the caller not to configure OT */
+		return 0;
+	}
+
+	if (vbif->cap->default_ot_wr_limit && !params->rd)
+		ot_lim = vbif->cap->default_ot_wr_limit;
+	else if (vbif->cap->default_ot_rd_limit && params->rd)
+		ot_lim = vbif->cap->default_ot_rd_limit;
+
+	/*
+	 * If default ot is not set from dt/catalog,
+	 * then do not configure it.
+	 */
+	if (ot_lim == 0)
+		goto exit;
+
+	/* Modify the limits if the target and the use case requires it */
+	_dpu_vbif_apply_dynamic_ot_limit(vbif, &ot_lim, params);
+
+	if (vbif && vbif->ops.get_limit_conf) {
+		val = vbif->ops.get_limit_conf(vbif,
+				params->xin_id, params->rd);
+		if (val == ot_lim)
+			ot_lim = 0;
+	}
+
+exit:
+	DPU_DEBUG("vbif:%d xin:%d ot_lim:%d\n",
+			vbif->idx - VBIF_0, params->xin_id, ot_lim);
+	return ot_lim;
+}
+
+/**
+ * dpu_vbif_set_ot_limit - set OT based on usecase & configuration parameters
+ * @dpu_kms:	DPU handler
+ * @params:	Pointer to usecase parameters
+ *
+ * Note this function would block waiting for bus halt.
+ */
+void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
+		struct dpu_vbif_set_ot_params *params)
+{
+	struct dpu_hw_vbif *vbif = NULL;
+	struct dpu_hw_mdp *mdp;
+	bool forced_on = false;
+	u32 ot_lim;
+	int ret, i;
+
+	if (!dpu_kms) {
+		DPU_ERROR("invalid arguments\n");
+		return;
+	}
+	mdp = dpu_kms->hw_mdp;
+
+	for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+		if (dpu_kms->hw_vbif[i] &&
+				dpu_kms->hw_vbif[i]->idx == params->vbif_idx)
+			vbif = dpu_kms->hw_vbif[i];
+	}
+
+	if (!vbif || !mdp) {
+		DPU_DEBUG("invalid arguments vbif %d mdp %d\n",
+				vbif != NULL, mdp != NULL);
+		return;
+	}
+
+	if (!mdp->ops.setup_clk_force_ctrl ||
+			!vbif->ops.set_limit_conf ||
+			!vbif->ops.set_halt_ctrl)
+		return;
+
+	/* set write_gather_en for all write clients */
+	if (vbif->ops.set_write_gather_en && !params->rd)
+		vbif->ops.set_write_gather_en(vbif, params->xin_id);
+
+	ot_lim = _dpu_vbif_get_ot_limit(vbif, params) & 0xFF;
+
+	if (ot_lim == 0)
+		goto exit;
+
+	trace_dpu_perf_set_ot(params->num, params->xin_id, ot_lim,
+		params->vbif_idx);
+
+	forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
+
+	vbif->ops.set_limit_conf(vbif, params->xin_id, params->rd, ot_lim);
+
+	vbif->ops.set_halt_ctrl(vbif, params->xin_id, true);
+
+	ret = _dpu_vbif_wait_for_xin_halt(vbif, params->xin_id);
+	if (ret)
+		trace_dpu_vbif_wait_xin_halt_fail(vbif->idx, params->xin_id);
+
+	vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);
+
+	if (forced_on)
+		mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
+exit:
+	return;
+}
+
+void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
+		struct dpu_vbif_set_qos_params *params)
+{
+	struct dpu_hw_vbif *vbif = NULL;
+	struct dpu_hw_mdp *mdp;
+	bool forced_on = false;
+	const struct dpu_vbif_qos_tbl *qos_tbl;
+	int i;
+
+	if (!dpu_kms || !params || !dpu_kms->hw_mdp) {
+		DPU_ERROR("invalid arguments\n");
+		return;
+	}
+	mdp = dpu_kms->hw_mdp;
+
+	for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+		if (dpu_kms->hw_vbif[i] &&
+				dpu_kms->hw_vbif[i]->idx == params->vbif_idx) {
+			vbif = dpu_kms->hw_vbif[i];
+			break;
+		}
+	}
+
+	if (!vbif || !vbif->cap) {
+		DPU_ERROR("invalid vbif %d\n", params->vbif_idx);
+		return;
+	}
+
+	if (!vbif->ops.set_qos_remap || !mdp->ops.setup_clk_force_ctrl) {
+		DPU_DEBUG("qos remap not supported\n");
+		return;
+	}
+
+	qos_tbl = params->is_rt ? &vbif->cap->qos_rt_tbl :
+			&vbif->cap->qos_nrt_tbl;
+
+	if (!qos_tbl->npriority_lvl || !qos_tbl->priority_lvl) {
+		DPU_DEBUG("qos tbl not defined\n");
+		return;
+	}
+
+	forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
+
+	for (i = 0; i < qos_tbl->npriority_lvl; i++) {
+		DPU_DEBUG("vbif:%d xin:%d lvl:%d/%d\n",
+				params->vbif_idx, params->xin_id, i,
+				qos_tbl->priority_lvl[i]);
+		vbif->ops.set_qos_remap(vbif, params->xin_id, i,
+				qos_tbl->priority_lvl[i]);
+	}
+
+	if (forced_on)
+		mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
+}
+
+void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms)
+{
+	struct dpu_hw_vbif *vbif;
+	u32 i, pnd, src;
+
+	if (!dpu_kms) {
+		DPU_ERROR("invalid argument\n");
+		return;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+		vbif = dpu_kms->hw_vbif[i];
+		if (vbif && vbif->ops.clear_errors) {
+			vbif->ops.clear_errors(vbif, &pnd, &src);
+			if (pnd || src) {
+				DRM_DEBUG_KMS("VBIF %d: pnd 0x%X, src 0x%X\n",
+					      vbif->idx - VBIF_0, pnd, src);
+			}
+		}
+	}
+}
+
+void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms)
+{
+	struct dpu_hw_vbif *vbif;
+	int i, j;
+
+	if (!dpu_kms) {
+		DPU_ERROR("invalid argument\n");
+		return;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+		vbif = dpu_kms->hw_vbif[i];
+		if (vbif && vbif->cap && vbif->ops.set_mem_type) {
+			for (j = 0; j < vbif->cap->memtype_count; j++)
+				vbif->ops.set_mem_type(
+						vbif, j, vbif->cap->memtype[j]);
+		}
+	}
+}
+
+#ifdef CONFIG_DEBUG_FS
+void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
+{
+	debugfs_remove_recursive(dpu_kms->debugfs_vbif);
+	dpu_kms->debugfs_vbif = NULL;
+}
+
+int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
+{
+	char vbif_name[32];
+	struct dentry *debugfs_vbif;
+	int i, j;
+
+	dpu_kms->debugfs_vbif = debugfs_create_dir("vbif", debugfs_root);
+	if (!dpu_kms->debugfs_vbif) {
+		DPU_ERROR("failed to create vbif debugfs\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+		struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
+
+		snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
+
+		debugfs_vbif = debugfs_create_dir(vbif_name,
+				dpu_kms->debugfs_vbif);
+
+		debugfs_create_u32("features", 0600, debugfs_vbif,
+			(u32 *)&vbif->features);
+
+		debugfs_create_u32("xin_halt_timeout", 0400, debugfs_vbif,
+			(u32 *)&vbif->xin_halt_timeout);
+
+		debugfs_create_u32("default_rd_ot_limit", 0400, debugfs_vbif,
+			(u32 *)&vbif->default_ot_rd_limit);
+
+		debugfs_create_u32("default_wr_ot_limit", 0400, debugfs_vbif,
+			(u32 *)&vbif->default_ot_wr_limit);
+
+		for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
+			struct dpu_vbif_dynamic_ot_cfg *cfg =
+					&vbif->dynamic_ot_rd_tbl.cfg[j];
+
+			snprintf(vbif_name, sizeof(vbif_name),
+					"dynamic_ot_rd_%d_pps", j);
+			debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
+					(u64 *)&cfg->pps);
+			snprintf(vbif_name, sizeof(vbif_name),
+					"dynamic_ot_rd_%d_ot_limit", j);
+			debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
+					(u32 *)&cfg->ot_limit);
+		}
+
+		for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
+			struct dpu_vbif_dynamic_ot_cfg *cfg =
+					&vbif->dynamic_ot_wr_tbl.cfg[j];
+
+			snprintf(vbif_name, sizeof(vbif_name),
+					"dynamic_ot_wr_%d_pps", j);
+			debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
+					(u64 *)&cfg->pps);
+			snprintf(vbif_name, sizeof(vbif_name),
+					"dynamic_ot_wr_%d_ot_limit", j);
+			debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
+					(u32 *)&cfg->ot_limit);
+		}
+	}
+
+	return 0;
+}
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
new file mode 100644
index 000000000000..f17af52dbbd5
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
@@ -0,0 +1,94 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_VBIF_H__
+#define __DPU_VBIF_H__
+
+#include "dpu_kms.h"
+
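+/**
+ * struct dpu_vbif_set_ot_params - OT (outstanding transactions) limit
+ *	configuration for a vbif client. Field meanings below are inferred
+ *	from their use in dpu_vbif.c.
+ * @xin_id: client interface identifier
+ * @num: pipe identifier (debug only)
+ * @width: source frame width, in pixels
+ * @height: source frame height, in pixels
+ * @frame_rate: frame rate, in frames per second
+ * @rd: true for read (source) clients, false for write clients
+ * @is_wfd: true if the client is used in a WFD use case, which enables
+ *	the dynamic OT tables
+ * @vbif_idx: vbif identifier
+ * @clk_ctrl: clock control identifier of the xin
+ */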
+struct dpu_vbif_set_ot_params {
+	u32 xin_id;
+	u32 num;
+	u32 width;
+	u32 height;
+	u32 frame_rate;
+	bool rd;
+	bool is_wfd;
+	u32 vbif_idx;
+	u32 clk_ctrl;
+};
+
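+/**
+ * struct dpu_vbif_set_memtype_params - memory type configuration for a
+ *	vbif client (field meanings inferred from dpu_vbif.c usage)
+ * @xin_id: client interface identifier
+ * @vbif_idx: vbif identifier
+ * @clk_ctrl: clock control identifier of the xin
+ * @is_cacheable: true if the xin's memory type should be marked cacheable
+ */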
+struct dpu_vbif_set_memtype_params {
+	u32 xin_id;
+	u32 vbif_idx;
+	u32 clk_ctrl;
+	bool is_cacheable;
+};
+
+/**
+ * struct dpu_vbif_set_qos_params - QoS remapper parameter
+ * @vbif_idx: vbif identifier
+ * @xin_id: client interface identifier
+ * @clk_ctrl: clock control identifier of the xin
+ * @num: pipe identifier (debug only)
+ * @is_rt: true if pipe is used in real-time use case
+ */
+struct dpu_vbif_set_qos_params {
+	u32 vbif_idx;
+	u32 xin_id;
+	u32 clk_ctrl;
+	u32 num;
+	bool is_rt;
+};
+
+/**
+ * dpu_vbif_set_ot_limit - set OT limit for vbif client
+ * @dpu_kms:	DPU handler
+ * @params:	Pointer to OT configuration parameters
+ */
+void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
+		struct dpu_vbif_set_ot_params *params);
+
+/**
+ * dpu_vbif_set_qos_remap - set QoS priority level remap
+ * @dpu_kms:	DPU handler
+ * @params:	Pointer to QoS configuration parameters
+ */
+void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
+		struct dpu_vbif_set_qos_params *params);
+
+/**
+ * dpu_vbif_clear_errors - clear any vbif errors
+ * @dpu_kms:	DPU handler
+ */
+void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_vbif_init_memtypes - initialize xin memory types for vbif
+ * @dpu_kms:	DPU handler
+ */
+void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms);
+
+#ifdef CONFIG_DEBUG_FS
+int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
+void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms);
+#else
+static inline int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms,
+		struct dentry *debugfs_root)
+{
+	return 0;
+}
+static inline void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
+{
+}
+#endif
+#endif /* __DPU_VBIF_H__ */
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index cd0959783203..2c76abc13506 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark at gmail.com>
  *
@@ -15,6 +16,8 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/kthread.h>
+#include <uapi/linux/sched/types.h>
 #include <drm/drm_of.h>
 
 #include "msm_drv.h"
@@ -149,7 +152,7 @@ struct vblank_event {
 	bool enable;
 };
 
-static void vblank_ctrl_worker(struct work_struct *work)
+static void vblank_ctrl_worker(struct kthread_work *work)
 {
 	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
 						struct msm_vblank_ctrl, work);
@@ -197,7 +200,8 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
 	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
 	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
 
-	queue_work(priv->wq, &vbl_ctrl->work);
+	kthread_queue_work(&priv->disp_thread[crtc_id].worker,
+			&vbl_ctrl->work);
 
 	return 0;
 }
@@ -211,17 +215,33 @@ static int msm_drm_uninit(struct device *dev)
 	struct msm_mdss *mdss = priv->mdss;
 	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
 	struct vblank_event *vbl_ev, *tmp;
+	int i;
 
 	/* We must cancel and cleanup any pending vblank enable/disable
 	 * work before drm_irq_uninstall() to avoid work re-enabling an
 	 * irq after uninstall has disabled it.
 	 */
-	cancel_work_sync(&vbl_ctrl->work);
+	kthread_flush_work(&vbl_ctrl->work);
 	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
 		list_del(&vbl_ev->node);
 		kfree(vbl_ev);
 	}
 
+	/* clean up display commit/event worker threads */
+	for (i = 0; i < priv->num_crtcs; i++) {
+		if (priv->disp_thread[i].thread) {
+			kthread_flush_worker(&priv->disp_thread[i].worker);
+			kthread_stop(priv->disp_thread[i].thread);
+			priv->disp_thread[i].thread = NULL;
+		}
+
+		if (priv->event_thread[i].thread) {
+			kthread_flush_worker(&priv->event_thread[i].worker);
+			kthread_stop(priv->event_thread[i].thread);
+			priv->event_thread[i].thread = NULL;
+		}
+	}
+
 	msm_gem_shrinker_cleanup(ddev);
 
 	drm_kms_helper_poll_fini(ddev);
@@ -269,6 +289,7 @@ static int msm_drm_uninit(struct device *dev)
 
 #define KMS_MDP4 4
 #define KMS_MDP5 5
+#define KMS_DPU  3
 
 static int get_mdp_ver(struct platform_device *pdev)
 {
@@ -360,7 +381,8 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	struct msm_drm_private *priv;
 	struct msm_kms *kms;
 	struct msm_mdss *mdss;
-	int ret;
+	int ret, i;
+	struct sched_param param;
 
 	ddev = drm_dev_alloc(drv, dev);
 	if (IS_ERR(ddev)) {
@@ -379,7 +401,17 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	ddev->dev_private = priv;
 	priv->dev = ddev;
 
-	ret = mdp5_mdss_init(ddev);
+	switch (get_mdp_ver(pdev)) {
+	case KMS_MDP5:
+		ret = mdp5_mdss_init(ddev);
+		break;
+	case KMS_DPU:
+		ret = dpu_mdss_init(ddev);
+		break;
+	default:
+		ret = 0;
+		break;
+	}
 	if (ret)
 		goto err_free_priv;
 
@@ -389,7 +421,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 
 	INIT_LIST_HEAD(&priv->inactive_list);
 	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
-	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
+	kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
 	spin_lock_init(&priv->vblank_ctrl.lock);
 
 	drm_mode_config_init(ddev);
@@ -413,6 +445,10 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	case KMS_MDP5:
 		kms = mdp5_kms_init(ddev);
 		break;
+	case KMS_DPU:
+		kms = dpu_kms_init(ddev);
+		priv->kms = kms;
+		break;
 	default:
 		kms = ERR_PTR(-ENODEV);
 		break;
@@ -444,6 +480,79 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	ddev->mode_config.funcs = &mode_config_funcs;
 	ddev->mode_config.helper_private = &mode_config_helper_funcs;
 
+	/*
+	 * This priority was found during empirical testing to provide
+	 * appropriate realtime scheduling for display updates while still
+	 * interacting well with other realtime and normal priority tasks.
+	 */
+	param.sched_priority = 16;
+	for (i = 0; i < priv->num_crtcs; i++) {
+
+		/* initialize display thread */
+		priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
+		kthread_init_worker(&priv->disp_thread[i].worker);
+		priv->disp_thread[i].dev = ddev;
+		priv->disp_thread[i].thread =
+			kthread_run(kthread_worker_fn,
+				&priv->disp_thread[i].worker,
+				"crtc_commit:%d", priv->disp_thread[i].crtc_id);
+		if (IS_ERR(priv->disp_thread[i].thread)) {
+			dev_err(dev, "failed to create crtc_commit kthread\n");
+			priv->disp_thread[i].thread = NULL;
+		} else {
+			ret = sched_setscheduler(priv->disp_thread[i].thread,
+						 SCHED_FIFO, &param);
+			if (ret)
+				pr_warn("display thread priority update failed: %d\n",
+					ret);
+		}
+
+		/* initialize event thread */
+		priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
+		kthread_init_worker(&priv->event_thread[i].worker);
+		priv->event_thread[i].dev = ddev;
+		priv->event_thread[i].thread =
+			kthread_run(kthread_worker_fn,
+				&priv->event_thread[i].worker,
+				"crtc_event:%d", priv->event_thread[i].crtc_id);
+		/*
+		 * The event thread should run at the same priority as
+		 * disp_thread because it handles frame_done events. A lower
+		 * priority event thread and a higher priority disp_thread can
+		 * push the frame_pending counter beyond 2, which leads to
+		 * commit failures at the crtc commit level.
+		 */
+		if (IS_ERR(priv->event_thread[i].thread)) {
+			dev_err(dev, "failed to create crtc_event kthread\n");
+			priv->event_thread[i].thread = NULL;
+		} else {
+			ret = sched_setscheduler(priv->event_thread[i].thread,
+						 SCHED_FIFO, &param);
+			if (ret)
+				pr_warn("display event thread priority update failed: %d\n",
+					ret);
+		}
+
+		if ((!priv->disp_thread[i].thread) ||
+				!priv->event_thread[i].thread) {
+			/* clean up previously created threads if any */
+			for ( ; i >= 0; i--) {
+				if (priv->disp_thread[i].thread) {
+					kthread_stop(
+						priv->disp_thread[i].thread);
+					priv->disp_thread[i].thread = NULL;
+				}
+
+				if (priv->event_thread[i].thread) {
+					kthread_stop(
+						priv->event_thread[i].thread);
+					priv->event_thread[i].thread = NULL;
+				}
+			}
+			goto err_msm_uninit;
+		}
+	}
+
 	ret = drm_vblank_init(ddev, priv->num_crtcs);
 	if (ret < 0) {
 		dev_err(dev, "failed to initialize vblank\n");
@@ -1069,12 +1178,13 @@ static int add_display_components(struct device *dev,
 	int ret;
 
 	/*
-	 * MDP5 based devices don't have a flat hierarchy. There is a top level
-	 * parent: MDSS, and children: MDP5, DSI, HDMI, eDP etc. Populate the
-	 * children devices, find the MDP5 node, and then add the interfaces
-	 * to our components list.
+	 * MDP5/DPU based devices don't have a flat hierarchy. There is a top
+	 * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
+	 * Populate the children devices, find the MDP5/DPU node, and then add
+	 * the interfaces to our components list.
 	 */
-	if (of_device_is_compatible(dev->of_node, "qcom,mdss")) {
+	if (of_device_is_compatible(dev->of_node, "qcom,mdss") ||
+	    of_device_is_compatible(dev->of_node, "qcom,dpu-mdss")) {
 		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
 		if (ret) {
 			dev_err(dev, "failed to populate children devices\n");
@@ -1186,6 +1296,7 @@ static int msm_pdev_remove(struct platform_device *pdev)
 static const struct of_device_id dt_match[] = {
 	{ .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
 	{ .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
+	{ .compatible = "qcom,dpu-mdss", .data = (void *)KMS_DPU },
 	{}
 };
 MODULE_DEVICE_TABLE(of, dt_match);
@@ -1207,6 +1318,7 @@ static int __init msm_drm_register(void)
 
 	DBG("init");
 	msm_mdp_register();
+	msm_dpu_register();
 	msm_dsi_register();
 	msm_edp_register();
 	msm_hdmi_register();
@@ -1223,6 +1335,7 @@ static void __exit msm_drm_unregister(void)
 	msm_edp_unregister();
 	msm_dsi_unregister();
 	msm_mdp_unregister();
+	msm_dpu_unregister();
 }
 
 module_init(msm_drm_register);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 3b206ae6423f..0cba86ed3f54 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark at gmail.com>
  *
@@ -33,6 +34,7 @@
 #include <linux/of_graph.h>
 #include <linux/of_device.h>
 #include <asm/sizes.h>
+#include <linux/kthread.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
@@ -55,7 +57,7 @@ struct msm_gem_address_space;
 struct msm_gem_vma;
 
 #define MAX_CRTCS      8
-#define MAX_PLANES     16
+#define MAX_PLANES     20
 #define MAX_ENCODERS   8
 #define MAX_BRIDGES    8
 #define MAX_CONNECTORS 8
@@ -74,12 +76,77 @@ enum msm_mdp_plane_property {
 };
 
 struct msm_vblank_ctrl {
-	struct work_struct work;
+	struct kthread_work work;
 	struct list_head event_list;
 	spinlock_t lock;
 };
 
 #define MSM_GPU_MAX_RINGS 4
+#define MAX_H_TILES_PER_DISPLAY 2
+
+/**
+ * enum msm_display_caps - features/capabilities supported by displays
+ * @MSM_DISPLAY_CAP_VID_MODE:           Video or "active" mode supported
+ * @MSM_DISPLAY_CAP_CMD_MODE:           Command mode supported
+ * @MSM_DISPLAY_CAP_HOT_PLUG:           Hot plug detection supported
+ * @MSM_DISPLAY_CAP_EDID:               EDID supported
+ */
+enum msm_display_caps {
+	MSM_DISPLAY_CAP_VID_MODE	= BIT(0),
+	MSM_DISPLAY_CAP_CMD_MODE	= BIT(1),
+	MSM_DISPLAY_CAP_HOT_PLUG	= BIT(2),
+	MSM_DISPLAY_CAP_EDID		= BIT(3),
+};
+
+/**
+ * enum msm_event_wait - type of HW events to wait for
+ * @MSM_ENC_COMMIT_DONE:        wait for the driver to flush the registers to HW
+ * @MSM_ENC_TX_COMPLETE:        wait for the HW to transfer the frame to panel
+ * @MSM_ENC_VBLANK:             wait for the HW VBLANK event (for driver-internal waiters)
+ */
+enum msm_event_wait {
+	MSM_ENC_COMMIT_DONE = 0,
+	MSM_ENC_TX_COMPLETE,
+	MSM_ENC_VBLANK,
+};
+
+/**
+ * struct msm_display_topology - defines a display topology pipeline
+ * @num_lm:       number of layer mixers used
+ * @num_enc:      number of compression encoder blocks used
+ * @num_intf:     number of interfaces the panel is mounted on
+ */
+struct msm_display_topology {
+	u32 num_lm;
+	u32 num_enc;
+	u32 num_intf;
+};
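+
+/*
+ * Example (illustrative): a dual-DSI "split display" panel, where two layer
+ * mixers each feed one interface, would use num_lm = 2 and num_intf = 2;
+ * num_enc stays 0 when no compression encoder block is in the path.
+ */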
+
+/**
+ * struct msm_display_info - defines display properties
+ * @intf_type:          DRM_MODE_CONNECTOR_ display type
+ * @capabilities:       Bitmask of display flags
+ * @num_of_h_tiles:     Number of horizontal tiles in case of split interface
+ * @h_tile_instance:    Controller instance used per tile. Number of elements is
+ *                      based on num_of_h_tiles
+ * @is_te_using_watchdog_timer:  Boolean indicating whether watchdog TE is
+ *				 used instead of panel TE in cmd mode panels
+ */
+struct msm_display_info {
+	int intf_type;
+	uint32_t capabilities;
+	uint32_t num_of_h_tiles;
+	uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
+	bool is_te_using_watchdog_timer;
+};
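+
+/*
+ * Example (illustrative): a panel split horizontally across two DSI
+ * controllers would report num_of_h_tiles = 2 with h_tile_instance = {0, 1},
+ * i.e. one controller instance per horizontal tile.
+ */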
+
+/* Commit/Event thread specific structure */
+struct msm_drm_thread {
+	struct drm_device *dev;
+	struct task_struct *thread;
+	unsigned int crtc_id;
+	struct kthread_worker worker;
+};
 
 struct msm_drm_private {
 
@@ -90,7 +157,7 @@ struct msm_drm_private {
 	/* subordinate devices, if present: */
 	struct platform_device *gpu_pdev;
 
-	/* top level MDSS wrapper device (for MDP5 only) */
+	/* top level MDSS wrapper device (for MDP5/DPU only) */
 	struct msm_mdss *mdss;
 
 	/* possibly this should be in the kms component, but it is
@@ -128,6 +195,9 @@ struct msm_drm_private {
 	unsigned int num_crtcs;
 	struct drm_crtc *crtcs[MAX_CRTCS];
 
+	struct msm_drm_thread disp_thread[MAX_CRTCS];
+	struct msm_drm_thread event_thread[MAX_CRTCS];
+
 	unsigned int num_encoders;
 	struct drm_encoder *encoders[MAX_ENCODERS];
 
@@ -180,6 +250,9 @@ struct msm_gem_address_space *
 msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
 		const char *name);
 
+int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+
 void msm_gem_submit_free(struct msm_gem_submit *submit);
 int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 		struct drm_file *file);
@@ -291,6 +364,8 @@ static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
 
 void __init msm_mdp_register(void);
 void __exit msm_mdp_unregister(void);
+void __init msm_dpu_register(void);
+void __exit msm_dpu_unregister(void);
 
 #ifdef CONFIG_DEBUG_FS
 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 36201f43fa31..ff8eb0dde606 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark at gmail.com>
  *
@@ -51,6 +52,11 @@ struct msm_kms_funcs {
 	const struct msm_format *(*get_format)(struct msm_kms *kms,
 					const uint32_t format,
 					const uint64_t modifiers);
+	/* do format checking on format modified through fb_cmd2 modifiers */
+	int (*check_modified_format)(const struct msm_kms *kms,
+			const struct msm_format *msm_fmt,
+			const struct drm_mode_fb_cmd2 *cmd,
+			struct drm_gem_object **bos);
 	/* misc: */
 	long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
 			struct drm_encoder *encoder);
@@ -91,6 +97,7 @@ static inline void msm_kms_init(struct msm_kms *kms,
 
 struct msm_kms *mdp4_kms_init(struct drm_device *dev);
 struct msm_kms *mdp5_kms_init(struct drm_device *dev);
+struct msm_kms *dpu_kms_init(struct drm_device *dev);
 
 struct msm_mdss_funcs {
 	int (*enable)(struct msm_mdss *mdss);
@@ -104,5 +111,6 @@ struct msm_mdss {
 };
 
 int mdp5_mdss_init(struct drm_device *dev);
+int dpu_mdss_init(struct drm_device *dev);
 
 #endif /* __MSM_KMS_H__ */
diff --git a/include/uapi/media/msm_media_info.h b/include/uapi/media/msm_media_info.h
new file mode 100644
index 000000000000..4f12e5c534c8
--- /dev/null
+++ b/include/uapi/media/msm_media_info.h
@@ -0,0 +1,1376 @@
+#ifndef __MEDIA_INFO_H__
+#define __MEDIA_INFO_H__
+
+#ifndef MSM_MEDIA_ALIGN
+#define MSM_MEDIA_ALIGN(__sz, __align) (((__align) & ((__align) - 1)) ?\
+	((((__sz) + (__align) - 1) / (__align)) * (__align)) :\
+	(((__sz) + (__align) - 1) & (~((__align) - 1))))
+#endif
+
+#ifndef MSM_MEDIA_ROUNDUP
+#define MSM_MEDIA_ROUNDUP(__sz, __r) (((__sz) + ((__r) - 1)) / (__r))
+#endif
+
+#ifndef MSM_MEDIA_MAX
+#define MSM_MEDIA_MAX(__a, __b) ((__a) > (__b)?(__a):(__b))
+#endif
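+
+/*
+ * Worked examples (illustrative):
+ * MSM_MEDIA_ALIGN(1080, 32)   == 1088 (power-of-two alignment: mask path)
+ * MSM_MEDIA_ALIGN(100, 192)   == 192  (non power-of-two: divide path)
+ * MSM_MEDIA_ROUNDUP(1080, 32) == 34   (i.e. ceil(1080 / 32))
+ * MSM_MEDIA_MAX(16384, 15360) == 16384
+ */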
+
+enum color_fmts {
+	/* Venus NV12:
+	 * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+	 * by an interleaved U/V plane containing 8 bit 2x2 subsampled
+	 * colour difference samples.
+	 *
+	 * <-------- Y/UV_Stride -------->
+	 * <------- Width ------->
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  Height      |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |          Y_Scanlines
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              V
+	 * U V U V U V U V U V U V . . . .  ^
+	 * U V U V U V U V U V U V . . . .  |
+	 * U V U V U V U V U V U V . . . .  |
+	 * U V U V U V U V U V U V . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  --> Buffer size alignment
+	 *
+	 * Y_Stride : Width aligned to 128
+	 * UV_Stride : Width aligned to 128
+	 * Y_Scanlines: Height aligned to 32
+	 * UV_Scanlines: Height/2 aligned to 16
+	 * Extradata: Arbitrary (software-imposed) padding
+	 * Total size = align(Y_Stride * Y_Scanlines
+	 *          + UV_Stride * UV_Scanlines
+	 *          + max(Extradata, Y_Stride * 8), 4096)
+	 */
+	COLOR_FMT_NV12,
+
+	/* Venus NV21:
+	 * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+	 * by an interleaved V/U plane containing 8 bit 2x2 subsampled
+	 * colour difference samples.
+	 *
+	 * <-------- Y/UV_Stride -------->
+	 * <------- Width ------->
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  Height      |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |          Y_Scanlines
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              V
+	 * V U V U V U V U V U V U . . . .  ^
+	 * V U V U V U V U V U V U . . . .  |
+	 * V U V U V U V U V U V U . . . .  |
+	 * V U V U V U V U V U V U . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  --> Padding & Buffer size alignment
+	 *
+	 * Y_Stride : Width aligned to 128
+	 * UV_Stride : Width aligned to 128
+	 * Y_Scanlines: Height aligned to 32
+	 * UV_Scanlines: Height/2 aligned to 16
+	 * Extradata: Arbitrary (software-imposed) padding
+	 * Total size = align(Y_Stride * Y_Scanlines
+	 *          + UV_Stride * UV_Scanlines
+	 *          + max(Extradata, Y_Stride * 8), 4096)
+	 */
+	COLOR_FMT_NV21,
+	/* Venus NV12_MVTB:
+	 * Two YUV 4:2:0 images/views one after the other
+	 * in a top-bottom layout, same as NV12
+	 * with a plane of 8 bit Y samples followed
+	 * by an interleaved U/V plane containing 8 bit 2x2 subsampled
+	 * colour difference samples.
+	 *
+	 *
+	 * <-------- Y/UV_Stride -------->
+	 * <------- Width ------->
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^               ^
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  Height      |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |          Y_Scanlines      |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |               |
+	 * . . . . . . . . . . . . . . . .              |             View_1
+	 * . . . . . . . . . . . . . . . .              |               |
+	 * . . . . . . . . . . . . . . . .              |               |
+	 * . . . . . . . . . . . . . . . .              V               |
+	 * U V U V U V U V U V U V . . . .  ^                           |
+	 * U V U V U V U V U V U V . . . .  |                           |
+	 * U V U V U V U V U V U V . . . .  |                           |
+	 * U V U V U V U V U V U V . . . .  UV_Scanlines                |
+	 * . . . . . . . . . . . . . . . .  |                           |
+	 * . . . . . . . . . . . . . . . .  V                           V
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^               ^
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  Height      |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |          Y_Scanlines      |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |               |
+	 * . . . . . . . . . . . . . . . .              |             View_2
+	 * . . . . . . . . . . . . . . . .              |               |
+	 * . . . . . . . . . . . . . . . .              |               |
+	 * . . . . . . . . . . . . . . . .              V               |
+	 * U V U V U V U V U V U V . . . .  ^                           |
+	 * U V U V U V U V U V U V . . . .  |                           |
+	 * U V U V U V U V U V U V . . . .  |                           |
+	 * U V U V U V U V U V U V . . . .  UV_Scanlines                |
+	 * . . . . . . . . . . . . . . . .  |                           |
+	 * . . . . . . . . . . . . . . . .  V                           V
+	 * . . . . . . . . . . . . . . . .  --> Buffer size alignment
+	 *
+	 * Y_Stride : Width aligned to 128
+	 * UV_Stride : Width aligned to 128
+	 * Y_Scanlines: Height aligned to 32
+	 * UV_Scanlines: Height/2 aligned to 16
+	 * View_1 begin at: 0 (zero)
+	 * View_2 begin at: Y_Stride * Y_Scanlines + UV_Stride * UV_Scanlines
+	 * Extradata: Arbitrary (software-imposed) padding
+	 * Total size = align((2*(Y_Stride * Y_Scanlines)
+	 *          + 2*(UV_Stride * UV_Scanlines) + Extradata), 4096)
+	 */
+	COLOR_FMT_NV12_MVTB,
+	/*
+	 * The buffer can be of 2 types:
+	 * (1) Venus NV12 UBWC Progressive
+	 * (2) Venus NV12 UBWC Interlaced
+	 *
+	 * (1) Venus NV12 UBWC Progressive Buffer Format:
+	 * Compressed Macro-tile format for NV12.
+	 * Contains 4 planes in the following order -
+	 * (A) Y_Meta_Plane
+	 * (B) Y_UBWC_Plane
+	 * (C) UV_Meta_Plane
+	 * (D) UV_UBWC_Plane
+	 *
+	 * Y_Meta_Plane consists of meta information to decode compressed
+	 * tile data in Y_UBWC_Plane.
+	 * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+	 * UBWC decoder block will use the Y_Meta_Plane data together with
+	 * Y_UBWC_Plane data to produce loss-less uncompressed 8 bit Y samples.
+	 *
+	 * UV_Meta_Plane consists of meta information to decode compressed
+	 * tile data in UV_UBWC_Plane.
+	 * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+	 * UBWC decoder block will use UV_Meta_Plane data together with
+	 * UV_UBWC_Plane data to produce loss-less uncompressed 8 bit 2x2
+	 * subsampled color difference samples.
+	 *
+	 * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+	 * and randomly accessible. There is no dependency between tiles.
+	 *
+	 * <----- Y_Meta_Stride ---->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |         Meta_Y_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <--Compressed tile Y Stride--->
+	 * <------- Width ------->
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  Height      |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |        Macro_tile_Y_Scanlines
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 * <----- UV_Meta_Stride ---->
+	 * M M M M M M M M M M M M . .      ^
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      M_UV_Scanlines
+	 * . . . . . . . . . . . . . .      |
+	 * . . . . . . . . . . . . . .      V
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * <--Compressed tile UV Stride--->
+	 * U* V* U* V* U* V* U* V* . . . .  ^
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 *
+	 * Y_Stride = align(Width, 128)
+	 * UV_Stride = align(Width, 128)
+	 * Y_Scanlines = align(Height, 32)
+	 * UV_Scanlines = align(Height/2, 16)
+	 * Y_UBWC_Plane_size = align(Y_Stride * Y_Scanlines, 4096)
+	 * UV_UBWC_Plane_size = align(UV_Stride * UV_Scanlines, 4096)
+	 * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+	 * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+	 * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+	 * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+	 * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+	 * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+	 * Extradata = 8k
+	 *
+	 * Total size = align( Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+	 *           Y_Meta_Plane_size + UV_Meta_Plane_size
+	 *           + max(Extradata, Y_Stride * 48), 4096)
+	 *
+	 *
+	 * (2) Venus NV12 UBWC Interlaced Buffer Format:
+	 * Compressed Macro-tile format for NV12 interlaced.
+	 * Contains 8 planes in the following order -
+	 * (A) Y_Meta_Top_Field_Plane
+	 * (B) Y_UBWC_Top_Field_Plane
+	 * (C) UV_Meta_Top_Field_Plane
+	 * (D) UV_UBWC_Top_Field_Plane
+	 * (E) Y_Meta_Bottom_Field_Plane
+	 * (F) Y_UBWC_Bottom_Field_Plane
+	 * (G) UV_Meta_Bottom_Field_Plane
+	 * (H) UV_UBWC_Bottom_Field_Plane
+	 * Y_Meta_Top_Field_Plane consists of meta information to decode
+	 * compressed tile data for Y_UBWC_Top_Field_Plane.
+	 * Y_UBWC_Top_Field_Plane consists of Y data in compressed macro-tile
+	 * format for top field of an interlaced frame.
+	 * UBWC decoder block will use the Y_Meta_Top_Field_Plane data together
+	 * with Y_UBWC_Top_Field_Plane data to produce loss-less uncompressed
+	 * 8 bit Y samples for top field of an interlaced frame.
+	 *
+	 * UV_Meta_Top_Field_Plane consists of meta information to decode
+	 * compressed tile data in UV_UBWC_Top_Field_Plane.
+	 * UV_UBWC_Top_Field_Plane consists of UV data in compressed macro-tile
+	 * format for top field of an interlaced frame.
+	 * UBWC decoder block will use UV_Meta_Top_Field_Plane data together
+	 * with UV_UBWC_Top_Field_Plane data to produce loss-less uncompressed
+	 * 8 bit subsampled color difference samples for top field of an
+	 * interlaced frame.
+	 *
+	 * Each tile in Y_UBWC_Top_Field_Plane/UV_UBWC_Top_Field_Plane is
+	 * independently decodable and randomly accessible. There is no
+	 * dependency between tiles.
+	 *
+	 * Y_Meta_Bottom_Field_Plane consists of meta information to decode
+	 * compressed tile data for Y_UBWC_Bottom_Field_Plane.
+	 * Y_UBWC_Bottom_Field_Plane consists of Y data in compressed macro-tile
+	 * format for bottom field of an interlaced frame.
+	 * UBWC decoder block will use the Y_Meta_Bottom_Field_Plane data
+	 * together with Y_UBWC_Bottom_Field_Plane data to produce loss-less
+	 * uncompressed 8 bit Y samples for bottom field of an interlaced frame.
+	 *
+	 * UV_Meta_Bottom_Field_Plane consists of meta information to decode
+	 * compressed tile data in UV_UBWC_Bottom_Field_Plane.
+	 * UV_UBWC_Bottom_Field_Plane consists of UV data in compressed
+	 * macro-tile format for bottom field of an interlaced frame.
+	 * UBWC decoder block will use UV_Meta_Bottom_Field_Plane data together
+	 * with UV_UBWC_Bottom_Field_Plane data to produce loss-less
+	 * uncompressed 8 bit subsampled color difference samples for bottom
+	 * field of an interlaced frame.
+	 *
+	 * Each tile in Y_UBWC_Bottom_Field_Plane/UV_UBWC_Bottom_Field_Plane is
+	 * independently decodable and randomly accessible. There is no
+	 * dependency between tiles.
+	 *
+	 * <-----Y_TF_Meta_Stride---->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . . Half_height      |
+	 * M M M M M M M M M M M M . .      |         Meta_Y_TF_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <-Compressed tile Y_TF Stride->
+	 * <------- Width ------->
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height  |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |        Macro_tile_Y_TF_Scanlines
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 * <----UV_TF_Meta_Stride---->
+	 * M M M M M M M M M M M M . .      ^
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      M_UV_TF_Scanlines
+	 * . . . . . . . . . . . . . .      |
+	 * . . . . . . . . . . . . . .      V
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * <-Compressed tile UV_TF Stride->
+	 * U* V* U* V* U* V* U* V* . . . .  ^
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  UV_TF_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 * <-----Y_BF_Meta_Stride---->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . . Half_height      |
+	 * M M M M M M M M M M M M . .      |         Meta_Y_BF_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <-Compressed tile Y_BF Stride->
+	 * <------- Width ------->
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height  |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |        Macro_tile_Y_BF_Scanlines
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 * <----UV_BF_Meta_Stride---->
+	 * M M M M M M M M M M M M . .      ^
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      M_UV_BF_Scanlines
+	 * . . . . . . . . . . . . . .      |
+	 * . . . . . . . . . . . . . .      V
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * <-Compressed tile UV_BF Stride->
+	 * U* V* U* V* U* V* U* V* . . . .  ^
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  UV_BF_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 *
+	 * Half_height = (Height+1)>>1
+	 * Y_TF_Stride = align(Width, 128)
+	 * UV_TF_Stride = align(Width, 128)
+	 * Y_TF_Scanlines = align(Half_height, 32)
+	 * UV_TF_Scanlines = align((Half_height+1)/2, 32)
+	 * Y_UBWC_TF_Plane_size = align(Y_TF_Stride * Y_TF_Scanlines, 4096)
+	 * UV_UBWC_TF_Plane_size = align(UV_TF_Stride * UV_TF_Scanlines, 4096)
+	 * Y_TF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+	 * Y_TF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16)
+	 * Y_TF_Meta_Plane_size =
+	 *     align(Y_TF_Meta_Stride * Y_TF_Meta_Scanlines, 4096)
+	 * UV_TF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+	 * UV_TF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16)
+	 * UV_TF_Meta_Plane_size =
+	 *     align(UV_TF_Meta_Stride * UV_TF_Meta_Scanlines, 4096)
+	 * Y_BF_Stride = align(Width, 128)
+	 * UV_BF_Stride = align(Width, 128)
+	 * Y_BF_Scanlines = align(Half_height, 32)
+	 * UV_BF_Scanlines = align((Half_height+1)/2, 32)
+	 * Y_UBWC_BF_Plane_size = align(Y_BF_Stride * Y_BF_Scanlines, 4096)
+	 * UV_UBWC_BF_Plane_size = align(UV_BF_Stride * UV_BF_Scanlines, 4096)
+	 * Y_BF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+	 * Y_BF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16)
+	 * Y_BF_Meta_Plane_size =
+	 *     align(Y_BF_Meta_Stride * Y_BF_Meta_Scanlines, 4096)
+	 * UV_BF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+	 * UV_BF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16)
+	 * UV_BF_Meta_Plane_size =
+	 *     align(UV_BF_Meta_Stride * UV_BF_Meta_Scanlines, 4096)
+	 * Extradata = 8k
+	 *
+	 * Total size = align(Y_UBWC_TF_Plane_size + UV_UBWC_TF_Plane_size +
+	 *           Y_TF_Meta_Plane_size + UV_TF_Meta_Plane_size +
+	 *           Y_UBWC_BF_Plane_size + UV_UBWC_BF_Plane_size +
+	 *           Y_BF_Meta_Plane_size + UV_BF_Meta_Plane_size +
+	 *           max(Extradata, Y_TF_Stride * 48), 4096)
+	 */
+	COLOR_FMT_NV12_UBWC,
+	/* Venus NV12 10-bit UBWC:
+	 * Compressed Macro-tile format for NV12.
+	 * Contains 4 planes in the following order -
+	 * (A) Y_Meta_Plane
+	 * (B) Y_UBWC_Plane
+	 * (C) UV_Meta_Plane
+	 * (D) UV_UBWC_Plane
+	 *
+	 * Y_Meta_Plane consists of meta information to decode compressed
+	 * tile data in Y_UBWC_Plane.
+	 * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+	 * UBWC decoder block will use the Y_Meta_Plane data together with
+	 * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples.
+	 *
+	 * UV_Meta_Plane consists of meta information to decode compressed
+	 * tile data in UV_UBWC_Plane.
+	 * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+	 * UBWC decoder block will use UV_Meta_Plane data together with
+	 * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2
+	 * subsampled color difference samples.
+	 *
+	 * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+	 * and randomly accessible. There is no dependency between tiles.
+	 *
+	 * <----- Y_Meta_Stride ----->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |         Meta_Y_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <--Compressed tile Y Stride--->
+	 * <------- Width ------->
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  Height      |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |        Macro_tile_Y_Scanlines
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 * <----- UV_Meta_Stride ---->
+	 * M M M M M M M M M M M M . .      ^
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      M_UV_Scanlines
+	 * . . . . . . . . . . . . . .      |
+	 * . . . . . . . . . . . . . .      V
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * <--Compressed tile UV Stride--->
+	 * U* V* U* V* U* V* U* V* . . . .  ^
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 *
+	 *
+	 * Y_Stride = align(Width * 4/3, 128)
+	 * UV_Stride = align(Width * 4/3, 128)
+	 * Y_Scanlines = align(Height, 32)
+	 * UV_Scanlines = align(Height/2, 16)
+	 * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
+	 * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
+	 * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+	 * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+	 * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+	 * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+	 * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+	 * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+	 * Extradata = 8k
+	 *
+	 * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+	 *           Y_Meta_Plane_size + UV_Meta_Plane_size
+	 *           + max(Extradata, Y_Stride * 48), 4096)
+	 */
+	COLOR_FMT_NV12_BPP10_UBWC,
+	/* Venus RGBA8888 format:
+	 * Contains 1 plane in the following order -
+	 * (A) RGBA plane
+	 *
+	 * <-------- RGB_Stride -------->
+	 * <------- Width ------->
+	 * R R R R R R R R R R R R . . . .  ^           ^
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  Height      |
+	 * R R R R R R R R R R R R . . . .  |       RGB_Scanlines
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              V
+	 *
+	 * RGB_Stride = align(Width * 4, 128)
+	 * RGB_Scanlines = align(Height, 32)
+	 * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+	 * Extradata = 8k
+	 *
+	 * Total size = align(RGB_Plane_size + Extradata, 4096)
+	 */
+	COLOR_FMT_RGBA8888,
+	/* Venus RGBA8888 UBWC format:
+	 * Contains 2 planes in the following order -
+	 * (A) Meta plane
+	 * (B) RGBA plane
+	 *
+	 * <--- RGB_Meta_Stride ---->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |       Meta_RGB_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <-------- RGB_Stride -------->
+	 * <------- Width ------->
+	 * R R R R R R R R R R R R . . . .  ^           ^
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  Height      |
+	 * R R R R R R R R R R R R . . . .  |       RGB_Scanlines
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .    -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 *
+	 * RGB_Stride = align(Width * 4, 128)
+	 * RGB_Scanlines = align(Height, 32)
+	 * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+	 * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+	 * RGB_Meta_Scanlines = align(roundup(Height, RGB_TileHeight), 16)
+	 * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+	 *		RGB_Meta_Scanlines, 4096)
+	 * Extradata = 8k
+	 *
+	 * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+	 *		Extradata, 4096)
+	 */
+	COLOR_FMT_RGBA8888_UBWC,
+	/* Venus RGBA1010102 UBWC format:
+	 * Contains 2 planes in the following order -
+	 * (A) Meta plane
+	 * (B) RGBA plane
+	 *
+	 * <--- RGB_Meta_Stride ---->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |       Meta_RGB_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <-------- RGB_Stride -------->
+	 * <------- Width ------->
+	 * R R R R R R R R R R R R . . . .  ^           ^
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  Height      |
+	 * R R R R R R R R R R R R . . . .  |       RGB_Scanlines
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .    -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 *
+	 * RGB_Stride = align(Width * 4, 256)
+	 * RGB_Scanlines = align(Height, 16)
+	 * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+	 * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+	 * RGB_Meta_Scanlines = align(roundup(Height, RGB_TileHeight), 16)
+	 * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+	 *		RGB_Meta_Scanlines, 4096)
+	 * Extradata = 8k
+	 *
+	 * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+	 *		Extradata, 4096)
+	 */
+	COLOR_FMT_RGBA1010102_UBWC,
+	/* Venus RGB565 UBWC format:
+	 * Contains 2 planes in the following order -
+	 * (A) Meta plane
+	 * (B) RGB plane
+	 *
+	 * <--- RGB_Meta_Stride ---->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |       Meta_RGB_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <-------- RGB_Stride -------->
+	 * <------- Width ------->
+	 * R R R R R R R R R R R R . . . .  ^           ^
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  Height      |
+	 * R R R R R R R R R R R R . . . .  |       RGB_Scanlines
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  |           |
+	 * R R R R R R R R R R R R . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .    -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 *
+	 * RGB_Stride = align(Width * 2, 128)
+	 * RGB_Scanlines = align(Height, 16)
+	 * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+	 * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+	 * RGB_Meta_Scanlines = align(roundup(Height, RGB_TileHeight), 16)
+	 * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+	 *		RGB_Meta_Scanlines, 4096)
+	 * Extradata = 8k
+	 *
+	 * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+	 *		Extradata, 4096)
+	 */
+	COLOR_FMT_RGB565_UBWC,
+	/* P010 UBWC:
+	 * Compressed Macro-tile format for P010.
+	 * Contains 4 planes in the following order -
+	 * (A) Y_Meta_Plane
+	 * (B) Y_UBWC_Plane
+	 * (C) UV_Meta_Plane
+	 * (D) UV_UBWC_Plane
+	 *
+	 * Y_Meta_Plane consists of meta information to decode compressed
+	 * tile data in Y_UBWC_Plane.
+	 * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+	 * UBWC decoder block will use the Y_Meta_Plane data together with
+	 * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples.
+	 *
+	 * UV_Meta_Plane consists of meta information to decode compressed
+	 * tile data in UV_UBWC_Plane.
+	 * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+	 * UBWC decoder block will use UV_Meta_Plane data together with
+	 * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2
+	 * subsampled color difference samples.
+	 *
+	 * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+	 * and randomly accessible. There is no dependency between tiles.
+	 *
+	 * <----- Y_Meta_Stride ----->
+	 * <-------- Width ------>
+	 * M M M M M M M M M M M M . .      ^           ^
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      Height      |
+	 * M M M M M M M M M M M M . .      |         Meta_Y_Scanlines
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      |           |
+	 * M M M M M M M M M M M M . .      V           |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .                  |
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . .                  V
+	 * <--Compressed tile Y Stride--->
+	 * <------- Width ------->
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  Height      |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |        Macro_tile_Y_Scanlines
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+	 * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 * . . . . . . . . . . . . . . . .              V
+	 * <----- UV_Meta_Stride ---->
+	 * M M M M M M M M M M M M . .      ^
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      |
+	 * M M M M M M M M M M M M . .      M_UV_Scanlines
+	 * . . . . . . . . . . . . . .      |
+	 * . . . . . . . . . . . . . .      V
+	 * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+	 * <--Compressed tile UV Stride--->
+	 * U* V* U* V* U* V* U* V* . . . .  ^
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  |
+	 * U* V* U* V* U* V* U* V* . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+	 *
+	 *
+	 * Y_Stride = align(Width * 2, 256)
+	 * UV_Stride = align(Width * 2, 256)
+	 * Y_Scanlines = align(Height, 16)
+	 * UV_Scanlines = align(Height/2, 16)
+	 * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
+	 * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
+	 * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+	 * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+	 * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+	 * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+	 * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+	 * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+	 * Extradata = 8k
+	 *
+	 * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+	 *           Y_Meta_Plane_size + UV_Meta_Plane_size
+	 *           + max(Extradata, Y_Stride * 48), 4096)
+	 */
+	COLOR_FMT_P010_UBWC,
+	/* Venus P010:
+	 * YUV 4:2:0 image with a plane of 10 bit Y samples followed
+	 * by an interleaved U/V plane containing 10 bit 2x2 subsampled
+	 * colour difference samples.
+	 *
+	 * <-------- Y/UV_Stride -------->
+	 * <------- Width ------->
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  Height      |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |          Y_Scanlines
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+	 * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              |
+	 * . . . . . . . . . . . . . . . .              V
+	 * U V U V U V U V U V U V . . . .  ^
+	 * U V U V U V U V U V U V . . . .  |
+	 * U V U V U V U V U V U V . . . .  |
+	 * U V U V U V U V U V U V . . . .  UV_Scanlines
+	 * . . . . . . . . . . . . . . . .  |
+	 * . . . . . . . . . . . . . . . .  V
+	 * . . . . . . . . . . . . . . . .  --> Buffer size alignment
+	 *
+	 * Y_Stride : Width * 2 aligned to 128
+	 * UV_Stride : Width * 2 aligned to 128
+	 * Y_Scanlines: Height aligned to 32
+	 * UV_Scanlines: Height/2 aligned to 16
+	 * Extradata: Arbitrary (software-imposed) padding
+	 * Total size = align(Y_Stride * Y_Scanlines
+	 *          + UV_Stride * UV_Scanlines
+	 *          + max(Extradata, Y_Stride * 8), 4096)
+	 */
+	COLOR_FMT_P010,
+};
+
+#define COLOR_FMT_RGBA1010102_UBWC	COLOR_FMT_RGBA1010102_UBWC
+#define COLOR_FMT_RGB565_UBWC		COLOR_FMT_RGB565_UBWC
+#define COLOR_FMT_P010_UBWC		COLOR_FMT_P010_UBWC
+#define COLOR_FMT_P010		COLOR_FMT_P010
+
+static inline unsigned int VENUS_EXTRADATA_SIZE(int width, int height)
+{
+	(void)height;
+	(void)width;
+
+	/*
+	 * In the future, calculate the size based on the width/height; for
+	 * now, hardcode it, since 16K satisfies all current use cases.
+	 */
+	return 16 * 1024;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_Y_STRIDE(int color_fmt, int width)
+{
+	unsigned int alignment, stride = 0;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV21:
+	case COLOR_FMT_NV12:
+	case COLOR_FMT_NV12_MVTB:
+	case COLOR_FMT_NV12_UBWC:
+		alignment = 128;
+		stride = MSM_MEDIA_ALIGN(width, alignment);
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+		alignment = 256;
+		stride = MSM_MEDIA_ALIGN(width, 192);
+		stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+		break;
+	case COLOR_FMT_P010_UBWC:
+		alignment = 256;
+		stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+		break;
+	case COLOR_FMT_P010:
+		alignment = 128;
+		stride = MSM_MEDIA_ALIGN(width*2, alignment);
+		break;
+	default:
+		break;
+	}
+invalid_input:
+	return stride;
+}
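+
+/*
+ * Worked example (illustrative):
+ * VENUS_Y_STRIDE(COLOR_FMT_NV12_BPP10_UBWC, 1920) first aligns the width to
+ * 192 (-> 1920), scales it by 4/3 for the 10-bit packing (-> 2560), then
+ * aligns to 256, giving a stride of 2560 bytes.
+ */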
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_UV_STRIDE(int color_fmt, int width)
+{
+	unsigned int alignment, stride = 0;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV21:
+	case COLOR_FMT_NV12:
+	case COLOR_FMT_NV12_MVTB:
+	case COLOR_FMT_NV12_UBWC:
+		alignment = 128;
+		stride = MSM_MEDIA_ALIGN(width, alignment);
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+		alignment = 256;
+		stride = MSM_MEDIA_ALIGN(width, 192);
+		stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+		break;
+	case COLOR_FMT_P010_UBWC:
+		alignment = 256;
+		stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+		break;
+	case COLOR_FMT_P010:
+		alignment = 128;
+		stride = MSM_MEDIA_ALIGN(width*2, alignment);
+		break;
+	default:
+		break;
+	}
+invalid_input:
+	return stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
+{
+	unsigned int alignment, sclines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV21:
+	case COLOR_FMT_NV12:
+	case COLOR_FMT_NV12_MVTB:
+	case COLOR_FMT_NV12_UBWC:
+	case COLOR_FMT_P010:
+		alignment = 32;
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+	case COLOR_FMT_P010_UBWC:
+		alignment = 16;
+		break;
+	default:
+		return 0;
+	}
+	sclines = MSM_MEDIA_ALIGN(height, alignment);
+invalid_input:
+	return sclines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
+{
+	unsigned int alignment, sclines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV21:
+	case COLOR_FMT_NV12:
+	case COLOR_FMT_NV12_MVTB:
+	case COLOR_FMT_NV12_BPP10_UBWC:
+	case COLOR_FMT_P010_UBWC:
+	case COLOR_FMT_P010:
+		alignment = 16;
+		break;
+	case COLOR_FMT_NV12_UBWC:
+		alignment = 32;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	sclines = MSM_MEDIA_ALIGN((height+1)>>1, alignment);
+
+invalid_input:
+	return sclines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
+{
+	int y_tile_width = 0, y_meta_stride = 0;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV12_UBWC:
+	case COLOR_FMT_P010_UBWC:
+		y_tile_width = 32;
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+		y_tile_width = 48;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	y_meta_stride = MSM_MEDIA_ROUNDUP(width, y_tile_width);
+	y_meta_stride = MSM_MEDIA_ALIGN(y_meta_stride, 64);
+
+invalid_input:
+	return y_meta_stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
+{
+	int y_tile_height = 0, y_meta_scanlines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV12_UBWC:
+		y_tile_height = 8;
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+	case COLOR_FMT_P010_UBWC:
+		y_tile_height = 4;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	y_meta_scanlines = MSM_MEDIA_ROUNDUP(height, y_tile_height);
+	y_meta_scanlines = MSM_MEDIA_ALIGN(y_meta_scanlines, 16);
+
+invalid_input:
+	return y_meta_scanlines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
+{
+	int uv_tile_width = 0, uv_meta_stride = 0;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV12_UBWC:
+	case COLOR_FMT_P010_UBWC:
+		uv_tile_width = 16;
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+		uv_tile_width = 24;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	uv_meta_stride = MSM_MEDIA_ROUNDUP((width+1)>>1, uv_tile_width);
+	uv_meta_stride = MSM_MEDIA_ALIGN(uv_meta_stride, 64);
+
+invalid_input:
+	return uv_meta_stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
+{
+	int uv_tile_height = 0, uv_meta_scanlines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV12_UBWC:
+		uv_tile_height = 8;
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+	case COLOR_FMT_P010_UBWC:
+		uv_tile_height = 4;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	uv_meta_scanlines = MSM_MEDIA_ROUNDUP((height+1)>>1, uv_tile_height);
+	uv_meta_scanlines = MSM_MEDIA_ALIGN(uv_meta_scanlines, 16);
+
+invalid_input:
+	return uv_meta_scanlines;
+}
+
+static inline unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
+{
+	unsigned int alignment = 0, stride = 0, bpp = 4;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_RGBA8888:
+		alignment = 128;
+		break;
+	case COLOR_FMT_RGB565_UBWC:
+		alignment = 256;
+		bpp = 2;
+		break;
+	case COLOR_FMT_RGBA8888_UBWC:
+	case COLOR_FMT_RGBA1010102_UBWC:
+		alignment = 256;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	stride = MSM_MEDIA_ALIGN(width * bpp, alignment);
+
+invalid_input:
+	return stride;
+}
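+
+/*
+ * Worked example (illustrative):
+ * VENUS_RGB_STRIDE(COLOR_FMT_RGBA8888, 1920) == align(1920 * 4, 128) == 7680
+ */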
+
+static inline unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
+{
+	unsigned int alignment = 0, scanlines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_RGBA8888:
+		alignment = 32;
+		break;
+	case COLOR_FMT_RGBA8888_UBWC:
+	case COLOR_FMT_RGBA1010102_UBWC:
+	case COLOR_FMT_RGB565_UBWC:
+		alignment = 16;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	scanlines = MSM_MEDIA_ALIGN(height, alignment);
+
+invalid_input:
+	return scanlines;
+}
+
+static inline unsigned int VENUS_RGB_META_STRIDE(int color_fmt, int width)
+{
+	int rgb_tile_width = 0, rgb_meta_stride = 0;
+
+	if (!width)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_RGBA8888_UBWC:
+	case COLOR_FMT_RGBA1010102_UBWC:
+	case COLOR_FMT_RGB565_UBWC:
+		rgb_tile_width = 16;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, rgb_tile_width);
+	rgb_meta_stride = MSM_MEDIA_ALIGN(rgb_meta_stride, 64);
+
+invalid_input:
+	return rgb_meta_stride;
+}
+
+static inline unsigned int VENUS_RGB_META_SCANLINES(int color_fmt, int height)
+{
+	int rgb_tile_height = 0, rgb_meta_scanlines = 0;
+
+	if (!height)
+		goto invalid_input;
+
+	switch (color_fmt) {
+	case COLOR_FMT_RGBA8888_UBWC:
+	case COLOR_FMT_RGBA1010102_UBWC:
+	case COLOR_FMT_RGB565_UBWC:
+		rgb_tile_height = 4;
+		break;
+	default:
+		goto invalid_input;
+	}
+
+	rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, rgb_tile_height);
+	rgb_meta_scanlines = MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16);
+
+invalid_input:
+	return rgb_meta_scanlines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ * @height
+ * Progressive: height
+ * Interlaced: height
+ */
+static inline unsigned int VENUS_BUFFER_SIZE(
+	int color_fmt, int width, int height)
+{
+	const unsigned int extra_size = VENUS_EXTRADATA_SIZE(width, height);
+	unsigned int uv_alignment = 0, size = 0;
+	unsigned int y_plane, uv_plane, y_stride,
+		uv_stride, y_sclines, uv_sclines;
+	unsigned int y_ubwc_plane = 0, uv_ubwc_plane = 0;
+	unsigned int y_meta_stride = 0, y_meta_scanlines = 0;
+	unsigned int uv_meta_stride = 0, uv_meta_scanlines = 0;
+	unsigned int y_meta_plane = 0, uv_meta_plane = 0;
+	unsigned int rgb_stride = 0, rgb_scanlines = 0;
+	unsigned int rgb_plane = 0, rgb_ubwc_plane = 0, rgb_meta_plane = 0;
+	unsigned int rgb_meta_stride = 0, rgb_meta_scanlines = 0;
+
+	if (!width || !height)
+		goto invalid_input;
+
+	y_stride = VENUS_Y_STRIDE(color_fmt, width);
+	uv_stride = VENUS_UV_STRIDE(color_fmt, width);
+	y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
+	uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
+	rgb_stride = VENUS_RGB_STRIDE(color_fmt, width);
+	rgb_scanlines = VENUS_RGB_SCANLINES(color_fmt, height);
+
+	switch (color_fmt) {
+	case COLOR_FMT_NV21:
+	case COLOR_FMT_NV12:
+	case COLOR_FMT_P010:
+		uv_alignment = 4096;
+		y_plane = y_stride * y_sclines;
+		uv_plane = uv_stride * uv_sclines + uv_alignment;
+		size = y_plane + uv_plane +
+				MSM_MEDIA_MAX(extra_size, 8 * y_stride);
+		size = MSM_MEDIA_ALIGN(size, 4096);
+		break;
+	case COLOR_FMT_NV12_MVTB:
+		uv_alignment = 4096;
+		y_plane = y_stride * y_sclines;
+		uv_plane = uv_stride * uv_sclines + uv_alignment;
+		size = y_plane + uv_plane;
+		size = 2 * size + extra_size;
+		size = MSM_MEDIA_ALIGN(size, 4096);
+		break;
+	case COLOR_FMT_NV12_UBWC:
+		y_sclines = VENUS_Y_SCANLINES(color_fmt, (height+1)>>1);
+		y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+		uv_sclines = VENUS_UV_SCANLINES(color_fmt, (height+1)>>1);
+		uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+		y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+		y_meta_scanlines =
+			VENUS_Y_META_SCANLINES(color_fmt, (height+1)>>1);
+		y_meta_plane = MSM_MEDIA_ALIGN(
+			y_meta_stride * y_meta_scanlines, 4096);
+		uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+		uv_meta_scanlines =
+			VENUS_UV_META_SCANLINES(color_fmt, (height+1)>>1);
+		uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+			uv_meta_scanlines, 4096);
+
+		size = (y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+			uv_meta_plane)*2 +
+			MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
+		size = MSM_MEDIA_ALIGN(size, 4096);
+		break;
+	case COLOR_FMT_NV12_BPP10_UBWC:
+		y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+		uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+		y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+		y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
+		y_meta_plane = MSM_MEDIA_ALIGN(
+				y_meta_stride * y_meta_scanlines, 4096);
+		uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+		uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
+		uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+					uv_meta_scanlines, 4096);
+
+		size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+			uv_meta_plane +
+			MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
+		size = MSM_MEDIA_ALIGN(size, 4096);
+		break;
+	case COLOR_FMT_P010_UBWC:
+		y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+		uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+		y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+		y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
+		y_meta_plane = MSM_MEDIA_ALIGN(
+				y_meta_stride * y_meta_scanlines, 4096);
+		uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+		uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
+		uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+					uv_meta_scanlines, 4096);
+
+		size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+			uv_meta_plane;
+		size = MSM_MEDIA_ALIGN(size, 4096);
+		break;
+	case COLOR_FMT_RGBA8888:
+		rgb_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines, 4096);
+		size = rgb_plane;
+		size = MSM_MEDIA_ALIGN(size, 4096);
+		break;
+	case COLOR_FMT_RGBA8888_UBWC:
+	case COLOR_FMT_RGBA1010102_UBWC:
+	case COLOR_FMT_RGB565_UBWC:
+		rgb_ubwc_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines,
+							4096);
+		rgb_meta_stride = VENUS_RGB_META_STRIDE(color_fmt, width);
+		rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color_fmt,
+					height);
+		rgb_meta_plane = MSM_MEDIA_ALIGN(rgb_meta_stride *
+					rgb_meta_scanlines, 4096);
+		size = rgb_ubwc_plane + rgb_meta_plane;
+		size = MSM_MEDIA_ALIGN(size, 4096);
+		break;
+	default:
+		break;
+	}
+invalid_input:
+	return size;
+}
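+
+/*
+ * Usage sketch (illustrative): for a 1920x1080 COLOR_FMT_NV12 buffer,
+ * y_stride = 1920, y_sclines = 1088, uv_stride = 1920, uv_sclines = 544,
+ * so the total is align(1920 * 1088 + (1920 * 544 + 4096)
+ * + max(16384, 8 * 1920), 4096) = 3153920 bytes.
+ */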
+
+static inline unsigned int VENUS_VIEW2_OFFSET(
+	int color_fmt, int width, int height)
+{
+	unsigned int offset = 0;
+	unsigned int y_plane, uv_plane, y_stride,
+		uv_stride, y_sclines, uv_sclines;
+	if (!width || !height)
+		goto invalid_input;
+
+	y_stride = VENUS_Y_STRIDE(color_fmt, width);
+	uv_stride = VENUS_UV_STRIDE(color_fmt, width);
+	y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
+	uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
+	switch (color_fmt) {
+	case COLOR_FMT_NV12_MVTB:
+		y_plane = y_stride * y_sclines;
+		uv_plane = uv_stride * uv_sclines;
+		offset = y_plane + uv_plane;
+		break;
+	default:
+		break;
+	}
+invalid_input:
+	return offset;
+}
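+
+/*
+ * Illustrative: for a 1920x1080 COLOR_FMT_NV12_MVTB buffer, the second view
+ * begins at y_stride * y_sclines + uv_stride * uv_sclines
+ * = 1920 * 1088 + 1920 * 544 = 3133440 bytes.
+ */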
+
+#endif
-- 
Sean Paul, Software Engineer, Google / Chromium OS