[PATCH 2/2] drm/msm/mdp5: Wait for PP_DONE irq for command mode CRTC atomic commit

Hai Li hali at codeaurora.org
Fri Apr 24 08:57:10 PDT 2015


CRTCs on the DSI command mode data path should wait for pingpong done,
instead of vblank, to finish an atomic commit.

This change enables the PP_DONE irq on command mode CRTCs and waits for
this irq to fire before completing the atomic commit.
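
In essence this is the standard struct completion hand-shake: re-arm the
completion before the frame is triggered, complete it from the PP_DONE irq
handler, and wait on it with a timeout when the commit is flushed. A
condensed sketch of the pattern, using the fields added by this patch:

	/* in atomic_flush(), before FLUSH and START are triggered */
	reinit_completion(&mdp5_crtc->pp_completion);

	/* in the PP_DONE irq handler */
	complete(&mdp5_crtc->pp_completion);

	/* in wait_for_commit_done() for command mode CRTCs */
	if (!wait_for_completion_timeout(&mdp5_crtc->pp_completion,
					 msecs_to_jiffies(50)))
		dev_warn(dev->dev, "pp done time out, lm=%d\n", mdp5_crtc->lm);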

Signed-off-by: Hai Li <hali at codeaurora.org>
---
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c |  4 --
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c        | 71 ++++++++++++++++++++-----
 2 files changed, 58 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index e4e8956..5f87357 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -216,16 +216,12 @@ static void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
 static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
 {
 	struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
-	struct mdp5_kms *mdp5_kms = get_kms(encoder);
 	struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
 	struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
-	int lm = mdp5_crtc_get_lm(encoder->crtc);
 
 	if (WARN_ON(!mdp5_cmd_enc->enabled))
 		return;
 
-	/* Wait for the last frame done */
-	mdp_irq_wait(&mdp5_kms->base, lm2ppdone(lm));
 	pingpong_tearcheck_disable(encoder);
 
 	mdp5_ctl_set_encoder_state(ctl, false);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 034b1b9..a38c27b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark at gmail.com>
  *
@@ -60,6 +60,9 @@ struct mdp5_crtc {
 
 	struct mdp_irq vblank;
 	struct mdp_irq err;
+	struct mdp_irq pp_done;
+
+	struct completion pp_completion;
 
 	void (*wait_for_commit_done)(struct drm_crtc *crtc);
 
@@ -89,6 +92,12 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending)
 	mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
 }
 
+static void request_pp_done_pending(struct drm_crtc *crtc)
+{
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	reinit_completion(&mdp5_crtc->pp_completion);
+}
+
 static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
@@ -284,6 +293,9 @@ static void mdp5_crtc_disable(struct drm_crtc *crtc)
 	/* set STAGE_UNUSED for all layers */
 	mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000);
 
+	if (mdp5_crtc->pp_done.irq)
+		mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);
+
 	mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
 	mdp5_disable(mdp5_kms);
 
@@ -303,6 +315,9 @@ static void mdp5_crtc_enable(struct drm_crtc *crtc)
 	mdp5_enable(mdp5_kms);
 	mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
 
+	if (mdp5_crtc->pp_done.irq)
+		mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);
+
 	mdp5_crtc->enabled = true;
 }
 
@@ -404,6 +419,15 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
 
 	blend_setup(crtc);
 
+	/* The PP_DONE irq is only used by command mode for now.
+	 * Request pp_done pending before the FLUSH and START
+	 * triggers, to make sure no pp_done irq is missed.
+	 * This is safe because in command mode no pp_done can
+	 * happen before the SW trigger.
+	 */
+	if (mdp5_crtc->pp_done.irq)
+		request_pp_done_pending(crtc);
+
 	mdp5_crtc->flushed_mask = crtc_flush_all(crtc);
 
 	request_pending(crtc, PENDING_FLIP);
@@ -614,6 +638,26 @@ static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
 	DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
 }
 
+static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
+{
+	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
+								pp_done);
+
+	complete(&mdp5_crtc->pp_completion);
+}
+
+static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	int ret;
+
+	ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
+						msecs_to_jiffies(50));
+	if (ret == 0)
+		dev_warn(dev->dev, "pp done time out, lm=%d\n", mdp5_crtc->lm);
+}
+
 static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -661,18 +705,18 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf)
 
 	/* now that we know what irq's we want: */
 	mdp5_crtc->err.irqmask = intf2err(intf->num);
-
-	/* Register command mode Pingpong done as vblank for now,
-	 * so that atomic commit should wait for it to finish.
-	 * Ideally, in the future, we should take rd_ptr done as vblank,
-	 * and let atomic commit wait for pingpong done for commond mode.
-	 */
-	if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
-		mdp5_crtc->vblank.irqmask = lm2ppdone(lm);
-	else
-		mdp5_crtc->vblank.irqmask = intf2vblank(lm, intf);
-
-	mdp5_crtc->wait_for_commit_done = mdp5_crtc_wait_for_flush_done;
+	mdp5_crtc->vblank.irqmask = intf2vblank(lm, intf);
+
+	if ((intf->type == INTF_DSI) &&
+		(intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
+		mdp5_crtc->pp_done.irqmask = lm2ppdone(lm);
+		mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
+		mdp5_crtc->wait_for_commit_done = mdp5_crtc_wait_for_pp_done;
+	} else {
+		mdp5_crtc->pp_done.irqmask = 0;
+		mdp5_crtc->pp_done.irq = NULL;
+		mdp5_crtc->wait_for_commit_done = mdp5_crtc_wait_for_flush_done;
+	}
 
 	mdp_irq_update(&mdp5_kms->base);
 
@@ -721,6 +765,7 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
 
 	spin_lock_init(&mdp5_crtc->lm_lock);
 	spin_lock_init(&mdp5_crtc->cursor.lock);
+	init_completion(&mdp5_crtc->pp_completion);
 
 	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
 	mdp5_crtc->err.irq = mdp5_crtc_err_irq;
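
For reference, a hedged sketch of how the per-CRTC wait_for_commit_done
hook installed above might be dispatched from the commit path; the helper
shown here is illustrative only, the actual plumbing is outside this patch:

	/* Illustrative only: call whichever wait function was chosen in
	 * mdp5_crtc_set_intf() (pp_done for DSI command mode, flush done
	 * otherwise).
	 */
	void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
	{
		struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

		if (mdp5_crtc->wait_for_commit_done)
			mdp5_crtc->wait_for_commit_done(crtc);
	}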
-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
hosted by The Linux Foundation


