[PATCH 6/7] drm/msm: add atomic support
Rob Clark
robdclark at gmail.com
Fri May 30 10:23:08 PDT 2014
Add a driver atomic_commit() implementation (msm_atomic.c) and wire it up
in place of drm_atomic_commit().  CRTCs touched by an in-flight atomic
update are tracked in priv->pending_crtcs so that the per-CRTC flush
becomes a no-op until the update has been committed; the new kms->flush()
hook then flushes each updated CRTC.  Waiting for fb's to become ready is
handled in the commit path (deferred via fence callback for nonblocking
commits), so the per-CRTC pageflip fence callback goes away and page_flip
updates the scanout directly.

Signed-off-by: Rob Clark <robdclark at gmail.com>
---
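A quick standalone sketch of the deferred-flush ordering this relies on
(plain userspace C with made-up names, not driver code): while a CRTC's
bit is set in pending_crtcs any flush request is dropped, and the commit
path clears the mask and then issues one explicit flush per updated CRTC:

#include <stdint.h>
#include <stdio.h>

static uint32_t pending_crtcs;	/* stands in for priv->pending_crtcs */

static void crtc_flush(int id)
{
	/* mirrors the early-return added to the mdp4/mdp5 crtc flush: */
	if (pending_crtcs & (1 << id)) {
		printf("crtc%d: flush deferred, atomic update in progress\n", id);
		return;
	}
	printf("crtc%d: flush written to hw\n", id);
}

static void atomic_commit(uint32_t crtcs)
{
	int i;

	pending_crtcs |= crtcs;

	/* core commit: plane/crtc updates may ask for a flush, which is
	 * dropped while the CRTC is masked:
	 */
	for (i = 0; i < 32; i++)
		if (crtcs & (1 << i))
			crtc_flush(i);

	pending_crtcs &= ~crtcs;

	/* one explicit flush per CRTC once the whole update is in place: */
	for (i = 0; i < 32; i++)
		if (crtcs & (1 << i))
			crtc_flush(i);
}

int main(void)
{
	atomic_commit((1 << 0) | (1 << 1));
	return 0;
}

In the driver the mask is updated under dev->struct_mutex, and the
post-commit kms->flush() covers each CRTC in the update.
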
drivers/gpu/drm/msm/Makefile | 1 +
drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 57 ++++++------
drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 6 ++
drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h | 1 +
drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c | 5 --
drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 56 ++++++------
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 6 ++
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 2 +-
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 5 --
drivers/gpu/drm/msm/msm_atomic.c | 141 ++++++++++++++++++++++++++++++
drivers/gpu/drm/msm/msm_drv.c | 22 ++++-
drivers/gpu/drm/msm/msm_drv.h | 7 ++
drivers/gpu/drm/msm/msm_gem.c | 24 +----
drivers/gpu/drm/msm/msm_gem.h | 13 +++
drivers/gpu/drm/msm/msm_kms.h | 1 +
15 files changed, 253 insertions(+), 94 deletions(-)
create mode 100644 drivers/gpu/drm/msm/msm_atomic.c
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 5e1e6b0..dd12f56 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -27,6 +27,7 @@ msm-y := \
mdp/mdp5/mdp5_kms.o \
mdp/mdp5/mdp5_plane.o \
mdp/mdp5/mdp5_smp.o \
+ msm_atomic.o \
msm_drv.o \
msm_fb.o \
msm_gem.o \
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 8d9b604..9cbd997 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -52,7 +52,6 @@ struct mdp4_crtc {
/* if there is a pending flip, these will be non-null: */
struct drm_pending_vblank_event *event;
- struct msm_fence_cb pageflip_cb;
#define PENDING_CURSOR 0x1
#define PENDING_FLIP 0x2
@@ -93,12 +92,16 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending)
mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
}
-static void crtc_flush(struct drm_crtc *crtc)
+void mdp4_crtc_flush(struct drm_crtc *crtc)
{
+ struct msm_drm_private *priv = crtc->dev->dev_private;
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
uint32_t i, flush = 0;
+ if (priv->pending_crtcs & (1 << crtc->id))
+ return;
+
for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
struct drm_plane *plane = mdp4_crtc->planes[i];
if (plane) {
@@ -142,7 +145,7 @@ static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
* so that we can safely queue unref to current fb (ie. next
* vblank we know hw is done w/ previous scanout_fb).
*/
- crtc_flush(crtc);
+ mdp4_crtc_flush(crtc);
if (mdp4_crtc->scanout_fb)
drm_flip_work_queue(&mdp4_crtc->unref_fb_work,
@@ -177,21 +180,6 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
-static void pageflip_cb(struct msm_fence_cb *cb)
-{
- struct mdp4_crtc *mdp4_crtc =
- container_of(cb, struct mdp4_crtc, pageflip_cb);
- struct drm_crtc *crtc = &mdp4_crtc->base;
- struct drm_framebuffer *fb = crtc->primary->fb;
-
- if (!fb)
- return;
-
- drm_framebuffer_reference(fb);
- mdp4_plane_set_scanout(mdp4_crtc->plane, fb);
- update_scanout(crtc, fb);
-}
-
static void unref_fb_worker(struct drm_flip_work *work, void *val)
{
struct mdp4_crtc *mdp4_crtc =
@@ -406,7 +394,7 @@ static void mdp4_crtc_prepare(struct drm_crtc *crtc)
static void mdp4_crtc_commit(struct drm_crtc *crtc)
{
mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
- crtc_flush(crtc);
+ mdp4_crtc_flush(crtc);
/* drop the ref to mdp clk's that we got in prepare: */
mdp4_disable(get_kms(crtc));
}
@@ -448,23 +436,27 @@ static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct drm_gem_object *obj;
unsigned long flags;
+ spin_lock_irqsave(&dev->event_lock, flags);
if (mdp4_crtc->event) {
dev_err(dev->dev, "already pending flip!\n");
+ spin_unlock_irqrestore(&dev->event_lock, flags);
return -EBUSY;
}
- obj = msm_framebuffer_bo(new_fb, 0);
-
- spin_lock_irqsave(&dev->event_lock, flags);
mdp4_crtc->event = event;
spin_unlock_irqrestore(&dev->event_lock, flags);
update_fb(crtc, new_fb);
- return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb);
+
+ /* grab extra ref for update_scanout() */
+ drm_framebuffer_reference(new_fb);
+ mdp4_plane_set_scanout(mdp4_crtc->plane, new_fb);
+ update_scanout(crtc, new_fb);
+
+ return 0;
}
static int mdp4_crtc_set_property(struct drm_crtc *crtc,
@@ -598,7 +590,7 @@ static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
mdp4_crtc->cursor.y = y;
spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
- crtc_flush(crtc);
+ mdp4_crtc_flush(crtc);
request_pending(crtc, PENDING_CURSOR);
return 0;
@@ -635,8 +627,13 @@ static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
pending = atomic_xchg(&mdp4_crtc->pending, 0);
if (pending & PENDING_FLIP) {
- complete_flip(crtc, NULL);
- drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq);
+ if (priv->pending_crtcs & (1 << crtc->id)) {
+ /* our update hasn't been flushed yet: */
+ request_pending(crtc, PENDING_FLIP);
+ } else {
+ complete_flip(crtc, NULL);
+ drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq);
+ }
}
if (pending & PENDING_CURSOR) {
@@ -650,7 +647,7 @@ static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
struct drm_crtc *crtc = &mdp4_crtc->base;
DBG("%s: error: %08x", mdp4_crtc->name, irqstatus);
- crtc_flush(crtc);
+ mdp4_crtc_flush(crtc);
}
uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
@@ -730,7 +727,7 @@ static void set_attach(struct drm_crtc *crtc, enum mdp4_pipe pipe_id,
mdp4_crtc->planes[pipe_id] = plane;
blend_setup(crtc);
if (mdp4_crtc->enabled && (plane != mdp4_crtc->plane))
- crtc_flush(crtc);
+ mdp4_crtc_flush(crtc);
}
void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
@@ -792,8 +789,6 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
ret = drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 64,
"unref cursor", unref_cursor_worker);
- INIT_FENCE_CB(&mdp4_crtc->pageflip_cb, pageflip_cb);
-
drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs);
drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 0bb4faa..af0bfb1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -131,6 +131,11 @@ static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
return mdp4_dtv_round_pixclk(encoder, rate);
}
+static void mdp4_flush(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ mdp4_crtc_flush(crtc);
+}
+
static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
@@ -162,6 +167,7 @@ static const struct mdp_kms_funcs kms_funcs = {
.disable_vblank = mdp4_disable_vblank,
.get_format = mdp_get_format,
.round_pixclk = mdp4_round_pixclk,
+ .flush = mdp4_flush,
.preclose = mdp4_preclose,
.destroy = mdp4_destroy,
},
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 715520c5..834454c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -184,6 +184,7 @@ enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp4_plane_init(struct drm_device *dev,
enum mdp4_pipe pipe_id, bool private_plane);
+void mdp4_crtc_flush(struct drm_crtc *crtc);
uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 4c92985..f35848d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -48,11 +48,6 @@ static int mdp4_plane_update(struct drm_plane *plane,
mdp4_plane->enabled = true;
- if (plane->fb)
- drm_framebuffer_unreference(plane->fb);
-
- drm_framebuffer_reference(fb);
-
return mdp4_plane_mode_set(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 57b67b4..039bcb2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -35,7 +35,6 @@ struct mdp5_crtc {
/* if there is a pending flip, these will be non-null: */
struct drm_pending_vblank_event *event;
- struct msm_fence_cb pageflip_cb;
#define PENDING_CURSOR 0x1
#define PENDING_FLIP 0x2
@@ -73,13 +72,17 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending)
mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}
-static void crtc_flush(struct drm_crtc *crtc)
+void mdp5_crtc_flush(struct drm_crtc *crtc)
{
+ struct msm_drm_private *priv = crtc->dev->dev_private;
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
int id = mdp5_crtc->id;
uint32_t i, flush = 0;
+ if (priv->pending_crtcs & (1 << crtc->id))
+ return;
+
for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
struct drm_plane *plane = mdp5_crtc->planes[i];
if (plane) {
@@ -124,7 +127,7 @@ static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
* so that we can safely queue unref to current fb (ie. next
* vblank we know hw is done w/ previous scanout_fb).
*/
- crtc_flush(crtc);
+ mdp5_crtc_flush(crtc);
if (mdp5_crtc->scanout_fb)
drm_flip_work_queue(&mdp5_crtc->unref_fb_work,
@@ -165,21 +168,6 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
}
}
-static void pageflip_cb(struct msm_fence_cb *cb)
-{
- struct mdp5_crtc *mdp5_crtc =
- container_of(cb, struct mdp5_crtc, pageflip_cb);
- struct drm_crtc *crtc = &mdp5_crtc->base;
- struct drm_framebuffer *fb = mdp5_crtc->fb;
-
- if (!fb)
- return;
-
- drm_framebuffer_reference(fb);
- mdp5_plane_set_scanout(mdp5_crtc->plane, fb);
- update_scanout(crtc, fb);
-}
-
static void unref_fb_worker(struct drm_flip_work *work, void *val)
{
struct mdp5_crtc *mdp5_crtc =
@@ -324,7 +312,7 @@ static void mdp5_crtc_prepare(struct drm_crtc *crtc)
static void mdp5_crtc_commit(struct drm_crtc *crtc)
{
mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
- crtc_flush(crtc);
+ mdp5_crtc_flush(crtc);
/* drop the ref to mdp clk's that we got in prepare: */
mdp5_disable(get_kms(crtc));
}
@@ -366,23 +354,26 @@ static int mdp5_crtc_page_flip(struct drm_crtc *crtc,
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct drm_gem_object *obj;
unsigned long flags;
+ spin_lock_irqsave(&dev->event_lock, flags);
if (mdp5_crtc->event) {
dev_err(dev->dev, "already pending flip!\n");
+ spin_unlock_irqrestore(&dev->event_lock, flags);
return -EBUSY;
}
- obj = msm_framebuffer_bo(new_fb, 0);
-
- spin_lock_irqsave(&dev->event_lock, flags);
mdp5_crtc->event = event;
spin_unlock_irqrestore(&dev->event_lock, flags);
update_fb(crtc, new_fb);
- return msm_gem_queue_inactive_cb(obj, &mdp5_crtc->pageflip_cb);
+ /* grab extra ref for update_scanout() */
+ drm_framebuffer_reference(new_fb);
+ mdp5_plane_set_scanout(mdp5_crtc->plane, new_fb);
+ update_scanout(crtc, new_fb);
+
+ return 0;
}
static int mdp5_crtc_set_property(struct drm_crtc *crtc,
@@ -424,8 +415,13 @@ static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
pending = atomic_xchg(&mdp5_crtc->pending, 0);
if (pending & PENDING_FLIP) {
- complete_flip(crtc, NULL);
- drm_flip_work_commit(&mdp5_crtc->unref_fb_work, priv->wq);
+ if (priv->pending_crtcs & (1 << crtc->id)) {
+ /* our update hasn't been flushed yet: */
+ request_pending(crtc, PENDING_FLIP);
+ } else {
+ complete_flip(crtc, NULL);
+ drm_flip_work_commit(&mdp5_crtc->unref_fb_work, priv->wq);
+ }
}
}
@@ -434,7 +430,7 @@ static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
struct drm_crtc *crtc = &mdp5_crtc->base;
DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
- crtc_flush(crtc);
+ mdp5_crtc_flush(crtc);
}
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
@@ -501,7 +497,7 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
MDP5_CTL_OP_MODE(MODE_NONE) |
MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
- crtc_flush(crtc);
+ mdp5_crtc_flush(crtc);
}
static void set_attach(struct drm_crtc *crtc, enum mdp5_pipe pipe_id,
@@ -517,7 +513,7 @@ static void set_attach(struct drm_crtc *crtc, enum mdp5_pipe pipe_id,
mdp5_crtc->planes[pipe_id] = plane;
blend_setup(crtc);
if (mdp5_crtc->enabled && (plane != mdp5_crtc->plane))
- crtc_flush(crtc);
+ mdp5_crtc_flush(crtc);
}
void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
@@ -563,8 +559,6 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
if (ret)
goto fail;
- INIT_FENCE_CB(&mdp5_crtc->pageflip_cb, pageflip_cb);
-
drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);
drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index ee8446c..01c3d70 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -91,6 +91,11 @@ static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
return rate;
}
+static void mdp5_flush(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ mdp5_crtc_flush(crtc);
+}
+
static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
@@ -118,6 +123,7 @@ static const struct mdp_kms_funcs kms_funcs = {
.disable_vblank = mdp5_disable_vblank,
.get_format = mdp_get_format,
.round_pixclk = mdp5_round_pixclk,
+ .flush = mdp5_flush,
.preclose = mdp5_preclose,
.destroy = mdp5_destroy,
},
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index c8b1a25..18b031b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -197,8 +197,8 @@ enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
enum mdp5_pipe pipe, bool private_plane);
+void mdp5_crtc_flush(struct drm_crtc *crtc);
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
-
void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
enum mdp5_intf intf_id);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 53cc8c6..f1bf3c2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -48,11 +48,6 @@ static int mdp5_plane_update(struct drm_plane *plane,
mdp5_plane->enabled = true;
- if (plane->fb)
- drm_framebuffer_unreference(plane->fb);
-
- drm_framebuffer_reference(fb);
-
return mdp5_plane_mode_set(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
new file mode 100644
index 0000000..b231377
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark at gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "msm_gem.h"
+
+struct msm_async_commit {
+ struct drm_atomic_state *state;
+ uint32_t fence;
+ struct msm_fence_cb fence_cb;
+};
+
+static void fence_cb(struct msm_fence_cb *cb);
+static int commit_sync(struct drm_device *dev, void *state, bool unlocked);
+
+static struct msm_async_commit *new_commit(struct drm_atomic_state *state)
+{
+ struct msm_async_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
+
+ if (!c)
+ return NULL;
+
+ drm_atomic_state_reference(state);
+ c->state = state;
+ INIT_FENCE_CB(&c->fence_cb, fence_cb);
+
+ return c;
+}
+static void free_commit(struct msm_async_commit *c)
+{
+ drm_atomic_state_unreference(c->state);
+ kfree(c);
+}
+
+static void fence_cb(struct msm_fence_cb *cb)
+{
+ struct msm_async_commit *c =
+ container_of(cb, struct msm_async_commit, fence_cb);
+ commit_sync(c->state->dev, c->state, true);
+ free_commit(c);
+}
+
+static void add_fb(struct msm_async_commit *c, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb)
+{
+ struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
+ c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
+}
+
+static int wait_fb(struct drm_crtc *crtc, struct drm_framebuffer *fb)
+{
+ // XXX TODO wait..
+ return 0;
+}
+
+#define pending_fb(state) ((state) && (state)->fb && (state)->new_fb)
+
+static int commit_sync(struct drm_device *dev, void *state, bool unlocked)
+{
+ struct drm_atomic_state *a = state;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ int ncrtcs = dev->mode_config.num_crtc;
+ uint32_t pending_crtcs = 0;
+ int i, ret;
+
+ for (i = 0; i < ncrtcs; i++)
+ if (a->crtcs[i])
+ pending_crtcs |= (1 << a->crtcs[i]->id);
+
+ mutex_lock(&dev->struct_mutex);
+ WARN_ON(priv->pending_crtcs & pending_crtcs);
+ priv->pending_crtcs |= pending_crtcs;
+ mutex_unlock(&dev->struct_mutex);
+
+ if (unlocked)
+ ret = drm_atomic_commit_unlocked(dev, state);
+ else
+ ret = drm_atomic_commit(dev, state);
+
+ mutex_lock(&dev->struct_mutex);
+ priv->pending_crtcs &= ~pending_crtcs;
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ncrtcs; i++)
+ if (a->crtcs[i])
+ kms->funcs->flush(kms, a->crtcs[i]);
+
+ return 0;
+}
+
+int msm_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ struct drm_atomic_state *a = state;
+ int nplanes = dev->mode_config.num_total_plane;
+ int i;
+
+ if (a->flags & DRM_MODE_ATOMIC_NONBLOCK) {
+ /* non-block mode: defer commit until fb's are ready */
+ struct msm_async_commit *c = new_commit(state);
+
+ if (!c)
+ return -ENOMEM;
+
+ for (i = 0; i < nplanes; i++)
+ if (pending_fb(a->pstates[i]))
+ add_fb(c, a->pstates[i]->crtc, a->pstates[i]->fb);
+
+ return msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
+ } else {
+ /* blocking mode: wait until fb's are ready */
+ int ret = 0;
+
+ for (i = 0; i < nplanes && !ret; i++)
+ if (pending_fb(a->pstates[i]))
+ ret = wait_fb(a->pstates[i]->crtc, a->pstates[i]->fb);
+
+ if (ret)
+ return ret;
+
+ return commit_sync(dev, state, false);
+ }
+}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 36b9273..05d4a80 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -600,6 +600,26 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
return ret;
}
+int msm_queue_fence_cb(struct drm_device *dev,
+ struct msm_fence_cb *cb, uint32_t fence)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ int ret = 0;
+
+ mutex_lock(&dev->struct_mutex);
+ if (!list_empty(&cb->work.entry)) {
+ ret = -EINVAL;
+ } else if (fence > priv->completed_fence) {
+ cb->fence = fence;
+ list_add_tail(&cb->work.entry, &priv->fence_cbs);
+ } else {
+ queue_work(priv->wq, &cb->work);
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
/* called from workqueue */
void msm_update_fence(struct drm_device *dev, uint32_t fence)
{
@@ -815,7 +835,7 @@ static struct drm_driver msm_driver = {
.atomic_begin = drm_atomic_begin,
.atomic_set_event = drm_atomic_set_event,
.atomic_check = drm_atomic_check,
- .atomic_commit = drm_atomic_commit,
+ .atomic_commit = msm_atomic_commit,
.atomic_end = drm_atomic_end,
.atomic_funcs = &drm_atomic_funcs,
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index afc990e..5e1e7da 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -110,6 +110,9 @@ struct msm_drm_private {
unsigned int num_connectors;
struct drm_connector *connectors[8];
+ /* crtc's pending atomic update: */
+ uint32_t pending_crtcs;
+
/* VRAM carveout, used when no IOMMU: */
struct {
unsigned long size;
@@ -143,11 +146,15 @@ int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
struct timespec *timeout);
+int msm_queue_fence_cb(struct drm_device *dev,
+ struct msm_fence_cb *cb, uint32_t fence);
void msm_update_fence(struct drm_device *dev, uint32_t fence);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
+int msm_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state);
+
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index bb8026d..90cb138 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -392,23 +392,11 @@ void *msm_gem_vaddr(struct drm_gem_object *obj)
int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
struct msm_fence_cb *cb)
{
- struct drm_device *dev = obj->dev;
- struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- int ret = 0;
+ uint32_t fence = msm_gem_fence(msm_obj,
+ MSM_PREP_READ | MSM_PREP_WRITE);
- mutex_lock(&dev->struct_mutex);
- if (!list_empty(&cb->work.entry)) {
- ret = -EINVAL;
- } else if (is_active(msm_obj)) {
- cb->fence = max(msm_obj->read_fence, msm_obj->write_fence);
- list_add_tail(&cb->work.entry, &priv->fence_cbs);
- } else {
- queue_work(priv->wq, &cb->work);
- }
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
+ return msm_queue_fence_cb(obj->dev, cb, fence);
}
void msm_gem_move_to_active(struct drm_gem_object *obj,
@@ -447,12 +435,8 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
int ret = 0;
if (is_active(msm_obj)) {
- uint32_t fence = 0;
+ uint32_t fence = msm_gem_fence(msm_obj, op);
- if (op & MSM_PREP_READ)
- fence = msm_obj->write_fence;
- if (op & MSM_PREP_WRITE)
- fence = max(fence, msm_obj->read_fence);
if (op & MSM_PREP_NOSYNC)
timeout = NULL;
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 3246bb4..41a60cb 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -70,6 +70,19 @@ static inline bool is_active(struct msm_gem_object *msm_obj)
return msm_obj->gpu != NULL;
}
+static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj,
+ uint32_t op)
+{
+ uint32_t fence = 0;
+
+ if (op & MSM_PREP_READ)
+ fence = msm_obj->write_fence;
+ if (op & MSM_PREP_WRITE)
+ fence = max(fence, msm_obj->read_fence);
+
+ return fence;
+}
+
#define MAX_CMDS 4
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 0643774..91ddffc 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -42,6 +42,7 @@ struct msm_kms_funcs {
const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder);
+ void (*flush)(struct msm_kms *kms, struct drm_crtc *crtc);
/* cleanup: */
void (*preclose)(struct msm_kms *kms, struct drm_file *file);
void (*destroy)(struct msm_kms *kms);
--
1.9.3