[PATCH 1/2] drm/udl: make page flips asynchronous.

Haixia Shi <hshi@chromium.org>
Wed Mar 4 18:26:23 PST 2015


Instead of performing the damage update and sending the vblank event
synchronously in the page flip call, queue the flip on a driver workqueue
and complete it from there. This also limits the maximum frequency of page
flips to the vrefresh rate of the current mode: a new flip is scheduled no
earlier than one vblank interval after the previous one.
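
The rate limiting is a plain jiffies comparison. A minimal sketch of that
decision (simplified, not the exact driver code; the helper name is made up
for illustration):

	/* Sketch only: delay, in jiffies, before the next flip may run,
	 * given the last flip time and the vblank interval in jiffies. */
	static u64 udl_next_flip_delay(u64 last_flip, u64 vblank_interval,
				       u64 now)
	{
		u64 next_flip = last_flip + vblank_interval;

		/* If the next vblank point has already passed, flip now;
		 * otherwise wait out the remainder of the interval. */
		return (next_flip < now) ? 0 : next_flip - now;
	}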

Signed-off-by: Haixia Shi <hshi@chromium.org>
Reviewed-by: Stéphane Marchesin <marcheu@chromium.org>
Tested-by: Haixia Shi <hshi@chromium.org>
---
 drivers/gpu/drm/udl/udl_drv.h     |   3 +
 drivers/gpu/drm/udl/udl_modeset.c | 129 ++++++++++++++++++++++++++++++++++----
 2 files changed, 119 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 80adbac..b1e9fae 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -47,6 +47,7 @@ struct urb_list {
 };
 
 struct udl_fbdev;
+struct udl_flip_queue;
 
 struct udl_device {
 	struct device *dev;
@@ -65,6 +66,8 @@ struct udl_device {
 	atomic_t bytes_identical; /* saved effort with backbuffer comparison */
 	atomic_t bytes_sent; /* to usb, after compression including overhead */
 	atomic_t cpu_kcycles_used; /* transpired during pixel processing */
+
+	struct udl_flip_queue *flip_queue;
 };
 
 struct udl_gem_object {
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 677190a6..f3804b4 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -303,6 +303,16 @@ udl_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 }
 #endif
 
+struct udl_flip_queue {
+	struct mutex lock;
+	struct workqueue_struct *wq;
+	struct delayed_work work;
+	struct drm_crtc *crtc;
+	struct drm_pending_vblank_event *event;
+	u64 flip_time;  /* in jiffies */
+	u64 vblank_interval;  /* in jiffies */
+};
+
 static int udl_crtc_mode_set(struct drm_crtc *crtc,
 			       struct drm_display_mode *mode,
 			       struct drm_display_mode *adjusted_mode,
@@ -313,6 +323,7 @@ static int udl_crtc_mode_set(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct udl_framebuffer *ufb = to_udl_fb(crtc->primary->fb);
 	struct udl_device *udl = dev->dev_private;
+	struct udl_flip_queue *flip_queue = udl->flip_queue;
 	char *buf;
 	char *wrptr;
 	int color_depth = 0;
@@ -347,6 +358,13 @@ static int udl_crtc_mode_set(struct drm_crtc *crtc,
 	ufb->active_16 = true;
 	udl->mode_buf_len = wrptr - buf;
 
+	/* update flip queue vblank interval */
+	if (flip_queue) {
+		mutex_lock(&flip_queue->lock);
+		flip_queue->vblank_interval = HZ / mode->vrefresh;
+		mutex_unlock(&flip_queue->lock);
+	}
+
 	/* damage all of it */
 	udl_handle_damage(ufb, 0, 0, ufb->base.width, ufb->base.height);
 	return 0;
@@ -364,29 +382,82 @@ static void udl_crtc_destroy(struct drm_crtc *crtc)
 	kfree(crtc);
 }
 
+static void udl_sched_page_flip(struct work_struct *work)
+{
+	struct udl_flip_queue *flip_queue =
+		container_of(container_of(work, struct delayed_work, work),
+			struct udl_flip_queue, work);
+	struct drm_crtc *crtc;
+	struct drm_device *dev;
+	struct drm_pending_vblank_event *event;
+	struct drm_framebuffer *fb;
+
+	mutex_lock(&flip_queue->lock);
+	crtc = flip_queue->crtc;
+	dev = crtc->dev;
+	event = flip_queue->event;
+	fb = crtc->primary->fb;
+	flip_queue->event = NULL;
+	mutex_unlock(&flip_queue->lock);
+
+	if (fb)
+		udl_handle_damage(to_udl_fb(fb), 0, 0, fb->width, fb->height);
+	if (event) {
+		unsigned long flags;
+		spin_lock_irqsave(&dev->event_lock, flags);
+		drm_send_vblank_event(dev, 0, event);
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+	}
+}
+
 static int udl_crtc_page_flip(struct drm_crtc *crtc,
 			      struct drm_framebuffer *fb,
 			      struct drm_pending_vblank_event *event,
 			      uint32_t page_flip_flags)
 {
-	struct udl_framebuffer *ufb = to_udl_fb(fb);
 	struct drm_device *dev = crtc->dev;
-	unsigned long flags;
+	struct udl_device *udl = dev->dev_private;
+	struct udl_flip_queue *flip_queue = udl->flip_queue;
 
-	struct drm_framebuffer *old_fb = crtc->primary->fb;
-	if (old_fb) {
-		struct udl_framebuffer *uold_fb = to_udl_fb(old_fb);
-		uold_fb->active_16 = false;
+	if (!flip_queue || !flip_queue->wq) {
+		DRM_ERROR("Uninitialized page flip queue\n");
+		return -ENOMEM;
 	}
-	ufb->active_16 = true;
 
-	udl_handle_damage(ufb, 0, 0, fb->width, fb->height);
+	mutex_lock(&flip_queue->lock);
+
+	flip_queue->crtc = crtc;
+	if (fb) {
+		struct drm_framebuffer *old_fb;
+		struct udl_framebuffer *ufb;
+		ufb = to_udl_fb(fb);
+		old_fb = crtc->primary->fb;
+		if (old_fb) {
+			struct udl_framebuffer *uold_fb = to_udl_fb(old_fb);
+			uold_fb->active_16 = false;
+		}
+		ufb->active_16 = true;
+		crtc->primary->fb = fb;
+	}
+	if (event) {
+		if (flip_queue->event) {
+			unsigned long flags;
+			spin_lock_irqsave(&dev->event_lock, flags);
+			drm_send_vblank_event(dev, 0, flip_queue->event);
+			spin_unlock_irqrestore(&dev->event_lock, flags);
+		}
+		flip_queue->event = event;
+	}
+	if (!delayed_work_pending(&flip_queue->work)) {
+		u64 now = jiffies;
+		u64 next_flip =
+			flip_queue->flip_time + flip_queue->vblank_interval;
+		flip_queue->flip_time = (next_flip < now) ? now : next_flip;
+		queue_delayed_work(flip_queue->wq, &flip_queue->work,
+			flip_queue->flip_time - now);
+	}
 
-	spin_lock_irqsave(&dev->event_lock, flags);
-	if (event)
-		drm_send_vblank_event(dev, 0, event);
-	spin_unlock_irqrestore(&dev->event_lock, flags);
-	crtc->primary->fb = fb;
+	mutex_unlock(&flip_queue->lock);
 
 	return 0;
 }
@@ -429,6 +500,35 @@ static int udl_crtc_init(struct drm_device *dev)
 	return 0;
 }
 
+static void udl_flip_workqueue_init(struct drm_device *dev)
+{
+	struct udl_device *udl = dev->dev_private;
+	struct udl_flip_queue *flip_queue =
+		kzalloc(sizeof(struct udl_flip_queue), GFP_KERNEL);
+	BUG_ON(!flip_queue);
+	mutex_init(&flip_queue->lock);
+	flip_queue->wq = create_singlethread_workqueue("flip");
+	BUG_ON(!flip_queue->wq);
+	INIT_DELAYED_WORK(&flip_queue->work, udl_sched_page_flip);
+	flip_queue->flip_time = jiffies;
+	flip_queue->vblank_interval = HZ / 60;
+	udl->flip_queue = flip_queue;
+}
+
+static void udl_flip_workqueue_cleanup(struct drm_device *dev)
+{
+	struct udl_device *udl = dev->dev_private;
+	struct udl_flip_queue *flip_queue = udl->flip_queue;
+	if (!flip_queue)
+		return;
+	if (flip_queue->wq) {
+		flush_workqueue(flip_queue->wq);
+		destroy_workqueue(flip_queue->wq);
+	}
+	mutex_destroy(&flip_queue->lock);
+	kfree(flip_queue);
+}
+
 static const struct drm_mode_config_funcs udl_mode_funcs = {
 	.fb_create = udl_fb_user_fb_create,
 	.output_poll_changed = NULL,
@@ -458,10 +558,13 @@ int udl_modeset_init(struct drm_device *dev)
 
 	udl_connector_init(dev, encoder);
 
+	udl_flip_workqueue_init(dev);
+
 	return 0;
 }
 
 void udl_modeset_cleanup(struct drm_device *dev)
 {
+	udl_flip_workqueue_cleanup(dev);
 	drm_mode_config_cleanup(dev);
 }
-- 
2.2.0.rc0.207.ga3a616c


