[RFC] drm: add a mechanism for drivers to schedule vblank callbacks
Paulo Zanoni
przanoni at gmail.com
Wed Nov 19 11:47:08 PST 2014
From: Paulo Zanoni <paulo.r.zanoni at intel.com>
The drivers need a way to schedule functions to be run after a certain
number of vblanks. The i915.ko driver has plenty of places where this
could be used, such as unpinning buffers after a modeset.
Since the vblank wait IOCTL already does basically what we want, just
for user space, this patch adds a new list of vblank events
(dev->vblank_kernel_list) and handles it exactly like the user space
event list.
Signed-off-by: Paulo Zanoni <paulo.r.zanoni at intel.com>
---
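For reviewers, a sketch of the intended driver-side usage. Everything
prefixed "foo_" below is hypothetical; only struct drm_kernel_vblank_item
and drm_wait_vblank_kernel() come from this patch:

struct foo_unpin {
	struct drm_gem_object *obj;          /* buffer to unpin later */
	struct drm_kernel_vblank_item item;  /* must outlive the wait */
};

static void foo_unpin_cb(struct drm_kernel_vblank_item *item)
{
	struct foo_unpin *unpin = container_of(item, struct foo_unpin, item);

	/* item->premature is set when the CRTC was disabled before the
	 * target vblank; the cleanup usually still has to happen. */
	foo_do_unpin(unpin->obj);
	kfree(unpin);
}

static int foo_schedule_unpin(struct drm_crtc *crtc,
			      struct drm_gem_object *obj)
{
	struct foo_unpin *unpin = kzalloc(sizeof(*unpin), GFP_KERNEL);

	if (!unpin)
		return -ENOMEM;

	unpin->obj = obj;
	unpin->item.crtc = crtc;
	unpin->item.target_seq = 1;            /* one vblank from now */
	unpin->item.absolute = false;          /* relative to current count */
	unpin->item.callback = foo_unpin_cb;
	unpin->item.callback_from_work = true; /* callback may sleep */

	return drm_wait_vblank_kernel(&unpin->item);
}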
drivers/gpu/drm/drm_drv.c | 1 +
drivers/gpu/drm/drm_irq.c | 106 ++++++++++++++++++++++++++++++++++--
drivers/gpu/drm/i915/i915_debugfs.c | 81 +++++++++++++++++++++++++++
include/drm/drmP.h | 22 ++++++++
4 files changed, 206 insertions(+), 4 deletions(-)
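A note on the sequence arithmetic used in the patch: it reuses the
existing vblank convention of comparing 32-bit counters via unsigned
subtraction against a 2^23 window, so "target already passed" keeps
working across counter wraparound. A standalone illustration (not part
of the patch):

#include <stdbool.h>
#include <stdint.h>

/* Mirrors the (seq - target) <= (1 << 23) test used below. */
static bool vblank_passed(uint32_t seq, uint32_t target)
{
	return seq - target <= (1u << 23);
}

/*
 * vblank_passed(5, 3)          -> true  (3 already happened)
 * vblank_passed(5, 10)         -> false (10 is still in the future)
 * vblank_passed(2, 0xfffffffe) -> true  (counter wrapped; 0xfffffffe
 *                                        was four vblanks ago)
 */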
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 2e5c7d9..b5ae6c8 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -557,6 +557,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
INIT_LIST_HEAD(&dev->vmalist);
INIT_LIST_HEAD(&dev->maplist);
INIT_LIST_HEAD(&dev->vblank_event_list);
+ INIT_LIST_HEAD(&dev->vblank_kernel_list);
spin_lock_init(&dev->buf_lock);
spin_lock_init(&dev->event_lock);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 0e47df4..6e04035 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1107,6 +1107,13 @@ void drm_crtc_wait_one_vblank(struct drm_crtc *crtc)
}
EXPORT_SYMBOL(drm_crtc_wait_one_vblank);
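+/* Deliver a kernel vblank item: invoke the callback directly (possibly
+ * from IRQ context) or punt it to process context via a work item. */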
+static void send_kernel_vblank_event(struct drm_kernel_vblank_item *item)
+{
+ if (item->callback_from_work)
+ schedule_work(&item->work);
+ else
+ item->callback(item);
+}
/**
* drm_vblank_off - disable vblank events on a CRTC
* @dev: DRM device
@@ -1124,7 +1131,8 @@ EXPORT_SYMBOL(drm_crtc_wait_one_vblank);
void drm_vblank_off(struct drm_device *dev, int crtc)
{
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
- struct drm_pending_vblank_event *e, *t;
+ struct drm_pending_vblank_event *e, *et;
+ struct drm_kernel_vblank_item *i, *it;
struct timeval now;
unsigned long irqflags;
unsigned int seq;
@@ -1151,7 +1159,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
/* Send any queued vblank events, lest the natives grow disquiet */
seq = drm_vblank_count_and_time(dev, crtc, &now);
- list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
+ list_for_each_entry_safe(e, et, &dev->vblank_event_list, base.link) {
if (e->pipe != crtc)
continue;
DRM_DEBUG("Sending premature vblank event on disable: \
@@ -1161,6 +1169,21 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
drm_vblank_put(dev, e->pipe);
send_vblank_event(dev, e, seq, &now);
}
+
+ list_for_each_entry_safe(i, it, &dev->vblank_kernel_list, link) {
+ int pipe = drm_crtc_index(i->crtc);
+
+ if (pipe != crtc)
+ continue;
+
+ DRM_DEBUG("Sending premature kernel vblank event on disable: \
+ wanted %d, current %d\n",
+ i->target_seq, seq);
+ i->premature = true;
+ list_del(&i->link);
+ drm_vblank_put(dev, pipe);
+ send_kernel_vblank_event(i);
+ }
spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
EXPORT_SYMBOL(drm_vblank_off);
@@ -1560,9 +1583,68 @@ done:
return ret;
}
+static void drm_kernel_vblank_work_func(struct work_struct *work)
+{
+ struct drm_kernel_vblank_item *item =
+ container_of(work, struct drm_kernel_vblank_item, work);
+
+ item->callback(item);
+}
+
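+/**
+ * drm_wait_vblank_kernel - schedule an in-kernel callback on a vblank
+ * @item: describes the wait; must stay alive until the callback has run
+ *
+ * Schedules item->callback to run once the vblank counter of item->crtc
+ * reaches item->target_seq (interpreted according to item->absolute).
+ * If the target has already passed, the callback is invoked right away.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */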
+int drm_wait_vblank_kernel(struct drm_kernel_vblank_item *item)
+{
+ struct drm_crtc *crtc = item->crtc;
+ struct drm_device *dev = crtc->dev;
+ int pipe = drm_crtc_index(crtc);
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ unsigned int seq;
+ unsigned long irqflags;
+ int ret = 0;
+
+ if (!dev->irq_enabled)
+ return -EINVAL;
+
+ if (item->callback_from_work)
+ INIT_WORK(&item->work, drm_kernel_vblank_work_func);
+
+ ret = drm_vblank_get(dev, pipe);
+ if (ret) {
+ DRM_DEBUG("failed to acquire vblank counter, %d\n", ret);
+ return ret;
+ }
+
+ seq = drm_vblank_count(dev, pipe);
+ if (!item->absolute)
+ item->target_seq += seq;
+
+ spin_lock_irqsave(&dev->event_lock, irqflags);
+
+ if (!vblank->enabled) {
+ ret = -EINVAL;
+ goto out;
+ }
+
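+ /* Target already reached (unsigned wraparound comparison, same
+ * convention as the user space path)? Then deliver immediately. */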
+ if (seq - item->target_seq <= (1 << 23)) {
+ drm_vblank_put(dev, pipe);
+ send_kernel_vblank_event(item);
+ } else {
+ list_add_tail(&item->link, &dev->vblank_kernel_list);
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, irqflags);
+ return 0;
+
+out:
+ spin_unlock_irqrestore(&dev->event_lock, irqflags);
+ drm_vblank_put(dev, pipe);
+ return ret;
+}
+EXPORT_SYMBOL(drm_wait_vblank_kernel);
+
static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
{
- struct drm_pending_vblank_event *e, *t;
+ struct drm_pending_vblank_event *e, *et;
+ struct drm_kernel_vblank_item *i, *it;
struct timeval now;
unsigned int seq;
@@ -1570,7 +1652,7 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
seq = drm_vblank_count_and_time(dev, crtc, &now);
- list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
+ list_for_each_entry_safe(e, et, &dev->vblank_event_list, base.link) {
if (e->pipe != crtc)
continue;
if ((seq - e->event.sequence) > (1<<23))
@@ -1584,6 +1666,22 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
send_vblank_event(dev, e, seq, &now);
}
+ list_for_each_entry_safe(i, it, &dev->vblank_kernel_list, link) {
+ int pipe = drm_crtc_index(i->crtc);
+
+ if (pipe != crtc)
+ continue;
+ if ((seq - i->target_seq) > (1<<23))
+ continue;
+
+ DRM_DEBUG("kernel vblank event on %d, current %d\n",
+ i->target_seq, seq);
+
+ list_del(&i->link);
+ drm_vblank_put(dev, pipe);
+ send_kernel_vblank_event(i);
+ }
+
trace_drm_vblank_event(crtc, seq);
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 319da61..e705976 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2715,6 +2715,86 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
return 0;
}
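+/*
+ * Smoke test for drm_wait_vblank_kernel(): schedule two callbacks 60
+ * vblanks from now on pipe A, one invoked directly and one from a work
+ * item, then check the callback context against drm_can_sleep().
+ */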
+struct vblank_data {
+ int eight;
+ const char *message;
+ struct drm_kernel_vblank_item item;
+};
+
+static void vblank_callback(struct drm_kernel_vblank_item *item)
+{
+ struct vblank_data *data = container_of(item, struct vblank_data, item);
+
+ WARN_ON(data->eight != 8);
+ WARN_ON(item->callback_from_work != drm_can_sleep());
+ DRM_DEBUG_KMS("vblank callback, premature: %s, message: %s\n",
+ yesno(item->premature), data->message);
+
+ kfree(data);
+}
+
+static int i915_vblank_queue_test_new(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct intel_crtc *crtc = NULL, *tmp;
+ int ret = 0;
+ struct vblank_data *data1, *data2;
+
+ for_each_intel_crtc(dev, tmp)
+ if (tmp->pipe == PIPE_A) {
+ crtc = tmp;
+ break;
+ }
+ if (WARN_ON(!crtc))
+ return -EINVAL;
+
+ data1 = kzalloc(sizeof *data1, GFP_KERNEL);
+ if (!data1)
+ return -ENOMEM;
+
+ data2 = kzalloc(sizeof *data2, GFP_KERNEL);
+ if (!data2) {
+ kfree(data1);
+ return -ENOMEM;
+ }
+
+ data1->message = "hello world (atomic)";
+ data1->eight = 8;
+ data1->item.crtc = &crtc->base;
+ data1->item.target_seq = 60;
+ data1->item.absolute = false;
+ data1->item.callback = vblank_callback;
+ data1->item.callback_from_work = false;
+
+ data2->message = "hello world (work)";
+ data2->eight = 8;
+ data2->item.crtc = &crtc->base;
+ data2->item.target_seq = 60;
+ data2->item.absolute = false;
+ data2->item.callback = vblank_callback;
+ data2->item.callback_from_work = true;
+
+ DRM_DEBUG_KMS("scheduling 60 vblanks (no work)\n");
+ ret = drm_wait_vblank_kernel(&data1->item);
+ if (ret) {
+ DRM_DEBUG_KMS("vblank schedule error: %d\n", ret);
+ kfree(data1);
+ kfree(data2);
+ return ret;
+ }
+ DRM_DEBUG_KMS("vblanks scheduled\n");
+
+ DRM_DEBUG_KMS("scheduling 60 vblanks (with work)\n");
+ ret = drm_wait_vblank_kernel(&data2->item);
+ if (ret) {
+ DRM_DEBUG_KMS("vblank schedule error: %d\n", ret);
+ kfree(data2);
+ return ret;
+ }
+ DRM_DEBUG_KMS("vblanks scheduled\n");
+
+ return 0;
+}
+
struct pipe_crc_info {
const char *name;
struct drm_device *dev;
@@ -4289,6 +4369,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_dp_mst_info", i915_dp_mst_info, 0},
{"i915_wa_registers", i915_wa_registers, 0},
{"i915_ddb_info", i915_ddb_info, 0},
+ {"i915_vblank_queue_test_new", i915_vblank_queue_test_new, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index be776fb..295d0e0 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -667,6 +667,26 @@ struct drm_pending_vblank_event {
struct drm_event_vblank event;
};
+struct drm_kernel_vblank_item {
+ /* Internal members, set by drm.ko. */
+ struct list_head link;
+ struct work_struct work;
+
+ /* Parameters: set by the caller of drm_wait_vblank_kernel(). */
+ struct drm_crtc *crtc;
+
+ int target_seq;
+ bool absolute; /* If true, target_seq is an absolute vblank count;
+ * if false, it is an offset relative to the current count. */
+
+ void (*callback)(struct drm_kernel_vblank_item *item);
+
+ bool callback_from_work;
+
+ /* Output: to be used by the callback function. */
+ bool premature;
+};
+
struct drm_vblank_crtc {
struct drm_device *dev; /* pointer to the drm_device */
wait_queue_head_t queue; /**< VBLANK wait queue */
@@ -784,6 +804,7 @@ struct drm_device {
* List of events
*/
struct list_head vblank_event_list;
+ struct list_head vblank_kernel_list;
spinlock_t event_lock;
/*@} */
@@ -900,6 +921,7 @@ extern int drm_irq_uninstall(struct drm_device *dev);
extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
extern int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *filp);
+extern int drm_wait_vblank_kernel(struct drm_kernel_vblank_item *item);
extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
struct timeval *vblanktime);
--
2.1.1