[Intel-gfx] [PATCH 09/11] dma-buf: Proxy fence, an unsignaled fence placeholder
Chris Wilson
chris at chris-wilson.co.uk
Thu May 28 21:15:22 UTC 2020
Often we need to create a fence for a future event that has not yet been
associated with a fence. We can store a proxy fence, a placeholder, in
the timeline and replace it later when the real fence is known. Any
listeners that attach to the proxy fence will automatically be signaled
when the real fence completes, and any future listeners will instead
attach directly to the real fence, avoiding any indirection overhead.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Cc: Lionel Landwerlin <lionel.g.landwerlin at intel.com>
---
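[Not part of the patch: a minimal usage sketch, assuming a hypothetical
driver-side timeline_slot. A placeholder proxy is published before the
real fence exists; once the real fence is known it is swapped in with
dma_fence_replace_proxy(), and listeners that attached to the proxy in
the meantime are signaled when the real fence completes.]

#include <linux/dma-fence.h>
#include <linux/dma-fence-proxy.h>

static struct dma_fence __rcu *timeline_slot; /* hypothetical slot */

static int publish_future_fence(void)
{
    struct dma_fence *proxy;

    /* Placeholder for an event that has no real fence yet */
    proxy = dma_fence_create_proxy();
    if (!proxy)
        return -ENOMEM;

    /* The slot owns the proxy's creation reference. */
    rcu_assign_pointer(timeline_slot, proxy);
    return 0;
}

static void complete_future_fence(struct dma_fence *real)
{
    struct dma_fence *old;

    /*
     * dma_fence_replace_proxy() takes its own reference on @real and
     * returns the previous occupant of the slot; listeners already
     * attached to the proxy are signaled once @real completes.
     */
    old = dma_fence_replace_proxy(&timeline_slot, real);
    dma_fence_put(old);
}
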
drivers/dma-buf/Makefile | 13 +-
drivers/dma-buf/dma-fence-private.h | 20 +
drivers/dma-buf/dma-fence-proxy.c | 306 +++++++++++
drivers/dma-buf/dma-fence.c | 4 +-
drivers/dma-buf/selftests.h | 1 +
drivers/dma-buf/st-dma-fence-proxy.c | 752 +++++++++++++++++++++++++++
include/linux/dma-fence-proxy.h | 38 ++
7 files changed, 1130 insertions(+), 4 deletions(-)
create mode 100644 drivers/dma-buf/dma-fence-private.h
create mode 100644 drivers/dma-buf/dma-fence-proxy.c
create mode 100644 drivers/dma-buf/st-dma-fence-proxy.c
create mode 100644 include/linux/dma-fence-proxy.h
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 995e05f609ff..afaf6dadd9a3 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,6 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
- dma-resv.o seqno-fence.o
+obj-y := \
+ dma-buf.o \
+ dma-fence.o \
+ dma-fence-array.o \
+ dma-fence-chain.o \
+ dma-fence-proxy.o \
+ dma-resv.o \
+ seqno-fence.o
obj-$(CONFIG_DMABUF_HEAPS) += dma-heap.o
obj-$(CONFIG_DMABUF_HEAPS) += heaps/
obj-$(CONFIG_SYNC_FILE) += sync_file.o
@@ -10,6 +16,7 @@ obj-$(CONFIG_UDMABUF) += udmabuf.o
dmabuf_selftests-y := \
selftest.o \
st-dma-fence.o \
- st-dma-fence-chain.o
+ st-dma-fence-chain.o \
+ st-dma-fence-proxy.o
obj-$(CONFIG_DMABUF_SELFTESTS) += dmabuf_selftests.o
diff --git a/drivers/dma-buf/dma-fence-private.h b/drivers/dma-buf/dma-fence-private.h
new file mode 100644
index 000000000000..6924d28af0fa
--- /dev/null
+++ b/drivers/dma-buf/dma-fence-private.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Fence mechanism for dma-buf and to allow for asynchronous dma access
+ *
+ * Copyright (C) 2012 Canonical Ltd
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * Authors:
+ * Rob Clark <robdclark at gmail.com>
+ * Maarten Lankhorst <maarten.lankhorst at canonical.com>
+ */
+
+#ifndef DMA_FENCE_PRIVATE_H
+#define DMA_FENCE_PRIVATE_H
+
+struct dma_fence;
+
+bool __dma_fence_enable_signaling(struct dma_fence *fence);
+
+#endif /* DMA_FENCE_PRIVATE_H */
diff --git a/drivers/dma-buf/dma-fence-proxy.c b/drivers/dma-buf/dma-fence-proxy.c
new file mode 100644
index 000000000000..42674e92b0f9
--- /dev/null
+++ b/drivers/dma-buf/dma-fence-proxy.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * dma-fence-proxy: placeholder unsignaled fence
+ *
+ * Copyright (C) 2017-2019 Intel Corporation
+ */
+
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-proxy.h>
+#include <linux/export.h>
+#include <linux/irq_work.h>
+#include <linux/slab.h>
+
+#include "dma-fence-private.h"
+
+struct dma_fence_proxy {
+ struct dma_fence base;
+
+ struct dma_fence *real;
+ struct dma_fence_cb cb;
+ struct irq_work work;
+
+ wait_queue_head_t wq;
+};
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define same_lockclass(A, B) ((A)->dep_map.key == (B)->dep_map.key)
+#else
+#define same_lockclass(A, B) 0
+#endif
+
+static const char *proxy_get_driver_name(struct dma_fence *fence)
+{
+ struct dma_fence_proxy *p = container_of(fence, typeof(*p), base);
+ struct dma_fence *real = READ_ONCE(p->real);
+
+ return real ? real->ops->get_driver_name(real) : "proxy";
+}
+
+static const char *proxy_get_timeline_name(struct dma_fence *fence)
+{
+ struct dma_fence_proxy *p = container_of(fence, typeof(*p), base);
+ struct dma_fence *real = READ_ONCE(p->real);
+
+ return real ? real->ops->get_timeline_name(real) : "unset";
+}
+
+static void proxy_irq_work(struct irq_work *work)
+{
+ struct dma_fence_proxy *p = container_of(work, typeof(*p), work);
+
+ dma_fence_signal(&p->base);
+ dma_fence_put(&p->base);
+}
+
+static void proxy_callback(struct dma_fence *real, struct dma_fence_cb *cb)
+{
+ struct dma_fence_proxy *p = container_of(cb, typeof(*p), cb);
+
+ /* Signaled before enabling signaling callbacks? */
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &p->base.flags)) {
+ dma_fence_put(&p->base);
+ return;
+ }
+
+ if (real->error)
+ dma_fence_set_error(&p->base, real->error);
+
+ /* Lower the height of the proxy chain -> single stack frame */
+ irq_work_queue(&p->work);
+}
+
+static bool proxy_enable_signaling(struct dma_fence *fence)
+{
+ struct dma_fence_proxy *p = container_of(fence, typeof(*p), base);
+ struct dma_fence *real = READ_ONCE(p->real);
+ bool ret = true;
+
+ if (real) {
+ spin_lock_nested(real->lock,
+ same_lockclass(&p->wq.lock, real->lock));
+ ret = __dma_fence_enable_signaling(real);
+ if (!ret && real->error)
+ dma_fence_set_error(&p->base, real->error);
+ spin_unlock(real->lock);
+ }
+
+ return ret;
+}
+
+static void proxy_release(struct dma_fence *fence)
+{
+ struct dma_fence_proxy *p = container_of(fence, typeof(*p), base);
+
+ dma_fence_put(p->real);
+ dma_fence_free(&p->base);
+}
+
+const struct dma_fence_ops dma_fence_proxy_ops = {
+ .get_driver_name = proxy_get_driver_name,
+ .get_timeline_name = proxy_get_timeline_name,
+ .enable_signaling = proxy_enable_signaling,
+ .wait = dma_fence_default_wait,
+ .release = proxy_release,
+};
+EXPORT_SYMBOL_GPL(dma_fence_proxy_ops);
+
+/**
+ * __dma_fence_create_proxy - Create an unset dma-fence
+ * @context: context number to use for proxy fence
+ * @seqno: sequence number to use for proxy fence
+ *
+ * __dma_fence_create_proxy() creates a new dma_fence stub that is initially
+ * unsignaled and may later be replaced with a real fence. Any listeners
+ * attached to the proxy fence will be signaled when the target fence
+ * completes.
+ */
+struct dma_fence *__dma_fence_create_proxy(u64 context, u64 seqno)
+{
+ struct dma_fence_proxy *p;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return NULL;
+
+ init_waitqueue_head(&p->wq);
+ dma_fence_init(&p->base, &dma_fence_proxy_ops, &p->wq.lock,
+ context, seqno);
+ init_irq_work(&p->work, proxy_irq_work);
+
+ return &p->base;
+}
+EXPORT_SYMBOL(__dma_fence_create_proxy);
+
+/**
+ * dma_fence_create_proxy - Create an unset dma-fence
+ *
+ * Wraps __dma_fence_create_proxy() to create a new proxy fence with the
+ * next available (unique) context id.
+ */
+struct dma_fence *dma_fence_create_proxy(void)
+{
+ return __dma_fence_create_proxy(dma_fence_context_alloc(1), 0);
+}
+EXPORT_SYMBOL(dma_fence_create_proxy);
+
+static void __wake_up_listeners(struct dma_fence_proxy *p)
+{
+ struct wait_queue_entry *wait, *next;
+
+ list_for_each_entry_safe(wait, next, &p->wq.head, entry) {
+ INIT_LIST_HEAD(&wait->entry);
+ wait->func(wait, TASK_NORMAL, 0, p->real);
+ }
+}
+
+static void set_proxy_callback(struct dma_fence *real, struct dma_fence_cb *cb)
+{
+ cb->func = proxy_callback;
+ list_add_tail(&cb->node, &real->cb_list);
+}
+
+static void proxy_assign(struct dma_fence *fence, struct dma_fence *real)
+{
+ struct dma_fence_proxy *p = container_of(fence, typeof(*p), base);
+ unsigned long flags;
+
+ if (WARN_ON(fence == real))
+ return;
+
+ if (WARN_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)))
+ return;
+
+ if (WARN_ON(p->real))
+ return;
+
+ spin_lock_irqsave(&p->wq.lock, flags);
+
+ if (unlikely(!real)) {
+ dma_fence_signal_locked(&p->base);
+ goto unlock;
+ }
+
+ p->real = dma_fence_get(real);
+
+ dma_fence_get(&p->base);
+ spin_lock_nested(real->lock, same_lockclass(&p->wq.lock, real->lock));
+ if (dma_fence_is_signaled_locked(real))
+ proxy_callback(real, &p->cb);
+ else if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &p->base.flags) &&
+ !__dma_fence_enable_signaling(real))
+ proxy_callback(real, &p->cb);
+ else
+ set_proxy_callback(real, &p->cb);
+ spin_unlock(real->lock);
+
+unlock:
+ __wake_up_listeners(p);
+ spin_unlock_irqrestore(&p->wq.lock, flags);
+}
+
+/**
+ * dma_fence_replace_proxy - Replace the proxy fence with the real target
+ * @slot: pointer to location of fence to update
+ * @fence: the new fence to store in @slot
+ *
+ * Once the real dma_fence is known, we can replace the proxy fence holder
+ * with a pointer to the real dma fence. Future listeners will attach to
+ * the real fence, avoiding any indirection overhead. Previous listeners
+ * will remain attached to the proxy fence, and be signaled in turn when
+ * the target fence completes.
+ */
+struct dma_fence *
+dma_fence_replace_proxy(struct dma_fence __rcu **slot, struct dma_fence *fence)
+{
+ struct dma_fence *old;
+
+ if (fence)
+ dma_fence_get(fence);
+
+ old = rcu_replace_pointer(*slot, fence, true);
+ if (old && dma_fence_is_proxy(old))
+ proxy_assign(old, fence);
+
+ return old;
+}
+EXPORT_SYMBOL(dma_fence_replace_proxy);
+
+/**
+ * dma_fence_proxy_set_real - Set the target of a proxy fence
+ * @fence: the proxy fence
+ * @real: the target fence
+ *
+ * Assign @real as the target of the proxy @fence. Listeners already
+ * attached to the proxy are signaled when @real completes; if @real is
+ * NULL, the proxy is signaled immediately.
+ */
+void dma_fence_proxy_set_real(struct dma_fence *fence, struct dma_fence *real)
+{
+ if (dma_fence_is_proxy(fence))
+ proxy_assign(fence, real);
+}
+EXPORT_SYMBOL(dma_fence_proxy_set_real);
+
+/**
+ * dma_fence_proxy_get_real - Query the target of a proxy fence
+ * @fence: the proxy fence
+ *
+ * Unpeel the proxy fence to see if it has been replaced with a real fence.
+ */
+struct dma_fence *dma_fence_proxy_get_real(struct dma_fence *fence)
+{
+ if (dma_fence_is_proxy(fence)) {
+ struct dma_fence_proxy *p =
+ container_of(fence, typeof(*p), base);
+
+ if (p->real)
+ fence = p->real;
+ }
+
+ return fence;
+}
+EXPORT_SYMBOL(dma_fence_proxy_get_real);
+
+void dma_fence_add_proxy_listener(struct dma_fence *fence,
+ struct wait_queue_entry *wait)
+{
+ if (dma_fence_is_proxy(fence)) {
+ struct dma_fence_proxy *p =
+ container_of(fence, typeof(*p), base);
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->wq.lock, flags);
+ if (!p->real) {
+ list_add_tail(&wait->entry, &p->wq.head);
+ wait = NULL;
+ }
+ fence = p->real;
+ spin_unlock_irqrestore(&p->wq.lock, flags);
+ }
+
+ if (wait) {
+ INIT_LIST_HEAD(&wait->entry);
+ wait->func(wait, TASK_NORMAL, 0, fence);
+ }
+}
+EXPORT_SYMBOL(dma_fence_add_proxy_listener);
+
+bool dma_fence_remove_proxy_listener(struct dma_fence *fence,
+ struct wait_queue_entry *wait)
+{
+ bool ret = false;
+
+ if (dma_fence_is_proxy(fence)) {
+ struct dma_fence_proxy *p =
+ container_of(fence, typeof(*p), base);
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->wq.lock, flags);
+ if (!list_empty(&wait->entry)) {
+ list_del_init(&wait->entry);
+ ret = true;
+ }
+ spin_unlock_irqrestore(&p->wq.lock, flags);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(dma_fence_remove_proxy_listener);
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 656e9ac2d028..329bd033059f 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -19,6 +19,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/dma_fence.h>
+#include "dma-fence-private.h"
+
EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled);
@@ -275,7 +277,7 @@ void dma_fence_free(struct dma_fence *fence)
}
EXPORT_SYMBOL(dma_fence_free);
-static bool __dma_fence_enable_signaling(struct dma_fence *fence)
+bool __dma_fence_enable_signaling(struct dma_fence *fence)
{
bool was_set;
diff --git a/drivers/dma-buf/selftests.h b/drivers/dma-buf/selftests.h
index 55918ef9adab..616eca70e2d8 100644
--- a/drivers/dma-buf/selftests.h
+++ b/drivers/dma-buf/selftests.h
@@ -12,3 +12,4 @@
selftest(sanitycheck, __sanitycheck__) /* keep first (igt selfcheck) */
selftest(dma_fence, dma_fence)
selftest(dma_fence_chain, dma_fence_chain)
+selftest(dma_fence_proxy, dma_fence_proxy)
diff --git a/drivers/dma-buf/st-dma-fence-proxy.c b/drivers/dma-buf/st-dma-fence-proxy.c
new file mode 100644
index 000000000000..c3f210bc4e60
--- /dev/null
+++ b/drivers/dma-buf/st-dma-fence-proxy.c
@@ -0,0 +1,752 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-proxy.h>
+#include <linux/kernel.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "selftest.h"
+
+static struct kmem_cache *slab_fences;
+
+static struct mock_fence {
+ struct dma_fence base;
+ spinlock_t lock;
+} *to_mock_fence(struct dma_fence *f) {
+ return container_of(f, struct mock_fence, base);
+}
+
+static const char *mock_name(struct dma_fence *f)
+{
+ return "mock";
+}
+
+static void mock_fence_release(struct dma_fence *f)
+{
+ kmem_cache_free(slab_fences, to_mock_fence(f));
+}
+
+static const struct dma_fence_ops mock_ops = {
+ .get_driver_name = mock_name,
+ .get_timeline_name = mock_name,
+ .release = mock_fence_release,
+};
+
+static struct dma_fence *mock_fence(void)
+{
+ struct mock_fence *f;
+
+ f = kmem_cache_alloc(slab_fences, GFP_KERNEL);
+ if (!f)
+ return NULL;
+
+ spin_lock_init(&f->lock);
+ dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);
+
+ return &f->base;
+}
+
+static int sanitycheck(void *arg)
+{
+ struct dma_fence *f;
+
+ f = dma_fence_create_proxy();
+ if (!f)
+ return -ENOMEM;
+
+ dma_fence_signal(f);
+ dma_fence_put(f);
+
+ return 0;
+}
+
+struct fences {
+ struct dma_fence *real;
+ struct dma_fence *proxy;
+ struct dma_fence __rcu *slot;
+};
+
+static int create_fences(struct fences *f, bool attach)
+{
+ f->proxy = dma_fence_create_proxy();
+ if (!f->proxy)
+ return -ENOMEM;
+
+ RCU_INIT_POINTER(f->slot, f->proxy);
+
+ f->real = mock_fence();
+ if (!f->real) {
+ dma_fence_put(f->proxy);
+ return -ENOMEM;
+ }
+
+ if (attach)
+ dma_fence_replace_proxy(&f->slot, f->real);
+
+ return 0;
+}
+
+static void free_fences(struct fences *f)
+{
+ dma_fence_put(dma_fence_replace_proxy(&f->slot, NULL));
+
+ dma_fence_signal(f->real);
+ dma_fence_put(f->real);
+
+ dma_fence_signal(f->proxy);
+ dma_fence_put(f->proxy);
+}
+
+static int wrap_target(void *arg)
+{
+ struct fences f;
+ int err = -EINVAL;
+
+ if (create_fences(&f, false))
+ return -ENOMEM;
+
+ if (dma_fence_proxy_get_real(f.proxy) != f.proxy) {
+ pr_err("Unwrapped proxy fenced reported a target fence!\n");
+ goto err_free;
+ }
+
+ dma_fence_proxy_set_real(f.proxy, f.real);
+ rcu_assign_pointer(f.slot, dma_fence_get(f.real)); /* free_fences() */
+
+ if (dma_fence_proxy_get_real(f.proxy) != f.real) {
+ pr_err("Wrapped proxy fenced did not report the target fence!\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ free_fences(&f);
+ return err;
+}
+
+static int wrap_proxy(void *arg)
+{
+ struct fences f;
+ int err = -EINVAL;
+
+ if (create_fences(&f, true))
+ return -ENOMEM;
+
+ if (dma_fence_proxy_get_real(f.proxy) != f.real) {
+ pr_err("Wrapped proxy fenced did not report the target fence!\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ free_fences(&f);
+ return err;
+}
+
+static int wrap_signaling(void *arg)
+{
+ struct fences f;
+ int err = -EINVAL;
+
+ if (create_fences(&f, true))
+ return -ENOMEM;
+
+ if (dma_fence_is_signaled(f.proxy)) {
+ pr_err("Fence unexpectedly signaled on creation\n");
+ goto err_free;
+ }
+
+ if (dma_fence_signal(f.real)) {
+ pr_err("Fence reported being already signaled\n");
+ goto err_free;
+ }
+
+ if (!dma_fence_is_signaled(f.proxy)) {
+ pr_err("Fence not reporting signaled\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ free_fences(&f);
+ return err;
+}
+
+static int wrap_signaling_recurse(void *arg)
+{
+ struct fences f;
+ struct dma_fence *chain;
+ int err = -EINVAL;
+
+ if (create_fences(&f, false))
+ return -ENOMEM;
+
+ chain = dma_fence_create_proxy();
+ if (!chain) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ dma_fence_replace_proxy(&f.slot, chain);
+ dma_fence_put(dma_fence_replace_proxy(&f.slot, f.real));
+ dma_fence_put(chain);
+
+ /* f.real <- chain <- f.proxy */
+
+ if (dma_fence_is_signaled(f.proxy)) {
+ pr_err("Fence unexpectedly signaled on creation\n");
+ goto err_free;
+ }
+
+ if (dma_fence_signal(f.real)) {
+ pr_err("Fence reported being already signaled\n");
+ goto err_free;
+ }
+
+ if (!dma_fence_is_signaled(f.proxy)) {
+ pr_err("Fence not reporting signaled\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ free_fences(&f);
+ return err;
+}
+
+struct simple_cb {
+ struct dma_fence_cb cb;
+ bool seen;
+};
+
+static void simple_callback(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+ /* Ensure the callback marker is visible, no excuses for missing it! */
+ smp_store_mb(container_of(cb, struct simple_cb, cb)->seen, true);
+}
+
+static int wrap_add_callback(void *arg)
+{
+ struct simple_cb cb = {};
+ struct fences f;
+ int err = -EINVAL;
+
+ if (create_fences(&f, true))
+ return -ENOMEM;
+
+ if (dma_fence_add_callback(f.proxy, &cb.cb, simple_callback)) {
+ pr_err("Failed to add callback, fence already signaled!\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f.real);
+ if (!cb.seen) {
+ pr_err("Callback failed!\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ free_fences(&f);
+ return err;
+}
+
+static int wrap_add_callback_recurse(void *arg)
+{
+ struct simple_cb cb = {};
+ struct dma_fence *chain;
+ struct fences f;
+ int err = -EINVAL;
+
+ if (create_fences(&f, false))
+ return -ENOMEM;
+
+ chain = dma_fence_create_proxy();
+ if (!chain) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ dma_fence_replace_proxy(&f.slot, chain);
+ dma_fence_put(dma_fence_replace_proxy(&f.slot, f.real));
+ dma_fence_put(chain);
+
+ /* f.real <- chain <- f.proxy */
+
+ if (dma_fence_add_callback(f.proxy, &cb.cb, simple_callback)) {
+ pr_err("Failed to add callback, fence already signaled!\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f.real);
+ if (!cb.seen) {
+ pr_err("Callback failed!\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ free_fences(&f);
+ return err;
+}
+
+static int wrap_late_add_callback(void *arg)
+{
+ struct simple_cb cb = {};
+ struct fences f;
+ int err = -EINVAL;
+
+ if (create_fences(&f, true))
+ return -ENOMEM;
+
+ dma_fence_signal(f.real);
+
+ if (!dma_fence_add_callback(f.proxy, &cb.cb, simple_callback)) {
+ pr_err("Added callback, but fence was already signaled!\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f.real);
+ if (cb.seen) {
+ pr_err("Callback called after failed attachment!\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ free_fences(&f);
+ return err;
+}
+
+static int wrap_early_add_callback(void *arg)
+{
+ struct simple_cb cb = {};
+ struct fences f;
+ int err = -EINVAL;
+
+ if (create_fences(&f, false))
+ return -ENOMEM;
+
+ if (dma_fence_add_callback(f.proxy, &cb.cb, simple_callback)) {
+ pr_err("Failed to add callback, fence already signaled!\n");
+ goto err_free;
+ }
+
+ dma_fence_replace_proxy(&f.slot, f.real);
+ dma_fence_signal(f.real);
+ if (!cb.seen) {
+ pr_err("Callback failed!\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ free_fences(&f);
+ return err;
+}
+
+static int wrap_early_add_callback_late(void *arg)
+{
+ struct simple_cb cb = {};
+ struct fences f;
+ int err = -EINVAL;
+
+ if (create_fences(&f, false))
+ return -ENOMEM;
+
+ dma_fence_signal(f.real);
+
+ if (dma_fence_add_callback(f.proxy, &cb.cb, simple_callback)) {
+ pr_err("Failed to add callback, fence already signaled!\n");
+ goto err_free;
+ }
+
+ dma_fence_replace_proxy(&f.slot, f.real);
+ dma_fence_signal(f.real);
+ if (!cb.seen) {
+ pr_err("Callback failed!\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ free_fences(&f);
+ return err;
+}
+
+static int wrap_early_add_callback_early(void *arg)
+{
+ struct simple_cb cb = {};
+ struct fences f;
+ int err = -EINVAL;
+
+ if (create_fences(&f, false))
+ return -ENOMEM;
+
+ if (dma_fence_add_callback(f.proxy, &cb.cb, simple_callback)) {
+ pr_err("Failed to add callback, fence already signaled!\n");
+ goto err_free;
+ }
+
+ dma_fence_replace_proxy(&f.slot, f.real);
+ dma_fence_signal(f.real);
+ if (!cb.seen) {
+ pr_err("Callback failed!\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ free_fences(&f);
+ return err;
+}
+
+static int wrap_rm_callback(void *arg)
+{
+ struct simple_cb cb = {};
+ struct fences f;
+ int err = -EINVAL;
+
+ if (create_fences(&f, true))
+ return -ENOMEM;
+
+ if (dma_fence_add_callback(f.proxy, &cb.cb, simple_callback)) {
+ pr_err("Failed to add callback, fence already signaled!\n");
+ goto err_free;
+ }
+
+ if (!dma_fence_remove_callback(f.proxy, &cb.cb)) {
+ pr_err("Failed to remove callback!\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f.real);
+ if (cb.seen) {
+ pr_err("Callback still signaled after removal!\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ free_fences(&f);
+ return err;
+}
+
+static int wrap_late_rm_callback(void *arg)
+{
+ struct simple_cb cb = {};
+ struct fences f;
+ int err = -EINVAL;
+
+ if (create_fences(&f, true))
+ return -ENOMEM;
+
+ if (dma_fence_add_callback(f.proxy, &cb.cb, simple_callback)) {
+ pr_err("Failed to add callback, fence already signaled!\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f.real);
+ if (!cb.seen) {
+ pr_err("Callback failed!\n");
+ goto err_free;
+ }
+
+ if (dma_fence_remove_callback(f.proxy, &cb.cb)) {
+ pr_err("Callback removal succeed after being executed!\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ free_fences(&f);
+ return err;
+}
+
+static int wrap_status(void *arg)
+{
+ struct fences f;
+ int err = -EINVAL;
+
+ if (create_fences(&f, true))
+ return -ENOMEM;
+
+ if (dma_fence_get_status(f.proxy)) {
+ pr_err("Fence unexpectedly has signaled status on creation\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f.real);
+ if (!dma_fence_get_status(f.proxy)) {
+ pr_err("Fence not reporting signaled status\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ free_fences(&f);
+ return err;
+}
+
+static int wrap_error(void *arg)
+{
+ struct fences f;
+ int err = -EINVAL;
+
+ if (create_fences(&f, true))
+ return -ENOMEM;
+
+ dma_fence_set_error(f.real, -EIO);
+
+ if (dma_fence_get_status(f.proxy)) {
+ pr_err("Fence unexpectedly has error status before signal\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f.real);
+ if (dma_fence_get_status(f.proxy) != -EIO) {
+ pr_err("Fence not reporting error status, got %d\n",
+ dma_fence_get_status(f.proxy));
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ free_fences(&f);
+ return err;
+}
+
+static int wrap_wait(void *arg)
+{
+ struct fences f;
+ int err = -EINVAL;
+
+ if (create_fences(&f, true))
+ return -ENOMEM;
+
+ if (dma_fence_wait_timeout(f.proxy, false, 0) != 0) {
+ pr_err("Wait reported complete before being signaled\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f.real);
+
+ if (dma_fence_wait_timeout(f.proxy, false, 0) == 0) {
+ pr_err("Wait reported incomplete after being signaled\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ dma_fence_signal(f.real);
+ free_fences(&f);
+ return err;
+}
+
+struct wait_timer {
+ struct timer_list timer;
+ struct fences f;
+};
+
+static void wait_timer(struct timer_list *timer)
+{
+ struct wait_timer *wt = from_timer(wt, timer, timer);
+
+ dma_fence_signal(wt->f.real);
+}
+
+static int wrap_wait_timeout(void *arg)
+{
+ struct wait_timer wt;
+ int err = -EINVAL;
+
+ if (create_fences(&wt.f, true))
+ return -ENOMEM;
+
+ timer_setup_on_stack(&wt.timer, wait_timer, 0);
+
+ if (dma_fence_wait_timeout(wt.f.proxy, false, 1) != 0) {
+ pr_err("Wait reported complete before being signaled\n");
+ goto err_free;
+ }
+
+ mod_timer(&wt.timer, jiffies + 1);
+
+ if (dma_fence_wait_timeout(wt.f.proxy, false, 2) != 0) {
+ if (timer_pending(&wt.timer)) {
+ pr_notice("Timer did not fire within the jiffie!\n");
+ err = 0; /* not our fault! */
+ } else {
+ pr_err("Wait reported incomplete after timeout\n");
+ }
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ del_timer_sync(&wt.timer);
+ destroy_timer_on_stack(&wt.timer);
+ dma_fence_signal(wt.f.real);
+ free_fences(&wt.f);
+ return err;
+}
+
+struct proxy_wait {
+ struct wait_queue_entry base;
+ struct dma_fence *fence;
+ bool seen;
+};
+
+static int proxy_wait_cb(struct wait_queue_entry *entry,
+ unsigned int mode, int flags, void *key)
+{
+ struct proxy_wait *p = container_of(entry, typeof(*p), base);
+
+ p->fence = key;
+ p->seen = true;
+
+ return 0;
+}
+
+static int wrap_listen_early(void *arg)
+{
+ struct proxy_wait wait = { .base.func = proxy_wait_cb };
+ struct fences f;
+ int err = -EINVAL;
+
+ if (create_fences(&f, false))
+ return -ENOMEM;
+
+ dma_fence_replace_proxy(&f.slot, f.real);
+ dma_fence_add_proxy_listener(f.proxy, &wait.base);
+
+ if (!wait.seen) {
+ pr_err("Proxy listener was not called after replace!\n");
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ if (wait.fence != f.real) {
+ pr_err("Proxy listener was not passed the real fence!\n");
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ dma_fence_signal(f.real);
+ free_fences(&f);
+ return err;
+}
+
+static int wrap_listen_late(void *arg)
+{
+ struct proxy_wait wait = { .base.func = proxy_wait_cb };
+ struct fences f;
+ int err = -EINVAL;
+
+ if (create_fences(&f, false))
+ return -ENOMEM;
+
+ dma_fence_add_proxy_listener(f.proxy, &wait.base);
+ dma_fence_replace_proxy(&f.slot, f.real);
+
+ if (!wait.seen) {
+ pr_err("Proxy listener was not called on replace!\n");
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ if (wait.fence != f.real) {
+ pr_err("Proxy listener was not passed the real fence!\n");
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ dma_fence_signal(f.real);
+ free_fences(&f);
+ return err;
+}
+
+static int wrap_listen_cancel(void *arg)
+{
+ struct proxy_wait wait = { .base.func = proxy_wait_cb };
+ struct fences f;
+ int err = -EINVAL;
+
+ if (create_fences(&f, false))
+ return -ENOMEM;
+
+ dma_fence_add_proxy_listener(f.proxy, &wait.base);
+ if (!dma_fence_remove_proxy_listener(f.proxy, &wait.base)) {
+ pr_err("Cancelling listener, already detached?\n");
+ err = -EINVAL;
+ goto err_free;
+ }
+ dma_fence_replace_proxy(&f.slot, f.real);
+
+ if (wait.seen) {
+ pr_err("Proxy listener was called after being removed!\n");
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ if (dma_fence_remove_proxy_listener(f.proxy, &wait.base)) {
+ pr_err("Double listener cancellation!\n");
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ dma_fence_signal(f.real);
+ free_fences(&f);
+ return err;
+}
+
+int dma_fence_proxy(void)
+{
+ static const struct subtest tests[] = {
+ SUBTEST(sanitycheck),
+ SUBTEST(wrap_target),
+ SUBTEST(wrap_proxy),
+ SUBTEST(wrap_signaling),
+ SUBTEST(wrap_signaling_recurse),
+ SUBTEST(wrap_add_callback),
+ SUBTEST(wrap_add_callback_recurse),
+ SUBTEST(wrap_late_add_callback),
+ SUBTEST(wrap_early_add_callback),
+ SUBTEST(wrap_early_add_callback_late),
+ SUBTEST(wrap_early_add_callback_early),
+ SUBTEST(wrap_rm_callback),
+ SUBTEST(wrap_late_rm_callback),
+ SUBTEST(wrap_status),
+ SUBTEST(wrap_error),
+ SUBTEST(wrap_wait),
+ SUBTEST(wrap_wait_timeout),
+ SUBTEST(wrap_listen_early),
+ SUBTEST(wrap_listen_late),
+ SUBTEST(wrap_listen_cancel),
+ };
+ int ret;
+
+ slab_fences = KMEM_CACHE(mock_fence,
+ SLAB_TYPESAFE_BY_RCU |
+ SLAB_HWCACHE_ALIGN);
+ if (!slab_fences)
+ return -ENOMEM;
+
+ ret = subtests(tests, NULL);
+
+ kmem_cache_destroy(slab_fences);
+
+ return ret;
+}
diff --git a/include/linux/dma-fence-proxy.h b/include/linux/dma-fence-proxy.h
new file mode 100644
index 000000000000..6a986b5bb009
--- /dev/null
+++ b/include/linux/dma-fence-proxy.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * dma-fence-proxy: allows waiting upon unset and future fences
+ *
+ * Copyright (C) 2017 Intel Corporation
+ */
+
+#ifndef __LINUX_DMA_FENCE_PROXY_H
+#define __LINUX_DMA_FENCE_PROXY_H
+
+#include <linux/kernel.h>
+#include <linux/dma-fence.h>
+
+struct wait_queue_entry;
+
+extern const struct dma_fence_ops dma_fence_proxy_ops;
+
+struct dma_fence *__dma_fence_create_proxy(u64 context, u64 seqno);
+struct dma_fence *dma_fence_create_proxy(void);
+
+static inline bool dma_fence_is_proxy(struct dma_fence *fence)
+{
+ return fence->ops == &dma_fence_proxy_ops;
+}
+
+void dma_fence_proxy_set_real(struct dma_fence *fence, struct dma_fence *real);
+struct dma_fence *dma_fence_proxy_get_real(struct dma_fence *fence);
+
+struct dma_fence *
+dma_fence_replace_proxy(struct dma_fence __rcu **slot,
+ struct dma_fence *fence);
+
+void dma_fence_add_proxy_listener(struct dma_fence *fence,
+ struct wait_queue_entry *wait);
+bool dma_fence_remove_proxy_listener(struct dma_fence *fence,
+ struct wait_queue_entry *wait);
+
+#endif /* __LINUX_DMA_FENCE_PROXY_H */
--
2.20.1