[PATCH 7/9] dma-buf/resv: add new fences container implementation
Christian König
ckoenig.leichtzumerken at gmail.com
Mon Aug 26 14:57:29 UTC 2019
Add a new container for fences which internally uses
dma_fence_arrays to store the fences.
Signed-off-by: Christian König <christian.koenig at amd.com>
---
drivers/dma-buf/dma-resv.c | 181 +++++++++++++++++++++++++++++++++++++
include/linux/dma-resv.h | 49 ++++++++++
2 files changed, 230 insertions(+)
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 59fbcd9f4b01..d67eaa3fa650 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -33,6 +33,7 @@
*/
#include <linux/dma-resv.h>
+#include <linux/dma-fence-array.h>
#include <linux/export.h>
/**
@@ -55,6 +56,186 @@ EXPORT_SYMBOL(reservation_seqcount_class);
const char reservation_seqcount_string[] = "reservation_seqcount";
EXPORT_SYMBOL(reservation_seqcount_string);
+/*
+ * Initialize a dma_resv_fences container: no singleton fence is set yet
+ * and no staged dma_fence_array has been allocated.
+ */
+static void dma_resv_fences_init(struct dma_resv_fences *fences)
+{
+ RCU_INIT_POINTER(fences->fence, NULL);
+ fences->staged = NULL;
+}
+
+/*
+ * Finalize a dma_resv_fences container: drop the reference to the
+ * singleton fence and free the staged array, if any. Must only be
+ * called once no concurrent readers can exist (see below).
+ */
+static void dma_resv_fences_fini(struct dma_resv_fences *fences)
+{
+ /*
+ * This object should be dead and all references must have
+ * been released to it, so no need to be protected with rcu.
+ */
+ dma_fence_put(rcu_dereference_protected(fences->fence, true));
+ dma_fence_array_free(fences->staged);
+}
+
+/**
+ * dma_resv_fences_reserve - allocate fence slots
+ * @obj: the reservation object protecting @fences, must be locked
+ * @fences: fences object where we need slots
+ * @num_fences: number of additional fence slots we need
+ *
+ * Make sure the staged dma_fence_array has room for @num_fences new
+ * fences on top of all the fences it already has to carry over (either
+ * from an existing staged array or from the committed singleton
+ * fence/fence array).
+ *
+ * Returns -ENOMEM on allocation failure, 0 otherwise.
+ */
+int dma_resv_fences_reserve(struct dma_resv *obj,
+ struct dma_resv_fences *fences,
+ unsigned int num_fences)
+{
+ struct dma_fence *fence = dma_resv_fences_deref(obj, fences);
+ struct dma_fence_array *staged, *array;
+ unsigned int i;
+
+ /*
+ * Count the fences to carry over: prefer an already staged array,
+ * otherwise the committed singleton (which may itself be an array).
+ */
+ array = fences->staged;
+ if (!array)
+ array = to_dma_fence_array(fence);
+
+ if (array)
+ num_fences += array->num_fences;
+ else if (fence)
+ num_fences += 1;
+
+ /* Nothing to do if the current staged array is already big enough. */
+ staged = fences->staged;
+ if (staged && dma_fence_array_max_fences(staged) >= num_fences)
+ return 0;
+
+ staged = dma_fence_array_alloc(num_fences, NULL);
+ if (!staged)
+ return -ENOMEM;
+
+ /* Copy over all fences from the old object */
+ if (array) {
+ for (i = 0; i < array->num_fences; ++i) {
+ struct dma_fence *f = array->fences[i];
+
+ staged->fences[i] = dma_fence_get(f);
+ }
+ staged->num_fences = array->num_fences;
+
+ } else if (fence) {
+ staged->fences[0] = dma_fence_get(fence);
+ staged->num_fences = 1;
+
+ } else {
+ staged->num_fences = 0;
+ }
+
+ /* Replace the old staged array (freeing it) with the larger one. */
+ dma_fence_array_free(fences->staged);
+ fences->staged = staged;
+
+ return 0;
+}
+EXPORT_SYMBOL(dma_resv_fences_reserve);
+
+/**
+ * dma_resv_fences_set - set the singleton fence
+ * @obj: the reservation object protecting @fences, must be locked
+ * @fences: fences object where to set the fence
+ * @fence: singleton fence for the object
+ *
+ * Grabs a reference to the new fence, replaces the current singleton fence
+ * with it and drops any staged fences.
+ */
+void dma_resv_fences_set(struct dma_resv *obj,
+ struct dma_resv_fences *fences,
+ struct dma_fence *fence)
+{
+ struct dma_fence *old = dma_resv_fences_deref(obj, fences);
+
+ /* Publish the new fence before dropping the old reference. */
+ rcu_assign_pointer(fences->fence, dma_fence_get(fence));
+ dma_fence_array_free(fences->staged);
+ fences->staged = NULL;
+ dma_fence_put(old);
+}
+EXPORT_SYMBOL(dma_resv_fences_set);
+
+/**
+ * dma_resv_fences_add - add a fence to the staged fence_array
+ * @fences: fences object where to add the fence to
+ * @fence: fence to add
+ *
+ * Add a new fence to the staged fence_array. The caller must have made
+ * room beforehand with dma_resv_fences_reserve(): @fences->staged is
+ * dereferenced here without a NULL check and the slot capacity is only
+ * BUG_ON-checked.
+ */
+void dma_resv_fences_add(struct dma_resv_fences *fences,
+ struct dma_fence *fence)
+{
+ struct dma_fence_array *staged = fences->staged;
+ struct dma_fence *old;
+ unsigned int i;
+
+ /*
+ * Reuse the slot of a fence from the same context, or of an
+ * already signaled fence. NOTE(review): signaled-fence reuse is
+ * compiled out under CONFIG_DEBUG_MUTEXES, presumably to keep
+ * array growth deterministic for debugging — confirm intent.
+ */
+ for (i = 0; i < staged->num_fences; ++i) {
+ old = staged->fences[i];
+
+ if (old->context == fence->context
+#ifndef CONFIG_DEBUG_MUTEXES
+ || dma_fence_is_signaled(old)
+#endif
+ ) {
+ dma_fence_put(old);
+ goto replace;
+ }
+ }
+
+ /* No reusable slot: append, relying on reserve() having made room. */
+ BUG_ON(staged->num_fences >= dma_fence_array_max_fences(staged));
+ i = staged->num_fences++;
+
+replace:
+ staged->fences[i] = dma_fence_get(fence);
+}
+EXPORT_SYMBOL(dma_resv_fences_add);
+
+/**
+ * dma_resv_fences_commit - commit the staged dma_fence_array
+ * @obj: the reservation object protecting @fences, must be locked
+ * @fences: fences object where the commit should happen
+ *
+ * Commit the fences staged in the dma_fence_array and make them visible to
+ * other threads.
+ */
+void dma_resv_fences_commit(struct dma_resv *obj,
+ struct dma_resv_fences *fences)
+{
+ struct dma_fence *old = dma_resv_fences_deref(obj, fences);
+ struct dma_fence_array *array = fences->staged, *staged;
+ unsigned int i;
+
+ /* Nothing staged, or nothing in it: keep the current fence. */
+ if (!array || !array->num_fences)
+ return;
+
+ /* Initialize the staged array as a fence and publish it via RCU. */
+ fences->staged = NULL;
+ dma_fence_array_init(array, dma_fence_context_alloc(1), 1, false);
+ rcu_assign_pointer(fences->fence, &array->base);
+
+ /* Try to recycle the old fence array */
+ staged = to_dma_fence_array(old);
+ if (!staged || dma_fence_array_max_fences(staged) < array->num_fences) {
+ dma_fence_put(old);
+ return;
+ }
+
+ /*
+ * Try to drop the last reference; if somebody else still holds
+ * one we must leave the old array alone. NOTE(review): relies on
+ * dma_fence_array_recycle() semantics not visible here — confirm
+ * it returns true only when we owned the final reference.
+ */
+ if (!dma_fence_array_recycle(staged))
+ return;
+
+ /* Make sure the staged array has the latest fences */
+ for (i = 0; i < array->num_fences; ++i) {
+ struct dma_fence *f = array->fences[i];
+
+ if (f == staged->fences[i])
+ continue;
+
+ dma_fence_put(staged->fences[i]);
+ staged->fences[i] = dma_fence_get(f);
+ }
+ /* Drop references to fences beyond the new, shorter count. */
+ for (;i < staged->num_fences; ++i)
+ dma_fence_put(staged->fences[i]);
+ staged->num_fences = array->num_fences;
+ fences->staged = staged;
+}
+EXPORT_SYMBOL(dma_resv_fences_commit);
+
/**
* dma_resv_list_alloc - allocate fence list
* @shared_max: number of fences we need space for
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index 03b0f95682b0..c70f13fa6789 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -45,10 +45,33 @@
#include <linux/seqlock.h>
#include <linux/rcupdate.h>
+struct dma_resv;
+
extern struct ww_class reservation_ww_class;
extern struct lock_class_key reservation_seqcount_class;
extern const char reservation_seqcount_string[];
+/**
+ * struct dma_resv_fences - fences inside a reservation object
+ * @fence: the current RCU protected singleton fence; may point to a
+ *	single fence or to the base of a dma_fence_array
+ * @staged: optional dma_fence_array, filled while the reservation object
+ *	is locked, that replaces @fence on the next commit
+ */
+struct dma_resv_fences {
+ struct dma_fence __rcu *fence;
+ struct dma_fence_array *staged;
+};
+
+int dma_resv_fences_reserve(struct dma_resv *obj,
+ struct dma_resv_fences *fences,
+ unsigned int num_fences);
+void dma_resv_fences_set(struct dma_resv *obj,
+ struct dma_resv_fences *fences,
+ struct dma_fence *fence);
+void dma_resv_fences_add(struct dma_resv_fences *fences,
+ struct dma_fence *fence);
+void dma_resv_fences_commit(struct dma_resv *obj,
+ struct dma_resv_fences *fences);
+
/**
* struct dma_resv_list - a list of shared fences
* @rcu: for internal use
@@ -80,6 +103,32 @@ struct dma_resv {
#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
+/**
+ * dma_resv_fences_deref - get singleton fence
+ * @obj: the reservation object, must be locked by the caller
+ * @fences: the fences object
+ *
+ * Returns the singleton fence from a resv_fences object without taking a
+ * reference; only valid while @obj stays locked (checked via lockdep).
+ */
+static inline struct dma_fence *
+dma_resv_fences_deref(struct dma_resv *obj, struct dma_resv_fences *fences)
+{
+ return rcu_dereference_protected(fences->fence,
+ dma_resv_held(obj));
+}
+
+/**
+ * dma_resv_fences_get_rcu - RCU get single fence
+ * @fences: fences structure where we need to get a reference for
+ *
+ * Get a reference to the single fence representing the synchronization,
+ * safe against concurrent replacement. The caller need not hold the
+ * reservation lock, and must put the returned reference.
+ */
+static inline struct dma_fence *
+dma_resv_fences_get_rcu(struct dma_resv_fences *fences)
+{
+ return dma_fence_get_rcu_safe(&fences->fence);
+}
+
/**
* dma_resv_get_list - get the reservation object's
* shared fence list, with update-side lock held
--
2.17.1
More information about the dri-devel
mailing list