[PATCH 06/11] dma-buf: add dma_resv_list_fence helper
Christian König
ckoenig.leichtzumerken at gmail.com
Mon May 17 14:11:24 UTC 2021
Add a helper that wraps the RCU/lockdep access check instead of repeating it
over and over again in every caller.
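A minimal before/after sketch of a caller (fobj/i are illustrative names taken
from the existing loops in the diff below):

	/* before: every caller open-codes the lockdep-annotated dereference */
	fence = rcu_dereference_protected(fobj->shared[i],
					  dma_resv_held(obj));

	/*
	 * after: the check lives in one place and accepts either the RCU
	 * read side or the dma_resv lock being held
	 */
	fence = dma_resv_list_fence(obj, fobj, i);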
Signed-off-by: Christian König <christian.koenig at amd.com>
---
drivers/dma-buf/dma-resv.c | 42 +++++++++++++++++++++++---------------
1 file changed, 25 insertions(+), 17 deletions(-)
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index b1a1a31dc009..49f3c1009821 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -93,6 +93,22 @@ static void dma_resv_list_free(struct dma_resv_list *list)
kfree_rcu(list, rcu);
}
+/**
+ * dma_resv_list_fence - return fence for index
+ * @obj: the reservation object
+ * @list: list to get the fence from
+ * @idx: index into the fence array
+ *
+ * Return the fence at the specified index, double checking that either the
+ * RCU read side lock or the dma_resv object lock is held.
+ */
+static struct dma_fence *dma_resv_list_fence(struct dma_resv *obj,
+ struct dma_resv_list *list,
+ unsigned int idx)
+{
+ return rcu_dereference_check(list->shared[idx], dma_resv_held(obj));
+}
+
/**
* dma_resv_init - initialize a reservation object
* @obj: the reservation object
@@ -171,8 +187,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
struct dma_fence *fence;
- fence = rcu_dereference_protected(old->shared[i],
- dma_resv_held(obj));
+ fence = dma_resv_list_fence(obj, old, i);
if (dma_fence_is_signaled(fence))
RCU_INIT_POINTER(new->shared[--k], fence);
else
@@ -194,13 +209,8 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
return 0;
/* Drop the references to the signaled fences */
- for (i = k; i < max; ++i) {
- struct dma_fence *fence;
-
- fence = rcu_dereference_protected(new->shared[i],
- dma_resv_held(obj));
- dma_fence_put(fence);
- }
+ for (i = k; i < max; ++i)
+ dma_fence_put(dma_resv_list_fence(obj, new, i));
kfree_rcu(old, rcu);
return 0;
@@ -251,8 +261,7 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
for (i = 0; i < count; ++i) {
- old = rcu_dereference_protected(fobj->shared[i],
- dma_resv_held(obj));
+ old = dma_resv_list_fence(obj, fobj, i);
if (old->context == fence->context ||
dma_fence_is_signaled(old))
goto replace;
@@ -303,8 +312,7 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
/* inplace update, no shared fences */
while (i--)
- dma_fence_put(rcu_dereference_protected(old->shared[i],
- dma_resv_held(obj)));
+ dma_fence_put(dma_resv_list_fence(obj, old, i));
dma_fence_put(old_fence);
}
@@ -350,7 +358,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
struct dma_fence __rcu **dst;
struct dma_fence *fence;
- fence = rcu_dereference(src_list->shared[i]);
+ fence = dma_resv_list_fence(src, src_list, i);
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&fence->flags))
continue;
@@ -459,7 +467,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
shared = nshared;
shared_count = fobj ? fobj->shared_count : 0;
for (i = 0; i < shared_count; ++i) {
- shared[i] = rcu_dereference(fobj->shared[i]);
+ shared[i] = dma_resv_list_fence(obj, fobj, i);
if (!dma_fence_get_rcu(shared[i]))
break;
}
@@ -543,7 +551,7 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
for (i = 0; !fence && i < shared_count; ++i) {
struct dma_fence *lfence;
- lfence = rcu_dereference(fobj->shared[i]);
+ lfence = dma_resv_list_fence(obj, fobj, i);
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&lfence->flags))
continue;
@@ -629,7 +637,7 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
for (i = 0; i < shared_count; ++i) {
struct dma_fence *fence;
- fence = rcu_dereference(fobj->shared[i]);
+ fence = dma_resv_list_fence(obj, fobj, i);
ret = dma_resv_test_signaled_single(fence);
if (ret < 0)
goto retry;
--
2.25.1