[PATCH 5/6] dma-fence: Simply wrap dma_fence_signal_locked with dma_fence_signal

Koenig, Christian Christian.Koenig at amd.com
Sat Aug 17 15:16:35 UTC 2019


On 17.08.19 at 16:47, Chris Wilson wrote:
> Currently dma_fence_signal() tries to avoid the spinlock and only takes
> it if absolutely required to walk the callback list. However, to allow
> for some users to surreptitiously insert lazy signal callbacks that
> do not depend on enabling the signaling mechanism around every fence,
> we always need to notify the callbacks on signaling. As such, we will
> always need to take the spinlock and dma_fence_signal() effectively
> becomes a clone of dma_fence_signal_locked().
>
> v2: Update the test_and_set_bit() before entering the spinlock.
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> Cc: Christian König <christian.koenig at amd.com>
> Cc: Daniel Vetter <daniel.vetter at ffwll.ch>
> ---
>   drivers/dma-buf/dma-fence.c | 43 +++++++++++--------------------------
>   1 file changed, 13 insertions(+), 30 deletions(-)
>
> diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
> index ff0cd6eae766..89d96e3e6df6 100644
> --- a/drivers/dma-buf/dma-fence.c
> +++ b/drivers/dma-buf/dma-fence.c
> @@ -129,25 +129,16 @@ EXPORT_SYMBOL(dma_fence_context_alloc);
>   int dma_fence_signal_locked(struct dma_fence *fence)
>   {
>   	struct dma_fence_cb *cur, *tmp;
> -	int ret = 0;
>   
>   	lockdep_assert_held(fence->lock);
>   
> -	if (WARN_ON(!fence))
> +	if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
> +				      &fence->flags)))
>   		return -EINVAL;
>   
> -	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
> -		ret = -EINVAL;
> -
> -		/*
> -		 * we might have raced with the unlocked dma_fence_signal,
> -		 * still run through all callbacks
> -		 */
> -	} else {
> -		fence->timestamp = ktime_get();
> -		set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
> -		trace_dma_fence_signaled(fence);
> -	}
> +	fence->timestamp = ktime_get();
> +	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
> +	trace_dma_fence_signaled(fence);
>   
>   	if (!list_empty(&fence->cb_list)) {
>   		list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
> @@ -156,7 +147,8 @@ int dma_fence_signal_locked(struct dma_fence *fence)
>   		}
>   		INIT_LIST_HEAD(&fence->cb_list);
>   	}
> -	return ret;
> +
> +	return 0;
>   }
>   EXPORT_SYMBOL(dma_fence_signal_locked);
>   
> @@ -176,28 +168,19 @@ EXPORT_SYMBOL(dma_fence_signal_locked);
>   int dma_fence_signal(struct dma_fence *fence)
>   {
>   	unsigned long flags;
> +	int ret;
>   
>   	if (!fence)
>   		return -EINVAL;
>   
> -	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
> +	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
>   		return -EINVAL;

Actually I think we can completely drop this extra test. Signaling a
fence twice isn't a fast path we need to optimize for.
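
I.e. just take the lock unconditionally and let the test_and_set_bit()
in dma_fence_signal_locked() catch the double-signal case, roughly like
this (untested sketch):

int dma_fence_signal(struct dma_fence *fence)
{
	unsigned long flags;
	int ret;

	if (!fence)
		return -EINVAL;

	/* dma_fence_signal_locked() returns -EINVAL if already signaled */
	spin_lock_irqsave(fence->lock, flags);
	ret = dma_fence_signal_locked(fence);
	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}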

Apart from that it looks good to me,
Christian.

>   
> -	fence->timestamp = ktime_get();
> -	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
> -	trace_dma_fence_signaled(fence);
> -
> -	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
> -		struct dma_fence_cb *cur, *tmp;
> +	spin_lock_irqsave(fence->lock, flags);
> +	ret = dma_fence_signal_locked(fence);
> +	spin_unlock_irqrestore(fence->lock, flags);
>   
> -		spin_lock_irqsave(fence->lock, flags);
> -		list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
> -			list_del_init(&cur->node);
> -			cur->func(fence, cur);
> -		}
> -		spin_unlock_irqrestore(fence->lock, flags);
> -	}
> -	return 0;
> +	return ret;
>   }
>   EXPORT_SYMBOL(dma_fence_signal);
>   
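For reference, the "lazy signal callback" pattern the commit message
above alludes to would look roughly like the hypothetical sketch below
(the helper name is made up and not part of this series). Such a
callback sits on fence->cb_list without DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT
ever being set, so it is only guaranteed to run if dma_fence_signal()
unconditionally walks the callback list, which is what this patch
ensures.

/*
 * Hypothetical example only: queue a callback directly on the fence's
 * callback list without enabling the signaling mechanism. Before this
 * patch dma_fence_signal() only walked cb_list when
 * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT was set, so such a callback could be
 * missed; with the patch it is always invoked on signaling.
 */
static int lazy_fence_cb_add(struct dma_fence *fence,
			     struct dma_fence_cb *cb,
			     dma_fence_func_t func)
{
	unsigned long flags;
	int ret = 0;

	cb->func = func;

	spin_lock_irqsave(fence->lock, flags);
	if (!dma_fence_is_signaled_locked(fence)) {
		list_add_tail(&cb->node, &fence->cb_list);
	} else {
		INIT_LIST_HEAD(&cb->node);
		ret = -ENOENT;	/* already signaled, nothing to queue */
	}
	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}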


