[Intel-gfx] [PATCH 09/28] dma-buf: use the new iterator in dma_resv_poll

Tvrtko Ursulin tvrtko.ursulin at linux.intel.com
Tue Oct 5 07:44:31 UTC 2021


On 01/10/2021 11:05, Christian König wrote:
> Simplify the code a bit.
> 
> Signed-off-by: Christian König <christian.koenig at amd.com>
> ---
>   drivers/dma-buf/dma-buf.c | 36 ++++++------------------------------
>   1 file changed, 6 insertions(+), 30 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
> index 8242b5d9baeb..beb504a92d60 100644
> --- a/drivers/dma-buf/dma-buf.c
> +++ b/drivers/dma-buf/dma-buf.c
> @@ -209,19 +209,14 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
>   	dma_fence_put(fence);
>   }
>   
> -static bool dma_buf_poll_shared(struct dma_resv *resv,
> +static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
>   				struct dma_buf_poll_cb_t *dcb)
>   {
> -	struct dma_resv_list *fobj = dma_resv_shared_list(resv);
> +	struct dma_resv_iter cursor;
>   	struct dma_fence *fence;
> -	int i, r;
> -
> -	if (!fobj)
> -		return false;
> +	int r;
>   
> -	for (i = 0; i < fobj->shared_count; ++i) {
> -		fence = rcu_dereference_protected(fobj->shared[i],
> -						  dma_resv_held(resv));
> +	dma_resv_for_each_fence(&cursor, resv, write, fence) {
>   		dma_fence_get(fence);
>   		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
>   		if (!r)

This behaviour is unchanged by the patch, but are the semantics supposed 
to be like this: signal the poll event as soon as _any_ of the shared 
fences has signaled?
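
For reference, here is a condensed sketch of what the loop ends up doing, 
pieced together from the hunks above and the removed dma_buf_poll_excl() 
pattern; the function name is just illustrative, not the actual source:

	static bool poll_add_cb_sketch(struct dma_resv *resv, bool write,
				       struct dma_buf_poll_cb_t *dcb)
	{
		struct dma_resv_iter cursor;
		struct dma_fence *fence;

		dma_resv_for_each_fence(&cursor, resv, write, fence) {
			dma_fence_get(fence);
			/*
			 * 0 means the fence was still pending and the
			 * callback is now queued on it.
			 */
			if (!dma_fence_add_callback(fence, &dcb->cb,
						    dma_buf_poll_cb))
				return true;	/* stop after the first one */
			dma_fence_put(fence);
		}
		return false;	/* everything already signaled */
	}

So the wakeup comes from whichever fence first accepted the callback, 
even if other fences in the reservation object are still pending.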

Regards,

Tvrtko

> @@ -232,24 +227,6 @@ static bool dma_buf_poll_shared(struct dma_resv *resv,
>   	return false;
>   }
>   
> -static bool dma_buf_poll_excl(struct dma_resv *resv,
> -			      struct dma_buf_poll_cb_t *dcb)
> -{
> -	struct dma_fence *fence = dma_resv_excl_fence(resv);
> -	int r;
> -
> -	if (!fence)
> -		return false;
> -
> -	dma_fence_get(fence);
> -	r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
> -	if (!r)
> -		return true;
> -	dma_fence_put(fence);
> -
> -	return false;
> -}
> -
>   static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
>   {
>   	struct dma_buf *dmabuf;
> @@ -282,8 +259,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
>   		spin_unlock_irq(&dmabuf->poll.lock);
>   
>   		if (events & EPOLLOUT) {
> -			if (!dma_buf_poll_shared(resv, dcb) &&
> -			    !dma_buf_poll_excl(resv, dcb))
> +			if (!dma_buf_poll_add_cb(resv, true, dcb))
>   				/* No callback queued, wake up any other waiters */
>   				dma_buf_poll_cb(NULL, &dcb->cb);
>   			else
> @@ -303,7 +279,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
>   		spin_unlock_irq(&dmabuf->poll.lock);
>   
>   		if (events & EPOLLIN) {
> -			if (!dma_buf_poll_excl(resv, dcb))
> +			if (!dma_buf_poll_add_cb(resv, false, dcb))
>   				/* No callback queued, wake up any other waiters */
>   				dma_buf_poll_cb(NULL, &dcb->cb);
>   			else
> 

