[Intel-gfx] [PATCH 15/39] drm/i915: Extend execution fence to support a callback

Tvrtko Ursulin tvrtko.ursulin@linux.intel.com
Thu Mar 14 16:50:40 UTC 2019


On 13/03/2019 14:43, Chris Wilson wrote:
> In the next patch, we will want to configure the slave request
> depending on which physical engine the master request is executed on.
> For this, we introduce a callback from the execute fence to convey this
> information.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/i915_request.c | 84 +++++++++++++++++++++++++++--
>   drivers/gpu/drm/i915/i915_request.h |  4 ++
>   2 files changed, 83 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index 2382339172b4..0a46f8113f5c 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -38,6 +38,8 @@ struct execute_cb {
>   	struct list_head link;
>   	struct irq_work work;
>   	struct i915_sw_fence *fence;
> +	void (*hook)(struct i915_request *rq, struct dma_fence *signal);
> +	struct i915_request *signal;
>   };
>   
>   static struct i915_global_request {
> @@ -343,6 +345,17 @@ static void irq_execute_cb(struct irq_work *wrk)
>   	kmem_cache_free(global.slab_execute_cbs, cb);
>   }
>   
> +static void irq_execute_cb_hook(struct irq_work *wrk)
> +{
> +	struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
> +
> +	cb->hook(container_of(cb->fence, struct i915_request, submit),
> +		 &cb->signal->fence);
> +	i915_request_put(cb->signal);
> +
> +	irq_execute_cb(wrk);
> +}
> +
>   static void __notify_execute_cb(struct i915_request *rq)
>   {
>   	struct execute_cb *cb;
> @@ -369,14 +382,19 @@ static void __notify_execute_cb(struct i915_request *rq)
>   }
>   
>   static int
> -i915_request_await_execution(struct i915_request *rq,
> -			     struct i915_request *signal,
> -			     gfp_t gfp)
> +__i915_request_await_execution(struct i915_request *rq,
> +			       struct i915_request *signal,
> +			       void (*hook)(struct i915_request *rq,
> +					    struct dma_fence *signal),
> +			       gfp_t gfp)
>   {
>   	struct execute_cb *cb;
>   
> -	if (i915_request_is_active(signal))
> +	if (i915_request_is_active(signal)) {
> +		if (hook)
> +			hook(rq, &signal->fence);
>   		return 0;
> +	}
>   
>   	cb = kmem_cache_alloc(global.slab_execute_cbs, gfp);
>   	if (!cb)
> @@ -386,8 +404,18 @@ i915_request_await_execution(struct i915_request *rq,
>   	i915_sw_fence_await(cb->fence);
>   	init_irq_work(&cb->work, irq_execute_cb);
>   
> +	if (hook) {
> +		cb->hook = hook;
> +		cb->signal = i915_request_get(signal);
> +		cb->work.func = irq_execute_cb_hook;
> +	}
> +
>   	spin_lock_irq(&signal->lock);
>   	if (i915_request_is_active(signal)) {
> +		if (hook) {
> +			hook(rq, &signal->fence);
> +			i915_request_put(signal);
> +		}
>   		i915_sw_fence_complete(cb->fence);
>   		kmem_cache_free(global.slab_execute_cbs, cb);
>   	} else {
> @@ -790,7 +818,7 @@ emit_semaphore_wait(struct i915_request *to,
>   		return err;
>   
>   	/* Only submit our spinner after the signaler is running! */
> -	err = i915_request_await_execution(to, from, gfp);
> +	err = __i915_request_await_execution(to, from, NULL, gfp);
>   	if (err)
>   		return err;
>   
> @@ -910,6 +938,52 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
>   	return 0;
>   }
>   
> +int
> +i915_request_await_execution(struct i915_request *rq,
> +			     struct dma_fence *fence,
> +			     void (*hook)(struct i915_request *rq,
> +					  struct dma_fence *signal))
> +{
> +	struct dma_fence **child = &fence;
> +	unsigned int nchild = 1;
> +	int ret;
> +
> +	if (dma_fence_is_array(fence)) {
> +		struct dma_fence_array *array = to_dma_fence_array(fence);
> +
> +		/* XXX Error for signal-on-any fence arrays */
> +
> +		child = array->fences;
> +		nchild = array->num_fences;
> +		GEM_BUG_ON(!nchild);
> +	}
> +
> +	do {
> +		fence = *child++;
> +		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
> +			continue;
> +
> +		/*
> +		 * We don't squash repeated fence dependencies here as we
> +		 * want to run our callback in all cases.
> +		 */
> +
> +		if (dma_fence_is_i915(fence))
> +			ret = __i915_request_await_execution(rq,
> +							     to_request(fence),
> +							     hook,
> +							     I915_FENCE_GFP);
> +		else
> +			ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
> +							    I915_FENCE_TIMEOUT,
> +							    GFP_KERNEL);
> +		if (ret < 0)
> +			return ret;
> +	} while (--nchild);
> +
> +	return 0;
> +}
> +
>   /**
>    * i915_request_await_object - set this request to (async) wait upon a bo
>    * @to: request we are wishing to use
> diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
> index cd6c130964cd..d4f6b2940130 100644
> --- a/drivers/gpu/drm/i915/i915_request.h
> +++ b/drivers/gpu/drm/i915/i915_request.h
> @@ -265,6 +265,10 @@ int i915_request_await_object(struct i915_request *to,
>   			      bool write);
>   int i915_request_await_dma_fence(struct i915_request *rq,
>   				 struct dma_fence *fence);
> +int i915_request_await_execution(struct i915_request *rq,
> +				 struct dma_fence *fence,
> +				 void (*hook)(struct i915_request *rq,
> +					      struct dma_fence *signal));
>   
>   void i915_request_add(struct i915_request *rq);
>   
> 
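
For anyone following along, a rough sketch of how a caller would use the new
hook once this lands; the on_master_executed() name and the slave/master
variables are made up for illustration, only i915_request_await_execution()
and the hook signature come from the patch itself:

	/*
	 * Hypothetical callback: 'rq' is the waiting (slave) request and
	 * 'signal' is the master's &i915_request.fence, so to_request()
	 * recovers the master request. It runs either synchronously, if the
	 * master is already active when the await is set up, or later from
	 * the execute fence's irq_work, so it must be irq-safe.
	 */
	static void on_master_executed(struct i915_request *rq,
				       struct dma_fence *signal)
	{
		struct i915_request *master = to_request(signal);

		/* The master's physical engine is known by this point. */
		pr_debug("master executed on %s\n", master->engine->name);
	}

	/* At submission time, before adding the slave request: */
	err = i915_request_await_execution(slave, &master->fence,
					   on_master_executed);
	if (err)
		return err;

Passing a NULL hook keeps the old emit_semaphore_wait() behaviour, and if the
master is already running the hook fires immediately rather than via the
irq_work, which is what the is_active fast path and the re-check under
signal->lock above take care of.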

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Regards,

Tvrtko

