[PATCH 22/23] drm/i915: drop bo->moving dependency

Daniel Vetter daniel at ffwll.ch
Tue Mar 29 15:57:13 UTC 2022


On Mon, Mar 21, 2022 at 02:58:55PM +0100, Christian König wrote:
> That should now be handled by the common dma_resv framework.
> 
> Signed-off-by: Christian König <christian.koenig at amd.com>
> Cc: intel-gfx at lists.freedesktop.org

Reviewed-by: Daniel Vetter <daniel.vetter at ffwll.ch>

> ---
>  drivers/gpu/drm/i915/gem/i915_gem_object.c   | 29 ++++++--------------
>  drivers/gpu/drm/i915/gem/i915_gem_object.h   |  5 ++--
>  drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c | 15 +---------
>  drivers/gpu/drm/i915/i915_vma.c              |  9 +++++-
>  4 files changed, 19 insertions(+), 39 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> index d87b508b59b1..fd240435ffef 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> @@ -742,18 +742,19 @@ static const struct drm_gem_object_funcs i915_gem_object_funcs = {
>  /**
>   * i915_gem_object_get_moving_fence - Get the object's moving fence if any
>   * @obj: The object whose moving fence to get.
> + * @fence: The resulting fence
>   *
>   * A non-signaled moving fence means that there is an async operation
>   * pending on the object that needs to be waited on before setting up
>   * any GPU- or CPU PTEs to the object's pages.
>   *
> - * Return: A refcounted pointer to the object's moving fence if any,
> - * NULL otherwise.
> + * Return: Negative error code or 0 for success.
>   */
> -struct dma_fence *
> -i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj)
> +int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
> +				     struct dma_fence **fence)
>  {
> -	return dma_fence_get(i915_gem_to_ttm(obj)->moving);
> +	return dma_resv_get_singleton(obj->base.resv, DMA_RESV_USAGE_KERNEL,
> +				      fence);
>  }
>  
>  /**
> @@ -771,23 +772,9 @@ i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj)
>  int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
>  				      bool intr)
>  {
> -	struct dma_fence *fence = i915_gem_to_ttm(obj)->moving;
> -	int ret;
> -
>  	assert_object_held(obj);
> -	if (!fence)
> -		return 0;
> -
> -	ret = dma_fence_wait(fence, intr);
> -	if (ret)
> -		return ret;
> -
> -	if (fence->error)
> -		return fence->error;
> -
> -	i915_gem_to_ttm(obj)->moving = NULL;
> -	dma_fence_put(fence);
> -	return 0;
> +	return dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_KERNEL,
> +				     intr, MAX_SCHEDULE_TIMEOUT);
>  }
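
For readers following along: after this patch the "moving fence" is simply
whatever the reservation object reports under DMA_RESV_USAGE_KERNEL. The
producer side of that contract, i.e. the code that kicks off the async move
or clear, would look roughly like the sketch below. The helper name is made
up for illustration, and the dma_resv entry points used here
(dma_resv_reserve_fences(), dma_resv_add_fence(), DMA_RESV_USAGE_KERNEL)
come from the same batch of dma_resv reworks, so double-check against your
tree; treat this as a sketch of the model rather than actual i915 code.

/*
 * Sketch of the producer side: publish the move fence on the object's
 * reservation object instead of stashing it in bo->moving. The caller
 * must hold the dma_resv lock.
 */
static int example_publish_move_fence(struct ttm_buffer_object *bo,
				      struct dma_fence *fence)
{
	int ret;

	/* Make sure a fence slot is available before adding. */
	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (ret)
		return ret;

	/* KERNEL usage: must signal before anyone touches the pages. */
	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
	return 0;
}

dma_resv_get_singleton() then hands back one fence covering all such fences
(or NULL if there are none), and dma_resv_wait_timeout() with
DMA_RESV_USAGE_KERNEL blocks until they have all signalled, which is exactly
what the two rewritten helpers above do.
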
>  
>  #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
> index f66d46882ea7..be57af8bfb31 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
> @@ -521,9 +521,8 @@ i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
>  	i915_gem_object_unpin_pages(obj);
>  }
>  
> -struct dma_fence *
> -i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj);
> -
> +int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
> +				     struct dma_fence **fence);
>  int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
>  				      bool intr);
>  
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
> index e4a232e22f9d..4d5d0cd64f23 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
> @@ -452,19 +452,6 @@ __i915_ttm_move(struct ttm_buffer_object *bo,
>  	return fence;
>  }
>  
> -static int
> -prev_deps(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
> -	  struct i915_deps *deps)
> -{
> -	int ret;
> -
> -	ret = i915_deps_add_dependency(deps, bo->moving, ctx);
> -	if (!ret)
> -		ret = i915_deps_add_resv(deps, bo->base.resv, ctx);
> -
> -	return ret;
> -}
> -
>  /**
>   * i915_ttm_move - The TTM move callback used by i915.
>   * @bo: The buffer object.
> @@ -519,7 +506,7 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
>  		struct i915_deps deps;
>  
>  		i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
> -		ret = prev_deps(bo, ctx, &deps);
> +		ret = i915_deps_add_resv(&deps, bo->base.resv, ctx);
>  		if (ret) {
>  			i915_refct_sgt_put(dst_rsgt);
>  			return ret;
> diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
> index 52fd6705a518..8737159f4706 100644
> --- a/drivers/gpu/drm/i915/i915_vma.c
> +++ b/drivers/gpu/drm/i915/i915_vma.c
> @@ -1247,10 +1247,17 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
>  	if (err)
>  		return err;
>  
> +	if (vma->obj) {
> +		err = i915_gem_object_get_moving_fence(vma->obj, &moving);
> +		if (err)
> +			return err;
> +	} else {
> +		moving = NULL;
> +	}
> +
>  	if (flags & PIN_GLOBAL)
>  		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
>  
> -	moving = vma->obj ? i915_gem_object_get_moving_fence(vma->obj) : NULL;
>  	if (flags & vma->vm->bind_async_flags || moving) {
>  		/* lock VM */
>  		err = i915_vm_lock_objects(vma->vm, ww);
> -- 
> 2.25.1
> 
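For completeness, the caller-side pattern that paths like i915_vma_pin_ww
end up with is roughly the following. This is a simplified sketch, not the
actual i915 code; example_prepare_object() is a made-up wrapper around the
two helpers changed above, with locking and ww handling elided for brevity.

/*
 * Sketch of a consumer: resolve any pending async move before setting up
 * PTEs for the object. Simplified, error paths trimmed.
 */
static int example_prepare_object(struct drm_i915_gem_object *obj, bool intr)
{
	struct dma_fence *moving = NULL;
	int err;

	/* One fence summarising every DMA_RESV_USAGE_KERNEL fence, or NULL. */
	err = i915_gem_object_get_moving_fence(obj, &moving);
	if (err)
		return err;

	if (!moving)
		return 0;	/* nothing pending, the pages are ready to use */

	/*
	 * Async binding paths would instead add 'moving' as a dependency of
	 * the bind job; the simple path just blocks until it has signalled.
	 */
	dma_fence_put(moving);
	return i915_gem_object_wait_moving_fence(obj, intr);
}
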

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

