[PATCH 21/23] dma-buf: wait for map to complete for static attachments
Daniel Vetter
daniel at ffwll.ch
Tue Mar 29 15:56:45 UTC 2022
On Mon, Mar 21, 2022 at 02:58:54PM +0100, Christian König wrote:
> We have previously done that in the individual drivers but it is
> more defensive to move that into the common code.
>
> Dynamic attachments should wait for map operations to complete by themselves.
>
> Signed-off-by: Christian König <christian.koenig at amd.com>
You could perhaps ditch the call to i915_gem_object_wait_migration() in
the i915 dma-buf code too with this, but that entire thing is a bit of a mess
and totally different. So I guess we'll leave that.
Reviewed-by: Daniel Vetter <daniel.vetter at ffwll.ch>
> ---
> drivers/dma-buf/dma-buf.c | 18 +++++++++++++++---
> drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 14 +-------------
> drivers/gpu/drm/nouveau/nouveau_prime.c | 17 +----------------
> drivers/gpu/drm/radeon/radeon_prime.c | 16 +++-------------
> 4 files changed, 20 insertions(+), 45 deletions(-)
>
> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
> index 528983d3ba64..d3dd602c4753 100644
> --- a/drivers/dma-buf/dma-buf.c
> +++ b/drivers/dma-buf/dma-buf.c
> @@ -660,12 +660,24 @@ static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
> enum dma_data_direction direction)
> {
> struct sg_table *sg_table;
> + signed long ret;
>
> sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
> + if (IS_ERR_OR_NULL(sg_table))
> + return sg_table;
> +
> + if (!dma_buf_attachment_is_dynamic(attach)) {
> + ret = dma_resv_wait_timeout(attach->dmabuf->resv,
> + DMA_RESV_USAGE_KERNEL, true,
> + MAX_SCHEDULE_TIMEOUT);
> + if (ret < 0) {
> + attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
> + direction);
> + return ERR_PTR(ret);
> + }
> + }
>
> - if (!IS_ERR_OR_NULL(sg_table))
> - mangle_sg_table(sg_table);
> -
> + mangle_sg_table(sg_table);
> return sg_table;
> }
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> index 579adfafe4d0..782cbca37538 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
> @@ -102,21 +102,9 @@ static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
> {
> struct drm_gem_object *obj = attach->dmabuf->priv;
> struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
> - int r;
>
> /* pin buffer into GTT */
> - r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
> - if (r)
> - return r;
> -
> - if (bo->tbo.moving) {
> - r = dma_fence_wait(bo->tbo.moving, true);
> - if (r) {
> - amdgpu_bo_unpin(bo);
> - return r;
> - }
> - }
> - return 0;
> + return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
> }
>
> /**
> diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
> index 60019d0532fc..347488685f74 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_prime.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
> @@ -93,22 +93,7 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
> if (ret)
> return -EINVAL;
>
> - ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
> - if (ret)
> - goto error;
> -
> - if (nvbo->bo.moving)
> - ret = dma_fence_wait(nvbo->bo.moving, true);
> -
> - ttm_bo_unreserve(&nvbo->bo);
> - if (ret)
> - goto error;
> -
> - return ret;
> -
> -error:
> - nouveau_bo_unpin(nvbo);
> - return ret;
> + return 0;
> }
>
> void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
> diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
> index 4a90807351e7..42a87948e28c 100644
> --- a/drivers/gpu/drm/radeon/radeon_prime.c
> +++ b/drivers/gpu/drm/radeon/radeon_prime.c
> @@ -77,19 +77,9 @@ int radeon_gem_prime_pin(struct drm_gem_object *obj)
>
> /* pin buffer into GTT */
> ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
> - if (unlikely(ret))
> - goto error;
> -
> - if (bo->tbo.moving) {
> - ret = dma_fence_wait(bo->tbo.moving, false);
> - if (unlikely(ret)) {
> - radeon_bo_unpin(bo);
> - goto error;
> - }
> - }
> -
> - bo->prime_shared_count++;
> -error:
> + if (likely(ret == 0))
> + bo->prime_shared_count++;
> +
> radeon_bo_unreserve(bo);
> return ret;
> }
> --
> 2.25.1
>
--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
More information about the dri-devel
mailing list