[PATCH 1/2] drm/amdgpu: fix and cleanup amdgpu_gem_object_close v2

Pan, Xinhui Xinhui.Pan at amd.com
Tue Mar 31 01:59:30 UTC 2020


Reviewed-by: xinhui pan <xinhui.pan at amd.com>


> 2020年3月30日 18:50,Christian König <ckoenig.leichtzumerken at gmail.com> 写道:
> 
> The problem is that we can't add the clear fence to the BO
> when there is an exclusive fence on it since we can't
> guarantee that the clear fence will complete after the
> exclusive one.
> 
> To fix this refactor the function and also add the exclusive
> fence as shared to the resv object.
> 
> v2: fix warning
> v3: add excl fence as shared instead
> 
> Signed-off-by: Christian König <christian.koenig at amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 41 ++++++++++++++-----------
> 1 file changed, 23 insertions(+), 18 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> index 5bec66e6b1f8..a0be80513e96 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
> @@ -161,16 +161,17 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
> 
> 	struct amdgpu_bo_list_entry vm_pd;
> 	struct list_head list, duplicates;
> +	struct dma_fence *fence = NULL;
> 	struct ttm_validate_buffer tv;
> 	struct ww_acquire_ctx ticket;
> 	struct amdgpu_bo_va *bo_va;
> -	int r;
> +	long r;
> 
> 	INIT_LIST_HEAD(&list);
> 	INIT_LIST_HEAD(&duplicates);
> 
> 	tv.bo = &bo->tbo;
> -	tv.num_shared = 1;
> +	tv.num_shared = 2;
> 	list_add(&tv.head, &list);
> 
> 	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
> @@ -178,28 +179,32 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
> 	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
> 	if (r) {
> 		dev_err(adev->dev, "leaking bo va because "
> -			"we fail to reserve bo (%d)\n", r);
> +			"we fail to reserve bo (%ld)\n", r);
> 		return;
> 	}
> 	bo_va = amdgpu_vm_bo_find(vm, bo);
> -	if (bo_va && --bo_va->ref_count == 0) {
> -		amdgpu_vm_bo_rmv(adev, bo_va);
> +	if (!bo_va || --bo_va->ref_count)
> +		goto out_unlock;
> 
> -		if (amdgpu_vm_ready(vm)) {
> -			struct dma_fence *fence = NULL;
> +	amdgpu_vm_bo_rmv(adev, bo_va);
> +	if (!amdgpu_vm_ready(vm))
> +		goto out_unlock;
> 
> -			r = amdgpu_vm_clear_freed(adev, vm, &fence);
> -			if (unlikely(r)) {
> -				dev_err(adev->dev, "failed to clear page "
> -					"tables on GEM object close (%d)\n", r);
> -			}
> +	fence = dma_resv_get_excl(bo->tbo.base.resv);
> +	amdgpu_bo_fence(bo, fence, true);
> +	fence = NULL;
> 
> -			if (fence) {
> -				amdgpu_bo_fence(bo, fence, true);
> -				dma_fence_put(fence);
> -			}
> -		}
> -	}
> +	r = amdgpu_vm_clear_freed(adev, vm, &fence);
> +	if (r || !fence)
> +		goto out_unlock;
> +
> +	amdgpu_bo_fence(bo, fence, true);
> +	dma_fence_put(fence);
> +
> +out_unlock:
> +	if (unlikely(r < 0))
> +		dev_err(adev->dev, "failed to clear page "
> +			"tables on GEM object close (%ld)\n", r);
> 	ttm_eu_backoff_reservation(&ticket, &list);
> }
> 
> -- 
> 2.17.1
> 



More information about the amd-gfx mailing list