[PATCH 8/8] drm/amdgpu: use the new VM backend for clears
zhoucm1
zhoucm1 at amd.com
Wed Mar 20 04:27:05 UTC 2019
patch#2 and patch#4 are Acked-by: Chunming Zhou <david1.zhou at amd.com>
patch#1, #3, #5~#8 are Reviewed-by: Chunming Zhou <david1.zhou at amd.com>
On 2019-03-19 20:44, Christian König wrote:
> And remove the existing code when it is unused.
>
> Signed-off-by: Christian König <christian.koenig at amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 89 +++++++++-----------------
> 1 file changed, 32 insertions(+), 57 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> index 729da1c486cd..af1a7020c3ab 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> @@ -711,11 +711,9 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
> {
> struct ttm_operation_ctx ctx = { true, false };
> unsigned level = adev->vm_manager.root_level;
> + struct amdgpu_vm_update_params params;
> struct amdgpu_bo *ancestor = bo;
> - struct dma_fence *fence = NULL;
> unsigned entries, ats_entries;
> - struct amdgpu_ring *ring;
> - struct amdgpu_job *job;
> uint64_t addr;
> int r;
>
> @@ -750,8 +748,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
> }
> }
>
> - ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
> -
> r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
> if (r)
> return r;
> @@ -772,60 +768,45 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
>
> }
>
> - r = amdgpu_job_alloc_with_ib(adev, 64, &job);
> + memset(&params, 0, sizeof(params));
> + params.adev = adev;
> + params.vm = vm;
> +
> + r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL);
> if (r)
> return r;
>
> - do {
> - addr = amdgpu_bo_gpu_offset(bo);
> - if (ats_entries) {
> - uint64_t ats_value;
> -
> - ats_value = AMDGPU_PTE_DEFAULT_ATC;
> - if (level != AMDGPU_VM_PTB)
> - ats_value |= AMDGPU_PDE_PTE;
> -
> - amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
> - ats_entries, 0, ats_value);
> - addr += ats_entries * 8;
> - }
> -
> - if (entries) {
> - uint64_t value = 0;
> -
> - /* Workaround for fault priority problem on GMC9 */
> - if (level == AMDGPU_VM_PTB &&
> - adev->asic_type >= CHIP_VEGA10)
> - value = AMDGPU_PTE_EXECUTABLE;
> -
> - amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
> - entries, 0, value);
> - }
> + addr = 0;
> + if (ats_entries) {
> + uint64_t ats_value;
>
> - bo = bo->shadow;
> - } while (bo);
> + ats_value = AMDGPU_PTE_DEFAULT_ATC;
> + if (level != AMDGPU_VM_PTB)
> + ats_value |= AMDGPU_PDE_PTE;
>
> - amdgpu_ring_pad_ib(ring, &job->ibs[0]);
> + r = vm->update_funcs->update(&params, bo, addr, 0, ats_entries,
> + 0, ats_value);
> + if (r)
> + return r;
>
> - WARN_ON(job->ibs[0].length_dw > 64);
> - r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
> - AMDGPU_FENCE_OWNER_KFD, false);
> - if (r)
> - goto error_free;
> + addr += ats_entries * 8;
> + }
>
> - r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED,
> - &fence);
> - if (r)
> - goto error_free;
> + if (entries) {
> + uint64_t value = 0;
>
> - amdgpu_bo_fence(vm->root.base.bo, fence, true);
> - dma_fence_put(fence);
> + /* Workaround for fault priority problem on GMC9 */
> + if (level == AMDGPU_VM_PTB &&
> + adev->asic_type >= CHIP_VEGA10)
> + value = AMDGPU_PTE_EXECUTABLE;
>
> - return 0;
> + r = vm->update_funcs->update(&params, bo, addr, 0, entries,
> + 0, value);
> + if (r)
> + return r;
> + }
>
> -error_free:
> - amdgpu_job_free(job);
> - return r;
> + return vm->update_funcs->commit(&params, NULL);
> }
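
Side note for readers following the series: after this change amdgpu_vm_clear_bo()
only drives the generic prepare()/update()/commit() callbacks of the new VM update
backend, so the dedicated job/IB handling above can indeed go away. A minimal sketch
of that pattern, assuming the callback usage shown in the hunk above (the helper name
and its parameter list below are hypothetical, not part of the patch):

	/* Hypothetical example of driving the new VM update backend. */
	static int example_clear_bo(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo *bo,
				    unsigned int entries, uint64_t flags)
	{
		struct amdgpu_vm_update_params params;
		int r;

		memset(&params, 0, sizeof(params));
		params.adev = adev;
		params.vm = vm;

		/* prepare: let the backend sync/set up before any writes;
		 * the clear path uses AMDGPU_FENCE_OWNER_KFD here */
		r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL);
		if (r)
			return r;

		/* update: write 'entries' page table entries with 'flags',
		 * starting at offset 0 inside bo */
		r = vm->update_funcs->update(&params, bo, 0, 0, entries, 0, flags);
		if (r)
			return r;

		/* commit: submit/flush the accumulated updates; NULL means
		 * the caller does not need the resulting fence */
		return vm->update_funcs->commit(&params, NULL);
	}

Which backend actually performs the writes (CPU or SDMA update path) is selected per
VM, so the clear path no longer has to care.
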
>
> /**
> @@ -913,7 +894,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
> if (r)
> goto error_free_pt;
>
> - return 1;
> + return 0;
>
> error_free_pt:
> amdgpu_bo_unref(&pt->shadow);
> @@ -1421,12 +1402,10 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
> unsigned shift, parent_shift, mask;
> uint64_t incr, entry_end, pe_start;
> struct amdgpu_bo *pt;
> - bool need_to_sync;
>
> r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor);
> - if (r < 0)
> + if (r)
> return r;
> - need_to_sync = (r && params->vm->use_cpu_for_update);
>
> pt = cursor.entry->base.bo;
>
> @@ -1474,10 +1453,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
> entry_end += cursor.pfn & ~(entry_end - 1);
> entry_end = min(entry_end, end);
>
> - if (need_to_sync)
> - r = amdgpu_bo_sync_wait(params->vm->root.base.bo,
> - AMDGPU_FENCE_OWNER_VM, true);
> -
> do {
> uint64_t upd_end = min(entry_end, frag_end);
> unsigned nptes = (upd_end - frag_start) >> shift;