[PATCH 6/8] drm/amdgpu: use the new VM backend for PDEs
Christian König
ckoenig.leichtzumerken at gmail.com
Tue Mar 19 12:44:09 UTC 2019
And remove the existing code now that it is unused.
Signed-off-by: Christian König <christian.koenig at amd.com>
---
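For readers following the series: the call sites below rely on a small per-VM backend vtable with prepare/update/commit hooks. A rough sketch of that interface, with signatures inferred from the calls in this diff (the real struct amdgpu_vm_update_funcs is introduced earlier in the series and may differ in detail):

struct amdgpu_vm_update_funcs {
        /* sync/wait before touching page tables; replaces the old
         * amdgpu_bo_sync_wait() / amdgpu_job_alloc_with_ib() branches */
        int (*prepare)(struct amdgpu_vm_update_params *p, void *owner,
                       struct dma_fence *exclusive);
        /* write 'count' entries starting at byte offset 'pe' of 'bo' */
        int (*update)(struct amdgpu_vm_update_params *p, struct amdgpu_bo *bo,
                      uint64_t pe, uint64_t addr, unsigned count,
                      uint32_t incr, uint64_t flags);
        /* flush (CPU path) or submit the job (SDMA path) and return the fence */
        int (*commit)(struct amdgpu_vm_update_params *p,
                      struct dma_fence **fence);
};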
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 74 +++++---------------------
1 file changed, 14 insertions(+), 60 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 86e12acb8493..735a32b8a596 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1320,10 +1320,10 @@ static void amdgpu_vm_update_func(struct amdgpu_vm_update_params *params,
*
* Makes sure the requested entry in parent is up to date.
*/
-static void amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
- struct amdgpu_vm *vm,
- struct amdgpu_vm_pt *parent,
- struct amdgpu_vm_pt *entry)
+static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
+ struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt *parent,
+ struct amdgpu_vm_pt *entry)
{
struct amdgpu_bo *bo = parent->base.bo, *pbo;
uint64_t pde, pt, flags;
@@ -1335,7 +1335,7 @@ static void amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
level += params->adev->vm_manager.root_level;
amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
pde = (entry - parent->entries) * 8;
- amdgpu_vm_update_func(params, bo, pde, pt, 1, 0, flags);
+ return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags);
}
/*
@@ -1372,33 +1372,18 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
struct amdgpu_vm_update_params params;
- struct amdgpu_job *job;
- unsigned ndw = 0;
- int r = 0;
+ int r;
if (list_empty(&vm->relocated))
return 0;
-restart:
memset(&params, 0, sizeof(params));
params.adev = adev;
+ params.vm = vm;
- if (vm->use_cpu_for_update) {
- r = amdgpu_bo_sync_wait(vm->root.base.bo,
- AMDGPU_FENCE_OWNER_VM, true);
- if (unlikely(r))
- return r;
-
- params.func = amdgpu_vm_cpu_set_ptes;
- } else {
- ndw = 512;
- r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
- if (r)
- return r;
-
- params.ib = &job->ibs[0];
- params.func = amdgpu_vm_do_set_ptes;
- }
+ r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL);
+ if (r)
+ return r;
while (!list_empty(&vm->relocated)) {
struct amdgpu_vm_pt *pt, *entry;
@@ -1411,49 +1396,18 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
if (!pt)
continue;
- amdgpu_vm_update_pde(&params, vm, pt, entry);
-
- if (!vm->use_cpu_for_update &&
- (ndw - params.ib->length_dw) < 32)
- break;
- }
-
- if (vm->use_cpu_for_update) {
- /* Flush HDP */
- mb();
- amdgpu_asic_flush_hdp(adev, NULL);
- } else if (params.ib->length_dw == 0) {
- amdgpu_job_free(job);
- } else {
- struct amdgpu_bo *root = vm->root.base.bo;
- struct amdgpu_ring *ring;
- struct dma_fence *fence;
-
- ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
- sched);
-
- amdgpu_ring_pad_ib(ring, params.ib);
- amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
- AMDGPU_FENCE_OWNER_VM, false);
- WARN_ON(params.ib->length_dw > ndw);
- r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM,
- &fence);
+ r = amdgpu_vm_update_pde(&params, vm, pt, entry);
if (r)
goto error;
-
- amdgpu_bo_fence(root, fence, true);
- dma_fence_put(vm->last_update);
- vm->last_update = fence;
}
- if (!list_empty(&vm->relocated))
- goto restart;
-
+ r = vm->update_funcs->commit(&params, &vm->last_update);
+ if (r)
+ goto error;
return 0;
error:
amdgpu_vm_invalidate_pds(adev, vm);
- amdgpu_job_free(job);
return r;
}
--
2.17.1
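For reference, the per-call-site use_cpu_for_update branches removed above can go away because the backend is chosen once per VM; a sketch of the assumed selection (done in an earlier patch of this series, names may differ):

        /* sketch: assumed wiring in amdgpu_vm_init() */
        if (vm->use_cpu_for_update)
                vm->update_funcs = &amdgpu_vm_cpu_funcs;
        else
                vm->update_funcs = &amdgpu_vm_sdma_funcs;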