[PATCH 5/5] drm/amdgpu: rework page directory filling v2
Christian König deathsimple at vodafone.de
Mon Aug 28 18:50:44 UTC 2017
From: Christian König <christian.koenig at amd.com>
Keep track of relocated PDs/PTs instead of walking and checking all PDs.
v2: fix root PD handling
Signed-off-by: Christian König <christian.koenig at amd.com>
Reviewed-by: Alex Deucher <alexander.deucher at amd.com> (v1)
---
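
The idea in short: any PD/PT that moves (re-validated after eviction, freshly allocated, or invalidated after an error) queues itself on a per-VM "relocated" list, and amdgpu_vm_update_directories() then only walks that list instead of recursing through the whole directory tree. A rough standalone sketch of that bookkeeping (plain userspace C, not driver code; pt_node, mark_relocated and friends are made-up names):

/* Standalone sketch of the "relocated list" idea, not the driver code.
 * Every PD/PT that moves queues itself once; the update step then only
 * touches queued nodes instead of the whole hierarchy. */
#include <stdio.h>

struct pt_node {
	struct pt_node *parent;      /* parent page directory, NULL for the root */
	struct pt_node *next_reloc;  /* link on the per-VM relocated list */
	int id;
	int queued;
};

struct vm {
	struct pt_node *relocated;   /* PDs/PTs whose parent entry is stale */
};

/* Mark a PD/PT as relocated: queue it at most once. */
static void mark_relocated(struct vm *vm, struct pt_node *pt)
{
	if (pt->queued)
		return;
	pt->queued = 1;
	pt->next_reloc = vm->relocated;
	vm->relocated = pt;
}

/* Placeholder for writing the PDE that points at this PT/PD. */
static void update_parent_entry(struct pt_node *pt)
{
	if (pt->parent)
		printf("rewrite PDE for node %d in PD %d\n", pt->id, pt->parent->id);
	else
		printf("node %d is the root, nothing above it\n", pt->id);
	pt->queued = 0;
}

/* Spirit of the new amdgpu_vm_update_directories(): drain the queue,
 * never touch levels that did not change. */
static void update_directories(struct vm *vm)
{
	while (vm->relocated) {
		struct pt_node *pt = vm->relocated;

		vm->relocated = pt->next_reloc;
		update_parent_entry(pt);
	}
}

int main(void)
{
	struct vm vm = { 0 };
	struct pt_node root = { .id = 0 };
	struct pt_node pd = { .parent = &root, .id = 1 };
	struct pt_node pt = { .parent = &pd, .id = 2 };

	mark_relocated(&vm, &pt);
	mark_relocated(&vm, &pd);
	update_directories(&vm);     /* two updates, not a full-tree walk */
	return 0;
}
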
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 89 +++++++++++++++++++++++-----------
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3 ++
2 files changed, 63 insertions(+), 29 deletions(-)
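
One subtlety in the new amdgpu_vm_update_directories() below: vm->status_lock is only held while inspecting the list, is dropped while a level is actually updated, and is re-taken before the next emptiness check. A minimal userspace analogue of that drain pattern (pthread mutex standing in for the spinlock; unlike the kernel loop, which removes entries inside amdgpu_vm_update_level(), the sketch unlinks up front):

/* Userspace analogue of the drain loop in the new update path:
 * hold the lock only while looking at the list, drop it for the heavy
 * per-level update, re-take it before checking the list again. */
#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; int id; };

static pthread_mutex_t status_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *relocated;                    /* protected by status_lock */

static void update_level(struct node *n)          /* stand-in for the real work */
{
	printf("updating parent entries for node %d\n", n->id);
}

static void update_directories(void)
{
	pthread_mutex_lock(&status_lock);
	while (relocated) {
		struct node *n = relocated;

		relocated = n->next;              /* unlink while still locked */
		pthread_mutex_unlock(&status_lock);

		update_level(n);                  /* done without the lock held */

		pthread_mutex_lock(&status_lock);
	}
	pthread_mutex_unlock(&status_lock);
}

int main(void)
{
	struct node b = { NULL, 2 };
	struct node a = { &b, 1 };

	relocated = &a;
	update_directories();
	return 0;
}
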
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 592c3e7..4cdfb70 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -196,7 +196,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
spin_lock(&vm->status_lock);
- list_del_init(&bo_base->vm_status);
+ list_move(&bo_base->vm_status, &vm->relocated);
}
spin_unlock(&vm->status_lock);
@@ -313,8 +313,10 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
entry->base.vm = vm;
entry->base.bo = pt;
list_add_tail(&entry->base.bo_list, &pt->va);
- INIT_LIST_HEAD(&entry->base.vm_status);
- entry->addr = 0;
+ spin_lock(&vm->status_lock);
+ list_add(&entry->base.vm_status, &vm->relocated);
+ spin_unlock(&vm->status_lock);
+ entry->addr = ~0ULL;
}
if (level < adev->vm_manager.num_level) {
@@ -999,18 +1001,17 @@ static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
*/
static int amdgpu_vm_update_level(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
- struct amdgpu_vm_pt *parent,
- unsigned level)
+ struct amdgpu_vm_pt *parent)
{
struct amdgpu_bo *shadow;
struct amdgpu_ring *ring = NULL;
uint64_t pd_addr, shadow_addr = 0;
- uint32_t incr = amdgpu_vm_bo_size(adev, level + 1);
uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
unsigned count = 0, pt_idx, ndw = 0;
struct amdgpu_job *job;
struct amdgpu_pte_update_params params;
struct dma_fence *fence = NULL;
+ uint32_t incr;
int r;
@@ -1058,12 +1059,17 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
/* walk over the address space and update the directory */
for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
- struct amdgpu_bo *bo = parent->entries[pt_idx].base.bo;
+ struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
+ struct amdgpu_bo *bo = entry->base.bo;
uint64_t pde, pt;
if (bo == NULL)
continue;
+ spin_lock(&vm->status_lock);
+ list_del_init(&entry->base.vm_status);
+ spin_unlock(&vm->status_lock);
+
pt = amdgpu_bo_gpu_offset(bo);
pt = amdgpu_gart_get_vm_pde(adev, pt);
/* Don't update huge pages here */
@@ -1074,6 +1080,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
parent->entries[pt_idx].addr = pt | AMDGPU_PTE_VALID;
pde = pd_addr + pt_idx * 8;
+ incr = amdgpu_bo_size(bo);
if (((last_pde + 8 * count) != pde) ||
((last_pt + incr * count) != pt) ||
(count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
@@ -1134,20 +1141,6 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
dma_fence_put(fence);
}
}
- /*
- * Recurse into the subdirectories. This recursion is harmless because
- * we only have a maximum of 5 layers.
- */
- for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
- struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
-
- if (!entry->base.bo)
- continue;
-
- r = amdgpu_vm_update_level(adev, vm, entry, level + 1);
- if (r)
- return r;
- }
return 0;
@@ -1163,7 +1156,8 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
*
* Mark all PD level as invalid after an error.
*/
-static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent)
+static void amdgpu_vm_invalidate_level(struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt *parent)
{
unsigned pt_idx;
@@ -1178,7 +1172,10 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent)
continue;
entry->addr = ~0ULL;
- amdgpu_vm_invalidate_level(entry);
+ spin_lock(&vm->status_lock);
+ list_move(&entry->base.vm_status, &vm->relocated);
+ spin_unlock(&vm->status_lock);
+ amdgpu_vm_invalidate_level(vm, entry);
}
}
@@ -1196,9 +1193,38 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
{
int r;
- r = amdgpu_vm_update_level(adev, vm, &vm->root, 0);
- if (r)
- amdgpu_vm_invalidate_level(&vm->root);
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->relocated)) {
+ struct amdgpu_vm_bo_base *bo_base;
+ struct amdgpu_bo *bo;
+
+ bo_base = list_first_entry(&vm->relocated,
+ struct amdgpu_vm_bo_base,
+ vm_status);
+ spin_unlock(&vm->status_lock);
+
+ bo = bo_base->bo->parent;
+ if (bo) {
+ struct amdgpu_vm_bo_base *parent;
+ struct amdgpu_vm_pt *pt;
+
+ parent = list_first_entry(&bo->va,
+ struct amdgpu_vm_bo_base,
+ bo_list);
+ pt = container_of(parent, struct amdgpu_vm_pt, base);
+
+ r = amdgpu_vm_update_level(adev, vm, pt);
+ if (r) {
+ amdgpu_vm_invalidate_level(vm, &vm->root);
+ return r;
+ }
+ spin_lock(&vm->status_lock);
+ } else {
+ spin_lock(&vm->status_lock);
+ list_del_init(&bo_base->vm_status);
+ }
+ }
+ spin_unlock(&vm->status_lock);
if (vm->use_cpu_for_update) {
/* Flush HDP */
@@ -1600,7 +1626,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
error_free:
amdgpu_job_free(job);
- amdgpu_vm_invalidate_level(&vm->root);
+ amdgpu_vm_invalidate_level(vm, &vm->root);
return r;
}
@@ -2390,9 +2416,13 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
continue;
}
- /* Don't add page tables to the moved state */
- if (bo->tbo.type == ttm_bo_type_kernel)
+ if (bo->tbo.type == ttm_bo_type_kernel) {
+ spin_lock(&bo_base->vm->status_lock);
+ if (list_empty(&bo_base->vm_status))
+ list_add(&bo_base->vm_status, &vm->relocated);
+ spin_unlock(&bo_base->vm->status_lock);
continue;
+ }
spin_lock(&bo_base->vm->status_lock);
list_move(&bo_base->vm_status, &bo_base->vm->moved);
@@ -2482,6 +2512,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->reserved_vmid[i] = NULL;
spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->evicted);
+ INIT_LIST_HEAD(&vm->relocated);
INIT_LIST_HEAD(&vm->moved);
INIT_LIST_HEAD(&vm->freed);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 4e465e8..c3753af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -129,6 +129,9 @@ struct amdgpu_vm {
/* BOs who needs a validation */
struct list_head evicted;
+ /* PT BOs which relocated and their parent need an update */
+ struct list_head relocated;
+
/* BOs moved, but not yet updated in the PT */
struct list_head moved;
--
2.7.4