[PATCH 03/13] drm/amdgpu: update pd shadow while updating pd
Chunming Zhou
David1.Zhou at amd.com
Mon Jul 25 07:22:23 UTC 2016
Keep the page directory's shadow BO in sync with the real page
directory: amdgpu_vm_update_page_directory() gains a 'shadow' flag
selecting which BO to write, callers now invoke it twice (real then
shadow), the shadow PD/PT entries are added to the validation
duplicates list, and the shadow BO is created sharing the page
directory's reservation object.

Change-Id: Icafa90a6625ea7b5ab3e360ba0d73544cda251b0
Signed-off-by: Chunming Zhou <David1.Zhou at amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 ++-
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 6 +++++-
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 5 ++++-
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 32 +++++++++++++++++++++++---------
4 files changed, 34 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index af536fb..7f57b0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -893,6 +893,7 @@ struct amdgpu_vm {
/* contains the page directory */
struct amdgpu_bo *page_directory;
struct amdgpu_bo *page_directory_shadow;
+ struct amdgpu_bo_list_entry pd_entry_shadow;
unsigned max_pde_used;
struct fence *page_directory_fence;
@@ -980,7 +981,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
+ struct amdgpu_vm *vm, bool shadow);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 55bba02..4f89bad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -590,7 +590,11 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
struct amdgpu_bo *bo;
int i, r;
- r = amdgpu_vm_update_page_directory(adev, vm);
+ r = amdgpu_vm_update_page_directory(adev, vm, false);
+ if (r)
+ return r;
+
+ r = amdgpu_vm_update_page_directory(adev, vm, true);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 0069aec..29729b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -578,7 +578,10 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
goto error_unreserve;
}
- r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
+ r = amdgpu_vm_update_page_directory(adev, bo_va->vm, false);
+ if (r)
+ goto error_unreserve;
+ r = amdgpu_vm_update_page_directory(adev, bo_va->vm, true);
if (r)
goto error_unreserve;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index c0f6479a..f13bab9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -132,13 +132,15 @@ void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates)
/* add the vm page table to the list */
for (i = 0; i <= vm->max_pde_used; ++i) {
struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
+ struct amdgpu_bo_list_entry *entry_shadow = &vm->page_tables[i].entry_shadow;
- if (!entry->robj)
+ if (!entry->robj || !entry_shadow->robj)
continue;
list_add(&entry->tv.head, duplicates);
+ list_add(&entry_shadow->tv.head, duplicates);
}
-
+ list_add(&vm->pd_entry_shadow.tv.head, duplicates);
}
/**
@@ -601,10 +603,11 @@ uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
* Returns 0 for success, error for failure.
*/
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
+ struct amdgpu_vm *vm, bool shadow)
{
struct amdgpu_ring *ring;
- struct amdgpu_bo *pd = vm->page_directory;
+ struct amdgpu_bo *pd = shadow ? vm->page_directory_shadow :
+ vm->page_directory;
uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
uint64_t last_pde = ~0, last_pt = ~0;
@@ -639,10 +642,15 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
continue;
pt = amdgpu_bo_gpu_offset(bo);
- if (vm->page_tables[pt_idx].addr == pt)
- continue;
- vm->page_tables[pt_idx].addr = pt;
- vm->page_tables[pt_idx].addr_shadow = pt;
+ if (!shadow) {
+ if (vm->page_tables[pt_idx].addr == pt)
+ continue;
+ vm->page_tables[pt_idx].addr = pt;
+ } else {
+ if (vm->page_tables[pt_idx].addr_shadow == pt)
+ continue;
+ vm->page_tables[pt_idx].addr_shadow = pt;
+ }
pde = pd_addr + pt_idx * 8;
if (((last_pde + 8 * count) != pde) ||
@@ -1556,9 +1564,15 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
r = amdgpu_bo_create(adev, pd_size, align, true,
AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_GEM_CREATE_CPU_GTT_USWC,
- NULL, NULL, &vm->page_directory_shadow);
+ NULL, vm->page_directory->tbo.resv,
+ &vm->page_directory_shadow);
if (r)
goto error_free_page_directory;
+ vm->pd_entry_shadow.robj = vm->page_directory_shadow;
+ vm->pd_entry_shadow.priority = 0;
+ vm->pd_entry_shadow.tv.bo = &vm->page_directory_shadow->tbo;
+ vm->pd_entry_shadow.tv.shared = true;
+ vm->pd_entry_shadow.user_pages = NULL;
return 0;
--
1.9.1
More information about the amd-gfx
mailing list