[PATCH 09/13] drm/amdgpu: add vm recover pt fence
Chunming Zhou
David1.Zhou at amd.com
Mon Jul 25 07:22:29 UTC 2016
Before every job runs, we must make sure the VM it belongs to has been
recovered completely.
Change-Id: Ibe77a3c8f8206def280543fbb4195ad2ab9772e0
Signed-off-by: Chunming Zhou <David1.Zhou at amd.com>
---
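Note: the VM only ever tracks the most recent recovery fence, so each new
fence replaces the previous one under the usual fence refcounting rules.
A minimal sketch of that pattern, with a hypothetical
update_recover_fence() helper (not part of this patch) pulled out for
illustration:

    /* Hypothetical helper showing the refcounting pattern used in
     * amdgpu_vm_recover_page_table_from_shadow() below: drop the
     * reference to the previous fence, take our own reference on the
     * new one, then release the reference the caller handed in.
     */
    static void update_recover_fence(struct amdgpu_vm *vm,
                                     struct fence *fence)
    {
            fence_put(vm->recover_pt_fence);
            vm->recover_pt_fence = fence_get(fence);
            fence_put(fence);
    }

With recover_pt_fence initialized to NULL in amdgpu_vm_init() and a final
fence_put() in amdgpu_vm_fini(), the last fence is released when the VM
goes away (fence_put(NULL) is a no-op).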
drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 ++
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 9 +++++++++
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 21 +++++++++++++++------
3 files changed, 26 insertions(+), 6 deletions(-)
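For context, the intended flow on the GPU reset path would look roughly
like the sketch below; the surrounding reset helper is hypothetical, only
amdgpu_vm_recover_page_table_from_shadow() comes from this series:

    /* Hypothetical reset-path caller, for illustration only: restore
     * the page directory and page tables from their shadows, which
     * stores a fence in vm->recover_pt_fence. amdgpu_job_run() then
     * waits on that fence before scheduling any job of this VM.
     */
    static int amdgpu_reset_recover_vm(struct amdgpu_device *adev,
                                       struct amdgpu_vm *vm)
    {
            int r;

            r = amdgpu_vm_recover_page_table_from_shadow(adev, vm);
            if (r)
                    DRM_ERROR("recovering VM page tables failed (%d)\n", r);
            return r;
    }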
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 878a599..b092eca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -914,6 +914,8 @@ struct amdgpu_vm {
/* client id */
u64 client_id;
+
+ struct fence *recover_pt_fence;
};
struct amdgpu_vm_id {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index aaee0c8..df8b6e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -170,6 +170,15 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
trace_amdgpu_sched_run_job(job);
+
+ if (job->vm && job->vm->recover_pt_fence) {
+ signed long r;
+ r = fence_wait_timeout(job->vm->recover_pt_fence, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (r < 0)
+ DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+ }
+
r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
job->sync.last_vm_update, job, &fence);
if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 8f030a4..636b558 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -705,11 +705,11 @@ error_free:
static int amdgpu_vm_recover_bo_from_shadow(struct amdgpu_device *adev,
struct amdgpu_bo *bo,
struct amdgpu_bo *bo_shadow,
- struct reservation_object *resv)
+ struct reservation_object *resv,
+ struct fence **fence)
{
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
- struct fence *fence;
int r;
uint64_t vram_addr, gtt_addr;
@@ -729,9 +729,9 @@ static int amdgpu_vm_recover_bo_from_shadow(struct amdgpu_device *adev,
goto err3;
r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
- amdgpu_bo_size(bo), resv, &fence);
+ amdgpu_bo_size(bo), resv, fence);
if (!r)
- amdgpu_bo_fence(bo, fence, true);
+ amdgpu_bo_fence(bo, *fence, true);
err3:
amdgpu_bo_unpin(bo_shadow);
@@ -745,6 +745,7 @@ err1:
int amdgpu_vm_recover_page_table_from_shadow(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
+ struct fence *fence;
uint64_t pt_idx;
int r;
@@ -755,11 +756,14 @@ int amdgpu_vm_recover_page_table_from_shadow(struct amdgpu_device *adev,
r = amdgpu_vm_recover_bo_from_shadow(adev, vm->page_directory,
vm->page_directory_shadow,
- NULL);
+ NULL, &fence);
if (r) {
DRM_ERROR("recover page table failed!\n");
goto err;
}
+ fence_put(vm->recover_pt_fence);
+ vm->recover_pt_fence = fence_get(fence);
+ fence_put(fence);
for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
@@ -768,11 +772,14 @@ int amdgpu_vm_recover_page_table_from_shadow(struct amdgpu_device *adev,
if (!bo || !bo_shadow)
continue;
r = amdgpu_vm_recover_bo_from_shadow(adev, bo, bo_shadow,
- NULL);
+ NULL, &fence);
if (r) {
DRM_ERROR("recover page table failed!\n");
goto err;
}
+ fence_put(vm->recover_pt_fence);
+ vm->recover_pt_fence = fence_get(fence);
+ fence_put(fence);
}
err:
@@ -1599,6 +1606,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
INIT_LIST_HEAD(&vm->cleared);
INIT_LIST_HEAD(&vm->freed);
INIT_LIST_HEAD(&vm->list);
+ vm->recover_pt_fence = NULL;
pd_size = amdgpu_vm_directory_size(adev);
pd_entries = amdgpu_vm_num_pdes(adev);
@@ -1705,6 +1713,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_bo_unref(&vm->page_directory);
fence_put(vm->page_directory_fence);
+ fence_put(vm->recover_pt_fence);
}
/**
--
1.9.1