[PATCH 4/8] drm/amdgpu: allow direct submission in the VM backends
Christian König
ckoenig.leichtzumerken at gmail.com
Fri Mar 29 10:45:03 UTC 2019
This allows us to update page tables directly while handling a page fault.
Signed-off-by: Christian König <christian.koenig at amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 5 ++++
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c | 4 +++
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 29 +++++++++++++--------
3 files changed, 27 insertions(+), 11 deletions(-)
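
For context, a rough sketch of how a page fault handler could drive the
new flag (the handler itself is not part of this patch; the surrounding
variables and the exact call sequence are assumptions for illustration
only, following the amdgpu_vm_update_params / vm->update_funcs interface
as extended here):

	/* Hypothetical fault-handler fragment: run a page table update
	 * through the VM backend with direct submission, bypassing the
	 * scheduler entity. */
	struct amdgpu_vm_update_params params;
	int r;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.direct = true;	/* new: don't wait, don't use the entity */

	r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL);
	if (r)
		return r;

	/* ... update_funcs->update() calls filling in the PTEs ... */

	r = vm->update_funcs->commit(&params, NULL);
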
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 91baf95212a6..66e27b941a55 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -191,6 +191,11 @@ struct amdgpu_vm_update_params {
*/
struct amdgpu_vm *vm;
+ /**
+ * @direct: if changes should be made directly
+ */
+ bool direct;
+
/**
* @pages_addr:
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index 5222d165abfc..f94e4896079c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -49,6 +49,10 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
{
int r;
+ /* Don't wait for anything during page fault */
+ if (p->direct)
+ return 0;
+
/* Wait for PT BOs to be idle. PTs share the same resv. object
* as the root PD BO
*/
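
The CPU backend change is just an early out: while servicing a fault we
must not wait on fences, since the work those fences belong to may itself
be blocked on the fault being resolved. After this patch the function
reads roughly as follows (a sketch reconstructed from the hunk above,
assuming the then-current amdgpu_bo_sync_wait() helper; the pre-existing
wait path is unchanged):

	static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
					 void *owner,
					 struct dma_fence *exclusive)
	{
		/* Don't wait for anything during page fault */
		if (p->direct)
			return 0;

		/* Wait for PT BOs to be idle. PTs share the same resv.
		 * object as the root PD BO
		 */
		return amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
	}
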
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index ddd181f5ed37..891d597063cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -68,17 +68,17 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
if (r)
return r;
- r = amdgpu_sync_fence(p->adev, &p->job->sync, exclusive, false);
- if (r)
- return r;
+ p->num_dw_left = ndw;
+
+ if (p->direct)
+ return 0;
- r = amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.resv,
- owner, false);
+ r = amdgpu_sync_fence(p->adev, &p->job->sync, exclusive, false);
if (r)
return r;
- p->num_dw_left = ndw;
- return 0;
+ return amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.resv,
+ owner, false);
}
/**
@@ -99,13 +99,21 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
struct dma_fence *f;
int r;
- ring = container_of(p->vm->entity.rq->sched, struct amdgpu_ring, sched);
+ if (p->direct)
+ ring = p->adev->vm_manager.page_fault;
+ else
+ ring = container_of(p->vm->entity.rq->sched,
+ struct amdgpu_ring, sched);
WARN_ON(ib->length_dw == 0);
amdgpu_ring_pad_ib(ring, ib);
WARN_ON(ib->length_dw > p->num_dw_left);
- r = amdgpu_job_submit(p->job, &p->vm->entity,
- AMDGPU_FENCE_OWNER_VM, &f);
+
+ if (p->direct)
+ r = amdgpu_job_submit_direct(p->job, ring, &f);
+ else
+ r = amdgpu_job_submit(p->job, &p->vm->entity,
+ AMDGPU_FENCE_OWNER_VM, &f);
if (r)
goto error;
@@ -120,7 +128,6 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
return r;
}
-
/**
* amdgpu_vm_sdma_copy_ptes - copy the PTEs from mapping
*
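
For reference, "direct" submission here means bypassing the drm scheduler
entirely and pushing the job's IB straight onto the ring. A simplified
sketch of what amdgpu_job_submit_direct() does, condensed from the driver
of this era with fence bookkeeping and error handling trimmed:

	int amdgpu_job_submit_direct(struct amdgpu_job *job,
				     struct amdgpu_ring *ring,
				     struct dma_fence **fence)
	{
		int r;

		/* Attach the job to the ring's scheduler instance and
		 * schedule the IBs immediately, without an entity. */
		job->base.sched = &ring->sched;
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL,
				       fence);
		if (r)
			return r;

		amdgpu_job_free(job);
		return 0;
	}
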
--
2.17.1