[PATCH 1/3] drm/amdgpu: add a bool to specify whether a vm flush is needed

Chunming Zhou <David1.Zhou@amd.com>
Fri Jul 1 10:11:23 UTC 2016


Use an explicit flag to track whether a VM flush is needed instead of
overloading vm_pd_addr with the AMDGPU_VM_NO_FLUSH sentinel; this avoids
job->vm_pd_addr being changed by amdgpu_vm_grab_id().

Change-Id: I3c3f2497a8e9794cd1612c226817423e2001aa43
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h     |  3 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c  |  3 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c |  3 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  | 13 +++++++------
 4 files changed, 13 insertions(+), 9 deletions(-)
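
For reference, the resulting calling convention, condensed from the hunks
below (a simplified sketch; locking and error handling elided):

    /* amdgpu_vm_grab_id() now reports the flush decision through an
     * explicit out-parameter instead of overwriting *vm_pd_addr with
     * the AMDGPU_VM_NO_FLUSH sentinel, so the page directory address
     * survives in job->vm_pd_addr.
     */
    r = amdgpu_vm_grab_id(vm, ring, &job->sync,
                          &job->base.s_fence->finished,
                          &job->vm_id, &job->vm_pd_addr,
                          &job->need_flush);

    /* amdgpu_vm_flush() keys the pipeline sync and the VM flush off
     * the flag rather than off a magic pd_addr value.
     */
    r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr,
                        job->gds_base, job->gds_size,
                        job->gws_base, job->gws_size,
                        job->oa_base, job->oa_size,
                        job->need_flush);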

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 4cfc4eb..69fc1ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -948,7 +948,7 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 				  struct amdgpu_vm *vm);
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		      struct amdgpu_sync *sync, struct fence *fence,
-		      unsigned *vm_id, uint64_t *vm_pd_addr);
+		      unsigned *vm_id, uint64_t *vm_pd_addr, bool *need_flush);
 int amdgpu_vm_flush(struct amdgpu_ring *ring,
 		    unsigned vm_id, uint64_t pd_addr,
 		    uint32_t gds_base, uint32_t gds_size,
@@ -1267,6 +1267,7 @@ struct amdgpu_job {
 	uint32_t		num_ibs;
 	void			*owner;
 	uint64_t		ctx;
+	bool                    need_flush;
 	unsigned		vm_id;
 	uint64_t		vm_pd_addr;
 	uint32_t		gds_base, gds_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 0bf6c1b..21e40e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -163,7 +163,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr,
 				    job->gds_base, job->gds_size,
 				    job->gws_base, job->gws_size,
-				    job->oa_base, job->oa_size);
+				    job->oa_base, job->oa_size,
+				    job->need_flush);
 		if (r) {
 			amdgpu_ring_undo(ring);
 			return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 0b55025..4d78a13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -145,7 +145,8 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
 
 		r = amdgpu_vm_grab_id(vm, ring, &job->sync,
 				      &job->base.s_fence->finished,
-				      &job->vm_id, &job->vm_pd_addr);
+				      &job->vm_id, &job->vm_pd_addr,
+				      &job->need_flush);
 		if (r)
 			DRM_ERROR("Error getting VM ID (%d)\n", r);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 6a02f0a..b012068 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -179,7 +179,7 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
  */
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		      struct amdgpu_sync *sync, struct fence *fence,
-		      unsigned *vm_id, uint64_t *vm_pd_addr)
+		      unsigned *vm_id, uint64_t *vm_pd_addr, bool *need_flush)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct fence *updates = sync->last_vm_update;
@@ -236,6 +236,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	}
 	kfree(fences);
 
+	*need_flush = true;
 	/* Check if we can use a VMID already assigned to this VM */
 	i = ring->idx;
 	do {
@@ -278,7 +279,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		vm->ids[ring->idx] = id;
 
 		*vm_id = id - adev->vm_manager.ids;
-		*vm_pd_addr = AMDGPU_VM_NO_FLUSH;
+		*need_flush = false;
 		trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
 
 		mutex_unlock(&adev->vm_manager.lock);
@@ -356,7 +357,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
 		    unsigned vm_id, uint64_t pd_addr,
 		    uint32_t gds_base, uint32_t gds_size,
 		    uint32_t gws_base, uint32_t gws_size,
-		    uint32_t oa_base, uint32_t oa_size)
+		    uint32_t oa_base, uint32_t oa_size,
+		    bool need_flush)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
@@ -370,12 +372,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
 	int r;
 
 	if (ring->funcs->emit_pipeline_sync && (
-	    pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed ||
+	    need_flush || gds_switch_needed ||
 	    amdgpu_vm_ring_has_compute_vm_bug(ring)))
 		amdgpu_ring_emit_pipeline_sync(ring);
 
-	if (ring->funcs->emit_vm_flush &&
-	    pd_addr != AMDGPU_VM_NO_FLUSH) {
+	if (ring->funcs->emit_vm_flush && need_flush) {
 		struct fence *fence;
 
 		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id);
-- 
1.9.1


