[PATCH 5/5] drm/amdgpu: allow concurrent VM flushes

Christian König <deathsimple@vodafone.de>
Wed Apr 5 16:22:00 UTC 2017


From: Christian König <christian.koenig@amd.com>

Enable concurrent VM flushes for Vega10.

Starting with Vega10 the hardware can execute VM flushes concurrently with
other submissions. So instead of skipping a VMID that still needs a flush,
amdgpu_vm_grab_id() can now reuse it and just mark the job as needing a
flush; only ASICs older than Vega10 keep skipping such VMIDs.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 51 +++++++++++++++++++---------------
 1 file changed, 28 insertions(+), 23 deletions(-)
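
For reviewers: below is a stand-alone sketch of the reuse decision this patch
adds to amdgpu_vm_grab_id(), written as plain C that compiles outside the
kernel. It is only an illustration under simplified assumptions, not driver
code; the names can_reuse_vmid, vmid_state, last_flush_signaled and
updates_newer are made up here and do not exist in amdgpu.

#include <stdbool.h>
#include <stdio.h>

/* ASIC generations, reduced to the two cases that matter for this patch. */
enum asic { CHIP_POLARIS10, CHIP_VEGA10 };

/* Per-VMID state, reduced to the two facts the decision depends on. */
struct vmid_state {
	bool last_flush_signaled;	/* last flush on this hub is done (or same fence context) */
	bool updates_newer;		/* page table updates newer than the last flushed ones */
};

/*
 * Returns true if the VMID can be reused for the new submission;
 * *needs_flush tells the caller whether it must still emit a VM flush.
 */
static bool can_reuse_vmid(enum asic asic, const struct vmid_state *id,
			   bool *needs_flush)
{
	*needs_flush = !id->last_flush_signaled || id->updates_newer;

	/* Concurrent flushes are only possible starting with Vega10 */
	if (asic < CHIP_VEGA10 && *needs_flush)
		return false;	/* pre-Vega10: keep looking for another VMID */

	return true;		/* Vega10+: reuse it and flush if necessary */
}

int main(void)
{
	struct vmid_state id = { .last_flush_signaled = false, .updates_newer = false };
	bool flush;

	printf("pre-Vega10: reuse=%d\n", can_reuse_vmid(CHIP_POLARIS10, &id, &flush));
	printf("Vega10+:    reuse=%d flush=%d\n",
	       can_reuse_vmid(CHIP_VEGA10, &id, &flush), flush);
	return 0;
}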

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 6fd1952..1bb2f8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -462,11 +462,12 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	}
 	kfree(fences);
 
-	job->vm_needs_flush = true;
+	job->vm_needs_flush = false;
 	/* Check if we can use a VMID already assigned to this VM */
 	i = ring->idx;
 	do {
 		struct fence *flushed;
+		bool needs_flush = false;
 
 		id = vm->ids[i++];
 		if (i == AMDGPU_MAX_RINGS)
@@ -484,16 +485,17 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		if (job->vm_pd_addr != id->pd_gpu_addr[vmhub])
 			continue;
 
-		if (!id->last_flush[vmhub])
-			continue;
-
-		if (id->last_flush[vmhub]->context != fence_context &&
-		    !fence_is_signaled(id->last_flush[vmhub]))
-			continue;
+		if (!id->last_flush[vmhub] ||
+		    (id->last_flush[vmhub]->context != fence_context &&
+		     !fence_is_signaled(id->last_flush[vmhub])))
+			needs_flush = true;
 
 		flushed  = id->flushed_updates[vmhub];
-		if (updates &&
-		    (!flushed || fence_is_later(updates, flushed)))
+		if (updates && (!flushed || fence_is_later(updates, flushed)))
+			needs_flush = true;
+
+		/* Concurrent flushes are only possible starting with Vega10 */
+		if (adev->asic_type < CHIP_VEGA10 && needs_flush)
 			continue;
 
 		/* Good we can use this VMID. Remember this submission as
@@ -503,15 +505,15 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		if (r)
 			goto error;
 
-		list_move_tail(&id->list, &adev->vm_manager.ids_lru);
-		vm->ids[ring->idx] = id;
-
-		job->vm_id = id - adev->vm_manager.ids;
-		job->vm_needs_flush = false;
-		trace_amdgpu_vm_grab_id(vm, ring->idx, job);
+		if (updates && (!flushed || fence_is_later(updates, flushed))) {
+			fence_put(id->flushed_updates[vmhub]);
+			id->flushed_updates[vmhub] = fence_get(updates);
+		}
 
-		mutex_unlock(&adev->vm_manager.lock);
-		return 0;
+		if (needs_flush)
+			goto needs_flush;
+		else
+			goto no_flush_needed;
 
 	} while (i != ring->idx);
 
@@ -523,18 +525,21 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	if (r)
 		goto error;
 
+	id->pd_gpu_addr[vmhub] = job->vm_pd_addr;
+	fence_put(id->flushed_updates[vmhub]);
+	id->flushed_updates[vmhub] = fence_get(updates);
+	id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
+	atomic64_set(&id->owner, vm->client_id);
+
+needs_flush:
+	job->vm_needs_flush = true;
 	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
 		fence_put(id->last_flush[i]);
 		id->last_flush[i] = NULL;
 	}
 
-	fence_put(id->flushed_updates[vmhub]);
-	id->flushed_updates[vmhub] = fence_get(updates);
-
-	id->pd_gpu_addr[vmhub] = job->vm_pd_addr;
-	id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
+no_flush_needed:
 	list_move_tail(&id->list, &adev->vm_manager.ids_lru);
-	atomic64_set(&id->owner, vm->client_id);
 	vm->ids[ring->idx] = id;
 
 	job->vm_id = id - adev->vm_manager.ids;
-- 
2.5.0