[PATCH 2/5] drm/amdgpu: separate VMID flush tracking per hub
Christian König
deathsimple at vodafone.de
Wed Apr 5 16:21:57 UTC 2017
From: Christian König <christian.koenig at amd.com>
Rather inefficient, but this way we only need to flush the current hub.
I wonder if we shouldn't go all the way and separate the VMID ranges completely.
Signed-off-by: Christian König <christian.koenig at amd.com>
---
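Not part of the commit message, just to make the per-hub idea concrete: on
ASICs with more than one VMHUB (e.g. Vega10 with its GFX and MM hubs) each
hub translates through its own cached page-table pointers, so a VMID's flush
state has to be remembered per hub. Below is a minimal standalone sketch of
that bookkeeping; toy_vmid, toy_flush, toy_can_reuse and NUM_HUBS are
made-up illustration names, the real code uses struct amdgpu_vm_id with
fences instead of sequence numbers.

/*
 * Standalone sketch of per-hub flush tracking. All names here are
 * placeholders for illustration, not the kernel structures or API.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define NUM_HUBS 2 /* e.g. GFXHUB and MMHUB */

struct toy_vmid {
	/* before this patch: one field each; after: one entry per hub */
	uint64_t pd_gpu_addr[NUM_HUBS];
	int      last_flush_seq[NUM_HUBS]; /* stand-in for the fence */
};

/* Flush only the hub the ring belongs to and remember it there. */
static void toy_flush(struct toy_vmid *id, unsigned hub,
		      uint64_t pd_addr, int seq)
{
	id->pd_gpu_addr[hub]    = pd_addr;
	id->last_flush_seq[hub] = seq;
	printf("flushed hub %u for PD %#llx (seq %d)\n",
	       hub, (unsigned long long)pd_addr, seq);
}

/* Reuse without flushing only if *this* hub already saw this PD. */
static int toy_can_reuse(const struct toy_vmid *id, unsigned hub,
			 uint64_t pd_addr)
{
	return id->pd_gpu_addr[hub] == pd_addr &&
	       id->last_flush_seq[hub] != 0;
}

int main(void)
{
	struct toy_vmid id;

	memset(&id, 0, sizeof(id));
	toy_flush(&id, 0, 0x100000, 1); /* a GFX ring flushes hub 0 */
	printf("hub 0 reuse: %d\n", toy_can_reuse(&id, 0, 0x100000));
	printf("hub 1 reuse: %d\n", toy_can_reuse(&id, 1, 0x100000));
	return 0;
}

Compiled standalone this prints that hub 0 can reuse the ID after its flush
while hub 1 still needs its own, which is exactly the situation the per-hub
arrays in the patch below track.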
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 36 ++++++++++++++++++++--------------
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 6 +++---
2 files changed, 24 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 8785420..6fd1952 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -406,6 +406,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		      struct amdgpu_job *job)
 {
 	struct amdgpu_device *adev = ring->adev;
+	unsigned vmhub = ring->funcs->vmhub;
 	uint64_t fence_context = adev->fence_context + ring->idx;
 	struct fence *updates = sync->last_vm_update;
 	struct amdgpu_vm_id *id, *idle;
@@ -480,17 +481,17 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		if (atomic64_read(&id->owner) != vm->client_id)
 			continue;
 
-		if (job->vm_pd_addr != id->pd_gpu_addr)
+		if (job->vm_pd_addr != id->pd_gpu_addr[vmhub])
 			continue;
 
-		if (!id->last_flush)
+		if (!id->last_flush[vmhub])
 			continue;
 
-		if (id->last_flush->context != fence_context &&
-		    !fence_is_signaled(id->last_flush))
+		if (id->last_flush[vmhub]->context != fence_context &&
+		    !fence_is_signaled(id->last_flush[vmhub]))
 			continue;
 
-		flushed = id->flushed_updates;
+		flushed = id->flushed_updates[vmhub];
 		if (updates &&
 		    (!flushed || fence_is_later(updates, flushed)))
 			continue;
@@ -522,13 +523,15 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		if (r)
 			goto error;
 
-		fence_put(id->last_flush);
-		id->last_flush = NULL;
+		for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+			fence_put(id->last_flush[i]);
+			id->last_flush[i] = NULL;
+		}
 
-		fence_put(id->flushed_updates);
-		id->flushed_updates = fence_get(updates);
+		fence_put(id->flushed_updates[vmhub]);
+		id->flushed_updates[vmhub] = fence_get(updates);
 
-		id->pd_gpu_addr = job->vm_pd_addr;
+		id->pd_gpu_addr[vmhub] = job->vm_pd_addr;
 		id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
 		list_move_tail(&id->list, &adev->vm_manager.ids_lru);
 		atomic64_set(&id->owner, vm->client_id);
@@ -591,6 +594,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id];
+	unsigned vmhub = ring->funcs->vmhub;
 	bool gds_switch_needed = ring->funcs->emit_gds_switch && (
 		id->gds_base != job->gds_base ||
 		id->gds_size != job->gds_size ||
@@ -629,8 +633,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 			return r;
 
 		mutex_lock(&adev->vm_manager.lock);
-		fence_put(id->last_flush);
-		id->last_flush = fence;
+		fence_put(id->last_flush[vmhub]);
+		id->last_flush[vmhub] = fence;
 		mutex_unlock(&adev->vm_manager.lock);
 	}
@@ -2234,13 +2238,15 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
  */
 void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
 {
-	unsigned i;
+	unsigned i, j;
 
 	for (i = 0; i < AMDGPU_NUM_VM; ++i) {
 		struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
 
 		amdgpu_sync_free(&adev->vm_manager.ids[i].active);
-		fence_put(id->flushed_updates);
-		fence_put(id->last_flush);
+		for (j = 0; j < AMDGPU_MAX_VMHUBS; ++j) {
+			fence_put(id->flushed_updates[j]);
+			fence_put(id->last_flush[j]);
+		}
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 7d01372..d61dd83 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -132,12 +132,12 @@ struct amdgpu_vm {
 struct amdgpu_vm_id {
 	struct list_head	list;
 	struct amdgpu_sync	active;
-	struct fence		*last_flush;
+	struct fence		*last_flush[AMDGPU_MAX_VMHUBS];
 	atomic64_t		owner;
 
-	uint64_t		pd_gpu_addr;
+	uint64_t		pd_gpu_addr[AMDGPU_MAX_VMHUBS];
 	/* last flushed PD/PT update */
-	struct fence		*flushed_updates;
+	struct fence		*flushed_updates[AMDGPU_MAX_VMHUBS];
 
 	uint32_t		current_gpu_reset_count;
--
2.5.0