[PATCH 1/3] drm/amdgpu: add a bool to specify whether a VM flush is needed V2
Christian König
deathsimple at vodafone.de
Mon Jul 4 08:29:11 UTC 2016
On 04.07.2016 at 04:27, Chunming Zhou wrote:
> which avoids job->vm_pd_addr being changed.
>
> V2: pass job structure to amdgpu_vm_grab_id and amdgpu_vm_flush directly.
>
> Change-Id: I3c3f2497a8e9794cd1612c226817423e2001aa43
> Signed-off-by: Chunming Zhou <David1.Zhou at amd.com>
Reviewed-by: Christian König <christian.koenig at amd.com> for the whole series.
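
The essence of V2: before this patch, amdgpu_vm_grab_id() signalled "no
flush needed" by overwriting the caller's page directory address with the
AMDGPU_VM_NO_FLUSH sentinel, which destroyed job->vm_pd_addr in the
process. A minimal standalone sketch of the two schemes, using stand-in
types and a stand-in sentinel value rather than the real amdgpu
definitions:

    #include <stdbool.h>
    #include <stdint.h>

    #define NO_FLUSH_SENTINEL (~0ULL) /* stand-in for AMDGPU_VM_NO_FLUSH */

    struct job {
        uint64_t vm_pd_addr;     /* page directory address the job runs under */
        bool     vm_needs_flush; /* V2: explicit flag instead of a sentinel */
    };

    /* Old scheme: a reuse hit clobbers the caller's address. */
    static void signal_no_flush_old(uint64_t *vm_pd_addr)
    {
        *vm_pd_addr = NO_FLUSH_SENTINEL;
    }

    /* New scheme: the address survives, e.g. for the tracepoints. */
    static void signal_no_flush_new(struct job *job)
    {
        job->vm_needs_flush = false;
    }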
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu.h | 9 ++---
> drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 5 +--
> drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 2 +-
> drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 66 ++++++++++++++++-----------------
> 4 files changed, 36 insertions(+), 46 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index 4cfc4eb..f246f64 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -948,12 +948,8 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
> struct amdgpu_vm *vm);
> int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
> struct amdgpu_sync *sync, struct fence *fence,
> - unsigned *vm_id, uint64_t *vm_pd_addr);
> -int amdgpu_vm_flush(struct amdgpu_ring *ring,
> - unsigned vm_id, uint64_t pd_addr,
> - uint32_t gds_base, uint32_t gds_size,
> - uint32_t gws_base, uint32_t gws_size,
> - uint32_t oa_base, uint32_t oa_size);
> + struct amdgpu_job *job);
> +int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
> void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
> uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
> int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
> @@ -1267,6 +1263,7 @@ struct amdgpu_job {
> uint32_t num_ibs;
> void *owner;
> uint64_t ctx;
> + bool vm_needs_flush;
> unsigned vm_id;
> uint64_t vm_pd_addr;
> uint32_t gds_base, gds_size;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
> index 0bf6c1b..46c3097 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
> @@ -160,10 +160,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
> patch_offset = amdgpu_ring_init_cond_exec(ring);
>
> if (vm) {
> - r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr,
> - job->gds_base, job->gds_size,
> - job->gws_base, job->gws_size,
> - job->oa_base, job->oa_size);
> + r = amdgpu_vm_flush(ring, job);
> if (r) {
> amdgpu_ring_undo(ring);
> return r;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> index 0b55025..aaee0c8 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> @@ -145,7 +145,7 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
>
> r = amdgpu_vm_grab_id(vm, ring, &job->sync,
> &job->base.s_fence->finished,
> - &job->vm_id, &job->vm_pd_addr);
> + job);
> if (r)
> DRM_ERROR("Error getting VM ID (%d)\n", r);
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> index 6a02f0a..3193d75 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> @@ -179,7 +179,7 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
> */
> int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
> struct amdgpu_sync *sync, struct fence *fence,
> - unsigned *vm_id, uint64_t *vm_pd_addr)
> + struct amdgpu_job *job)
> {
> struct amdgpu_device *adev = ring->adev;
> struct fence *updates = sync->last_vm_update;
> @@ -236,6 +236,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
> }
> kfree(fences);
>
> + job->vm_needs_flush = true;
> /* Check if we can use a VMID already assigned to this VM */
> i = ring->idx;
> do {
> @@ -255,7 +256,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
> if (atomic64_read(&id->owner) != vm->client_id)
> continue;
>
> - if (*vm_pd_addr != id->pd_gpu_addr)
> + if (job->vm_pd_addr != id->pd_gpu_addr)
> continue;
>
> if (!same_ring &&
> @@ -277,9 +278,9 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
> list_move_tail(&id->list, &adev->vm_manager.ids_lru);
> vm->ids[ring->idx] = id;
>
> - *vm_id = id - adev->vm_manager.ids;
> - *vm_pd_addr = AMDGPU_VM_NO_FLUSH;
> - trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
> + job->vm_id = id - adev->vm_manager.ids;
> + job->vm_needs_flush = false;
> + trace_amdgpu_vm_grab_id(vm, ring->idx, job->vm_id, job->vm_pd_addr);
>
> mutex_unlock(&adev->vm_manager.lock);
> return 0;
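
The reuse fast path above is the only place the flag is cleared; every
path that falls through to allocating a fresh VMID keeps the pessimistic
vm_needs_flush = true set at the top of the function. A condensed,
compilable model of that decision, with stand-in types and the locking,
fence, and LRU handling elided:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct vm_id {
        uint64_t pd_gpu_addr;     /* page directory last flushed for this id */
        bool     updates_pending; /* page tables changed since that flush */
    };

    struct job {
        unsigned vm_id;
        uint64_t vm_pd_addr;
        bool     vm_needs_flush;
    };

    static int grab_id(struct vm_id *ids, size_t num_ids, struct job *job)
    {
        size_t i;

        job->vm_needs_flush = true;          /* pessimistic default */

        /* Reuse fast path: an id already holding this page directory. */
        for (i = 0; i < num_ids; ++i) {
            if (ids[i].pd_gpu_addr != job->vm_pd_addr)
                continue;                    /* different page directory */
            if (ids[i].updates_pending)
                continue;                    /* stale page table contents */
            job->vm_id = (unsigned)i;
            job->vm_needs_flush = false;     /* safe to skip the flush */
            return 0;
        }

        /* Miss: take a victim id (LRU in the real code); flag stays true. */
        job->vm_id = 0;
        ids[0].pd_gpu_addr = job->vm_pd_addr;
        ids[0].updates_pending = false;
        return 0;
    }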
> @@ -303,14 +304,14 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
> fence_put(id->flushed_updates);
> id->flushed_updates = fence_get(updates);
>
> - id->pd_gpu_addr = *vm_pd_addr;
> + id->pd_gpu_addr = job->vm_pd_addr;
> id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
> list_move_tail(&id->list, &adev->vm_manager.ids_lru);
> atomic64_set(&id->owner, vm->client_id);
> vm->ids[ring->idx] = id;
>
> - *vm_id = id - adev->vm_manager.ids;
> - trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
> + job->vm_id = id - adev->vm_manager.ids;
> + trace_amdgpu_vm_grab_id(vm, ring->idx, job->vm_id, job->vm_pd_addr);
>
> error:
> mutex_unlock(&adev->vm_manager.lock);
> @@ -352,34 +353,29 @@ static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
> *
> * Emit a VM flush when it is necessary.
> */
> -int amdgpu_vm_flush(struct amdgpu_ring *ring,
> - unsigned vm_id, uint64_t pd_addr,
> - uint32_t gds_base, uint32_t gds_size,
> - uint32_t gws_base, uint32_t gws_size,
> - uint32_t oa_base, uint32_t oa_size)
> +int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
> {
> struct amdgpu_device *adev = ring->adev;
> - struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
> + struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id];
> bool gds_switch_needed = ring->funcs->emit_gds_switch && (
> - id->gds_base != gds_base ||
> - id->gds_size != gds_size ||
> - id->gws_base != gws_base ||
> - id->gws_size != gws_size ||
> - id->oa_base != oa_base ||
> - id->oa_size != oa_size);
> + id->gds_base != job->gds_base ||
> + id->gds_size != job->gds_size ||
> + id->gws_base != job->gws_base ||
> + id->gws_size != job->gws_size ||
> + id->oa_base != job->oa_base ||
> + id->oa_size != job->oa_size);
> int r;
>
> if (ring->funcs->emit_pipeline_sync && (
> - pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed ||
> + job->vm_needs_flush || gds_switch_needed ||
> amdgpu_vm_ring_has_compute_vm_bug(ring)))
> amdgpu_ring_emit_pipeline_sync(ring);
>
> - if (ring->funcs->emit_vm_flush &&
> - pd_addr != AMDGPU_VM_NO_FLUSH) {
> + if (ring->funcs->emit_vm_flush && job->vm_needs_flush) {
> struct fence *fence;
>
> - trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id);
> - amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr);
> + trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
> + amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
>
> r = amdgpu_fence_emit(ring, &fence);
> if (r)
> @@ -392,16 +388,16 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
> }
>
> if (gds_switch_needed) {
> - id->gds_base = gds_base;
> - id->gds_size = gds_size;
> - id->gws_base = gws_base;
> - id->gws_size = gws_size;
> - id->oa_base = oa_base;
> - id->oa_size = oa_size;
> - amdgpu_ring_emit_gds_switch(ring, vm_id,
> - gds_base, gds_size,
> - gws_base, gws_size,
> - oa_base, oa_size);
> + id->gds_base = job->gds_base;
> + id->gds_size = job->gds_size;
> + id->gws_base = job->gws_base;
> + id->gws_size = job->gws_size;
> + id->oa_base = job->oa_base;
> + id->oa_size = job->oa_size;
> + amdgpu_ring_emit_gds_switch(ring, job->vm_id,
> + job->gds_base, job->gds_size,
> + job->gws_base, job->gws_size,
> + job->oa_base, job->oa_size);
> }
>
> return 0;
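
The gating at the top of amdgpu_vm_flush() is now driven entirely by the
flag. A condensed, compilable model of that gating, with stand-in types
and stubbed emit functions; the real function also emits a fence after
the flush and handles the GDS switch shown above:

    #include <stdbool.h>
    #include <stdint.h>

    struct job {
        bool     vm_needs_flush;
        unsigned vm_id;
        uint64_t vm_pd_addr;
    };

    struct ring {
        bool has_pipeline_sync; /* ring->funcs->emit_pipeline_sync != NULL */
        bool has_vm_flush;      /* ring->funcs->emit_vm_flush != NULL */
        bool compute_vm_bug;    /* amdgpu_vm_ring_has_compute_vm_bug() */
    };

    static void emit_pipeline_sync(struct ring *ring) { (void)ring; }
    static void emit_vm_flush(struct ring *ring, unsigned vm_id,
                              uint64_t pd_addr)
    {
        (void)ring; (void)vm_id; (void)pd_addr;
    }

    static int vm_flush(struct ring *ring, struct job *job,
                        bool gds_switch_needed)
    {
        /* Drain in-flight work before switching VM state or GDS. */
        if (ring->has_pipeline_sync &&
            (job->vm_needs_flush || gds_switch_needed ||
             ring->compute_vm_bug))
            emit_pipeline_sync(ring);

        /* Keyed off the flag now, not a magic vm_pd_addr value, so the
         * real address is still available for the flush and the trace. */
        if (ring->has_vm_flush && job->vm_needs_flush)
            emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);

        return 0;
    }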