[PATCH 2/4] drm/amdgpu: make pipeline sync be in same place

Zhang, Jerry (Junwei) Jerry.Zhang at amd.com
Wed May 10 08:08:26 UTC 2017


On 05/10/2017 03:31 PM, Chunming Zhou wrote:
> Change-Id: I0ccfa0e6de0cddbcca8dd85f2862240bc5ca02b3
> Signed-off-by: Chunming Zhou <David1.Zhou at amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu.h     |  1 -
>   drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c  |  6 +++---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_job.c |  1 -
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  | 34 ++++++++++++++++++++++++++++++---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h  |  2 ++
>   5 files changed, 36 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index ef018bf..a583aab 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -1170,7 +1170,6 @@ struct amdgpu_job {
>   	void			*owner;
>   	uint64_t		fence_ctx; /* the fence_context this job uses */
>   	bool                    vm_needs_flush;
> -	bool			need_pipeline_sync;
>   	unsigned		vm_id;
>   	uint64_t		vm_pd_addr;
>   	uint32_t		gds_base, gds_size;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
> index 86ad507..dc250d6 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
> @@ -121,7 +121,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
>   {
>   	struct amdgpu_device *adev = ring->adev;
>   	struct amdgpu_ib *ib = &ibs[0];
> -	struct fence *tmp;
> +	struct fence *tmp = NULL;
>   	bool skip_preamble, need_ctx_switch;
>   	unsigned patch_offset = ~0;
>   	struct amdgpu_vm *vm;
> @@ -169,8 +169,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
>   	}
>
>   	if (ring->funcs->emit_pipeline_sync && job &&
> -	    (tmp = amdgpu_sync_get_fence(&job->sched_sync))) {
> -		job->need_pipeline_sync = true;
> +	    ((tmp = amdgpu_sync_get_fence(&job->sched_sync)) ||
> +	     amdgpu_vm_need_pipeline_sync(ring, job))) {
>   		amdgpu_ring_emit_pipeline_sync(ring);
>   		fence_put(tmp);
>   	}
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> index fa0c8b1..adb7901 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
> @@ -57,7 +57,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
>   	(*job)->vm = vm;
>   	(*job)->ibs = (void *)&(*job)[1];
>   	(*job)->num_ibs = num_ibs;
> -	(*job)->need_pipeline_sync = false;
>
>   	amdgpu_sync_create(&(*job)->sync);
>   	amdgpu_sync_create(&(*job)->sched_sync);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> index b4f83fc..58cde30 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> @@ -700,6 +700,37 @@ static u64 amdgpu_vm_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
>   	return addr;
>   }
>
> +bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
> +				  struct amdgpu_job *job)
> +{
> +	struct amdgpu_device *adev = ring->adev;
> +	unsigned vmhub = ring->funcs->vmhub;
> +	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
> +	struct amdgpu_vm_id *id;
> +	bool gds_switch_needed;
> +	bool vm_flush_needed = job->vm_needs_flush ||
> +		amdgpu_vm_ring_has_compute_vm_bug(ring);
> +
> +	if (job->vm_id == 0)
> +		return false;
> +	id = &id_mgr->ids[job->vm_id];
> +	gds_switch_needed = ring->funcs->emit_gds_switch && (
> +		id->gds_base != job->gds_base ||
> +		id->gds_size != job->gds_size ||
> +		id->gws_base != job->gws_base ||
> +		id->gws_size != job->gws_size ||
> +		id->oa_base != job->oa_base ||
> +		id->oa_size != job->oa_size);
> +
> +	if (amdgpu_vm_had_gpu_reset(adev, id)) {
> +		gds_switch_needed = true;
> +		vm_flush_needed = true;
> +	}
> +	if (!vm_flush_needed && !gds_switch_needed)
> +		return false;
> +	return true;
> +}
> +
>   /**
>    * amdgpu_vm_flush - hardware flush the vm
>    *
> @@ -738,9 +769,6 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
>   	if (ring->funcs->init_cond_exec)
>   		patch_offset = amdgpu_ring_init_cond_exec(ring);
>
> -	if (ring->funcs->emit_pipeline_sync && !job->need_pipeline_sync)
> -		amdgpu_ring_emit_pipeline_sync(ring);
> -

Just to confirm:
will this pipeline sync still be emitted when the vm_flush job is scheduled?

If yes,
Reviewed-by: Junwei Zhang <Jerry.Zhang at amd.com>

Jerry
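
For reference, here is my reading of the resulting flow in amdgpu_ib_schedule().
This is only a sketch: error handling and unrelated code are elided, and the
amdgpu_vm_flush() call site is reconstructed from the surrounding kernel code
rather than quoted from this diff.

	/* Post-patch flow in amdgpu_ib_schedule(), abridged. */
	struct fence *tmp = NULL;
	...
	/* The pipeline sync is now emitted here, ahead of the VM flush,
	 * whenever the scheduler synced on a fence *or*
	 * amdgpu_vm_need_pipeline_sync() predicts a VM flush / GDS switch
	 * (it mirrors the vm_flush_needed/gds_switch_needed checks done
	 * in amdgpu_vm_flush() itself).
	 */
	if (ring->funcs->emit_pipeline_sync && job &&
	    ((tmp = amdgpu_sync_get_fence(&job->sched_sync)) ||
	     amdgpu_vm_need_pipeline_sync(ring, job))) {
		amdgpu_ring_emit_pipeline_sync(ring);
		fence_put(tmp);	/* fence_put() is a no-op for NULL */
	}

	if (vm) {
		/* amdgpu_vm_flush() no longer emits the sync itself. */
		r = amdgpu_vm_flush(ring, job);
		...
	}

If that reading is right, the sync still lands on the ring before the flush;
it has just moved from amdgpu_vm_flush() into amdgpu_ib_schedule().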

>   	if (ring->funcs->emit_vm_flush && vm_flush_needed) {
>   		u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr);
>   		struct fence *fence;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> index 9828fcd..3d16169 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> @@ -254,5 +254,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
>   		      struct amdgpu_bo_va *bo_va);
>   void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size);
>   int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
> +bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
> +				  struct amdgpu_job *job);
>
>   #endif
>
