[PATCH] drm/amdgpu/gmc11: avoid cpu accessing registers to flush VM

Christian König ckoenig.leichtzumerken at gmail.com
Wed May 4 06:25:37 UTC 2022


Am 03.05.22 um 22:08 schrieb Alex Deucher:
> From: Jack Xiao <Jack.Xiao at amd.com>
>
> With gfxoff enabled, the CPU is not expected to access registers
> directly, so use the SDMA to flush the TLB instead.
>
> Signed-off-by: Jack Xiao <Jack.Xiao at amd.com>
> Reviewed-by: Hawking Zhang <Hawking.Zhang at amd.com>
> Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 51 +++++++++++++++++++++++++-
>   1 file changed, 50 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
> index 61db2a378008..032414d7429d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
> @@ -265,6 +265,12 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
>   static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
>   					uint32_t vmhub, uint32_t flush_type)
>   {
> +	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
> +	struct dma_fence *fence;
> +	struct amdgpu_job *job;
> +
> +	int r;
> +
>   	if ((vmhub == AMDGPU_GFXHUB_0) && !adev->gfx.is_poweron)
>   		return;
>   
> @@ -288,8 +294,51 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
>   	}
>   
>   	mutex_lock(&adev->mman.gtt_window_lock);
> -	gmc_v11_0_flush_vm_hub(adev, vmid, vmhub, 0);
> +
> +	if (vmhub == AMDGPU_MMHUB_0) {
> +		gmc_v11_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0);
> +		mutex_unlock(&adev->mman.gtt_window_lock);
> +		return;
> +	}
> +
> +	BUG_ON(vmhub != AMDGPU_GFXHUB_0);

I've already replied on the internal review that this BUG_ON is not 
justified.

We should rather change the "if (vmhub == ..." check above so that all 
hubs other than the GFX hub skip the gfxoff workaround.

Christian.
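
Something like this untested sketch, instead of the MMHUB_0 check plus 
BUG_ON (just to illustrate what I mean, reusing the names already in 
this patch):

	/* Only the GFX hub needs the gfxoff workaround; every other
	 * hub can keep flushing through MMIO directly.
	 */
	if (vmhub != AMDGPU_GFXHUB_0) {
		gmc_v11_0_flush_vm_hub(adev, vmid, vmhub, 0);
		mutex_unlock(&adev->mman.gtt_window_lock);
		return;
	}

With that the SDMA path below only ever sees the GFX hub and the BUG_ON 
can simply be dropped.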

> +
> +	if (!adev->mman.buffer_funcs_enabled ||
> +	    !adev->ib_pool_ready ||
> +	    amdgpu_in_reset(adev) ||
> +	    ring->sched.ready == false) {
> +		gmc_v11_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
> +		mutex_unlock(&adev->mman.gtt_window_lock);
> +		return;
> +	}
> +
> +	r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
> +				     &job);
> +	if (r)
> +		goto error_alloc;
> +
> +	job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
> +	job->vm_needs_flush = true;
> +	job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
> +	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
> +	r = amdgpu_job_submit(job, &adev->mman.entity,
> +			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
> +	if (r)
> +		goto error_submit;
> +
> +	mutex_unlock(&adev->mman.gtt_window_lock);
> +
> +	dma_fence_wait(fence, false);
> +	dma_fence_put(fence);
> +
> +	return;
> +
> +error_submit:
> +	amdgpu_job_free(job);
> +
> +error_alloc:
>   	mutex_unlock(&adev->mman.gtt_window_lock);
> +	DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
>   	return;
>   }
>   


