[PATCH 03/11] drm/amdgpu: cleanup gmc_v11_0_flush_gpu_tlb
Alex Deucher
alexdeucher at gmail.com
Tue Sep 5 20:56:21 UTC 2023
On Tue, Sep 5, 2023 at 2:30 AM Christian König
<ckoenig.leichtzumerken at gmail.com> wrote:
>
> Remove leftovers from copying this from the gmc v10 code.
>
> Signed-off-by: Christian König <christian.koenig at amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 108 ++++++++++---------------
> 1 file changed, 41 insertions(+), 67 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
> index dcbba981462e..3c3ad3f17c6a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
> @@ -186,27 +186,50 @@ static bool gmc_v11_0_get_vmid_pasid_mapping_info(
> return !!(*p_pasid);
> }
>
> -/*
> - * GART
> - * VMID 0 is the physical GPU addresses as used by the kernel.
> - * VMIDs 1-15 are used for userspace clients and are handled
> - * by the amdgpu vm/hsa code.
> +/**
> + * gmc_v11_0_flush_gpu_tlb - gart tlb flush callback
> + *
> + * @adev: amdgpu_device pointer
> + * @vmid: vm instance to flush
> + * @vmhub: which hub to flush
> + * @flush_type: the flush type
> + *
> + * Flush the TLB for the requested page table.
> */
> -
> -static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
> - unsigned int vmhub, uint32_t flush_type)
> +static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
> + uint32_t vmhub, uint32_t flush_type)
> {
> bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(adev, vmhub);
> struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
> u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
> - u32 tmp;
> /* Use register 17 for GART */
> const unsigned int eng = 17;
> + unsigned char hub_ip;
> + u32 sem, req, ack;
> unsigned int i;
> - unsigned char hub_ip = 0;
> + u32 tmp;
> +
> + if ((vmhub == AMDGPU_GFXHUB(0)) && !adev->gfx.is_poweron)
> + return;
>
> - hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ?
> - GC_HWIP : MMHUB_HWIP;
> + sem = hub->vm_inv_eng0_sem + hub->eng_distance * eng;
> + req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
> + ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
> +
> + /* flush hdp cache */
> + adev->hdp.funcs->flush_hdp(adev, NULL);
> +
> + /* For SRIOV run time, the driver shouldn't access the register through
> + * MMIO. Use KIQ directly to do the VM invalidation instead.
> + */
> + if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) &&
> + (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
> + amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
> + 1 << vmid);
> + return;
> + }
> +
> + hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? GC_HWIP : MMHUB_HWIP;
>
> spin_lock(&adev->gmc.invalidate_lock);
> /*
> @@ -220,8 +243,7 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
> if (use_semaphore) {
> for (i = 0; i < adev->usec_timeout; i++) {
> /* a read return value of 1 means semaphore acquire */
> - tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
> - hub->eng_distance * eng, hub_ip);
> + tmp = RREG32_RLC_NO_KIQ(sem, hub_ip);
> if (tmp & 0x1)
> break;
> udelay(1);
> @@ -231,12 +253,11 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
> DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
> }
>
> - WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req, hub_ip);
> + WREG32_RLC_NO_KIQ(req, inv_req, hub_ip);
>
> /* Wait for ACK with a delay. */
> for (i = 0; i < adev->usec_timeout; i++) {
> - tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
> - hub->eng_distance * eng, hub_ip);
> + tmp = RREG32_RLC_NO_KIQ(ack, hub_ip);
> tmp &= 1 << vmid;
> if (tmp)
> break;
> @@ -246,12 +267,7 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
>
> /* TODO: Debugging with the semaphore for GFXHUB still needs work. */
> if (use_semaphore)
> - /*
> - * add semaphore release after invalidation,
> - * write with 0 means semaphore release
> - */
> - WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
> - hub->eng_distance * eng, 0, hub_ip);
> + WREG32_RLC_NO_KIQ(sem, 0, hub_ip);
>
> /* Issue additional private vm invalidation to MMHUB */
> if ((vmhub != AMDGPU_GFXHUB(0)) &&
> @@ -268,50 +284,8 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
>
> spin_unlock(&adev->gmc.invalidate_lock);
>
> - if (i < adev->usec_timeout)
> - return;
> -
> - DRM_ERROR("Timeout waiting for VM flush ACK!\n");
> -}
> -
> -/**
> - * gmc_v11_0_flush_gpu_tlb - gart tlb flush callback
> - *
> - * @adev: amdgpu_device pointer
> - * @vmid: vm instance to flush
> - * @vmhub: which hub to flush
> - * @flush_type: the flush type
> - *
> - * Flush the TLB for the requested page table.
> - */
> -static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
> - uint32_t vmhub, uint32_t flush_type)
> -{
> - if ((vmhub == AMDGPU_GFXHUB(0)) && !adev->gfx.is_poweron)
> - return;
> -
> - /* flush hdp cache */
> - adev->hdp.funcs->flush_hdp(adev, NULL);
> -
> - /* For SRIOV run time, driver shouldn't access the register through MMIO
> - * Directly use kiq to do the vm invalidation instead
> - */
> - if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) &&
> - (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
> - struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
> - const unsigned int eng = 17;
> - u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
> - u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
> - u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
> -
> - amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
> - 1 << vmid);
> - return;
> - }
> -
> - mutex_lock(&adev->mman.gtt_window_lock);
> - gmc_v11_0_flush_vm_hub(adev, vmid, vmhub, 0);
> - mutex_unlock(&adev->mman.gtt_window_lock);
> + if (i >= adev->usec_timeout)
> + DRM_ERROR("Timeout waiting for VM flush ACK!\n");
While you are at it, maybe switch this to use dev_err() so we can
better tell which GPU is affected in the multi-GPU case. Same comment
for the other patches.
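As a rough, untested sketch of what I mean (dev_err() prefixes the
message with the device name, so the failing GPU can be identified on
multi-GPU systems):

	/* untested sketch: dev_err() names the device that timed out */
	if (i >= adev->usec_timeout)
		dev_err(adev->dev, "Timeout waiting for VM flush ACK!\n");

Either way: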
Reviewed-by: Alex Deucher <alexander.deucher at amd.com>
Alex
> }
>
> /**
> --
> 2.34.1
>