[PATCH] drm/amdgpu: Add xcc_inst param to amdgpu_virt_kiq_reg_write_reg_wait (v3)
Lazar, Lijo
lijo.lazar at amd.com
Tue Oct 31 04:01:36 UTC 2023
On 10/28/2023 1:41 AM, Victor Lu wrote:
> amdgpu_virt_kiq_reg_write_reg_wait is hardcoded to use MEC engine 0.
> Add xcc_inst as a parameter to allow it to use different MEC engines.
>
> v3: use first xcc for MMHUB in gmc_v9_0_flush_gpu_tlb
>
> v2: rebase
>
> Signed-off-by: Victor Lu <victorchengchi.lu at amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 5 +++--
> drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 3 ++-
> drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 2 +-
> drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 2 +-
> drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 26 ++++++++++++++----------
> 5 files changed, 22 insertions(+), 16 deletions(-)
>
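A quick restatement of the interface change for context (my reading of
the patch, so correct me if I am off): the helper now takes the KIQ
instance explicitly,

        void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
                                                uint32_t reg0, uint32_t reg1,
                                                uint32_t ref, uint32_t mask,
                                                uint32_t xcc_inst);

with the gmc v10/v11 callers simply passing 0, while gmc v9 derives the
instance from the vmhub before calling it.
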
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> index 7a084fbfd33c..82762c61d3ec 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> @@ -73,9 +73,10 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
>
> void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
> uint32_t reg0, uint32_t reg1,
> - uint32_t ref, uint32_t mask)
> + uint32_t ref, uint32_t mask,
> + uint32_t xcc_inst)
> {
> - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
> + struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_inst];
> struct amdgpu_ring *ring = &kiq->ring;
> signed long r, cnt = 0;
> unsigned long flags;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> index 03c0e38b8aea..5c64258eb728 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> @@ -334,7 +334,8 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
> void amdgpu_virt_init_setting(struct amdgpu_device *adev);
> void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
> uint32_t reg0, uint32_t rreg1,
> - uint32_t ref, uint32_t mask);
> + uint32_t ref, uint32_t mask,
> + uint32_t xcc_id);
> int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
> int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
> int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
> index d8a4fddab9c1..173237e99882 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
> @@ -268,7 +268,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
> if (adev->gfx.kiq[0].ring.sched.ready && !adev->enable_mes &&
> (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
> amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
> - 1 << vmid);
> + 1 << vmid, 0);
> return;
> }
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
> index 19eaada35ede..2e4abb356e38 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
> @@ -228,7 +228,7 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
> if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) &&
> (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
> amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
> - 1 << vmid);
> + 1 << vmid, 0);
> return;
> }
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> index 0ab9c554da78..32cc3645f02b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> @@ -817,7 +817,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
> uint32_t vmhub, uint32_t flush_type)
> {
> bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
> - u32 j, inv_req, tmp, sem, req, ack;
> + u32 j, inv_req, tmp, sem, req, ack, inst;
> const unsigned int eng = 17;
> struct amdgpu_vmhub *hub;
>
> @@ -832,13 +832,17 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
> /* This is necessary for a HW workaround under SRIOV as well
> * as GFXOFF under bare metal
> */
> - if (adev->gfx.kiq[0].ring.sched.ready &&
> + if (vmhub >= AMDGPU_MMHUB0(0))
> + inst = 0;
The same GET_INST(GC, 0) comment applies here. It also looks like this
hunk could be combined with patch 1.
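
Concretely, I would expect something along these lines for the MMHUB
case (untested sketch, same as the suggestion on patch 1), so the
instance goes through GET_INST() instead of a hardcoded 0:

        if (vmhub >= AMDGPU_MMHUB0(0))
                inst = GET_INST(GC, 0);
        else
                inst = vmhub;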
Thanks,
Lijo
> + else
> + inst = vmhub;
> + if (adev->gfx.kiq[inst].ring.sched.ready &&
> (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
> uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
> uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
>
> amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
> - 1 << vmid);
> + 1 << vmid, inst);
> return;
> }
>
> @@ -856,9 +860,9 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
> for (j = 0; j < adev->usec_timeout; j++) {
> /* a read return value of 1 means semaphore acquire */
> if (vmhub >= AMDGPU_MMHUB0(0))
> - tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, sem, 0);
> + tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, sem, inst);
> else
> - tmp = RREG32_SOC15_IP_NO_KIQ(GC, sem, vmhub);
> + tmp = RREG32_SOC15_IP_NO_KIQ(GC, sem, inst);
> if (tmp & 0x1)
> break;
> udelay(1);
> @@ -869,9 +873,9 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
> }
>
> if (vmhub >= AMDGPU_MMHUB0(0))
> - WREG32_SOC15_IP_NO_KIQ(MMHUB, req, inv_req, 0);
> + WREG32_SOC15_IP_NO_KIQ(MMHUB, req, inv_req, inst);
> else
> - WREG32_SOC15_IP_NO_KIQ(GC, req, inv_req, vmhub);
> + WREG32_SOC15_IP_NO_KIQ(GC, req, inv_req, inst);
>
> /*
> * Issue a dummy read to wait for the ACK register to
> @@ -884,9 +888,9 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
>
> for (j = 0; j < adev->usec_timeout; j++) {
> if (vmhub >= AMDGPU_MMHUB0(0))
> - tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, ack, 0);
> + tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, ack, inst);
> else
> - tmp = RREG32_SOC15_IP_NO_KIQ(GC, ack, vmhub);
> + tmp = RREG32_SOC15_IP_NO_KIQ(GC, ack, inst);
> if (tmp & (1 << vmid))
> break;
> udelay(1);
> @@ -899,9 +903,9 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
> * write with 0 means semaphore release
> */
> if (vmhub >= AMDGPU_MMHUB0(0))
> - WREG32_SOC15_IP_NO_KIQ(MMHUB, sem, 0, 0);
> + WREG32_SOC15_IP_NO_KIQ(MMHUB, sem, 0, inst);
> else
> - WREG32_SOC15_IP_NO_KIQ(GC, sem, 0, vmhub);
> + WREG32_SOC15_IP_NO_KIQ(GC, sem, 0, inst);
> }
>
> spin_unlock(&adev->gmc.invalidate_lock);