[PATCH 02/44] drm/amdgpu/vcn3.0: split code along instances
Boyuan Zhang
Boyuan.Zhang at amd.com
Tue Feb 4 15:40:04 UTC 2025
On 2025-01-31 11:56, Alex Deucher wrote:
> Split the code on a per instance basis. This will allow
> us to use the per instance functions in the future to
> handle more things per instance.
>
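For anyone skimming the series: this is the standard loop-hoisting refactor, where the start/stop helpers lose their internal "for each instance" loop and take an instance index instead, while the caller iterates. A minimal sketch of the pattern, with hypothetical simplified names (the real helpers also handle harvesting, DPG mode, and all the register programming below):

	/* before: the helper iterates over every VCN instance itself */
	static int vcn_start_all(struct amdgpu_device *adev)
	{
		int i, r;

		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			r = vcn_start_one(adev, i); /* hypothetical per-instance body */
			if (r)
				return r;
		}
		return 0;
	}

	/* after: the helper programs exactly one instance ... */
	static int vcn_start(struct amdgpu_device *adev, int i)
	{
		if (adev->vcn.harvest_config & (1 << i))
			return 0; /* harvested instances are a no-op, as in the patch */

		return vcn_start_one(adev, i);
	}

	/* ... and the caller owns the loop, so a future user (e.g. a
	 * per-instance reset path) can drive a single engine directly:
	 */
	for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
		ret |= vcn_start(adev, i);
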
> Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
Reviewed-by: Boyuan Zhang <Boyuan.Zhang at amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 436 +++++++++++++-------------
> 1 file changed, 213 insertions(+), 223 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
> index 63ddd4cca9109..a3627700ed48d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
> @@ -1134,192 +1134,186 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
> return 0;
> }
>
> -static int vcn_v3_0_start(struct amdgpu_device *adev)
> +static int vcn_v3_0_start(struct amdgpu_device *adev, int i)
> {
> volatile struct amdgpu_fw_shared *fw_shared;
> struct amdgpu_ring *ring;
> uint32_t rb_bufsz, tmp;
> - int i, j, k, r;
> + int j, k, r;
>
> - for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
> - if (adev->pm.dpm_enabled)
> - amdgpu_dpm_enable_vcn(adev, true, i);
> - }
> + if (adev->vcn.harvest_config & (1 << i))
> + return 0;
>
> - for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
> - if (adev->vcn.harvest_config & (1 << i))
> - continue;
> + if (adev->pm.dpm_enabled)
> + amdgpu_dpm_enable_vcn(adev, true, i);
>
> - if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
> - r = vcn_v3_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
> - continue;
> - }
> + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
> + return vcn_v3_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
>
> - /* disable VCN power gating */
> - vcn_v3_0_disable_static_power_gating(adev, i);
> + /* disable VCN power gating */
> + vcn_v3_0_disable_static_power_gating(adev, i);
>
> - /* set VCN status busy */
> - tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
> - WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
> + /* set VCN status busy */
> + tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
> + WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
>
> - /*SW clock gating */
> - vcn_v3_0_disable_clock_gating(adev, i);
> + /* SW clock gating */
> + vcn_v3_0_disable_clock_gating(adev, i);
>
> - /* enable VCPU clock */
> - WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
> - UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
> -
> - /* disable master interrupt */
> - WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
> - ~UVD_MASTINT_EN__VCPU_EN_MASK);
> -
> - /* enable LMI MC and UMC channels */
> - WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
> - ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
> -
> - tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
> - tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
> - tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
> - WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
> -
> - /* setup mmUVD_LMI_CTRL */
> - tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
> - WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp |
> - UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
> - UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
> - UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
> - UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
> -
> - /* setup mmUVD_MPC_CNTL */
> - tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
> - tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
> - tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
> - WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);
> -
> - /* setup UVD_MPC_SET_MUXA0 */
> - WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
> - ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
> - (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
> - (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
> - (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
> -
> - /* setup UVD_MPC_SET_MUXB0 */
> - WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
> - ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
> - (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
> - (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
> - (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
> -
> - /* setup mmUVD_MPC_SET_MUX */
> - WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
> - ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
> - (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
> - (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
> -
> - vcn_v3_0_mc_resume(adev, i);
> -
> - /* VCN global tiling registers */
> - WREG32_SOC15(VCN, i, mmUVD_GFX10_ADDR_CONFIG,
> - adev->gfx.config.gb_addr_config);
> -
> - /* unblock VCPU register access */
> - WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
> - ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
> -
> - /* release VCPU reset to boot */
> - WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
> - ~UVD_VCPU_CNTL__BLK_RST_MASK);
> + /* enable VCPU clock */
> + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
> + UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
>
> - for (j = 0; j < 10; ++j) {
> - uint32_t status;
> + /* disable master interrupt */
> + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
> + ~UVD_MASTINT_EN__VCPU_EN_MASK);
>
> - for (k = 0; k < 100; ++k) {
> - status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
> - if (status & 2)
> - break;
> - mdelay(10);
> - }
> - r = 0;
> - if (status & 2)
> - break;
> + /* enable LMI MC and UMC channels */
> + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
> + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
>
> - DRM_ERROR("VCN[%d] decode not responding, trying to reset the VCPU!!!\n", i);
> - WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
> - UVD_VCPU_CNTL__BLK_RST_MASK,
> - ~UVD_VCPU_CNTL__BLK_RST_MASK);
> - mdelay(10);
> - WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
> - ~UVD_VCPU_CNTL__BLK_RST_MASK);
> + tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
> + tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
> + tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
> + WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
> +
> + /* setup mmUVD_LMI_CTRL */
> + tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
> + WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp |
> + UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
> + UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
> + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
> + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
> +
> + /* setup mmUVD_MPC_CNTL */
> + tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
> + tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
> + tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
> + WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);
> +
> + /* setup UVD_MPC_SET_MUXA0 */
> + WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
> + ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
> + (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
> + (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
> + (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
> +
> + /* setup UVD_MPC_SET_MUXB0 */
> + WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
> + ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
> + (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
> + (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
> + (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
> +
> + /* setup mmUVD_MPC_SET_MUX */
> + WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
> + ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
> + (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
> + (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
> +
> + vcn_v3_0_mc_resume(adev, i);
> +
> + /* VCN global tiling registers */
> + WREG32_SOC15(VCN, i, mmUVD_GFX10_ADDR_CONFIG,
> + adev->gfx.config.gb_addr_config);
> +
> + /* unblock VCPU register access */
> + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
> + ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
>
> + /* release VCPU reset to boot */
> + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
> + ~UVD_VCPU_CNTL__BLK_RST_MASK);
> +
> + for (j = 0; j < 10; ++j) {
> + uint32_t status;
> +
> + for (k = 0; k < 100; ++k) {
> + status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
> + if (status & 2)
> + break;
> mdelay(10);
> - r = -1;
> }
> + r = 0;
> + if (status & 2)
> + break;
>
> - if (r) {
> - DRM_ERROR("VCN[%d] decode not responding, giving up!!!\n", i);
> - return r;
> - }
> + DRM_ERROR("VCN[%d] decode not responding, trying to reset the VCPU!!!\n", i);
> + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
> + UVD_VCPU_CNTL__BLK_RST_MASK,
> + ~UVD_VCPU_CNTL__BLK_RST_MASK);
> + mdelay(10);
> + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
> + ~UVD_VCPU_CNTL__BLK_RST_MASK);
> +
> + mdelay(10);
> + r = -1;
> + }
>
> - /* enable master interrupt */
> - WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
> - UVD_MASTINT_EN__VCPU_EN_MASK,
> - ~UVD_MASTINT_EN__VCPU_EN_MASK);
> + if (r) {
> + DRM_ERROR("VCN[%d] decode not responding, giving up!!!\n", i);
> + return r;
> + }
>
> - /* clear the busy bit of VCN_STATUS */
> - WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
> - ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
> + /* enable master interrupt */
> + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
> + UVD_MASTINT_EN__VCPU_EN_MASK,
> + ~UVD_MASTINT_EN__VCPU_EN_MASK);
>
> - WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);
> + /* clear the busy bit of VCN_STATUS */
> + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
> + ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
>
> - ring = &adev->vcn.inst[i].ring_dec;
> - /* force RBC into idle state */
> - rb_bufsz = order_base_2(ring->ring_size);
> - tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
> - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
> - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
> - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
> - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
> - WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
> + WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);
>
> - fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
> - fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
> + ring = &adev->vcn.inst[i].ring_dec;
> + /* force RBC into idle state */
> + rb_bufsz = order_base_2(ring->ring_size);
> + tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
> + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
> + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
> + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
> + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
> + WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
>
> - /* programm the RB_BASE for ring buffer */
> - WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
> - lower_32_bits(ring->gpu_addr));
> - WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
> - upper_32_bits(ring->gpu_addr));
> + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
> + fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
>
> - /* Initialize the ring buffer's read and write pointers */
> - WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);
> + /* programm the RB_BASE for ring buffer */
> + WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
> + lower_32_bits(ring->gpu_addr));
> + WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
> + upper_32_bits(ring->gpu_addr));
>
> - WREG32_SOC15(VCN, i, mmUVD_SCRATCH2, 0);
> - ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
> - WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
> - lower_32_bits(ring->wptr));
> - fw_shared->rb.wptr = lower_32_bits(ring->wptr);
> - fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
> -
> - if (amdgpu_ip_version(adev, UVD_HWIP, 0) !=
> - IP_VERSION(3, 0, 33)) {
> - fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
> - ring = &adev->vcn.inst[i].ring_enc[0];
> - WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
> - WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
> - WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
> - WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
> - WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
> - fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
> -
> - fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
> - ring = &adev->vcn.inst[i].ring_enc[1];
> - WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
> - WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
> - WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
> - WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
> - WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
> - fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
> - }
> + /* Initialize the ring buffer's read and write pointers */
> + WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);
> +
> + WREG32_SOC15(VCN, i, mmUVD_SCRATCH2, 0);
> + ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
> + WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
> + lower_32_bits(ring->wptr));
> + fw_shared->rb.wptr = lower_32_bits(ring->wptr);
> + fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
> +
> + if (amdgpu_ip_version(adev, UVD_HWIP, 0) !=
> + IP_VERSION(3, 0, 33)) {
> + fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
> + ring = &adev->vcn.inst[i].ring_enc[0];
> + WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
> + WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
> + WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
> + WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
> + WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
> + fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
> +
> + fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
> + ring = &adev->vcn.inst[i].ring_enc[1];
> + WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
> + WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
> + WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
> + WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
> + WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
> + fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
> }
>
> return 0;
> @@ -1565,79 +1559,73 @@ static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
> return 0;
> }
>
> -static int vcn_v3_0_stop(struct amdgpu_device *adev)
> +static int vcn_v3_0_stop(struct amdgpu_device *adev, int i)
> {
> uint32_t tmp;
> - int i, r = 0;
> + int r = 0;
>
> - for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
> - if (adev->vcn.harvest_config & (1 << i))
> - continue;
> + if (adev->vcn.harvest_config & (1 << i))
> + return 0;
>
> - if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
> - r = vcn_v3_0_stop_dpg_mode(adev, i);
> - continue;
> - }
> + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
> + return vcn_v3_0_stop_dpg_mode(adev, i);
>
> - /* wait for vcn idle */
> - r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
> - if (r)
> - return r;
> + /* wait for vcn idle */
> + r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
> + if (r)
> + return r;
>
> - tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
> - UVD_LMI_STATUS__READ_CLEAN_MASK |
> - UVD_LMI_STATUS__WRITE_CLEAN_MASK |
> - UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
> - r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
> - if (r)
> - return r;
> + tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
> + UVD_LMI_STATUS__READ_CLEAN_MASK |
> + UVD_LMI_STATUS__WRITE_CLEAN_MASK |
> + UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
> + r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
> + if (r)
> + return r;
>
> - /* disable LMI UMC channel */
> - tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
> - tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
> - WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
> - tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK|
> - UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
> - r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
> - if (r)
> - return r;
> + /* disable LMI UMC channel */
> + tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
> + tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
> + WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
> + tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK|
> + UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
> + r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
> + if (r)
> + return r;
>
> - /* block VCPU register access */
> - WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
> - UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
> - ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
> + /* block VCPU register access */
> + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
> + UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
> + ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
>
> - /* reset VCPU */
> - WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
> - UVD_VCPU_CNTL__BLK_RST_MASK,
> - ~UVD_VCPU_CNTL__BLK_RST_MASK);
> + /* reset VCPU */
> + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
> + UVD_VCPU_CNTL__BLK_RST_MASK,
> + ~UVD_VCPU_CNTL__BLK_RST_MASK);
>
> - /* disable VCPU clock */
> - WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
> - ~(UVD_VCPU_CNTL__CLK_EN_MASK));
> + /* disable VCPU clock */
> + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
> + ~(UVD_VCPU_CNTL__CLK_EN_MASK));
>
> - /* apply soft reset */
> - tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
> - tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
> - WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
> - tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
> - tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
> - WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
> + /* apply soft reset */
> + tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
> + tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
> + WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
> + tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
> + tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
> + WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
>
> - /* clear status */
> - WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);
> + /* clear status */
> + WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);
>
> - /* apply HW clock gating */
> - vcn_v3_0_enable_clock_gating(adev, i);
> + /* apply HW clock gating */
> + vcn_v3_0_enable_clock_gating(adev, i);
>
> - /* enable VCN power gating */
> - vcn_v3_0_enable_static_power_gating(adev, i);
> - }
> + /* enable VCN power gating */
> + vcn_v3_0_enable_static_power_gating(adev, i);
>
> - for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
> - if (adev->pm.dpm_enabled)
> - amdgpu_dpm_enable_vcn(adev, false, i);
> - }
> + if (adev->pm.dpm_enabled)
> + amdgpu_dpm_enable_vcn(adev, false, i);
>
> return 0;
> }
> @@ -2163,7 +2151,7 @@ static int vcn_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
> enum amd_powergating_state state)
> {
> struct amdgpu_device *adev = ip_block->adev;
> - int ret;
> + int ret = 0, i;
>
> /* for SRIOV, guest should not control VCN Power-gating
> * MMSCH FW should control Power-gating and clock-gating
> @@ -2177,10 +2165,12 @@ static int vcn_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
> if (state == adev->vcn.cur_state)
> return 0;
>
> - if (state == AMD_PG_STATE_GATE)
> - ret = vcn_v3_0_stop(adev);
> - else
> - ret = vcn_v3_0_start(adev);
> + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
> + if (state == AMD_PG_STATE_GATE)
> + ret |= vcn_v3_0_stop(adev, i);
> + else
> + ret |= vcn_v3_0_start(adev, i);
> + }
>
> if (!ret)
> adev->vcn.cur_state = state;