[PATCH v3 2/5] drm/amdgpu/vcn2.5: support multiple instance direct SRAM read and write
Leo Liu
leo.liu at amd.com
Wed Jan 15 13:09:33 UTC 2020
On 2020-01-14 5:23 p.m., James Zhu wrote:
> Add multiple instance direct SRAM read and write support for vcn2.5
>
> Signed-off-by: James Zhu <James.Zhu at amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 27 +++++-----
> drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 46 ++++++++--------
> drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 94 ++++++++++++++++-----------------
> 3 files changed, 83 insertions(+), 84 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> index 99df693..ca62d99 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> @@ -165,15 +165,15 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
> dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
> return r;
> }
> - }
>
> - if (adev->vcn.indirect_sram) {
> - r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
> - AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.dpg_sram_bo,
> - &adev->vcn.dpg_sram_gpu_addr, &adev->vcn.dpg_sram_cpu_addr);
> - if (r) {
> - dev_err(adev->dev, "(%d) failed to allocate DPG bo\n", r);
> - return r;
> + if (adev->vcn.indirect_sram) {
> + r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
> + AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
> + &adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr);
> + if (r) {
> + dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
> + return r;
> + }
> }
> }
>
> @@ -186,15 +186,14 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
>
> cancel_delayed_work_sync(&adev->vcn.idle_work);
>
> - if (adev->vcn.indirect_sram) {
> - amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo,
> - &adev->vcn.dpg_sram_gpu_addr,
> - (void **)&adev->vcn.dpg_sram_cpu_addr);
> - }
> -
> for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
> if (adev->vcn.harvest_config & (1 << j))
> continue;
> + if (adev->vcn.indirect_sram) {
> + amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
> + &adev->vcn.inst[j].dpg_sram_gpu_addr,
> + (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
> + }
> kvfree(adev->vcn.inst[j].saved_bo);
>
> amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
> index 26c6623..63c55bc 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
> @@ -104,27 +104,27 @@
> internal_reg_offset >>= 2; \
> })
>
> -#define RREG32_SOC15_DPG_MODE_2_0(offset, mask_en) \
> - ({ \
> - WREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_CTL, \
> - (0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
> - mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
> - offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
> - RREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_DATA); \
> +#define RREG32_SOC15_DPG_MODE_2_0(inst_idx, offset, mask_en) \
> + ({ \
> + WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL, \
> + (0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
> + mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
> + offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
> + RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA); \
Please keep the backslash aligned with those above and below.
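E.g. something along these lines (whitespace is only approximate in this mail):

	offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT));	\
	RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA);	\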
Regards,
Leo
> })
>
> -#define WREG32_SOC15_DPG_MODE_2_0(offset, value, mask_en, indirect) \
> - do { \
> - if (!indirect) { \
> - WREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_DATA, value); \
> - WREG32_SOC15(VCN, 0, mmUVD_DPG_LMA_CTL, \
> - (0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
> - mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
> - offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
> - } else { \
> - *adev->vcn.dpg_sram_curr_addr++ = offset; \
> - *adev->vcn.dpg_sram_curr_addr++ = value; \
> - } \
> +#define WREG32_SOC15_DPG_MODE_2_0(inst_idx, offset, value, mask_en, indirect) \
> + do { \
> + if (!indirect) { \
> + WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA, value); \
> + WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL, \
> + (0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
> + mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
> + offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
> + } else { \
> + *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = offset; \
> + *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = value; \
> + } \
> } while (0)
>
> enum engine_status_constants {
> @@ -173,6 +173,10 @@ struct amdgpu_vcn_inst {
> struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
> struct amdgpu_irq_src irq;
> struct amdgpu_vcn_reg external;
> + struct amdgpu_bo *dpg_sram_bo;
> + void *dpg_sram_cpu_addr;
> + uint64_t dpg_sram_gpu_addr;
> + uint32_t *dpg_sram_curr_addr;
> };
>
> struct amdgpu_vcn {
> @@ -184,10 +188,6 @@ struct amdgpu_vcn {
> struct dpg_pause_state pause_state;
>
> bool indirect_sram;
> - struct amdgpu_bo *dpg_sram_bo;
> - void *dpg_sram_cpu_addr;
> - uint64_t dpg_sram_gpu_addr;
> - uint32_t *dpg_sram_curr_addr;
>
> uint8_t num_vcn_inst;
> struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES];
> diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
> index dcdc7ad..9ff59ac 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
> @@ -356,88 +356,88 @@ static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirec
> /* cache window 0: fw */
> if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
> if (!indirect) {
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
> (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo), 0, indirect);
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
> (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi), 0, indirect);
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
> } else {
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
> }
> offset = 0;
> } else {
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
> lower_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
> upper_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
> offset = size;
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
> AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
> }
>
> if (!indirect)
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
> else
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
>
> /* cache window 1: stack */
> if (!indirect) {
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
> lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
> upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
> } else {
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
> }
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
>
> /* cache window 2: context */
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
> lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
> upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
>
> /* non-cache window */
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);
>
> /* VCN global tiling registers */
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
> }
>
> @@ -583,19 +583,19 @@ static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_device *adev,
> UVD_CGC_CTRL__WCB_MODE_MASK |
> UVD_CGC_CTRL__VCPU_MODE_MASK |
> UVD_CGC_CTRL__SCPU_MODE_MASK);
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
>
> /* turn off clock gating */
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
>
> /* turn on SUVD clock gating */
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
>
> /* turn on sw mode in UVD_SUVD_CGC_CTRL */
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
> }
>
> @@ -759,7 +759,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
> WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);
>
> if (indirect)
> - adev->vcn.dpg_sram_curr_addr = (uint32_t*)adev->vcn.dpg_sram_cpu_addr;
> + adev->vcn.inst->dpg_sram_curr_addr = (uint32_t*)adev->vcn.inst->dpg_sram_cpu_addr;
>
> /* enable clock gating */
> vcn_v2_0_clock_gating_dpg_mode(adev, 0, indirect);
> @@ -768,11 +768,11 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
> tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
> tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
> tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
>
> /* disable master interupt */
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
>
> /* setup mmUVD_LMI_CTRL */
> @@ -784,28 +784,28 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
> UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
> (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
> 0x00100000L);
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
>
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_MPC_CNTL),
> 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
>
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_MPC_SET_MUXA0),
> ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
> (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
> (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
> (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
>
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_MPC_SET_MUXB0),
> ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
> (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
> (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
> (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
>
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_MPC_SET_MUX),
> ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
> (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
> @@ -813,29 +813,29 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
>
> vcn_v2_0_mc_resume_dpg_mode(adev, indirect);
>
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
>
> /* release VCPU reset to boot */
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_SOFT_RESET), 0, 0, indirect);
>
> /* enable LMI MC and UMC channels */
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_LMI_CTRL2),
> 0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT, 0, indirect);
>
> /* enable master interrupt */
> - WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0(
> + WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
> UVD, 0, mmUVD_MASTINT_EN),
> UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
>
> if (indirect)
> - psp_update_vcn_sram(adev, 0, adev->vcn.dpg_sram_gpu_addr,
> - (uint32_t)((uintptr_t)adev->vcn.dpg_sram_curr_addr -
> - (uintptr_t)adev->vcn.dpg_sram_cpu_addr));
> + psp_update_vcn_sram(adev, 0, adev->vcn.inst->dpg_sram_gpu_addr,
> + (uint32_t)((uintptr_t)adev->vcn.inst->dpg_sram_curr_addr -
> + (uintptr_t)adev->vcn.inst->dpg_sram_cpu_addr));
>
> /* force RBC into idle state */
> rb_bufsz = order_base_2(ring->ring_size);
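
Just to illustrate the new first argument: with this change a caller on a second
VCN instance would simply pass its index, roughly like the following
(illustration only, not part of this patch):

	WREG32_SOC15_DPG_MODE_2_0(1, SOC15_DPG_MODE_OFFSET_2_0(
			UVD, 1, mmUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);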