[PATCH 3/5] drm/amdgpu: Avoid using SOC15_REG_OFFSET in static const arrays

Liu, Shaoyun Shaoyun.Liu at amd.com
Thu Dec 7 18:32:42 UTC 2017


Thanks a lot.

Regards
Shaoyun.liu



-----Original Message-----
From: Koenig, Christian 
Sent: Thursday, December 07, 2017 12:13 PM
To: Liu, Shaoyun; amd-gfx at lists.freedesktop.org
Subject: Re: [PATCH 3/5] drm/amdgpu: Avoid using SOC15_REG_OFFSET in static const arrays

Hi Shaoyun,

I didn't have time to take a closer look.

Just skimming over the code, I've found one thing that looked like odd
coding style:
> +		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0,
> +mmUVD_NO_OP), 0)); }
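
For reference, once the line wrapping is fixed up I'd expect that helper to read
roughly like the sketch below (untested; identifiers are taken straight from the
patch, brace placement per the usual kernel style):

	static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
	{
		int i;
		struct amdgpu_device *adev = ring->adev;

		/* pad the ring with 'count' UVD no-op register writes */
		for (i = 0; i < count; i++)
			amdgpu_ring_write(ring,
					  PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
	}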

Please make sure to fix those; with that done, the series is Acked-by:
Christian König <christian.koenig at amd.com>

Regards,
Christian.

On 07.12.2017 at 17:24, Liu, Shaoyun wrote:
> Hi, Christian,
> Do you have time to continue reviewing them?
>
> Regards
> Shaoyun.liu
>
>
> -----Original Message-----
> From: Liu, Shaoyun
> Sent: Friday, December 01, 2017 3:13 PM
> To: amd-gfx at lists.freedesktop.org
> Cc: Liu, Shaoyun
> Subject: [PATCH 3/5] drm/amdgpu: Avoid using SOC15_REG_OFFSET in static const arrays
>
> Change-Id: I59828a9a10652988e22b50d87dd1ec9df8ae7a1d
> Signed-off-by: Shaoyun Liu <Shaoyun.Liu at amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu.h       |  19 +++
>   drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c     | 233 +++++++++++-------------------
>   drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c     |  20 +--
>   drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c    |  45 ++++--
>   drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h    |   3 +-
>   drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c    |  45 ++++--
>   drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h    |   3 +-
>   drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c    | 101 ++++++-------
>   drivers/gpu/drm/amd/amdgpu/soc15.c        | 115 ++++++++++-----
>   drivers/gpu/drm/amd/amdgpu/soc15.h        |  18 +++
>   drivers/gpu/drm/amd/amdgpu/soc15_common.h |   6 -
>   drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c     |  13 +-
>   drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c     |  14 +-
>   13 files changed, 352 insertions(+), 283 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index 1625e41..1d0e109 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -1438,6 +1438,23 @@ struct amdgpu_fw_vram_usage {
>   typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
>   typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
>   
> +
> +/*
> + * amdgpu nbio functions
> + *
> + * FIXME:
> + *	Put more NBIO-specific function wrappers here; for now just try to minimize
> + *	the changes needed to avoid using SOC15_REG_OFFSET in the constant arrays.
> + */
> +
> +struct amdgpu_nbio_funcs {
> +	u32 (*get_hdp_flush_req_offset)(struct amdgpu_device*);
> +	u32 (*get_hdp_flush_done_offset)(struct amdgpu_device*);
> +	u32 (*get_pcie_index_offset)(struct amdgpu_device*);
> +	u32 (*get_pcie_data_offset)(struct amdgpu_device*);
> +};
> +
> +
>   /* Define the HW IP blocks will be used in driver , add more if necessary */
>   enum amd_hw_ip_block_type {
>   	GC_HWIP = 1,
> @@ -1657,6 +1674,8 @@ struct amdgpu_device {
>   	/* soc15 register offset based on ip, instance and  segment */
>   	uint32_t 		*reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
>   
> +	const struct amdgpu_nbio_funcs	*nbio_funcs;
> +
>   	/* delayed work_func for deferring clockgating during resume */
>   	struct delayed_work     late_init_work;
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> index 5497ed6..c39c50a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> @@ -65,152 +65,84 @@
>   MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
>   MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
>   
> -static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
> -{
> -	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) },
> -	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_BASE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_SIZE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID1),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID1) },
> -	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_BASE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_SIZE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID2),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID2) },
> -	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_BASE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_SIZE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID3),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID3) },
> -	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_BASE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_SIZE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID4),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID4) },
> -	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_BASE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_SIZE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID5),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID5) },
> -	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_BASE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_SIZE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID6),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID6) },
> -	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_BASE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_SIZE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID7),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID7) },
> -	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_BASE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_SIZE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID8),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID8) },
> -	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_BASE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_SIZE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID9),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID9) },
> -	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_BASE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_SIZE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID10),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID10) },
> -	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_BASE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_SIZE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID11),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID11) },
> -	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_BASE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_SIZE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID12),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID12)},
> -	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_BASE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_SIZE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID13),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID13) },
> -	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_BASE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_SIZE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID14),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID14) },
> -	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_BASE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_SIZE),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID15),
> -	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID15) }
> +static const struct soc15_reg_golden golden_settings_gc_9_0[] = {
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
>   };
>   
> -static const u32 golden_settings_gc_9_0[] =
> -{
> -	SOC15_REG_OFFSET(GC, 0, mmCPC_UTCL1_CNTL), 0x08000000, 0x08000080,
> -	SOC15_REG_OFFSET(GC, 0, mmCPF_UTCL1_CNTL), 0x08000000, 0x08000080,
> -	SOC15_REG_OFFSET(GC, 0, mmCPG_UTCL1_CNTL), 0x08000000, 0x08000080,
> -	SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2), 0xf00fffff, 0x00000420,
> -	SOC15_REG_OFFSET(GC, 0, mmGB_GPU_ID), 0x0000000f, 0x00000000,
> -	SOC15_REG_OFFSET(GC, 0, mmIA_UTCL1_CNTL), 0x08000000, 0x08000080,
> -	SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3), 0x00000003, 0x82400024,
> -	SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE), 0x3fffffff, 0x00000001,
> -	SOC15_REG_OFFSET(GC, 0, mmPA_SC_LINE_STIPPLE_STATE), 0x0000ff0f, 0x00000000,
> -	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_0), 0x08000000, 0x08000080,
> -	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_1), 0x08000000, 0x08000080,
> -	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_2), 0x08000000, 0x08000080,
> -	SOC15_REG_OFFSET(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL), 0x08000000, 0x08000080,
> -	SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_UTCL1_CNTL), 0x08000000, 0x08000080,
> -	SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), 0x00001000, 0x00001000,
> -	SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_1), 0x0000000f, 0x01000107,
> -	SOC15_REG_OFFSET(GC, 0, mmSQC_CONFIG), 0x03000000, 0x020a2000,
> -	SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000,
> -	SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x4a2c0e68,
> -	SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0xb5d3f197,
> -	SOC15_REG_OFFSET(GC, 0, mmVGT_CACHE_INVALIDATION), 0x3fff3af3, 0x19200000,
> -	SOC15_REG_OFFSET(GC, 0, mmVGT_GS_MAX_WAVE_ID), 0x00000fff, 0x000003ff,
> -	SOC15_REG_OFFSET(GC, 0, mmWD_UTCL1_CNTL), 0x08000000, 0x08000080
> -};
> -
> -static const u32 golden_settings_gc_9_0_vg10[] =
> +static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
>   {
> -	SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL), 0x0000f000, 0x00012107,
> -	SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL_3), 0x30000000, 0x10000000,
> -	SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG), 0xffff77ff, 0x2a114042,
> -	SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG_READ), 0xffff77ff, 0x2a114042,
> -	SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1), 0x00008000, 0x00048000,
> -	SOC15_REG_OFFSET(GC, 0, mmRMI_UTCL1_CNTL2), 0x00030000, 0x00020000,
> -	SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x00001800, 0x00000800
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800)
>   };
>   
> -static const u32 golden_settings_gc_9_1[] =
> -{
> -	SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL), 0xfffdf3cf, 0x00014104,
> -	SOC15_REG_OFFSET(GC, 0, mmCPC_UTCL1_CNTL), 0x08000000, 0x08000080,
> -	SOC15_REG_OFFSET(GC, 0, mmCPF_UTCL1_CNTL), 0x08000000, 0x08000080,
> -	SOC15_REG_OFFSET(GC, 0, mmCPG_UTCL1_CNTL), 0x08000000, 0x08000080,
> -	SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2), 0xf00fffff, 0x00000420,
> -	SOC15_REG_OFFSET(GC, 0, mmGB_GPU_ID), 0x0000000f, 0x00000000,
> -	SOC15_REG_OFFSET(GC, 0, mmIA_UTCL1_CNTL), 0x08000000, 0x08000080,
> -	SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3), 0x00000003, 0x82400024,
> -	SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE), 0x3fffffff, 0x00000001,
> -	SOC15_REG_OFFSET(GC, 0, mmPA_SC_LINE_STIPPLE_STATE), 0x0000ff0f, 0x00000000,
> -	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_0), 0x08000000, 0x08000080,
> -	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_1), 0x08000000, 0x08000080,
> -	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_2), 0x08000000, 0x08000080,
> -	SOC15_REG_OFFSET(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL), 0x08000000, 0x08000080,
> -	SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_UTCL1_CNTL), 0x08000000, 0x08000080,
> -	SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000,
> -	SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x00000000,
> -	SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0x00003120,
> -	SOC15_REG_OFFSET(GC, 0, mmVGT_CACHE_INVALIDATION), 0x3fff3af3, 0x19200000,
> -	SOC15_REG_OFFSET(GC, 0, mmVGT_GS_MAX_WAVE_ID), 0x00000fff, 0x000000ff,
> -	SOC15_REG_OFFSET(GC, 0, mmWD_UTCL1_CNTL), 0x08000000, 0x08000080
> +static const struct soc15_reg_golden golden_settings_gc_9_1[] = {
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
>   };
>   
> -static const u32 golden_settings_gc_9_1_rv1[] =
> +static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
>   {
> -	SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL_3), 0x30000000, 0x10000000,
> -	SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG), 0xffff77ff, 0x24000042,
> -	SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG_READ), 0xffff77ff, 0x24000042,
> -	SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1), 0xffffffff, 0x04048000,
> -	SOC15_REG_OFFSET(GC, 0, mmPA_SC_MODE_CNTL_1), 0x06000000, 0x06000000,
> -	SOC15_REG_OFFSET(GC, 0, mmRMI_UTCL1_CNTL2), 0x00030000, 0x00020000,
> -	SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x01bd9f33, 0x00000800
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
>   };
>   
> -static const u32 golden_settings_gc_9_x_common[] =
> +static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
>   {
> -	SOC15_REG_OFFSET(GC, 0, mmGRBM_CAM_INDEX), 0xffffffff, 0x00000000,
> -	SOC15_REG_OFFSET(GC, 0, mmGRBM_CAM_DATA), 0xffffffff, 0x2544c382
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
> +	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
>   };
>   
>   #define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
> @@ -230,18 +162,18 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
>   {
>   	switch (adev->asic_type) {
>   	case CHIP_VEGA10:
> -		amdgpu_program_register_sequence(adev,
> +		soc15_program_register_sequence(adev,
>   						 golden_settings_gc_9_0,
>   						 ARRAY_SIZE(golden_settings_gc_9_0));
> -		amdgpu_program_register_sequence(adev,
> +		soc15_program_register_sequence(adev,
>   						 golden_settings_gc_9_0_vg10,
>   						 ARRAY_SIZE(golden_settings_gc_9_0_vg10));
>   		break;
>   	case CHIP_RAVEN:
> -		amdgpu_program_register_sequence(adev,
> +		soc15_program_register_sequence(adev,
>   						 golden_settings_gc_9_1,
>   						 ARRAY_SIZE(golden_settings_gc_9_1));
> -		amdgpu_program_register_sequence(adev,
> +		soc15_program_register_sequence(adev,
>   						 golden_settings_gc_9_1_rv1,
>   						 ARRAY_SIZE(golden_settings_gc_9_1_rv1));
>   		break;
> @@ -249,7 +181,7 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
>   		break;
>   	}
>   
> -	amdgpu_program_register_sequence(adev, golden_settings_gc_9_x_common,
> +	soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
>   					(const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
>   }
>   
> @@ -1137,7 +1069,7 @@ static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
>   	adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
>   	adev->gds.mem.total_size -= adev->gfx.ngg.gds_reserve_size;
>   	adev->gds.mem.gfx_partition_size -= adev->gfx.ngg.gds_reserve_size;
> -	adev->gfx.ngg.gds_reserve_addr = amdgpu_gds_reg_offset[0].mem_base;
> +	adev->gfx.ngg.gds_reserve_addr = SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE);
>   	adev->gfx.ngg.gds_reserve_addr += adev->gds.mem.gfx_partition_size;
>   
>   	/* Primitive Buffer */
> @@ -1243,7 +1175,7 @@ static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
>   	}
>   
>   	gfx_v9_0_write_data_to_reg(ring, 0, false,
> -				   amdgpu_gds_reg_offset[0].mem_size,
> +				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
>   			           (adev->gds.mem.total_size +
>   				    adev->gfx.ngg.gds_reserve_size) >>
>   				   AMDGPU_GDS_SHIFT);
> @@ -1259,7 +1191,7 @@ static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
>   
>   
>   	gfx_v9_0_write_data_to_reg(ring, 0, false,
> -				   amdgpu_gds_reg_offset[0].mem_size, 0);
> +				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 0);
>   
>   	amdgpu_ring_commit(ring);
>   
> @@ -3146,6 +3078,8 @@ static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
>   					  uint32_t gws_base, uint32_t gws_size,
>   					  uint32_t oa_base, uint32_t oa_size)
>   {
> +	struct amdgpu_device *adev = ring->adev;
> +
>   	gds_base = gds_base >> AMDGPU_GDS_SHIFT;
>   	gds_size = gds_size >> AMDGPU_GDS_SHIFT;
>   
> @@ -3157,22 +3091,22 @@ static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
>   
>   	/* GDS Base */
>   	gfx_v9_0_write_data_to_reg(ring, 0, false,
> -				   amdgpu_gds_reg_offset[vmid].mem_base,
> +				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
>   				   gds_base);
>   
>   	/* GDS Size */
>   	gfx_v9_0_write_data_to_reg(ring, 0, false,
> -				   amdgpu_gds_reg_offset[vmid].mem_size,
> +				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
>   				   gds_size);
>   
>   	/* GWS */
>   	gfx_v9_0_write_data_to_reg(ring, 0, false,
> -				   amdgpu_gds_reg_offset[vmid].gws,
> +				   SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
>   				   gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
>   
>   	/* OA */
>   	gfx_v9_0_write_data_to_reg(ring, 0, false,
> -				   amdgpu_gds_reg_offset[vmid].oa,
> +				   SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
>   				   (1 << (oa_size + oa_base)) - (1 << oa_base));
>   }
>   
> @@ -3617,6 +3551,7 @@ static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
>   
>   static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
>   {
> +	struct amdgpu_device *adev = ring->adev;
>   	u32 ref_and_mask, reg_mem_engine;
>   	const struct nbio_hdp_flush_reg *nbio_hf_reg;
>   
> @@ -3643,8 +3578,8 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
>   	}
>   
>   	gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
> -			      nbio_hf_reg->hdp_flush_req_offset,
> -			      nbio_hf_reg->hdp_flush_done_offset,
> +			      adev->nbio_funcs->get_hdp_flush_req_offset(adev),
> +			      adev->nbio_funcs->get_hdp_flush_done_offset(adev),
>   			      ref_and_mask, ref_and_mask, 0x20);
>   }
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> index 30eb625..c515499 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
> @@ -35,6 +35,7 @@
>   #include "mmhub/mmhub_1_0_offset.h"
>   #include "athub/athub_1_0_offset.h"
>   
> +#include "soc15.h"
>   #include "soc15_common.h"
>   #include "umc/umc_6_0_sh_mask.h"
>   
> @@ -74,16 +75,16 @@
>   	0xf6e, 0x0fffffff, 0x00000000,
>   };
>   
> -static const u32 golden_settings_mmhub_1_0_0[] =
> +static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
>   {
> -	SOC15_REG_OFFSET(MMHUB, 0, mmDAGB1_WRCLI2), 0x00000007, 0xfe5fe0fa,
> -	SOC15_REG_OFFSET(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0), 0x00000030, 0x55555565
> +	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
> +	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
>   };
>   
> -static const u32 golden_settings_athub_1_0_0[] =
> +static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
>   {
> -	SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL), 0x0000ff00, 0x00000800,
> -	SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL2), 0x00ff00ff, 0x00080008
> +	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
> +	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
>   };
>   
>   /* Ecc related register addresses, (BASE + reg offset) */
> @@ -895,17 +896,18 @@ static int gmc_v9_0_sw_fini(void *handle)
>   
>   static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
>   {
> +
>   	switch (adev->asic_type) {
>   	case CHIP_VEGA10:
> -		amdgpu_program_register_sequence(adev,
> +		soc15_program_register_sequence(adev,
>   						golden_settings_mmhub_1_0_0,
>   						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
> -		amdgpu_program_register_sequence(adev,
> +		soc15_program_register_sequence(adev,
>   						golden_settings_athub_1_0_0,
>   						ARRAY_SIZE(golden_settings_athub_1_0_0));
>   		break;
>   	case CHIP_RAVEN:
> -		amdgpu_program_register_sequence(adev,
> +		soc15_program_register_sequence(adev,
>   						golden_settings_athub_1_0_0,
>   						ARRAY_SIZE(golden_settings_athub_1_0_0));
>   		break;
> diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
> index 76db711..0d3272e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
> +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
> @@ -76,16 +76,13 @@ u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)
>   	return RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_CONFIG_MEMSIZE);
>   }
>   
> -static const u32 nbio_sdma_doorbell_range_reg[] =
> -{
> -	SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE),
> -	SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE)
> -};
> -
>   void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
>   				  bool use_doorbell, int doorbell_index)
>   {
> -	u32 doorbell_range = RREG32(nbio_sdma_doorbell_range_reg[instance]);
> +	u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
> +			SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);
> +
> +	u32 doorbell_range = RREG32(reg);
>   
>   	if (use_doorbell) {
>   		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
> @@ -93,7 +90,8 @@ void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
>   	} else
>   		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
>   
> -	WREG32(nbio_sdma_doorbell_range_reg[instance], doorbell_range);
> +	WREG32(reg, doorbell_range);
> +
>   }
>   
>   void nbio_v6_1_enable_doorbell_aperture(struct amdgpu_device *adev,
> @@ -215,9 +213,27 @@ void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
>   		*flags |= AMD_CG_SUPPORT_BIF_LS;
>   }
>   
> +static u32 get_hdp_flush_req_offset(struct amdgpu_device *adev)
> +{
> +	return SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
> +}
> +
> +static u32 get_hdp_flush_done_offset(struct amdgpu_device *adev)
> +{
> +	return SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
> +}
> +
> +static u32 get_pcie_index_offset(struct amdgpu_device *adev)
> +{
> +	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX);
> +}
> +
> +static u32 get_pcie_data_offset(struct amdgpu_device *adev)
> +{
> +	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA);
> +}
> +
>   const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
> -	.hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_REQ),
> -	.hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_DONE),
>   	.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
>   	.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
>   	.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
> @@ -232,11 +248,14 @@ void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
>   	.ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK
>   };
>   
> -const struct nbio_pcie_index_data nbio_v6_1_pcie_index_data = {
> -	.index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX),
> -	.data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA),
> +const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
> +	.get_hdp_flush_req_offset = get_hdp_flush_req_offset,
> +	.get_hdp_flush_done_offset = get_hdp_flush_done_offset,
> +	.get_pcie_index_offset = get_pcie_index_offset,
> +	.get_pcie_data_offset = get_pcie_data_offset,
>   };
>   
> +
>   void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
>   {
>   	uint32_t reg;
> diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
> index 14ca8d4..973effe 100644
> --- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
> +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
> @@ -27,7 +27,8 @@
>   #include "soc15_common.h"
>   
>   extern const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
> -extern const struct nbio_pcie_index_data nbio_v6_1_pcie_index_data;
> +extern const struct amdgpu_nbio_funcs nbio_v6_1_funcs;
> +
>   int nbio_v6_1_init(struct amdgpu_device *adev);
>   u32 nbio_v6_1_get_atombios_scratch_regs(struct amdgpu_device *adev,
>                                           uint32_t idx);
> diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
> index 1fb7717..4220648 100644
> --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
> @@ -32,6 +32,7 @@
>   
>   #define smnNBIF_MGCG_CTRL_LCLK	0x1013a05c
>   
> +
>   u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev)
>   {
>           u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
> @@ -73,16 +74,13 @@ u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev)
>   	return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
>   }
>   
> -static const u32 nbio_sdma_doorbell_range_reg[] =
> -{
> -	SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE),
> -	SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE)
> -};
> -
>   void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
>   				  bool use_doorbell, int doorbell_index)
>   {
> -	u32 doorbell_range = RREG32(nbio_sdma_doorbell_range_reg[instance]);
> +	u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
> +			SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);
> +
> +	u32 doorbell_range = RREG32(reg);
>   
>   	if (use_doorbell) {
>   		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
> @@ -90,7 +88,7 @@ void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
>   	} else
>   		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
>   
> -	WREG32(nbio_sdma_doorbell_range_reg[instance], doorbell_range);
> +	WREG32(reg, doorbell_range);
>   }
>   
>   void nbio_v7_0_enable_doorbell_aperture(struct amdgpu_device *adev,
> @@ -185,9 +183,27 @@ void nbio_v7_0_ih_control(struct amdgpu_device *adev)
>   	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
>   }
>   
> +static u32 get_hdp_flush_req_offset(struct amdgpu_device *adev)
> +{
> +	return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ);
> +}
> +
> +static u32 get_hdp_flush_done_offset(struct amdgpu_device *adev)
> +{
> +	return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE);
> +}
> +
> +static u32 get_pcie_index_offset(struct amdgpu_device *adev)
> +{
> +	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
> +}
> +
> +static u32 get_pcie_data_offset(struct amdgpu_device *adev)
> +{
> +	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
> +}
> +
>   const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
> -	.hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ),
> -	.hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE),
>   	.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
>   	.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
>   	.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
> @@ -202,7 +218,10 @@ void nbio_v7_0_ih_control(struct amdgpu_device *adev)
>   	.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,  };
>   
> -const struct nbio_pcie_index_data nbio_v7_0_pcie_index_data = {
> -	.index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2),
> -	.data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2)
> +const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
> +	.get_hdp_flush_req_offset = get_hdp_flush_req_offset,
> +	.get_hdp_flush_done_offset = get_hdp_flush_done_offset,
> +	.get_pcie_index_offset = get_pcie_index_offset,
> +	.get_pcie_data_offset = get_pcie_data_offset,
>   };
> +
> diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
> index df8fa90..070c3bd 100644
> --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
> +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
> @@ -27,7 +27,8 @@
>   #include "soc15_common.h"
>   
>   extern const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
> -extern const struct nbio_pcie_index_data nbio_v7_0_pcie_index_data;
> +extern const struct amdgpu_nbio_funcs nbio_v7_0_funcs;
> +
>   int nbio_v7_0_init(struct amdgpu_device *adev);
>   u32 nbio_v7_0_get_atombios_scratch_regs(struct amdgpu_device *adev,
>                                           uint32_t idx);
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> index 458667d..5c98c4d 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> @@ -53,58 +53,58 @@
>   static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
>   static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev);
>   
> -static const u32 golden_settings_sdma_4[] = {
> -	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831d07,
> -	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), 0xff000ff0, 0x3f000100,
> -	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_IB_CNTL), 0x800f0100, 0x00000100,
> -	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
> -	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_PAGE_IB_CNTL), 0x800f0100, 0x00000100,
> -	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
> -	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), 0x003ff006, 0x0003c000,
> -	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL), 0x800f0100, 0x00000100,
> -	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
> -	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL), 0x800f0100, 0x00000100,
> -	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
> -	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UTCL1_PAGE), 0x000003ff, 0x000003c0,
> -	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CHICKEN_BITS), 0xfe931f07, 0x02831f07,
> -	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL), 0xffffffff, 0x3f000100,
> -	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_IB_CNTL), 0x800f0100, 0x00000100,
> -	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
> -	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_PAGE_IB_CNTL), 0x800f0100, 0x00000100,
> -	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
> -	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL), 0x003ff000, 0x0003c000,
> -	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC0_IB_CNTL), 0x800f0100, 0x00000100,
> -	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
> -	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL), 0x800f0100, 0x00000100,
> -	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
> -	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_UTCL1_PAGE), 0x000003ff, 0x000003c0
> +static const struct soc15_reg_golden golden_settings_sdma_4[] = {
> +	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
> +	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xff000ff0, 0x3f000100),
> +	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_IB_CNTL, 0x800f0100, 0x00000100),
> +	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
> +	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_IB_CNTL, 0x800f0100, 0x00000100),
> +	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
> +	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0x003ff006, 0x0003c000),
> +	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL, 0x800f0100, 0x00000100),
> +	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
> +	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
> +	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
> +	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
> +	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
> +	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
> +	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
> +	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
> +	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_IB_CNTL, 0x800f0100, 0x00000100),
> +	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
> +	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_POWER_CNTL, 0x003ff000, 0x0003c000),
> +	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_IB_CNTL, 0x800f0100, 0x00000100),
> +	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
> +	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
> +	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
> +	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0)
>   };
>   
> -static const u32 golden_settings_sdma_vg10[] = {
> -	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG), 0x0018773f, 0x00104002,
> -	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ), 0x0018773f, 0x00104002,
> -	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG), 0x0018773f, 0x00104002,
> -	SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ), 0x0018773f, 0x00104002
> +static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
> +	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
> +	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
> +	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
> +	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
>   };
>   
> -static const u32 golden_settings_sdma_4_1[] =
> +static const struct soc15_reg_golden golden_settings_sdma_4_1[] =
>   {
> -	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831d07,
> -	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), 0xffffffff, 0x3f000100,
> -	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_IB_CNTL), 0x800f0111, 0x00000100,
> -	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
> -	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), 0xfc3fffff, 0x40000051,
> -	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL), 0x800f0111, 0x00000100,
> -	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
> -	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL), 0x800f0111, 0x00000100,
> -	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
> -	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UTCL1_PAGE), 0x000003ff, 0x000003c0
> +	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
> +	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
> +	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100),
> +	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
> +	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0xfc3fffff, 0x40000051),
> +	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100),
> +	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
> +	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100),
> +	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
> +	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0)
>   };
>   
> -static const u32 golden_settings_sdma_rv1[] =
> +static const struct soc15_reg_golden golden_settings_sdma_rv1[] =
>   {
> -	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG), 0x0018773f, 0x00000002,
> -	SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ), 0x0018773f, 0x00000002
> +	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00000002),
> +	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00000002)
>   };
>   
>   static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
> @@ -118,18 +118,18 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
>   {
>   	switch (adev->asic_type) {
>   	case CHIP_VEGA10:
> -		amdgpu_program_register_sequence(adev,
> +		soc15_program_register_sequence(adev,
>   						 golden_settings_sdma_4,
>   						 ARRAY_SIZE(golden_settings_sdma_4));
> -		amdgpu_program_register_sequence(adev,
> +		soc15_program_register_sequence(adev,
>   						 golden_settings_sdma_vg10,
>   						 ARRAY_SIZE(golden_settings_sdma_vg10));
>   		break;
>   	case CHIP_RAVEN:
> -		amdgpu_program_register_sequence(adev,
> +		soc15_program_register_sequence(adev,
>   						 golden_settings_sdma_4_1,
>   						 ARRAY_SIZE(golden_settings_sdma_4_1));
> -		amdgpu_program_register_sequence(adev,
> +		soc15_program_register_sequence(adev,
>   						 golden_settings_sdma_rv1,
>   						 ARRAY_SIZE(golden_settings_sdma_rv1));
>   		break;
> @@ -358,6 +358,7 @@ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
>    */
>   static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
>   {
> +	struct amdgpu_device *adev = ring->adev;
>   	u32 ref_and_mask = 0;
>   	const struct nbio_hdp_flush_reg *nbio_hf_reg;
>   
> @@ -374,8 +375,8 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
>   	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
>   			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
>   			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
> -	amdgpu_ring_write(ring, nbio_hf_reg->hdp_flush_done_offset << 2);
> -	amdgpu_ring_write(ring, nbio_hf_reg->hdp_flush_req_offset << 2);
> +	amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_done_offset(adev)) << 2);
> +	amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_req_offset(adev)) << 2);
>   	amdgpu_ring_write(ring, ref_and_mask); /* reference */
>   	amdgpu_ring_write(ring, ref_and_mask); /* mask */
>   	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
> diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
> index 7c88bcb..fb5f825 100644
> --- a/drivers/gpu/drm/amd/amdgpu/soc15.c
> +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
> @@ -101,15 +101,8 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
>   {
>   	unsigned long flags, address, data;
>   	u32 r;
> -	const struct nbio_pcie_index_data *nbio_pcie_id;
> -
> -	if (adev->flags & AMD_IS_APU)
> -		nbio_pcie_id = &nbio_v7_0_pcie_index_data;
> -	else
> -		nbio_pcie_id = &nbio_v6_1_pcie_index_data;
> -
> -	address = nbio_pcie_id->index_offset;
> -	data = nbio_pcie_id->data_offset;
> +	address = adev->nbio_funcs->get_pcie_index_offset(adev);
> +	data = adev->nbio_funcs->get_pcie_data_offset(adev);
>   
>   	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
>   	WREG32(address, reg);
> @@ -122,15 +115,9 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
>   static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
>   {
>   	unsigned long flags, address, data;
> -	const struct nbio_pcie_index_data *nbio_pcie_id;
> -
> -	if (adev->flags & AMD_IS_APU)
> -		nbio_pcie_id = &nbio_v7_0_pcie_index_data;
> -	else
> -		nbio_pcie_id = &nbio_v6_1_pcie_index_data;
>   
> -	address = nbio_pcie_id->index_offset;
> -	data = nbio_pcie_id->data_offset;
> +	address = adev->nbio_funcs->get_pcie_index_offset(adev);
> +	data = adev->nbio_funcs->get_pcie_data_offset(adev);
>   
>   	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
>   	WREG32(address, reg);
> @@ -332,25 +319,34 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
>   	return true;
>   }
>   
> -static struct amdgpu_allowed_register_entry soc15_allowed_read_registers[] = {
> -	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS)},
> -	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS2)},
> -	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE0)},
> -	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE1)},
> -	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE2)},
> -	{ SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE3)},
> -	{ SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_STATUS_REG)},
> -	{ SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_STATUS_REG)},
> -	{ SOC15_REG_OFFSET(GC, 0, mmCP_STAT)},
> -	{ SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT1)},
> -	{ SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT2)},
> -	{ SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT3)},
> -	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPF_BUSY_STAT)},
> -	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STALLED_STAT1)},
> -	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STATUS)},
> -	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STALLED_STAT1)},
> -	{ SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STATUS)},
> -	{ SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG)},
> +struct soc15_allowed_register_entry {
> +	uint32_t hwip;
> +	uint32_t inst;
> +	uint32_t seg;
> +	uint32_t reg_offset;
> +	bool grbm_indexed;
> +};
> +
> +
> +static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
> +	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
> +	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
> +	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
> +	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
> +	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
> +	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
> +	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
> +	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
> +	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
> +	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
> +	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
> +	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
> +	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
> +	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
> +	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
> +	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
> +	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
> +	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
>   };
>   
>   static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
> @@ -390,10 +386,13 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
>   			    u32 sh_num, u32 reg_offset, u32 *value)
>   {
>   	uint32_t i;
> +	struct soc15_allowed_register_entry  *en;
>   
>   	*value = 0;
>   	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
> -		if (reg_offset != soc15_allowed_read_registers[i].reg_offset)
> +		en = &soc15_allowed_read_registers[i];
> +		if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
> +					+ en->reg_offset))
>   			continue;
>   
>   		*value = soc15_get_register_value(adev,
> @@ -404,6 +403,43 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
>   	return -EINVAL;
>   }
>   
> +
> +/**
> + * soc15_program_register_sequence - program an array of registers.
> + *
> + * @adev: amdgpu_device pointer
> + * @regs: pointer to the register array
> + * @array_size: size of the register array
> + *
> + * Programs an array of registers with AND and OR masks.
> + * This is a helper for setting golden registers.
> + */
> +
> +void soc15_program_register_sequence(struct amdgpu_device *adev,
> +					     const struct soc15_reg_golden *regs,
> +					     const u32 array_size)
> +{
> +	const struct soc15_reg_golden *entry;
> +	u32 tmp, reg;
> +	int i;
> +
> +	for (i = 0; i < array_size; ++i) {
> +		entry = &regs[i];
> +		reg =  adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
> +
> +		if (entry->and_mask == 0xffffffff) {
> +			tmp = entry->or_mask;
> +		} else {
> +			tmp = RREG32(reg);
> +			tmp &= ~(entry->and_mask);
> +			tmp |= entry->or_mask;
> +		}
> +		WREG32(reg, tmp);
> +	}
> +
> +}
> +
> +
>   static int soc15_asic_reset(struct amdgpu_device *adev)
>   {
>   	u32 i;
> @@ -619,6 +655,11 @@ static int soc15_common_early_init(void *handle)
>   
>   	adev->asic_funcs = &soc15_asic_funcs;
>   
> +	if (adev->flags & AMD_IS_APU)
> +		adev->nbio_funcs = &nbio_v7_0_funcs;
> +	else
> +		adev->nbio_funcs = &nbio_v6_1_funcs;
> +
>   	if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) &&
>   		(amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
>   		psp_enabled = true;
> diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
> index c34496f..26b3fea 100644
> --- a/drivers/gpu/drm/amd/amdgpu/soc15.h
> +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
> @@ -29,10 +29,28 @@
>   
>   extern const struct amd_ip_funcs soc15_common_ip_funcs;
>   
> +struct soc15_reg_golden {
> +	u32	hwip;
> +	u32	instance;
> +	u32	segment;
> +	u32	reg;
> +	u32	and_mask;
> +	u32	or_mask;
> +};
> +
> +#define SOC15_REG_ENTRY(ip, inst, reg)	ip##_HWIP, inst, reg##_BASE_IDX, reg
> +
> +#define SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask) \
> +	{ ip##_HWIP, inst, reg##_BASE_IDX, reg, and_mask, or_mask }
> +
>   void soc15_grbm_select(struct amdgpu_device *adev,
>   		    u32 me, u32 pipe, u32 queue, u32 vmid);
>   int soc15_set_ip_blocks(struct amdgpu_device *adev);
>   
> +void soc15_program_register_sequence(struct amdgpu_device *adev,
> +					     const struct soc15_reg_golden *registers,
> +					     const u32 array_size);
> +
>   int vega10_reg_base_init(struct amdgpu_device *adev);
>   
>   #endif
> diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
> index 62a6e21..e2207c5 100644
> --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
> +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
> @@ -25,8 +25,6 @@
>   #define __SOC15_COMMON_H__
>   
>   struct nbio_hdp_flush_reg {
> -	u32 hdp_flush_req_offset;
> -	u32 hdp_flush_done_offset;
>   	u32 ref_and_mask_cp0;
>   	u32 ref_and_mask_cp1;
>   	u32 ref_and_mask_cp2;
> @@ -41,10 +39,6 @@ struct nbio_hdp_flush_reg {
>   	u32 ref_and_mask_sdma1;
>   };
>   
> -struct nbio_pcie_index_data {
> -	u32 index_offset;
> -	u32 data_offset;
> -};
>   
>   /* Register Access Macros */
>   #define SOC15_REG_OFFSET(ip, inst, reg)       (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
> diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
> index 660fa41..bdf6fe6a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
> @@ -1314,6 +1314,15 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
>   	uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
>   }
>   
> +static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
> +{
> +	int i;
> +	struct amdgpu_device *adev = ring->adev;
> +
> +	for (i = 0; i < count; i++)
> +		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
> +}
> +
>   static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
>   {
>   	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
> @@ -1681,7 +1690,7 @@ static int uvd_v7_0_set_clockgating_state(void *handle,
>   static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
>   	.type = AMDGPU_RING_TYPE_UVD,
>   	.align_mask = 0xf,
> -	.nop = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0),
> +	.nop = PACKET0(0x81ff, 0),
>   	.support_64bit_ptrs = false,
>   	.vmhub = AMDGPU_MMHUB,
>   	.get_rptr = uvd_v7_0_ring_get_rptr,
> @@ -1700,7 +1709,7 @@ static int uvd_v7_0_set_clockgating_state(void *handle,
>   	.emit_hdp_invalidate = uvd_v7_0_ring_emit_hdp_invalidate,
>   	.test_ring = uvd_v7_0_ring_test_ring,
>   	.test_ib = amdgpu_uvd_ring_test_ib,
> -	.insert_nop = amdgpu_ring_insert_nop,
> +	.insert_nop = uvd_v7_0_ring_insert_nop,
>   	.pad_ib = amdgpu_ring_generic_pad_ib,
>   	.begin_use = amdgpu_uvd_ring_begin_use,
>   	.end_use = amdgpu_uvd_ring_end_use,
> diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
> index e4673f7..b4a1790 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
> @@ -1077,6 +1077,16 @@ static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
>   	return 0;
>   }
>   
> +static void vcn_v1_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
> +{
> +	int i;
> +	struct amdgpu_device *adev = ring->adev;
> +
> +	for (i = 0; i < count; i++)
> +		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
> +}
> +
> +
>   static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
>   	.name = "vcn_v1_0",
>   	.early_init = vcn_v1_0_early_init,
> @@ -1100,7 +1110,7 @@ static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
>   static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
>   	.type = AMDGPU_RING_TYPE_VCN_DEC,
>   	.align_mask = 0xf,
> -	.nop = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0),
> +	.nop = PACKET0(0x81ff, 0),
>   	.support_64bit_ptrs = false,
>   	.vmhub = AMDGPU_MMHUB,
>   	.get_rptr = vcn_v1_0_dec_ring_get_rptr,
> @@ -1118,7 +1128,7 @@ static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
>   	.emit_hdp_invalidate = vcn_v1_0_dec_ring_emit_hdp_invalidate,
>   	.test_ring = amdgpu_vcn_dec_ring_test_ring,
>   	.test_ib = amdgpu_vcn_dec_ring_test_ib,
> -	.insert_nop = amdgpu_ring_insert_nop,
> +	.insert_nop = vcn_v1_0_ring_insert_nop,
>   	.insert_start = vcn_v1_0_dec_ring_insert_start,
>   	.insert_end = vcn_v1_0_dec_ring_insert_end,
>   	.pad_ib = amdgpu_ring_generic_pad_ib,
> --
> 1.9.1
>


