[PATCH] drm/amd/amdgpu: Port GMC v6 driver to new SI headers (v2)

Deucher, Alexander Alexander.Deucher at amd.com
Thu Nov 10 15:22:30 UTC 2016


> -----Original Message-----
> From: amd-gfx [mailto:amd-gfx-bounces at lists.freedesktop.org] On Behalf
> Of Tom St Denis
> Sent: Thursday, November 10, 2016 9:30 AM
> To: amd-gfx at lists.freedesktop.org
> Cc: StDenis, Tom
> Subject: [PATCH] drm/amd/amdgpu: Port GMC v6 driver to new SI headers
> (v2)
> 
> Port the GMC v6 driver over to the new SI headers.
> 
> Tested with a Tahiti SI ASIC.
> 
> (v2) Fixed a couple of typos (in commented code) and moved
>      defines to si_enums.h
> 
> Signed-off-by: Tom St Denis <tom.stdenis at amd.com>

Reviewed-by: Alex Deucher <alexander.deucher at amd.com>
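
For anyone doing similar conversions of the remaining SI blocks: the pattern
throughout is to replace the hand-rolled sid.h constants with the generated
register headers, i.e. mm-prefixed register offsets plus the
REGISTER__FIELD_MASK / REGISTER__FIELD__SHIFT defines and the
REG_GET_FIELD()/REG_SET_FIELD() helpers.  A rough sketch of the two styles
(illustrative only, not part of this patch; names taken from the hunks below):

	u32 tmp;

	/* old sid.h style: bare offset plus ad-hoc field macros */
	/* tmp = RREG32(VM_L2_CNTL) | EFFECTIVE_L2_QUEUE_SIZE(7); */

	/* new style: mm-prefixed offset, generated __MASK/__SHIFT defines */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp |= VM_L2_CNTL__ENABLE_L2_CACHE_MASK;
	tmp |= (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT);
	/* or, for a read-modify-write of a single field */
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	WREG32(mmVM_L2_CNTL, tmp);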

> ---
>  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 323 ++++++++++++++++++----------------
>  drivers/gpu/drm/amd/amdgpu/si_enums.h |   8 +
>  2 files changed, 184 insertions(+), 147 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
> index 1940d36bc304..3b6ad84df39b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
> @@ -1,4 +1,3 @@
> -
>  /*
>   * Copyright 2014 Advanced Micro Devices, Inc.
>   *
> @@ -26,7 +25,16 @@
>  #include "amdgpu.h"
>  #include "gmc_v6_0.h"
>  #include "amdgpu_ucode.h"
> -#include "si/sid.h"
> +
> +#include "bif/bif_3_0_d.h"
> +#include "bif/bif_3_0_sh_mask.h"
> +#include "oss/oss_1_0_d.h"
> +#include "oss/oss_1_0_sh_mask.h"
> +#include "gmc/gmc_6_0_d.h"
> +#include "gmc/gmc_6_0_sh_mask.h"
> +#include "dce/dce_6_0_d.h"
> +#include "dce/dce_6_0_sh_mask.h"
> +#include "si_enums.h"
> 
>  static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev);
>  static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
> @@ -37,6 +45,16 @@ MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
>  MODULE_FIRMWARE("radeon/verde_mc.bin");
>  MODULE_FIRMWARE("radeon/oland_mc.bin");
> 
> +#define MC_SEQ_MISC0__MT__MASK   0xf0000000
> +#define MC_SEQ_MISC0__MT__GDDR1  0x10000000
> +#define MC_SEQ_MISC0__MT__DDR2   0x20000000
> +#define MC_SEQ_MISC0__MT__GDDR3  0x30000000
> +#define MC_SEQ_MISC0__MT__GDDR4  0x40000000
> +#define MC_SEQ_MISC0__MT__GDDR5  0x50000000
> +#define MC_SEQ_MISC0__MT__HBM    0x60000000
> +#define MC_SEQ_MISC0__MT__DDR3   0xB0000000
> +
> +
>  static const u32 crtc_offsets[6] =
>  {
>  	SI_CRTC0_REGISTER_OFFSET,
> @@ -57,14 +75,14 @@ static void gmc_v6_0_mc_stop(struct amdgpu_device *adev,
> 
>  	gmc_v6_0_wait_for_idle((void *)adev);
> 
> -	blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
> -	if (REG_GET_FIELD(blackout, mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE) != 1) {
> +	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
> +	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
>  		/* Block CPU access */
> -		WREG32(BIF_FB_EN, 0);
> +		WREG32(mmBIF_FB_EN, 0);
>  		/* blackout the MC */
>  		blackout = REG_SET_FIELD(blackout,
> -					 mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE, 0);
> -		WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
> +					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
> +		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
>  	}
>  	/* wait for the MC to settle */
>  	udelay(100);
> @@ -77,13 +95,13 @@ static void gmc_v6_0_mc_resume(struct amdgpu_device *adev,
>  	u32 tmp;
> 
>  	/* unblackout the MC */
> -	tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
> -	tmp = REG_SET_FIELD(tmp, mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE, 0);
> -	WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
> +	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
> +	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
> +	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
>  	/* allow CPU access */
> -	tmp = REG_SET_FIELD(0, mmBIF_FB_EN, xxFB_READ_EN, 1);
> -	tmp = REG_SET_FIELD(tmp, mmBIF_FB_EN, xxFB_WRITE_EN, 1);
> -	WREG32(BIF_FB_EN, tmp);
> +	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
> +	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
> +	WREG32(mmBIF_FB_EN, tmp);
> 
>  	if (adev->mode_info.num_crtc)
>  		amdgpu_display_resume_mc_access(adev, save);
> @@ -158,37 +176,37 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
>  	new_fw_data = (const __le32 *)
>  		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
> 
> -	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
> +	running = RREG32(mmMC_SEQ_SUP_CNTL) & MC_SEQ_SUP_CNTL__RUN_MASK;
> 
>  	if (running == 0) {
> 
>  		/* reset the engine and set to writable */
> -		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
> -		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
> +		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
> +		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
> 
>  		/* load mc io regs */
>  		for (i = 0; i < regs_size; i++) {
> -			WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
> -			WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
> +			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
> +			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
>  		}
>  		/* load the MC ucode */
>  		for (i = 0; i < ucode_size; i++) {
> -			WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
> +			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
>  		}
> 
>  		/* put the engine back into the active state */
> -		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
> -		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
> -		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
> +		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
> +		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
> +		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
> 
>  		/* wait for training to complete */
>  		for (i = 0; i < adev->usec_timeout; i++) {
> -			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
> +			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D0_MASK)
>  				break;
>  			udelay(1);
>  		}
>  		for (i = 0; i < adev->usec_timeout; i++) {
> -			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
> +			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D1_MASK)
>  				break;
>  			udelay(1);
>  		}
> @@ -225,7 +243,7 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
>  		WREG32((0xb08 + j), 0x00000000);
>  		WREG32((0xb09 + j), 0x00000000);
>  	}
> -	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
> +	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
> 
>  	gmc_v6_0_mc_stop(adev, &save);
> 
> @@ -233,24 +251,24 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
>  		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
>  	}
> 
> -	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
> +	WREG32(mmVGA_HDP_CONTROL, VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK);
>  	/* Update configuration */
> -	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
> +	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
>  	       adev->mc.vram_start >> 12);
> -	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
> +	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
>  	       adev->mc.vram_end >> 12);
> -	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
> +	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
>  	       adev->vram_scratch.gpu_addr >> 12);
>  	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
>  	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
> -	WREG32(MC_VM_FB_LOCATION, tmp);
> +	WREG32(mmMC_VM_FB_LOCATION, tmp);
>  	/* XXX double check these! */
> -	WREG32(HDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
> -	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
> -	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
> -	WREG32(MC_VM_AGP_BASE, 0);
> -	WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
> -	WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
> +	WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
> +	WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
> +	WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
> +	WREG32(mmMC_VM_AGP_BASE, 0);
> +	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
> +	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
> 
>  	if (gmc_v6_0_wait_for_idle((void *)adev)) {
>  		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
> @@ -265,16 +283,16 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
>  	u32 tmp;
>  	int chansize, numchan;
> 
> -	tmp = RREG32(MC_ARB_RAMCFG);
> -	if (tmp & CHANSIZE_OVERRIDE) {
> +	tmp = RREG32(mmMC_ARB_RAMCFG);
> +	if (tmp & (1 << 11)) {
>  		chansize = 16;
> -	} else if (tmp & CHANSIZE_MASK) {
> +	} else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK) {
>  		chansize = 64;
>  	} else {
>  		chansize = 32;
>  	}
> -	tmp = RREG32(MC_SHARED_CHMAP);
> -	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
> +	tmp = RREG32(mmMC_SHARED_CHMAP);
> +	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
>  	case 0:
>  	default:
>  		numchan = 1;
> @@ -309,8 +327,8 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
>  	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
>  	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
>  	/* size in MB on si */
> -	adev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
> -	adev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
> +	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
> +	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
>  	adev->mc.visible_vram_size = adev->mc.aper_size;
> 
>  	/* unless the user had overridden it, set the gart
> @@ -329,9 +347,9 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
>  static void gmc_v6_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
>  					uint32_t vmid)
>  {
> -	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
> +	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
> 
> -	WREG32(VM_INVALIDATE_REQUEST, 1 << vmid);
> +	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
>  }
> 
>  static int gmc_v6_0_gart_set_pte_pde(struct amdgpu_device *adev,
> @@ -355,20 +373,20 @@ static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
>  {
>  	u32 tmp;
> 
> -	tmp = RREG32(VM_CONTEXT1_CNTL);
> -	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
> -			    xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
> -	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
> -			    xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
> -	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
> -			    xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
> -	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
> -			    xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
> -	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
> -			    xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT, value);
> -	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
> -			    xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
> -	WREG32(VM_CONTEXT1_CNTL, tmp);
> +	tmp = RREG32(mmVM_CONTEXT1_CNTL);
> +	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
> +			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
> +	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
> +			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
> +	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
> +			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
> +	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
> +			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
> +	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
> +			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
> +	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
> +			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
> +	WREG32(mmVM_CONTEXT1_CNTL, tmp);
>  }
> 
>  static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
> @@ -383,33 +401,39 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
>  	if (r)
>  		return r;
>  	/* Setup TLB control */
> -	WREG32(MC_VM_MX_L1_TLB_CNTL,
> +	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
>  	       (0xA << 7) |
> -	       ENABLE_L1_TLB |
> -	       ENABLE_L1_FRAGMENT_PROCESSING |
> -	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
> -	       ENABLE_ADVANCED_DRIVER_MODEL |
> -	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
> +	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK |
> +	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_FRAGMENT_PROCESSING_MASK |
> +	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
> +	       MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK |
> +	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
>  	/* Setup L2 cache */
> -	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
> -	       ENABLE_L2_FRAGMENT_PROCESSING |
> -	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
> -	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
> -	       EFFECTIVE_L2_QUEUE_SIZE(7) |
> -	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
> -	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
> -	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
> -	       BANK_SELECT(4) |
> -	       L2_CACHE_BIGK_FRAGMENT_SIZE(4));
> +	WREG32(mmVM_L2_CNTL,
> +	       VM_L2_CNTL__ENABLE_L2_CACHE_MASK |
> +	       VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK |
> +	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
> +	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
> +	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
> +	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
> +	WREG32(mmVM_L2_CNTL2,
> +	       VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK |
> +	       VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK);
> +	WREG32(mmVM_L2_CNTL3,
> +	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
> +	       (4UL << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
> +	       (4UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
>  	/* setup context0 */
> -	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
> -	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
> -	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
> -	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
> +	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
> +	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
>  			(u32)(adev->dummy_page.addr >> 12));
> -	WREG32(VM_CONTEXT0_CNTL2, 0);
> -	WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
> -				  RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));
> +	WREG32(mmVM_CONTEXT0_CNTL2, 0);
> +	WREG32(mmVM_CONTEXT0_CNTL,
> +	       VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK |
> +	       (0UL << VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
> +	       VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
> 
>  	WREG32(0x575, 0);
>  	WREG32(0x576, 0);
> @@ -417,39 +441,41 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
> 
>  	/* empty context1-15 */
>  	/* set vm size, must be a multiple of 4 */
> -	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
> -	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
> +	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
> +	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
>  	/* Assign the pt base to something valid for now; the pts used for
>  	 * the VMs are determined by the application and setup and assigned
>  	 * on the fly in the vm part of radeon_gart.c
>  	 */
>  	for (i = 1; i < 16; i++) {
>  		if (i < 8)
> -			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
> +			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
>  			       adev->gart.table_addr >> 12);
>  		else
> -			WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
> +			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
>  			       adev->gart.table_addr >> 12);
>  	}
> 
>  	/* enable context1-15 */
> -	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
> +	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
>  	       (u32)(adev->dummy_page.addr >> 12));
> -	WREG32(VM_CONTEXT1_CNTL2, 4);
> -	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
> -				PAGE_TABLE_BLOCK_SIZE(amdgpu_vm_block_size - 9) |
> -				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
> -				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
> -				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
> -				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
> -				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
> -				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
> -				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
> -				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
> -				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
> -				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
> -				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
> -				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
> +	WREG32(mmVM_CONTEXT1_CNTL2, 4);
> +	WREG32(mmVM_CONTEXT1_CNTL,
> +	       VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
> +	       (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
> +	       ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT) |
> +	       VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
> +	       VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
> +	       VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
> +	       VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
> +	       VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
> +	       VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
> +	       VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
> +	       VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
> +	       VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
> +	       VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
> +	       VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
> +	       VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
> 
>  	gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
>  	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
> @@ -488,19 +514,22 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
>  	}*/
> 
>  	/* Disable all tables */
> -	WREG32(VM_CONTEXT0_CNTL, 0);
> -	WREG32(VM_CONTEXT1_CNTL, 0);
> +	WREG32(mmVM_CONTEXT0_CNTL, 0);
> +	WREG32(mmVM_CONTEXT1_CNTL, 0);
>  	/* Setup TLB control */
> -	WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
> -	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
> +	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
> +	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
> +	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
>  	/* Setup L2 cache */
> -	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
> -	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
> -	       EFFECTIVE_L2_QUEUE_SIZE(7) |
> -	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
> -	WREG32(VM_L2_CNTL2, 0);
> -	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
> -	       L2_CACHE_BIGK_FRAGMENT_SIZE(0));
> +	WREG32(mmVM_L2_CNTL,
> +	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
> +	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
> +	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
> +	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
> +	WREG32(mmVM_L2_CNTL2, 0);
> +	WREG32(mmVM_L2_CNTL3,
> +	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
> +	       (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
>  	amdgpu_gart_table_vram_unpin(adev);
>  }
> 
> @@ -523,7 +552,7 @@ static int gmc_v6_0_vm_init(struct amdgpu_device *adev)
> 
>  	/* base offset of vram pages */
>  	if (adev->flags & AMD_IS_APU) {
> -		u64 tmp = RREG32(MC_VM_FB_OFFSET);
> +		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
>  		tmp <<= 22;
>  		adev->vm_manager.vram_base_offset = tmp;
>  	} else
> @@ -540,19 +569,19 @@ static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
>  				     u32 status, u32 addr, u32 mc_client)
>  {
>  	u32 mc_id;
> -	u32 vmid = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS, xxVMID);
> -	u32 protections = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
> -					xxPROTECTIONS);
> +	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
> +	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
> +					PROTECTIONS);
>  	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
>  		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
> 
> -	mc_id = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
> -			      xxMEMORY_CLIENT_ID);
> +	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
> +			      MEMORY_CLIENT_ID);
> 
>  	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
>  	       protections, vmid, addr,
> -	       REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
> -			     xxMEMORY_CLIENT_RW) ?
> +	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
> +			     MEMORY_CLIENT_RW) ?
>  	       "write" : "read", block, mc_client, mc_id);
>  }
> 
> @@ -655,7 +684,7 @@ static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
>  {
>  	u32 orig, data;
> 
> -	orig = data = RREG32(HDP_HOST_PATH_CNTL);
> +	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
> 
>  	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
>  		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
> @@ -663,7 +692,7 @@ static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
>  		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
> 
>  	if (orig != data)
> -		WREG32(HDP_HOST_PATH_CNTL, data);
> +		WREG32(mmHDP_HOST_PATH_CNTL, data);
>  }
> 
>  static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
> @@ -671,7 +700,7 @@ static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
>  {
>  	u32 orig, data;
> 
> -	orig = data = RREG32(HDP_MEM_POWER_LS);
> +	orig = data = RREG32(mmHDP_MEM_POWER_LS);
> 
>  	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
>  		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
> @@ -679,7 +708,7 @@ static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
>  		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
> 
>  	if (orig != data)
> -		WREG32(HDP_MEM_POWER_LS, data);
> +		WREG32(mmHDP_MEM_POWER_LS, data);
>  }
>  */
> 
> @@ -713,7 +742,7 @@ static int gmc_v6_0_early_init(void *handle)
>  	if (adev->flags & AMD_IS_APU) {
>  		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
>  	} else {
> -		u32 tmp = RREG32(MC_SEQ_MISC0);
> +		u32 tmp = RREG32(mmMC_SEQ_MISC0);
>  		tmp &= MC_SEQ_MISC0__MT__MASK;
>  		adev->mc.vram_type = gmc_v6_0_convert_vram_type(tmp);
>  	}
> @@ -879,7 +908,7 @@ static int gmc_v6_0_resume(void *handle)
>  static bool gmc_v6_0_is_idle(void *handle)
>  {
>  	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
> -	u32 tmp = RREG32(SRBM_STATUS);
> +	u32 tmp = RREG32(mmSRBM_STATUS);
> 
>  	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
>  		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
> @@ -895,7 +924,7 @@ static int gmc_v6_0_wait_for_idle(void *handle)
>  	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
> 
>  	for (i = 0; i < adev->usec_timeout; i++) {
> -		tmp = RREG32(SRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
> +		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
>  					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
>  					       SRBM_STATUS__MCC_BUSY_MASK |
>  					       SRBM_STATUS__MCD_BUSY_MASK |
> @@ -913,17 +942,17 @@ static int gmc_v6_0_soft_reset(void *handle)
>  	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
>  	struct amdgpu_mode_mc_save save;
>  	u32 srbm_soft_reset = 0;
> -	u32 tmp = RREG32(SRBM_STATUS);
> +	u32 tmp = RREG32(mmSRBM_STATUS);
> 
>  	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
>  		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
> -						mmSRBM_SOFT_RESET, xxSOFT_RESET_VMC, 1);
> +						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
> 
>  	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
>  		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
>  		if (!(adev->flags & AMD_IS_APU))
>  			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
> -							mmSRBM_SOFT_RESET, xxSOFT_RESET_MC, 1);
> +							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
>  	}
> 
>  	if (srbm_soft_reset) {
> @@ -933,17 +962,17 @@ static int gmc_v6_0_soft_reset(void *handle)
>  		}
> 
> 
> -		tmp = RREG32(SRBM_SOFT_RESET);
> +		tmp = RREG32(mmSRBM_SOFT_RESET);
>  		tmp |= srbm_soft_reset;
>  		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
> -		WREG32(SRBM_SOFT_RESET, tmp);
> -		tmp = RREG32(SRBM_SOFT_RESET);
> +		WREG32(mmSRBM_SOFT_RESET, tmp);
> +		tmp = RREG32(mmSRBM_SOFT_RESET);
> 
>  		udelay(50);
> 
>  		tmp &= ~srbm_soft_reset;
> -		WREG32(SRBM_SOFT_RESET, tmp);
> -		tmp = RREG32(SRBM_SOFT_RESET);
> +		WREG32(mmSRBM_SOFT_RESET, tmp);
> +		tmp = RREG32(mmSRBM_SOFT_RESET);
> 
>  		udelay(50);
> 
> @@ -969,20 +998,20 @@ static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
> 
>  	switch (state) {
>  	case AMDGPU_IRQ_STATE_DISABLE:
> -		tmp = RREG32(VM_CONTEXT0_CNTL);
> +		tmp = RREG32(mmVM_CONTEXT0_CNTL);
>  		tmp &= ~bits;
> -		WREG32(VM_CONTEXT0_CNTL, tmp);
> -		tmp = RREG32(VM_CONTEXT1_CNTL);
> +		WREG32(mmVM_CONTEXT0_CNTL, tmp);
> +		tmp = RREG32(mmVM_CONTEXT1_CNTL);
>  		tmp &= ~bits;
> -		WREG32(VM_CONTEXT1_CNTL, tmp);
> +		WREG32(mmVM_CONTEXT1_CNTL, tmp);
>  		break;
>  	case AMDGPU_IRQ_STATE_ENABLE:
> -		tmp = RREG32(VM_CONTEXT0_CNTL);
> +		tmp = RREG32(mmVM_CONTEXT0_CNTL);
>  		tmp |= bits;
> -		WREG32(VM_CONTEXT0_CNTL, tmp);
> -		tmp = RREG32(VM_CONTEXT1_CNTL);
> +		WREG32(mmVM_CONTEXT0_CNTL, tmp);
> +		tmp = RREG32(mmVM_CONTEXT1_CNTL);
>  		tmp |= bits;
> -		WREG32(VM_CONTEXT1_CNTL, tmp);
> +		WREG32(mmVM_CONTEXT1_CNTL, tmp);
>  		break;
>  	default:
>  		break;
> @@ -997,9 +1026,9 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
>  {
>  	u32 addr, status;
> 
> -	addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
> -	status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
> -	WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
> +	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
> +	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
> +	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
> 
>  	if (!addr && !status)
>  		return 0;
> diff --git a/drivers/gpu/drm/amd/amdgpu/si_enums.h b/drivers/gpu/drm/amd/amdgpu/si_enums.h
> index 63c057f1a382..3ecd36f30e2a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/si_enums.h
> +++ b/drivers/gpu/drm/amd/amdgpu/si_enums.h
> @@ -23,6 +23,14 @@
>  #ifndef SI_ENUMS_H
>  #define SI_ENUMS_H
> 
> +#define AMDGPU_NUM_OF_VMIDS                     8
> +#define SI_CRTC0_REGISTER_OFFSET                0
> +#define SI_CRTC1_REGISTER_OFFSET                0x300
> +#define SI_CRTC2_REGISTER_OFFSET                0x2600
> +#define SI_CRTC3_REGISTER_OFFSET                0x2900
> +#define SI_CRTC4_REGISTER_OFFSET                0x2c00
> +#define SI_CRTC5_REGISTER_OFFSET                0x2f00
> +
>  #define DMA0_REGISTER_OFFSET 0x000
>  #define DMA1_REGISTER_OFFSET 0x200
>  #define ES_AND_GS_AUTO       3
> --
> 2.10.0
> 
> _______________________________________________
> amd-gfx mailing list
> amd-gfx at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx

