[PATCH 1/2] drm/amd/amdgpu: port of DCE v6 to new headers (v2)

Tom St Denis <tstdenis82@gmail.com>
Mon Nov 14 18:57:45 UTC 2016


Port of SI DCE v6 over to the new AMDGPU headers.  Tested on a Tahiti
with GNOME through various hot plugs, rotations, mode sizes, and
fullscreen/windowed switching, using the staging drm/xf86-video-amdgpu.

(v2) Refactored to remove the formatting changes to si_enums.h
     as well as to rename various defines.
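
The shape of the conversion is the same throughout: register offsets
pick up the mm prefix from dce_6_0_d.h, and the ad-hoc sid.h bit
defines become the generated <REG>__<FIELD>_MASK/__SHIFT names from
dce_6_0_sh_mask.h.  As a small illustration, taken from the HPD
polarity hunk below (not additional code on top of the patch):

	/* before: sid.h-style names */
	tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
	tmp |= DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);

	/* after: generated dce_6_0_d.h / dce_6_0_sh_mask.h names */
	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
	tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);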

Signed-off-by: Tom St Denis <tom.stdenis@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c              | 507 +++++++++++----------
 drivers/gpu/drm/amd/amdgpu/si_enums.h              |  78 ++++
 .../gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h   |  12 +
 3 files changed, 350 insertions(+), 247 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index b0fdc291bf43..54e309c1eaf8 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -30,8 +30,19 @@
 #include "atombios_encoders.h"
 #include "amdgpu_pll.h"
 #include "amdgpu_connectors.h"
-#include "si/si_reg.h"
-#include "si/sid.h"
+
+#include "bif/bif_3_0_d.h"
+#include "bif/bif_3_0_sh_mask.h"
+#include "oss/oss_1_0_d.h"
+#include "oss/oss_1_0_sh_mask.h"
+#include "gca/gfx_6_0_d.h"
+#include "gca/gfx_6_0_sh_mask.h"
+#include "gmc/gmc_6_0_d.h"
+#include "gmc/gmc_6_0_sh_mask.h"
+#include "dce/dce_6_0_d.h"
+#include "dce/dce_6_0_sh_mask.h"
+#include "gca/gfx_7_2_enum.h"
+#include "si_enums.h"
 
 static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
 static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -48,12 +59,12 @@ static const u32 crtc_offsets[6] =
 
 static const u32 hpd_offsets[] =
 {
-	DC_HPD1_INT_STATUS - DC_HPD1_INT_STATUS,
-	DC_HPD2_INT_STATUS - DC_HPD1_INT_STATUS,
-	DC_HPD3_INT_STATUS - DC_HPD1_INT_STATUS,
-	DC_HPD4_INT_STATUS - DC_HPD1_INT_STATUS,
-	DC_HPD5_INT_STATUS - DC_HPD1_INT_STATUS,
-	DC_HPD6_INT_STATUS - DC_HPD1_INT_STATUS,
+	mmDC_HPD1_INT_STATUS - mmDC_HPD1_INT_STATUS,
+	mmDC_HPD2_INT_STATUS - mmDC_HPD1_INT_STATUS,
+	mmDC_HPD3_INT_STATUS - mmDC_HPD1_INT_STATUS,
+	mmDC_HPD4_INT_STATUS - mmDC_HPD1_INT_STATUS,
+	mmDC_HPD5_INT_STATUS - mmDC_HPD1_INT_STATUS,
+	mmDC_HPD6_INT_STATUS - mmDC_HPD1_INT_STATUS,
 };
 
 static const uint32_t dig_offsets[] = {
@@ -73,32 +84,32 @@ static const struct {
 	uint32_t	hpd;
 
 } interrupt_status_offsets[6] = { {
-	.reg = DISP_INTERRUPT_STATUS,
+	.reg = mmDISP_INTERRUPT_STATUS,
 	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
 	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
 	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
 }, {
-	.reg = DISP_INTERRUPT_STATUS_CONTINUE,
+	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
 	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
 }, {
-	.reg = DISP_INTERRUPT_STATUS_CONTINUE2,
+	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
 	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
 }, {
-	.reg = DISP_INTERRUPT_STATUS_CONTINUE3,
+	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
 	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
 }, {
-	.reg = DISP_INTERRUPT_STATUS_CONTINUE4,
+	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
 	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
 }, {
-	.reg = DISP_INTERRUPT_STATUS_CONTINUE5,
+	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
 	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
 	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
 	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
@@ -119,7 +130,7 @@ static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
 
 static bool dce_v6_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
 {
-	if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
+	if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) & CRTC_STATUS__CRTC_V_BLANK_MASK)
 		return true;
 	else
 		return false;
@@ -129,8 +140,8 @@ static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
 {
 	u32 pos1, pos2;
 
-	pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
-	pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+	pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
+	pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
 
 	if (pos1 != pos2)
 		return true;
@@ -152,7 +163,7 @@ static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
 	if (crtc >= adev->mode_info.num_crtc)
 		return;
 
-	if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
+	if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
 		return;
 
 	/* depending on when we hit vblank, we may be close to active; if so,
@@ -180,7 +191,7 @@ static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
 	if (crtc >= adev->mode_info.num_crtc)
 		return 0;
 	else
-		return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
+		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 }
 
 static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
@@ -220,16 +231,16 @@ static void dce_v6_0_page_flip(struct amdgpu_device *adev,
 	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
 
 	/* flip at hsync for async, default is vsync */
-	WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
-	       EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0);
+	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
+	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
 	/* update the scanout addresses */
-	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
 	       upper_32_bits(crtc_base));
-	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
 	       (u32)crtc_base);
 
 	/* post the write */
-	RREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
+	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
 }
 
 static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
@@ -237,8 +248,8 @@ static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
 {
 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
 		return -EINVAL;
-	*vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + crtc_offsets[crtc]);
-	*position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
+	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
 
 	return 0;
 
@@ -261,7 +272,7 @@ static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
 	if (hpd >= adev->mode_info.num_hpd)
 		return connected;
 
-	if (RREG32(DC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPDx_SENSE)
+	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
 		connected = true;
 
 	return connected;
@@ -284,12 +295,12 @@ static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
 	if (hpd >= adev->mode_info.num_hpd)
 		return;
 
-	tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
 	if (connected)
-		tmp &= ~DC_HPDx_INT_POLARITY;
+		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
 	else
-		tmp |= DC_HPDx_INT_POLARITY;
-	WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
+		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
+	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 }
 
 /**
@@ -312,9 +323,9 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
 		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 			continue;
 
-		tmp = RREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
-		tmp |= DC_HPDx_EN;
-		WREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
+		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 
 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
 		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
@@ -323,9 +334,9 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
 			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
 			 * also avoid interrupt storms during dpms.
 			 */
-			tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
-			tmp &= ~DC_HPDx_INT_EN;
-			WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
+			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 			continue;
 		}
 
@@ -355,9 +366,9 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
 		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
 			continue;
 
-		tmp = RREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
-		tmp &= ~DC_HPDx_EN;
-		WREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], 0);
+		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
+		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
+		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], 0);
 
 		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
 	}
@@ -365,7 +376,7 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
 
 static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
 {
-	return SI_DC_GPIO_HPD_A;
+	return mmDC_GPIO_HPD_A;
 }
 
 static bool dce_v6_0_is_display_hung(struct amdgpu_device *adev)
@@ -380,7 +391,7 @@ static u32 evergreen_get_vblank_counter(struct amdgpu_device* adev, int crtc)
 	if (crtc >= adev->mode_info.num_crtc)
 		return 0;
 	else
-		return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
+		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 }
 
 static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
@@ -389,25 +400,25 @@ static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
 	u32 crtc_enabled, tmp, frame_count;
 	int i, j;
 
-	save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
-	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
+	save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
+	save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
 
 	/* disable VGA render */
-	WREG32(VGA_RENDER_CONTROL, 0);
+	WREG32(mmVGA_RENDER_CONTROL, 0);
 
 	/* blank the display controllers */
 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
-		crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
+		crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK;
 		if (crtc_enabled) {
 			save->crtc_enabled[i] = true;
-			tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
+			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
 
-			if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
+			if (!(tmp & CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK)) {
 				dce_v6_0_vblank_wait(adev, i);
-				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
-				tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
-				WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
-				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+				tmp |= CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK;
+				WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
 			}
 			/* wait for the next frame */
 			frame_count = evergreen_get_vblank_counter(adev, i);
@@ -418,11 +429,11 @@ static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
 			}
 
 			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
-			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
-			tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
-			tmp &= ~EVERGREEN_CRTC_MASTER_EN;
-			WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
-			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+			tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
+			tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
+			WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
+			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
 			save->crtc_enabled[i] = false;
 			/* ***** */
 		} else {
@@ -439,41 +450,41 @@ static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
 
 	/* update crtc base addresses */
 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
-		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
 		       upper_32_bits(adev->mc.vram_start));
-		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
 		       upper_32_bits(adev->mc.vram_start));
-		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
 		       (u32)adev->mc.vram_start);
-		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
 		       (u32)adev->mc.vram_start);
 	}
 
-	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
-	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start);
+	WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
+	WREG32(mmVGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start);
 
 	/* unlock regs and wait for update */
 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
 		if (save->crtc_enabled[i]) {
-			tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
+			tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
 			if ((tmp & 0x7) != 3) {
 				tmp &= ~0x7;
 				tmp |= 0x3;
-				WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+				WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
 			}
-			tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
-			if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
-				tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
-				WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+			tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
+			if (tmp & GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK) {
+				tmp &= ~GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
+				WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
 			}
-			tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+			tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
 			if (tmp & 1) {
 				tmp &= ~1;
-				WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+				WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
 			}
 			for (j = 0; j < adev->usec_timeout; j++) {
-				tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
-				if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
+				tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
+				if ((tmp & GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK) == 0)
 					break;
 				udelay(1);
 			}
@@ -481,9 +492,9 @@ static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
 	}
 
 	/* Unlock vga access */
-	WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
+	WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
 	mdelay(1);
-	WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
+	WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
 
 }
 
@@ -491,8 +502,8 @@ static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
 					  bool render)
 {
 	if (!render)
-		WREG32(R_000300_VGA_RENDER_CONTROL,
-			RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
+		WREG32(mmVGA_RENDER_CONTROL,
+			RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
 
 }
 
@@ -526,14 +537,14 @@ void dce_v6_0_disable_dce(struct amdgpu_device *adev)
 
 		/*Disable crtc*/
 		for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
-			crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) &
-				EVERGREEN_CRTC_MASTER_EN;
+			crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) &
+				CRTC_CONTROL__CRTC_MASTER_EN_MASK;
 			if (crtc_enabled) {
-				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
-				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
-				tmp &= ~EVERGREEN_CRTC_MASTER_EN;
-				WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
-				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
+				tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
+				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
+				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
 			}
 		}
 	}
@@ -569,19 +580,23 @@ static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
 	case 6:
 		if (dither == AMDGPU_FMT_DITHER_ENABLE)
 			/* XXX sort out optimal dither settings */
-			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
-				FMT_SPATIAL_DITHER_EN);
+			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
+				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
+				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK);
 		else
-			tmp |= FMT_TRUNCATE_EN;
+			tmp |= FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK;
 		break;
 	case 8:
 		if (dither == AMDGPU_FMT_DITHER_ENABLE)
 			/* XXX sort out optimal dither settings */
-			tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
-				FMT_RGB_RANDOM_ENABLE |
-				FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
+			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
+				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
+				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
+				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
+				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK);
 		else
-			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
+			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
+				FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK);
 		break;
 	case 10:
 	default:
@@ -589,7 +604,7 @@ static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
 		break;
 	}
 
-	WREG32(FMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
 }
 
 /**
@@ -603,7 +618,7 @@ static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
  */
 static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
 {
-	u32 tmp = RREG32(MC_SHARED_CHMAP);
+	u32 tmp = RREG32(mmMC_SHARED_CHMAP);
 
 	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
 	case 0:
@@ -1100,28 +1115,28 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
 	}
 
 	/* select wm A */
-	arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
+	arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
 	tmp = arb_control3;
 	tmp &= ~LATENCY_WATERMARK_MASK(3);
 	tmp |= LATENCY_WATERMARK_MASK(1);
-	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
-	WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset,
-	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
-		LATENCY_HIGH_WATERMARK(line_time)));
+	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
+	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
+	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT)  |
+		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
 	/* select wm B */
-	tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
+	tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
 	tmp &= ~LATENCY_WATERMARK_MASK(3);
 	tmp |= LATENCY_WATERMARK_MASK(2);
-	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
-	WREG32(DPG_PIPE_LATENCY_CONTROL + amdgpu_crtc->crtc_offset,
-	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
-		LATENCY_HIGH_WATERMARK(line_time)));
+	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
+	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
+	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
+		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
 	/* restore original selection */
-	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);
+	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);
 
 	/* write the priority marks */
-	WREG32(PRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
-	WREG32(PRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);
+	WREG32(mmPRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
+	WREG32(mmPRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);
 
 	/* save values for DPM */
 	amdgpu_crtc->line_time = line_time;
@@ -1139,7 +1154,7 @@ static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
 	/*
 	 * Line Buffer Setup
 	 * There are 3 line buffers, each one shared by 2 display controllers.
-	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
+	 * mmDC_LB_MEMORY_SPLIT controls how that line buffer is shared between
 	 * the display controllers.  The paritioning is done via one of four
 	 * preset allocations specified in bits 21:20:
 	 *  0 - half lb
@@ -1162,14 +1177,14 @@ static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
 		buffer_alloc = 0;
 	}
 
-	WREG32(DC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
+	WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
 	       DC_LB_MEMORY_CONFIG(tmp));
 
-	WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
-	       DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
 	for (i = 0; i < adev->usec_timeout; i++) {
-		if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
-		    DMIF_BUFFERS_ALLOCATED_COMPLETED)
+		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
 			break;
 		udelay(1);
 	}
@@ -1411,12 +1426,12 @@ static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
 
 static const u32 vga_control_regs[6] =
 {
-	AVIVO_D1VGA_CONTROL,
-	AVIVO_D2VGA_CONTROL,
-	EVERGREEN_D3VGA_CONTROL,
-	EVERGREEN_D4VGA_CONTROL,
-	EVERGREEN_D5VGA_CONTROL,
-	EVERGREEN_D6VGA_CONTROL,
+	mmD1VGA_CONTROL,
+	mmD2VGA_CONTROL,
+	mmD3VGA_CONTROL,
+	mmD4VGA_CONTROL,
+	mmD5VGA_CONTROL,
+	mmD6VGA_CONTROL,
 };
 
 static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
@@ -1436,7 +1451,7 @@ static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
 	struct drm_device *dev = crtc->dev;
 	struct amdgpu_device *adev = dev->dev_private;
 
-	WREG32(EVERGREEN_GRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
+	WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
 }
 
 static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
@@ -1452,7 +1467,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
 	struct amdgpu_bo *abo;
 	uint64_t fb_location, tiling_flags;
 	uint32_t fb_format, fb_pitch_pixels, pipe_config;
-	u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
+	u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE);
 	u32 viewport_w, viewport_h;
 	int r;
 	bool bypass_lut = false;
@@ -1495,64 +1510,64 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
 
 	switch (target_fb->pixel_format) {
 	case DRM_FORMAT_C8:
-		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
-			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
+		fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) |
+			     GRPH_FORMAT(GRPH_FORMAT_INDEXED));
 		break;
 	case DRM_FORMAT_XRGB4444:
 	case DRM_FORMAT_ARGB4444:
-		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
-			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB4444));
+		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
+			     GRPH_FORMAT(GRPH_FORMAT_ARGB4444));
 #ifdef __BIG_ENDIAN
-		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
 #endif
 		break;
 	case DRM_FORMAT_XRGB1555:
 	case DRM_FORMAT_ARGB1555:
-		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
-			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
+		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
+			     GRPH_FORMAT(GRPH_FORMAT_ARGB1555));
 #ifdef __BIG_ENDIAN
-		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
 #endif
 		break;
 	case DRM_FORMAT_BGRX5551:
 	case DRM_FORMAT_BGRA5551:
-		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
-			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA5551));
+		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
+			     GRPH_FORMAT(GRPH_FORMAT_BGRA5551));
 #ifdef __BIG_ENDIAN
-		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
 #endif
 		break;
 	case DRM_FORMAT_RGB565:
-		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
-			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
+		fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
+			     GRPH_FORMAT(GRPH_FORMAT_ARGB565));
 #ifdef __BIG_ENDIAN
-		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
 #endif
 		break;
 	case DRM_FORMAT_XRGB8888:
 	case DRM_FORMAT_ARGB8888:
-		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
-			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
+		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
+			     GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
 #ifdef __BIG_ENDIAN
-		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
 #endif
 		break;
 	case DRM_FORMAT_XRGB2101010:
 	case DRM_FORMAT_ARGB2101010:
-		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
-			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB2101010));
+		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
+			     GRPH_FORMAT(GRPH_FORMAT_ARGB2101010));
 #ifdef __BIG_ENDIAN
-		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
 #endif
 		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
 		bypass_lut = true;
 		break;
 	case DRM_FORMAT_BGRX1010102:
 	case DRM_FORMAT_BGRA1010102:
-		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
-			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA1010102));
+		fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
+			     GRPH_FORMAT(GRPH_FORMAT_BGRA1010102));
 #ifdef __BIG_ENDIAN
-		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+		fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
 #endif
 		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
 		bypass_lut = true;
@@ -1572,75 +1587,75 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
 
-		fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
-		fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
-		fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
-		fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
-		fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
-		fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect);
+		fb_format |= GRPH_NUM_BANKS(num_banks);
+		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1);
+		fb_format |= GRPH_TILE_SPLIT(tile_split);
+		fb_format |= GRPH_BANK_WIDTH(bankw);
+		fb_format |= GRPH_BANK_HEIGHT(bankh);
+		fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect);
 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
-		fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
+		fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1);
 	}
 
 	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
-	fb_format |= SI_GRPH_PIPE_CONFIG(pipe_config);
+	fb_format |= GRPH_PIPE_CONFIG(pipe_config);
 
 	dce_v6_0_vga_enable(crtc, false);
 
 	/* Make sure surface address is updated at vertical blank rather than
 	 * horizontal blank
 	 */
-	WREG32(EVERGREEN_GRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
+	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
 
-	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
 	       upper_32_bits(fb_location));
-	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
 	       upper_32_bits(fb_location));
-	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
-	       (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
-	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
-	       (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
-	WREG32(EVERGREEN_GRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
-	WREG32(EVERGREEN_GRPH_SWAP_CONTROL + amdgpu_crtc->crtc_offset, fb_swap);
+	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
+	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+	       (u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
+	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
+	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
 
 	/*
 	 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
 	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
 	 * retain the full precision throughout the pipeline.
 	 */
-	WREG32_P(EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
-		 (bypass_lut ? EVERGREEN_LUT_10BIT_BYPASS_EN : 0),
-		 ~EVERGREEN_LUT_10BIT_BYPASS_EN);
+	WREG32_P(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset,
+		 (bypass_lut ? GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK : 0),
+		 ~GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK);
 
 	if (bypass_lut)
 		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
 
-	WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
-	WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
-	WREG32(EVERGREEN_GRPH_X_START + amdgpu_crtc->crtc_offset, 0);
-	WREG32(EVERGREEN_GRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
-	WREG32(EVERGREEN_GRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
-	WREG32(EVERGREEN_GRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
+	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
+	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
+	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
+	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
+	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
+	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
 
 	fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
-	WREG32(EVERGREEN_GRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
+	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
 
 	dce_v6_0_grph_enable(crtc, true);
 
-	WREG32(EVERGREEN_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
+	WREG32(mmDESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
 		       target_fb->height);
 	x &= ~3;
 	y &= ~1;
-	WREG32(EVERGREEN_VIEWPORT_START + amdgpu_crtc->crtc_offset,
+	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
 	       (x << 16) | y);
 	viewport_w = crtc->mode.hdisplay;
 	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
 
-	WREG32(EVERGREEN_VIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
+	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
 	       (viewport_w << 16) | viewport_h);
 
 	/* set pageflip to happen only at start of vblank interval (front porch) */
-	WREG32(EVERGREEN_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
+	WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
 
 	if (!atomic && fb && fb != crtc->primary->fb) {
 		amdgpu_fb = to_amdgpu_framebuffer(fb);
@@ -1667,10 +1682,10 @@ static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 
 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-		WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset,
-		       EVERGREEN_INTERLEAVE_EN);
+		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
+		       INTERLEAVE_EN);
 	else
-		WREG32(EVERGREEN_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
+		WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
 }
 
 static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
@@ -1683,54 +1698,52 @@ static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
 
 	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
 
-	WREG32(NI_INPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
-	       (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
-		NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
-	WREG32(NI_PRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
-	       NI_GRPH_PRESCALE_BYPASS);
-	WREG32(NI_PRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
-	       NI_OVL_PRESCALE_BYPASS);
-	WREG32(NI_INPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
-	       (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
-		NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));
-
-
+	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
+	       ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
+		(0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
+	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
+	       PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
+	WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
+	       PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
+	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+	       ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
+		(0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
 
-	WREG32(EVERGREEN_DC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
+	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
 
-	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
-	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
-	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
+	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
+	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
+	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
 
-	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
-	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
-	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
+	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
+	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
+	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
 
-	WREG32(EVERGREEN_DC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
-	WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
+	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
+	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
 
-	WREG32(EVERGREEN_DC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
+	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
 	for (i = 0; i < 256; i++) {
-		WREG32(EVERGREEN_DC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
+		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
 		       (amdgpu_crtc->lut_r[i] << 20) |
 		       (amdgpu_crtc->lut_g[i] << 10) |
 		       (amdgpu_crtc->lut_b[i] << 0));
 	}
 
-	WREG32(NI_DEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
-	       (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
-		NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
-		NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
-		NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
-	WREG32(NI_GAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
-	       (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
-		NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
-	WREG32(NI_REGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
-	       (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
-		NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
-	WREG32(NI_OUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
-	       (NI_OUTPUT_CSC_GRPH_MODE(0) |
-		NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
+	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+	       ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
+		(0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
+		ICON_DEGAMMA_MODE(0) |
+		(0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
+	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
+	       ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
+		(0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
+	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
+	       ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
+		(0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
+	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
+	       ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
+		(0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
 	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
 	WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
 
@@ -1809,12 +1822,12 @@ static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	uint32_t cur_lock;
 
-	cur_lock = RREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset);
+	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
 	if (lock)
-		cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK;
+		cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
 	else
-		cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK;
-	WREG32(EVERGREEN_CUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
+		cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
+	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
 }
 
 static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
@@ -1822,9 +1835,9 @@ static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct amdgpu_device *adev = crtc->dev->dev_private;
 
-	WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset,
-		   EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
-		   EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
 
 
 }
@@ -1834,15 +1847,15 @@ static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 	struct amdgpu_device *adev = crtc->dev->dev_private;
 
-	WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
+	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
 	       upper_32_bits(amdgpu_crtc->cursor_addr));
-	WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
+	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
 	       lower_32_bits(amdgpu_crtc->cursor_addr));
 
-	WREG32_IDX(EVERGREEN_CUR_CONTROL + amdgpu_crtc->crtc_offset,
-		   EVERGREEN_CURSOR_EN |
-		   EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
-		   EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+		   CUR_CONTROL__CURSOR_EN_MASK |
+		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
 
 }
 
@@ -1869,9 +1882,9 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
 		y = 0;
 	}
 
-	WREG32(EVERGREEN_CUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
-	WREG32(EVERGREEN_CUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
-	WREG32(EVERGREEN_CUR_SIZE + amdgpu_crtc->crtc_offset,
+	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
+	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
 	       ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
 
 	amdgpu_crtc->cursor_x = x;
@@ -2475,14 +2488,14 @@ static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
 
 	switch (state) {
 	case AMDGPU_IRQ_STATE_DISABLE:
-		interrupt_mask = RREG32(INT_MASK + reg_block);
+		interrupt_mask = RREG32(mmINT_MASK + reg_block);
 		interrupt_mask &= ~VBLANK_INT_MASK;
-		WREG32(INT_MASK + reg_block, interrupt_mask);
+		WREG32(mmINT_MASK + reg_block, interrupt_mask);
 		break;
 	case AMDGPU_IRQ_STATE_ENABLE:
-		interrupt_mask = RREG32(INT_MASK + reg_block);
+		interrupt_mask = RREG32(mmINT_MASK + reg_block);
 		interrupt_mask |= VBLANK_INT_MASK;
-		WREG32(INT_MASK + reg_block, interrupt_mask);
+		WREG32(mmINT_MASK + reg_block, interrupt_mask);
 		break;
 	default:
 		break;
@@ -2510,14 +2523,14 @@ static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
 
 	switch (state) {
 	case AMDGPU_IRQ_STATE_DISABLE:
-		dc_hpd_int_cntl = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type]);
+		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
 		dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
-		WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
+		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
 		break;
 	case AMDGPU_IRQ_STATE_ENABLE:
-		dc_hpd_int_cntl = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type]);
+		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
 		dc_hpd_int_cntl |= DC_HPDx_INT_EN;
-		WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
+		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
 		break;
 	default:
 		break;
@@ -2585,7 +2598,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
 	switch (entry->src_data) {
 	case 0: /* vblank */
 		if (disp_int & interrupt_status_offsets[crtc].vblank)
-			WREG32(VBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
+			WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
 		else
 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
@@ -2596,7 +2609,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
 		break;
 	case 1: /* vline */
 		if (disp_int & interrupt_status_offsets[crtc].vline)
-			WREG32(VLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
+			WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
 		else
 			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
@@ -2622,12 +2635,12 @@ static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
 		return -EINVAL;
 	}
 
-	reg = RREG32(GRPH_INT_CONTROL + crtc_offsets[type]);
+	reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
 	if (state == AMDGPU_IRQ_STATE_DISABLE)
-		WREG32(GRPH_INT_CONTROL + crtc_offsets[type],
+		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
 		       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
 	else
-		WREG32(GRPH_INT_CONTROL + crtc_offsets[type],
+		WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
 		       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
 
 	return 0;
@@ -2650,9 +2663,9 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
 		return -EINVAL;
 	}
 
-	if (RREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id]) &
+	if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
 	    GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
-		WREG32(GRPH_INT_STATUS + crtc_offsets[crtc_id],
+		WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
 		       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
 
 	/* IRQ could occur when in initial stage */
@@ -2703,9 +2716,9 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
 	mask = interrupt_status_offsets[hpd].hpd;
 
 	if (disp_int & mask) {
-		tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+		tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
 		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
-		WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
+		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 		schedule_work(&adev->hotplug_work);
 		DRM_INFO("IH: HPD%d\n", hpd + 1);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_enums.h b/drivers/gpu/drm/amd/amdgpu/si_enums.h
index 3ecd36f30e2a..fde2086246fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_enums.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_enums.h
@@ -23,6 +23,84 @@
 #ifndef SI_ENUMS_H
 #define SI_ENUMS_H
 
+#define VBLANK_INT_MASK                (1 << 0)
+#define DC_HPDx_INT_EN                 (1 << 16)
+#define VBLANK_ACK                     (1 << 4)
+#define VLINE_ACK                      (1 << 4)
+
+#define CURSOR_WIDTH 64
+#define CURSOR_HEIGHT 64
+
+#define VGA_VSTATUS_CNTL               0xFFFCFFFF
+#define PRIORITY_MARK_MASK             0x7fff
+#define PRIORITY_OFF                   (1 << 16)
+#define PRIORITY_ALWAYS_ON             (1 << 20)
+#define INTERLEAVE_EN                  (1 << 0)
+
+#define LATENCY_WATERMARK_MASK(x)      ((x) << 16)
+#define DC_LB_MEMORY_CONFIG(x)         ((x) << 20)
+#define ICON_DEGAMMA_MODE(x)           (((x) & 0x3) << 8)
+
+#define GRPH_ENDIAN_SWAP(x)            (((x) & 0x3) << 0)
+#define GRPH_ENDIAN_NONE               0
+#define GRPH_ENDIAN_8IN16              1
+#define GRPH_ENDIAN_8IN32              2
+#define GRPH_ENDIAN_8IN64              3
+
+#define GRPH_DEPTH(x)                  (((x) & 0x3) << 0)
+#define GRPH_DEPTH_8BPP                0
+#define GRPH_DEPTH_16BPP               1
+#define GRPH_DEPTH_32BPP               2
+
+#define GRPH_FORMAT(x)                 (((x) & 0x7) << 8)
+#define GRPH_FORMAT_INDEXED            0
+#define GRPH_FORMAT_ARGB1555           0
+#define GRPH_FORMAT_ARGB565            1
+#define GRPH_FORMAT_ARGB4444           2
+#define GRPH_FORMAT_AI88               3
+#define GRPH_FORMAT_MONO16             4
+#define GRPH_FORMAT_BGRA5551           5
+#define GRPH_FORMAT_ARGB8888           0
+#define GRPH_FORMAT_ARGB2101010        1
+#define GRPH_FORMAT_32BPP_DIG          2
+#define GRPH_FORMAT_8B_ARGB2101010     3
+#define GRPH_FORMAT_BGRA1010102        4
+#define GRPH_FORMAT_8B_BGRA1010102     5
+#define GRPH_FORMAT_RGB111110          6
+#define GRPH_FORMAT_BGR101111          7
+
+#define GRPH_NUM_BANKS(x)              (((x) & 0x3) << 2)
+#define GRPH_ARRAY_MODE(x)             (((x) & 0x7) << 20)
+#define GRPH_ARRAY_LINEAR_GENERAL      0
+#define GRPH_ARRAY_LINEAR_ALIGNED      1
+#define GRPH_ARRAY_1D_TILED_THIN1      2
+#define GRPH_ARRAY_2D_TILED_THIN1      4
+#define GRPH_TILE_SPLIT(x)             (((x) & 0x7) << 13)
+#define GRPH_BANK_WIDTH(x)             (((x) & 0x3) << 6)
+#define GRPH_BANK_HEIGHT(x)            (((x) & 0x3) << 11)
+#define GRPH_MACRO_TILE_ASPECT(x)      (((x) & 0x3) << 18)
+#define GRPH_ARRAY_MODE(x)             (((x) & 0x7) << 20)
+#define GRPH_PIPE_CONFIG(x)                   (((x) & 0x1f) << 24)
+
+#define CURSOR_EN                      (1 << 0)
+#define CURSOR_MODE(x)                 (((x) & 0x3) << 8)
+#define CURSOR_MONO                    0
+#define CURSOR_24_1                    1
+#define CURSOR_24_8_PRE_MULT           2
+#define CURSOR_24_8_UNPRE_MULT         3
+#define CURSOR_2X_MAGNIFY              (1 << 16)
+#define CURSOR_FORCE_MC_ON             (1 << 20)
+#define CURSOR_URGENT_CONTROL(x)       (((x) & 0x7) << 24)
+#define CURSOR_URGENT_ALWAYS           0
+#define CURSOR_URGENT_1_8              1
+#define CURSOR_URGENT_1_4              2
+#define CURSOR_URGENT_3_8              3
+#define CURSOR_URGENT_1_2              4
+#define CURSOR_UPDATE_PENDING          (1 << 0)
+#define CURSOR_UPDATE_TAKEN            (1 << 1)
+#define CURSOR_UPDATE_LOCK             (1 << 16)
+#define CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
+
 #define AMDGPU_NUM_OF_VMIDS                     8
 #define SI_CRTC0_REGISTER_OFFSET                0
 #define SI_CRTC1_REGISTER_OFFSET                0x300
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h
index a17973bb63a6..ae798f768853 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h
@@ -4442,4 +4442,16 @@
 #define mmXDMA_TEST_DEBUG_DATA 0x041D
 #define mmXDMA_TEST_DEBUG_INDEX 0x041C
 
+/* Registers that spilled out of sid.h */
+#define mmDATA_FORMAT                              0x1AC0
+#define mmDESKTOP_HEIGHT                           0x1AC1
+#define mmDC_LB_MEMORY_SPLIT                       0x1AC3
+#define mmPRIORITY_A_CNT                           0x1AC6
+#define mmPRIORITY_B_CNT                           0x1AC7
+#define mmDPG_PIPE_ARBITRATION_CONTROL3            0x1B32
+#define mmINT_MASK                                 0x1AD0
+#define mmVLINE_STATUS                             0x1AEE
+#define mmVBLANK_STATUS                            0x1AEF
+
+
 #endif
-- 
2.10.0


